Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/3c59x.c | 6
-rw-r--r--  drivers/net/8139cp.c | 10
-rw-r--r--  drivers/net/Kconfig | 42
-rw-r--r--  drivers/net/Makefile | 2
-rw-r--r--  drivers/net/atarilance.c | 2
-rw-r--r--  drivers/net/atl1c/atl1c.h | 2
-rw-r--r--  drivers/net/atl1c/atl1c_main.c | 6
-rw-r--r--  drivers/net/atlx/atl1.c | 13
-rw-r--r--  drivers/net/atlx/atl1.h | 9
-rw-r--r--  drivers/net/atlx/atlx.c | 4
-rw-r--r--  drivers/net/benet/be_cmds.c | 36
-rw-r--r--  drivers/net/benet/be_cmds.h | 2
-rw-r--r--  drivers/net/benet/be_main.c | 49
-rw-r--r--  drivers/net/bfin_mac.c | 145
-rw-r--r--  drivers/net/bfin_mac.h | 2
-rw-r--r--  drivers/net/bnx2x/bnx2x.h | 9
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.c | 5
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.h | 55
-rw-r--r--  drivers/net/bnx2x/bnx2x_hsi.h | 9
-rw-r--r--  drivers/net/bnx2x/bnx2x_init_ops.h | 34
-rw-r--r--  drivers/net/bnx2x/bnx2x_link.c | 194
-rw-r--r--  drivers/net/bnx2x/bnx2x_link.h | 15
-rw-r--r--  drivers/net/bnx2x/bnx2x_main.c | 57
-rw-r--r--  drivers/net/bonding/bond_main.c | 6
-rw-r--r--  drivers/net/caif/Kconfig | 7
-rw-r--r--  drivers/net/caif/Makefile | 4
-rw-r--r--  drivers/net/caif/caif_shm_u5500.c | 129
-rw-r--r--  drivers/net/caif/caif_shmcore.c | 744
-rw-r--r--  drivers/net/caif/caif_spi.c | 61
-rw-r--r--  drivers/net/caif/caif_spi_slave.c | 13
-rw-r--r--  drivers/net/can/Kconfig | 8
-rw-r--r--  drivers/net/can/Makefile | 1
-rw-r--r--  drivers/net/can/at91_can.c | 97
-rw-r--r--  drivers/net/can/flexcan.c | 3
-rw-r--r--  drivers/net/can/mcp251x.c | 3
-rw-r--r--  drivers/net/can/pch_can.c | 1463
-rw-r--r--  drivers/net/can/sja1000/Kconfig | 12
-rw-r--r--  drivers/net/can/sja1000/Makefile | 1
-rw-r--r--  drivers/net/can/sja1000/tscan1.c | 216
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c | 9
-rw-r--r--  drivers/net/cxgb3/sge.c | 4
-rw-r--r--  drivers/net/cxgb4/cxgb4.h | 1
-rw-r--r--  drivers/net/cxgb4/cxgb4_main.c | 32
-rw-r--r--  drivers/net/cxgb4/sge.c | 23
-rw-r--r--  drivers/net/cxgb4vf/cxgb4vf_main.c | 43
-rw-r--r--  drivers/net/cxgb4vf/sge.c | 122
-rw-r--r--  drivers/net/cxgb4vf/t4vf_common.h | 1
-rw-r--r--  drivers/net/cxgb4vf/t4vf_hw.c | 19
-rw-r--r--  drivers/net/davinci_cpdma.c | 965
-rw-r--r--  drivers/net/davinci_cpdma.h | 108
-rw-r--r--  drivers/net/davinci_emac.c | 1338
-rw-r--r--  drivers/net/davinci_mdio.c | 475
-rw-r--r--  drivers/net/e1000/e1000_main.c | 2
-rw-r--r--  drivers/net/e1000e/82571.c | 38
-rw-r--r--  drivers/net/e1000e/e1000.h | 3
-rw-r--r--  drivers/net/e1000e/netdev.c | 29
-rw-r--r--  drivers/net/ehea/ehea.h | 2
-rw-r--r--  drivers/net/ehea/ehea_main.c | 42
-rw-r--r--  drivers/net/gianfar.c | 13
-rw-r--r--  drivers/net/gianfar_ethtool.c | 5
-rw-r--r--  drivers/net/ibm_newemac/core.c | 1
-rw-r--r--  drivers/net/igb/igb_main.c | 1
-rw-r--r--  drivers/net/igbvf/netdev.c | 8
-rw-r--r--  drivers/net/ipg.c | 6
-rw-r--r--  drivers/net/ixgb/ixgb_main.c | 1
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb.c | 39
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb.h | 5
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_82599.c | 5
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_82599.h | 3
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 72
-rw-r--r--  drivers/net/jme.c | 49
-rw-r--r--  drivers/net/lib8390.c | 1
-rw-r--r--  drivers/net/macb.c | 27
-rw-r--r--  drivers/net/mlx4/en_main.c | 15
-rw-r--r--  drivers/net/mlx4/en_netdev.c | 10
-rw-r--r--  drivers/net/mlx4/en_port.c | 4
-rw-r--r--  drivers/net/mlx4/en_port.h | 3
-rw-r--r--  drivers/net/mlx4/fw.c | 3
-rw-r--r--  drivers/net/mlx4/icm.c | 28
-rw-r--r--  drivers/net/mlx4/icm.h | 2
-rw-r--r--  drivers/net/mlx4/intf.c | 21
-rw-r--r--  drivers/net/mlx4/main.c | 4
-rw-r--r--  drivers/net/mlx4/mlx4_en.h | 1
-rw-r--r--  drivers/net/mlx4/port.c | 30
-rw-r--r--  drivers/net/netxen/netxen_nic_ctx.c | 15
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c | 11
-rw-r--r--  drivers/net/pcmcia/axnet_cs.c | 30
-rw-r--r--  drivers/net/pcmcia/pcnet_cs.c | 1
-rw-r--r--  drivers/net/phy/marvell.c | 18
-rw-r--r--  drivers/net/phy/phy.c | 13
-rw-r--r--  drivers/net/phy/phy_device.c | 19
-rw-r--r--  drivers/net/qlcnic/qlcnic.h | 7
-rw-r--r--  drivers/net/qlcnic/qlcnic_ethtool.c | 23
-rw-r--r--  drivers/net/qlcnic/qlcnic_main.c | 20
-rw-r--r--  drivers/net/qlge/qlge.h | 12
-rw-r--r--  drivers/net/qlge/qlge_main.c | 24
-rw-r--r--  drivers/net/qlge/qlge_mpi.c | 6
-rw-r--r--  drivers/net/r8169.c | 12
-rw-r--r--  drivers/net/sb1000.c | 6
-rw-r--r--  drivers/net/sgiseeq.c | 2
-rw-r--r--  drivers/net/skge.c | 1
-rw-r--r--  drivers/net/slhc.c | 15
-rw-r--r--  drivers/net/smsc911x.c | 3
-rw-r--r--  drivers/net/smsc911x.h | 13
-rw-r--r--  drivers/net/stmmac/stmmac_main.c | 40
-rw-r--r--  drivers/net/tg3.c | 10
-rw-r--r--  drivers/net/tokenring/tms380tr.c | 2
-rw-r--r--  drivers/net/tulip/de2104x.c | 1
-rw-r--r--  drivers/net/typhoon.c | 92
-rw-r--r--  drivers/net/ucc_geth.c | 25
-rw-r--r--  drivers/net/usb/usbnet.c | 11
-rw-r--r--  drivers/net/virtio_net.c | 12
-rw-r--r--  drivers/net/vmxnet3/upt1_defs.h | 8
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_defs.h | 6
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 24
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethtool.c | 14
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h | 27
-rw-r--r--  drivers/net/vxge/vxge-config.c | 332
-rw-r--r--  drivers/net/vxge/vxge-config.h | 227
-rw-r--r--  drivers/net/vxge/vxge-ethtool.c | 2
-rw-r--r--  drivers/net/vxge/vxge-main.c | 64
-rw-r--r--  drivers/net/vxge/vxge-main.h | 59
-rw-r--r--  drivers/net/vxge/vxge-traffic.c | 101
-rw-r--r--  drivers/net/vxge/vxge-traffic.h | 134
-rw-r--r--  drivers/net/wireless/rt2x00/Kconfig | 3
-rw-r--r--  drivers/net/xen-netfront.c | 2
-rw-r--r--  drivers/net/xilinx_emaclite.c | 8
127 files changed, 5772 insertions, 2772 deletions
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index e1da258bbfb7..0a92436f0538 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -699,7 +699,8 @@ DEFINE_WINDOW_IO(32)
 #define DEVICE_PCI(dev) NULL
 #endif
 
-#define VORTEX_PCI(vp) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL)
+#define VORTEX_PCI(vp)							\
+	((struct pci_dev *) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL))
 
 #ifdef CONFIG_EISA
 #define DEVICE_EISA(dev) (((dev)->bus == &eisa_bus_type) ? to_eisa_device((dev)) : NULL)
@@ -707,7 +708,8 @@ DEFINE_WINDOW_IO(32)
 #define DEVICE_EISA(dev) NULL
 #endif
 
-#define VORTEX_EISA(vp) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL)
+#define VORTEX_EISA(vp)							\
+	((struct eisa_device *) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL))
 
 /* The action to take with a media selection timer tick.
    Note that we deviate from the 3Com order by checking 10base2 before AUI.
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index ac422cd332ea..dd16e83933a2 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -490,13 +490,11 @@ static inline unsigned int cp_rx_csum_ok (u32 status)
 {
 	unsigned int protocol = (status >> 16) & 0x3;
 
-	if (likely((protocol == RxProtoTCP) && (!(status & TCPFail))))
+	if (((protocol == RxProtoTCP) && !(status & TCPFail)) ||
+	    ((protocol == RxProtoUDP) && !(status & UDPFail)))
 		return 1;
-	else if ((protocol == RxProtoUDP) && (!(status & UDPFail)))
-		return 1;
-	else if ((protocol == RxProtoIP) && (!(status & IPFail)))
-		return 1;
-	return 0;
+	else
+		return 0;
 }
 
 static int cp_rx_poll(struct napi_struct *napi, int budget)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 77c1fab7d774..f6668cdaac85 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -883,14 +883,6 @@ config BFIN_RX_DESC_NUM
 	help
 	  Set the number of buffer packets used in driver.
 
-config BFIN_MAC_RMII
-	bool "RMII PHY Interface"
-	depends on BFIN_MAC
-	default y if BFIN527_EZKIT
-	default n if BFIN537_STAMP
-	help
-	  Use Reduced PHY MII Interface
-
 config BFIN_MAC_USE_HWSTAMP
 	bool "Use IEEE 1588 hwstamp"
 	depends on BFIN_MAC && BF518
@@ -954,6 +946,8 @@ config NET_NETX
 config TI_DAVINCI_EMAC
 	tristate "TI DaVinci EMAC Support"
 	depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+	select TI_DAVINCI_MDIO
+	select TI_DAVINCI_CPDMA
 	select PHYLIB
 	help
 	  This driver supports TI's DaVinci Ethernet .
@@ -961,6 +955,25 @@ config TI_DAVINCI_EMAC
 	  To compile this driver as a module, choose M here: the module
 	  will be called davinci_emac_driver.  This is recommended.
 
+config TI_DAVINCI_MDIO
+	tristate "TI DaVinci MDIO Support"
+	depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+	select PHYLIB
+	help
+	  This driver supports TI's DaVinci MDIO module.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called davinci_mdio.  This is recommended.
+
+config TI_DAVINCI_CPDMA
+	tristate "TI DaVinci CPDMA Support"
+	depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+	help
+	  This driver supports TI's DaVinci CPDMA dma engine.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called davinci_cpdma.  This is recommended.
+
 config DM9000
 	tristate "DM9000 support"
 	depends on ARM || BLACKFIN || MIPS
@@ -1028,7 +1041,7 @@ config SMC911X
 	tristate "SMSC LAN911[5678] support"
 	select CRC32
 	select MII
-	depends on ARM || SUPERH
+	depends on ARM || SUPERH || MN10300
 	help
 	  This is a driver for SMSC's LAN911x series of Ethernet chipsets
 	  including the new LAN9115, LAN9116, LAN9117, and LAN9118.
@@ -1042,7 +1055,7 @@ config SMC911X
 
 config SMSC911X
 	tristate "SMSC LAN911x/LAN921x families embedded ethernet support"
-	depends on ARM || SUPERH || BLACKFIN || MIPS
+	depends on ARM || SUPERH || BLACKFIN || MIPS || MN10300
 	select CRC32
 	select MII
 	select PHYLIB
@@ -1054,6 +1067,14 @@ config SMSC911X
 	  <file:Documentation/networking/net-modules.txt>. The module
 	  will be called smsc911x.
 
+config SMSC911X_ARCH_HOOKS
+	def_bool n
+	depends on SMSC911X
+	help
+	  If the arch enables this, it allows the arch to implement various
+	  hooks for more comprehensive interrupt control and also to override
+	  the source of the MAC address.
+
 config NET_VENDOR_RACAL
 	bool "Racal-Interlan (Micom) NI cards"
 	depends on ISA
@@ -2520,6 +2541,7 @@ source "drivers/net/stmmac/Kconfig"
 config PCH_GBE
 	tristate "PCH Gigabit Ethernet"
 	depends on PCI
+	select MII
 	---help---
 	  This is a gigabit ethernet driver for Topcliff PCH.
 	  Topcliff PCH is the platform controller hub that is used in Intel's
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index b8bf93d4a132..652fc6b98039 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -7,6 +7,8 @@ obj-$(CONFIG_MDIO) += mdio.o
 obj-$(CONFIG_PHYLIB) += phy/
 
 obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o
+obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
+obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
 
 obj-$(CONFIG_E1000) += e1000/
 obj-$(CONFIG_E1000E) += e1000e/
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index 3134e5326231..8cb27cb7bca1 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -407,7 +407,7 @@ static noinline int __init addr_accessible(volatile void *regp, int wordflag,
 				   int writeflag)
 {
 	int	ret;
-	long	flags;
+	unsigned long	flags;
 	long	*vbr, save_berr;
 
 	local_irq_save(flags);
diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h
index ef4115b897bf..9ab58097fa2e 100644
--- a/drivers/net/atl1c/atl1c.h
+++ b/drivers/net/atl1c/atl1c.h
@@ -631,8 +631,6 @@ struct atl1c_adapter {
 extern char atl1c_driver_name[];
 extern char atl1c_driver_version[];
 
-extern int atl1c_up(struct atl1c_adapter *adapter);
-extern void atl1c_down(struct atl1c_adapter *adapter);
 extern void atl1c_reinit_locked(struct atl1c_adapter *adapter);
 extern s32 atl1c_reset_hw(struct atl1c_hw *hw);
 extern void atl1c_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 99ffcf667d1f..09b099bfab2b 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -66,6 +66,8 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup);
 static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter);
 static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que,
 		   int *work_done, int work_to_do);
+static int atl1c_up(struct atl1c_adapter *adapter);
+static void atl1c_down(struct atl1c_adapter *adapter);
 
 static const u16 atl1c_pay_load_size[] = {
 	128, 256, 512, 1024, 2048, 4096,
@@ -2309,7 +2311,7 @@ static int atl1c_request_irq(struct atl1c_adapter *adapter)
 	return err;
 }
 
-int atl1c_up(struct atl1c_adapter *adapter)
+static int atl1c_up(struct atl1c_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
 	int num;
@@ -2351,7 +2353,7 @@ err_alloc_rx:
 	return err;
 }
 
-void atl1c_down(struct atl1c_adapter *adapter)
+static void atl1c_down(struct atl1c_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
 
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index dbd27b8e66bd..53363108994e 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -91,6 +91,8 @@ MODULE_VERSION(ATLX_DRIVER_VERSION);
 /* Temporary hack for merging atl1 and atl2 */
 #include "atlx.c"
 
+static const struct ethtool_ops atl1_ethtool_ops;
+
 /*
  * This is the only thing that needs to be changed to adjust the
  * maximum number of ports that the driver can manage.
@@ -353,7 +355,7 @@ static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value)
  * hw - Struct containing variables accessed by shared code
  * reg_addr - address of the PHY register to read
  */
-s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
+static s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
 {
 	u32 val;
 	int i;
@@ -553,7 +555,7 @@ static s32 atl1_read_mac_addr(struct atl1_hw *hw)
  * 1. calcu 32bit CRC for multicast address
  * 2. reverse crc with MSB to LSB
  */
-u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
+static u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
 {
 	u32 crc32, value = 0;
 	int i;
@@ -570,7 +572,7 @@ u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
  * hw - Struct containing variables accessed by shared code
  * hash_value - Multicast address hash value
  */
-void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
+static void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
 {
 	u32 hash_bit, hash_reg;
 	u32 mta;
@@ -914,7 +916,7 @@ static s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex
 	return 0;
 }
 
-void atl1_set_mac_addr(struct atl1_hw *hw)
+static void atl1_set_mac_addr(struct atl1_hw *hw)
 {
 	u32 value;
 	/*
@@ -3041,7 +3043,6 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
 	atl1_pcie_patch(adapter);
 	/* assume we have no link for now */
 	netif_carrier_off(netdev);
-	netif_stop_queue(netdev);
 
 	setup_timer(&adapter->phy_config_timer, atl1_phy_config,
 			(unsigned long)adapter);
@@ -3658,7 +3659,7 @@ static int atl1_nway_reset(struct net_device *netdev)
 	return 0;
 }
 
-const struct ethtool_ops atl1_ethtool_ops = {
+static const struct ethtool_ops atl1_ethtool_ops = {
 	.get_settings		= atl1_get_settings,
 	.set_settings		= atl1_set_settings,
 	.get_drvinfo		= atl1_get_drvinfo,
diff --git a/drivers/net/atlx/atl1.h b/drivers/net/atlx/atl1.h
index 9c0ddb273ac8..68de8cbfb3ec 100644
--- a/drivers/net/atlx/atl1.h
+++ b/drivers/net/atlx/atl1.h
@@ -56,16 +56,13 @@ struct atl1_adapter;
 struct atl1_hw;
 
 /* function prototypes needed by multiple files */
-u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
-void atl1_hash_set(struct atl1_hw *hw, u32 hash_value);
-s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data);
-void atl1_set_mac_addr(struct atl1_hw *hw);
+static u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
+static void atl1_hash_set(struct atl1_hw *hw, u32 hash_value);
+static void atl1_set_mac_addr(struct atl1_hw *hw);
 static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
 	int cmd);
 static u32 atl1_check_link(struct atl1_adapter *adapter);
 
-extern const struct ethtool_ops atl1_ethtool_ops;
-
 /* hardware definitions specific to L1 */
 
 /* Block IDLE Status Register */
diff --git a/drivers/net/atlx/atlx.c b/drivers/net/atlx/atlx.c
index f979ea2d6d3c..afb7f7dd1bb1 100644
--- a/drivers/net/atlx/atlx.c
+++ b/drivers/net/atlx/atlx.c
@@ -41,6 +41,10 @@
 
 #include "atlx.h"
 
+static s32 atlx_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data);
+static u32 atlx_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
+static void atlx_set_mac_addr(struct atl1_hw *hw);
+
 static struct atlx_spi_flash_dev flash_table[] = {
 /*	MFR_NAME  WRSR  READ  PRGM  WREN  WRDI  RDSR  RDID  SEC_ERS CHIP_ERS */
 	{"Atmel", 0x00, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52,   0x62},
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 1e7f305ed00b..36eca1ce75d4 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1471,42 +1471,6 @@ err:
 	return status;
 }
 
-/* Uses sync mcc */
-int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
-			u8 *connector)
-{
-	struct be_mcc_wrb *wrb;
-	struct be_cmd_req_port_type *req;
-	int status;
-
-	spin_lock_bh(&adapter->mcc_lock);
-
-	wrb = wrb_from_mccq(adapter);
-	if (!wrb) {
-		status = -EBUSY;
-		goto err;
-	}
-	req = embedded_payload(wrb);
-
-	be_wrb_hdr_prepare(wrb, sizeof(struct be_cmd_resp_port_type), true, 0,
-			OPCODE_COMMON_READ_TRANSRECV_DATA);
-
-	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-			OPCODE_COMMON_READ_TRANSRECV_DATA, sizeof(*req));
-
-	req->port = cpu_to_le32(port);
-	req->page_num = cpu_to_le32(TR_PAGE_A0);
-	status = be_mcc_notify_wait(adapter);
-	if (!status) {
-		struct be_cmd_resp_port_type *resp = embedded_payload(wrb);
-		*connector = resp->data.connector;
-	}
-
-err:
-	spin_unlock_bh(&adapter->mcc_lock);
-	return status;
-}
-
 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
 			u32 flash_type, u32 flash_opcode, u32 buf_size)
 {
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index c7f6cdfe1c73..8469ff061f30 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -1022,8 +1022,6 @@ extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
 			u8 port_num, u8 beacon, u8 status, u8 state);
 extern int be_cmd_get_beacon_state(struct be_adapter *adapter,
 			u8 port_num, u32 *state);
-extern int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
-			u8 *connector);
 extern int be_cmd_write_flashrom(struct be_adapter *adapter,
 			struct be_dma_mem *cmd, u32 flash_oper,
 			u32 flash_opcode, u32 buf_size);
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 45b1f6635282..c36cd2ffbadc 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -849,20 +849,16 @@ static void be_rx_stats_update(struct be_rx_obj *rxo,
 		stats->rx_mcast_pkts++;
 }
 
-static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
+static inline bool csum_passed(struct be_eth_rx_compl *rxcp)
 {
-	u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;
+	u8 l4_cksm, ipv6, ipcksm;
 
 	l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
 	ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
-	ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
-	if (ip_version) {
-		tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
-		udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
-	}
-	ipv6_chk = (ip_version && (tcpf || udpf));
+	ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
 
-	return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
+	/* Ignore ipcksm for ipv6 pkts */
+	return l4_cksm && (ipcksm || ipv6);
 }
 
 static struct be_rx_page_info *
@@ -1017,10 +1013,10 @@ static void be_rx_compl_process(struct be_adapter *adapter,
 
 	skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);
 
-	if (do_pkt_csum(rxcp, adapter->rx_csum))
-		skb_checksum_none_assert(skb);
-	else
+	if (likely(adapter->rx_csum && csum_passed(rxcp)))
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	else
+		skb_checksum_none_assert(skb);
 
 	skb->truesize = skb->len + sizeof(struct sk_buff);
 	skb->protocol = eth_type_trans(skb, adapter->netdev);
@@ -1674,7 +1670,7 @@ static inline bool do_gro(struct be_adapter *adapter, struct be_rx_obj *rxo,
 	return (tcp_frame && !err) ? true : false;
 }
 
-int be_poll_rx(struct napi_struct *napi, int budget)
+static int be_poll_rx(struct napi_struct *napi, int budget)
 {
 	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
 	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
@@ -1806,6 +1802,20 @@ static void be_worker(struct work_struct *work)
 	struct be_rx_obj *rxo;
 	int i;
 
+	/* when interrupts are not yet enabled, just reap any pending
+	 * mcc completions */
+	if (!netif_running(adapter->netdev)) {
+		int mcc_compl, status = 0;
+
+		mcc_compl = be_process_mcc(adapter, &status);
+
+		if (mcc_compl) {
+			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
+			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
+		}
+		goto reschedule;
+	}
+
 	if (!adapter->stats_ioctl_sent)
 		be_cmd_get_stats(adapter, &adapter->stats_cmd);
 
@@ -1824,6 +1834,7 @@ static void be_worker(struct work_struct *work)
 	if (!adapter->ue_detected)
 		be_detect_dump_ue(adapter);
 
+reschedule:
 	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
 }
 
@@ -2019,8 +2030,6 @@ static int be_close(struct net_device *netdev)
 	struct be_eq_obj *tx_eq = &adapter->tx_eq;
 	int vec, i;
 
-	cancel_delayed_work_sync(&adapter->work);
-
 	be_async_mcc_disable(adapter);
 
 	netif_stop_queue(netdev);
@@ -2085,8 +2094,6 @@ static int be_open(struct net_device *netdev)
 	/* Now that interrupts are on we can process async mcc */
 	be_async_mcc_enable(adapter);
 
-	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
-
 	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
 			&link_speed);
 	if (status)
@@ -2299,9 +2306,6 @@ static int be_clear(struct be_adapter *adapter)
 
 
 #define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
-char flash_cookie[2][16] =      {"*** SE FLAS",
-                                "H DIRECTORY *** "};
-
 static bool be_flash_redboot(struct be_adapter *adapter,
 			const u8 *p, u32 img_start, int image_size,
 			int hdr_size)
@@ -2559,7 +2563,6 @@ static void be_netdev_init(struct net_device *netdev)
 	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
 		BE_NAPI_WEIGHT);
 
-	netif_carrier_off(netdev);
 	netif_stop_queue(netdev);
 }
 
@@ -2715,6 +2718,8 @@ static void __devexit be_remove(struct pci_dev *pdev)
 	if (!adapter)
 		return;
 
+	cancel_delayed_work_sync(&adapter->work);
+
 	unregister_netdev(adapter->netdev);
 
 	be_clear(adapter);
@@ -2868,8 +2873,10 @@ static int __devinit be_probe(struct pci_dev *pdev,
 	status = register_netdev(netdev);
 	if (status != 0)
 		goto unsetup;
+	netif_carrier_off(netdev);
 
 	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
+	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
 	return 0;
 
 unsetup:
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index f7233191162b..ce1e5e9d06f6 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -1,7 +1,7 @@
 /*
  * Blackfin On-Chip MAC Driver
  *
- * Copyright 2004-2007 Analog Devices Inc.
+ * Copyright 2004-2010 Analog Devices Inc.
  *
  * Enter bugs at http://blackfin.uclinux.org/
  *
@@ -23,7 +23,6 @@
 #include <linux/device.h>
 #include <linux/spinlock.h>
 #include <linux/mii.h>
-#include <linux/phy.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
@@ -76,12 +75,6 @@ static struct net_dma_desc_tx *current_tx_ptr;
 static struct net_dma_desc_tx *tx_desc;
 static struct net_dma_desc_rx *rx_desc;
 
-#if defined(CONFIG_BFIN_MAC_RMII)
-static u16 pin_req[] = P_RMII0;
-#else
-static u16 pin_req[] = P_MII0;
-#endif
-
 static void desc_list_free(void)
 {
 	struct net_dma_desc_rx *r;
@@ -347,23 +340,23 @@ static void bfin_mac_adjust_link(struct net_device *dev)
 	}
 
 	if (phydev->speed != lp->old_speed) {
-#if defined(CONFIG_BFIN_MAC_RMII)
-		u32 opmode = bfin_read_EMAC_OPMODE();
-		switch (phydev->speed) {
-		case 10:
-			opmode |= RMII_10;
-			break;
-		case 100:
-			opmode &= ~(RMII_10);
-			break;
-		default:
-			printk(KERN_WARNING
-				"%s: Ack! Speed (%d) is not 10/100!\n",
-				DRV_NAME, phydev->speed);
-			break;
+		if (phydev->interface == PHY_INTERFACE_MODE_RMII) {
+			u32 opmode = bfin_read_EMAC_OPMODE();
+			switch (phydev->speed) {
+			case 10:
+				opmode |= RMII_10;
+				break;
+			case 100:
+				opmode &= ~RMII_10;
+				break;
+			default:
+				printk(KERN_WARNING
+					"%s: Ack! Speed (%d) is not 10/100!\n",
+					DRV_NAME, phydev->speed);
+				break;
+			}
+			bfin_write_EMAC_OPMODE(opmode);
 		}
-		bfin_write_EMAC_OPMODE(opmode);
-#endif
 
 		new_state = 1;
 		lp->old_speed = phydev->speed;
@@ -392,7 +385,7 @@ static void bfin_mac_adjust_link(struct net_device *dev)
 /* MDC  = 2.5 MHz */
 #define MDC_CLK 2500000
 
-static int mii_probe(struct net_device *dev)
+static int mii_probe(struct net_device *dev, int phy_mode)
 {
 	struct bfin_mac_local *lp = netdev_priv(dev);
 	struct phy_device *phydev = NULL;
@@ -411,8 +404,8 @@ static int mii_probe(struct net_device *dev)
 	sysctl = (sysctl & ~MDCDIV) | SET_MDCDIV(mdc_div);
 	bfin_write_EMAC_SYSCTL(sysctl);
 
-	/* search for connect PHY device */
-	for (i = 0; i < PHY_MAX_ADDR; i++) {
+	/* search for connected PHY device */
+	for (i = 0; i < PHY_MAX_ADDR; ++i) {
 		struct phy_device *const tmp_phydev = lp->mii_bus->phy_map[i];
 
 		if (!tmp_phydev)
@@ -429,13 +422,14 @@ static int mii_probe(struct net_device *dev)
 		return -ENODEV;
 	}
 
-#if defined(CONFIG_BFIN_MAC_RMII)
-	phydev = phy_connect(dev, dev_name(&phydev->dev), &bfin_mac_adjust_link,
-			0, PHY_INTERFACE_MODE_RMII);
-#else
+	if (phy_mode != PHY_INTERFACE_MODE_RMII &&
+	    phy_mode != PHY_INTERFACE_MODE_MII) {
+		printk(KERN_INFO "%s: Invalid phy interface mode\n", dev->name);
+		return -EINVAL;
+	}
+
 	phydev = phy_connect(dev, dev_name(&phydev->dev), &bfin_mac_adjust_link,
-			0, PHY_INTERFACE_MODE_MII);
-#endif
+			0, phy_mode);
 
 	if (IS_ERR(phydev)) {
 		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
@@ -570,6 +564,8 @@ static const struct ethtool_ops bfin_mac_ethtool_ops = {
 /**************************************************************************/
 void setup_system_regs(struct net_device *dev)
 {
+	struct bfin_mac_local *lp = netdev_priv(dev);
+	int i;
 	unsigned short sysctl;
 
 	/*
@@ -577,6 +573,15 @@ void setup_system_regs(struct net_device *dev)
 	 * Configure checksum support and rcve frame word alignment
 	 */
 	sysctl = bfin_read_EMAC_SYSCTL();
+	/*
+	 * check if interrupt is requested for any PHY,
+	 * enable PHY interrupt only if needed
+	 */
+	for (i = 0; i < PHY_MAX_ADDR; ++i)
+		if (lp->mii_bus->irq[i] != PHY_POLL)
+			break;
+	if (i < PHY_MAX_ADDR)
+		sysctl |= PHYIE;
 	sysctl |= RXDWA;
 #if defined(BFIN_MAC_CSUM_OFFLOAD)
 	sysctl |= RXCKS;
@@ -1203,7 +1208,7 @@ static void bfin_mac_disable(void)
 /*
  * Enable Interrupts, Receive, and Transmit
  */
-static int bfin_mac_enable(void)
+static int bfin_mac_enable(struct phy_device *phydev)
 {
 	int ret;
 	u32 opmode;
@@ -1233,12 +1238,13 @@ static int bfin_mac_enable(void)
 	opmode |= DRO | DC | PSF;
 	opmode |= RE;
 
-#if defined(CONFIG_BFIN_MAC_RMII)
-	opmode |= RMII; /* For Now only 100MBit are supported */
+	if (phydev->interface == PHY_INTERFACE_MODE_RMII) {
+		opmode |= RMII; /* For Now only 100MBit are supported */
 #if (defined(CONFIG_BF537) || defined(CONFIG_BF536)) && CONFIG_BF_REV_0_2
-	opmode |= TE;
-#endif
+		opmode |= TE;
 #endif
+	}
+
 	/* Turn on the EMAC rx */
 	bfin_write_EMAC_OPMODE(opmode);
 
@@ -1270,7 +1276,7 @@ static void bfin_mac_timeout(struct net_device *dev)
 	if (netif_queue_stopped(lp->ndev))
 		netif_wake_queue(lp->ndev);
 
-	bfin_mac_enable();
+	bfin_mac_enable(lp->phydev);
 
 	/* We can accept TX packets again */
 	dev->trans_start = jiffies; /* prevent tx timeout */
@@ -1342,11 +1348,19 @@ static void bfin_mac_set_multicast_list(struct net_device *dev)
 
 static int bfin_mac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 {
+	struct bfin_mac_local *lp = netdev_priv(netdev);
+
+	if (!netif_running(netdev))
+		return -EINVAL;
+
 	switch (cmd) {
 	case SIOCSHWTSTAMP:
 		return bfin_mac_hwtstamp_ioctl(netdev, ifr, cmd);
 	default:
-		return -EOPNOTSUPP;
+		if (lp->phydev)
+			return phy_mii_ioctl(lp->phydev, ifr, cmd);
+		else
+			return -EOPNOTSUPP;
 	}
 }
 
1352 1366
@@ -1394,7 +1408,7 @@ static int bfin_mac_open(struct net_device *dev)
1394 setup_mac_addr(dev->dev_addr); 1408 setup_mac_addr(dev->dev_addr);
1395 1409
1396 bfin_mac_disable(); 1410 bfin_mac_disable();
1397 ret = bfin_mac_enable(); 1411 ret = bfin_mac_enable(lp->phydev);
1398 if (ret) 1412 if (ret)
1399 return ret; 1413 return ret;
1400 pr_debug("hardware init finished\n"); 1414 pr_debug("hardware init finished\n");
@@ -1450,6 +1464,7 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
 	struct net_device *ndev;
 	struct bfin_mac_local *lp;
 	struct platform_device *pd;
+	struct bfin_mii_bus_platform_data *mii_bus_data;
 	int rc;
 
 	ndev = alloc_etherdev(sizeof(struct bfin_mac_local));
@@ -1501,11 +1516,12 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
 	if (!lp->mii_bus) {
 		dev_err(&pdev->dev, "Cannot get mii_bus!\n");
 		rc = -ENODEV;
-		goto out_err_mii_bus_probe;
+		goto out_err_probe_mac;
 	}
 	lp->mii_bus->priv = ndev;
+	mii_bus_data = pd->dev.platform_data;
 
-	rc = mii_probe(ndev);
+	rc = mii_probe(ndev, mii_bus_data->phy_mode);
 	if (rc) {
 		dev_err(&pdev->dev, "MII Probe failed!\n");
 		goto out_err_mii_probe;
@@ -1552,8 +1568,6 @@ out_err_request_irq:
 out_err_mii_probe:
 	mdiobus_unregister(lp->mii_bus);
 	mdiobus_free(lp->mii_bus);
-out_err_mii_bus_probe:
-	peripheral_free_list(pin_req);
 out_err_probe_mac:
 	platform_set_drvdata(pdev, NULL);
 	free_netdev(ndev);
@@ -1576,8 +1590,6 @@ static int __devexit bfin_mac_remove(struct platform_device *pdev)
 
 	free_netdev(ndev);
 
-	peripheral_free_list(pin_req);
-
 	return 0;
 }
 
@@ -1623,12 +1635,21 @@ static int bfin_mac_resume(struct platform_device *pdev)
 static int __devinit bfin_mii_bus_probe(struct platform_device *pdev)
 {
 	struct mii_bus *miibus;
+	struct bfin_mii_bus_platform_data *mii_bus_pd;
+	const unsigned short *pin_req;
 	int rc, i;
 
+	mii_bus_pd = dev_get_platdata(&pdev->dev);
+	if (!mii_bus_pd) {
+		dev_err(&pdev->dev, "No peripherals in platform data!\n");
+		return -EINVAL;
+	}
+
 	/*
 	 * We are setting up a network card,
 	 * so set the GPIO pins to Ethernet mode
 	 */
+	pin_req = mii_bus_pd->mac_peripherals;
 	rc = peripheral_request_list(pin_req, DRV_NAME);
 	if (rc) {
 		dev_err(&pdev->dev, "Requesting peripherals failed!\n");
@@ -1645,13 +1666,30 @@ static int __devinit bfin_mii_bus_probe(struct platform_device *pdev)
 
 	miibus->parent = &pdev->dev;
 	miibus->name = "bfin_mii_bus";
+	miibus->phy_mask = mii_bus_pd->phy_mask;
+
 	snprintf(miibus->id, MII_BUS_ID_SIZE, "0");
 	miibus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
-	if (miibus->irq == NULL)
-		goto out_err_alloc;
-	for (i = 0; i < PHY_MAX_ADDR; ++i)
+	if (!miibus->irq)
+		goto out_err_irq_alloc;
+
+	for (i = rc; i < PHY_MAX_ADDR; ++i)
 		miibus->irq[i] = PHY_POLL;
 
+	rc = clamp(mii_bus_pd->phydev_number, 0, PHY_MAX_ADDR);
+	if (rc != mii_bus_pd->phydev_number)
+		dev_err(&pdev->dev, "Invalid number (%i) of phydevs\n",
+			mii_bus_pd->phydev_number);
+	for (i = 0; i < rc; ++i) {
+		unsigned short phyaddr = mii_bus_pd->phydev_data[i].addr;
+		if (phyaddr < PHY_MAX_ADDR)
+			miibus->irq[phyaddr] = mii_bus_pd->phydev_data[i].irq;
+		else
+			dev_err(&pdev->dev,
+				"Invalid PHY address %i for phydev %i\n",
+				phyaddr, i);
+	}
+
 	rc = mdiobus_register(miibus);
 	if (rc) {
 		dev_err(&pdev->dev, "Cannot register MDIO bus!\n");
@@ -1663,6 +1701,7 @@ static int __devinit bfin_mii_bus_probe(struct platform_device *pdev)
 
 out_err_mdiobus_register:
 	kfree(miibus->irq);
+out_err_irq_alloc:
 	mdiobus_free(miibus);
 out_err_alloc:
 	peripheral_free_list(pin_req);
@@ -1673,11 +1712,15 @@ out_err_alloc:
 static int __devexit bfin_mii_bus_remove(struct platform_device *pdev)
 {
 	struct mii_bus *miibus = platform_get_drvdata(pdev);
+	struct bfin_mii_bus_platform_data *mii_bus_pd =
+		dev_get_platdata(&pdev->dev);
+
 	platform_set_drvdata(pdev, NULL);
 	mdiobus_unregister(miibus);
 	kfree(miibus->irq);
 	mdiobus_free(miibus);
-	peripheral_free_list(pin_req);
+	peripheral_free_list(mii_bus_pd->mac_peripherals);
+
 	return 0;
 }
 
diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h
index 04e4050df18b..aed68bed2365 100644
--- a/drivers/net/bfin_mac.h
+++ b/drivers/net/bfin_mac.h
@@ -14,6 +14,8 @@
 #include <linux/clocksource.h>
 #include <linux/timecompare.h>
 #include <linux/timer.h>
+#include <linux/etherdevice.h>
+#include <linux/bfin_mac.h>
 
 #define BFIN_MAC_CSUM_OFFLOAD
 
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 9571ecf48f35..863e73a85fbe 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -20,8 +20,8 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION      "1.60.00-3"
-#define DRV_MODULE_RELDATE      "2010/10/19"
+#define DRV_MODULE_VERSION      "1.60.00-4"
+#define DRV_MODULE_RELDATE      "2010/11/01"
 #define BNX2X_BC_VER            0x040200
 
 #define BNX2X_MULTI_QUEUE
@@ -1288,15 +1288,11 @@ struct bnx2x_func_init_params {
 
 #define WAIT_RAMROD_POLL	0x01
 #define WAIT_RAMROD_COMMON	0x02
-int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
-		      int *state_p, int flags);
 
 /* dmae */
 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
 		      u32 len32);
-void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
-			       u32 addr, u32 len);
 void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
 u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type);
 u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode);
@@ -1307,7 +1303,6 @@ int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
-void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
 
 void bnx2x_calc_fc_adv(struct bnx2x *bp);
 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index bc5837514074..94d5f59d5a6f 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -25,6 +25,7 @@
 
 #include "bnx2x_init.h"
 
+static int bnx2x_setup_irqs(struct bnx2x *bp);
 
 /* free skb in the packet ring at pos idx
  * return idx of last bd freed
@@ -1679,7 +1680,7 @@ static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
 		rc = XMIT_PLAIN;
 
 	else {
-		if (skb->protocol == htons(ETH_P_IPV6)) {
+		if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
 			rc = XMIT_CSUM_V6;
 			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
 				rc |= XMIT_CSUM_TCP;
@@ -2187,7 +2188,7 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
 }
 
 
-int bnx2x_setup_irqs(struct bnx2x *bp)
+static int bnx2x_setup_irqs(struct bnx2x *bp)
 {
 	int rc = 0;
 	if (bp->flags & USING_MSIX_FLAG) {
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index 5bfe0ab1d2d4..6b28739c5302 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -117,13 +117,6 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
 void bnx2x_int_enable(struct bnx2x *bp);
 
 /**
- * Disable HW interrupts.
- *
- * @param bp
- */
-void bnx2x_int_disable(struct bnx2x *bp);
-
-/**
  * Disable interrupts. This function ensures that there are no
  * ISRs or SP DPCs (sp_task) are running after it returns.
  *
@@ -192,17 +185,6 @@ int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 			int is_leading);
 
 /**
- * Bring down an eth client.
- *
- * @param bp
- * @param p
- *
- * @return int
- */
-int bnx2x_stop_fw_client(struct bnx2x *bp,
-			 struct bnx2x_client_ramrod_params *p);
-
-/**
  * Set number of queues according to mode
  *
  * @param bp
@@ -250,34 +232,6 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
  */
 void bnx2x_set_eth_mac(struct bnx2x *bp, int set);
 
-#ifdef BCM_CNIC
-/**
- * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
- * MAC(s). The function will wait until the ramrod completion
- * returns.
- *
- * @param bp driver handle
- * @param set set or clear the CAM entry
- *
- * @return 0 if cussess, -ENODEV if ramrod doesn't return.
- */
-int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set);
-#endif
-
-/**
- * Initialize status block in FW and HW
- *
- * @param bp driver handle
- * @param dma_addr_t mapping
- * @param int sb_id
- * @param int vfid
- * @param u8 vf_valid
- * @param int fw_sb_id
- * @param int igu_sb_id
- */
-void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
-		   u8 vf_valid, int fw_sb_id, int igu_sb_id);
-
 /**
  * Set MAC filtering configurations.
  *
282 * Set MAC filtering configurations. 236 * Set MAC filtering configurations.
283 * 237 *
@@ -326,7 +280,6 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);
326 * @return int 280 * @return int
327 */ 281 */
328int bnx2x_func_start(struct bnx2x *bp); 282int bnx2x_func_start(struct bnx2x *bp);
329int bnx2x_func_stop(struct bnx2x *bp);
330 283
331/** 284/**
332 * Prepare ILT configurations according to current driver 285 * Prepare ILT configurations according to current driver
 int bnx2x_enable_msi(struct bnx2x *bp);
 
 /**
- * Request IRQ vectors from OS.
- *
- * @param bp
- *
- * @return int
- */
-int bnx2x_setup_irqs(struct bnx2x *bp);
-/**
  * NAPI callback
  *
  * @param napi
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index 18c8e23a0e82..4cfd4e9b5586 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -244,7 +244,14 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
 
 	u16 xgxs_config_tx[4];			    /* 0x1A0 */
 
-	u32 Reserved1[57];			    /* 0x1A8 */
+	u32 Reserved1[56];			    /* 0x1A8 */
+	u32 default_cfg;			    /* 0x288 */
+	/*  Enable BAM on KR */
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK		      0x00100000
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT		      20
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED		      0x00000000
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED		      0x00100000
+
 	u32 speed_capability_mask2;		    /* 0x28C */
 #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK		      0x0000FFFF
 #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT		      0
diff --git a/drivers/net/bnx2x/bnx2x_init_ops.h b/drivers/net/bnx2x/bnx2x_init_ops.h
index e65de784182c..a306b0e46b61 100644
--- a/drivers/net/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/bnx2x/bnx2x_init_ops.h
@@ -16,7 +16,9 @@
 #define BNX2X_INIT_OPS_H
 
 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len);
-
+static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
+static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
+				      u32 addr, u32 len);
 
 static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, const u32 *data,
 			      u32 len)
@@ -589,7 +591,7 @@ static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num, u8 memop)
 	return rc;
 }
 
-int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
+static int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
 {
 	int rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop);
 	if (!rc)
@@ -635,7 +637,7 @@ static void bnx2x_ilt_line_init_op(struct bnx2x *bp, struct bnx2x_ilt *ilt,
 	}
 }
 
-void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
+static void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
 			       struct ilt_client_info *ilt_cli,
 			       u32 ilt_start, u8 initop)
 {
@@ -688,8 +690,10 @@ void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
 	}
 }
 
-void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp, struct bnx2x_ilt *ilt,
-				  struct ilt_client_info *ilt_cli, u8 initop)
+static void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp,
+					 struct bnx2x_ilt *ilt,
+					 struct ilt_client_info *ilt_cli,
+					 u8 initop)
 {
 	int i;
 
@@ -703,8 +707,8 @@ void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp, struct bnx2x_ilt *ilt,
 	bnx2x_ilt_boundry_init_op(bp, ilt_cli, ilt->start_line, initop);
 }
 
-void bnx2x_ilt_client_init_op(struct bnx2x *bp,
-			      struct ilt_client_info *ilt_cli, u8 initop)
+static void bnx2x_ilt_client_init_op(struct bnx2x *bp,
+				     struct ilt_client_info *ilt_cli, u8 initop)
 {
 	struct bnx2x_ilt *ilt = BP_ILT(bp);
 
@@ -720,7 +724,7 @@ static void bnx2x_ilt_client_id_init_op(struct bnx2x *bp,
 	bnx2x_ilt_client_init_op(bp, ilt_cli, initop);
 }
 
-void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
+static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
 {
 	bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop);
 	bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop);
@@ -752,7 +756,7 @@ static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num,
  * called during init common stage, ilt clients should be initialized
  * prioir to calling this function
  */
-void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
+static void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
 {
 	bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_CDU,
 				  PXP2_REG_RQ_CDU_P_SIZE, initop);
@@ -772,8 +776,8 @@ void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
 #define QM_INIT(cid_cnt)	(cid_cnt > QM_INIT_MIN_CID_COUNT)
 
 /* called during init port stage */
-void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count,
-			     u8 initop)
+static void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count,
+				    u8 initop)
 {
 	int port = BP_PORT(bp);
 
@@ -814,8 +818,8 @@ static void bnx2x_qm_set_ptr_table(struct bnx2x *bp, int qm_cid_count)
814} 818}
815 819
816/* called during init common stage */ 820/* called during init common stage */
817void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count, 821static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
818 u8 initop) 822 u8 initop)
819{ 823{
820 if (!QM_INIT(qm_cid_count)) 824 if (!QM_INIT(qm_cid_count))
821 return; 825 return;
@@ -836,8 +840,8 @@ void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
836****************************************************************************/ 840****************************************************************************/
837 841
838/* called during init func stage */ 842/* called during init func stage */
839void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2, 843static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
840 dma_addr_t t2_mapping, int src_cid_count) 844 dma_addr_t t2_mapping, int src_cid_count)
841{ 845{
842 int i; 846 int i;
843 int port = BP_PORT(bp); 847 int port = BP_PORT(bp);
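The init_ops hunks above all apply one pattern: functions with no users outside this file become static, and a static forward declaration is added near the top wherever a function is called before its definition. A minimal standalone sketch of that pattern, using hypothetical names rather than bnx2x code:

#include <stdio.h>

/* Forward declaration: lets earlier functions call helper_op()
 * even though its definition appears later in the file. */
static int helper_op(int val);

static int do_init(int val)
{
        return helper_op(val) + 1;      /* called before it is defined */
}

static int helper_op(int val)
{
        return val * 2;
}

int main(void)
{
        printf("%d\n", do_init(20));    /* prints 41 */
        return 0;
}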
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index 3e99bf9c42b9..580919619252 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -181,6 +181,12 @@
181 (_bank + (_addr & 0xf)), \ 181 (_bank + (_addr & 0xf)), \
182 _val) 182 _val)
183 183
184static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
185 u8 devad, u16 reg, u16 *ret_val);
186
187static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
188 u8 devad, u16 reg, u16 val);
189
184static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits) 190static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
185{ 191{
186 u32 val = REG_RD(bp, reg); 192 u32 val = REG_RD(bp, reg);
@@ -594,7 +600,7 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
594 return 0; 600 return 0;
595} 601}
596 602
597u8 bnx2x_bmac_enable(struct link_params *params, 603static u8 bnx2x_bmac_enable(struct link_params *params,
598 struct link_vars *vars, 604 struct link_vars *vars,
599 u8 is_lb) 605 u8 is_lb)
600{ 606{
@@ -604,7 +610,7 @@ u8 bnx2x_bmac_enable(struct link_params *params,
604 /* reset and unreset the BigMac */ 610 /* reset and unreset the BigMac */
605 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 611 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
606 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); 612 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
607 udelay(10); 613 msleep(1);
608 614
609 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 615 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
610 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); 616 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
@@ -2537,122 +2543,6 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
2537 } 2543 }
2538} 2544}
2539 2545
2540/*
2541 *------------------------------------------------------------------------
2542 * bnx2x_override_led_value -
2543 *
2544 * Override the led value of the requested led
2545 *
2546 *------------------------------------------------------------------------
2547 */
2548u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port,
2549 u32 led_idx, u32 value)
2550{
2551 u32 reg_val;
2552
2553 /* If port 0 then use EMAC0, else use EMAC1*/
2554 u32 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
2555
2556 DP(NETIF_MSG_LINK,
2557 "bnx2x_override_led_value() port %x led_idx %d value %d\n",
2558 port, led_idx, value);
2559
2560 switch (led_idx) {
2561 case 0: /* 10MB led */
2562 /* Read the current value of the LED register in
2563 the EMAC block */
2564 reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
2565 /* Set the OVERRIDE bit to 1 */
2566 reg_val |= EMAC_LED_OVERRIDE;
2567 /* If value is 1, set the 10M_OVERRIDE bit,
2568 otherwise reset it.*/
2569 reg_val = (value == 1) ? (reg_val | EMAC_LED_10MB_OVERRIDE) :
2570 (reg_val & ~EMAC_LED_10MB_OVERRIDE);
2571 REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
2572 break;
2573 case 1: /*100MB led */
2574 /*Read the current value of the LED register in
2575 the EMAC block */
2576 reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
2577 /* Set the OVERRIDE bit to 1 */
2578 reg_val |= EMAC_LED_OVERRIDE;
2579 /* If value is 1, set the 100M_OVERRIDE bit,
2580 otherwise reset it.*/
2581 reg_val = (value == 1) ? (reg_val | EMAC_LED_100MB_OVERRIDE) :
2582 (reg_val & ~EMAC_LED_100MB_OVERRIDE);
2583 REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
2584 break;
2585 case 2: /* 1000MB led */
2586 /* Read the current value of the LED register in the
2587 EMAC block */
2588 reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
2589 /* Set the OVERRIDE bit to 1 */
2590 reg_val |= EMAC_LED_OVERRIDE;
2591 /* If value is 1, set the 1000M_OVERRIDE bit, otherwise
2592 reset it. */
2593 reg_val = (value == 1) ? (reg_val | EMAC_LED_1000MB_OVERRIDE) :
2594 (reg_val & ~EMAC_LED_1000MB_OVERRIDE);
2595 REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
2596 break;
2597 case 3: /* 2500MB led */
2598 /* Read the current value of the LED register in the
2599 EMAC block*/
2600 reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
2601 /* Set the OVERRIDE bit to 1 */
2602 reg_val |= EMAC_LED_OVERRIDE;
2603 /* If value is 1, set the 2500M_OVERRIDE bit, otherwise
2604 reset it.*/
2605 reg_val = (value == 1) ? (reg_val | EMAC_LED_2500MB_OVERRIDE) :
2606 (reg_val & ~EMAC_LED_2500MB_OVERRIDE);
2607 REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
2608 break;
2609 case 4: /*10G led */
2610 if (port == 0) {
2611 REG_WR(bp, NIG_REG_LED_10G_P0,
2612 value);
2613 } else {
2614 REG_WR(bp, NIG_REG_LED_10G_P1,
2615 value);
2616 }
2617 break;
2618 case 5: /* TRAFFIC led */
2619 /* Find if the traffic control is via BMAC or EMAC */
2620 if (port == 0)
2621 reg_val = REG_RD(bp, NIG_REG_NIG_EMAC0_EN);
2622 else
2623 reg_val = REG_RD(bp, NIG_REG_NIG_EMAC1_EN);
2624
2625 /* Override the traffic led in the EMAC:*/
2626 if (reg_val == 1) {
2627 /* Read the current value of the LED register in
2628 the EMAC block */
2629 reg_val = REG_RD(bp, emac_base +
2630 EMAC_REG_EMAC_LED);
2631 /* Set the TRAFFIC_OVERRIDE bit to 1 */
2632 reg_val |= EMAC_LED_OVERRIDE;
2633 /* If value is 1, set the TRAFFIC bit, otherwise
2634 reset it.*/
2635 reg_val = (value == 1) ? (reg_val | EMAC_LED_TRAFFIC) :
2636 (reg_val & ~EMAC_LED_TRAFFIC);
2637 REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
2638 } else { /* Override the traffic led in the BMAC: */
2639 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
2640 + port*4, 1);
2641 REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 + port*4,
2642 value);
2643 }
2644 break;
2645 default:
2646 DP(NETIF_MSG_LINK,
2647 "bnx2x_override_led_value() unknown led index %d "
2648 "(should be 0-5)\n", led_idx);
2649 return -EINVAL;
2650 }
2651
2652 return 0;
2653}
2654
2655
2656u8 bnx2x_set_led(struct link_params *params, 2546u8 bnx2x_set_led(struct link_params *params,
2657 struct link_vars *vars, u8 mode, u32 speed) 2547 struct link_vars *vars, u8 mode, u32 speed)
2658{ 2548{
@@ -3635,13 +3525,19 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
3635 DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1); 3525 DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
3636 3526
3637 /* Enable CL37 BAM */ 3527 /* Enable CL37 BAM */
3638 bnx2x_cl45_read(bp, phy, 3528 if (REG_RD(bp, params->shmem_base +
3639 MDIO_AN_DEVAD, 3529 offsetof(struct shmem_region, dev_info.
3640 MDIO_AN_REG_8073_BAM, &val); 3530 port_hw_config[params->port].default_cfg)) &
3641 bnx2x_cl45_write(bp, phy, 3531 PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
3642 MDIO_AN_DEVAD,
3643 MDIO_AN_REG_8073_BAM, val | 1);
3644 3532
3533 bnx2x_cl45_read(bp, phy,
3534 MDIO_AN_DEVAD,
3535 MDIO_AN_REG_8073_BAM, &val);
3536 bnx2x_cl45_write(bp, phy,
3537 MDIO_AN_DEVAD,
3538 MDIO_AN_REG_8073_BAM, val | 1);
3539 DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n");
3540 }
3645 if (params->loopback_mode == LOOPBACK_EXT) { 3541 if (params->loopback_mode == LOOPBACK_EXT) {
3646 bnx2x_807x_force_10G(bp, phy); 3542 bnx2x_807x_force_10G(bp, phy);
3647 DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n"); 3543 DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n");
@@ -4099,9 +3995,9 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
4099 return -EINVAL; 3995 return -EINVAL;
4100} 3996}
4101 3997
4102u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy, 3998static u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
4103 struct link_params *params, u16 addr, 3999 struct link_params *params, u16 addr,
4104 u8 byte_cnt, u8 *o_buf) 4000 u8 byte_cnt, u8 *o_buf)
4105{ 4001{
4106 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) 4002 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
4107 return bnx2x_8726_read_sfp_module_eeprom(phy, params, addr, 4003 return bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
@@ -5412,7 +5308,7 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
5412{ 5308{
5413 struct bnx2x *bp = params->bp; 5309 struct bnx2x *bp = params->bp;
5414 u16 autoneg_val, an_1000_val, an_10_100_val; 5310 u16 autoneg_val, an_1000_val, an_10_100_val;
5415 bnx2x_wait_reset_complete(bp, phy); 5311
5416 bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4, 5312 bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
5417 1 << NIG_LATCH_BC_ENABLE_MI_INT); 5313 1 << NIG_LATCH_BC_ENABLE_MI_INT);
5418 5314
@@ -5541,6 +5437,7 @@ static u8 bnx2x_8481_config_init(struct bnx2x_phy *phy,
5541 5437
5542 /* HW reset */ 5438 /* HW reset */
5543 bnx2x_ext_phy_hw_reset(bp, params->port); 5439 bnx2x_ext_phy_hw_reset(bp, params->port);
5440 bnx2x_wait_reset_complete(bp, phy);
5544 5441
5545 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15); 5442 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
5546 return bnx2x_848xx_cmn_config_init(phy, params, vars); 5443 return bnx2x_848xx_cmn_config_init(phy, params, vars);
@@ -5551,7 +5448,7 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
5551 struct link_vars *vars) 5448 struct link_vars *vars)
5552{ 5449{
5553 struct bnx2x *bp = params->bp; 5450 struct bnx2x *bp = params->bp;
5554 u8 port = params->port, initialize = 1; 5451 u8 port, initialize = 1;
5555 u16 val; 5452 u16 val;
5556 u16 temp; 5453 u16 temp;
5557 u32 actual_phy_selection; 5454 u32 actual_phy_selection;
@@ -5560,11 +5457,16 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
5560 /* This is just for MDIO_CTL_REG_84823_MEDIA register. */ 5457 /* This is just for MDIO_CTL_REG_84823_MEDIA register. */
5561 5458
5562 msleep(1); 5459 msleep(1);
5460 if (CHIP_IS_E2(bp))
5461 port = BP_PATH(bp);
5462 else
5463 port = params->port;
5563 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, 5464 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5564 MISC_REGISTERS_GPIO_OUTPUT_HIGH, 5465 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
5565 port); 5466 port);
5566 msleep(200); /* 100 is not enough */ 5467 bnx2x_wait_reset_complete(bp, phy);
5567 5468 /* Wait for GPHY to come out of reset */
5469 msleep(50);
5568 /* BCM84823 requires that XGXS links up first @ 10G for normal 5470 /* BCM84823 requires that XGXS links up first @ 10G for normal
5569 behavior */ 5471 behavior */
5570 temp = vars->line_speed; 5472 temp = vars->line_speed;
@@ -5735,7 +5637,11 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
5735 struct link_params *params) 5637 struct link_params *params)
5736{ 5638{
5737 struct bnx2x *bp = params->bp; 5639 struct bnx2x *bp = params->bp;
5738 u8 port = params->port; 5640 u8 port;
5641 if (CHIP_IS_E2(bp))
5642 port = BP_PATH(bp);
5643 else
5644 port = params->port;
5739 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, 5645 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5740 MISC_REGISTERS_GPIO_OUTPUT_LOW, 5646 MISC_REGISTERS_GPIO_OUTPUT_LOW,
5741 port); 5647 port);
@@ -6819,13 +6725,6 @@ u8 bnx2x_phy_probe(struct link_params *params)
6819 return 0; 6725 return 0;
6820} 6726}
6821 6727
6822u32 bnx2x_supported_attr(struct link_params *params, u8 phy_idx)
6823{
6824 if (phy_idx < params->num_phys)
6825 return params->phy[phy_idx].supported;
6826 return 0;
6827}
6828
6829static void set_phy_vars(struct link_params *params) 6728static void set_phy_vars(struct link_params *params)
6830{ 6729{
6831 struct bnx2x *bp = params->bp; 6730 struct bnx2x *bp = params->bp;
@@ -7045,7 +6944,7 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
7045 u8 reset_ext_phy) 6944 u8 reset_ext_phy)
7046{ 6945{
7047 struct bnx2x *bp = params->bp; 6946 struct bnx2x *bp = params->bp;
7048 u8 phy_index, port = params->port; 6947 u8 phy_index, port = params->port, clear_latch_ind = 0;
7049 DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port); 6948 DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
7050 /* disable attentions */ 6949 /* disable attentions */
7051 vars->link_status = 0; 6950 vars->link_status = 0;
@@ -7083,9 +6982,18 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
7083 params->phy[phy_index].link_reset( 6982 params->phy[phy_index].link_reset(
7084 &params->phy[phy_index], 6983 &params->phy[phy_index],
7085 params); 6984 params);
6985 if (params->phy[phy_index].flags &
6986 FLAGS_REARM_LATCH_SIGNAL)
6987 clear_latch_ind = 1;
7086 } 6988 }
7087 } 6989 }
7088 6990
6991 if (clear_latch_ind) {
6992 /* Clear latching indication */
6993 bnx2x_rearm_latch_signal(bp, port, 0);
6994 bnx2x_bits_dis(bp, NIG_REG_LATCH_BC_0 + port*4,
6995 1 << NIG_LATCH_BC_ENABLE_MI_INT);
6996 }
7089 if (params->phy[INT_PHY].link_reset) 6997 if (params->phy[INT_PHY].link_reset)
7090 params->phy[INT_PHY].link_reset( 6998 params->phy[INT_PHY].link_reset(
7091 &params->phy[INT_PHY], params); 6999 &params->phy[INT_PHY], params);
@@ -7116,6 +7024,7 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
7116 s8 port; 7024 s8 port;
7117 s8 port_of_path = 0; 7025 s8 port_of_path = 0;
7118 7026
7027 bnx2x_ext_phy_hw_reset(bp, 0);
7119 /* PART1 - Reset both phys */ 7028 /* PART1 - Reset both phys */
7120 for (port = PORT_MAX - 1; port >= PORT_0; port--) { 7029 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
7121 u32 shmem_base, shmem2_base; 7030 u32 shmem_base, shmem2_base;
@@ -7138,7 +7047,8 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
7138 return -EINVAL; 7047 return -EINVAL;
7139 } 7048 }
7140 /* disable attentions */ 7049 /* disable attentions */
7141 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 7050 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
7051 port_of_path*4,
7142 (NIG_MASK_XGXS0_LINK_STATUS | 7052 (NIG_MASK_XGXS0_LINK_STATUS |
7143 NIG_MASK_XGXS0_LINK10G | 7053 NIG_MASK_XGXS0_LINK10G |
7144 NIG_MASK_SERDES0_LINK_STATUS | 7054 NIG_MASK_SERDES0_LINK_STATUS |
@@ -7249,7 +7159,7 @@ static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp,
7249 (1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT))); 7159 (1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT)));
7250 REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val); 7160 REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
7251 7161
7252 bnx2x_ext_phy_hw_reset(bp, 1); 7162 bnx2x_ext_phy_hw_reset(bp, 0);
7253 msleep(5); 7163 msleep(5);
7254 for (port = 0; port < PORT_MAX; port++) { 7164 for (port = 0; port < PORT_MAX; port++) {
7255 u32 shmem_base, shmem2_base; 7165 u32 shmem_base, shmem2_base;
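Several of the PHY hunks above (the CL37 BAM enable, for instance) use the same read-modify-write idiom over the clause-45 MDIO helpers: read a register, OR in the bits to set, and write the value back so the other bits are preserved. A hedged userspace model of the idiom — the register bank, register index, and bit value below are illustrative, not real MDIO registers:

#include <stdio.h>
#include <stdint.h>

static uint16_t fake_regs[32];  /* stand-in for one PHY register bank */

static uint16_t mdio_read(unsigned int reg)
{
        return fake_regs[reg];
}

static void mdio_write(unsigned int reg, uint16_t val)
{
        fake_regs[reg] = val;
}

/* Set the given bits in reg without disturbing the other bits. */
static void mdio_set_bits(unsigned int reg, uint16_t bits)
{
        mdio_write(reg, mdio_read(reg) | bits);
}

int main(void)
{
        fake_regs[5] = 0x4000;
        mdio_set_bits(5, 0x0001);       /* like "BAM register |= 1" above */
        printf("reg5 = 0x%04x\n", mdio_read(5));        /* 0x4001 */
        return 0;
}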
diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h
index 58a4c7199276..171abf8097ee 100644
--- a/drivers/net/bnx2x/bnx2x_link.h
+++ b/drivers/net/bnx2x/bnx2x_link.h
@@ -279,12 +279,6 @@ u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr,
279 279
280u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr, 280u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr,
281 u8 devad, u16 reg, u16 val); 281 u8 devad, u16 reg, u16 val);
282
283u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
284 u8 devad, u16 reg, u16 *ret_val);
285
286u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
287 u8 devad, u16 reg, u16 val);
288/* Reads the link_status from the shmem, 282/* Reads the link_status from the shmem,
289 and update the link vars accordingly */ 283 and update the link vars accordingly */
290void bnx2x_link_status_update(struct link_params *input, 284void bnx2x_link_status_update(struct link_params *input,
@@ -304,8 +298,6 @@ u8 bnx2x_set_led(struct link_params *params, struct link_vars *vars,
304#define LED_MODE_OPER 2 298#define LED_MODE_OPER 2
305#define LED_MODE_FRONT_PANEL_OFF 3 299#define LED_MODE_FRONT_PANEL_OFF 3
306 300
307u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port, u32 led_idx, u32 value);
308
309/* bnx2x_handle_module_detect_int should be called upon module detection 301/* bnx2x_handle_module_detect_int should be called upon module detection
310 interrupt */ 302 interrupt */
311void bnx2x_handle_module_detect_int(struct link_params *params); 303void bnx2x_handle_module_detect_int(struct link_params *params);
@@ -325,19 +317,12 @@ void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
325/* Reset the external of SFX7101 */ 317/* Reset the external of SFX7101 */
326void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy); 318void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
327 319
328u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
329 struct link_params *params, u16 addr,
330 u8 byte_cnt, u8 *o_buf);
331
332void bnx2x_hw_reset_phy(struct link_params *params); 320void bnx2x_hw_reset_phy(struct link_params *params);
333 321
334/* Checks if HW lock is required for this phy/board type */ 322/* Checks if HW lock is required for this phy/board type */
335u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base, 323u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base,
336 u32 shmem2_base); 324 u32 shmem2_base);
337 325
338/* Returns the aggregative supported attributes of the phys on board */
339u32 bnx2x_supported_attr(struct link_params *params, u8 phy_idx);
340
341/* Check swap bit and adjust PHY order */ 326/* Check swap bit and adjust PHY order */
342u32 bnx2x_phy_selection(struct link_params *params); 327u32 bnx2x_phy_selection(struct link_params *params);
343 328
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index ff99a2fc0426..9709b8569666 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -403,7 +403,7 @@ static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
403/* used only at init 403/* used only at init
404 * locking is done by mcp 404 * locking is done by mcp
405 */ 405 */
406void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val) 406static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
407{ 407{
408 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr); 408 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
409 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val); 409 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
@@ -429,7 +429,8 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
429#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]" 429#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
430#define DMAE_DP_DST_NONE "dst_addr [none]" 430#define DMAE_DP_DST_NONE "dst_addr [none]"
431 431
432void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl) 432static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
433 int msglvl)
433{ 434{
434 u32 src_type = dmae->opcode & DMAE_COMMAND_SRC; 435 u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
435 436
@@ -551,8 +552,9 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
551 return opcode; 552 return opcode;
552} 553}
553 554
554void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, 555static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
555 u8 src_type, u8 dst_type) 556 struct dmae_command *dmae,
557 u8 src_type, u8 dst_type)
556{ 558{
557 memset(dmae, 0, sizeof(struct dmae_command)); 559 memset(dmae, 0, sizeof(struct dmae_command));
558 560
@@ -567,7 +569,8 @@ void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
567} 569}
568 570
569/* issue a dmae command over the init-channel and wait for completion */ 571/* issue a dmae command over the init-channel and wait for completion */
570int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae) 572static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
573 struct dmae_command *dmae)
571{ 574{
572 u32 *wb_comp = bnx2x_sp(bp, wb_comp); 575 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
573 int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40; 576 int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
@@ -674,8 +677,8 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
674 bnx2x_issue_dmae_with_comp(bp, &dmae); 677 bnx2x_issue_dmae_with_comp(bp, &dmae);
675} 678}
676 679
677void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr, 680static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
678 u32 addr, u32 len) 681 u32 addr, u32 len)
679{ 682{
680 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp); 683 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
681 int offset = 0; 684 int offset = 0;
@@ -1267,7 +1270,7 @@ static void bnx2x_igu_int_disable(struct bnx2x *bp)
1267 BNX2X_ERR("BUG! proper val not read from IGU!\n"); 1270 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1268} 1271}
1269 1272
1270void bnx2x_int_disable(struct bnx2x *bp) 1273static void bnx2x_int_disable(struct bnx2x *bp)
1271{ 1274{
1272 if (bp->common.int_block == INT_BLOCK_HC) 1275 if (bp->common.int_block == INT_BLOCK_HC)
1273 bnx2x_hc_int_disable(bp); 1276 bnx2x_hc_int_disable(bp);
@@ -2236,7 +2239,7 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2236} 2239}
2237 2240
2238/* must be called under rtnl_lock */ 2241/* must be called under rtnl_lock */
2239void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters) 2242static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2240{ 2243{
2241 u32 mask = (1 << cl_id); 2244 u32 mask = (1 << cl_id);
2242 2245
@@ -2303,7 +2306,7 @@ void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2303 bp->mac_filters.unmatched_unicast & ~mask; 2306 bp->mac_filters.unmatched_unicast & ~mask;
2304} 2307}
2305 2308
2306void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p) 2309static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2307{ 2310{
2308 struct tstorm_eth_function_common_config tcfg = {0}; 2311 struct tstorm_eth_function_common_config tcfg = {0};
2309 u16 rss_flgs; 2312 u16 rss_flgs;
@@ -2460,7 +2463,7 @@ static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2460 txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0; 2463 txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2461} 2464}
2462 2465
2463void bnx2x_pf_init(struct bnx2x *bp) 2466static void bnx2x_pf_init(struct bnx2x *bp)
2464{ 2467{
2465 struct bnx2x_func_init_params func_init = {0}; 2468 struct bnx2x_func_init_params func_init = {0};
2466 struct bnx2x_rss_params rss = {0}; 2469 struct bnx2x_rss_params rss = {0};
@@ -3928,7 +3931,7 @@ void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
3928 hc_sm->time_to_expire = 0xFFFFFFFF; 3931 hc_sm->time_to_expire = 0xFFFFFFFF;
3929} 3932}
3930 3933
3931void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, 3934static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
3932 u8 vf_valid, int fw_sb_id, int igu_sb_id) 3935 u8 vf_valid, int fw_sb_id, int igu_sb_id)
3933{ 3936{
3934 int igu_seg_id; 3937 int igu_seg_id;
@@ -6021,6 +6024,9 @@ alloc_mem_err:
6021/* 6024/*
6022 * Init service functions 6025 * Init service functions
6023 */ 6026 */
6027static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6028 int *state_p, int flags);
6029
6024int bnx2x_func_start(struct bnx2x *bp) 6030int bnx2x_func_start(struct bnx2x *bp)
6025{ 6031{
6026 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1); 6032 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
@@ -6030,7 +6036,7 @@ int bnx2x_func_start(struct bnx2x *bp)
6030 WAIT_RAMROD_COMMON); 6036 WAIT_RAMROD_COMMON);
6031} 6037}
6032 6038
6033int bnx2x_func_stop(struct bnx2x *bp) 6039static int bnx2x_func_stop(struct bnx2x *bp)
6034{ 6040{
6035 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1); 6041 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
6036 6042
@@ -6103,8 +6109,8 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
6103 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags); 6109 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
6104} 6110}
6105 6111
6106int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx, 6112static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6107 int *state_p, int flags) 6113 int *state_p, int flags)
6108{ 6114{
6109 /* can take a while if any port is running */ 6115 /* can take a while if any port is running */
6110 int cnt = 5000; 6116 int cnt = 5000;
@@ -6154,7 +6160,7 @@ int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6154 return -EBUSY; 6160 return -EBUSY;
6155} 6161}
6156 6162
6157u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset) 6163static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
6158{ 6164{
6159 if (CHIP_IS_E1H(bp)) 6165 if (CHIP_IS_E1H(bp))
6160 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp); 6166 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
@@ -6273,7 +6279,7 @@ static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
6273 * 6279 *
6274 * @return 0 if success, -ENODEV if ramrod doesn't return. 6280 * @return 0 if success, -ENODEV if ramrod doesn't return.
6275 */ 6281 */
6276int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set) 6282static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
6277{ 6283{
6278 u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) : 6284 u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
6279 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE)); 6285 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
@@ -6383,11 +6389,11 @@ static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
6383 ETH_CONNECTION_TYPE); 6389 ETH_CONNECTION_TYPE);
6384} 6390}
6385 6391
6386int bnx2x_setup_fw_client(struct bnx2x *bp, 6392static int bnx2x_setup_fw_client(struct bnx2x *bp,
6387 struct bnx2x_client_init_params *params, 6393 struct bnx2x_client_init_params *params,
6388 u8 activate, 6394 u8 activate,
6389 struct client_init_ramrod_data *data, 6395 struct client_init_ramrod_data *data,
6390 dma_addr_t data_mapping) 6396 dma_addr_t data_mapping)
6391{ 6397{
6392 u16 hc_usec; 6398 u16 hc_usec;
6393 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP; 6399 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
@@ -6633,7 +6639,8 @@ int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
6633 return rc; 6639 return rc;
6634} 6640}
6635 6641
6636int bnx2x_stop_fw_client(struct bnx2x *bp, struct bnx2x_client_ramrod_params *p) 6642static int bnx2x_stop_fw_client(struct bnx2x *bp,
6643 struct bnx2x_client_ramrod_params *p)
6637{ 6644{
6638 int rc; 6645 int rc;
6639 6646
@@ -7440,7 +7447,7 @@ reset_task_exit:
7440 * Init service functions 7447 * Init service functions
7441 */ 7448 */
7442 7449
7443u32 bnx2x_get_pretend_reg(struct bnx2x *bp) 7450static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
7444{ 7451{
7445 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0; 7452 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
7446 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base; 7453 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
@@ -9057,7 +9064,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9057 default: 9064 default:
9058 pr_err("Unknown board_type (%ld), aborting\n", 9065 pr_err("Unknown board_type (%ld), aborting\n",
9059 ent->driver_data); 9066 ent->driver_data);
9060 return ENODEV; 9067 return -ENODEV;
9061 } 9068 }
9062 9069
9063 cid_count += CNIC_CONTEXT_USE; 9070 cid_count += CNIC_CONTEXT_USE;
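The last bnx2x_main.c hunk replaces `return ENODEV;` with `return -ENODEV;`. Kernel convention is 0 for success and a negative errno for failure, so a positive ENODEV slips past any caller that tests for negative return values. A small demonstration of why the sign matters:

#include <stdio.h>
#include <errno.h>

/* Kernel-style: 0 on success, negative errno on failure. */
static int probe_bad(void)  { return ENODEV; }          /* bug: positive */
static int probe_good(void) { return -ENODEV; }         /* correct */

int main(void)
{
        /* A caller following the "negative means error" convention: */
        if (probe_bad() < 0)
                printf("bad probe reported failure\n");
        else
                printf("bad probe looks like success (bug)\n");

        if (probe_good() < 0)
                printf("good probe reported failure\n");
        return 0;
}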
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index beb3b7cecd52..71a169740d05 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -493,9 +493,9 @@ static void bond_vlan_rx_register(struct net_device *bond_dev,
493 struct slave *slave; 493 struct slave *slave;
494 int i; 494 int i;
495 495
496 write_lock(&bond->lock); 496 write_lock_bh(&bond->lock);
497 bond->vlgrp = grp; 497 bond->vlgrp = grp;
498 write_unlock(&bond->lock); 498 write_unlock_bh(&bond->lock);
499 499
500 bond_for_each_slave(bond, slave, i) { 500 bond_for_each_slave(bond, slave, i) {
501 struct net_device *slave_dev = slave->dev; 501 struct net_device *slave_dev = slave->dev;
@@ -878,8 +878,10 @@ static void __bond_resend_igmp_join_requests(struct net_device *dev)
878 rcu_read_lock(); 878 rcu_read_lock();
879 in_dev = __in_dev_get_rcu(dev); 879 in_dev = __in_dev_get_rcu(dev);
880 if (in_dev) { 880 if (in_dev) {
881 read_lock(&in_dev->mc_list_lock);
881 for (im = in_dev->mc_list; im; im = im->next) 882 for (im = in_dev->mc_list; im; im = im->next)
882 ip_mc_rejoin_group(im); 883 ip_mc_rejoin_group(im);
884 read_unlock(&in_dev->mc_list_lock);
883 } 885 }
884 886
885 rcu_read_unlock(); 887 rcu_read_unlock();
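The bonding hunk switches to write_lock_bh()/write_unlock_bh() around the vlgrp update because the same lock can also be taken from softirq context; if a softirq fires on the CPU that already holds the plain write_lock(), it deadlocks on itself. A minimal kernel-style sketch of the pattern, assuming a hypothetical cfg_ptr protected the same way (this is not the bonding code itself):

#include <linux/spinlock.h>

static DEFINE_RWLOCK(cfg_lock);
static void *cfg_ptr;

/* Writer runs in process context, but readers may run in softirq
 * context on the same CPU; disabling bottom halves while writing
 * prevents a softirq from spinning on a lock this CPU holds. */
static void cfg_update(void *new_cfg)
{
        write_lock_bh(&cfg_lock);
        cfg_ptr = new_cfg;
        write_unlock_bh(&cfg_lock);
}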
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index 75bfc3a9d95f..09ed3f42d673 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -31,3 +31,10 @@ config CAIF_SPI_SYNC
31 Putting the next command and length in the start of the frame can 31 Putting the next command and length in the start of the frame can
32 help to synchronize to the next transfer in case of over or under-runs. 32 help to synchronize to the next transfer in case of over or under-runs.
33 This option also needs to be enabled on the modem. 33 This option also needs to be enabled on the modem.
34
35config CAIF_SHM
36 tristate "CAIF shared memory protocol driver"
37 depends on CAIF && U5500_MBOX
38 default n
39 ---help---
40 The CAIF shared memory protocol driver for the STE UX5500 platform.
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
index 3a11d619452b..b38d987da67d 100644
--- a/drivers/net/caif/Makefile
+++ b/drivers/net/caif/Makefile
@@ -8,3 +8,7 @@ obj-$(CONFIG_CAIF_TTY) += caif_serial.o
8# SPI slave physical interfaces module 8# SPI slave physical interfaces module
9cfspi_slave-objs := caif_spi.o caif_spi_slave.o 9cfspi_slave-objs := caif_spi.o caif_spi_slave.o
10obj-$(CONFIG_CAIF_SPI_SLAVE) += cfspi_slave.o 10obj-$(CONFIG_CAIF_SPI_SLAVE) += cfspi_slave.o
11
12# Shared memory
13caif_shm-objs := caif_shmcore.o caif_shm_u5500.o
14obj-$(CONFIG_CAIF_SHM) += caif_shm.o
diff --git a/drivers/net/caif/caif_shm_u5500.c b/drivers/net/caif/caif_shm_u5500.c
new file mode 100644
index 000000000000..1cd90da86f13
--- /dev/null
+++ b/drivers/net/caif/caif_shm_u5500.c
@@ -0,0 +1,129 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
4 * Author: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com
5 * License terms: GNU General Public License (GPL) version 2
6 */
7
8#define pr_fmt(fmt) KBUILD_MODNAME ":" __func__ "():" fmt
9
10#include <linux/version.h>
11#include <linux/init.h>
12#include <linux/module.h>
13#include <linux/netdevice.h>
14#include <mach/mbox.h>
15#include <net/caif/caif_shm.h>
16
17MODULE_LICENSE("GPL");
18MODULE_DESCRIPTION("CAIF Shared Memory protocol driver");
19
20#define MAX_SHM_INSTANCES 1
21
22enum {
23 MBX_ACC0,
24 MBX_ACC1,
25 MBX_DSP
26};
27
28static struct shmdev_layer shmdev_lyr[MAX_SHM_INSTANCES];
29
30static unsigned int shm_start;
31static unsigned int shm_size;
32
33module_param(shm_size, uint, 0440);
34MODULE_PARM_DESC(shm_size, "Total size of SHM shared memory");
35
36module_param(shm_start, uint, 0440);
37MODULE_PARM_DESC(shm_start, "Start of SHM shared memory");
38
39static int shmdev_send_msg(u32 dev_id, u32 mbx_msg)
40{
41 /* Always block until msg is written successfully */
42 mbox_send(shmdev_lyr[dev_id].hmbx, mbx_msg, true);
43 return 0;
44}
45
46static int shmdev_mbx_setup(void *pshmdrv_cb, struct shmdev_layer *pshm_dev,
47 void *pshm_drv)
48{
49 /*
50 * For UX5500, we have only 1 SHM instance which uses MBX0
51 * for communication with the peer modem
52 */
53 pshm_dev->hmbx = mbox_setup(MBX_ACC0, pshmdrv_cb, pshm_drv);
54
55 if (!pshm_dev->hmbx)
56 return -ENODEV;
57 else
58 return 0;
59}
60
61static int __init caif_shmdev_init(void)
62{
63 int i, result;
64
65 /* Loop is currently overkill, there is only one instance */
66 for (i = 0; i < MAX_SHM_INSTANCES; i++) {
67
68 shmdev_lyr[i].shm_base_addr = shm_start;
69 shmdev_lyr[i].shm_total_sz = shm_size;
70
71 if (((char *)shmdev_lyr[i].shm_base_addr == NULL)
72 || (shmdev_lyr[i].shm_total_sz <= 0)) {
73 pr_warn("ERROR,"
74 "Shared memory Address and/or Size incorrect"
75 ", Bailing out ...\n");
76 result = -EINVAL;
77 goto clean;
78 }
79
80 pr_info("SHM AREA (instance %d) STARTS"
81 " AT %p\n", i, (char *)shmdev_lyr[i].shm_base_addr);
82
83 shmdev_lyr[i].shm_id = i;
84 shmdev_lyr[i].pshmdev_mbxsend = shmdev_send_msg;
85 shmdev_lyr[i].pshmdev_mbxsetup = shmdev_mbx_setup;
86
87 /*
88 * Finally, CAIF core module is called with details in place:
89 * 1. SHM base address
90 * 2. SHM size
91 * 3. MBX handle
92 */
93 result = caif_shmcore_probe(&shmdev_lyr[i]);
94 if (result) {
95 pr_warn("ERROR[%d],"
96 "Could not probe SHM core (instance %d)"
97 " Bailing out ...\n", result, i);
98 goto clean;
99 }
100 }
101
102 return 0;
103
104clean:
105 /*
106 * For now, we assume that even if one instance of SHM fails, we bail
107 * out of the driver support completely. For this, we need to release
108 * any memory allocated and unregister any instance of SHM net device.
109 */
110 for (i = 0; i < MAX_SHM_INSTANCES; i++) {
111 if (shmdev_lyr[i].pshm_netdev)
112 unregister_netdev(shmdev_lyr[i].pshm_netdev);
113 }
114 return result;
115}
116
117static void __exit caif_shmdev_exit(void)
118{
119 int i;
120
121 for (i = 0; i < MAX_SHM_INSTANCES; i++) {
122 caif_shmcore_remove(shmdev_lyr[i].pshm_netdev);
123 kfree((void *)shmdev_lyr[i].shm_base_addr);
124 }
125
126}
127
128module_init(caif_shmdev_init);
129module_exit(caif_shmdev_exit);
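caif_shm_u5500.c is thin platform glue: it fills a shmdev_layer descriptor with the mailbox send/setup callbacks plus the shared-memory geometry, then hands the descriptor to the generic core through caif_shmcore_probe(). A standalone model of that split, with deliberately simplified types and hypothetical names:

#include <stdio.h>

/* Deliberately simplified stand-in for struct shmdev_layer. */
struct dev_layer {
        unsigned int id;
        int (*send_msg)(unsigned int id, unsigned int msg);
};

/* Platform glue: knows how to reach the mailbox hardware. */
static int glue_send(unsigned int id, unsigned int msg)
{
        printf("dev %u: mailbox msg 0x%02x\n", id, msg);
        return 0;
}

/* Generic core: only ever talks through the callback. */
static int core_probe(struct dev_layer *dev)
{
        return dev->send_msg(dev->id, 0x11);
}

int main(void)
{
        struct dev_layer dev = { .id = 0, .send_msg = glue_send };
        return core_probe(&dev);
}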
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
new file mode 100644
index 000000000000..19f9c0656667
--- /dev/null
+++ b/drivers/net/caif/caif_shmcore.c
@@ -0,0 +1,744 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
4 * Authors: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com,
5 * Daniel Martensson / daniel.martensson@stericsson.com
6 * License terms: GNU General Public License (GPL) version 2
7 */
8
9#define pr_fmt(fmt) KBUILD_MODNAME ":" __func__ "():" fmt
10
11#include <linux/spinlock.h>
12#include <linux/sched.h>
13#include <linux/list.h>
14#include <linux/netdevice.h>
15#include <linux/if_arp.h>
16
17#include <net/caif/caif_device.h>
18#include <net/caif/caif_shm.h>
19
20#define NR_TX_BUF 6
21#define NR_RX_BUF 6
22#define TX_BUF_SZ 0x2000
23#define RX_BUF_SZ 0x2000
24
25#define CAIF_NEEDED_HEADROOM 32
26
27#define CAIF_FLOW_ON 1
28#define CAIF_FLOW_OFF 0
29
30#define LOW_WATERMARK 3
31#define HIGH_WATERMARK 4
32
33/* Maximum number of CAIF buffers per shared memory buffer. */
34#define SHM_MAX_FRMS_PER_BUF 10
35
36/*
37 * Size in bytes of the descriptor area
38 * (With end of descriptor signalling)
39 */
40#define SHM_CAIF_DESC_SIZE ((SHM_MAX_FRMS_PER_BUF + 1) * \
41 sizeof(struct shm_pck_desc))
42
43/*
44 * Offset to the first CAIF frame within a shared memory buffer.
45 * Aligned on 32 bytes.
46 */
47#define SHM_CAIF_FRM_OFS (SHM_CAIF_DESC_SIZE + (SHM_CAIF_DESC_SIZE % 32))
48
49/* Number of bytes for CAIF shared memory header. */
50#define SHM_HDR_LEN 1
51
52/* Number of padding bytes for the complete CAIF frame. */
53#define SHM_FRM_PAD_LEN 4
54
55#define CAIF_MAX_MTU 4096
56
57#define SHM_SET_FULL(x) (((x+1) & 0x0F) << 0)
58#define SHM_GET_FULL(x) (((x >> 0) & 0x0F) - 1)
59
60#define SHM_SET_EMPTY(x) (((x+1) & 0x0F) << 4)
61#define SHM_GET_EMPTY(x) (((x >> 4) & 0x0F) - 1)
62
63#define SHM_FULL_MASK (0x0F << 0)
64#define SHM_EMPTY_MASK (0x0F << 4)
65
66struct shm_pck_desc {
67 /*
68 * Offset from start of shared memory area to start of
69 * shared memory CAIF frame.
70 */
71 u32 frm_ofs;
72 u32 frm_len;
73};
74
75struct buf_list {
76 unsigned char *desc_vptr;
77 u32 phy_addr;
78 u32 index;
79 u32 len;
80 u32 frames;
81 u32 frm_ofs;
82 struct list_head list;
83};
84
85struct shm_caif_frm {
86 /* Number of bytes of padding before the CAIF frame. */
87 u8 hdr_ofs;
88};
89
90struct shmdrv_layer {
91 /* caif_dev_common must always be first in the structure*/
92 struct caif_dev_common cfdev;
93
94 u32 shm_tx_addr;
95 u32 shm_rx_addr;
96 u32 shm_base_addr;
97 u32 tx_empty_available;
98 spinlock_t lock;
99
100 struct list_head tx_empty_list;
101 struct list_head tx_pend_list;
102 struct list_head tx_full_list;
103 struct list_head rx_empty_list;
104 struct list_head rx_pend_list;
105 struct list_head rx_full_list;
106
107 struct workqueue_struct *pshm_tx_workqueue;
108 struct workqueue_struct *pshm_rx_workqueue;
109
110 struct work_struct shm_tx_work;
111 struct work_struct shm_rx_work;
112
113 struct sk_buff_head sk_qhead;
114 struct shmdev_layer *pshm_dev;
115};
116
117static int shm_netdev_open(struct net_device *shm_netdev)
118{
119 netif_wake_queue(shm_netdev);
120 return 0;
121}
122
123static int shm_netdev_close(struct net_device *shm_netdev)
124{
125 netif_stop_queue(shm_netdev);
126 return 0;
127}
128
129int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
130{
131 struct buf_list *pbuf;
132 struct shmdrv_layer *pshm_drv;
133 struct list_head *pos;
134 u32 avail_emptybuff = 0;
135 unsigned long flags = 0;
136
137 pshm_drv = (struct shmdrv_layer *)priv;
138
139 /* Check for received buffers. */
140 if (mbx_msg & SHM_FULL_MASK) {
141 int idx;
142
143 spin_lock_irqsave(&pshm_drv->lock, flags);
144
145 /* Check whether we have any outstanding buffers. */
146 if (list_empty(&pshm_drv->rx_empty_list)) {
147
148 /* Release spin lock. */
149 spin_unlock_irqrestore(&pshm_drv->lock, flags);
150
151 /* We print even in IRQ context... */
152 pr_warn("No empty Rx buffers to fill: "
153 "mbx_msg:%x\n", mbx_msg);
154
155 /* Bail out. */
156 goto err_sync;
157 }
158
159 pbuf =
160 list_entry(pshm_drv->rx_empty_list.next,
161 struct buf_list, list);
162 idx = pbuf->index;
163
164 /* Check buffer synchronization. */
165 if (idx != SHM_GET_FULL(mbx_msg)) {
166
167 /* We print even in IRQ context... */
168 pr_warn(
169 "phyif_shm_mbx_msg_cb: RX full out of sync:"
170 " idx:%d, msg:%x SHM_GET_FULL(mbx_msg):%x\n",
171 idx, mbx_msg, SHM_GET_FULL(mbx_msg));
172
173 spin_unlock_irqrestore(&pshm_drv->lock, flags);
174
175 /* Bail out. */
176 goto err_sync;
177 }
178
179 list_del_init(&pbuf->list);
180 list_add_tail(&pbuf->list, &pshm_drv->rx_full_list);
181
182 spin_unlock_irqrestore(&pshm_drv->lock, flags);
183
184 /* Schedule RX work queue. */
185 if (!work_pending(&pshm_drv->shm_rx_work))
186 queue_work(pshm_drv->pshm_rx_workqueue,
187 &pshm_drv->shm_rx_work);
188 }
189
190 /* Check for emptied buffers. */
191 if (mbx_msg & SHM_EMPTY_MASK) {
192 int idx;
193
194 spin_lock_irqsave(&pshm_drv->lock, flags);
195
196 /* Check whether we have any outstanding buffers. */
197 if (list_empty(&pshm_drv->tx_full_list)) {
198
199 /* We print even in IRQ context... */
200 pr_warn("No TX to empty: msg:%x\n", mbx_msg);
201
202 spin_unlock_irqrestore(&pshm_drv->lock, flags);
203
204 /* Bail out. */
205 goto err_sync;
206 }
207
208 pbuf =
209 list_entry(pshm_drv->tx_full_list.next,
210 struct buf_list, list);
211 idx = pbuf->index;
212
213 /* Check buffer synchronization. */
214 if (idx != SHM_GET_EMPTY(mbx_msg)) {
215
216 spin_unlock_irqrestore(&pshm_drv->lock, flags);
217
218 /* We print even in IRQ context... */
219 pr_warn("TX empty "
220 "out of sync:idx:%d, msg:%x\n", idx, mbx_msg);
221
222 /* Bail out. */
223 goto err_sync;
224 }
225 list_del_init(&pbuf->list);
226
227 /* Reset buffer parameters. */
228 pbuf->frames = 0;
229 pbuf->frm_ofs = SHM_CAIF_FRM_OFS;
230
231 list_add_tail(&pbuf->list, &pshm_drv->tx_empty_list);
232
233 /* Check the available no. of buffers in the empty list */
234 list_for_each(pos, &pshm_drv->tx_empty_list)
235 avail_emptybuff++;
236
237 /* Check whether we have to wake up the transmitter. */
238 if ((avail_emptybuff > HIGH_WATERMARK) &&
239 (!pshm_drv->tx_empty_available)) {
240 pshm_drv->tx_empty_available = 1;
241 pshm_drv->cfdev.flowctrl
242 (pshm_drv->pshm_dev->pshm_netdev,
243 CAIF_FLOW_ON);
244
245 spin_unlock_irqrestore(&pshm_drv->lock, flags);
246
 247 /* Schedule the work queue if required. */
248 if (!work_pending(&pshm_drv->shm_tx_work))
249 queue_work(pshm_drv->pshm_tx_workqueue,
250 &pshm_drv->shm_tx_work);
251 } else
252 spin_unlock_irqrestore(&pshm_drv->lock, flags);
253 }
254
255 return 0;
256
257err_sync:
258 return -EIO;
259}
260
261static void shm_rx_work_func(struct work_struct *rx_work)
262{
263 struct shmdrv_layer *pshm_drv;
264 struct buf_list *pbuf;
265 unsigned long flags = 0;
266 struct sk_buff *skb;
267 char *p;
268 int ret;
269
270 pshm_drv = container_of(rx_work, struct shmdrv_layer, shm_rx_work);
271
272 while (1) {
273
274 struct shm_pck_desc *pck_desc;
275
276 spin_lock_irqsave(&pshm_drv->lock, flags);
277
278 /* Check for received buffers. */
279 if (list_empty(&pshm_drv->rx_full_list)) {
280 spin_unlock_irqrestore(&pshm_drv->lock, flags);
281 break;
282 }
283
284 pbuf =
285 list_entry(pshm_drv->rx_full_list.next, struct buf_list,
286 list);
287 list_del_init(&pbuf->list);
288
289 /* Retrieve pointer to start of the packet descriptor area. */
290 pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr;
291
292 /*
293 * Check whether descriptor contains a CAIF shared memory
294 * frame.
295 */
296 while (pck_desc->frm_ofs) {
297 unsigned int frm_buf_ofs;
298 unsigned int frm_pck_ofs;
299 unsigned int frm_pck_len;
300 /*
301 * Check whether offset is within buffer limits
302 * (lower).
303 */
304 if (pck_desc->frm_ofs <
305 (pbuf->phy_addr - pshm_drv->shm_base_addr))
306 break;
307 /*
308 * Check whether offset is within buffer limits
309 * (higher).
310 */
311 if (pck_desc->frm_ofs >
312 ((pbuf->phy_addr - pshm_drv->shm_base_addr) +
313 pbuf->len))
314 break;
315
316 /* Calculate offset from start of buffer. */
317 frm_buf_ofs =
318 pck_desc->frm_ofs - (pbuf->phy_addr -
319 pshm_drv->shm_base_addr);
320
321 /*
322 * Calculate offset and length of CAIF packet while
323 * taking care of the shared memory header.
324 */
325 frm_pck_ofs =
326 frm_buf_ofs + SHM_HDR_LEN +
327 (*(pbuf->desc_vptr + frm_buf_ofs));
328 frm_pck_len =
329 (pck_desc->frm_len - SHM_HDR_LEN -
330 (*(pbuf->desc_vptr + frm_buf_ofs)));
331
332 /* Check whether CAIF packet is within buffer limits */
333 if ((frm_pck_ofs + pck_desc->frm_len) > pbuf->len)
334 break;
335
336 /* Get a suitable CAIF packet and copy in data. */
337 skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev,
338 frm_pck_len + 1);
339 BUG_ON(skb == NULL);
340
341 p = skb_put(skb, frm_pck_len);
342 memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len);
343
344 skb->protocol = htons(ETH_P_CAIF);
345 skb_reset_mac_header(skb);
346 skb->dev = pshm_drv->pshm_dev->pshm_netdev;
347
348 /* Push received packet up the stack. */
349 ret = netif_rx_ni(skb);
350
351 if (!ret) {
352 pshm_drv->pshm_dev->pshm_netdev->stats.
353 rx_packets++;
354 pshm_drv->pshm_dev->pshm_netdev->stats.
355 rx_bytes += pck_desc->frm_len;
356 } else
357 ++pshm_drv->pshm_dev->pshm_netdev->stats.
358 rx_dropped;
359 /* Move to next packet descriptor. */
360 pck_desc++;
361 }
362
363 list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list);
364
365 spin_unlock_irqrestore(&pshm_drv->lock, flags);
366
367 }
368
 369 /* Schedule the work queue if required. */
370 if (!work_pending(&pshm_drv->shm_tx_work))
371 queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
372
373}
374
375static void shm_tx_work_func(struct work_struct *tx_work)
376{
377 u32 mbox_msg;
378 unsigned int frmlen, avail_emptybuff, append = 0;
379 unsigned long flags = 0;
380 struct buf_list *pbuf = NULL;
381 struct shmdrv_layer *pshm_drv;
382 struct shm_caif_frm *frm;
383 struct sk_buff *skb;
384 struct shm_pck_desc *pck_desc;
385 struct list_head *pos;
386
387 pshm_drv = container_of(tx_work, struct shmdrv_layer, shm_tx_work);
388
389 do {
390 /* Initialize mailbox message. */
391 mbox_msg = 0x00;
392 avail_emptybuff = 0;
393
394 spin_lock_irqsave(&pshm_drv->lock, flags);
395
396 /* Check for pending receive buffers. */
397 if (!list_empty(&pshm_drv->rx_pend_list)) {
398
399 pbuf = list_entry(pshm_drv->rx_pend_list.next,
400 struct buf_list, list);
401
402 list_del_init(&pbuf->list);
403 list_add_tail(&pbuf->list, &pshm_drv->rx_empty_list);
404 /*
405 * Value index is never changed,
406 * so read access should be safe.
407 */
408 mbox_msg |= SHM_SET_EMPTY(pbuf->index);
409 }
410
411 skb = skb_peek(&pshm_drv->sk_qhead);
412
413 if (skb == NULL)
414 goto send_msg;
415
416 /* Check the available no. of buffers in the empty list */
417 list_for_each(pos, &pshm_drv->tx_empty_list)
418 avail_emptybuff++;
419
420 if ((avail_emptybuff < LOW_WATERMARK) &&
421 pshm_drv->tx_empty_available) {
422 /* Update blocking condition. */
423 pshm_drv->tx_empty_available = 0;
424 pshm_drv->cfdev.flowctrl
425 (pshm_drv->pshm_dev->pshm_netdev,
426 CAIF_FLOW_OFF);
427 }
428 /*
 429 * We simply return to the caller if we do not have space
 430 * either in the Tx pending list or the Tx empty list. In this
 431 * case, we hold the received skb in the skb list, waiting to
 432 * be transmitted once Tx buffers become available.
433 */
434 if (list_empty(&pshm_drv->tx_empty_list))
435 goto send_msg;
436
437 /* Get the first free Tx buffer. */
438 pbuf = list_entry(pshm_drv->tx_empty_list.next,
439 struct buf_list, list);
440 do {
441 if (append) {
442 skb = skb_peek(&pshm_drv->sk_qhead);
443 if (skb == NULL)
444 break;
445 }
446
447 frm = (struct shm_caif_frm *)
448 (pbuf->desc_vptr + pbuf->frm_ofs);
449
450 frm->hdr_ofs = 0;
451 frmlen = 0;
452 frmlen += SHM_HDR_LEN + frm->hdr_ofs + skb->len;
453
454 /* Add tail padding if needed. */
455 if (frmlen % SHM_FRM_PAD_LEN)
456 frmlen += SHM_FRM_PAD_LEN -
457 (frmlen % SHM_FRM_PAD_LEN);
458
459 /*
460 * Verify that packet, header and additional padding
461 * can fit within the buffer frame area.
462 */
463 if (frmlen >= (pbuf->len - pbuf->frm_ofs))
464 break;
465
466 if (!append) {
467 list_del_init(&pbuf->list);
468 append = 1;
469 }
470
471 skb = skb_dequeue(&pshm_drv->sk_qhead);
472 /* Copy in CAIF frame. */
473 skb_copy_bits(skb, 0, pbuf->desc_vptr +
474 pbuf->frm_ofs + SHM_HDR_LEN +
475 frm->hdr_ofs, skb->len);
476
477 pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++;
478 pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes +=
479 frmlen;
480 dev_kfree_skb(skb);
481
482 /* Fill in the shared memory packet descriptor area. */
483 pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr);
484 /* Forward to current frame. */
485 pck_desc += pbuf->frames;
486 pck_desc->frm_ofs = (pbuf->phy_addr -
487 pshm_drv->shm_base_addr) +
488 pbuf->frm_ofs;
489 pck_desc->frm_len = frmlen;
490 /* Terminate packet descriptor area. */
491 pck_desc++;
492 pck_desc->frm_ofs = 0;
493 /* Update buffer parameters. */
494 pbuf->frames++;
495 pbuf->frm_ofs += frmlen + (frmlen % 32);
496
497 } while (pbuf->frames < SHM_MAX_FRMS_PER_BUF);
498
499 /* Assign buffer as full. */
500 list_add_tail(&pbuf->list, &pshm_drv->tx_full_list);
501 append = 0;
502 mbox_msg |= SHM_SET_FULL(pbuf->index);
503send_msg:
504 spin_unlock_irqrestore(&pshm_drv->lock, flags);
505
506 if (mbox_msg)
507 pshm_drv->pshm_dev->pshmdev_mbxsend
508 (pshm_drv->pshm_dev->shm_id, mbox_msg);
509 } while (mbox_msg);
510}
511
512static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
513{
514 struct shmdrv_layer *pshm_drv;
515 unsigned long flags = 0;
516
517 pshm_drv = netdev_priv(shm_netdev);
518
519 spin_lock_irqsave(&pshm_drv->lock, flags);
520
521 skb_queue_tail(&pshm_drv->sk_qhead, skb);
522
523 spin_unlock_irqrestore(&pshm_drv->lock, flags);
524
 525 /* Schedule Tx work queue for deferred processing of skbs. */
526 if (!work_pending(&pshm_drv->shm_tx_work))
527 queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
528
529 return 0;
530}
531
532static const struct net_device_ops netdev_ops = {
533 .ndo_open = shm_netdev_open,
534 .ndo_stop = shm_netdev_close,
535 .ndo_start_xmit = shm_netdev_tx,
536};
537
538static void shm_netdev_setup(struct net_device *pshm_netdev)
539{
540 struct shmdrv_layer *pshm_drv;
541 pshm_netdev->netdev_ops = &netdev_ops;
542
543 pshm_netdev->mtu = CAIF_MAX_MTU;
544 pshm_netdev->type = ARPHRD_CAIF;
545 pshm_netdev->hard_header_len = CAIF_NEEDED_HEADROOM;
546 pshm_netdev->tx_queue_len = 0;
547 pshm_netdev->destructor = free_netdev;
548
549 pshm_drv = netdev_priv(pshm_netdev);
550
551 /* Initialize structures in a clean state. */
552 memset(pshm_drv, 0, sizeof(struct shmdrv_layer));
553
554 pshm_drv->cfdev.link_select = CAIF_LINK_LOW_LATENCY;
555}
556
557int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
558{
559 int result, j;
560 struct shmdrv_layer *pshm_drv = NULL;
561
562 pshm_dev->pshm_netdev = alloc_netdev(sizeof(struct shmdrv_layer),
563 "cfshm%d", shm_netdev_setup);
564 if (!pshm_dev->pshm_netdev)
565 return -ENOMEM;
566
567 pshm_drv = netdev_priv(pshm_dev->pshm_netdev);
568 pshm_drv->pshm_dev = pshm_dev;
569
570 /*
571 * Initialization starts with the verification of the
572 * availability of MBX driver by calling its setup function.
573 * MBX driver must be available by this time for proper
574 * functioning of SHM driver.
575 */
576 if ((pshm_dev->pshmdev_mbxsetup
577 (caif_shmdrv_rx_cb, pshm_dev, pshm_drv)) != 0) {
578 pr_warn("Could not config. SHM Mailbox,"
579 " Bailing out.....\n");
580 free_netdev(pshm_dev->pshm_netdev);
581 return -ENODEV;
582 }
583
584 skb_queue_head_init(&pshm_drv->sk_qhead);
585
586 pr_info("SHM DEVICE[%d] PROBED BY DRIVER, NEW SHM DRIVER"
587 " INSTANCE AT pshm_drv =0x%p\n",
588 pshm_drv->pshm_dev->shm_id, pshm_drv);
589
590 if (pshm_dev->shm_total_sz <
591 (NR_TX_BUF * TX_BUF_SZ + NR_RX_BUF * RX_BUF_SZ)) {
592
593 pr_warn("ERROR, Amount of available"
594 " Phys. SHM cannot accomodate current SHM "
595 "driver configuration, Bailing out ...\n");
596 free_netdev(pshm_dev->pshm_netdev);
597 return -ENOMEM;
598 }
599
600 pshm_drv->shm_base_addr = pshm_dev->shm_base_addr;
601 pshm_drv->shm_tx_addr = pshm_drv->shm_base_addr;
602
603 if (pshm_dev->shm_loopback)
604 pshm_drv->shm_rx_addr = pshm_drv->shm_tx_addr;
605 else
606 pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr +
607 (NR_TX_BUF * TX_BUF_SZ);
608
609 INIT_LIST_HEAD(&pshm_drv->tx_empty_list);
610 INIT_LIST_HEAD(&pshm_drv->tx_pend_list);
611 INIT_LIST_HEAD(&pshm_drv->tx_full_list);
612
613 INIT_LIST_HEAD(&pshm_drv->rx_empty_list);
614 INIT_LIST_HEAD(&pshm_drv->rx_pend_list);
615 INIT_LIST_HEAD(&pshm_drv->rx_full_list);
616
617 INIT_WORK(&pshm_drv->shm_tx_work, shm_tx_work_func);
618 INIT_WORK(&pshm_drv->shm_rx_work, shm_rx_work_func);
619
620 pshm_drv->pshm_tx_workqueue =
621 create_singlethread_workqueue("shm_tx_work");
622 pshm_drv->pshm_rx_workqueue =
623 create_singlethread_workqueue("shm_rx_work");
624
625 for (j = 0; j < NR_TX_BUF; j++) {
626 struct buf_list *tx_buf =
627 kmalloc(sizeof(struct buf_list), GFP_KERNEL);
628
629 if (tx_buf == NULL) {
630 pr_warn("ERROR, Could not"
631 " allocate dynamic mem. for tx_buf,"
632 " Bailing out ...\n");
633 free_netdev(pshm_dev->pshm_netdev);
634 return -ENOMEM;
635 }
636 tx_buf->index = j;
637 tx_buf->phy_addr = pshm_drv->shm_tx_addr + (TX_BUF_SZ * j);
638 tx_buf->len = TX_BUF_SZ;
639 tx_buf->frames = 0;
640 tx_buf->frm_ofs = SHM_CAIF_FRM_OFS;
641
642 if (pshm_dev->shm_loopback)
643 tx_buf->desc_vptr = (char *)tx_buf->phy_addr;
644 else
645 tx_buf->desc_vptr =
646 ioremap(tx_buf->phy_addr, TX_BUF_SZ);
647
648 list_add_tail(&tx_buf->list, &pshm_drv->tx_empty_list);
649 }
650
651 for (j = 0; j < NR_RX_BUF; j++) {
652 struct buf_list *rx_buf =
653 kmalloc(sizeof(struct buf_list), GFP_KERNEL);
654
655 if (rx_buf == NULL) {
656 pr_warn("ERROR, Could not"
657 " allocate dynamic mem.for rx_buf,"
658 " Bailing out ...\n");
659 free_netdev(pshm_dev->pshm_netdev);
660 return -ENOMEM;
661 }
662 rx_buf->index = j;
663 rx_buf->phy_addr = pshm_drv->shm_rx_addr + (RX_BUF_SZ * j);
664 rx_buf->len = RX_BUF_SZ;
665
666 if (pshm_dev->shm_loopback)
667 rx_buf->desc_vptr = (char *)rx_buf->phy_addr;
668 else
669 rx_buf->desc_vptr =
670 ioremap(rx_buf->phy_addr, RX_BUF_SZ);
671 list_add_tail(&rx_buf->list, &pshm_drv->rx_empty_list);
672 }
673
674 pshm_drv->tx_empty_available = 1;
675 result = register_netdev(pshm_dev->pshm_netdev);
676 if (result)
677 pr_warn("ERROR[%d], SHM could not, "
678 "register with NW FRMWK Bailing out ...\n", result);
679
680 return result;
681}
682
683void caif_shmcore_remove(struct net_device *pshm_netdev)
684{
685 struct buf_list *pbuf;
686 struct shmdrv_layer *pshm_drv = NULL;
687
688 pshm_drv = netdev_priv(pshm_netdev);
689
690 while (!(list_empty(&pshm_drv->tx_pend_list))) {
691 pbuf =
692 list_entry(pshm_drv->tx_pend_list.next,
693 struct buf_list, list);
694
695 list_del(&pbuf->list);
696 kfree(pbuf);
697 }
698
699 while (!(list_empty(&pshm_drv->tx_full_list))) {
700 pbuf =
701 list_entry(pshm_drv->tx_full_list.next,
702 struct buf_list, list);
703 list_del(&pbuf->list);
704 kfree(pbuf);
705 }
706
707 while (!(list_empty(&pshm_drv->tx_empty_list))) {
708 pbuf =
709 list_entry(pshm_drv->tx_empty_list.next,
710 struct buf_list, list);
711 list_del(&pbuf->list);
712 kfree(pbuf);
713 }
714
715 while (!(list_empty(&pshm_drv->rx_full_list))) {
716 pbuf =
717 list_entry(pshm_drv->tx_full_list.next,
718 struct buf_list, list);
719 list_del(&pbuf->list);
720 kfree(pbuf);
721 }
722
723 while (!(list_empty(&pshm_drv->rx_pend_list))) {
724 pbuf =
725 list_entry(pshm_drv->tx_pend_list.next,
726 struct buf_list, list);
727 list_del(&pbuf->list);
728 kfree(pbuf);
729 }
730
731 while (!(list_empty(&pshm_drv->rx_empty_list))) {
732 pbuf =
733 list_entry(pshm_drv->rx_empty_list.next,
734 struct buf_list, list);
735 list_del(&pbuf->list);
736 kfree(pbuf);
737 }
738
739 /* Destroy work queues. */
740 destroy_workqueue(pshm_drv->pshm_tx_workqueue);
741 destroy_workqueue(pshm_drv->pshm_rx_workqueue);
742
743 unregister_netdev(pshm_netdev);
744}
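The mailbox word used throughout caif_shmcore.c packs two notifications: bits 0-3 say "buffer N is now full" and bits 4-7 say "buffer N is now empty", each stored as index+1 so that an all-zero nibble means "no notification". A runnable round trip of that encoding, with the macros copied verbatim from the file above:

#include <stdio.h>
#include <assert.h>

/* Copied as-is from caif_shmcore.c above. */
#define SHM_SET_FULL(x)         (((x+1) & 0x0F) << 0)
#define SHM_GET_FULL(x)         (((x >> 0) & 0x0F) - 1)
#define SHM_SET_EMPTY(x)        (((x+1) & 0x0F) << 4)
#define SHM_GET_EMPTY(x)        (((x >> 4) & 0x0F) - 1)
#define SHM_FULL_MASK           (0x0F << 0)
#define SHM_EMPTY_MASK          (0x0F << 4)

int main(void)
{
        unsigned int msg = 0;

        msg |= SHM_SET_FULL(2);         /* Rx buffer 2 handed over full */
        msg |= SHM_SET_EMPTY(5);        /* Tx buffer 5 handed back empty */

        assert(msg & SHM_FULL_MASK);
        assert(msg & SHM_EMPTY_MASK);
        printf("full=%d empty=%d (msg=0x%02x)\n",
               SHM_GET_FULL(msg), SHM_GET_EMPTY(msg), msg);     /* 2 5 0x63 */
        return 0;
}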
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 8427533fe313..20da1996d354 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -33,6 +33,9 @@ MODULE_LICENSE("GPL");
33MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>"); 33MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>");
34MODULE_DESCRIPTION("CAIF SPI driver"); 34MODULE_DESCRIPTION("CAIF SPI driver");
35 35
36/* Returns the number of padding bytes for alignment. */
37#define PAD_POW2(x, pow) ((((x)&((pow)-1))==0) ? 0 : (((pow)-((x)&((pow)-1)))))
38
36static int spi_loop; 39static int spi_loop;
37module_param(spi_loop, bool, S_IRUGO); 40module_param(spi_loop, bool, S_IRUGO);
38MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode."); 41MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
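PAD_POW2(x, pow) yields the number of bytes needed to round x up to the next multiple of pow (a power of two), and 0 when x is already aligned; the hunks below use it in place of the old bare `&` expressions, which treated the alignment parameters as bit masks rather than alignments. A quick runnable check with illustrative values:

#include <stdio.h>

/* Copied from the hunk above. */
#define PAD_POW2(x, pow) ((((x)&((pow)-1))==0) ? 0 : (((pow)-((x)&((pow)-1)))))

int main(void)
{
        printf("%d\n", PAD_POW2(5, 4));         /* 3: pads 5 up to 8 */
        printf("%d\n", PAD_POW2(8, 4));         /* 0: already aligned */
        printf("%d\n", PAD_POW2(13, 16));       /* 3: pads 13 up to 16 */
        return 0;
}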
@@ -41,7 +44,10 @@ MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
41module_param(spi_frm_align, int, S_IRUGO); 44module_param(spi_frm_align, int, S_IRUGO);
42MODULE_PARM_DESC(spi_frm_align, "SPI frame alignment."); 45MODULE_PARM_DESC(spi_frm_align, "SPI frame alignment.");
43 46
44/* SPI padding options. */ 47/*
48 * SPI padding options.
 49 * Warning: must be a power of 2 (& operation used) and cannot be zero!
50 */
45module_param(spi_up_head_align, int, S_IRUGO); 51module_param(spi_up_head_align, int, S_IRUGO);
46MODULE_PARM_DESC(spi_up_head_align, "SPI uplink head alignment."); 52MODULE_PARM_DESC(spi_up_head_align, "SPI uplink head alignment.");
47 53
@@ -240,15 +246,13 @@ static ssize_t dbgfs_frame(struct file *file, char __user *user_buf,
240static const struct file_operations dbgfs_state_fops = { 246static const struct file_operations dbgfs_state_fops = {
241 .open = dbgfs_open, 247 .open = dbgfs_open,
242 .read = dbgfs_state, 248 .read = dbgfs_state,
243 .owner = THIS_MODULE, 249 .owner = THIS_MODULE
244 .llseek = default_llseek,
245}; 250};
246 251
247static const struct file_operations dbgfs_frame_fops = { 252static const struct file_operations dbgfs_frame_fops = {
248 .open = dbgfs_open, 253 .open = dbgfs_open,
249 .read = dbgfs_frame, 254 .read = dbgfs_frame,
250 .owner = THIS_MODULE, 255 .owner = THIS_MODULE
251 .llseek = default_llseek,
252}; 256};
253 257
254static inline void dev_debugfs_add(struct cfspi *cfspi) 258static inline void dev_debugfs_add(struct cfspi *cfspi)
@@ -337,6 +341,9 @@ int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
337 u8 *dst = buf; 341 u8 *dst = buf;
338 caif_assert(buf); 342 caif_assert(buf);
339 343
344 if (cfspi->slave && !cfspi->slave_talked)
345 cfspi->slave_talked = true;
346
340 do { 347 do {
341 struct sk_buff *skb; 348 struct sk_buff *skb;
342 struct caif_payload_info *info; 349 struct caif_payload_info *info;
@@ -357,8 +364,8 @@ int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
357 * Compute head offset i.e. number of bytes to add to 364 * Compute head offset i.e. number of bytes to add to
358 * get the start of the payload aligned. 365 * get the start of the payload aligned.
359 */ 366 */
360 if (spi_up_head_align) { 367 if (spi_up_head_align > 1) {
361 spad = 1 + ((info->hdr_len + 1) & spi_up_head_align); 368 spad = 1 + PAD_POW2((info->hdr_len + 1), spi_up_head_align);
362 *dst = (u8)(spad - 1); 369 *dst = (u8)(spad - 1);
363 dst += spad; 370 dst += spad;
364 } 371 }
@@ -373,7 +380,7 @@ int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
373 * Compute tail offset i.e. number of bytes to add to 380 * Compute tail offset i.e. number of bytes to add to
374 * get the complete CAIF frame aligned. 381 * get the complete CAIF frame aligned.
375 */ 382 */
376 epad = (skb->len + spad) & spi_up_tail_align; 383 epad = PAD_POW2((skb->len + spad), spi_up_tail_align);
377 dst += epad; 384 dst += epad;
378 385
379 dev_kfree_skb(skb); 386 dev_kfree_skb(skb);
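The head padding reserves one descriptor byte and aligns the start of the
payload, while the tail padding aligns the complete frame. A sketch with
assumed values hdr_len = 4, spi_up_head_align = 4, skb->len = 30 and
spi_up_tail_align = 16:

    int spad = 1 + PAD_POW2(4 + 1, 4);   /* = 4; the descriptor byte stores spad - 1 = 3 */
    int epad = PAD_POW2(30 + spad, 16);  /* = 14; 30 + 4 + 14 = 48, a multiple of 16 */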
@@ -417,14 +424,14 @@ int cfspi_xmitlen(struct cfspi *cfspi)
417 * Compute head offset i.e. number of bytes to add to 424 * Compute head offset i.e. number of bytes to add to
418 * get the start of the payload aligned. 425 * get the start of the payload aligned.
419 */ 426 */
420 if (spi_up_head_align) 427 if (spi_up_head_align > 1)
421 spad = 1 + ((info->hdr_len + 1) & spi_up_head_align); 428 spad = 1 + PAD_POW2((info->hdr_len + 1), spi_up_head_align);
422 429
423 /* 430 /*
424 * Compute tail offset i.e. number of bytes to add to 431 * Compute tail offset i.e. number of bytes to add to
425 * get the complete CAIF frame aligned. 432 * get the complete CAIF frame aligned.
426 */ 433 */
427 epad = (skb->len + spad) & spi_up_tail_align; 434 epad = PAD_POW2((skb->len + spad), spi_up_tail_align);
428 435
429 if ((skb->len + spad + epad + frm_len) <= CAIF_MAX_SPI_FRAME) { 436 if ((skb->len + spad + epad + frm_len) <= CAIF_MAX_SPI_FRAME) {
430 skb_queue_tail(&cfspi->chead, skb); 437 skb_queue_tail(&cfspi->chead, skb);
@@ -433,6 +440,7 @@ int cfspi_xmitlen(struct cfspi *cfspi)
433 } else { 440 } else {
434 /* Put back packet. */ 441 /* Put back packet. */
435 skb_queue_head(&cfspi->qhead, skb); 442 skb_queue_head(&cfspi->qhead, skb);
443 break;
436 } 444 }
437 } while (pkts <= CAIF_MAX_SPI_PKTS); 445 } while (pkts <= CAIF_MAX_SPI_PKTS);
438 446
@@ -453,6 +461,15 @@ static void cfspi_ss_cb(bool assert, struct cfspi_ifc *ifc)
453{ 461{
454 struct cfspi *cfspi = (struct cfspi *)ifc->priv; 462 struct cfspi *cfspi = (struct cfspi *)ifc->priv;
455 463
464 /*
465 * The slave device is the master on the link. Interrupts before the
466 * slave has transmitted are considered spurious.
467 */
468 if (cfspi->slave && !cfspi->slave_talked) {
469 printk(KERN_WARNING "CFSPI: Spurious SS interrupt.\n");
470 return;
471 }
472
456 if (!in_interrupt()) 473 if (!in_interrupt())
457 spin_lock(&cfspi->lock); 474 spin_lock(&cfspi->lock);
458 if (assert) { 475 if (assert) {
@@ -465,7 +482,8 @@ static void cfspi_ss_cb(bool assert, struct cfspi_ifc *ifc)
465 spin_unlock(&cfspi->lock); 482 spin_unlock(&cfspi->lock);
466 483
467 /* Wake up the xfer thread. */ 484 /* Wake up the xfer thread. */
468 wake_up_interruptible(&cfspi->wait); 485 if (assert)
486 wake_up_interruptible(&cfspi->wait);
469} 487}
470 488
471static void cfspi_xfer_done_cb(struct cfspi_ifc *ifc) 489static void cfspi_xfer_done_cb(struct cfspi_ifc *ifc)
@@ -523,7 +541,7 @@ int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len)
523 * Compute head offset i.e. number of bytes added to 541 * Compute head offset i.e. number of bytes added to
524 * get the start of the payload aligned. 542 * get the start of the payload aligned.
525 */ 543 */
526 if (spi_down_head_align) { 544 if (spi_down_head_align > 1) {
527 spad = 1 + *src; 545 spad = 1 + *src;
528 src += spad; 546 src += spad;
529 } 547 }
@@ -564,7 +582,7 @@ int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len)
564 * Compute tail offset i.e. number of bytes added to 582 * Compute tail offset i.e. number of bytes added to
565 * get the complete CAIF frame aligned. 583 * get the complete CAIF frame aligned.
566 */ 584 */
567 epad = (pkt_len + spad) & spi_down_tail_align; 585 epad = PAD_POW2((pkt_len + spad), spi_down_tail_align);
568 src += epad; 586 src += epad;
569 } while ((src - buf) < len); 587 } while ((src - buf) < len);
570 588
@@ -617,19 +635,28 @@ int cfspi_spi_probe(struct platform_device *pdev)
617 635
618 ndev = alloc_netdev(sizeof(struct cfspi), 636 ndev = alloc_netdev(sizeof(struct cfspi),
619 "cfspi%d", cfspi_setup); 637 "cfspi%d", cfspi_setup);
620 if (!dev) 638 if (!ndev)
621 return -ENODEV; 639 return -ENOMEM;
622 640
623 cfspi = netdev_priv(ndev); 641 cfspi = netdev_priv(ndev);
624 netif_stop_queue(ndev); 642 netif_stop_queue(ndev);
625 cfspi->ndev = ndev; 643 cfspi->ndev = ndev;
626 cfspi->pdev = pdev; 644 cfspi->pdev = pdev;
627 645
628 /* Set flow info */ 646 /* Set flow info. */
629 cfspi->flow_off_sent = 0; 647 cfspi->flow_off_sent = 0;
630 cfspi->qd_low_mark = LOW_WATER_MARK; 648 cfspi->qd_low_mark = LOW_WATER_MARK;
631 cfspi->qd_high_mark = HIGH_WATER_MARK; 649 cfspi->qd_high_mark = HIGH_WATER_MARK;
632 650
651 /* Set slave info. */
652 if (!strncmp(cfspi_spi_driver.driver.name, "cfspi_sspi", 10)) {
653 cfspi->slave = true;
654 cfspi->slave_talked = false;
655 } else {
656 cfspi->slave = false;
657 cfspi->slave_talked = false;
658 }
659
633 /* Assign the SPI device. */ 660 /* Assign the SPI device. */
634 cfspi->dev = dev; 661 cfspi->dev = dev;
635 /* Assign the device ifc to this SPI interface. */ 662 /* Assign the device ifc to this SPI interface. */
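Since slave_talked starts out false in both branches, the role detection
above could equally be written as a single assignment; a sketch of the
equivalent form, not the committed code:

    cfspi->slave = !strncmp(cfspi_spi_driver.driver.name, "cfspi_sspi", 10);
    cfspi->slave_talked = false;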
diff --git a/drivers/net/caif/caif_spi_slave.c b/drivers/net/caif/caif_spi_slave.c
index 2111dbfea6fe..1b9943a4edab 100644
--- a/drivers/net/caif/caif_spi_slave.c
+++ b/drivers/net/caif/caif_spi_slave.c
@@ -36,10 +36,15 @@ static inline int forward_to_spi_cmd(struct cfspi *cfspi)
36#endif 36#endif
37 37
38int spi_frm_align = 2; 38int spi_frm_align = 2;
39int spi_up_head_align = 1; 39
40int spi_up_tail_align; 40/*
41int spi_down_head_align = 3; 41 * SPI padding options.
42int spi_down_tail_align = 1; 42 * Warning: must be a power of 2 (& operation used) and cannot be zero!
43 */
44int spi_up_head_align = 1 << 1;
45int spi_up_tail_align = 1 << 0;
46int spi_down_head_align = 1 << 2;
47int spi_down_tail_align = 1 << 1;
43 48
44#ifdef CONFIG_DEBUG_FS 49#ifdef CONFIG_DEBUG_FS
45static inline void debugfs_store_prev(struct cfspi *cfspi) 50static inline void debugfs_store_prev(struct cfspi *cfspi)
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 9d9e45394433..080574b0fff0 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -82,6 +82,14 @@ config CAN_FLEXCAN
82 ---help--- 82 ---help---
83 Say Y here if you want support for Freescale FlexCAN. 83 Say Y here if you want support for Freescale FlexCAN.
84 84
85config PCH_CAN
86 tristate "PCH CAN"
87 depends on CAN_DEV && PCI
88 ---help---
89	  This driver is for the PCH CAN controller of Topcliff, an IOH for
90	  the x86 embedded processor family.
91	  It provides access to the CAN bus.
92
85source "drivers/net/can/mscan/Kconfig" 93source "drivers/net/can/mscan/Kconfig"
86 94
87source "drivers/net/can/sja1000/Kconfig" 95source "drivers/net/can/sja1000/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 00575373bbd0..90af15a4f106 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -17,5 +17,6 @@ obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
17obj-$(CONFIG_CAN_BFIN) += bfin_can.o 17obj-$(CONFIG_CAN_BFIN) += bfin_can.o
18obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o 18obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
19obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o 19obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o
20obj-$(CONFIG_PCH_CAN) += pch_can.o
20 21
21ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG 22ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 2d8bd86bc5e2..7ef83d06f7ed 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * at91_can.c - CAN network driver for AT91 SoC CAN controller 2 * at91_can.c - CAN network driver for AT91 SoC CAN controller
3 * 3 *
4 * (C) 2007 by Hans J. Koch <hjk@linutronix.de> 4 * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
5 * (C) 2008, 2009 by Marc Kleine-Budde <kernel@pengutronix.de> 5 * (C) 2008, 2009, 2010 by Marc Kleine-Budde <kernel@pengutronix.de>
6 * 6 *
7 * This software may be distributed under the terms of the GNU General 7 * This software may be distributed under the terms of the GNU General
8 * Public License ("GPL") version 2 as distributed in the 'COPYING' 8 * Public License ("GPL") version 2 as distributed in the 'COPYING'
@@ -40,7 +40,6 @@
40 40
41#include <mach/board.h> 41#include <mach/board.h>
42 42
43#define DRV_NAME "at91_can"
44#define AT91_NAPI_WEIGHT 12 43#define AT91_NAPI_WEIGHT 12
45 44
46/* 45/*
@@ -172,6 +171,7 @@ struct at91_priv {
172}; 171};
173 172
174static struct can_bittiming_const at91_bittiming_const = { 173static struct can_bittiming_const at91_bittiming_const = {
174 .name = KBUILD_MODNAME,
175 .tseg1_min = 4, 175 .tseg1_min = 4,
176 .tseg1_max = 16, 176 .tseg1_max = 16,
177 .tseg2_min = 2, 177 .tseg2_min = 2,
@@ -199,13 +199,13 @@ static inline int get_tx_echo_mb(const struct at91_priv *priv)
199 199
200static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg) 200static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg)
201{ 201{
202 return readl(priv->reg_base + reg); 202 return __raw_readl(priv->reg_base + reg);
203} 203}
204 204
205static inline void at91_write(const struct at91_priv *priv, enum at91_reg reg, 205static inline void at91_write(const struct at91_priv *priv, enum at91_reg reg,
206 u32 value) 206 u32 value)
207{ 207{
208 writel(value, priv->reg_base + reg); 208 __raw_writel(value, priv->reg_base + reg);
209} 209}
210 210
211static inline void set_mb_mode_prio(const struct at91_priv *priv, 211static inline void set_mb_mode_prio(const struct at91_priv *priv,
@@ -243,6 +243,12 @@ static void at91_setup_mailboxes(struct net_device *dev)
243 set_mb_mode(priv, i, AT91_MB_MODE_RX); 243 set_mb_mode(priv, i, AT91_MB_MODE_RX);
244 set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR); 244 set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR);
245 245
246 /* reset acceptance mask and id register */
247 for (i = AT91_MB_RX_FIRST; i <= AT91_MB_RX_LAST; i++) {
248		at91_write(priv, AT91_MAM(i), 0x0);
249 at91_write(priv, AT91_MID(i), AT91_MID_MIDE);
250 }
251
246 /* The last 4 mailboxes are used for transmitting. */ 252 /* The last 4 mailboxes are used for transmitting. */
247 for (i = AT91_MB_TX_FIRST; i <= AT91_MB_TX_LAST; i++) 253 for (i = AT91_MB_TX_FIRST; i <= AT91_MB_TX_LAST; i++)
248 set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0); 254 set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);
@@ -257,18 +263,30 @@ static int at91_set_bittiming(struct net_device *dev)
257 const struct can_bittiming *bt = &priv->can.bittiming; 263 const struct can_bittiming *bt = &priv->can.bittiming;
258 u32 reg_br; 264 u32 reg_br;
259 265
260 reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) << 24) | 266 reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 << 24 : 0) |
261 ((bt->brp - 1) << 16) | ((bt->sjw - 1) << 12) | 267 ((bt->brp - 1) << 16) | ((bt->sjw - 1) << 12) |
262 ((bt->prop_seg - 1) << 8) | ((bt->phase_seg1 - 1) << 4) | 268 ((bt->prop_seg - 1) << 8) | ((bt->phase_seg1 - 1) << 4) |
263 ((bt->phase_seg2 - 1) << 0); 269 ((bt->phase_seg2 - 1) << 0);
264 270
265 dev_info(dev->dev.parent, "writing AT91_BR: 0x%08x\n", reg_br); 271 netdev_info(dev, "writing AT91_BR: 0x%08x\n", reg_br);
266 272
267 at91_write(priv, AT91_BR, reg_br); 273 at91_write(priv, AT91_BR, reg_br);
268 274
269 return 0; 275 return 0;
270} 276}
271 277
278static int at91_get_berr_counter(const struct net_device *dev,
279 struct can_berr_counter *bec)
280{
281 const struct at91_priv *priv = netdev_priv(dev);
282 u32 reg_ecr = at91_read(priv, AT91_ECR);
283
284 bec->rxerr = reg_ecr & 0xff;
285 bec->txerr = reg_ecr >> 16;
286
287 return 0;
288}
289
272static void at91_chip_start(struct net_device *dev) 290static void at91_chip_start(struct net_device *dev)
273{ 291{
274 struct at91_priv *priv = netdev_priv(dev); 292 struct at91_priv *priv = netdev_priv(dev);
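Both changes above are easiest to check with concrete values: the
CAN_CTRLMODE_3_SAMPLES flag (0x04) shifted by 24 lands on bit 26, not the
SMP bit at position 24, and AT91_ECR packs the receive counter in bits 0-7
and the transmit counter in bits 16-23. A standalone sketch with assumed
bittiming values (brp = 4, sjw = 1, prop_seg = 2, phase_seg1 = 7,
phase_seg2 = 4, triple sampling on):

    unsigned int reg_br = (1u << 24)          /* SMP: triple sampling */
        | ((4 - 1) << 16) | ((1 - 1) << 12)   /* BRP, SJW */
        | ((2 - 1) << 8) | ((7 - 1) << 4)     /* PROPAG, PHASE1 */
        | ((4 - 1) << 0);                     /* PHASE2 */
    /* reg_br == 0x01030163 */

    unsigned int reg_ecr = 0x00110022;        /* assumed ECR snapshot */
    unsigned int rxerr = reg_ecr & 0xff;      /* 0x22 = 34 */
    unsigned int txerr = reg_ecr >> 16;       /* 0x11 = 17 */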
@@ -281,6 +299,7 @@ static void at91_chip_start(struct net_device *dev)
281 reg_mr = at91_read(priv, AT91_MR); 299 reg_mr = at91_read(priv, AT91_MR);
282 at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN); 300 at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);
283 301
302 at91_set_bittiming(dev);
284 at91_setup_mailboxes(dev); 303 at91_setup_mailboxes(dev);
285 at91_transceiver_switch(priv, 1); 304 at91_transceiver_switch(priv, 1);
286 305
@@ -350,8 +369,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
350 if (unlikely(!(at91_read(priv, AT91_MSR(mb)) & AT91_MSR_MRDY))) { 369 if (unlikely(!(at91_read(priv, AT91_MSR(mb)) & AT91_MSR_MRDY))) {
351 netif_stop_queue(dev); 370 netif_stop_queue(dev);
352 371
353 dev_err(dev->dev.parent, 372 netdev_err(dev, "BUG! TX buffer full when queue awake!\n");
354 "BUG! TX buffer full when queue awake!\n");
355 return NETDEV_TX_BUSY; 373 return NETDEV_TX_BUSY;
356 } 374 }
357 375
@@ -435,7 +453,7 @@ static void at91_rx_overflow_err(struct net_device *dev)
435 struct sk_buff *skb; 453 struct sk_buff *skb;
436 struct can_frame *cf; 454 struct can_frame *cf;
437 455
438 dev_dbg(dev->dev.parent, "RX buffer overflow\n"); 456 netdev_dbg(dev, "RX buffer overflow\n");
439 stats->rx_over_errors++; 457 stats->rx_over_errors++;
440 stats->rx_errors++; 458 stats->rx_errors++;
441 459
@@ -480,6 +498,9 @@ static void at91_read_mb(struct net_device *dev, unsigned int mb,
480 *(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb)); 498 *(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
481 *(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb)); 499 *(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));
482 500
501 /* allow RX of extended frames */
502 at91_write(priv, AT91_MID(mb), AT91_MID_MIDE);
503
483 if (unlikely(mb == AT91_MB_RX_LAST && reg_msr & AT91_MSR_MMI)) 504 if (unlikely(mb == AT91_MB_RX_LAST && reg_msr & AT91_MSR_MMI))
484 at91_rx_overflow_err(dev); 505 at91_rx_overflow_err(dev);
485} 506}
@@ -565,8 +586,8 @@ static int at91_poll_rx(struct net_device *dev, int quota)
565 586
566 if (priv->rx_next > AT91_MB_RX_LOW_LAST && 587 if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
567 reg_sr & AT91_MB_RX_LOW_MASK) 588 reg_sr & AT91_MB_RX_LOW_MASK)
568 dev_info(dev->dev.parent, 589 netdev_info(dev,
569 "order of incoming frames cannot be guaranteed\n"); 590 "order of incoming frames cannot be guaranteed\n");
570 591
571 again: 592 again:
572 for (mb = find_next_bit(addr, AT91_MB_RX_NUM, priv->rx_next); 593 for (mb = find_next_bit(addr, AT91_MB_RX_NUM, priv->rx_next);
@@ -604,7 +625,7 @@ static void at91_poll_err_frame(struct net_device *dev,
604 625
605 /* CRC error */ 626 /* CRC error */
606 if (reg_sr & AT91_IRQ_CERR) { 627 if (reg_sr & AT91_IRQ_CERR) {
607 dev_dbg(dev->dev.parent, "CERR irq\n"); 628 netdev_dbg(dev, "CERR irq\n");
608 dev->stats.rx_errors++; 629 dev->stats.rx_errors++;
609 priv->can.can_stats.bus_error++; 630 priv->can.can_stats.bus_error++;
610 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; 631 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@@ -612,7 +633,7 @@ static void at91_poll_err_frame(struct net_device *dev,
612 633
613 /* Stuffing Error */ 634 /* Stuffing Error */
614 if (reg_sr & AT91_IRQ_SERR) { 635 if (reg_sr & AT91_IRQ_SERR) {
615 dev_dbg(dev->dev.parent, "SERR irq\n"); 636 netdev_dbg(dev, "SERR irq\n");
616 dev->stats.rx_errors++; 637 dev->stats.rx_errors++;
617 priv->can.can_stats.bus_error++; 638 priv->can.can_stats.bus_error++;
618 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; 639 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@@ -621,14 +642,14 @@ static void at91_poll_err_frame(struct net_device *dev,
621 642
622 /* Acknowledgement Error */ 643 /* Acknowledgement Error */
623 if (reg_sr & AT91_IRQ_AERR) { 644 if (reg_sr & AT91_IRQ_AERR) {
624 dev_dbg(dev->dev.parent, "AERR irq\n"); 645 netdev_dbg(dev, "AERR irq\n");
625 dev->stats.tx_errors++; 646 dev->stats.tx_errors++;
626 cf->can_id |= CAN_ERR_ACK; 647 cf->can_id |= CAN_ERR_ACK;
627 } 648 }
628 649
629 /* Form error */ 650 /* Form error */
630 if (reg_sr & AT91_IRQ_FERR) { 651 if (reg_sr & AT91_IRQ_FERR) {
631 dev_dbg(dev->dev.parent, "FERR irq\n"); 652 netdev_dbg(dev, "FERR irq\n");
632 dev->stats.rx_errors++; 653 dev->stats.rx_errors++;
633 priv->can.can_stats.bus_error++; 654 priv->can.can_stats.bus_error++;
634 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; 655 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@@ -637,7 +658,7 @@ static void at91_poll_err_frame(struct net_device *dev,
637 658
638 /* Bit Error */ 659 /* Bit Error */
639 if (reg_sr & AT91_IRQ_BERR) { 660 if (reg_sr & AT91_IRQ_BERR) {
640 dev_dbg(dev->dev.parent, "BERR irq\n"); 661 netdev_dbg(dev, "BERR irq\n");
641 dev->stats.tx_errors++; 662 dev->stats.tx_errors++;
642 priv->can.can_stats.bus_error++; 663 priv->can.can_stats.bus_error++;
643 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; 664 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@@ -755,12 +776,10 @@ static void at91_irq_err_state(struct net_device *dev,
755 struct can_frame *cf, enum can_state new_state) 776 struct can_frame *cf, enum can_state new_state)
756{ 777{
757 struct at91_priv *priv = netdev_priv(dev); 778 struct at91_priv *priv = netdev_priv(dev);
758 u32 reg_idr, reg_ier, reg_ecr; 779 u32 reg_idr = 0, reg_ier = 0;
759 u8 tec, rec; 780 struct can_berr_counter bec;
760 781
761 reg_ecr = at91_read(priv, AT91_ECR); 782 at91_get_berr_counter(dev, &bec);
762 rec = reg_ecr & 0xff;
763 tec = reg_ecr >> 16;
764 783
765 switch (priv->can.state) { 784 switch (priv->can.state) {
766 case CAN_STATE_ERROR_ACTIVE: 785 case CAN_STATE_ERROR_ACTIVE:
@@ -771,11 +790,11 @@ static void at91_irq_err_state(struct net_device *dev,
771 */ 790 */
772 if (new_state >= CAN_STATE_ERROR_WARNING && 791 if (new_state >= CAN_STATE_ERROR_WARNING &&
773 new_state <= CAN_STATE_BUS_OFF) { 792 new_state <= CAN_STATE_BUS_OFF) {
774 dev_dbg(dev->dev.parent, "Error Warning IRQ\n"); 793 netdev_dbg(dev, "Error Warning IRQ\n");
775 priv->can.can_stats.error_warning++; 794 priv->can.can_stats.error_warning++;
776 795
777 cf->can_id |= CAN_ERR_CRTL; 796 cf->can_id |= CAN_ERR_CRTL;
778 cf->data[1] = (tec > rec) ? 797 cf->data[1] = (bec.txerr > bec.rxerr) ?
779 CAN_ERR_CRTL_TX_WARNING : 798 CAN_ERR_CRTL_TX_WARNING :
780 CAN_ERR_CRTL_RX_WARNING; 799 CAN_ERR_CRTL_RX_WARNING;
781 } 800 }
@@ -787,11 +806,11 @@ static void at91_irq_err_state(struct net_device *dev,
787 */ 806 */
788 if (new_state >= CAN_STATE_ERROR_PASSIVE && 807 if (new_state >= CAN_STATE_ERROR_PASSIVE &&
789 new_state <= CAN_STATE_BUS_OFF) { 808 new_state <= CAN_STATE_BUS_OFF) {
790 dev_dbg(dev->dev.parent, "Error Passive IRQ\n"); 809 netdev_dbg(dev, "Error Passive IRQ\n");
791 priv->can.can_stats.error_passive++; 810 priv->can.can_stats.error_passive++;
792 811
793 cf->can_id |= CAN_ERR_CRTL; 812 cf->can_id |= CAN_ERR_CRTL;
794 cf->data[1] = (tec > rec) ? 813 cf->data[1] = (bec.txerr > bec.rxerr) ?
795 CAN_ERR_CRTL_TX_PASSIVE : 814 CAN_ERR_CRTL_TX_PASSIVE :
796 CAN_ERR_CRTL_RX_PASSIVE; 815 CAN_ERR_CRTL_RX_PASSIVE;
797 } 816 }
@@ -804,7 +823,7 @@ static void at91_irq_err_state(struct net_device *dev,
804 if (new_state <= CAN_STATE_ERROR_PASSIVE) { 823 if (new_state <= CAN_STATE_ERROR_PASSIVE) {
805 cf->can_id |= CAN_ERR_RESTARTED; 824 cf->can_id |= CAN_ERR_RESTARTED;
806 825
807 dev_dbg(dev->dev.parent, "restarted\n"); 826 netdev_dbg(dev, "restarted\n");
808 priv->can.can_stats.restarts++; 827 priv->can.can_stats.restarts++;
809 828
810 netif_carrier_on(dev); 829 netif_carrier_on(dev);
@@ -825,7 +844,7 @@ static void at91_irq_err_state(struct net_device *dev,
825 * circumstances. so just enable AT91_IRQ_ERRP, thus 844 * circumstances. so just enable AT91_IRQ_ERRP, thus
826 * the "fallthrough" 845 * the "fallthrough"
827 */ 846 */
828 dev_dbg(dev->dev.parent, "Error Active\n"); 847 netdev_dbg(dev, "Error Active\n");
829 cf->can_id |= CAN_ERR_PROT; 848 cf->can_id |= CAN_ERR_PROT;
830 cf->data[2] = CAN_ERR_PROT_ACTIVE; 849 cf->data[2] = CAN_ERR_PROT_ACTIVE;
831 case CAN_STATE_ERROR_WARNING: /* fallthrough */ 850 case CAN_STATE_ERROR_WARNING: /* fallthrough */
@@ -843,7 +862,7 @@ static void at91_irq_err_state(struct net_device *dev,
843 862
844 cf->can_id |= CAN_ERR_BUSOFF; 863 cf->can_id |= CAN_ERR_BUSOFF;
845 864
846 dev_dbg(dev->dev.parent, "bus-off\n"); 865 netdev_dbg(dev, "bus-off\n");
847 netif_carrier_off(dev); 866 netif_carrier_off(dev);
848 priv->can.can_stats.bus_off++; 867 priv->can.can_stats.bus_off++;
849 868
@@ -881,7 +900,7 @@ static void at91_irq_err(struct net_device *dev)
881 else if (likely(reg_sr & AT91_IRQ_ERRA)) 900 else if (likely(reg_sr & AT91_IRQ_ERRA))
882 new_state = CAN_STATE_ERROR_ACTIVE; 901 new_state = CAN_STATE_ERROR_ACTIVE;
883 else { 902 else {
884 dev_err(dev->dev.parent, "BUG! hardware in undefined state\n"); 903 netdev_err(dev, "BUG! hardware in undefined state\n");
885 return; 904 return;
886 } 905 }
887 906
@@ -1018,7 +1037,7 @@ static const struct net_device_ops at91_netdev_ops = {
1018 .ndo_start_xmit = at91_start_xmit, 1037 .ndo_start_xmit = at91_start_xmit,
1019}; 1038};
1020 1039
1021static int __init at91_can_probe(struct platform_device *pdev) 1040static int __devinit at91_can_probe(struct platform_device *pdev)
1022{ 1041{
1023 struct net_device *dev; 1042 struct net_device *dev;
1024 struct at91_priv *priv; 1043 struct at91_priv *priv;
@@ -1067,8 +1086,8 @@ static int __init at91_can_probe(struct platform_device *pdev)
1067 priv = netdev_priv(dev); 1086 priv = netdev_priv(dev);
1068 priv->can.clock.freq = clk_get_rate(clk); 1087 priv->can.clock.freq = clk_get_rate(clk);
1069 priv->can.bittiming_const = &at91_bittiming_const; 1088 priv->can.bittiming_const = &at91_bittiming_const;
1070 priv->can.do_set_bittiming = at91_set_bittiming;
1071 priv->can.do_set_mode = at91_set_mode; 1089 priv->can.do_set_mode = at91_set_mode;
1090 priv->can.do_get_berr_counter = at91_get_berr_counter;
1072 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; 1091 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
1073 priv->reg_base = addr; 1092 priv->reg_base = addr;
1074 priv->dev = dev; 1093 priv->dev = dev;
@@ -1092,7 +1111,7 @@ static int __init at91_can_probe(struct platform_device *pdev)
1092 return 0; 1111 return 0;
1093 1112
1094 exit_free: 1113 exit_free:
1095 free_netdev(dev); 1114 free_candev(dev);
1096 exit_iounmap: 1115 exit_iounmap:
1097 iounmap(addr); 1116 iounmap(addr);
1098 exit_release: 1117 exit_release:
@@ -1113,8 +1132,6 @@ static int __devexit at91_can_remove(struct platform_device *pdev)
1113 1132
1114 platform_set_drvdata(pdev, NULL); 1133 platform_set_drvdata(pdev, NULL);
1115 1134
1116 free_netdev(dev);
1117
1118 iounmap(priv->reg_base); 1135 iounmap(priv->reg_base);
1119 1136
1120 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1137 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1122,6 +1139,8 @@ static int __devexit at91_can_remove(struct platform_device *pdev)
1122 1139
1123 clk_put(priv->clk); 1140 clk_put(priv->clk);
1124 1141
1142 free_candev(dev);
1143
1125 return 0; 1144 return 0;
1126} 1145}
1127 1146
@@ -1129,21 +1148,19 @@ static struct platform_driver at91_can_driver = {
1129 .probe = at91_can_probe, 1148 .probe = at91_can_probe,
1130 .remove = __devexit_p(at91_can_remove), 1149 .remove = __devexit_p(at91_can_remove),
1131 .driver = { 1150 .driver = {
1132 .name = DRV_NAME, 1151 .name = KBUILD_MODNAME,
1133 .owner = THIS_MODULE, 1152 .owner = THIS_MODULE,
1134 }, 1153 },
1135}; 1154};
1136 1155
1137static int __init at91_can_module_init(void) 1156static int __init at91_can_module_init(void)
1138{ 1157{
1139 printk(KERN_INFO "%s netdevice driver\n", DRV_NAME);
1140 return platform_driver_register(&at91_can_driver); 1158 return platform_driver_register(&at91_can_driver);
1141} 1159}
1142 1160
1143static void __exit at91_can_module_exit(void) 1161static void __exit at91_can_module_exit(void)
1144{ 1162{
1145 platform_driver_unregister(&at91_can_driver); 1163 platform_driver_unregister(&at91_can_driver);
1146 printk(KERN_INFO "%s: driver removed\n", DRV_NAME);
1147} 1164}
1148 1165
1149module_init(at91_can_module_init); 1166module_init(at91_can_module_init);
@@ -1151,4 +1168,4 @@ module_exit(at91_can_module_exit);
1151 1168
1152MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>"); 1169MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
1153MODULE_LICENSE("GPL v2"); 1170MODULE_LICENSE("GPL v2");
1154MODULE_DESCRIPTION(DRV_NAME " CAN netdevice driver"); 1171MODULE_DESCRIPTION(KBUILD_MODNAME " CAN netdevice driver");
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index ef443a090ba7..d4990568baee 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -992,7 +992,6 @@ static int __devexit flexcan_remove(struct platform_device *pdev)
992 992
993 unregister_flexcandev(dev); 993 unregister_flexcandev(dev);
994 platform_set_drvdata(pdev, NULL); 994 platform_set_drvdata(pdev, NULL);
995 free_candev(dev);
996 iounmap(priv->base); 995 iounmap(priv->base);
997 996
998 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 997 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1000,6 +999,8 @@ static int __devexit flexcan_remove(struct platform_device *pdev)
1000 999
1001 clk_put(priv->clk); 1000 clk_put(priv->clk);
1002 1001
1002 free_candev(dev);
1003
1003 return 0; 1004 return 0;
1004} 1005}
1005 1006
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 6aadc3e32bd5..7ab534aee452 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -169,6 +169,7 @@
169# define RXBSIDH_SHIFT 3 169# define RXBSIDH_SHIFT 3
170#define RXBSIDL(n) (((n) * 0x10) + 0x60 + RXBSIDL_OFF) 170#define RXBSIDL(n) (((n) * 0x10) + 0x60 + RXBSIDL_OFF)
171# define RXBSIDL_IDE 0x08 171# define RXBSIDL_IDE 0x08
172# define RXBSIDL_SRR 0x10
172# define RXBSIDL_EID 3 173# define RXBSIDL_EID 3
173# define RXBSIDL_SHIFT 5 174# define RXBSIDL_SHIFT 5
174#define RXBEID8(n) (((n) * 0x10) + 0x60 + RXBEID8_OFF) 175#define RXBEID8(n) (((n) * 0x10) + 0x60 + RXBEID8_OFF)
@@ -475,6 +476,8 @@ static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
475 frame->can_id = 476 frame->can_id =
476 (buf[RXBSIDH_OFF] << RXBSIDH_SHIFT) | 477 (buf[RXBSIDH_OFF] << RXBSIDH_SHIFT) |
477 (buf[RXBSIDL_OFF] >> RXBSIDL_SHIFT); 478 (buf[RXBSIDL_OFF] >> RXBSIDL_SHIFT);
479 if (buf[RXBSIDL_OFF] & RXBSIDL_SRR)
480 frame->can_id |= CAN_RTR_FLAG;
478 } 481 }
479 /* Data length */ 482 /* Data length */
480 frame->can_dlc = get_can_dlc(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK); 483 frame->can_dlc = get_can_dlc(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK);
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
new file mode 100644
index 000000000000..672718261c68
--- /dev/null
+++ b/drivers/net/can/pch_can.c
@@ -0,0 +1,1463 @@
1/*
2 * Copyright (C) 1999 - 2010 Intel Corporation.
3 * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
17 */
18
19#include <linux/interrupt.h>
20#include <linux/delay.h>
21#include <linux/io.h>
22#include <linux/module.h>
23#include <linux/sched.h>
24#include <linux/pci.h>
25#include <linux/init.h>
26#include <linux/kernel.h>
27#include <linux/types.h>
28#include <linux/errno.h>
29#include <linux/netdevice.h>
30#include <linux/skbuff.h>
31#include <linux/can.h>
32#include <linux/can/dev.h>
33#include <linux/can/error.h>
34
35#define MAX_MSG_OBJ 32
36#define MSG_OBJ_RX 0 /* The receive message object flag. */
37#define MSG_OBJ_TX 1 /* The transmit message object flag. */
38
39#define ENABLE 1 /* The enable flag */
40#define DISABLE 0 /* The disable flag */
41#define CAN_CTRL_INIT 0x0001 /* The INIT bit of CANCONT register. */
42#define CAN_CTRL_IE 0x0002 /* The IE bit of CAN control register */
43#define CAN_CTRL_IE_SIE_EIE 0x000e
44#define CAN_CTRL_CCE 0x0040
45#define CAN_CTRL_OPT 0x0080 /* The OPT bit of CANCONT register. */
46#define CAN_OPT_SILENT 0x0008 /* The Silent bit of CANOPT reg. */
47#define CAN_OPT_LBACK 0x0010 /* The LoopBack bit of CANOPT reg. */
48#define CAN_CMASK_RX_TX_SET 0x00f3
49#define CAN_CMASK_RX_TX_GET 0x0073
50#define CAN_CMASK_ALL 0xff
51#define CAN_CMASK_RDWR 0x80
52#define CAN_CMASK_ARB 0x20
53#define CAN_CMASK_CTRL 0x10
54#define CAN_CMASK_MASK 0x40
55#define CAN_CMASK_NEWDAT 0x04
56#define CAN_CMASK_CLRINTPND 0x08
57
58#define CAN_IF_MCONT_NEWDAT 0x8000
59#define CAN_IF_MCONT_INTPND 0x2000
60#define CAN_IF_MCONT_UMASK 0x1000
61#define CAN_IF_MCONT_TXIE 0x0800
62#define CAN_IF_MCONT_RXIE 0x0400
63#define CAN_IF_MCONT_RMTEN 0x0200
64#define CAN_IF_MCONT_TXRQXT 0x0100
65#define CAN_IF_MCONT_EOB 0x0080
66#define CAN_IF_MCONT_DLC 0x000f
67#define CAN_IF_MCONT_MSGLOST 0x4000
68#define CAN_MASK2_MDIR_MXTD 0xc000
69#define CAN_ID2_DIR 0x2000
70#define CAN_ID_MSGVAL 0x8000
71
72#define CAN_STATUS_INT 0x8000
73#define CAN_IF_CREQ_BUSY 0x8000
74#define CAN_ID2_XTD 0x4000
75
76#define CAN_REC 0x00007f00
77#define CAN_TEC 0x000000ff
78
79#define PCH_RX_OK 0x00000010
80#define PCH_TX_OK 0x00000008
81#define PCH_BUS_OFF 0x00000080
82#define PCH_EWARN 0x00000040
83#define PCH_EPASSIV 0x00000020
84#define PCH_LEC0 0x00000001
85#define PCH_LEC1 0x00000002
86#define PCH_LEC2 0x00000004
87#define PCH_LEC_ALL (PCH_LEC0 | PCH_LEC1 | PCH_LEC2)
88#define PCH_STUF_ERR PCH_LEC0
89#define PCH_FORM_ERR PCH_LEC1
90#define PCH_ACK_ERR (PCH_LEC0 | PCH_LEC1)
91#define PCH_BIT1_ERR PCH_LEC2
92#define PCH_BIT0_ERR (PCH_LEC0 | PCH_LEC2)
93#define PCH_CRC_ERR (PCH_LEC1 | PCH_LEC2)
94
95/* bit position of certain controller bits. */
96#define BIT_BITT_BRP 0
97#define BIT_BITT_SJW 6
98#define BIT_BITT_TSEG1 8
99#define BIT_BITT_TSEG2 12
100#define BIT_IF1_MCONT_RXIE 10
101#define BIT_IF2_MCONT_TXIE 11
102#define BIT_BRPE_BRPE 6
103#define BIT_ES_TXERRCNT 0
104#define BIT_ES_RXERRCNT 8
105#define MSK_BITT_BRP 0x3f
106#define MSK_BITT_SJW 0xc0
107#define MSK_BITT_TSEG1 0xf00
108#define MSK_BITT_TSEG2 0x7000
109#define MSK_BRPE_BRPE 0x3c0
110#define MSK_BRPE_GET 0x0f
111#define MSK_CTRL_IE_SIE_EIE 0x07
112#define MSK_MCONT_TXIE 0x08
113#define MSK_MCONT_RXIE 0x10
114#define PCH_CAN_NO_TX_BUFF 1
115#define COUNTER_LIMIT 10
116
117#define PCH_CAN_CLK 50000000 /* 50MHz */
118
119/* Define the number of message objects.
120 * PCH CAN communications are done via Message RAM.
121 * The Message RAM consists of 32 message objects. */
122#define PCH_RX_OBJ_NUM	26 /* objects 1..PCH_RX_OBJ_NUM are Rx */
123#define PCH_TX_OBJ_NUM	6 /* objects PCH_RX_OBJ_NUM+1..PCH_OBJ_NUM are Tx */
124#define PCH_OBJ_NUM (PCH_TX_OBJ_NUM + PCH_RX_OBJ_NUM)
125
126#define PCH_FIFO_THRESH 16
127
128enum pch_can_mode {
129 PCH_CAN_ENABLE,
130 PCH_CAN_DISABLE,
131 PCH_CAN_ALL,
132 PCH_CAN_NONE,
133 PCH_CAN_STOP,
134 PCH_CAN_RUN
135};
136
137struct pch_can_regs {
138 u32 cont;
139 u32 stat;
140 u32 errc;
141 u32 bitt;
142 u32 intr;
143 u32 opt;
144 u32 brpe;
145 u32 reserve1;
146 u32 if1_creq;
147 u32 if1_cmask;
148 u32 if1_mask1;
149 u32 if1_mask2;
150 u32 if1_id1;
151 u32 if1_id2;
152 u32 if1_mcont;
153 u32 if1_dataa1;
154 u32 if1_dataa2;
155 u32 if1_datab1;
156 u32 if1_datab2;
157 u32 reserve2;
158 u32 reserve3[12];
159 u32 if2_creq;
160 u32 if2_cmask;
161 u32 if2_mask1;
162 u32 if2_mask2;
163 u32 if2_id1;
164 u32 if2_id2;
165 u32 if2_mcont;
166 u32 if2_dataa1;
167 u32 if2_dataa2;
168 u32 if2_datab1;
169 u32 if2_datab2;
170 u32 reserve4;
171 u32 reserve5[20];
172 u32 treq1;
173 u32 treq2;
174 u32 reserve6[2];
175 u32 reserve7[56];
176 u32 reserve8[3];
177 u32 srst;
178};
179
180struct pch_can_priv {
181 struct can_priv can;
182 unsigned int can_num;
183 struct pci_dev *dev;
184 unsigned int tx_enable[MAX_MSG_OBJ];
185 unsigned int rx_enable[MAX_MSG_OBJ];
186 unsigned int rx_link[MAX_MSG_OBJ];
187 unsigned int int_enables;
188 unsigned int int_stat;
189 struct net_device *ndev;
190	spinlock_t msgif_reg_lock; /* Message Interface Registers Access Lock */
191 unsigned int msg_obj[MAX_MSG_OBJ];
192 struct pch_can_regs __iomem *regs;
193 struct napi_struct napi;
194 unsigned int tx_obj; /* Point next Tx Obj index */
195 unsigned int use_msi;
196};
197
198static struct can_bittiming_const pch_can_bittiming_const = {
199 .name = KBUILD_MODNAME,
200 .tseg1_min = 1,
201 .tseg1_max = 16,
202 .tseg2_min = 1,
203 .tseg2_max = 8,
204 .sjw_max = 4,
205 .brp_min = 1,
206 .brp_max = 1024, /* 6bit + extended 4bit */
207 .brp_inc = 1,
208};
209
210static DEFINE_PCI_DEVICE_TABLE(pch_pci_tbl) = {
211 {PCI_VENDOR_ID_INTEL, 0x8818, PCI_ANY_ID, PCI_ANY_ID,},
212 {0,}
213};
214MODULE_DEVICE_TABLE(pci, pch_pci_tbl);
215
216static inline void pch_can_bit_set(void __iomem *addr, u32 mask)
217{
218 iowrite32(ioread32(addr) | mask, addr);
219}
220
221static inline void pch_can_bit_clear(void __iomem *addr, u32 mask)
222{
223 iowrite32(ioread32(addr) & ~mask, addr);
224}
225
226static void pch_can_set_run_mode(struct pch_can_priv *priv,
227 enum pch_can_mode mode)
228{
229 switch (mode) {
230 case PCH_CAN_RUN:
231 pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_INIT);
232 break;
233
234 case PCH_CAN_STOP:
235 pch_can_bit_set(&priv->regs->cont, CAN_CTRL_INIT);
236 break;
237
238 default:
239 dev_err(&priv->ndev->dev, "%s -> Invalid Mode.\n", __func__);
240 break;
241 }
242}
243
244static void pch_can_set_optmode(struct pch_can_priv *priv)
245{
246 u32 reg_val = ioread32(&priv->regs->opt);
247
248 if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
249 reg_val |= CAN_OPT_SILENT;
250
251 if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
252 reg_val |= CAN_OPT_LBACK;
253
254 pch_can_bit_set(&priv->regs->cont, CAN_CTRL_OPT);
255 iowrite32(reg_val, &priv->regs->opt);
256}
257
258static void pch_can_set_int_custom(struct pch_can_priv *priv)
259{
260 /* Clearing the IE, SIE and EIE bits of Can control register. */
261 pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
262
263 /* Appropriately setting them. */
264 pch_can_bit_set(&priv->regs->cont,
265 ((priv->int_enables & MSK_CTRL_IE_SIE_EIE) << 1));
266}
267
268/* This function retrieves the interrupts enabled for the CAN device. */
269static void pch_can_get_int_enables(struct pch_can_priv *priv, u32 *enables)
270{
271 /* Obtaining the status of IE, SIE and EIE interrupt bits. */
272 *enables = ((ioread32(&priv->regs->cont) & CAN_CTRL_IE_SIE_EIE) >> 1);
273}
274
275static void pch_can_set_int_enables(struct pch_can_priv *priv,
276 enum pch_can_mode interrupt_no)
277{
278 switch (interrupt_no) {
279 case PCH_CAN_ENABLE:
280 pch_can_bit_set(&priv->regs->cont, CAN_CTRL_IE);
281 break;
282
283 case PCH_CAN_DISABLE:
284 pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE);
285 break;
286
287 case PCH_CAN_ALL:
288 pch_can_bit_set(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
289 break;
290
291 case PCH_CAN_NONE:
292 pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
293 break;
294
295 default:
296 dev_err(&priv->ndev->dev, "Invalid interrupt number.\n");
297 break;
298 }
299}
300
301static void pch_can_check_if_busy(u32 __iomem *creq_addr, u32 num)
302{
303 u32 counter = COUNTER_LIMIT;
304 u32 ifx_creq;
305
306 iowrite32(num, creq_addr);
307 while (counter) {
308 ifx_creq = ioread32(creq_addr) & CAN_IF_CREQ_BUSY;
309 if (!ifx_creq)
310 break;
311 counter--;
312 udelay(1);
313 }
314 if (!counter)
315 pr_err("%s:IF1 BUSY Flag is set forever.\n", __func__);
316}
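/*
 * A minimal sketch of the IFx handshake this helper supports (obj_num is
 * a placeholder for a 1-based message object number):
 *
 *	iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
 *	pch_can_check_if_busy(&priv->regs->if1_creq, obj_num);	(RAM -> IF1)
 *	... inspect or modify the IF1 shadow registers ...
 *	iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL, &priv->regs->if1_cmask);
 *	pch_can_check_if_busy(&priv->regs->if1_creq, obj_num);	(IF1 -> RAM)
 */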
317
318static void pch_can_set_rx_enable(struct pch_can_priv *priv, u32 buff_num,
319 u32 set)
320{
321 unsigned long flags;
322
323 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
324 /* Reading the receive buffer data from RAM to Interface1 registers */
325 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
326 pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
327
328 /* Setting the IF1MASK1 register to access MsgVal and RxIE bits */
329 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_ARB | CAN_CMASK_CTRL,
330 &priv->regs->if1_cmask);
331
332 if (set == ENABLE) {
333 /* Setting the MsgVal and RxIE bits */
334 pch_can_bit_set(&priv->regs->if1_mcont, CAN_IF_MCONT_RXIE);
335 pch_can_bit_set(&priv->regs->if1_id2, CAN_ID_MSGVAL);
336
337 } else if (set == DISABLE) {
338 /* Resetting the MsgVal and RxIE bits */
339 pch_can_bit_clear(&priv->regs->if1_mcont, CAN_IF_MCONT_RXIE);
340 pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID_MSGVAL);
341 }
342
343 pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
344 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
345}
346
347static void pch_can_rx_enable_all(struct pch_can_priv *priv)
348{
349 int i;
350
351	/* Traversing to obtain the objects configured as receivers. */
352 for (i = 0; i < PCH_OBJ_NUM; i++) {
353 if (priv->msg_obj[i] == MSG_OBJ_RX)
354 pch_can_set_rx_enable(priv, i + 1, ENABLE);
355 }
356}
357
358static void pch_can_rx_disable_all(struct pch_can_priv *priv)
359{
360 int i;
361
362	/* Traversing to obtain the objects configured as receivers. */
363 for (i = 0; i < PCH_OBJ_NUM; i++) {
364 if (priv->msg_obj[i] == MSG_OBJ_RX)
365 pch_can_set_rx_enable(priv, i + 1, DISABLE);
366 }
367}
368
369static void pch_can_set_tx_enable(struct pch_can_priv *priv, u32 buff_num,
370 u32 set)
371{
372 unsigned long flags;
373
374 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
375 /* Reading the Msg buffer from Message RAM to Interface2 registers. */
376 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
377 pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
378
379 /* Setting the IF2CMASK register for accessing the
380 MsgVal and TxIE bits */
381 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_ARB | CAN_CMASK_CTRL,
382 &priv->regs->if2_cmask);
383
384 if (set == ENABLE) {
385 /* Setting the MsgVal and TxIE bits */
386 pch_can_bit_set(&priv->regs->if2_mcont, CAN_IF_MCONT_TXIE);
387 pch_can_bit_set(&priv->regs->if2_id2, CAN_ID_MSGVAL);
388 } else if (set == DISABLE) {
389 /* Resetting the MsgVal and TxIE bits. */
390 pch_can_bit_clear(&priv->regs->if2_mcont, CAN_IF_MCONT_TXIE);
391 pch_can_bit_clear(&priv->regs->if2_id2, CAN_ID_MSGVAL);
392 }
393
394 pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
395 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
396}
397
398static void pch_can_tx_enable_all(struct pch_can_priv *priv)
399{
400 int i;
401
402	/* Traversing to obtain the objects configured as transmit objects. */
403 for (i = 0; i < PCH_OBJ_NUM; i++) {
404 if (priv->msg_obj[i] == MSG_OBJ_TX)
405 pch_can_set_tx_enable(priv, i + 1, ENABLE);
406 }
407}
408
409static void pch_can_tx_disable_all(struct pch_can_priv *priv)
410{
411 int i;
412
413	/* Traversing to obtain the objects configured as transmit objects. */
414 for (i = 0; i < PCH_OBJ_NUM; i++) {
415 if (priv->msg_obj[i] == MSG_OBJ_TX)
416 pch_can_set_tx_enable(priv, i + 1, DISABLE);
417 }
418}
419
420static void pch_can_get_rx_enable(struct pch_can_priv *priv, u32 buff_num,
421 u32 *enable)
422{
423 unsigned long flags;
424
425 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
426 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
427 pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
428
429 if (((ioread32(&priv->regs->if1_id2)) & CAN_ID_MSGVAL) &&
430 ((ioread32(&priv->regs->if1_mcont)) &
431 CAN_IF_MCONT_RXIE))
432 *enable = ENABLE;
433 else
434 *enable = DISABLE;
435 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
436}
437
438static void pch_can_get_tx_enable(struct pch_can_priv *priv, u32 buff_num,
439 u32 *enable)
440{
441 unsigned long flags;
442
443 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
444 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
445 pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
446
447 if (((ioread32(&priv->regs->if2_id2)) & CAN_ID_MSGVAL) &&
448 ((ioread32(&priv->regs->if2_mcont)) &
449 CAN_IF_MCONT_TXIE)) {
450 *enable = ENABLE;
451 } else {
452 *enable = DISABLE;
453 }
454 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
455}
456
457static int pch_can_int_pending(struct pch_can_priv *priv)
458{
459 return ioread32(&priv->regs->intr) & 0xffff;
460}
461
462static void pch_can_set_rx_buffer_link(struct pch_can_priv *priv,
463 u32 buffer_num, u32 set)
464{
465 unsigned long flags;
466
467 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
468 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
469 pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
470 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL, &priv->regs->if1_cmask);
471 if (set == ENABLE)
472 pch_can_bit_clear(&priv->regs->if1_mcont, CAN_IF_MCONT_EOB);
473 else
474 pch_can_bit_set(&priv->regs->if1_mcont, CAN_IF_MCONT_EOB);
475
476 pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
477 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
478}
479
480static void pch_can_get_rx_buffer_link(struct pch_can_priv *priv,
481 u32 buffer_num, u32 *link)
482{
483 unsigned long flags;
484
485 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
486 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
487 pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
488
489 if (ioread32(&priv->regs->if1_mcont) & CAN_IF_MCONT_EOB)
490 *link = DISABLE;
491 else
492 *link = ENABLE;
493 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
494}
495
496static void pch_can_clear_buffers(struct pch_can_priv *priv)
497{
498 int i;
499
500 for (i = 0; i < PCH_RX_OBJ_NUM; i++) {
501 iowrite32(CAN_CMASK_RX_TX_SET, &priv->regs->if1_cmask);
502 iowrite32(0xffff, &priv->regs->if1_mask1);
503 iowrite32(0xffff, &priv->regs->if1_mask2);
504 iowrite32(0x0, &priv->regs->if1_id1);
505 iowrite32(0x0, &priv->regs->if1_id2);
506 iowrite32(0x0, &priv->regs->if1_mcont);
507 iowrite32(0x0, &priv->regs->if1_dataa1);
508 iowrite32(0x0, &priv->regs->if1_dataa2);
509 iowrite32(0x0, &priv->regs->if1_datab1);
510 iowrite32(0x0, &priv->regs->if1_datab2);
511 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
512 CAN_CMASK_ARB | CAN_CMASK_CTRL,
513 &priv->regs->if1_cmask);
514 pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
515 }
516
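	/* i carries over from the Rx loop above, so this continues with the
	 * first Tx object (message object PCH_RX_OBJ_NUM + 1 onwards). */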
517 for (i = i; i < PCH_OBJ_NUM; i++) {
518 iowrite32(CAN_CMASK_RX_TX_SET, &priv->regs->if2_cmask);
519 iowrite32(0xffff, &priv->regs->if2_mask1);
520 iowrite32(0xffff, &priv->regs->if2_mask2);
521 iowrite32(0x0, &priv->regs->if2_id1);
522 iowrite32(0x0, &priv->regs->if2_id2);
523 iowrite32(0x0, &priv->regs->if2_mcont);
524 iowrite32(0x0, &priv->regs->if2_dataa1);
525 iowrite32(0x0, &priv->regs->if2_dataa2);
526 iowrite32(0x0, &priv->regs->if2_datab1);
527 iowrite32(0x0, &priv->regs->if2_datab2);
528 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
529 CAN_CMASK_ARB | CAN_CMASK_CTRL,
530 &priv->regs->if2_cmask);
531 pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
532 }
533}
534
535static void pch_can_config_rx_tx_buffers(struct pch_can_priv *priv)
536{
537 int i;
538 unsigned long flags;
539
540 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
541
542 for (i = 0; i < PCH_OBJ_NUM; i++) {
543 if (priv->msg_obj[i] == MSG_OBJ_RX) {
544 iowrite32(CAN_CMASK_RX_TX_GET,
545 &priv->regs->if1_cmask);
546 pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
547
548 iowrite32(0x0, &priv->regs->if1_id1);
549 iowrite32(0x0, &priv->regs->if1_id2);
550
551 pch_can_bit_set(&priv->regs->if1_mcont,
552 CAN_IF_MCONT_UMASK);
553
554			/* Clear EoB (FIFO mode) for all but the last Rx Obj */
555 pch_can_bit_clear(&priv->regs->if1_mcont,
556 CAN_IF_MCONT_EOB);
557			/* In FIFO mode, the EoB of the last Rx Obj must be 1 */
558 if (i == (PCH_RX_OBJ_NUM - 1))
559 pch_can_bit_set(&priv->regs->if1_mcont,
560 CAN_IF_MCONT_EOB);
561
562 iowrite32(0, &priv->regs->if1_mask1);
563 pch_can_bit_clear(&priv->regs->if1_mask2,
564 0x1fff | CAN_MASK2_MDIR_MXTD);
565
566 /* Setting CMASK for writing */
567 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
568 CAN_CMASK_ARB | CAN_CMASK_CTRL,
569 &priv->regs->if1_cmask);
570
571 pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
572 } else if (priv->msg_obj[i] == MSG_OBJ_TX) {
573 iowrite32(CAN_CMASK_RX_TX_GET,
574 &priv->regs->if2_cmask);
575 pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
576
577 /* Resetting DIR bit for reception */
578 iowrite32(0x0, &priv->regs->if2_id1);
579 iowrite32(0x0, &priv->regs->if2_id2);
580 pch_can_bit_set(&priv->regs->if2_id2, CAN_ID2_DIR);
581
582 /* Setting EOB bit for transmitter */
583 iowrite32(CAN_IF_MCONT_EOB, &priv->regs->if2_mcont);
584
585 pch_can_bit_set(&priv->regs->if2_mcont,
586 CAN_IF_MCONT_UMASK);
587
588 iowrite32(0, &priv->regs->if2_mask1);
589 pch_can_bit_clear(&priv->regs->if2_mask2, 0x1fff);
590
591 /* Setting CMASK for writing */
592 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
593 CAN_CMASK_ARB | CAN_CMASK_CTRL,
594 &priv->regs->if2_cmask);
595
596 pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
597 }
598 }
599 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
600}
601
602static void pch_can_init(struct pch_can_priv *priv)
603{
604	/* Stopping the CAN device. */
605 pch_can_set_run_mode(priv, PCH_CAN_STOP);
606
607 /* Clearing all the message object buffers. */
608 pch_can_clear_buffers(priv);
609
610 /* Configuring the respective message object as either rx/tx object. */
611 pch_can_config_rx_tx_buffers(priv);
612
613 /* Enabling the interrupts. */
614 pch_can_set_int_enables(priv, PCH_CAN_ALL);
615}
616
617static void pch_can_release(struct pch_can_priv *priv)
618{
619	/* Stopping the CAN device. */
620 pch_can_set_run_mode(priv, PCH_CAN_STOP);
621
622 /* Disabling the interrupts. */
623 pch_can_set_int_enables(priv, PCH_CAN_NONE);
624
625	/* Disabling all the receive objects. */
626 pch_can_rx_disable_all(priv);
627
628	/* Disabling all the transmit objects. */
629 pch_can_tx_disable_all(priv);
630}
631
632/* This function clears interrupt(s) from the CAN device. */
633static void pch_can_int_clr(struct pch_can_priv *priv, u32 mask)
634{
635 if (mask == CAN_STATUS_INT) {
636 ioread32(&priv->regs->stat);
637 return;
638 }
639
640 /* Clear interrupt for transmit object */
641 if (priv->msg_obj[mask - 1] == MSG_OBJ_TX) {
642 /* Setting CMASK for clearing interrupts for
643 frame transmission. */
644 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL | CAN_CMASK_ARB,
645 &priv->regs->if2_cmask);
646
647 /* Resetting the ID registers. */
648 pch_can_bit_set(&priv->regs->if2_id2,
649 CAN_ID2_DIR | (0x7ff << 2));
650 iowrite32(0x0, &priv->regs->if2_id1);
651
652 /* Claring NewDat, TxRqst & IntPnd */
653 pch_can_bit_clear(&priv->regs->if2_mcont,
654 CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND |
655 CAN_IF_MCONT_TXRQXT);
656 pch_can_check_if_busy(&priv->regs->if2_creq, mask);
657 } else if (priv->msg_obj[mask - 1] == MSG_OBJ_RX) {
658 /* Setting CMASK for clearing the reception interrupts. */
659 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL | CAN_CMASK_ARB,
660 &priv->regs->if1_cmask);
661
662 /* Clearing the Dir bit. */
663 pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID2_DIR);
664
665 /* Clearing NewDat & IntPnd */
666 pch_can_bit_clear(&priv->regs->if1_mcont,
667 CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND);
668
669 pch_can_check_if_busy(&priv->regs->if1_creq, mask);
670 }
671}
672
673static int pch_can_get_buffer_status(struct pch_can_priv *priv)
674{
675 return (ioread32(&priv->regs->treq1) & 0xffff) |
676 ((ioread32(&priv->regs->treq2) & 0xffff) << 16);
677}
678
679static void pch_can_reset(struct pch_can_priv *priv)
680{
681 /* write to sw reset register */
682 iowrite32(1, &priv->regs->srst);
683 iowrite32(0, &priv->regs->srst);
684}
685
686static void pch_can_error(struct net_device *ndev, u32 status)
687{
688 struct sk_buff *skb;
689 struct pch_can_priv *priv = netdev_priv(ndev);
690 struct can_frame *cf;
691 u32 errc;
692 struct net_device_stats *stats = &(priv->ndev->stats);
693 enum can_state state = priv->can.state;
694
695 skb = alloc_can_err_skb(ndev, &cf);
696 if (!skb)
697 return;
698
699 if (status & PCH_BUS_OFF) {
700 pch_can_tx_disable_all(priv);
701 pch_can_rx_disable_all(priv);
702 state = CAN_STATE_BUS_OFF;
703 cf->can_id |= CAN_ERR_BUSOFF;
704 can_bus_off(ndev);
705 pch_can_set_run_mode(priv, PCH_CAN_RUN);
706 dev_err(&ndev->dev, "%s -> Bus Off occurres.\n", __func__);
707 }
708
709 /* Warning interrupt. */
710 if (status & PCH_EWARN) {
711 state = CAN_STATE_ERROR_WARNING;
712 priv->can.can_stats.error_warning++;
713 cf->can_id |= CAN_ERR_CRTL;
714 errc = ioread32(&priv->regs->errc);
715 if (((errc & CAN_REC) >> 8) > 96)
716 cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
717 if ((errc & CAN_TEC) > 96)
718 cf->data[1] |= CAN_ERR_CRTL_TX_WARNING;
719 dev_warn(&ndev->dev,
720 "%s -> Error Counter is more than 96.\n", __func__);
721 }
722 /* Error passive interrupt. */
723 if (status & PCH_EPASSIV) {
724 priv->can.can_stats.error_passive++;
725 state = CAN_STATE_ERROR_PASSIVE;
726 cf->can_id |= CAN_ERR_CRTL;
727 errc = ioread32(&priv->regs->errc);
728 if (((errc & CAN_REC) >> 8) > 127)
729 cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
730 if ((errc & CAN_TEC) > 127)
731 cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
732 dev_err(&ndev->dev,
733 "%s -> CAN controller is ERROR PASSIVE .\n", __func__);
734 }
735
736 if (status & PCH_LEC_ALL) {
737 priv->can.can_stats.bus_error++;
738 stats->rx_errors++;
739 switch (status & PCH_LEC_ALL) {
740 case PCH_STUF_ERR:
741 cf->data[2] |= CAN_ERR_PROT_STUFF;
742 break;
743 case PCH_FORM_ERR:
744 cf->data[2] |= CAN_ERR_PROT_FORM;
745 break;
746 case PCH_ACK_ERR:
747 cf->data[2] |= CAN_ERR_PROT_LOC_ACK |
748 CAN_ERR_PROT_LOC_ACK_DEL;
749 break;
750 case PCH_BIT1_ERR:
751 case PCH_BIT0_ERR:
752 cf->data[2] |= CAN_ERR_PROT_BIT;
753 break;
754 case PCH_CRC_ERR:
755 cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
756 CAN_ERR_PROT_LOC_CRC_DEL;
757 break;
758 default:
759 iowrite32(status | PCH_LEC_ALL, &priv->regs->stat);
760 break;
761 }
762
763 }
764
765 priv->can.state = state;
766 netif_rx(skb);
767
768 stats->rx_packets++;
769 stats->rx_bytes += cf->can_dlc;
770}
771
772static irqreturn_t pch_can_interrupt(int irq, void *dev_id)
773{
774 struct net_device *ndev = (struct net_device *)dev_id;
775 struct pch_can_priv *priv = netdev_priv(ndev);
776
777 pch_can_set_int_enables(priv, PCH_CAN_NONE);
778
779 napi_schedule(&priv->napi);
780
781 return IRQ_HANDLED;
782}
783
784static int pch_can_rx_normal(struct net_device *ndev, u32 int_stat)
785{
786 u32 reg;
787 canid_t id;
788 u32 ide;
789 u32 rtr;
790 int i, j, k;
791 int rcv_pkts = 0;
792 struct sk_buff *skb;
793 struct can_frame *cf;
794 struct pch_can_priv *priv = netdev_priv(ndev);
795 struct net_device_stats *stats = &(priv->ndev->stats);
796
797	/* Reading the message object from the Message RAM */
798 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
799 pch_can_check_if_busy(&priv->regs->if1_creq, int_stat);
800
801 /* Reading the MCONT register. */
802 reg = ioread32(&priv->regs->if1_mcont);
803 reg &= 0xffff;
804
805 for (k = int_stat; !(reg & CAN_IF_MCONT_EOB); k++) {
806 /* If MsgLost bit set. */
807 if (reg & CAN_IF_MCONT_MSGLOST) {
808 dev_err(&priv->ndev->dev, "Msg Obj is overwritten.\n");
809 pch_can_bit_clear(&priv->regs->if1_mcont,
810 CAN_IF_MCONT_MSGLOST);
811 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL,
812 &priv->regs->if1_cmask);
813 pch_can_check_if_busy(&priv->regs->if1_creq, k);
814
815 skb = alloc_can_err_skb(ndev, &cf);
816 if (!skb)
817 return -ENOMEM;
818
819 priv->can.can_stats.error_passive++;
820 priv->can.state = CAN_STATE_ERROR_PASSIVE;
821 cf->can_id |= CAN_ERR_CRTL;
822 cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
823 cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
824 stats->rx_packets++;
825 stats->rx_bytes += cf->can_dlc;
826
827 netif_receive_skb(skb);
828 rcv_pkts++;
829 goto RX_NEXT;
830 }
831 if (!(reg & CAN_IF_MCONT_NEWDAT))
832 goto RX_NEXT;
833
834 skb = alloc_can_skb(priv->ndev, &cf);
835 if (!skb)
836 return -ENOMEM;
837
838 /* Get Received data */
839 ide = ((ioread32(&priv->regs->if1_id2)) & CAN_ID2_XTD) >> 14;
840 if (ide) {
841 id = (ioread32(&priv->regs->if1_id1) & 0xffff);
842 id |= (((ioread32(&priv->regs->if1_id2)) &
843 0x1fff) << 16);
844 cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG;
845 } else {
846 id = (((ioread32(&priv->regs->if1_id2)) &
847 (CAN_SFF_MASK << 2)) >> 2);
848 cf->can_id = (id & CAN_SFF_MASK);
849 }
850
851 rtr = (ioread32(&priv->regs->if1_id2) & CAN_ID2_DIR);
852 if (rtr) {
853 cf->can_dlc = 0;
854 cf->can_id |= CAN_RTR_FLAG;
855 } else {
856 cf->can_dlc = ((ioread32(&priv->regs->if1_mcont)) &
857 0x0f);
858 }
859
860 for (i = 0, j = 0; i < cf->can_dlc; j++) {
861 reg = ioread32(&priv->regs->if1_dataa1 + j*4);
862 cf->data[i++] = cpu_to_le32(reg & 0xff);
863 if (i == cf->can_dlc)
864 break;
865 cf->data[i++] = cpu_to_le32((reg >> 8) & 0xff);
866 }
867
868 netif_receive_skb(skb);
869 rcv_pkts++;
870 stats->rx_packets++;
871 stats->rx_bytes += cf->can_dlc;
872
873 if (k < PCH_FIFO_THRESH) {
874 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL |
875 CAN_CMASK_ARB, &priv->regs->if1_cmask);
876
877 /* Clearing the Dir bit. */
878 pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID2_DIR);
879
880 /* Clearing NewDat & IntPnd */
881 pch_can_bit_clear(&priv->regs->if1_mcont,
882 CAN_IF_MCONT_INTPND);
883 pch_can_check_if_busy(&priv->regs->if1_creq, k);
884 } else if (k > PCH_FIFO_THRESH) {
885 pch_can_int_clr(priv, k);
886 } else if (k == PCH_FIFO_THRESH) {
887 int cnt;
888 for (cnt = 0; cnt < PCH_FIFO_THRESH; cnt++)
889 pch_can_int_clr(priv, cnt+1);
890 }
891RX_NEXT:
892		/* Reading the message object from the Message RAM */
893 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
894 pch_can_check_if_busy(&priv->regs->if1_creq, k + 1);
895 reg = ioread32(&priv->regs->if1_mcont);
896 }
897
898 return rcv_pkts;
899}
900static int pch_can_rx_poll(struct napi_struct *napi, int quota)
901{
902 struct net_device *ndev = napi->dev;
903 struct pch_can_priv *priv = netdev_priv(ndev);
904 struct net_device_stats *stats = &(priv->ndev->stats);
905 u32 dlc;
906 u32 int_stat;
907 int rcv_pkts = 0;
908 u32 reg_stat;
909 unsigned long flags;
910
911 int_stat = pch_can_int_pending(priv);
912 if (!int_stat)
913 return 0;
914
915INT_STAT:
916 if (int_stat == CAN_STATUS_INT) {
917 reg_stat = ioread32(&priv->regs->stat);
918 if (reg_stat & (PCH_BUS_OFF | PCH_LEC_ALL)) {
919 if ((reg_stat & PCH_LEC_ALL) != PCH_LEC_ALL)
920 pch_can_error(ndev, reg_stat);
921 }
922
923 if (reg_stat & PCH_TX_OK) {
924 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
925 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
926 pch_can_check_if_busy(&priv->regs->if2_creq,
927 ioread32(&priv->regs->intr));
928 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
929 pch_can_bit_clear(&priv->regs->stat, PCH_TX_OK);
930 }
931
932 if (reg_stat & PCH_RX_OK)
933 pch_can_bit_clear(&priv->regs->stat, PCH_RX_OK);
934
935 int_stat = pch_can_int_pending(priv);
936 if (int_stat == CAN_STATUS_INT)
937 goto INT_STAT;
938 }
939
940MSG_OBJ:
941 if ((int_stat >= 1) && (int_stat <= PCH_RX_OBJ_NUM)) {
942 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
943 rcv_pkts = pch_can_rx_normal(ndev, int_stat);
944 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
945 if (rcv_pkts < 0)
946 return 0;
947 } else if ((int_stat > PCH_RX_OBJ_NUM) && (int_stat <= PCH_OBJ_NUM)) {
948 if (priv->msg_obj[int_stat - 1] == MSG_OBJ_TX) {
949 /* Handle transmission interrupt */
950 can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_NUM - 1);
951 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
952 iowrite32(CAN_CMASK_RX_TX_GET | CAN_CMASK_CLRINTPND,
953 &priv->regs->if2_cmask);
954 dlc = ioread32(&priv->regs->if2_mcont) &
955 CAN_IF_MCONT_DLC;
956 pch_can_check_if_busy(&priv->regs->if2_creq, int_stat);
957 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
958 if (dlc > 8)
959 dlc = 8;
960 stats->tx_bytes += dlc;
961 stats->tx_packets++;
962 }
963 }
964
965 int_stat = pch_can_int_pending(priv);
966 if (int_stat == CAN_STATUS_INT)
967 goto INT_STAT;
968 else if (int_stat >= 1 && int_stat <= PCH_OBJ_NUM)
969 goto MSG_OBJ;
970
971 napi_complete(napi);
972 pch_can_set_int_enables(priv, PCH_CAN_ALL);
973
974 return rcv_pkts;
975}
976
977static int pch_set_bittiming(struct net_device *ndev)
978{
979 struct pch_can_priv *priv = netdev_priv(ndev);
980 const struct can_bittiming *bt = &priv->can.bittiming;
981 u32 canbit;
982 u32 bepe;
983 u32 brp;
984
985 /* Setting the CCE bit for accessing the CAN timing register. */
986 pch_can_bit_set(&priv->regs->cont, CAN_CTRL_CCE);
987
988 brp = (bt->tq) / (1000000000/PCH_CAN_CLK) - 1;
989 canbit = brp & MSK_BITT_BRP;
990 canbit |= (bt->sjw - 1) << BIT_BITT_SJW;
991 canbit |= (bt->phase_seg1 + bt->prop_seg - 1) << BIT_BITT_TSEG1;
992 canbit |= (bt->phase_seg2 - 1) << BIT_BITT_TSEG2;
993 bepe = (brp & MSK_BRPE_BRPE) >> BIT_BRPE_BRPE;
994 iowrite32(canbit, &priv->regs->bitt);
995 iowrite32(bepe, &priv->regs->brpe);
996 pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_CCE);
997
998 return 0;
999}
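/*
 * Worked example of the arithmetic above (illustrative only, assuming
 * PCH_CAN_CLK is 50 MHz, i.e. a 20 ns base clock period): a requested
 * tq of 100 ns gives brp = 100/20 - 1 = 4. The low-order bits of brp
 * are placed in the BITT register through MSK_BITT_BRP; any remaining
 * high-order bits land in the separate BRPE prescaler-extension
 * register, computed as bepe above.
 */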
1000
1001static void pch_can_start(struct net_device *ndev)
1002{
1003 struct pch_can_priv *priv = netdev_priv(ndev);
1004
1005 if (priv->can.state != CAN_STATE_STOPPED)
1006 pch_can_reset(priv);
1007
1008 pch_set_bittiming(ndev);
1009 pch_can_set_optmode(priv);
1010
1011 pch_can_tx_enable_all(priv);
1012 pch_can_rx_enable_all(priv);
1013
1014 /* Setting the CAN to run mode. */
1015 pch_can_set_run_mode(priv, PCH_CAN_RUN);
1016
1017 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1018
1019 return;
1020}
1021
1022static int pch_can_do_set_mode(struct net_device *ndev, enum can_mode mode)
1023{
1024 int ret = 0;
1025
1026 switch (mode) {
1027 case CAN_MODE_START:
1028 pch_can_start(ndev);
1029 netif_wake_queue(ndev);
1030 break;
1031 default:
1032 ret = -EOPNOTSUPP;
1033 break;
1034 }
1035
1036 return ret;
1037}
1038
1039static int pch_can_open(struct net_device *ndev)
1040{
1041 struct pch_can_priv *priv = netdev_priv(ndev);
1042 int retval;
1043
1044 retval = pci_enable_msi(priv->dev);
1045 if (retval) {
1046 dev_info(&ndev->dev, "PCH CAN opened without MSI\n");
1047 priv->use_msi = 0;
1048 } else {
1049 dev_info(&ndev->dev, "PCH CAN opened with MSI\n");
1050 priv->use_msi = 1;
1051 }
1052
1053 /* Registering the interrupt. */
1054 retval = request_irq(priv->dev->irq, pch_can_interrupt, IRQF_SHARED,
1055 ndev->name, ndev);
1056 if (retval) {
1057 dev_err(&ndev->dev, "request_irq failed.\n");
1058 goto req_irq_err;
1059 }
1060
1061 /* Open common can device */
1062 retval = open_candev(ndev);
1063 if (retval) {
1064 dev_err(ndev->dev.parent, "open_candev() failed %d\n", retval);
1065 goto err_open_candev;
1066 }
1067
1068 pch_can_init(priv);
1069 pch_can_start(ndev);
1070 napi_enable(&priv->napi);
1071 netif_start_queue(ndev);
1072
1073 return 0;
1074
1075err_open_candev:
1076 free_irq(priv->dev->irq, ndev);
1077req_irq_err:
1078 if (priv->use_msi)
1079 pci_disable_msi(priv->dev);
1080
1081 pch_can_release(priv);
1082
1083 return retval;
1084}
1085
1086static int pch_close(struct net_device *ndev)
1087{
1088 struct pch_can_priv *priv = netdev_priv(ndev);
1089
1090 netif_stop_queue(ndev);
1091 napi_disable(&priv->napi);
1092 pch_can_release(priv);
1093 free_irq(priv->dev->irq, ndev);
1094 if (priv->use_msi)
1095 pci_disable_msi(priv->dev);
1096 close_candev(ndev);
1097 priv->can.state = CAN_STATE_STOPPED;
1098 return 0;
1099}
1100
1101static int pch_get_msg_obj_sts(struct net_device *ndev, u32 obj_id)
1102{
1103 u32 buffer_status = 0;
1104 struct pch_can_priv *priv = netdev_priv(ndev);
1105
1106 /* Getting the message object status. */
1107 buffer_status = (u32) pch_can_get_buffer_status(priv);
1108
1109 return buffer_status & obj_id;
1110}
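/*
 * Note: message objects are numbered 1..PCH_OBJ_NUM; probe assigns the
 * first PCH_RX_OBJ_NUM of them to reception and the rest to
 * transmission. The obj_id argument above is therefore a bitmask of
 * object numbers, e.g. ((1 << PCH_TX_OBJ_NUM) - 1) << PCH_RX_OBJ_NUM
 * selects every TX object, as used by pch_xmit() below.
 */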
1111
1112
1113static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev)
1114{
1115 int i, j;
1116 unsigned long flags;
1117 struct pch_can_priv *priv = netdev_priv(ndev);
1118 struct can_frame *cf = (struct can_frame *)skb->data;
1119 int tx_buffer_avail = 0;
1120
1121 if (can_dropped_invalid_skb(ndev, skb))
1122 return NETDEV_TX_OK;
1123
1124 if (priv->tx_obj == (PCH_OBJ_NUM + 1)) { /* Point tail Obj */
1125 while (pch_get_msg_obj_sts(ndev, (((1 << PCH_TX_OBJ_NUM)-1) <<
1126 PCH_RX_OBJ_NUM)))
1127 udelay(500);
1128
1129 priv->tx_obj = PCH_RX_OBJ_NUM + 1; /* Point head of Tx Obj ID */
1130 tx_buffer_avail = priv->tx_obj; /* Point Tail of Tx Obj */
1131 } else {
1132 tx_buffer_avail = priv->tx_obj;
1133 }
1134 priv->tx_obj++;
1135
1136 /* Acquiring the lock. */
1137 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
1138
1139 /* Reading the Msg Obj from the Msg RAM to the Interface register. */
1140 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
1141 pch_can_check_if_busy(&priv->regs->if2_creq, tx_buffer_avail);
1142
1143 /* Setting the CMASK register. */
1144 pch_can_bit_set(&priv->regs->if2_cmask, CAN_CMASK_ALL);
1145
1146 /* Program the frame ID (extended or standard). */
1147 pch_can_bit_clear(&priv->regs->if2_id1, 0xffff);
1148 pch_can_bit_clear(&priv->regs->if2_id2, 0x1fff | CAN_ID2_XTD);
1149 if (cf->can_id & CAN_EFF_FLAG) {
1150 pch_can_bit_set(&priv->regs->if2_id1, cf->can_id & 0xffff);
1151 pch_can_bit_set(&priv->regs->if2_id2,
1152 ((cf->can_id >> 16) & 0x1fff) | CAN_ID2_XTD);
1153 } else {
1154 pch_can_bit_set(&priv->regs->if2_id1, 0);
1155 pch_can_bit_set(&priv->regs->if2_id2,
1156 (cf->can_id & CAN_SFF_MASK) << 2);
1157 }
1158
1159 /* If a remote frame has to be transmitted... */
1160 if (cf->can_id & CAN_RTR_FLAG)
1161 pch_can_bit_clear(&priv->regs->if2_id2, CAN_ID2_DIR);
1162
1163 for (i = 0, j = 0; i < cf->can_dlc; j++) {
1164 u32 data = cf->data[i++];
1165
1166 if (i < cf->can_dlc)
1167 data |= cf->data[i++] << 8;
1168 /* Each data register carries two payload bytes. */
1169 iowrite32(data, (&priv->regs->if2_dataa1) + j*4);
1170 }
1171
1172 can_put_echo_skb(skb, ndev, tx_buffer_avail - PCH_RX_OBJ_NUM - 1);
1173
1174 /* Updating the size of the data. */
1175 pch_can_bit_clear(&priv->regs->if2_mcont, 0x0f);
1176 pch_can_bit_set(&priv->regs->if2_mcont, cf->can_dlc);
1177
1178 /* Clearing IntPend, NewDat & TxRqst */
1179 pch_can_bit_clear(&priv->regs->if2_mcont,
1180 CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND |
1181 CAN_IF_MCONT_TXRQXT);
1182
1183 /* Setting NewDat, TxRqst bits */
1184 pch_can_bit_set(&priv->regs->if2_mcont,
1185 CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_TXRQXT);
1186
1187 pch_can_check_if_busy(&priv->regs->if2_creq, tx_buffer_avail);
1188
1189 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
1190
1191 return NETDEV_TX_OK;
1192}
1193
1194static const struct net_device_ops pch_can_netdev_ops = {
1195 .ndo_open = pch_can_open,
1196 .ndo_stop = pch_close,
1197 .ndo_start_xmit = pch_xmit,
1198};
1199
1200static void __devexit pch_can_remove(struct pci_dev *pdev)
1201{
1202 struct net_device *ndev = pci_get_drvdata(pdev);
1203 struct pch_can_priv *priv = netdev_priv(ndev);
1204
1205 unregister_candev(priv->ndev);
1206 free_candev(priv->ndev);
1207 pci_iounmap(pdev, priv->regs);
1208 pci_release_regions(pdev);
1209 pci_disable_device(pdev);
1210 pci_set_drvdata(pdev, NULL);
1211 pch_can_reset(priv);
1212}
1213
1214#ifdef CONFIG_PM
1215static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state)
1216{
1217 int i; /* Counter variable. */
1218 int retval; /* Return value. */
1219 u32 buf_stat; /* Variable for reading the transmit buffer status. */
1220 u32 counter = 0xFFFFFF;
1221
1222 struct net_device *dev = pci_get_drvdata(pdev);
1223 struct pch_can_priv *priv = netdev_priv(dev);
1224
1225 /* Stop the CAN controller */
1226 pch_can_set_run_mode(priv, PCH_CAN_STOP);
1227
1228 /* Indicate that we are about to / in suspend */
1229 priv->can.state = CAN_STATE_SLEEPING;
1230
1231 /* Waiting for all transmission to complete. */
1232 while (counter) {
1233 buf_stat = pch_can_get_buffer_status(priv);
1234 if (!buf_stat)
1235 break;
1236 counter--;
1237 udelay(1);
1238 }
1239 if (!counter)
1240 dev_err(&pdev->dev, "%s -> Transmission timed out.\n", __func__);
1241
1242 /* Save interrupt configuration and then disable them */
1243 pch_can_get_int_enables(priv, &(priv->int_enables));
1244 pch_can_set_int_enables(priv, PCH_CAN_DISABLE);
1245
1246 /* Save Tx buffer enable state */
1247 for (i = 0; i < PCH_OBJ_NUM; i++) {
1248 if (priv->msg_obj[i] == MSG_OBJ_TX)
1249 pch_can_get_tx_enable(priv, i + 1,
1250 &(priv->tx_enable[i]));
1251 }
1252
1253 /* Disable all Transmit buffers */
1254 pch_can_tx_disable_all(priv);
1255
1256 /* Save Rx buffer enable state */
1257 for (i = 0; i < PCH_OBJ_NUM; i++) {
1258 if (priv->msg_obj[i] == MSG_OBJ_RX) {
1259 pch_can_get_rx_enable(priv, i + 1,
1260 &(priv->rx_enable[i]));
1261 pch_can_get_rx_buffer_link(priv, i + 1,
1262 &(priv->rx_link[i]));
1263 }
1264 }
1265
1266 /* Disable all Receive buffers */
1267 pch_can_rx_disable_all(priv);
1268 retval = pci_save_state(pdev);
1269 if (retval) {
1270 dev_err(&pdev->dev, "pci_save_state failed.\n");
1271 } else {
1272 pci_enable_wake(pdev, PCI_D3hot, 0);
1273 pci_disable_device(pdev);
1274 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1275 }
1276
1277 return retval;
1278}
1279
1280static int pch_can_resume(struct pci_dev *pdev)
1281{
1282 int i; /* Counter variable. */
1283 int retval; /* Return variable. */
1284 struct net_device *dev = pci_get_drvdata(pdev);
1285 struct pch_can_priv *priv = netdev_priv(dev);
1286
1287 pci_set_power_state(pdev, PCI_D0);
1288 pci_restore_state(pdev);
1289 retval = pci_enable_device(pdev);
1290 if (retval) {
1291 dev_err(&pdev->dev, "pci_enable_device failed.\n");
1292 return retval;
1293 }
1294
1295 pci_enable_wake(pdev, PCI_D3hot, 0);
1296
1297 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1298
1299 /* Disabling all interrupts. */
1300 pch_can_set_int_enables(priv, PCH_CAN_DISABLE);
1301
1302 /* Setting the CAN device in Stop Mode. */
1303 pch_can_set_run_mode(priv, PCH_CAN_STOP);
1304
1305 /* Configuring the transmit and receive buffers. */
1306 pch_can_config_rx_tx_buffers(priv);
1307
1308 /* Restore the CAN state */
1309 pch_set_bittiming(dev);
1310
1311 /* Listen/Active */
1312 pch_can_set_optmode(priv);
1313
1314 /* Enabling the transmit buffer. */
1315 for (i = 0; i < PCH_OBJ_NUM; i++) {
1316 if (priv->msg_obj[i] == MSG_OBJ_TX) {
1317 pch_can_set_tx_enable(priv, i + 1,
1318 priv->tx_enable[i]);
1319 }
1320 }
1321
1322 /* Configuring the receive buffer and enabling them. */
1323 for (i = 0; i < PCH_OBJ_NUM; i++) {
1324 if (priv->msg_obj[i] == MSG_OBJ_RX) {
1325 /* Restore buffer link */
1326 pch_can_set_rx_buffer_link(priv, i + 1,
1327 priv->rx_link[i]);
1328
1329 /* Restore buffer enables */
1330 pch_can_set_rx_enable(priv, i + 1, priv->rx_enable[i]);
1331 }
1332 }
1333
1334 /* Enable CAN Interrupts */
1335 pch_can_set_int_custom(priv);
1336
1337 /* Restore Run Mode */
1338 pch_can_set_run_mode(priv, PCH_CAN_RUN);
1339
1340 return retval;
1341}
1342#else
1343#define pch_can_suspend NULL
1344#define pch_can_resume NULL
1345#endif
1346
1347static int pch_can_get_berr_counter(const struct net_device *dev,
1348 struct can_berr_counter *bec)
1349{
1350 struct pch_can_priv *priv = netdev_priv(dev);
1351
1352 bec->txerr = ioread32(&priv->regs->errc) & CAN_TEC;
1353 bec->rxerr = (ioread32(&priv->regs->errc) & CAN_REC) >> 8;
1354
1355 return 0;
1356}
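/*
 * Worked example (assuming the usual C_CAN-style layout in which
 * CAN_TEC covers bits 7:0 and CAN_REC bits 14:8 of the error counter
 * register): a raw errc value of 0x0523 yields txerr = 0x23 and
 * rxerr = 0x05.
 */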
1357
1358static int __devinit pch_can_probe(struct pci_dev *pdev,
1359 const struct pci_device_id *id)
1360{
1361 struct net_device *ndev;
1362 struct pch_can_priv *priv;
1363 int rc;
1364 int index;
1365 void __iomem *addr;
1366
1367 rc = pci_enable_device(pdev);
1368 if (rc) {
1369 dev_err(&pdev->dev, "Failed pci_enable_device %d\n", rc);
1370 goto probe_exit_endev;
1371 }
1372
1373 rc = pci_request_regions(pdev, KBUILD_MODNAME);
1374 if (rc) {
1375 dev_err(&pdev->dev, "Failed pci_request_regions %d\n", rc);
1376 goto probe_exit_pcireq;
1377 }
1378
1379 addr = pci_iomap(pdev, 1, 0);
1380 if (!addr) {
1381 rc = -EIO;
1382 dev_err(&pdev->dev, "Failed pci_iomap\n");
1383 goto probe_exit_ipmap;
1384 }
1385
1386 ndev = alloc_candev(sizeof(struct pch_can_priv), PCH_TX_OBJ_NUM);
1387 if (!ndev) {
1388 rc = -ENOMEM;
1389 dev_err(&pdev->dev, "Failed alloc_candev\n");
1390 goto probe_exit_alloc_candev;
1391 }
1392
1393 priv = netdev_priv(ndev);
1394 priv->ndev = ndev;
1395 priv->regs = addr;
1396 priv->dev = pdev;
1397 priv->can.bittiming_const = &pch_can_bittiming_const;
1398 priv->can.do_set_mode = pch_can_do_set_mode;
1399 priv->can.do_get_berr_counter = pch_can_get_berr_counter;
1400 priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
1401 CAN_CTRLMODE_LOOPBACK;
1402 priv->tx_obj = PCH_RX_OBJ_NUM + 1; /* Point head of Tx Obj */
1403
1404 ndev->irq = pdev->irq;
1405 ndev->flags |= IFF_ECHO;
1406
1407 pci_set_drvdata(pdev, ndev);
1408 SET_NETDEV_DEV(ndev, &pdev->dev);
1409 ndev->netdev_ops = &pch_can_netdev_ops;
1410
1411 priv->can.clock.freq = PCH_CAN_CLK; /* Hz */
1412 for (index = 0; index < PCH_RX_OBJ_NUM;)
1413 priv->msg_obj[index++] = MSG_OBJ_RX;
1414
1415 for (; index < PCH_OBJ_NUM;)
1416 priv->msg_obj[index++] = MSG_OBJ_TX;
1417
1418 netif_napi_add(ndev, &priv->napi, pch_can_rx_poll, PCH_RX_OBJ_NUM);
1419
1420 rc = register_candev(ndev);
1421 if (rc) {
1422 dev_err(&pdev->dev, "Failed register_candev %d\n", rc);
1423 goto probe_exit_reg_candev;
1424 }
1425
1426 return 0;
1427
1428probe_exit_reg_candev:
1429 free_candev(ndev);
1430probe_exit_alloc_candev:
1431 pci_iounmap(pdev, addr);
1432probe_exit_ipmap:
1433 pci_release_regions(pdev);
1434probe_exit_pcireq:
1435 pci_disable_device(pdev);
1436probe_exit_endev:
1437 return rc;
1438}
1439
1440static struct pci_driver pch_can_pci_driver = {
1441 .name = "pch_can",
1442 .id_table = pch_pci_tbl,
1443 .probe = pch_can_probe,
1444 .remove = __devexit_p(pch_can_remove),
1445 .suspend = pch_can_suspend,
1446 .resume = pch_can_resume,
1447};
1448
1449static int __init pch_can_pci_init(void)
1450{
1451 return pci_register_driver(&pch_can_pci_driver);
1452}
1453module_init(pch_can_pci_init);
1454
1455static void __exit pch_can_pci_exit(void)
1456{
1457 pci_unregister_driver(&pch_can_pci_driver);
1458}
1459module_exit(pch_can_pci_exit);
1460
1461MODULE_DESCRIPTION("PCH CAN (Controller Area Network) Driver");
1462MODULE_LICENSE("GPL v2");
1463MODULE_VERSION("0.94");
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index ae3505afd682..6fdc031daaae 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -58,4 +58,16 @@ config CAN_PLX_PCI
58 - esd CAN-PCIe/2000 58 - esd CAN-PCIe/2000
59 - Marathon CAN-bus-PCI card (http://www.marathon.ru/) 59 - Marathon CAN-bus-PCI card (http://www.marathon.ru/)
60 - TEWS TECHNOLOGIES TPMC810 card (http://www.tews.com/) 60 - TEWS TECHNOLOGIES TPMC810 card (http://www.tews.com/)
61
62config CAN_TSCAN1
63 tristate "TS-CAN1 PC104 boards"
64 depends on ISA
65 help
66 This driver is for Technologic Systems' TS-CAN1 PC104 boards.
67 http://www.embeddedarm.com/products/board-detail.php?product=TS-CAN1
68 The driver supports multiple boards and automatically configures them:
69 PLD IO base addresses are read from jumpers JP1 and JP2,
70 IRQ numbers are read from jumpers JP4 and JP5,
71 SJA1000 IO base addresses are chosen heuristically (first that works).
72
61endif 73endif
diff --git a/drivers/net/can/sja1000/Makefile b/drivers/net/can/sja1000/Makefile
index ce924553995d..2c591eb321c7 100644
--- a/drivers/net/can/sja1000/Makefile
+++ b/drivers/net/can/sja1000/Makefile
@@ -9,5 +9,6 @@ obj-$(CONFIG_CAN_SJA1000_OF_PLATFORM) += sja1000_of_platform.o
9obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o 9obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o
10obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o 10obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o
11obj-$(CONFIG_CAN_PLX_PCI) += plx_pci.o 11obj-$(CONFIG_CAN_PLX_PCI) += plx_pci.o
12obj-$(CONFIG_CAN_TSCAN1) += tscan1.o
12 13
13ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG 14ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/sja1000/tscan1.c b/drivers/net/can/sja1000/tscan1.c
new file mode 100644
index 000000000000..9756099a883a
--- /dev/null
+++ b/drivers/net/can/sja1000/tscan1.c
@@ -0,0 +1,216 @@
1/*
2 * tscan1.c: driver for Technologic Systems TS-CAN1 PC104 boards
3 *
4 * Copyright 2010 Andre B. Oliveira
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20/*
21 * References:
22 * - Getting started with TS-CAN1, Technologic Systems, Jun 2009
23 * http://www.embeddedarm.com/documentation/ts-can1-manual.pdf
24 */
25
26#include <linux/init.h>
27#include <linux/io.h>
28#include <linux/ioport.h>
29#include <linux/isa.h>
30#include <linux/module.h>
31#include <linux/netdevice.h>
32#include "sja1000.h"
33
34MODULE_DESCRIPTION("Driver for Technologic Systems TS-CAN1 PC104 boards");
35MODULE_AUTHOR("Andre B. Oliveira <anbadeol@gmail.com>");
36MODULE_LICENSE("GPL");
37
38/* Maximum number of boards (one in each JP1:JP2 setting of IO address) */
39#define TSCAN1_MAXDEV 4
40
41/* PLD registers address offsets */
42#define TSCAN1_ID1 0
43#define TSCAN1_ID2 1
44#define TSCAN1_VERSION 2
45#define TSCAN1_LED 3
46#define TSCAN1_PAGE 4
47#define TSCAN1_MODE 5
48#define TSCAN1_JUMPERS 6
49
50/* PLD board identifier registers magic values */
51#define TSCAN1_ID1_VALUE 0xf6
52#define TSCAN1_ID2_VALUE 0xb9
53
54/* PLD mode register SJA1000 IO enable bit */
55#define TSCAN1_MODE_ENABLE 0x40
56
57/* PLD jumpers register bits */
58#define TSCAN1_JP4 0x10
59#define TSCAN1_JP5 0x20
60
61/* PLD IO base addresses start */
62#define TSCAN1_PLD_ADDRESS 0x150
63
64/* PLD register space size */
65#define TSCAN1_PLD_SIZE 8
66
67/* SJA1000 register space size */
68#define TSCAN1_SJA1000_SIZE 32
69
70/* SJA1000 crystal frequency (16MHz) */
71#define TSCAN1_SJA1000_XTAL 16000000
72
73/* SJA1000 IO base addresses */
74static const unsigned short tscan1_sja1000_addresses[] __devinitconst = {
75 0x100, 0x120, 0x180, 0x1a0, 0x200, 0x240, 0x280, 0x320
76};
77
78/* Read SJA1000 register */
79static u8 tscan1_read(const struct sja1000_priv *priv, int reg)
80{
81 return inb((unsigned long)priv->reg_base + reg);
82}
83
84/* Write SJA1000 register */
85static void tscan1_write(const struct sja1000_priv *priv, int reg, u8 val)
86{
87 outb(val, (unsigned long)priv->reg_base + reg);
88}
89
90/* Probe for a TS-CAN1 board with JP2:JP1 jumper setting ID */
91static int __devinit tscan1_probe(struct device *dev, unsigned id)
92{
93 struct net_device *netdev;
94 struct sja1000_priv *priv;
95 unsigned long pld_base, sja1000_base;
96 int irq, i;
97
98 pld_base = TSCAN1_PLD_ADDRESS + id * TSCAN1_PLD_SIZE;
99 if (!request_region(pld_base, TSCAN1_PLD_SIZE, dev_name(dev)))
100 return -EBUSY;
101
102 if (inb(pld_base + TSCAN1_ID1) != TSCAN1_ID1_VALUE ||
103 inb(pld_base + TSCAN1_ID2) != TSCAN1_ID2_VALUE) {
104 release_region(pld_base, TSCAN1_PLD_SIZE);
105 return -ENODEV;
106 }
107
108 switch (inb(pld_base + TSCAN1_JUMPERS) & (TSCAN1_JP4 | TSCAN1_JP5)) {
109 case TSCAN1_JP4:
110 irq = 6;
111 break;
112 case TSCAN1_JP5:
113 irq = 7;
114 break;
115 case TSCAN1_JP4 | TSCAN1_JP5:
116 irq = 5;
117 break;
118 default:
119 dev_err(dev, "invalid JP4:JP5 setting (no IRQ)\n");
120 release_region(pld_base, TSCAN1_PLD_SIZE);
121 return -EINVAL;
122 }
123
124 netdev = alloc_sja1000dev(0);
125 if (!netdev) {
126 release_region(pld_base, TSCAN1_PLD_SIZE);
127 return -ENOMEM;
128 }
129
130 dev_set_drvdata(dev, netdev);
131 SET_NETDEV_DEV(netdev, dev);
132
133 netdev->base_addr = pld_base;
134 netdev->irq = irq;
135
136 priv = netdev_priv(netdev);
137 priv->read_reg = tscan1_read;
138 priv->write_reg = tscan1_write;
139 priv->can.clock.freq = TSCAN1_SJA1000_XTAL / 2;
140 priv->cdr = CDR_CBP | CDR_CLK_OFF;
141 priv->ocr = OCR_TX0_PUSHPULL;
142
143 /* Select the first SJA1000 IO address that is free and that works */
144 for (i = 0; i < ARRAY_SIZE(tscan1_sja1000_addresses); i++) {
145 sja1000_base = tscan1_sja1000_addresses[i];
146 if (!request_region(sja1000_base, TSCAN1_SJA1000_SIZE,
147 dev_name(dev)))
148 continue;
149
150 /* Set SJA1000 IO base address and enable it */
151 outb(TSCAN1_MODE_ENABLE | i, pld_base + TSCAN1_MODE);
152
153 priv->reg_base = (void __iomem *)sja1000_base;
154 if (!register_sja1000dev(netdev)) {
155 /* SJA1000 probe succeeded; turn LED off and return */
156 outb(0, pld_base + TSCAN1_LED);
157 netdev_info(netdev, "TS-CAN1 at 0x%lx 0x%lx irq %d\n",
158 pld_base, sja1000_base, irq);
159 return 0;
160 }
161
162 /* SJA1000 probe failed; release and try next address */
163 outb(0, pld_base + TSCAN1_MODE);
164 release_region(sja1000_base, TSCAN1_SJA1000_SIZE);
165 }
166
167 dev_err(dev, "failed to assign SJA1000 IO address\n");
168 dev_set_drvdata(dev, NULL);
169 free_sja1000dev(netdev);
170 release_region(pld_base, TSCAN1_PLD_SIZE);
171 return -ENXIO;
172}
173
174static int __devexit tscan1_remove(struct device *dev, unsigned id /*unused*/)
175{
176 struct net_device *netdev;
177 struct sja1000_priv *priv;
178 unsigned long pld_base, sja1000_base;
179
180 netdev = dev_get_drvdata(dev);
181 unregister_sja1000dev(netdev);
182 dev_set_drvdata(dev, NULL);
183
184 priv = netdev_priv(netdev);
185 pld_base = netdev->base_addr;
186 sja1000_base = (unsigned long)priv->reg_base;
187
188 outb(0, pld_base + TSCAN1_MODE); /* disable SJA1000 IO space */
189
190 release_region(sja1000_base, TSCAN1_SJA1000_SIZE);
191 release_region(pld_base, TSCAN1_PLD_SIZE);
192
193 free_sja1000dev(netdev);
194
195 return 0;
196}
197
198static struct isa_driver tscan1_isa_driver = {
199 .probe = tscan1_probe,
200 .remove = __devexit_p(tscan1_remove),
201 .driver = {
202 .name = "tscan1",
203 },
204};
205
206static int __init tscan1_init(void)
207{
208 return isa_register_driver(&tscan1_isa_driver, TSCAN1_MAXDEV);
209}
210module_init(tscan1_init);
211
212static void __exit tscan1_exit(void)
213{
214 isa_unregister_driver(&tscan1_isa_driver);
215}
216module_exit(tscan1_exit);
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index a04ce6a5f637..046d846c652d 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -1266,11 +1266,13 @@ static int cxgb_up(struct adapter *adap)
1266 } 1266 }
1267 1267
1268 if (!(adap->flags & QUEUES_BOUND)) { 1268 if (!(adap->flags & QUEUES_BOUND)) {
1269 err = bind_qsets(adap); 1269 int ret = bind_qsets(adap);
1270 if (err) { 1270
1271 CH_ERR(adap, "failed to bind qsets, err %d\n", err); 1271 if (ret < 0) {
1272 CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
1272 t3_intr_disable(adap); 1273 t3_intr_disable(adap);
1273 free_irq_resources(adap); 1274 free_irq_resources(adap);
1275 err = ret;
1274 goto out; 1276 goto out;
1275 } 1277 }
1276 adap->flags |= QUEUES_BOUND; 1278 adap->flags |= QUEUES_BOUND;
@@ -3299,7 +3301,6 @@ static int __devinit init_one(struct pci_dev *pdev,
3299 pi->rx_offload = T3_RX_CSUM | T3_LRO; 3301 pi->rx_offload = T3_RX_CSUM | T3_LRO;
3300 pi->port_id = i; 3302 pi->port_id = i;
3301 netif_carrier_off(netdev); 3303 netif_carrier_off(netdev);
3302 netif_tx_stop_all_queues(netdev);
3303 netdev->irq = pdev->irq; 3304 netdev->irq = pdev->irq;
3304 netdev->mem_start = mmio_start; 3305 netdev->mem_start = mmio_start;
3305 netdev->mem_end = mmio_start + mmio_len - 1; 3306 netdev->mem_end = mmio_start + mmio_len - 1;
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 5d72bda54389..f9f6645b2e61 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -296,8 +296,10 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
296 if (d->skb) { /* an SGL is present */ 296 if (d->skb) { /* an SGL is present */
297 if (need_unmap) 297 if (need_unmap)
298 unmap_skb(d->skb, q, cidx, pdev); 298 unmap_skb(d->skb, q, cidx, pdev);
299 if (d->eop) 299 if (d->eop) {
300 kfree_skb(d->skb); 300 kfree_skb(d->skb);
301 d->skb = NULL;
302 }
301 } 303 }
302 ++d; 304 ++d;
303 if (++cidx == q->size) { 305 if (++cidx == q->size) {
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
index eaa49e4119f1..3d4253d311eb 100644
--- a/drivers/net/cxgb4/cxgb4.h
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -281,7 +281,6 @@ struct sge_rspq;
281 281
282struct port_info { 282struct port_info {
283 struct adapter *adapter; 283 struct adapter *adapter;
284 struct vlan_group *vlan_grp;
285 u16 viid; 284 u16 viid;
286 s16 xact_addr_filt; /* index of exact MAC address filter */ 285 s16 xact_addr_filt; /* index of exact MAC address filter */
287 u16 rss_size; /* size of VI's RSS table slice */ 286 u16 rss_size; /* size of VI's RSS table slice */
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index 87054e0a5746..f50bc98310f8 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -403,7 +403,7 @@ static int link_start(struct net_device *dev)
403 * that step explicitly. 403 * that step explicitly.
404 */ 404 */
405 ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1, 405 ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
406 pi->vlan_grp != NULL, true); 406 !!(dev->features & NETIF_F_HW_VLAN_RX), true);
407 if (ret == 0) { 407 if (ret == 0) {
408 ret = t4_change_mac(pi->adapter, mb, pi->viid, 408 ret = t4_change_mac(pi->adapter, mb, pi->viid,
409 pi->xact_addr_filt, dev->dev_addr, true, 409 pi->xact_addr_filt, dev->dev_addr, true,
@@ -1881,7 +1881,24 @@ static int set_tso(struct net_device *dev, u32 value)
1881 1881
1882static int set_flags(struct net_device *dev, u32 flags) 1882static int set_flags(struct net_device *dev, u32 flags)
1883{ 1883{
1884 return ethtool_op_set_flags(dev, flags, ETH_FLAG_RXHASH); 1884 int err;
1885 unsigned long old_feat = dev->features;
1886
1887 err = ethtool_op_set_flags(dev, flags, ETH_FLAG_RXHASH |
1888 ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN);
1889 if (err)
1890 return err;
1891
1892 if ((old_feat ^ dev->features) & NETIF_F_HW_VLAN_RX) {
1893 const struct port_info *pi = netdev_priv(dev);
1894
1895 err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
1896 -1, -1, -1, !!(flags & ETH_FLAG_RXVLAN),
1897 true);
1898 if (err)
1899 dev->features = old_feat;
1900 }
1901 return err;
1885} 1902}
1886 1903
1887static int get_rss_table(struct net_device *dev, struct ethtool_rxfh_indir *p) 1904static int get_rss_table(struct net_device *dev, struct ethtool_rxfh_indir *p)
@@ -2842,15 +2859,6 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2842 return 0; 2859 return 0;
2843} 2860}
2844 2861
2845static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2846{
2847 struct port_info *pi = netdev_priv(dev);
2848
2849 pi->vlan_grp = grp;
2850 t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1, -1, -1, -1,
2851 grp != NULL, true);
2852}
2853
2854#ifdef CONFIG_NET_POLL_CONTROLLER 2862#ifdef CONFIG_NET_POLL_CONTROLLER
2855static void cxgb_netpoll(struct net_device *dev) 2863static void cxgb_netpoll(struct net_device *dev)
2856{ 2864{
@@ -2878,7 +2886,6 @@ static const struct net_device_ops cxgb4_netdev_ops = {
2878 .ndo_validate_addr = eth_validate_addr, 2886 .ndo_validate_addr = eth_validate_addr,
2879 .ndo_do_ioctl = cxgb_ioctl, 2887 .ndo_do_ioctl = cxgb_ioctl,
2880 .ndo_change_mtu = cxgb_change_mtu, 2888 .ndo_change_mtu = cxgb_change_mtu,
2881 .ndo_vlan_rx_register = vlan_rx_register,
2882#ifdef CONFIG_NET_POLL_CONTROLLER 2889#ifdef CONFIG_NET_POLL_CONTROLLER
2883 .ndo_poll_controller = cxgb_netpoll, 2890 .ndo_poll_controller = cxgb_netpoll,
2884#endif 2891#endif
@@ -3658,7 +3665,6 @@ static int __devinit init_one(struct pci_dev *pdev,
3658 pi->rx_offload = RX_CSO; 3665 pi->rx_offload = RX_CSO;
3659 pi->port_id = i; 3666 pi->port_id = i;
3660 netif_carrier_off(netdev); 3667 netif_carrier_off(netdev);
3661 netif_tx_stop_all_queues(netdev);
3662 netdev->irq = pdev->irq; 3668 netdev->irq = pdev->irq;
3663 3669
3664 netdev->features |= NETIF_F_SG | TSO_FLAGS; 3670 netdev->features |= NETIF_F_SG | TSO_FLAGS;
diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c
index 9967f3debce7..17022258ed68 100644
--- a/drivers/net/cxgb4/sge.c
+++ b/drivers/net/cxgb4/sge.c
@@ -1530,18 +1530,11 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1530 skb->rxhash = (__force u32)pkt->rsshdr.hash_val; 1530 skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
1531 1531
1532 if (unlikely(pkt->vlan_ex)) { 1532 if (unlikely(pkt->vlan_ex)) {
1533 struct port_info *pi = netdev_priv(rxq->rspq.netdev); 1533 __vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
1534 struct vlan_group *grp = pi->vlan_grp;
1535
1536 rxq->stats.vlan_ex++; 1534 rxq->stats.vlan_ex++;
1537 if (likely(grp)) {
1538 ret = vlan_gro_frags(&rxq->rspq.napi, grp,
1539 ntohs(pkt->vlan));
1540 goto stats;
1541 }
1542 } 1535 }
1543 ret = napi_gro_frags(&rxq->rspq.napi); 1536 ret = napi_gro_frags(&rxq->rspq.napi);
1544stats: if (ret == GRO_HELD) 1537 if (ret == GRO_HELD)
1545 rxq->stats.lro_pkts++; 1538 rxq->stats.lro_pkts++;
1546 else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE) 1539 else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
1547 rxq->stats.lro_merged++; 1540 rxq->stats.lro_merged++;
@@ -1608,16 +1601,10 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1608 skb_checksum_none_assert(skb); 1601 skb_checksum_none_assert(skb);
1609 1602
1610 if (unlikely(pkt->vlan_ex)) { 1603 if (unlikely(pkt->vlan_ex)) {
1611 struct vlan_group *grp = pi->vlan_grp; 1604 __vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
1612
1613 rxq->stats.vlan_ex++; 1605 rxq->stats.vlan_ex++;
1614 if (likely(grp)) 1606 }
1615 vlan_hwaccel_receive_skb(skb, grp, ntohs(pkt->vlan)); 1607 netif_receive_skb(skb);
1616 else
1617 dev_kfree_skb_any(skb);
1618 } else
1619 netif_receive_skb(skb);
1620
1621 return 0; 1608 return 0;
1622} 1609}
1623 1610
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 555ecc5a2e93..c3449bbc585a 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -753,7 +753,9 @@ static int cxgb4vf_open(struct net_device *dev)
753 if (err) 753 if (err)
754 return err; 754 return err;
755 set_bit(pi->port_id, &adapter->open_device_map); 755 set_bit(pi->port_id, &adapter->open_device_map);
756 link_start(dev); 756 err = link_start(dev);
757 if (err)
758 return err;
757 netif_tx_start_all_queues(dev); 759 netif_tx_start_all_queues(dev);
758 return 0; 760 return 0;
759} 761}
@@ -1103,18 +1105,6 @@ static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
1103 return 0; 1105 return 0;
1104} 1106}
1105 1107
1106/*
1107 * Return a TX Queue on which to send the specified skb.
1108 */
1109static u16 cxgb4vf_select_queue(struct net_device *dev, struct sk_buff *skb)
1110{
1111 /*
1112 * XXX For now just use the default hash but we probably want to
1113 * XXX look at other possibilities ...
1114 */
1115 return skb_tx_hash(dev, skb);
1116}
1117
1118#ifdef CONFIG_NET_POLL_CONTROLLER 1108#ifdef CONFIG_NET_POLL_CONTROLLER
1119/* 1109/*
1120 * Poll all of our receive queues. This is called outside of normal interrupt 1110 * Poll all of our receive queues. This is called outside of normal interrupt
@@ -2075,6 +2065,22 @@ static int adap_init0(struct adapter *adapter)
2075 } 2065 }
2076 2066
2077 /* 2067 /*
2068 * Some environments do not properly handle PCIE FLRs -- e.g. in Linux
2069 * 2.6.31 and later we can't call pci_reset_function() in order to
2070 * issue an FLR because of a self-deadlock on the device semaphore.
2071 * Meanwhile, the OS infrastructure doesn't issue FLRs in all the
2072 * cases where they're needed -- for instance, some versions of KVM
2073 * fail to reset "Assigned Devices" when the VM reboots. Therefore we
2074 * use the firmware based reset in order to reset any per function
2075 * state.
2076 */
2077 err = t4vf_fw_reset(adapter);
2078 if (err < 0) {
2079 dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
2080 return err;
2081 }
2082
2083 /*
2078 * Grab basic operational parameters. These will predominantly have 2084 * Grab basic operational parameters. These will predominantly have
2079 * been set up by the Physical Function Driver or will be hard coded 2085 * been set up by the Physical Function Driver or will be hard coded
2080 * into the adapter. We just have to live with them ... Note that 2086 * into the adapter. We just have to live with them ... Note that
@@ -2417,7 +2423,6 @@ static const struct net_device_ops cxgb4vf_netdev_ops = {
2417 .ndo_get_stats = cxgb4vf_get_stats, 2423 .ndo_get_stats = cxgb4vf_get_stats,
2418 .ndo_set_rx_mode = cxgb4vf_set_rxmode, 2424 .ndo_set_rx_mode = cxgb4vf_set_rxmode,
2419 .ndo_set_mac_address = cxgb4vf_set_mac_addr, 2425 .ndo_set_mac_address = cxgb4vf_set_mac_addr,
2420 .ndo_select_queue = cxgb4vf_select_queue,
2421 .ndo_validate_addr = eth_validate_addr, 2426 .ndo_validate_addr = eth_validate_addr,
2422 .ndo_do_ioctl = cxgb4vf_do_ioctl, 2427 .ndo_do_ioctl = cxgb4vf_do_ioctl,
2423 .ndo_change_mtu = cxgb4vf_change_mtu, 2428 .ndo_change_mtu = cxgb4vf_change_mtu,
@@ -2600,7 +2605,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
2600 pi->xact_addr_filt = -1; 2605 pi->xact_addr_filt = -1;
2601 pi->rx_offload = RX_CSO; 2606 pi->rx_offload = RX_CSO;
2602 netif_carrier_off(netdev); 2607 netif_carrier_off(netdev);
2603 netif_tx_stop_all_queues(netdev);
2604 netdev->irq = pdev->irq; 2608 netdev->irq = pdev->irq;
2605 2609
2606 netdev->features = (NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | 2610 netdev->features = (NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
@@ -2625,7 +2629,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
2625 netdev->do_ioctl = cxgb4vf_do_ioctl; 2629 netdev->do_ioctl = cxgb4vf_do_ioctl;
2626 netdev->change_mtu = cxgb4vf_change_mtu; 2630 netdev->change_mtu = cxgb4vf_change_mtu;
2627 netdev->set_mac_address = cxgb4vf_set_mac_addr; 2631 netdev->set_mac_address = cxgb4vf_set_mac_addr;
2628 netdev->select_queue = cxgb4vf_select_queue;
2629#ifdef CONFIG_NET_POLL_CONTROLLER 2632#ifdef CONFIG_NET_POLL_CONTROLLER
2630 netdev->poll_controller = cxgb4vf_poll_controller; 2633 netdev->poll_controller = cxgb4vf_poll_controller;
2631#endif 2634#endif
@@ -2844,6 +2847,14 @@ static struct pci_device_id cxgb4vf_pci_tbl[] = {
2844 CH_DEVICE(0x4800, 0), /* T440-dbg */ 2847 CH_DEVICE(0x4800, 0), /* T440-dbg */
2845 CH_DEVICE(0x4801, 0), /* T420-cr */ 2848 CH_DEVICE(0x4801, 0), /* T420-cr */
2846 CH_DEVICE(0x4802, 0), /* T422-cr */ 2849 CH_DEVICE(0x4802, 0), /* T422-cr */
2850 CH_DEVICE(0x4803, 0), /* T440-cr */
2851 CH_DEVICE(0x4804, 0), /* T420-bch */
2852 CH_DEVICE(0x4805, 0), /* T440-bch */
2853 CH_DEVICE(0x4806, 0), /* T460-ch */
2854 CH_DEVICE(0x4807, 0), /* T420-so */
2855 CH_DEVICE(0x4808, 0), /* T420-cx */
2856 CH_DEVICE(0x4809, 0), /* T420-bt */
2857 CH_DEVICE(0x480a, 0), /* T404-bt */
2847 { 0, } 2858 { 0, }
2848}; 2859};
2849 2860
diff --git a/drivers/net/cxgb4vf/sge.c b/drivers/net/cxgb4vf/sge.c
index f10864ddafbe..ecf0770bf0ff 100644
--- a/drivers/net/cxgb4vf/sge.c
+++ b/drivers/net/cxgb4vf/sge.c
@@ -154,13 +154,14 @@ enum {
154 */ 154 */
155 RX_COPY_THRES = 256, 155 RX_COPY_THRES = 256,
156 RX_PULL_LEN = 128, 156 RX_PULL_LEN = 128,
157};
158 157
159/* 158 /*
160 * Can't define this in the above enum because PKTSHIFT isn't a constant in 159 * Main body length for sk_buffs used for RX Ethernet packets with
161 * the VF Driver ... 160 * fragments. Should be >= RX_PULL_LEN but possibly bigger to give
162 */ 161 * pskb_may_pull() some room.
163#define RX_PKT_PULL_LEN (RX_PULL_LEN + PKTSHIFT) 162 */
163 RX_SKB_LEN = 512,
164};
164 165
165/* 166/*
166 * Software state per TX descriptor. 167 * Software state per TX descriptor.
@@ -1355,6 +1356,67 @@ out_free:
1355} 1356}
1356 1357
1357/** 1358/**
1359 * t4vf_pktgl_to_skb - build an sk_buff from a packet gather list
1360 * @gl: the gather list
1361 * @skb_len: size of sk_buff main body if it carries fragments
1362 * @pull_len: amount of data to move to the sk_buff's main body
1363 *
1364 * Builds an sk_buff from the given packet gather list. Returns the
1365 * sk_buff or %NULL if sk_buff allocation failed.
1366 */
1367struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
1368 unsigned int skb_len, unsigned int pull_len)
1369{
1370 struct sk_buff *skb;
1371 struct skb_shared_info *ssi;
1372
1373 /*
1374 * If the ingress packet is small enough, allocate an skb large enough
1375 * for all of the data and copy it inline. Otherwise, allocate an skb
1376 * with enough room to pull in the header and reference the rest of
1377 * the data via the skb fragment list.
1378 *
1379 * Below we rely on RX_COPY_THRES being less than the smallest Rx
1380 * buffer size, which is expected since buffers are at least
1381 * PAGE_SIZEd. In this case packets up to RX_COPY_THRES have only one
1382 * fragment.
1383 */
1384 if (gl->tot_len <= RX_COPY_THRES) {
1385 /* small packets have only one fragment */
1386 skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
1387 if (unlikely(!skb))
1388 goto out;
1389 __skb_put(skb, gl->tot_len);
1390 skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
1391 } else {
1392 skb = alloc_skb(skb_len, GFP_ATOMIC);
1393 if (unlikely(!skb))
1394 goto out;
1395 __skb_put(skb, pull_len);
1396 skb_copy_to_linear_data(skb, gl->va, pull_len);
1397
1398 ssi = skb_shinfo(skb);
1399 ssi->frags[0].page = gl->frags[0].page;
1400 ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len;
1401 ssi->frags[0].size = gl->frags[0].size - pull_len;
1402 if (gl->nfrags > 1)
1403 memcpy(&ssi->frags[1], &gl->frags[1],
1404 (gl->nfrags-1) * sizeof(skb_frag_t));
1405 ssi->nr_frags = gl->nfrags;
1406
1407 skb->len = gl->tot_len;
1408 skb->data_len = skb->len - pull_len;
1409 skb->truesize += skb->data_len;
1410
1411 /* Get a reference for the last page, we don't own it */
1412 get_page(gl->frags[gl->nfrags - 1].page);
1413 }
1414
1415out:
1416 return skb;
1417}
1418
1419/**
1358 * t4vf_pktgl_free - free a packet gather list 1420 * t4vf_pktgl_free - free a packet gather list
1359 * @gl: the gather list 1421 * @gl: the gather list
1360 * 1422 *
@@ -1463,10 +1525,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1463{ 1525{
1464 struct sk_buff *skb; 1526 struct sk_buff *skb;
1465 struct port_info *pi; 1527 struct port_info *pi;
1466 struct skb_shared_info *ssi;
1467 const struct cpl_rx_pkt *pkt = (void *)&rsp[1]; 1528 const struct cpl_rx_pkt *pkt = (void *)&rsp[1];
1468 bool csum_ok = pkt->csum_calc && !pkt->err_vec; 1529 bool csum_ok = pkt->csum_calc && !pkt->err_vec;
1469 unsigned int len = be16_to_cpu(pkt->len);
1470 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); 1530 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
1471 1531
1472 /* 1532 /*
@@ -1481,42 +1541,14 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1481 } 1541 }
1482 1542
1483 /* 1543 /*
1484 * If the ingress packet is small enough, allocate an skb large enough 1544 * Convert the Packet Gather List into an skb.
1485 * for all of the data and copy it inline. Otherwise, allocate an skb
1486 * with enough room to pull in the header and reference the rest of
1487 * the data via the skb fragment list.
1488 */ 1545 */
1489 if (len <= RX_COPY_THRES) { 1546 skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
1490 /* small packets have only one fragment */ 1547 if (unlikely(!skb)) {
1491 skb = alloc_skb(gl->frags[0].size, GFP_ATOMIC); 1548 t4vf_pktgl_free(gl);
1492 if (!skb) 1549 rxq->stats.rx_drops++;
1493 goto nomem; 1550 return 0;
1494 __skb_put(skb, gl->frags[0].size);
1495 skb_copy_to_linear_data(skb, gl->va, gl->frags[0].size);
1496 } else {
1497 skb = alloc_skb(RX_PKT_PULL_LEN, GFP_ATOMIC);
1498 if (!skb)
1499 goto nomem;
1500 __skb_put(skb, RX_PKT_PULL_LEN);
1501 skb_copy_to_linear_data(skb, gl->va, RX_PKT_PULL_LEN);
1502
1503 ssi = skb_shinfo(skb);
1504 ssi->frags[0].page = gl->frags[0].page;
1505 ssi->frags[0].page_offset = (gl->frags[0].page_offset +
1506 RX_PKT_PULL_LEN);
1507 ssi->frags[0].size = gl->frags[0].size - RX_PKT_PULL_LEN;
1508 if (gl->nfrags > 1)
1509 memcpy(&ssi->frags[1], &gl->frags[1],
1510 (gl->nfrags-1) * sizeof(skb_frag_t));
1511 ssi->nr_frags = gl->nfrags;
1512 skb->len = len + PKTSHIFT;
1513 skb->data_len = skb->len - RX_PKT_PULL_LEN;
1514 skb->truesize += skb->data_len;
1515
1516 /* Get a reference for the last page, we don't own it */
1517 get_page(gl->frags[gl->nfrags - 1].page);
1518 } 1551 }
1519
1520 __skb_pull(skb, PKTSHIFT); 1552 __skb_pull(skb, PKTSHIFT);
1521 skb->protocol = eth_type_trans(skb, rspq->netdev); 1553 skb->protocol = eth_type_trans(skb, rspq->netdev);
1522 skb_record_rx_queue(skb, rspq->idx); 1554 skb_record_rx_queue(skb, rspq->idx);
@@ -1549,11 +1581,6 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1549 netif_receive_skb(skb); 1581 netif_receive_skb(skb);
1550 1582
1551 return 0; 1583 return 0;
1552
1553nomem:
1554 t4vf_pktgl_free(gl);
1555 rxq->stats.rx_drops++;
1556 return 0;
1557} 1584}
1558 1585
1559/** 1586/**
@@ -1679,6 +1706,7 @@ int process_responses(struct sge_rspq *rspq, int budget)
1679 } 1706 }
1680 len = RSPD_LEN(len); 1707 len = RSPD_LEN(len);
1681 } 1708 }
1709 gl.tot_len = len;
1682 1710
1683 /* 1711 /*
1684 * Gather packet fragments. 1712 * Gather packet fragments.
diff --git a/drivers/net/cxgb4vf/t4vf_common.h b/drivers/net/cxgb4vf/t4vf_common.h
index 873cb7d86c57..a65c80aed1f2 100644
--- a/drivers/net/cxgb4vf/t4vf_common.h
+++ b/drivers/net/cxgb4vf/t4vf_common.h
@@ -235,6 +235,7 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
235int __devinit t4vf_wait_dev_ready(struct adapter *); 235int __devinit t4vf_wait_dev_ready(struct adapter *);
236int __devinit t4vf_port_init(struct adapter *, int); 236int __devinit t4vf_port_init(struct adapter *, int);
237 237
238int t4vf_fw_reset(struct adapter *);
238int t4vf_query_params(struct adapter *, unsigned int, const u32 *, u32 *); 239int t4vf_query_params(struct adapter *, unsigned int, const u32 *, u32 *);
239int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *); 240int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *);
240 241
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
index ea1c123f0cb4..e306c20dfaee 100644
--- a/drivers/net/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/cxgb4vf/t4vf_hw.c
@@ -326,6 +326,25 @@ int __devinit t4vf_port_init(struct adapter *adapter, int pidx)
326} 326}
327 327
328/** 328/**
329 * t4vf_fw_reset - issue a reset to FW
330 * @adapter: the adapter
331 *
332 * Issues a reset command to FW. For a Physical Function this would
333 * result in the Firmware resetting all of its state. For a Virtual
334 * Function this just resets the state associated with the VF.
335 */
336int t4vf_fw_reset(struct adapter *adapter)
337{
338 struct fw_reset_cmd cmd;
339
340 memset(&cmd, 0, sizeof(cmd));
341 cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RESET_CMD) |
342 FW_CMD_WRITE);
343 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
344 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
345}
346
347/**
329 * t4vf_query_params - query FW or device parameters 348 * t4vf_query_params - query FW or device parameters
330 * @adapter: the adapter 349 * @adapter: the adapter
331 * @nparams: the number of parameters 350 * @nparams: the number of parameters
diff --git a/drivers/net/davinci_cpdma.c b/drivers/net/davinci_cpdma.c
new file mode 100644
index 000000000000..e92b2b6cd8c4
--- /dev/null
+++ b/drivers/net/davinci_cpdma.c
@@ -0,0 +1,965 @@
1/*
2 * Texas Instruments CPDMA Driver
3 *
4 * Copyright (C) 2010 Texas Instruments
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation version 2.
9 *
10 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
11 * kind, whether express or implied; without even the implied warranty
12 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15#include <linux/kernel.h>
16#include <linux/spinlock.h>
17#include <linux/device.h>
18#include <linux/slab.h>
19#include <linux/err.h>
20#include <linux/dma-mapping.h>
21#include <linux/io.h>
22
23#include "davinci_cpdma.h"
24
25/* DMA Registers */
26#define CPDMA_TXIDVER 0x00
27#define CPDMA_TXCONTROL 0x04
28#define CPDMA_TXTEARDOWN 0x08
29#define CPDMA_RXIDVER 0x10
30#define CPDMA_RXCONTROL 0x14
31#define CPDMA_SOFTRESET 0x1c
32#define CPDMA_RXTEARDOWN 0x18
33#define CPDMA_TXINTSTATRAW 0x80
34#define CPDMA_TXINTSTATMASKED 0x84
35#define CPDMA_TXINTMASKSET 0x88
36#define CPDMA_TXINTMASKCLEAR 0x8c
37#define CPDMA_MACINVECTOR 0x90
38#define CPDMA_MACEOIVECTOR 0x94
39#define CPDMA_RXINTSTATRAW 0xa0
40#define CPDMA_RXINTSTATMASKED 0xa4
41#define CPDMA_RXINTMASKSET 0xa8
42#define CPDMA_RXINTMASKCLEAR 0xac
43#define CPDMA_DMAINTSTATRAW 0xb0
44#define CPDMA_DMAINTSTATMASKED 0xb4
45#define CPDMA_DMAINTMASKSET 0xb8
46#define CPDMA_DMAINTMASKCLEAR 0xbc
47#define CPDMA_DMAINT_HOSTERR BIT(1)
48
49/* the following exist only if has_ext_regs is set */
50#define CPDMA_DMACONTROL 0x20
51#define CPDMA_DMASTATUS 0x24
52#define CPDMA_RXBUFFOFS 0x28
53#define CPDMA_EM_CONTROL 0x2c
54
55/* Descriptor mode bits */
56#define CPDMA_DESC_SOP BIT(31)
57#define CPDMA_DESC_EOP BIT(30)
58#define CPDMA_DESC_OWNER BIT(29)
59#define CPDMA_DESC_EOQ BIT(28)
60#define CPDMA_DESC_TD_COMPLETE BIT(27)
61#define CPDMA_DESC_PASS_CRC BIT(26)
62
63#define CPDMA_TEARDOWN_VALUE 0xfffffffc
64
65struct cpdma_desc {
66 /* hardware fields */
67 u32 hw_next;
68 u32 hw_buffer;
69 u32 hw_len;
70 u32 hw_mode;
71 /* software fields */
72 void *sw_token;
73 u32 sw_buffer;
74 u32 sw_len;
75};
76
77struct cpdma_desc_pool {
78 u32 phys;
79 void __iomem *iomap; /* ioremap map */
80 void *cpumap; /* dma_alloc map */
81 int desc_size, mem_size;
82 int num_desc, used_desc;
83 unsigned long *bitmap;
84 struct device *dev;
85 spinlock_t lock;
86};
87
88enum cpdma_state {
89 CPDMA_STATE_IDLE,
90 CPDMA_STATE_ACTIVE,
91 CPDMA_STATE_TEARDOWN,
92};
93
94static const char *cpdma_state_str[] = { "idle", "active", "teardown" };
95
96struct cpdma_ctlr {
97 enum cpdma_state state;
98 struct cpdma_params params;
99 struct device *dev;
100 struct cpdma_desc_pool *pool;
101 spinlock_t lock;
102 struct cpdma_chan *channels[2 * CPDMA_MAX_CHANNELS];
103};
104
105struct cpdma_chan {
106 enum cpdma_state state;
107 struct cpdma_ctlr *ctlr;
108 int chan_num;
109 spinlock_t lock;
110 struct cpdma_desc __iomem *head, *tail;
111 int count;
112 void __iomem *hdp, *cp, *rxfree;
113 u32 mask;
114 cpdma_handler_fn handler;
115 enum dma_data_direction dir;
116 struct cpdma_chan_stats stats;
117 /* offsets into dmaregs */
118 int int_set, int_clear, td;
119};
120
121/* The following make access to common cpdma_ctlr params more readable */
122#define dmaregs params.dmaregs
123#define num_chan params.num_chan
124
125/* various accessors */
126#define dma_reg_read(ctlr, ofs) __raw_readl((ctlr)->dmaregs + (ofs))
127#define chan_read(chan, fld) __raw_readl((chan)->fld)
128#define desc_read(desc, fld) __raw_readl(&(desc)->fld)
129#define dma_reg_write(ctlr, ofs, v) __raw_writel(v, (ctlr)->dmaregs + (ofs))
130#define chan_write(chan, fld, v) __raw_writel(v, (chan)->fld)
131#define desc_write(desc, fld, v) __raw_writel((u32)(v), &(desc)->fld)
132
133/*
134 * Utility constructs for a cpdma descriptor pool. Some devices (e.g. davinci
135 * emac) have dedicated on-chip memory for these descriptors. Some other
136 * devices (e.g. cpsw switches) use plain old memory. Descriptor pools
137 * abstract out these details
138 */
139static struct cpdma_desc_pool *
140cpdma_desc_pool_create(struct device *dev, u32 phys, int size, int align)
141{
142 int bitmap_size;
143 struct cpdma_desc_pool *pool;
144
145 pool = kzalloc(sizeof(*pool), GFP_KERNEL);
146 if (!pool)
147 return NULL;
148
149 spin_lock_init(&pool->lock);
150
151 pool->dev = dev;
152 pool->mem_size = size;
153 pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align);
154 pool->num_desc = size / pool->desc_size;
155
156 bitmap_size = (pool->num_desc / BITS_PER_LONG) * sizeof(long);
157 pool->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
158 if (!pool->bitmap)
159 goto fail;
160
161 if (phys) {
162 pool->phys = phys;
163 pool->iomap = ioremap(phys, size);
164 } else {
165 pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys,
166 GFP_KERNEL);
167 pool->iomap = (void __force __iomem *)pool->cpumap;
168 }
169
170 if (pool->iomap)
171 return pool;
172
173fail:
174 kfree(pool->bitmap);
175 kfree(pool);
176 return NULL;
177}
178
179static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
180{
181 unsigned long flags;
182
183 if (!pool)
184 return;
185
186 spin_lock_irqsave(&pool->lock, flags);
187 WARN_ON(pool->used_desc);
188 kfree(pool->bitmap);
189 if (pool->cpumap) {
190 dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
191 pool->phys);
192 } else {
193 iounmap(pool->iomap);
194 }
195 spin_unlock_irqrestore(&pool->lock, flags);
196 kfree(pool);
197}
198
199static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
200 struct cpdma_desc __iomem *desc)
201{
202 if (!desc)
203 return 0;
204 return pool->phys + (__force dma_addr_t)desc -
205 (__force dma_addr_t)pool->iomap;
206}
207
208static inline struct cpdma_desc __iomem *
209desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
210{
211 return dma ? pool->iomap + dma - pool->phys : NULL;
212}
213
214static struct cpdma_desc __iomem *
215cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc)
216{
217 unsigned long flags;
218 int index;
219 struct cpdma_desc __iomem *desc = NULL;
220
221 spin_lock_irqsave(&pool->lock, flags);
222
223 index = bitmap_find_next_zero_area(pool->bitmap, pool->num_desc, 0,
224 num_desc, 0);
225 if (index < pool->num_desc) {
226 bitmap_set(pool->bitmap, index, num_desc);
227 desc = pool->iomap + pool->desc_size * index;
228 pool->used_desc++;
229 }
230
231 spin_unlock_irqrestore(&pool->lock, flags);
232 return desc;
233}
234
235static void cpdma_desc_free(struct cpdma_desc_pool *pool,
236 struct cpdma_desc __iomem *desc, int num_desc)
237{
238 unsigned long flags, index;
239
240 index = ((unsigned long)desc - (unsigned long)pool->iomap) /
241 pool->desc_size;
242 spin_lock_irqsave(&pool->lock, flags);
243 bitmap_clear(pool->bitmap, index, num_desc);
244 pool->used_desc--;
245 spin_unlock_irqrestore(&pool->lock, flags);
246}
247
248struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
249{
250 struct cpdma_ctlr *ctlr;
251
252 ctlr = kzalloc(sizeof(*ctlr), GFP_KERNEL);
253 if (!ctlr)
254 return NULL;
255
256 ctlr->state = CPDMA_STATE_IDLE;
257 ctlr->params = *params;
258 ctlr->dev = params->dev;
259 spin_lock_init(&ctlr->lock);
260
261 ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
262 ctlr->params.desc_mem_phys,
263 ctlr->params.desc_mem_size,
264 ctlr->params.desc_align);
265 if (!ctlr->pool) {
266 kfree(ctlr);
267 return NULL;
268 }
269
270 if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
271 ctlr->num_chan = CPDMA_MAX_CHANNELS;
272 return ctlr;
273}
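/*
 * Minimal usage sketch (not part of this driver): how a client such as
 * davinci_emac might bring a controller up. The register offsets, pool
 * size and channel count below are illustrative placeholders, not real
 * hardware values; only the cpdma_params field names and the calls to
 * cpdma_ctlr_create/start/destroy come from this file.
 */
#if 0
static struct cpdma_ctlr *example_cpdma_setup(struct device *dev,
					      void __iomem *regs)
{
	struct cpdma_params params = {
		.dev		= dev,
		.dmaregs	= regs,		/* CPDMA register base */
		.txhdp		= regs + 0x600,	/* placeholder offsets */
		.txcp		= regs + 0x640,
		.rxhdp		= regs + 0x620,
		.rxcp		= regs + 0x660,
		.rxfree		= regs + 0x680,
		.num_chan	= 8,
		.has_soft_reset	= 1,
		.desc_mem_phys	= 0,	/* 0 => use dma_alloc_coherent() */
		.desc_mem_size	= 8192,
		.desc_align	= 16,
	};
	struct cpdma_ctlr *ctlr = cpdma_ctlr_create(&params);

	if (ctlr && cpdma_ctlr_start(ctlr)) {
		cpdma_ctlr_destroy(ctlr);
		ctlr = NULL;
	}
	return ctlr;
}
#endif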
274
275int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
276{
277 unsigned long flags;
278 int i;
279
280 spin_lock_irqsave(&ctlr->lock, flags);
281 if (ctlr->state != CPDMA_STATE_IDLE) {
282 spin_unlock_irqrestore(&ctlr->lock, flags);
283 return -EBUSY;
284 }
285
286 if (ctlr->params.has_soft_reset) {
287 unsigned long timeout = jiffies + HZ/10;
288
289 dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
290 while (time_before(jiffies, timeout)) {
291 if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
292 break;
293 }
294 WARN_ON(!time_before(jiffies, timeout));
295 }
296
297 for (i = 0; i < ctlr->num_chan; i++) {
298 __raw_writel(0, ctlr->params.txhdp + 4 * i);
299 __raw_writel(0, ctlr->params.rxhdp + 4 * i);
300 __raw_writel(0, ctlr->params.txcp + 4 * i);
301 __raw_writel(0, ctlr->params.rxcp + 4 * i);
302 }
303
304 dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
305 dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);
306
307 dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
308 dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);
309
310 ctlr->state = CPDMA_STATE_ACTIVE;
311
312 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
313 if (ctlr->channels[i])
314 cpdma_chan_start(ctlr->channels[i]);
315 }
316 spin_unlock_irqrestore(&ctlr->lock, flags);
317 return 0;
318}
319
320int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
321{
322 unsigned long flags;
323 int i;
324
325 spin_lock_irqsave(&ctlr->lock, flags);
326 if (ctlr->state != CPDMA_STATE_ACTIVE) {
327 spin_unlock_irqrestore(&ctlr->lock, flags);
328 return -EINVAL;
329 }
330
331 ctlr->state = CPDMA_STATE_TEARDOWN;
332
333 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
334 if (ctlr->channels[i])
335 cpdma_chan_stop(ctlr->channels[i]);
336 }
337
338 dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
339 dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);
340
341 dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
342 dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);
343
344 ctlr->state = CPDMA_STATE_IDLE;
345
346 spin_unlock_irqrestore(&ctlr->lock, flags);
347 return 0;
348}
349
350int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr)
351{
352 struct device *dev = ctlr->dev;
353 unsigned long flags;
354 int i;
355
356 spin_lock_irqsave(&ctlr->lock, flags);
357
358 dev_info(dev, "CPDMA: state: %s", cpdma_state_str[ctlr->state]);
359
360 dev_info(dev, "CPDMA: txidver: %x",
361 dma_reg_read(ctlr, CPDMA_TXIDVER));
362 dev_info(dev, "CPDMA: txcontrol: %x",
363 dma_reg_read(ctlr, CPDMA_TXCONTROL));
364 dev_info(dev, "CPDMA: txteardown: %x",
365 dma_reg_read(ctlr, CPDMA_TXTEARDOWN));
366 dev_info(dev, "CPDMA: rxidver: %x",
367 dma_reg_read(ctlr, CPDMA_RXIDVER));
368 dev_info(dev, "CPDMA: rxcontrol: %x",
369 dma_reg_read(ctlr, CPDMA_RXCONTROL));
370 dev_info(dev, "CPDMA: softreset: %x",
371 dma_reg_read(ctlr, CPDMA_SOFTRESET));
372 dev_info(dev, "CPDMA: rxteardown: %x",
373 dma_reg_read(ctlr, CPDMA_RXTEARDOWN));
374 dev_info(dev, "CPDMA: txintstatraw: %x",
375 dma_reg_read(ctlr, CPDMA_TXINTSTATRAW));
376 dev_info(dev, "CPDMA: txintstatmasked: %x",
377 dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED));
378 dev_info(dev, "CPDMA: txintmaskset: %x",
379 dma_reg_read(ctlr, CPDMA_TXINTMASKSET));
380 dev_info(dev, "CPDMA: txintmaskclear: %x",
381 dma_reg_read(ctlr, CPDMA_TXINTMASKCLEAR));
382 dev_info(dev, "CPDMA: macinvector: %x",
383 dma_reg_read(ctlr, CPDMA_MACINVECTOR));
384 dev_info(dev, "CPDMA: maceoivector: %x",
385 dma_reg_read(ctlr, CPDMA_MACEOIVECTOR));
386 dev_info(dev, "CPDMA: rxintstatraw: %x",
387 dma_reg_read(ctlr, CPDMA_RXINTSTATRAW));
388 dev_info(dev, "CPDMA: rxintstatmasked: %x",
389 dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED));
390 dev_info(dev, "CPDMA: rxintmaskset: %x",
391 dma_reg_read(ctlr, CPDMA_RXINTMASKSET));
392 dev_info(dev, "CPDMA: rxintmaskclear: %x",
393 dma_reg_read(ctlr, CPDMA_RXINTMASKCLEAR));
394 dev_info(dev, "CPDMA: dmaintstatraw: %x",
395 dma_reg_read(ctlr, CPDMA_DMAINTSTATRAW));
396 dev_info(dev, "CPDMA: dmaintstatmasked: %x",
397 dma_reg_read(ctlr, CPDMA_DMAINTSTATMASKED));
398 dev_info(dev, "CPDMA: dmaintmaskset: %x",
399 dma_reg_read(ctlr, CPDMA_DMAINTMASKSET));
400 dev_info(dev, "CPDMA: dmaintmaskclear: %x",
401 dma_reg_read(ctlr, CPDMA_DMAINTMASKCLEAR));
402
403 if (ctlr->params.has_ext_regs) {
404 dev_info(dev, "CPDMA: dmacontrol: %x",
405 dma_reg_read(ctlr, CPDMA_DMACONTROL));
406 dev_info(dev, "CPDMA: dmastatus: %x",
407 dma_reg_read(ctlr, CPDMA_DMASTATUS));
408 dev_info(dev, "CPDMA: rxbuffofs: %x",
409 dma_reg_read(ctlr, CPDMA_RXBUFFOFS));
410 }
411
412 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
413 if (ctlr->channels[i])
414 cpdma_chan_dump(ctlr->channels[i]);
415
416 spin_unlock_irqrestore(&ctlr->lock, flags);
417 return 0;
418}
419
420int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
421{
422 int ret = 0, i;
423
424 if (!ctlr)
425 return -EINVAL;
426
427 /* cpdma_ctlr_stop() and cpdma_chan_destroy() take ctlr->lock
428 * themselves; holding it across these calls would deadlock. */
429 if (ctlr->state != CPDMA_STATE_IDLE)
430 cpdma_ctlr_stop(ctlr);
431
432 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
433 if (ctlr->channels[i])
434 cpdma_chan_destroy(ctlr->channels[i]);
435 }
436
437 cpdma_desc_pool_destroy(ctlr->pool);
438 kfree(ctlr);
439
440 return ret;
441}
442
443int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
444{
445 unsigned long flags;
446 int i, reg;
447
448 spin_lock_irqsave(&ctlr->lock, flags);
449 if (ctlr->state != CPDMA_STATE_ACTIVE) {
450 spin_unlock_irqrestore(&ctlr->lock, flags);
451 return -EINVAL;
452 }
453
454 reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
455 dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);
456
457 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
458 if (ctlr->channels[i])
459 cpdma_chan_int_ctrl(ctlr->channels[i], enable);
460 }
461
462 spin_unlock_irqrestore(&ctlr->lock, flags);
463 return 0;
464}
465
466void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr)
467{
468 dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, 0);
469}
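
cpdma_ctlr_eoi() is the interrupt acknowledge: writing the EOI vector lets the controller raise further interrupts. A minimal sketch of one plausible hard-IRQ usage follows, assuming a hypothetical driver-private structure with dma and napi members (my_priv and my_dma_interrupt are illustrative names, not part of this API; some integrations issue the EOI from the poll handler instead):

/* Sketch only: hard-IRQ pattern around cpdma_ctlr_eoi(). */
static irqreturn_t my_dma_interrupt(int irq, void *dev_id)
{
	struct my_priv *priv = dev_id;

	/* mask DMA interrupts; the NAPI poll handler re-enables them */
	cpdma_ctlr_int_ctrl(priv->dma, false);
	napi_schedule(&priv->napi);

	/* acknowledge so the controller can assert the next interrupt */
	cpdma_ctlr_eoi(priv->dma);
	return IRQ_HANDLED;
}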
470
471struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
472 cpdma_handler_fn handler)
473{
474 struct cpdma_chan *chan;
475 int ret, offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
476 unsigned long flags;
477
478 if (__chan_linear(chan_num) >= ctlr->num_chan)
479 return NULL;
480
481 ret = -ENOMEM;
482 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
483 if (!chan)
484 goto err_chan_alloc;
485
486 spin_lock_irqsave(&ctlr->lock, flags);
487 ret = -EBUSY;
488 if (ctlr->channels[chan_num])
489 goto err_chan_busy;
490
491 chan->ctlr = ctlr;
492 chan->state = CPDMA_STATE_IDLE;
493 chan->chan_num = chan_num;
494 chan->handler = handler;
495
496 if (is_rx_chan(chan)) {
497 chan->hdp = ctlr->params.rxhdp + offset;
498 chan->cp = ctlr->params.rxcp + offset;
499 chan->rxfree = ctlr->params.rxfree + offset;
500 chan->int_set = CPDMA_RXINTMASKSET;
501 chan->int_clear = CPDMA_RXINTMASKCLEAR;
502 chan->td = CPDMA_RXTEARDOWN;
503 chan->dir = DMA_FROM_DEVICE;
504 } else {
505 chan->hdp = ctlr->params.txhdp + offset;
506 chan->cp = ctlr->params.txcp + offset;
507 chan->int_set = CPDMA_TXINTMASKSET;
508 chan->int_clear = CPDMA_TXINTMASKCLEAR;
509 chan->td = CPDMA_TXTEARDOWN;
510 chan->dir = DMA_TO_DEVICE;
511 }
512 chan->mask = BIT(chan_linear(chan));
513
514 spin_lock_init(&chan->lock);
515
516 ctlr->channels[chan_num] = chan;
517 spin_unlock_irqrestore(&ctlr->lock, flags);
518 return chan;
519
520err_chan_busy:
521 spin_unlock_irqrestore(&ctlr->lock, flags);
522 kfree(chan);
523err_chan_alloc:
524 return ERR_PTR(ret);
525}
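
Note the mixed error convention above: an out-of-range channel number returns NULL, while allocation and busy failures return ERR_PTR(), so callers should guard against both. A hedged sketch of creating the usual TX/RX pair with the tx_chan_num()/rx_chan_num() helpers from davinci_cpdma.h (the handler names mirror the davinci_emac callbacks further down; err_free_dma is a placeholder label):

/* Sketch: one TX and one RX channel on the same controller. */
priv->txchan = cpdma_chan_create(priv->dma, tx_chan_num(EMAC_DEF_TX_CH),
				 emac_tx_handler);
priv->rxchan = cpdma_chan_create(priv->dma, rx_chan_num(EMAC_DEF_RX_CH),
				 emac_rx_handler);
if (!priv->txchan || IS_ERR(priv->txchan) ||
    !priv->rxchan || IS_ERR(priv->rxchan))
	goto err_free_dma;	/* handle both NULL and ERR_PTR() returns */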
526
527int cpdma_chan_destroy(struct cpdma_chan *chan)
528{
529	struct cpdma_ctlr *ctlr;
530	unsigned long flags;
531
532	if (!chan)
533		return -EINVAL;
534	ctlr = chan->ctlr;
535	spin_lock_irqsave(&ctlr->lock, flags);
536 if (chan->state != CPDMA_STATE_IDLE)
537 cpdma_chan_stop(chan);
538 ctlr->channels[chan->chan_num] = NULL;
539 spin_unlock_irqrestore(&ctlr->lock, flags);
540 kfree(chan);
541 return 0;
542}
543
544int cpdma_chan_get_stats(struct cpdma_chan *chan,
545 struct cpdma_chan_stats *stats)
546{
547 unsigned long flags;
548 if (!chan)
549 return -EINVAL;
550 spin_lock_irqsave(&chan->lock, flags);
551 memcpy(stats, &chan->stats, sizeof(*stats));
552 spin_unlock_irqrestore(&chan->lock, flags);
553 return 0;
554}
555
556int cpdma_chan_dump(struct cpdma_chan *chan)
557{
558 unsigned long flags;
559 struct device *dev = chan->ctlr->dev;
560
561 spin_lock_irqsave(&chan->lock, flags);
562
563 dev_info(dev, "channel %d (%s %d) state %s",
564 chan->chan_num, is_rx_chan(chan) ? "rx" : "tx",
565 chan_linear(chan), cpdma_state_str[chan->state]);
566 dev_info(dev, "\thdp: %x\n", chan_read(chan, hdp));
567 dev_info(dev, "\tcp: %x\n", chan_read(chan, cp));
568 if (chan->rxfree) {
569 dev_info(dev, "\trxfree: %x\n",
570 chan_read(chan, rxfree));
571 }
572
573 dev_info(dev, "\tstats head_enqueue: %d\n",
574 chan->stats.head_enqueue);
575 dev_info(dev, "\tstats tail_enqueue: %d\n",
576 chan->stats.tail_enqueue);
577 dev_info(dev, "\tstats pad_enqueue: %d\n",
578 chan->stats.pad_enqueue);
579 dev_info(dev, "\tstats misqueued: %d\n",
580 chan->stats.misqueued);
581 dev_info(dev, "\tstats desc_alloc_fail: %d\n",
582 chan->stats.desc_alloc_fail);
583 dev_info(dev, "\tstats pad_alloc_fail: %d\n",
584 chan->stats.pad_alloc_fail);
585 dev_info(dev, "\tstats runt_receive_buff: %d\n",
586 chan->stats.runt_receive_buff);
587 dev_info(dev, "\tstats runt_transmit_buff: %d\n",
588 chan->stats.runt_transmit_buff);
589 dev_info(dev, "\tstats empty_dequeue: %d\n",
590 chan->stats.empty_dequeue);
591 dev_info(dev, "\tstats busy_dequeue: %d\n",
592 chan->stats.busy_dequeue);
593 dev_info(dev, "\tstats good_dequeue: %d\n",
594 chan->stats.good_dequeue);
595 dev_info(dev, "\tstats requeue: %d\n",
596 chan->stats.requeue);
597 dev_info(dev, "\tstats teardown_dequeue: %d\n",
598 chan->stats.teardown_dequeue);
599
600 spin_unlock_irqrestore(&chan->lock, flags);
601 return 0;
602}
603
604static void __cpdma_chan_submit(struct cpdma_chan *chan,
605 struct cpdma_desc __iomem *desc)
606{
607 struct cpdma_ctlr *ctlr = chan->ctlr;
608 struct cpdma_desc __iomem *prev = chan->tail;
609 struct cpdma_desc_pool *pool = ctlr->pool;
610 dma_addr_t desc_dma;
611 u32 mode;
612
613 desc_dma = desc_phys(pool, desc);
614
615 /* simple case - idle channel */
616 if (!chan->head) {
617 chan->stats.head_enqueue++;
618 chan->head = desc;
619 chan->tail = desc;
620 if (chan->state == CPDMA_STATE_ACTIVE)
621 chan_write(chan, hdp, desc_dma);
622 return;
623 }
624
625 /* first chain the descriptor at the tail of the list */
626 desc_write(prev, hw_next, desc_dma);
627 chan->tail = desc;
628 chan->stats.tail_enqueue++;
629
630 /* next check if EOQ has been triggered already */
631 mode = desc_read(prev, hw_mode);
632 if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
633 (chan->state == CPDMA_STATE_ACTIVE)) {
634 desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
635 chan_write(chan, hdp, desc_dma);
636 chan->stats.misqueued++;
637 }
638}
639
640int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
641 int len, gfp_t gfp_mask)
642{
643 struct cpdma_ctlr *ctlr = chan->ctlr;
644 struct cpdma_desc __iomem *desc;
645 dma_addr_t buffer;
646 unsigned long flags;
647 u32 mode;
648 int ret = 0;
649
650 spin_lock_irqsave(&chan->lock, flags);
651
652 if (chan->state == CPDMA_STATE_TEARDOWN) {
653 ret = -EINVAL;
654 goto unlock_ret;
655 }
656
657 desc = cpdma_desc_alloc(ctlr->pool, 1);
658 if (!desc) {
659 chan->stats.desc_alloc_fail++;
660 ret = -ENOMEM;
661 goto unlock_ret;
662 }
663
664 if (len < ctlr->params.min_packet_size) {
665 len = ctlr->params.min_packet_size;
666 chan->stats.runt_transmit_buff++;
667 }
668
669 buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
670 mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
671
672 desc_write(desc, hw_next, 0);
673 desc_write(desc, hw_buffer, buffer);
674 desc_write(desc, hw_len, len);
675 desc_write(desc, hw_mode, mode | len);
676 desc_write(desc, sw_token, token);
677 desc_write(desc, sw_buffer, buffer);
678 desc_write(desc, sw_len, len);
679
680 __cpdma_chan_submit(chan, desc);
681
682 if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
683 chan_write(chan, rxfree, 1);
684
685 chan->count++;
686
687unlock_ret:
688 spin_unlock_irqrestore(&chan->lock, flags);
689 return ret;
690}
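
The same submit path pre-posts empty receive buffers. Descriptors queued while the channel is still idle are not kicked immediately; cpdma_chan_start() later writes the queued count to the rxfree register. A sketch of priming an RX channel, patterned on the davinci_emac usage below (EMAC_DEF_RX_NUM_DESC and emac_rx_alloc() come from that driver):

/* Sketch: pre-posting RX buffers before cpdma_chan_start(). */
for (i = 0; i < EMAC_DEF_RX_NUM_DESC; i++) {
	struct sk_buff *skb = emac_rx_alloc(priv);

	if (!skb)
		break;
	ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
				skb_tailroom(skb), GFP_KERNEL);
	if (ret < 0) {
		dev_kfree_skb_any(skb);
		break;
	}
}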
691
692static void __cpdma_chan_free(struct cpdma_chan *chan,
693 struct cpdma_desc __iomem *desc,
694 int outlen, int status)
695{
696 struct cpdma_ctlr *ctlr = chan->ctlr;
697 struct cpdma_desc_pool *pool = ctlr->pool;
698 dma_addr_t buff_dma;
699 int origlen;
700 void *token;
701
702 token = (void *)desc_read(desc, sw_token);
703 buff_dma = desc_read(desc, sw_buffer);
704 origlen = desc_read(desc, sw_len);
705
706 dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
707 cpdma_desc_free(pool, desc, 1);
708 (*chan->handler)(token, outlen, status);
709}
710
711static int __cpdma_chan_process(struct cpdma_chan *chan)
712{
713 struct cpdma_ctlr *ctlr = chan->ctlr;
714 struct cpdma_desc __iomem *desc;
715 int status, outlen;
716 struct cpdma_desc_pool *pool = ctlr->pool;
717 dma_addr_t desc_dma;
718 unsigned long flags;
719
720 spin_lock_irqsave(&chan->lock, flags);
721
722 desc = chan->head;
723 if (!desc) {
724 chan->stats.empty_dequeue++;
725 status = -ENOENT;
726 goto unlock_ret;
727 }
728 desc_dma = desc_phys(pool, desc);
729
730 status = __raw_readl(&desc->hw_mode);
731 outlen = status & 0x7ff;
732 if (status & CPDMA_DESC_OWNER) {
733 chan->stats.busy_dequeue++;
734 status = -EBUSY;
735 goto unlock_ret;
736 }
737 status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE);
738
739 chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
740 chan_write(chan, cp, desc_dma);
741 chan->count--;
742 chan->stats.good_dequeue++;
743
744 if (status & CPDMA_DESC_EOQ) {
745 chan->stats.requeue++;
746 chan_write(chan, hdp, desc_phys(pool, chan->head));
747 }
748
749 spin_unlock_irqrestore(&chan->lock, flags);
750
751 __cpdma_chan_free(chan, desc, outlen, status);
752 return status;
753
754unlock_ret:
755 spin_unlock_irqrestore(&chan->lock, flags);
756 return status;
757}
758
759int cpdma_chan_process(struct cpdma_chan *chan, int quota)
760{
761 int used = 0, ret = 0;
762
763 if (chan->state != CPDMA_STATE_ACTIVE)
764 return -EINVAL;
765
766 while (used < quota) {
767 ret = __cpdma_chan_process(chan);
768 if (ret < 0)
769 break;
770 used++;
771 }
772 return used;
773}
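
cpdma_chan_process() meters completion work against a caller-supplied quota and returns the number of descriptors reaped, which maps naturally onto NAPI. A hedged sketch (my_poll and my_priv are illustrative; the real work happens in the per-descriptor handler callbacks):

/* Sketch: driving both channels from a NAPI poll handler. */
static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int rx_done;

	cpdma_chan_process(priv->txchan, INT_MAX);	/* reap TX completions */
	rx_done = cpdma_chan_process(priv->rxchan, budget);
	if (rx_done < budget) {
		napi_complete(napi);
		cpdma_ctlr_int_ctrl(priv->dma, true);	/* unmask interrupts */
	}
	return rx_done;
}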
774
775int cpdma_chan_start(struct cpdma_chan *chan)
776{
777 struct cpdma_ctlr *ctlr = chan->ctlr;
778 struct cpdma_desc_pool *pool = ctlr->pool;
779 unsigned long flags;
780
781 spin_lock_irqsave(&chan->lock, flags);
782 if (chan->state != CPDMA_STATE_IDLE) {
783 spin_unlock_irqrestore(&chan->lock, flags);
784 return -EBUSY;
785 }
786 if (ctlr->state != CPDMA_STATE_ACTIVE) {
787 spin_unlock_irqrestore(&chan->lock, flags);
788 return -EINVAL;
789 }
790 dma_reg_write(ctlr, chan->int_set, chan->mask);
791 chan->state = CPDMA_STATE_ACTIVE;
792 if (chan->head) {
793 chan_write(chan, hdp, desc_phys(pool, chan->head));
794 if (chan->rxfree)
795 chan_write(chan, rxfree, chan->count);
796 }
797
798 spin_unlock_irqrestore(&chan->lock, flags);
799 return 0;
800}
801
802int cpdma_chan_stop(struct cpdma_chan *chan)
803{
804 struct cpdma_ctlr *ctlr = chan->ctlr;
805 struct cpdma_desc_pool *pool = ctlr->pool;
806 unsigned long flags;
807 int ret;
808 unsigned long timeout;
809
810 spin_lock_irqsave(&chan->lock, flags);
811 if (chan->state != CPDMA_STATE_ACTIVE) {
812 spin_unlock_irqrestore(&chan->lock, flags);
813 return -EINVAL;
814 }
815
816 chan->state = CPDMA_STATE_TEARDOWN;
817 dma_reg_write(ctlr, chan->int_clear, chan->mask);
818
819 /* trigger teardown */
820 dma_reg_write(ctlr, chan->td, chan->chan_num);
821
822 /* wait for teardown complete */
823 timeout = jiffies + HZ/10; /* 100 msec */
824 while (time_before(jiffies, timeout)) {
825 u32 cp = chan_read(chan, cp);
826 if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
827 break;
828 cpu_relax();
829 }
830 WARN_ON(!time_before(jiffies, timeout));
831 chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);
832
833 /* handle completed packets */
834 do {
835 ret = __cpdma_chan_process(chan);
836 if (ret < 0)
837 break;
838 } while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
839
840 /* remaining packets haven't been tx/rx'ed, clean them up */
841 while (chan->head) {
842 struct cpdma_desc __iomem *desc = chan->head;
843 dma_addr_t next_dma;
844
845 next_dma = desc_read(desc, hw_next);
846 chan->head = desc_from_phys(pool, next_dma);
847 chan->stats.teardown_dequeue++;
848
849 /* issue callback without locks held */
850 spin_unlock_irqrestore(&chan->lock, flags);
851 __cpdma_chan_free(chan, desc, 0, -ENOSYS);
852 spin_lock_irqsave(&chan->lock, flags);
853 }
854
855 chan->state = CPDMA_STATE_IDLE;
856 spin_unlock_irqrestore(&chan->lock, flags);
857 return 0;
858}
859
860int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
861{
862 unsigned long flags;
863
864 spin_lock_irqsave(&chan->lock, flags);
865 if (chan->state != CPDMA_STATE_ACTIVE) {
866 spin_unlock_irqrestore(&chan->lock, flags);
867 return -EINVAL;
868 }
869
870 dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
871 chan->mask);
872 spin_unlock_irqrestore(&chan->lock, flags);
873
874 return 0;
875}
876
877struct cpdma_control_info {
878 u32 reg;
879 u32 shift, mask;
880 int access;
881#define ACCESS_RO BIT(0)
882#define ACCESS_WO BIT(1)
883#define ACCESS_RW (ACCESS_RO | ACCESS_WO)
884};
885
886static struct cpdma_control_info controls[] = {
887 [CPDMA_CMD_IDLE] = {CPDMA_DMACONTROL, 3, 1, ACCESS_WO},
888 [CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL, 4, 1, ACCESS_RW},
889 [CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL, 2, 1, ACCESS_RW},
890 [CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL, 1, 1, ACCESS_RW},
891 [CPDMA_TX_PRIO_FIXED] = {CPDMA_DMACONTROL, 0, 1, ACCESS_RW},
892 [CPDMA_STAT_IDLE] = {CPDMA_DMASTATUS, 31, 1, ACCESS_RO},
893 [CPDMA_STAT_TX_ERR_CODE] = {CPDMA_DMASTATUS, 20, 0xf, ACCESS_RW},
894 [CPDMA_STAT_TX_ERR_CHAN] = {CPDMA_DMASTATUS, 16, 0x7, ACCESS_RW},
895 [CPDMA_STAT_RX_ERR_CODE] = {CPDMA_DMASTATUS, 12, 0xf, ACCESS_RW},
896 [CPDMA_STAT_RX_ERR_CHAN] = {CPDMA_DMASTATUS, 8, 0x7, ACCESS_RW},
897 [CPDMA_RX_BUFFER_OFFSET] = {CPDMA_RXBUFFOFS, 0, 0xffff, ACCESS_RW},
898};
899
900int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
901{
902 unsigned long flags;
903	struct cpdma_control_info *info;
904 int ret;
905
906 spin_lock_irqsave(&ctlr->lock, flags);
907
908 ret = -ENOTSUPP;
909 if (!ctlr->params.has_ext_regs)
910 goto unlock_ret;
911
912 ret = -EINVAL;
913 if (ctlr->state != CPDMA_STATE_ACTIVE)
914 goto unlock_ret;
915
916 ret = -ENOENT;
917 if (control < 0 || control >= ARRAY_SIZE(controls))
918 goto unlock_ret;
919	info = &controls[control];
920 ret = -EPERM;
921 if ((info->access & ACCESS_RO) != ACCESS_RO)
922 goto unlock_ret;
923
924 ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;
925
926unlock_ret:
927 spin_unlock_irqrestore(&ctlr->lock, flags);
928 return ret;
929}
930
931int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
932{
933 unsigned long flags;
934	struct cpdma_control_info *info;
935 int ret;
936 u32 val;
937
938 spin_lock_irqsave(&ctlr->lock, flags);
939
940 ret = -ENOTSUPP;
941 if (!ctlr->params.has_ext_regs)
942 goto unlock_ret;
943
944 ret = -EINVAL;
945 if (ctlr->state != CPDMA_STATE_ACTIVE)
946 goto unlock_ret;
947
948 ret = -ENOENT;
949 if (control < 0 || control >= ARRAY_SIZE(controls))
950 goto unlock_ret;
951	info = &controls[control];
952 ret = -EPERM;
953 if ((info->access & ACCESS_WO) != ACCESS_WO)
954 goto unlock_ret;
955
956 val = dma_reg_read(ctlr, info->reg);
957 val &= ~(info->mask << info->shift);
958 val |= (value & info->mask) << info->shift;
959 dma_reg_write(ctlr, info->reg, val);
960 ret = 0;
961
962unlock_ret:
963 spin_unlock_irqrestore(&ctlr->lock, flags);
964 return ret;
965}
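
Both accessors reduce to a "(reg >> shift) & mask" read and a masked read-modify-write over the table above, and only succeed on a controller with extended registers while the DMA is active. A short usage sketch from within the driver:

/* Sketch: a 2-byte RX buffer offset lands in RXBUFFOFS as
 * ((2 & 0xffff) << 0); reads decode the same field in reverse. */
ret = cpdma_control_set(ctlr, CPDMA_RX_BUFFER_OFFSET, 2);
if (ret < 0)
	dev_warn(ctlr->dev, "cannot set rx buffer offset: %d\n", ret);

ret = cpdma_control_get(ctlr, CPDMA_STAT_IDLE);
if (ret == 1)
	dev_info(ctlr->dev, "DMA engine idle\n");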
diff --git a/drivers/net/davinci_cpdma.h b/drivers/net/davinci_cpdma.h
new file mode 100644
index 000000000000..868e50ebde45
--- /dev/null
+++ b/drivers/net/davinci_cpdma.h
@@ -0,0 +1,108 @@
1/*
2 * Texas Instruments CPDMA Driver
3 *
4 * Copyright (C) 2010 Texas Instruments
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation version 2.
9 *
10 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
11 * kind, whether express or implied; without even the implied warranty
12 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15#ifndef __DAVINCI_CPDMA_H__
16#define __DAVINCI_CPDMA_H__
17
18#define CPDMA_MAX_CHANNELS BITS_PER_LONG
19
20#define tx_chan_num(chan) (chan)
21#define rx_chan_num(chan) ((chan) + CPDMA_MAX_CHANNELS)
22#define is_rx_chan(chan) ((chan)->chan_num >= CPDMA_MAX_CHANNELS)
23#define is_tx_chan(chan) (!is_rx_chan(chan))
24#define __chan_linear(chan_num) ((chan_num) & (CPDMA_MAX_CHANNELS - 1))
25#define chan_linear(chan) __chan_linear((chan)->chan_num)
26
27struct cpdma_params {
28 struct device *dev;
29 void __iomem *dmaregs;
30 void __iomem *txhdp, *rxhdp, *txcp, *rxcp;
31 void __iomem *rxthresh, *rxfree;
32 int num_chan;
33 bool has_soft_reset;
34 int min_packet_size;
35 u32 desc_mem_phys;
36 int desc_mem_size;
37 int desc_align;
38
39 /*
40 * Some instances of embedded cpdma controllers have extra control and
41 * status registers. The following flag enables access to these
42 * "extended" registers.
43 */
44 bool has_ext_regs;
45};
46
47struct cpdma_chan_stats {
48 u32 head_enqueue;
49 u32 tail_enqueue;
50 u32 pad_enqueue;
51 u32 misqueued;
52 u32 desc_alloc_fail;
53 u32 pad_alloc_fail;
54 u32 runt_receive_buff;
55 u32 runt_transmit_buff;
56 u32 empty_dequeue;
57 u32 busy_dequeue;
58 u32 good_dequeue;
59 u32 requeue;
60 u32 teardown_dequeue;
61};
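
These counters are maintained under the channel lock, and cpdma_chan_get_stats() (declared below) copies out a consistent snapshot. A small sketch of reporting a few of them, assuming the usual priv->rxchan context:

/* Sketch: snapshotting channel counters for debug output. */
struct cpdma_chan_stats stats;

if (cpdma_chan_get_stats(priv->rxchan, &stats) == 0)
	dev_dbg(dev, "rx: good %u busy %u empty %u requeue %u\n",
		stats.good_dequeue, stats.busy_dequeue,
		stats.empty_dequeue, stats.requeue);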
62
63struct cpdma_ctlr;
64struct cpdma_chan;
65
66typedef void (*cpdma_handler_fn)(void *token, int len, int status);
67
68struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params);
69int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr);
70int cpdma_ctlr_start(struct cpdma_ctlr *ctlr);
71int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr);
72int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr);
73
74struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
75 cpdma_handler_fn handler);
76int cpdma_chan_destroy(struct cpdma_chan *chan);
77int cpdma_chan_start(struct cpdma_chan *chan);
78int cpdma_chan_stop(struct cpdma_chan *chan);
79int cpdma_chan_dump(struct cpdma_chan *chan);
80
81int cpdma_chan_get_stats(struct cpdma_chan *chan,
82 struct cpdma_chan_stats *stats);
83int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
84 int len, gfp_t gfp_mask);
85int cpdma_chan_process(struct cpdma_chan *chan, int quota);
86
87int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable);
88void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr);
89int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable);
90
91enum cpdma_control {
92 CPDMA_CMD_IDLE, /* write-only */
93 CPDMA_COPY_ERROR_FRAMES, /* read-write */
94 CPDMA_RX_OFF_LEN_UPDATE, /* read-write */
95 CPDMA_RX_OWNERSHIP_FLIP, /* read-write */
96 CPDMA_TX_PRIO_FIXED, /* read-write */
97 CPDMA_STAT_IDLE, /* read-only */
98 CPDMA_STAT_TX_ERR_CHAN, /* read-only */
99 CPDMA_STAT_TX_ERR_CODE, /* read-only */
100 CPDMA_STAT_RX_ERR_CHAN, /* read-only */
101 CPDMA_STAT_RX_ERR_CODE, /* read-only */
102 CPDMA_RX_BUFFER_OFFSET, /* read-write */
103};
104
105int cpdma_control_get(struct cpdma_ctlr *ctlr, int control);
106int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value);
107
108#endif
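
Taken together, the API above follows a create/start/submit/stop/destroy lifecycle. A hedged end-to-end sketch, assuming params has already been filled in with the platform's register offsets and descriptor memory, and with error handling elided:

/* Sketch: cpdma lifecycle; tx_done is a placeholder cpdma_handler_fn. */
struct cpdma_ctlr *dma = cpdma_ctlr_create(&params);
struct cpdma_chan *txch = cpdma_chan_create(dma, tx_chan_num(0), tx_done);

cpdma_ctlr_start(dma);			/* reset and enable the DMA engines */
cpdma_chan_start(txch);			/* unmask and kick the channel */

cpdma_chan_submit(txch, skb, skb->data, skb->len, GFP_KERNEL);
/* ... completions arrive via tx_done(token, len, status) ... */

cpdma_chan_stop(txch);			/* teardown, drain in-flight packets */
cpdma_ctlr_stop(dma);
cpdma_ctlr_destroy(dma);		/* frees the pool and any channels */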
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 7fbd052ddb0a..2a628d17d178 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -63,6 +63,8 @@
63#include <asm/irq.h> 63#include <asm/irq.h>
64#include <asm/page.h> 64#include <asm/page.h>
65 65
66#include "davinci_cpdma.h"
67
66static int debug_level; 68static int debug_level;
67module_param(debug_level, int, 0); 69module_param(debug_level, int, 0);
68MODULE_PARM_DESC(debug_level, "DaVinci EMAC debug level (NETIF_MSG bits)"); 70MODULE_PARM_DESC(debug_level, "DaVinci EMAC debug level (NETIF_MSG bits)");
@@ -113,7 +115,7 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
113#define EMAC_DEF_MAX_FRAME_SIZE (1500 + 14 + 4 + 4) 115#define EMAC_DEF_MAX_FRAME_SIZE (1500 + 14 + 4 + 4)
114#define EMAC_DEF_TX_CH (0) /* Default 0th channel */ 116#define EMAC_DEF_TX_CH (0) /* Default 0th channel */
115#define EMAC_DEF_RX_CH (0) /* Default 0th channel */ 117#define EMAC_DEF_RX_CH (0) /* Default 0th channel */
116#define EMAC_DEF_MDIO_TICK_MS (10) /* typically 1 tick=1 ms) */ 118#define EMAC_DEF_RX_NUM_DESC (128)
117#define EMAC_DEF_MAX_TX_CH (1) /* Max TX channels configured */ 119#define EMAC_DEF_MAX_TX_CH (1) /* Max TX channels configured */
118#define EMAC_DEF_MAX_RX_CH (1) /* Max RX channels configured */ 120#define EMAC_DEF_MAX_RX_CH (1) /* Max RX channels configured */
119#define EMAC_POLL_WEIGHT (64) /* Default NAPI poll weight */ 121#define EMAC_POLL_WEIGHT (64) /* Default NAPI poll weight */
@@ -125,7 +127,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
125/* EMAC register related defines */ 127/* EMAC register related defines */
126#define EMAC_ALL_MULTI_REG_VALUE (0xFFFFFFFF) 128#define EMAC_ALL_MULTI_REG_VALUE (0xFFFFFFFF)
127#define EMAC_NUM_MULTICAST_BITS (64) 129#define EMAC_NUM_MULTICAST_BITS (64)
128#define EMAC_TEARDOWN_VALUE (0xFFFFFFFC)
129#define EMAC_TX_CONTROL_TX_ENABLE_VAL (0x1) 130#define EMAC_TX_CONTROL_TX_ENABLE_VAL (0x1)
130#define EMAC_RX_CONTROL_RX_ENABLE_VAL (0x1) 131#define EMAC_RX_CONTROL_RX_ENABLE_VAL (0x1)
131#define EMAC_MAC_HOST_ERR_INTMASK_VAL (0x2) 132#define EMAC_MAC_HOST_ERR_INTMASK_VAL (0x2)
@@ -212,24 +213,10 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
212#define EMAC_DEF_MAX_MULTICAST_ADDRESSES (64) /* Max mcast addr's */ 213#define EMAC_DEF_MAX_MULTICAST_ADDRESSES (64) /* Max mcast addr's */
213 214
214/* EMAC Peripheral Device Register Memory Layout structure */ 215/* EMAC Peripheral Device Register Memory Layout structure */
215#define EMAC_TXIDVER 0x0
216#define EMAC_TXCONTROL 0x4
217#define EMAC_TXTEARDOWN 0x8
218#define EMAC_RXIDVER 0x10
219#define EMAC_RXCONTROL 0x14
220#define EMAC_RXTEARDOWN 0x18
221#define EMAC_TXINTSTATRAW 0x80
222#define EMAC_TXINTSTATMASKED 0x84
223#define EMAC_TXINTMASKSET 0x88
224#define EMAC_TXINTMASKCLEAR 0x8C
225#define EMAC_MACINVECTOR 0x90 216#define EMAC_MACINVECTOR 0x90
226 217
227#define EMAC_DM646X_MACEOIVECTOR 0x94 218#define EMAC_DM646X_MACEOIVECTOR 0x94
228 219
229#define EMAC_RXINTSTATRAW 0xA0
230#define EMAC_RXINTSTATMASKED 0xA4
231#define EMAC_RXINTMASKSET 0xA8
232#define EMAC_RXINTMASKCLEAR 0xAC
233#define EMAC_MACINTSTATRAW 0xB0 220#define EMAC_MACINTSTATRAW 0xB0
234#define EMAC_MACINTSTATMASKED 0xB4 221#define EMAC_MACINTSTATMASKED 0xB4
235#define EMAC_MACINTMASKSET 0xB8 222#define EMAC_MACINTMASKSET 0xB8
@@ -256,12 +243,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
256#define EMAC_MACADDRHI 0x504 243#define EMAC_MACADDRHI 0x504
257#define EMAC_MACINDEX 0x508 244#define EMAC_MACINDEX 0x508
258 245
259/* EMAC HDP and Completion registors */
260#define EMAC_TXHDP(ch) (0x600 + (ch * 4))
261#define EMAC_RXHDP(ch) (0x620 + (ch * 4))
262#define EMAC_TXCP(ch) (0x640 + (ch * 4))
263#define EMAC_RXCP(ch) (0x660 + (ch * 4))
264
265/* EMAC statistics registers */ 246/* EMAC statistics registers */
266#define EMAC_RXGOODFRAMES 0x200 247#define EMAC_RXGOODFRAMES 0x200
267#define EMAC_RXBCASTFRAMES 0x204 248#define EMAC_RXBCASTFRAMES 0x204
@@ -303,25 +284,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
303#define EMAC_DM644X_INTMIN_INTVL 0x1 284#define EMAC_DM644X_INTMIN_INTVL 0x1
304#define EMAC_DM644X_INTMAX_INTVL (EMAC_DM644X_EWINTCNT_MASK) 285#define EMAC_DM644X_INTMAX_INTVL (EMAC_DM644X_EWINTCNT_MASK)
305 286
306/* EMAC MDIO related */
307/* Mask & Control defines */
308#define MDIO_CONTROL_CLKDIV (0xFF)
309#define MDIO_CONTROL_ENABLE BIT(30)
310#define MDIO_USERACCESS_GO BIT(31)
311#define MDIO_USERACCESS_WRITE BIT(30)
312#define MDIO_USERACCESS_READ (0)
313#define MDIO_USERACCESS_REGADR (0x1F << 21)
314#define MDIO_USERACCESS_PHYADR (0x1F << 16)
315#define MDIO_USERACCESS_DATA (0xFFFF)
316#define MDIO_USERPHYSEL_LINKSEL BIT(7)
317#define MDIO_VER_MODID (0xFFFF << 16)
318#define MDIO_VER_REVMAJ (0xFF << 8)
319#define MDIO_VER_REVMIN (0xFF)
320
321#define MDIO_USERACCESS(inst) (0x80 + (inst * 8))
322#define MDIO_USERPHYSEL(inst) (0x84 + (inst * 8))
323#define MDIO_CONTROL (0x04)
324
325/* EMAC DM646X control module registers */ 287/* EMAC DM646X control module registers */
326#define EMAC_DM646X_CMINTCTRL 0x0C 288#define EMAC_DM646X_CMINTCTRL 0x0C
327#define EMAC_DM646X_CMRXINTEN 0x14 289#define EMAC_DM646X_CMRXINTEN 0x14
@@ -345,120 +307,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
345/* EMAC Stats Clear Mask */ 307/* EMAC Stats Clear Mask */
346#define EMAC_STATS_CLR_MASK (0xFFFFFFFF) 308#define EMAC_STATS_CLR_MASK (0xFFFFFFFF)
347 309
348/** net_buf_obj: EMAC network bufferdata structure
349 *
350 * EMAC network buffer data structure
351 */
352struct emac_netbufobj {
353 void *buf_token;
354 char *data_ptr;
355 int length;
356};
357
358/** net_pkt_obj: EMAC network packet data structure
359 *
360 * EMAC network packet data structure - supports buffer list (for future)
361 */
362struct emac_netpktobj {
363 void *pkt_token; /* data token may hold tx/rx chan id */
364 struct emac_netbufobj *buf_list; /* array of network buffer objects */
365 int num_bufs;
366 int pkt_length;
367};
368
369/** emac_tx_bd: EMAC TX Buffer descriptor data structure
370 *
371 * EMAC TX Buffer descriptor data structure
372 */
373struct emac_tx_bd {
374 int h_next;
375 int buff_ptr;
376 int off_b_len;
377 int mode; /* SOP, EOP, ownership, EOQ, teardown,Qstarv, length */
378 struct emac_tx_bd __iomem *next;
379 void *buf_token;
380};
381
382/** emac_txch: EMAC TX Channel data structure
383 *
384 * EMAC TX Channel data structure
385 */
386struct emac_txch {
387 /* Config related */
388 u32 num_bd;
389 u32 service_max;
390
391 /* CPPI specific */
392 u32 alloc_size;
393 void __iomem *bd_mem;
394 struct emac_tx_bd __iomem *bd_pool_head;
395 struct emac_tx_bd __iomem *active_queue_head;
396 struct emac_tx_bd __iomem *active_queue_tail;
397 struct emac_tx_bd __iomem *last_hw_bdprocessed;
398 u32 queue_active;
399 u32 teardown_pending;
400 u32 *tx_complete;
401
402 /** statistics */
403 u32 proc_count; /* TX: # of times emac_tx_bdproc is called */
404 u32 mis_queued_packets;
405 u32 queue_reinit;
406 u32 end_of_queue_add;
407 u32 out_of_tx_bd;
408 u32 no_active_pkts; /* IRQ when there were no packets to process */
409 u32 active_queue_count;
410};
411
412/** emac_rx_bd: EMAC RX Buffer descriptor data structure
413 *
414 * EMAC RX Buffer descriptor data structure
415 */
416struct emac_rx_bd {
417 int h_next;
418 int buff_ptr;
419 int off_b_len;
420 int mode;
421 struct emac_rx_bd __iomem *next;
422 void *data_ptr;
423 void *buf_token;
424};
425
426/** emac_rxch: EMAC RX Channel data structure
427 *
428 * EMAC RX Channel data structure
429 */
430struct emac_rxch {
431 /* configuration info */
432 u32 num_bd;
433 u32 service_max;
434 u32 buf_size;
435 char mac_addr[6];
436
437 /** CPPI specific */
438 u32 alloc_size;
439 void __iomem *bd_mem;
440 struct emac_rx_bd __iomem *bd_pool_head;
441 struct emac_rx_bd __iomem *active_queue_head;
442 struct emac_rx_bd __iomem *active_queue_tail;
443 u32 queue_active;
444 u32 teardown_pending;
445
446 /* packet and buffer objects */
447 struct emac_netpktobj pkt_queue;
448 struct emac_netbufobj buf_queue;
449
450 /** statistics */
451 u32 proc_count; /* number of times emac_rx_bdproc is called */
452 u32 processed_bd;
453 u32 recycled_bd;
454 u32 out_of_rx_bd;
455 u32 out_of_rx_buffers;
456 u32 queue_reinit;
457 u32 end_of_queue_add;
458 u32 end_of_queue;
459 u32 mis_queued_packets;
460};
461
462/* emac_priv: EMAC private data structure 310/* emac_priv: EMAC private data structure
463 * 311 *
464 * EMAC adapter private data structure 312 * EMAC adapter private data structure
@@ -469,17 +317,13 @@ struct emac_priv {
469 struct platform_device *pdev; 317 struct platform_device *pdev;
470 struct napi_struct napi; 318 struct napi_struct napi;
471 char mac_addr[6]; 319 char mac_addr[6];
472 spinlock_t tx_lock;
473 spinlock_t rx_lock;
474 void __iomem *remap_addr; 320 void __iomem *remap_addr;
475 u32 emac_base_phys; 321 u32 emac_base_phys;
476 void __iomem *emac_base; 322 void __iomem *emac_base;
477 void __iomem *ctrl_base; 323 void __iomem *ctrl_base;
478 void __iomem *emac_ctrl_ram; 324 struct cpdma_ctlr *dma;
479 u32 ctrl_ram_size; 325 struct cpdma_chan *txchan;
480 u32 hw_ram_addr; 326 struct cpdma_chan *rxchan;
481 struct emac_txch *txch[EMAC_DEF_MAX_TX_CH];
482 struct emac_rxch *rxch[EMAC_DEF_MAX_RX_CH];
483 u32 link; /* 1=link on, 0=link off */ 327 u32 link; /* 1=link on, 0=link off */
484 u32 speed; /* 0=Auto Neg, 1=No PHY, 10,100, 1000 - mbps */ 328 u32 speed; /* 0=Auto Neg, 1=No PHY, 10,100, 1000 - mbps */
485 u32 duplex; /* Link duplex: 0=Half, 1=Full */ 329 u32 duplex; /* Link duplex: 0=Half, 1=Full */
@@ -493,13 +337,7 @@ struct emac_priv {
493 u32 mac_hash2; 337 u32 mac_hash2;
494 u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS]; 338 u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS];
495 u32 rx_addr_type; 339 u32 rx_addr_type;
496 /* periodic timer required for MDIO polling */ 340 const char *phy_id;
497 struct timer_list periodic_timer;
498 u32 periodic_ticks;
499 u32 timer_active;
500 u32 phy_mask;
501 /* mii_bus,phy members */
502 struct mii_bus *mii_bus;
503 struct phy_device *phydev; 341 struct phy_device *phydev;
504 spinlock_t lock; 342 spinlock_t lock;
505 /*platform specific members*/ 343 /*platform specific members*/
@@ -510,19 +348,6 @@ struct emac_priv {
510/* clock frequency for EMAC */ 348/* clock frequency for EMAC */
511static struct clk *emac_clk; 349static struct clk *emac_clk;
512static unsigned long emac_bus_frequency; 350static unsigned long emac_bus_frequency;
513static unsigned long mdio_max_freq;
514
515#define emac_virt_to_phys(addr, priv) \
516 (((u32 __force)(addr) - (u32 __force)(priv->emac_ctrl_ram)) \
517 + priv->hw_ram_addr)
518
519/* Cache macros - Packet buffers would be from skb pool which is cached */
520#define EMAC_VIRT_NOCACHE(addr) (addr)
521
522/* DM644x does not have BD's in cached memory - so no cache functions */
523#define BD_CACHE_INVALIDATE(addr, size)
524#define BD_CACHE_WRITEBACK(addr, size)
525#define BD_CACHE_WRITEBACK_INVALIDATE(addr, size)
526 351
527/* EMAC TX Host Error description strings */ 352/* EMAC TX Host Error description strings */
528static char *emac_txhost_errcodes[16] = { 353static char *emac_txhost_errcodes[16] = {
@@ -548,9 +373,6 @@ static char *emac_rxhost_errcodes[16] = {
548#define emac_ctrl_read(reg) ioread32((priv->ctrl_base + (reg))) 373#define emac_ctrl_read(reg) ioread32((priv->ctrl_base + (reg)))
549#define emac_ctrl_write(reg, val) iowrite32(val, (priv->ctrl_base + (reg))) 374#define emac_ctrl_write(reg, val) iowrite32(val, (priv->ctrl_base + (reg)))
550 375
551#define emac_mdio_read(reg) ioread32(bus->priv + (reg))
552#define emac_mdio_write(reg, val) iowrite32(val, (bus->priv + (reg)))
553
554/** 376/**
555 * emac_dump_regs: Dump important EMAC registers to debug terminal 377 * emac_dump_regs: Dump important EMAC registers to debug terminal
556 * @priv: The DaVinci EMAC private adapter structure 378 * @priv: The DaVinci EMAC private adapter structure
@@ -569,20 +391,6 @@ static void emac_dump_regs(struct emac_priv *priv)
569 emac_ctrl_read(EMAC_CTRL_EWCTL), 391 emac_ctrl_read(EMAC_CTRL_EWCTL),
570 emac_ctrl_read(EMAC_CTRL_EWINTTCNT)); 392 emac_ctrl_read(EMAC_CTRL_EWINTTCNT));
571 } 393 }
572 dev_info(emac_dev, "EMAC: TXID: %08X %s, RXID: %08X %s\n",
573 emac_read(EMAC_TXIDVER),
574 ((emac_read(EMAC_TXCONTROL)) ? "enabled" : "disabled"),
575 emac_read(EMAC_RXIDVER),
576 ((emac_read(EMAC_RXCONTROL)) ? "enabled" : "disabled"));
577 dev_info(emac_dev, "EMAC: TXIntRaw:%08X, TxIntMasked: %08X, "\
578 "TxIntMasSet: %08X\n", emac_read(EMAC_TXINTSTATRAW),
579 emac_read(EMAC_TXINTSTATMASKED), emac_read(EMAC_TXINTMASKSET));
580 dev_info(emac_dev, "EMAC: RXIntRaw:%08X, RxIntMasked: %08X, "\
581 "RxIntMasSet: %08X\n", emac_read(EMAC_RXINTSTATRAW),
582 emac_read(EMAC_RXINTSTATMASKED), emac_read(EMAC_RXINTMASKSET));
583 dev_info(emac_dev, "EMAC: MacIntRaw:%08X, MacIntMasked: %08X, "\
584 "MacInVector=%08X\n", emac_read(EMAC_MACINTSTATRAW),
585 emac_read(EMAC_MACINTSTATMASKED), emac_read(EMAC_MACINVECTOR));
586 dev_info(emac_dev, "EMAC: EmuControl:%08X, FifoControl: %08X\n", 394 dev_info(emac_dev, "EMAC: EmuControl:%08X, FifoControl: %08X\n",
587 emac_read(EMAC_EMCONTROL), emac_read(EMAC_FIFOCONTROL)); 395 emac_read(EMAC_EMCONTROL), emac_read(EMAC_FIFOCONTROL));
588 dev_info(emac_dev, "EMAC: MBPEnable:%08X, RXUnicastSet: %08X, "\ 396 dev_info(emac_dev, "EMAC: MBPEnable:%08X, RXUnicastSet: %08X, "\
@@ -591,8 +399,6 @@ static void emac_dump_regs(struct emac_priv *priv)
591 dev_info(emac_dev, "EMAC: MacControl:%08X, MacStatus: %08X, "\ 399 dev_info(emac_dev, "EMAC: MacControl:%08X, MacStatus: %08X, "\
592 "MacConfig=%08X\n", emac_read(EMAC_MACCONTROL), 400 "MacConfig=%08X\n", emac_read(EMAC_MACCONTROL),
593 emac_read(EMAC_MACSTATUS), emac_read(EMAC_MACCONFIG)); 401 emac_read(EMAC_MACSTATUS), emac_read(EMAC_MACCONFIG));
594 dev_info(emac_dev, "EMAC: TXHDP[0]:%08X, RXHDP[0]: %08X\n",
595 emac_read(EMAC_TXHDP(0)), emac_read(EMAC_RXHDP(0)));
596 dev_info(emac_dev, "EMAC Statistics\n"); 402 dev_info(emac_dev, "EMAC Statistics\n");
597 dev_info(emac_dev, "EMAC: rx_good_frames:%d\n", 403 dev_info(emac_dev, "EMAC: rx_good_frames:%d\n",
598 emac_read(EMAC_RXGOODFRAMES)); 404 emac_read(EMAC_RXGOODFRAMES));
@@ -654,11 +460,10 @@ static void emac_dump_regs(struct emac_priv *priv)
654 emac_read(EMAC_RXMOFOVERRUNS)); 460 emac_read(EMAC_RXMOFOVERRUNS));
655 dev_info(emac_dev, "EMAC: rx_dma_overruns:%d\n", 461 dev_info(emac_dev, "EMAC: rx_dma_overruns:%d\n",
656 emac_read(EMAC_RXDMAOVERRUNS)); 462 emac_read(EMAC_RXDMAOVERRUNS));
463
464 cpdma_ctlr_dump(priv->dma);
657} 465}
658 466
659/*************************************************************************
660 * EMAC MDIO/Phy Functionality
661 *************************************************************************/
662/** 467/**
663 * emac_get_drvinfo: Get EMAC driver information 468 * emac_get_drvinfo: Get EMAC driver information
664 * @ndev: The DaVinci EMAC network adapter 469 * @ndev: The DaVinci EMAC network adapter
@@ -686,7 +491,7 @@ static int emac_get_settings(struct net_device *ndev,
686 struct ethtool_cmd *ecmd) 491 struct ethtool_cmd *ecmd)
687{ 492{
688 struct emac_priv *priv = netdev_priv(ndev); 493 struct emac_priv *priv = netdev_priv(ndev);
689 if (priv->phy_mask) 494 if (priv->phydev)
690 return phy_ethtool_gset(priv->phydev, ecmd); 495 return phy_ethtool_gset(priv->phydev, ecmd);
691 else 496 else
692 return -EOPNOTSUPP; 497 return -EOPNOTSUPP;
@@ -704,7 +509,7 @@ static int emac_get_settings(struct net_device *ndev,
704static int emac_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) 509static int emac_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
705{ 510{
706 struct emac_priv *priv = netdev_priv(ndev); 511 struct emac_priv *priv = netdev_priv(ndev);
707 if (priv->phy_mask) 512 if (priv->phydev)
708 return phy_ethtool_sset(priv->phydev, ecmd); 513 return phy_ethtool_sset(priv->phydev, ecmd);
709 else 514 else
710 return -EOPNOTSUPP; 515 return -EOPNOTSUPP;
@@ -841,7 +646,7 @@ static void emac_update_phystatus(struct emac_priv *priv)
841 mac_control = emac_read(EMAC_MACCONTROL); 646 mac_control = emac_read(EMAC_MACCONTROL);
842 cur_duplex = (mac_control & EMAC_MACCONTROL_FULLDUPLEXEN) ? 647 cur_duplex = (mac_control & EMAC_MACCONTROL_FULLDUPLEXEN) ?
843 DUPLEX_FULL : DUPLEX_HALF; 648 DUPLEX_FULL : DUPLEX_HALF;
844 if (priv->phy_mask) 649 if (priv->phydev)
845 new_duplex = priv->phydev->duplex; 650 new_duplex = priv->phydev->duplex;
846 else 651 else
847 new_duplex = DUPLEX_FULL; 652 new_duplex = DUPLEX_FULL;
@@ -1184,371 +989,68 @@ static irqreturn_t emac_irq(int irq, void *dev_id)
1184 return IRQ_HANDLED; 989 return IRQ_HANDLED;
1185} 990}
1186 991
1187/** EMAC on-chip buffer descriptor memory 992static struct sk_buff *emac_rx_alloc(struct emac_priv *priv)
1188 *
1189 * WARNING: Please note that the on chip memory is used for both TX and RX
1190 * buffer descriptor queues and is equally divided between TX and RX desc's
1191 * If the number of TX or RX descriptors change this memory pointers need
1192 * to be adjusted. If external memory is allocated then these pointers can
1193 * pointer to the memory
1194 *
1195 */
1196#define EMAC_TX_BD_MEM(priv) ((priv)->emac_ctrl_ram)
1197#define EMAC_RX_BD_MEM(priv) ((priv)->emac_ctrl_ram + \
1198 (((priv)->ctrl_ram_size) >> 1))
1199
1200/**
1201 * emac_init_txch: TX channel initialization
1202 * @priv: The DaVinci EMAC private adapter structure
1203 * @ch: RX channel number
1204 *
1205 * Called during device init to setup a TX channel (allocate buffer desc
1206 * create free pool and keep ready for transmission
1207 *
1208 * Returns success(0) or mem alloc failures error code
1209 */
1210static int emac_init_txch(struct emac_priv *priv, u32 ch)
1211{
1212 struct device *emac_dev = &priv->ndev->dev;
1213 u32 cnt, bd_size;
1214 void __iomem *mem;
1215 struct emac_tx_bd __iomem *curr_bd;
1216 struct emac_txch *txch = NULL;
1217
1218 txch = kzalloc(sizeof(struct emac_txch), GFP_KERNEL);
1219 if (NULL == txch) {
1220 dev_err(emac_dev, "DaVinci EMAC: TX Ch mem alloc failed");
1221 return -ENOMEM;
1222 }
1223 priv->txch[ch] = txch;
1224 txch->service_max = EMAC_DEF_TX_MAX_SERVICE;
1225 txch->active_queue_head = NULL;
1226 txch->active_queue_tail = NULL;
1227 txch->queue_active = 0;
1228 txch->teardown_pending = 0;
1229
1230 /* allocate memory for TX CPPI channel on a 4 byte boundry */
1231 txch->tx_complete = kzalloc(txch->service_max * sizeof(u32),
1232 GFP_KERNEL);
1233 if (NULL == txch->tx_complete) {
1234 dev_err(emac_dev, "DaVinci EMAC: Tx service mem alloc failed");
1235 kfree(txch);
1236 return -ENOMEM;
1237 }
1238
1239 /* allocate buffer descriptor pool align every BD on four word
1240 * boundry for future requirements */
1241 bd_size = (sizeof(struct emac_tx_bd) + 0xF) & ~0xF;
1242 txch->num_bd = (priv->ctrl_ram_size >> 1) / bd_size;
1243 txch->alloc_size = (((bd_size * txch->num_bd) + 0xF) & ~0xF);
1244
1245 /* alloc TX BD memory */
1246 txch->bd_mem = EMAC_TX_BD_MEM(priv);
1247 __memzero((void __force *)txch->bd_mem, txch->alloc_size);
1248
1249 /* initialize the BD linked list */
1250 mem = (void __force __iomem *)
1251 (((u32 __force) txch->bd_mem + 0xF) & ~0xF);
1252 txch->bd_pool_head = NULL;
1253 for (cnt = 0; cnt < txch->num_bd; cnt++) {
1254 curr_bd = mem + (cnt * bd_size);
1255 curr_bd->next = txch->bd_pool_head;
1256 txch->bd_pool_head = curr_bd;
1257 }
1258
1259 /* reset statistics counters */
1260 txch->out_of_tx_bd = 0;
1261 txch->no_active_pkts = 0;
1262 txch->active_queue_count = 0;
1263
1264 return 0;
1265}
1266
1267/**
1268 * emac_cleanup_txch: Book-keep function to clean TX channel resources
1269 * @priv: The DaVinci EMAC private adapter structure
1270 * @ch: TX channel number
1271 *
1272 * Called to clean up TX channel resources
1273 *
1274 */
1275static void emac_cleanup_txch(struct emac_priv *priv, u32 ch)
1276{ 993{
1277 struct emac_txch *txch = priv->txch[ch]; 994 struct sk_buff *skb = dev_alloc_skb(priv->rx_buf_size);
1278 995 if (WARN_ON(!skb))
1279 if (txch) { 996 return NULL;
1280 if (txch->bd_mem) 997 skb->dev = priv->ndev;
1281 txch->bd_mem = NULL; 998 skb_reserve(skb, NET_IP_ALIGN);
1282 kfree(txch->tx_complete); 999 return skb;
1283 kfree(txch);
1284 priv->txch[ch] = NULL;
1285 }
1286} 1000}
1287 1001
1288/** 1002static void emac_rx_handler(void *token, int len, int status)
1289 * emac_net_tx_complete: TX packet completion function
1290 * @priv: The DaVinci EMAC private adapter structure
1291 * @net_data_tokens: packet token - skb pointer
1292 * @num_tokens: number of skb's to free
1293 * @ch: TX channel number
1294 *
1295 * Frees the skb once packet is transmitted
1296 *
1297 */
1298static int emac_net_tx_complete(struct emac_priv *priv,
1299 void **net_data_tokens,
1300 int num_tokens, u32 ch)
1301{ 1003{
1302 struct net_device *ndev = priv->ndev; 1004 struct sk_buff *skb = token;
1303 u32 cnt; 1005 struct net_device *ndev = skb->dev;
1304 1006 struct emac_priv *priv = netdev_priv(ndev);
1305 if (unlikely(num_tokens && netif_queue_stopped(ndev))) 1007 struct device *emac_dev = &ndev->dev;
1306 netif_start_queue(ndev); 1008 int ret;
1307 for (cnt = 0; cnt < num_tokens; cnt++) { 1009
1308 struct sk_buff *skb = (struct sk_buff *)net_data_tokens[cnt]; 1010 /* free and bail if we are shutting down */
1309 if (skb == NULL) 1011 if (unlikely(!netif_running(ndev))) {
1310 continue;
1311 ndev->stats.tx_packets++;
1312 ndev->stats.tx_bytes += skb->len;
1313 dev_kfree_skb_any(skb); 1012 dev_kfree_skb_any(skb);
1013 return;
1314 } 1014 }
1315 return 0;
1316}
1317
1318/**
1319 * emac_txch_teardown: TX channel teardown
1320 * @priv: The DaVinci EMAC private adapter structure
1321 * @ch: TX channel number
1322 *
1323 * Called to teardown TX channel
1324 *
1325 */
1326static void emac_txch_teardown(struct emac_priv *priv, u32 ch)
1327{
1328 struct device *emac_dev = &priv->ndev->dev;
1329 u32 teardown_cnt = 0xFFFFFFF0; /* Some high value */
1330 struct emac_txch *txch = priv->txch[ch];
1331 struct emac_tx_bd __iomem *curr_bd;
1332
1333 while ((emac_read(EMAC_TXCP(ch)) & EMAC_TEARDOWN_VALUE) !=
1334 EMAC_TEARDOWN_VALUE) {
1335 /* wait till tx teardown complete */
1336 cpu_relax(); /* TODO: check if this helps ... */
1337 --teardown_cnt;
1338 if (0 == teardown_cnt) {
1339 dev_err(emac_dev, "EMAC: TX teardown aborted\n");
1340 break;
1341 }
1342 }
1343 emac_write(EMAC_TXCP(ch), EMAC_TEARDOWN_VALUE);
1344
1345 /* process sent packets and return skb's to upper layer */
1346 if (1 == txch->queue_active) {
1347 curr_bd = txch->active_queue_head;
1348 while (curr_bd != NULL) {
1349 dma_unmap_single(emac_dev, curr_bd->buff_ptr,
1350 curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE,
1351 DMA_TO_DEVICE);
1352
1353 emac_net_tx_complete(priv, (void __force *)
1354 &curr_bd->buf_token, 1, ch);
1355 if (curr_bd != txch->active_queue_tail)
1356 curr_bd = curr_bd->next;
1357 else
1358 break;
1359 }
1360 txch->bd_pool_head = txch->active_queue_head;
1361 txch->active_queue_head =
1362 txch->active_queue_tail = NULL;
1363 }
1364}
1365 1015
1366/** 1016 /* recycle on recieve error */
1367 * emac_stop_txch: Stop TX channel operation 1017 if (status < 0) {
1368 * @priv: The DaVinci EMAC private adapter structure 1018 ndev->stats.rx_errors++;
1369 * @ch: TX channel number 1019 goto recycle;
1370 *
1371 * Called to stop TX channel operation
1372 *
1373 */
1374static void emac_stop_txch(struct emac_priv *priv, u32 ch)
1375{
1376 struct emac_txch *txch = priv->txch[ch];
1377
1378 if (txch) {
1379 txch->teardown_pending = 1;
1380 emac_write(EMAC_TXTEARDOWN, 0);
1381 emac_txch_teardown(priv, ch);
1382 txch->teardown_pending = 0;
1383 emac_write(EMAC_TXINTMASKCLEAR, BIT(ch));
1384 } 1020 }
1385}
1386 1021
1387/** 1022 /* feed received packet up the stack */
1388 * emac_tx_bdproc: TX buffer descriptor (packet) processing 1023 skb_put(skb, len);
1389 * @priv: The DaVinci EMAC private adapter structure 1024 skb->protocol = eth_type_trans(skb, ndev);
1390 * @ch: TX channel number to process buffer descriptors for 1025 netif_receive_skb(skb);
1391 * @budget: number of packets allowed to process 1026 ndev->stats.rx_bytes += len;
1392 * @pending: indication to caller that packets are pending to process 1027 ndev->stats.rx_packets++;
1393 *
1394 * Processes TX buffer descriptors after packets are transmitted - checks
1395 * ownership bit on the TX * descriptor and requeues it to free pool & frees
1396 * the SKB buffer. Only "budget" number of packets are processed and
1397 * indication of pending packets provided to the caller
1398 *
1399 * Returns number of packets processed
1400 */
1401static int emac_tx_bdproc(struct emac_priv *priv, u32 ch, u32 budget)
1402{
1403 struct device *emac_dev = &priv->ndev->dev;
1404 unsigned long flags;
1405 u32 frame_status;
1406 u32 pkts_processed = 0;
1407 u32 tx_complete_cnt = 0;
1408 struct emac_tx_bd __iomem *curr_bd;
1409 struct emac_txch *txch = priv->txch[ch];
1410 u32 *tx_complete_ptr = txch->tx_complete;
1411
1412 if (unlikely(1 == txch->teardown_pending)) {
1413 if (netif_msg_tx_err(priv) && net_ratelimit()) {
1414 dev_err(emac_dev, "DaVinci EMAC:emac_tx_bdproc: "\
1415 "teardown pending\n");
1416 }
1417 return 0; /* dont handle any pkt completions */
1418 }
1419 1028
1420 ++txch->proc_count; 1029 /* alloc a new packet for receive */
1421 spin_lock_irqsave(&priv->tx_lock, flags); 1030 skb = emac_rx_alloc(priv);
1422 curr_bd = txch->active_queue_head; 1031 if (!skb) {
1423 if (NULL == curr_bd) { 1032 if (netif_msg_rx_err(priv) && net_ratelimit())
1424 emac_write(EMAC_TXCP(ch), 1033 dev_err(emac_dev, "failed rx buffer alloc\n");
1425 emac_virt_to_phys(txch->last_hw_bdprocessed, priv)); 1034 return;
1426 txch->no_active_pkts++;
1427 spin_unlock_irqrestore(&priv->tx_lock, flags);
1428 return 0;
1429 } 1035 }
1430 BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
1431 frame_status = curr_bd->mode;
1432 while ((curr_bd) &&
1433 ((frame_status & EMAC_CPPI_OWNERSHIP_BIT) == 0) &&
1434 (pkts_processed < budget)) {
1435 emac_write(EMAC_TXCP(ch), emac_virt_to_phys(curr_bd, priv));
1436 txch->active_queue_head = curr_bd->next;
1437 if (frame_status & EMAC_CPPI_EOQ_BIT) {
1438 if (curr_bd->next) { /* misqueued packet */
1439 emac_write(EMAC_TXHDP(ch), curr_bd->h_next);
1440 ++txch->mis_queued_packets;
1441 } else {
1442 txch->queue_active = 0; /* end of queue */
1443 }
1444 }
1445 1036
1446 dma_unmap_single(emac_dev, curr_bd->buff_ptr, 1037recycle:
1447 curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE, 1038 ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
1448 DMA_TO_DEVICE); 1039 skb_tailroom(skb), GFP_KERNEL);
1449 1040 if (WARN_ON(ret < 0))
1450 *tx_complete_ptr = (u32) curr_bd->buf_token; 1041 dev_kfree_skb_any(skb);
1451 ++tx_complete_ptr;
1452 ++tx_complete_cnt;
1453 curr_bd->next = txch->bd_pool_head;
1454 txch->bd_pool_head = curr_bd;
1455 --txch->active_queue_count;
1456 pkts_processed++;
1457 txch->last_hw_bdprocessed = curr_bd;
1458 curr_bd = txch->active_queue_head;
1459 if (curr_bd) {
1460 BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
1461 frame_status = curr_bd->mode;
1462 }
1463 } /* end of pkt processing loop */
1464
1465 emac_net_tx_complete(priv,
1466 (void *)&txch->tx_complete[0],
1467 tx_complete_cnt, ch);
1468 spin_unlock_irqrestore(&priv->tx_lock, flags);
1469 return pkts_processed;
1470} 1042}
1471 1043
1472#define EMAC_ERR_TX_OUT_OF_BD -1 1044static void emac_tx_handler(void *token, int len, int status)
1473
1474/**
1475 * emac_send: EMAC Transmit function (internal)
1476 * @priv: The DaVinci EMAC private adapter structure
1477 * @pkt: packet pointer (contains skb ptr)
1478 * @ch: TX channel number
1479 *
1480 * Called by the transmit function to queue the packet in EMAC hardware queue
1481 *
1482 * Returns success(0) or error code (typically out of desc's)
1483 */
1484static int emac_send(struct emac_priv *priv, struct emac_netpktobj *pkt, u32 ch)
1485{ 1045{
1486 unsigned long flags; 1046 struct sk_buff *skb = token;
1487 struct emac_tx_bd __iomem *curr_bd; 1047 struct net_device *ndev = skb->dev;
1488 struct emac_txch *txch;
1489 struct emac_netbufobj *buf_list;
1490
1491 txch = priv->txch[ch];
1492 buf_list = pkt->buf_list; /* get handle to the buffer array */
1493
1494 /* check packet size and pad if short */
1495 if (pkt->pkt_length < EMAC_DEF_MIN_ETHPKTSIZE) {
1496 buf_list->length += (EMAC_DEF_MIN_ETHPKTSIZE - pkt->pkt_length);
1497 pkt->pkt_length = EMAC_DEF_MIN_ETHPKTSIZE;
1498 }
1499 1048
1500 spin_lock_irqsave(&priv->tx_lock, flags); 1049 if (unlikely(netif_queue_stopped(ndev)))
1501 curr_bd = txch->bd_pool_head; 1050 netif_start_queue(ndev);
1502 if (curr_bd == NULL) { 1051 ndev->stats.tx_packets++;
1503 txch->out_of_tx_bd++; 1052 ndev->stats.tx_bytes += len;
1504 spin_unlock_irqrestore(&priv->tx_lock, flags); 1053 dev_kfree_skb_any(skb);
1505 return EMAC_ERR_TX_OUT_OF_BD;
1506 }
1507
1508 txch->bd_pool_head = curr_bd->next;
1509 curr_bd->buf_token = buf_list->buf_token;
1510 curr_bd->buff_ptr = dma_map_single(&priv->ndev->dev, buf_list->data_ptr,
1511 buf_list->length, DMA_TO_DEVICE);
1512 curr_bd->off_b_len = buf_list->length;
1513 curr_bd->h_next = 0;
1514 curr_bd->next = NULL;
1515 curr_bd->mode = (EMAC_CPPI_SOP_BIT | EMAC_CPPI_OWNERSHIP_BIT |
1516 EMAC_CPPI_EOP_BIT | pkt->pkt_length);
1517
1518 /* flush the packet from cache if write back cache is present */
1519 BD_CACHE_WRITEBACK_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
1520
1521 /* send the packet */
1522 if (txch->active_queue_head == NULL) {
1523 txch->active_queue_head = curr_bd;
1524 txch->active_queue_tail = curr_bd;
1525 if (1 != txch->queue_active) {
1526 emac_write(EMAC_TXHDP(ch),
1527 emac_virt_to_phys(curr_bd, priv));
1528 txch->queue_active = 1;
1529 }
1530 ++txch->queue_reinit;
1531 } else {
1532 register struct emac_tx_bd __iomem *tail_bd;
1533 register u32 frame_status;
1534
1535 tail_bd = txch->active_queue_tail;
1536 tail_bd->next = curr_bd;
1537 txch->active_queue_tail = curr_bd;
1538 tail_bd = EMAC_VIRT_NOCACHE(tail_bd);
1539 tail_bd->h_next = (int)emac_virt_to_phys(curr_bd, priv);
1540 frame_status = tail_bd->mode;
1541 if (frame_status & EMAC_CPPI_EOQ_BIT) {
1542 emac_write(EMAC_TXHDP(ch),
1543 emac_virt_to_phys(curr_bd, priv));
1544 frame_status &= ~(EMAC_CPPI_EOQ_BIT);
1545 tail_bd->mode = frame_status;
1546 ++txch->end_of_queue_add;
1547 }
1548 }
1549 txch->active_queue_count++;
1550 spin_unlock_irqrestore(&priv->tx_lock, flags);
1551 return 0;
1552} 1054}
1553 1055
1554/** 1056/**
@@ -1565,42 +1067,36 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
1565{ 1067{
1566 struct device *emac_dev = &ndev->dev; 1068 struct device *emac_dev = &ndev->dev;
1567 int ret_code; 1069 int ret_code;
1568 struct emac_netbufobj tx_buf; /* buffer obj-only single frame support */
1569 struct emac_netpktobj tx_packet; /* packet object */
1570 struct emac_priv *priv = netdev_priv(ndev); 1070 struct emac_priv *priv = netdev_priv(ndev);
1571 1071
1572 /* If no link, return */ 1072 /* If no link, return */
1573 if (unlikely(!priv->link)) { 1073 if (unlikely(!priv->link)) {
1574 if (netif_msg_tx_err(priv) && net_ratelimit()) 1074 if (netif_msg_tx_err(priv) && net_ratelimit())
1575 dev_err(emac_dev, "DaVinci EMAC: No link to transmit"); 1075 dev_err(emac_dev, "DaVinci EMAC: No link to transmit");
1576 return NETDEV_TX_BUSY; 1076 goto fail_tx;
1077 }
1078
1079 ret_code = skb_padto(skb, EMAC_DEF_MIN_ETHPKTSIZE);
1080 if (unlikely(ret_code < 0)) {
1081 if (netif_msg_tx_err(priv) && net_ratelimit())
1082 dev_err(emac_dev, "DaVinci EMAC: packet pad failed");
1083 goto fail_tx;
1577 } 1084 }
1578 1085
1579 /* Build the buffer and packet objects - Since only single fragment is 1086 ret_code = cpdma_chan_submit(priv->txchan, skb, skb->data, skb->len,
1580 * supported, need not set length and token in both packet & object. 1087 GFP_KERNEL);
1581 * Doing so for completeness sake & to show that this needs to be done
1582 * in multifragment case
1583 */
1584 tx_packet.buf_list = &tx_buf;
1585 tx_packet.num_bufs = 1; /* only single fragment supported */
1586 tx_packet.pkt_length = skb->len;
1587 tx_packet.pkt_token = (void *)skb;
1588 tx_buf.length = skb->len;
1589 tx_buf.buf_token = (void *)skb;
1590 tx_buf.data_ptr = skb->data;
1591 ret_code = emac_send(priv, &tx_packet, EMAC_DEF_TX_CH);
1592 if (unlikely(ret_code != 0)) { 1088 if (unlikely(ret_code != 0)) {
1593 if (ret_code == EMAC_ERR_TX_OUT_OF_BD) { 1089 if (netif_msg_tx_err(priv) && net_ratelimit())
1594 if (netif_msg_tx_err(priv) && net_ratelimit()) 1090 dev_err(emac_dev, "DaVinci EMAC: desc submit failed");
1595 dev_err(emac_dev, "DaVinci EMAC: xmit() fatal"\ 1091 goto fail_tx;
1596 " err. Out of TX BD's");
1597 netif_stop_queue(priv->ndev);
1598 }
1599 ndev->stats.tx_dropped++;
1600 return NETDEV_TX_BUSY;
1601 } 1092 }
1602 1093
1603 return NETDEV_TX_OK; 1094 return NETDEV_TX_OK;
1095
1096fail_tx:
1097 ndev->stats.tx_dropped++;
1098 netif_stop_queue(ndev);
1099 return NETDEV_TX_BUSY;
1604} 1100}
1605 1101
1606/** 1102/**
@@ -1621,218 +1117,16 @@ static void emac_dev_tx_timeout(struct net_device *ndev)
1621 if (netif_msg_tx_err(priv)) 1117 if (netif_msg_tx_err(priv))
1622 dev_err(emac_dev, "DaVinci EMAC: xmit timeout, restarting TX"); 1118 dev_err(emac_dev, "DaVinci EMAC: xmit timeout, restarting TX");
1623 1119
1120 emac_dump_regs(priv);
1121
1624 ndev->stats.tx_errors++; 1122 ndev->stats.tx_errors++;
1625 emac_int_disable(priv); 1123 emac_int_disable(priv);
1626 emac_stop_txch(priv, EMAC_DEF_TX_CH); 1124 cpdma_chan_stop(priv->txchan);
1627 emac_cleanup_txch(priv, EMAC_DEF_TX_CH); 1125 cpdma_chan_start(priv->txchan);
1628 emac_init_txch(priv, EMAC_DEF_TX_CH);
1629 emac_write(EMAC_TXHDP(0), 0);
1630 emac_write(EMAC_TXINTMASKSET, BIT(EMAC_DEF_TX_CH));
1631 emac_int_enable(priv); 1126 emac_int_enable(priv);
1632} 1127}
1633 1128
1634/** 1129/**
1635 * emac_net_alloc_rx_buf: Allocate a skb for RX
1636 * @priv: The DaVinci EMAC private adapter structure
1637 * @buf_size: size of SKB data buffer to allocate
1638 * @data_token: data token returned (skb handle for storing in buffer desc)
1639 * @ch: RX channel number
1640 *
1641 * Called during RX channel setup - allocates skb buffer of required size
1642 * and provides the skb handle and allocated buffer data pointer to caller
1643 *
1644 * Returns skb data pointer or 0 on failure to alloc skb
1645 */
1646static void *emac_net_alloc_rx_buf(struct emac_priv *priv, int buf_size,
1647 void **data_token, u32 ch)
1648{
1649 struct net_device *ndev = priv->ndev;
1650 struct device *emac_dev = &ndev->dev;
1651 struct sk_buff *p_skb;
1652
1653 p_skb = dev_alloc_skb(buf_size);
1654 if (unlikely(NULL == p_skb)) {
1655 if (netif_msg_rx_err(priv) && net_ratelimit())
1656 dev_err(emac_dev, "DaVinci EMAC: failed to alloc skb");
1657 return NULL;
1658 }
1659
1660 /* set device pointer in skb and reserve space for extra bytes */
1661 p_skb->dev = ndev;
1662 skb_reserve(p_skb, NET_IP_ALIGN);
1663 *data_token = (void *) p_skb;
1664 return p_skb->data;
1665}
1666
1667/**
1668 * emac_init_rxch: RX channel initialization
1669 * @priv: The DaVinci EMAC private adapter structure
1670 * @ch: RX channel number
1671 * @param: mac address for RX channel
1672 *
1673 * Called during device init to setup a RX channel (allocate buffers and
1674 * buffer descriptors, create queue and keep ready for reception
1675 *
1676 * Returns success(0) or mem alloc failures error code
1677 */
1678static int emac_init_rxch(struct emac_priv *priv, u32 ch, char *param)
1679{
1680 struct device *emac_dev = &priv->ndev->dev;
1681 u32 cnt, bd_size;
1682 void __iomem *mem;
1683 struct emac_rx_bd __iomem *curr_bd;
1684 struct emac_rxch *rxch = NULL;
1685
1686 rxch = kzalloc(sizeof(struct emac_rxch), GFP_KERNEL);
1687 if (NULL == rxch) {
1688 dev_err(emac_dev, "DaVinci EMAC: RX Ch mem alloc failed");
1689 return -ENOMEM;
1690 }
1691 priv->rxch[ch] = rxch;
1692 rxch->buf_size = priv->rx_buf_size;
1693 rxch->service_max = EMAC_DEF_RX_MAX_SERVICE;
1694 rxch->queue_active = 0;
1695 rxch->teardown_pending = 0;
1696
1697 /* save mac address */
1698 for (cnt = 0; cnt < 6; cnt++)
1699 rxch->mac_addr[cnt] = param[cnt];
1700
1701 /* allocate buffer descriptor pool align every BD on four word
1702 * boundry for future requirements */
1703 bd_size = (sizeof(struct emac_rx_bd) + 0xF) & ~0xF;
1704 rxch->num_bd = (priv->ctrl_ram_size >> 1) / bd_size;
1705 rxch->alloc_size = (((bd_size * rxch->num_bd) + 0xF) & ~0xF);
1706 rxch->bd_mem = EMAC_RX_BD_MEM(priv);
1707 __memzero((void __force *)rxch->bd_mem, rxch->alloc_size);
1708 rxch->pkt_queue.buf_list = &rxch->buf_queue;
1709
1710 /* allocate RX buffer and initialize the BD linked list */
1711 mem = (void __force __iomem *)
1712 (((u32 __force) rxch->bd_mem + 0xF) & ~0xF);
1713 rxch->active_queue_head = NULL;
1714 rxch->active_queue_tail = mem;
1715 for (cnt = 0; cnt < rxch->num_bd; cnt++) {
1716 curr_bd = mem + (cnt * bd_size);
1717 /* for future use the last parameter contains the BD ptr */
1718 curr_bd->data_ptr = emac_net_alloc_rx_buf(priv,
1719 rxch->buf_size,
1720 (void __force **)&curr_bd->buf_token,
1721 EMAC_DEF_RX_CH);
1722 if (curr_bd->data_ptr == NULL) {
1723 dev_err(emac_dev, "DaVinci EMAC: RX buf mem alloc " \
1724 "failed for ch %d\n", ch);
1725 kfree(rxch);
1726 return -ENOMEM;
1727 }
1728
1729 /* populate the hardware descriptor */
1730 curr_bd->h_next = emac_virt_to_phys(rxch->active_queue_head,
1731 priv);
1732 curr_bd->buff_ptr = dma_map_single(emac_dev, curr_bd->data_ptr,
1733 rxch->buf_size, DMA_FROM_DEVICE);
1734 curr_bd->off_b_len = rxch->buf_size;
1735 curr_bd->mode = EMAC_CPPI_OWNERSHIP_BIT;
1736
1737 /* write back to hardware memory */
1738 BD_CACHE_WRITEBACK_INVALIDATE((u32) curr_bd,
1739 EMAC_BD_LENGTH_FOR_CACHE);
1740 curr_bd->next = rxch->active_queue_head;
1741 rxch->active_queue_head = curr_bd;
1742 }
1743
1744 /* At this point rxch->active_queue_head points to the first
1745 RX BD ready to be given to RX HDP and rxch->active_queue_tail
1746 points to the last RX BD
1747 */
1748 return 0;
1749}
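
A hedged sketch of the list-building invariant in the loop above: BDs are prepended, so each new descriptor's h_next carries the physical address of the previous head, and the loop ends with active_queue_head naming the first BD the hardware should fetch while the very first BD allocated (whose h_next is the NULL head) is the tail. Condensed, with bd_phys() as a hypothetical helper for the virt-to-phys translation:

	head = NULL;
	for (i = 0; i < num_bd; i++) {
		bd = pool + i;
		bd->h_next = bd_phys(head);	/* hardware link to the old head */
		bd->next = head;		/* software mirror of the chain */
		head = bd;
	}
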
1750
1751/**
1752 * emac_rxch_teardown: RX channel teardown
1753 * @priv: The DaVinci EMAC private adapter structure
1754 * @ch: RX channel number
1755 *
1756 * Called during device stop to tear down the RX channel
1757 *
1758 */
1759static void emac_rxch_teardown(struct emac_priv *priv, u32 ch)
1760{
1761 struct device *emac_dev = &priv->ndev->dev;
1762 u32 teardown_cnt = 0xFFFFFFF0; /* Some high value */
1763
1764 while ((emac_read(EMAC_RXCP(ch)) & EMAC_TEARDOWN_VALUE) !=
1765 EMAC_TEARDOWN_VALUE) {
1766 /* wait till rx teardown completes */
1767 cpu_relax(); /* TODO: check if this helps ... */
1768 --teardown_cnt;
1769 if (0 == teardown_cnt) {
1770 dev_err(emac_dev, "EMAC: RX teardown aborted\n");
1771 break;
1772 }
1773 }
1774 emac_write(EMAC_RXCP(ch), EMAC_TEARDOWN_VALUE);
1775}
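
The loop above is a bounded busy-wait on a completion register. The same pattern as a generic helper (a sketch, not part of the driver): return 0 once the register matches, -ETIMEDOUT if the spin budget runs out:

	static int emac_poll_reg(u32 reg, u32 want, u32 spins)
	{
		while (emac_read(reg) != want) {
			cpu_relax();
			if (!--spins)
				return -ETIMEDOUT;
		}
		return 0;
	}
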
1776
1777/**
1778 * emac_stop_rxch: Stop RX channel operation
1779 * @priv: The DaVinci EMAC private adapter structure
1780 * @ch: RX channel number
1781 *
1782 * Called during device stop to stop RX channel operation
1783 *
1784 */
1785static void emac_stop_rxch(struct emac_priv *priv, u32 ch)
1786{
1787 struct emac_rxch *rxch = priv->rxch[ch];
1788
1789 if (rxch) {
1790 rxch->teardown_pending = 1;
1791 emac_write(EMAC_RXTEARDOWN, ch);
1792 /* wait for teardown complete */
1793 emac_rxch_teardown(priv, ch);
1794 rxch->teardown_pending = 0;
1795 emac_write(EMAC_RXINTMASKCLEAR, BIT(ch));
1796 }
1797}
1798
1799/**
1800 * emac_cleanup_rxch: Bookkeeping function to clean up RX channel resources
1801 * @priv: The DaVinci EMAC private adapter structure
1802 * @ch: RX channel number
1803 *
1804 * Called during device stop to clean up RX channel resources
1805 *
1806 */
1807static void emac_cleanup_rxch(struct emac_priv *priv, u32 ch)
1808{
1809 struct emac_rxch *rxch = priv->rxch[ch];
1810 struct emac_rx_bd __iomem *curr_bd;
1811
1812 if (rxch) {
1813 /* free the receive buffers previously allocated */
1814 curr_bd = rxch->active_queue_head;
1815 while (curr_bd) {
1816 if (curr_bd->buf_token) {
1817 dma_unmap_single(&priv->ndev->dev,
1818 curr_bd->buff_ptr,
1819 curr_bd->off_b_len
1820 & EMAC_RX_BD_BUF_SIZE,
1821 DMA_FROM_DEVICE);
1822
1823 dev_kfree_skb_any((struct sk_buff *)\
1824 curr_bd->buf_token);
1825 }
1826 curr_bd = curr_bd->next;
1827 }
1828 if (rxch->bd_mem)
1829 rxch->bd_mem = NULL;
1830 kfree(rxch);
1831 priv->rxch[ch] = NULL;
1832 }
1833}
1834
1835/**
1836 * emac_set_type0addr: Set EMAC Type0 mac address 1130 * emac_set_type0addr: Set EMAC Type0 mac address
1837 * @priv: The DaVinci EMAC private adapter structure 1131 * @priv: The DaVinci EMAC private adapter structure
1838 * @ch: RX channel number 1132 * @ch: RX channel number
@@ -1948,7 +1242,6 @@ static void emac_setmac(struct emac_priv *priv, u32 ch, char *mac_addr)
1948static int emac_dev_setmac_addr(struct net_device *ndev, void *addr) 1242static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
1949{ 1243{
1950 struct emac_priv *priv = netdev_priv(ndev); 1244 struct emac_priv *priv = netdev_priv(ndev);
1951 struct emac_rxch *rxch = priv->rxch[EMAC_DEF_RX_CH];
1952 struct device *emac_dev = &priv->ndev->dev; 1245 struct device *emac_dev = &priv->ndev->dev;
1953 struct sockaddr *sa = addr; 1246 struct sockaddr *sa = addr;
1954 1247
@@ -1959,11 +1252,10 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
1959 memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len); 1252 memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len);
1960 memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len); 1253 memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len);
1961 1254
1962 /* If the interface is down - rxch is NULL. */
1963 /* MAC address is configured only after the interface is enabled. */ 1255 /* MAC address is configured only after the interface is enabled. */
1964 if (netif_running(ndev)) { 1256 if (netif_running(ndev)) {
1965 memcpy(rxch->mac_addr, sa->sa_data, ndev->addr_len); 1257 memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len);
1966 emac_setmac(priv, EMAC_DEF_RX_CH, rxch->mac_addr); 1258 emac_setmac(priv, EMAC_DEF_RX_CH, priv->mac_addr);
1967 } 1259 }
1968 1260
1969 if (netif_msg_drv(priv)) 1261 if (netif_msg_drv(priv))
@@ -1974,194 +1266,6 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
1974} 1266}
1975 1267
1976/** 1268/**
1977 * emac_addbd_to_rx_queue: Recycle RX buffer descriptor
1978 * @priv: The DaVinci EMAC private adapter structure
1979 * @ch: RX channel number to process buffer descriptors for
1980 * @curr_bd: current buffer descriptor
1981 * @buffer: buffer pointer for descriptor
1982 * @buf_token: buffer token (stores skb information)
1983 *
1984 * Prepares the recycled buffer descriptor and adds it to the hardware
1985 * receive queue - if the queue is empty this descriptor becomes the head,
1986 * else it is added to the end of the queue
1987 *
1988 */
1989static void emac_addbd_to_rx_queue(struct emac_priv *priv, u32 ch,
1990 struct emac_rx_bd __iomem *curr_bd,
1991 char *buffer, void *buf_token)
1992{
1993 struct emac_rxch *rxch = priv->rxch[ch];
1994
1995 /* populate the hardware descriptor */
1996 curr_bd->h_next = 0;
1997 curr_bd->buff_ptr = dma_map_single(&priv->ndev->dev, buffer,
1998 rxch->buf_size, DMA_FROM_DEVICE);
1999 curr_bd->off_b_len = rxch->buf_size;
2000 curr_bd->mode = EMAC_CPPI_OWNERSHIP_BIT;
2001 curr_bd->next = NULL;
2002 curr_bd->data_ptr = buffer;
2003 curr_bd->buf_token = buf_token;
2004
2005 /* write back */
2006 BD_CACHE_WRITEBACK_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
2007 if (rxch->active_queue_head == NULL) {
2008 rxch->active_queue_head = curr_bd;
2009 rxch->active_queue_tail = curr_bd;
2010 if (0 != rxch->queue_active) {
2011 emac_write(EMAC_RXHDP(ch),
2012 emac_virt_to_phys(rxch->active_queue_head, priv));
2013 rxch->queue_active = 1;
2014 }
2015 } else {
2016 struct emac_rx_bd __iomem *tail_bd;
2017 u32 frame_status;
2018
2019 tail_bd = rxch->active_queue_tail;
2020 rxch->active_queue_tail = curr_bd;
2021 tail_bd->next = curr_bd;
2022 tail_bd = EMAC_VIRT_NOCACHE(tail_bd);
2023 tail_bd->h_next = emac_virt_to_phys(curr_bd, priv);
2024 frame_status = tail_bd->mode;
2025 if (frame_status & EMAC_CPPI_EOQ_BIT) {
2026 emac_write(EMAC_RXHDP(ch),
2027 emac_virt_to_phys(curr_bd, priv));
2028 frame_status &= ~(EMAC_CPPI_EOQ_BIT);
2029 tail_bd->mode = frame_status;
2030 ++rxch->end_of_queue_add;
2031 }
2032 }
2033 ++rxch->recycled_bd;
2034}
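
The EOQ branch above closes a classic CPPI race: if the DMA engine consumed the old tail and halted (setting EMAC_CPPI_EOQ_BIT) before the new link became visible, the freshly appended BD would never be fetched, so the code re-arms RXHDP and clears the bit. The recovery step in isolation, using the same names as above:

	if (frame_status & EMAC_CPPI_EOQ_BIT) {		/* DMA already stopped */
		emac_write(EMAC_RXHDP(ch), emac_virt_to_phys(curr_bd, priv));
		tail_bd->mode = frame_status & ~EMAC_CPPI_EOQ_BIT;
	}

Without the re-kick, reception would stall until the queue was next restarted.
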
2035
2036/**
2037 * emac_net_rx_cb: Prepares packet and sends to upper layer
2038 * @priv: The DaVinci EMAC private adapter structure
2039 * @net_pkt_list: Network packet list (received packets)
2040 *
2041 * Invalidates packet buffer memory and sends the received packet to upper
2042 * layer
2043 *
2044 * Returns success or appropriate error code (none as of now)
2045 */
2046static int emac_net_rx_cb(struct emac_priv *priv,
2047 struct emac_netpktobj *net_pkt_list)
2048{
2049 struct net_device *ndev = priv->ndev;
2050 struct sk_buff *p_skb = net_pkt_list->pkt_token;
2051 /* set length of packet */
2052 skb_put(p_skb, net_pkt_list->pkt_length);
2053 p_skb->protocol = eth_type_trans(p_skb, priv->ndev);
2054 netif_receive_skb(p_skb);
2055 ndev->stats.rx_bytes += net_pkt_list->pkt_length;
2056 ndev->stats.rx_packets++;
2057 return 0;
2058}
2059
2060/**
2061 * emac_rx_bdproc: RX buffer descriptor (packet) processing
2062 * @priv: The DaVinci EMAC private adapter structure
2063 * @ch: RX channel number to process buffer descriptors for
2064 * @budget: number of packets allowed to process
2065 * @pending: indication to caller that packets are pending to process
2066 *
2067 * Processes RX buffer descriptors - checks ownership bit on the RX buffer
2068 * descriptor, sends the receive packet to upper layer, allocates a new SKB
2069 * and recycles the buffer descriptor (requeues it in hardware RX queue).
2070 * Only "budget" number of packets are processed and indication of pending
2071 * packets provided to the caller.
2072 *
2073 * Returns number of packets processed (and indication of pending packets)
2074 */
2075static int emac_rx_bdproc(struct emac_priv *priv, u32 ch, u32 budget)
2076{
2077 unsigned long flags;
2078 u32 frame_status;
2079 u32 pkts_processed = 0;
2080 char *new_buffer;
2081 struct emac_rx_bd __iomem *curr_bd;
2082 struct emac_rx_bd __iomem *last_bd;
2083 struct emac_netpktobj *curr_pkt, pkt_obj;
2084 struct emac_netbufobj buf_obj;
2085 struct emac_netbufobj *rx_buf_obj;
2086 void *new_buf_token;
2087 struct emac_rxch *rxch = priv->rxch[ch];
2088
2089 if (unlikely(1 == rxch->teardown_pending))
2090 return 0;
2091 ++rxch->proc_count;
2092 spin_lock_irqsave(&priv->rx_lock, flags);
2093 pkt_obj.buf_list = &buf_obj;
2094 curr_pkt = &pkt_obj;
2095 curr_bd = rxch->active_queue_head;
2096 BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
2097 frame_status = curr_bd->mode;
2098
2099 while ((curr_bd) &&
2100 ((frame_status & EMAC_CPPI_OWNERSHIP_BIT) == 0) &&
2101 (pkts_processed < budget)) {
2102
2103 new_buffer = emac_net_alloc_rx_buf(priv, rxch->buf_size,
2104 &new_buf_token, EMAC_DEF_RX_CH);
2105 if (unlikely(NULL == new_buffer)) {
2106 ++rxch->out_of_rx_buffers;
2107 goto end_emac_rx_bdproc;
2108 }
2109
2110 /* populate received packet data structure */
2111 rx_buf_obj = &curr_pkt->buf_list[0];
2112 rx_buf_obj->data_ptr = (char *)curr_bd->data_ptr;
2113 rx_buf_obj->length = curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE;
2114 rx_buf_obj->buf_token = curr_bd->buf_token;
2115
2116 dma_unmap_single(&priv->ndev->dev, curr_bd->buff_ptr,
2117 curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE,
2118 DMA_FROM_DEVICE);
2119
2120 curr_pkt->pkt_token = curr_pkt->buf_list->buf_token;
2121 curr_pkt->num_bufs = 1;
2122 curr_pkt->pkt_length =
2123 (frame_status & EMAC_RX_BD_PKT_LENGTH_MASK);
2124 emac_write(EMAC_RXCP(ch), emac_virt_to_phys(curr_bd, priv));
2125 ++rxch->processed_bd;
2126 last_bd = curr_bd;
2127 curr_bd = last_bd->next;
2128 rxch->active_queue_head = curr_bd;
2129
2130 /* check for end of RX queue */
2131 if (frame_status & EMAC_CPPI_EOQ_BIT) {
2132 if (curr_bd) {
2133 ++rxch->mis_queued_packets;
2134 emac_write(EMAC_RXHDP(ch),
2135 emac_virt_to_phys(curr_bd, priv));
2136 } else {
2137 ++rxch->end_of_queue;
2138 rxch->queue_active = 0;
2139 }
2140 }
2141
2142 /* recycle BD */
2143 emac_addbd_to_rx_queue(priv, ch, last_bd, new_buffer,
2144 new_buf_token);
2145
2146 /* return the packet to the user - BD ptr passed in
2147 * last parameter for potential *future* use */
2148 spin_unlock_irqrestore(&priv->rx_lock, flags);
2149 emac_net_rx_cb(priv, curr_pkt);
2150 spin_lock_irqsave(&priv->rx_lock, flags);
2151 curr_bd = rxch->active_queue_head;
2152 if (curr_bd) {
2153 BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
2154 frame_status = curr_bd->mode;
2155 }
2156 ++pkts_processed;
2157 }
2158
2159end_emac_rx_bdproc:
2160 spin_unlock_irqrestore(&priv->rx_lock, flags);
2161 return pkts_processed;
2162}
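
emac_rx_bdproc() is budget-driven because it runs from the NAPI poll callback (see emac_poll below). A hedged skeleton of how such a processor typically plugs into NAPI - when fewer packets than the budget were handled, polling stops and interrupts are re-enabled:

	static int my_poll(struct napi_struct *napi, int budget)	/* sketch */
	{
		struct emac_priv *priv = container_of(napi, struct emac_priv, napi);
		int done = emac_rx_bdproc(priv, EMAC_DEF_RX_CH, budget);

		if (done < budget) {
			napi_complete(napi);
			emac_int_enable(priv);	/* resume interrupt-driven mode */
		}
		return done;
	}
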
2163
2164/**
2165 * emac_hw_enable: Enable EMAC hardware for packet transmission/reception 1269 * emac_hw_enable: Enable EMAC hardware for packet transmission/reception
2166 * @priv: The DaVinci EMAC private adapter structure 1270 * @priv: The DaVinci EMAC private adapter structure
2167 * 1271 *
@@ -2172,7 +1276,7 @@ end_emac_rx_bdproc:
2172 */ 1276 */
2173static int emac_hw_enable(struct emac_priv *priv) 1277static int emac_hw_enable(struct emac_priv *priv)
2174{ 1278{
2175 u32 ch, val, mbp_enable, mac_control; 1279 u32 val, mbp_enable, mac_control;
2176 1280
2177 /* Soft reset */ 1281 /* Soft reset */
2178 emac_write(EMAC_SOFTRESET, 1); 1282 emac_write(EMAC_SOFTRESET, 1);
@@ -2215,26 +1319,9 @@ static int emac_hw_enable(struct emac_priv *priv)
2215 emac_write(EMAC_RXUNICASTCLEAR, EMAC_RX_UNICAST_CLEAR_ALL); 1319 emac_write(EMAC_RXUNICASTCLEAR, EMAC_RX_UNICAST_CLEAR_ALL);
2216 priv->rx_addr_type = (emac_read(EMAC_MACCONFIG) >> 8) & 0xFF; 1320 priv->rx_addr_type = (emac_read(EMAC_MACCONFIG) >> 8) & 0xFF;
2217 1321
2218 val = emac_read(EMAC_TXCONTROL);
2219 val |= EMAC_TX_CONTROL_TX_ENABLE_VAL;
2220 emac_write(EMAC_TXCONTROL, val);
2221 val = emac_read(EMAC_RXCONTROL);
2222 val |= EMAC_RX_CONTROL_RX_ENABLE_VAL;
2223 emac_write(EMAC_RXCONTROL, val);
2224 emac_write(EMAC_MACINTMASKSET, EMAC_MAC_HOST_ERR_INTMASK_VAL); 1322 emac_write(EMAC_MACINTMASKSET, EMAC_MAC_HOST_ERR_INTMASK_VAL);
2225 1323
2226 for (ch = 0; ch < EMAC_DEF_MAX_TX_CH; ch++) { 1324 emac_setmac(priv, EMAC_DEF_RX_CH, priv->mac_addr);
2227 emac_write(EMAC_TXHDP(ch), 0);
2228 emac_write(EMAC_TXINTMASKSET, BIT(ch));
2229 }
2230 for (ch = 0; ch < EMAC_DEF_MAX_RX_CH; ch++) {
2231 struct emac_rxch *rxch = priv->rxch[ch];
2232 emac_setmac(priv, ch, rxch->mac_addr);
2233 emac_write(EMAC_RXINTMASKSET, BIT(ch));
2234 rxch->queue_active = 1;
2235 emac_write(EMAC_RXHDP(ch),
2236 emac_virt_to_phys(rxch->active_queue_head, priv));
2237 }
2238 1325
2239 /* Enable MII */ 1326 /* Enable MII */
2240 val = emac_read(EMAC_MACCONTROL); 1327 val = emac_read(EMAC_MACCONTROL);
@@ -2279,8 +1366,8 @@ static int emac_poll(struct napi_struct *napi, int budget)
2279 mask = EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC; 1366 mask = EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC;
2280 1367
2281 if (status & mask) { 1368 if (status & mask) {
2282 num_tx_pkts = emac_tx_bdproc(priv, EMAC_DEF_TX_CH, 1369 num_tx_pkts = cpdma_chan_process(priv->txchan,
2283 EMAC_DEF_TX_MAX_SERVICE); 1370 EMAC_DEF_TX_MAX_SERVICE);
2284 } /* TX processing */ 1371 } /* TX processing */
2285 1372
2286 mask = EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC; 1373 mask = EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC;
@@ -2289,7 +1376,7 @@ static int emac_poll(struct napi_struct *napi, int budget)
2289 mask = EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC; 1376 mask = EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC;
2290 1377
2291 if (status & mask) { 1378 if (status & mask) {
2292 num_rx_pkts = emac_rx_bdproc(priv, EMAC_DEF_RX_CH, budget); 1379 num_rx_pkts = cpdma_chan_process(priv->rxchan, budget);
2293 } /* RX processing */ 1380 } /* RX processing */
2294 1381
2295 mask = EMAC_DM644X_MAC_IN_VECTOR_HOST_INT; 1382 mask = EMAC_DM644X_MAC_IN_VECTOR_HOST_INT;
@@ -2348,79 +1435,6 @@ void emac_poll_controller(struct net_device *ndev)
2348} 1435}
2349#endif 1436#endif
2350 1437
2351/* PHY/MII bus related */
2352
2353/* Wait until mdio is ready for next command */
2354#define MDIO_WAIT_FOR_USER_ACCESS\
2355 while ((emac_mdio_read((MDIO_USERACCESS(0))) &\
2356 MDIO_USERACCESS_GO) != 0)
2357
2358static int emac_mii_read(struct mii_bus *bus, int phy_id, int phy_reg)
2359{
2360 unsigned int phy_data = 0;
2361 unsigned int phy_control;
2362
2363 /* Wait until mdio is ready for next command */
2364 MDIO_WAIT_FOR_USER_ACCESS;
2365
2366 phy_control = (MDIO_USERACCESS_GO |
2367 MDIO_USERACCESS_READ |
2368 ((phy_reg << 21) & MDIO_USERACCESS_REGADR) |
2369 ((phy_id << 16) & MDIO_USERACCESS_PHYADR) |
2370 (phy_data & MDIO_USERACCESS_DATA));
2371 emac_mdio_write(MDIO_USERACCESS(0), phy_control);
2372
2373 /* Wait until mdio is ready for next command */
2374 MDIO_WAIT_FOR_USER_ACCESS;
2375
2376 return emac_mdio_read(MDIO_USERACCESS(0)) & MDIO_USERACCESS_DATA;
2377
2378}
2379
2380static int emac_mii_write(struct mii_bus *bus, int phy_id,
2381 int phy_reg, u16 phy_data)
2382{
2383
2384 unsigned int control;
2385
2386 /* wait until mdio is ready for next command */
2387 MDIO_WAIT_FOR_USER_ACCESS;
2388
2389 control = (MDIO_USERACCESS_GO |
2390 MDIO_USERACCESS_WRITE |
2391 ((phy_reg << 21) & MDIO_USERACCESS_REGADR) |
2392 ((phy_id << 16) & MDIO_USERACCESS_PHYADR) |
2393 (phy_data & MDIO_USERACCESS_DATA));
2394 emac_mdio_write(MDIO_USERACCESS(0), control);
2395
2396 return 0;
2397}
2398
2399static int emac_mii_reset(struct mii_bus *bus)
2400{
2401 unsigned int clk_div;
2402 int mdio_bus_freq = emac_bus_frequency;
2403
2404 if (mdio_max_freq && mdio_bus_freq)
2405 clk_div = ((mdio_bus_freq / mdio_max_freq) - 1);
2406 else
2407 clk_div = 0xFF;
2408
2409 clk_div &= MDIO_CONTROL_CLKDIV;
2410
2411 /* Set enable and clock divider in MDIOControl */
2412 emac_mdio_write(MDIO_CONTROL, (clk_div | MDIO_CONTROL_ENABLE));
2413
2414 return 0;
2415
2416}
2417
2418static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, PHY_POLL };
2419
2420/* emac_driver: EMAC MII bus structure */
2421
2422static struct mii_bus *emac_mii;
2423
2424static void emac_adjust_link(struct net_device *ndev) 1438static void emac_adjust_link(struct net_device *ndev)
2425{ 1439{
2426 struct emac_priv *priv = netdev_priv(ndev); 1440 struct emac_priv *priv = netdev_priv(ndev);
@@ -2485,6 +1499,11 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
2485 return -EOPNOTSUPP; 1499 return -EOPNOTSUPP;
2486} 1500}
2487 1501
1502static int match_first_device(struct device *dev, void *data)
1503{
1504 return 1;
1505}
1506
2488/** 1507/**
2489 * emac_dev_open: EMAC device open 1508 * emac_dev_open: EMAC device open
2490 * @ndev: The DaVinci EMAC network adapter 1509 * @ndev: The DaVinci EMAC network adapter
@@ -2498,10 +1517,9 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
2498static int emac_dev_open(struct net_device *ndev) 1517static int emac_dev_open(struct net_device *ndev)
2499{ 1518{
2500 struct device *emac_dev = &ndev->dev; 1519 struct device *emac_dev = &ndev->dev;
2501 u32 rc, cnt, ch; 1520 u32 cnt;
2502 int phy_addr;
2503 struct resource *res; 1521 struct resource *res;
2504 int q, m; 1522 int q, m, ret;
2505 int i = 0; 1523 int i = 0;
2506 int k = 0; 1524 int k = 0;
2507 struct emac_priv *priv = netdev_priv(ndev); 1525 struct emac_priv *priv = netdev_priv(ndev);
@@ -2513,29 +1531,21 @@ static int emac_dev_open(struct net_device *ndev)
2513 /* Configuration items */ 1531 /* Configuration items */
2514 priv->rx_buf_size = EMAC_DEF_MAX_FRAME_SIZE + NET_IP_ALIGN; 1532 priv->rx_buf_size = EMAC_DEF_MAX_FRAME_SIZE + NET_IP_ALIGN;
2515 1533
2516 /* Clear basic hardware */
2517 for (ch = 0; ch < EMAC_MAX_TXRX_CHANNELS; ch++) {
2518 emac_write(EMAC_TXHDP(ch), 0);
2519 emac_write(EMAC_RXHDP(ch), 0);
2520 emac_write(EMAC_RXHDP(ch), 0);
2521 emac_write(EMAC_RXINTMASKCLEAR, EMAC_INT_MASK_CLEAR);
2522 emac_write(EMAC_TXINTMASKCLEAR, EMAC_INT_MASK_CLEAR);
2523 }
2524 priv->mac_hash1 = 0; 1534 priv->mac_hash1 = 0;
2525 priv->mac_hash2 = 0; 1535 priv->mac_hash2 = 0;
2526 emac_write(EMAC_MACHASH1, 0); 1536 emac_write(EMAC_MACHASH1, 0);
2527 emac_write(EMAC_MACHASH2, 0); 1537 emac_write(EMAC_MACHASH2, 0);
2528 1538
2529 /* multi ch not supported - open 1 TX, 1RX ch by default */ 1539 for (i = 0; i < EMAC_DEF_RX_NUM_DESC; i++) {
2530 rc = emac_init_txch(priv, EMAC_DEF_TX_CH); 1540 struct sk_buff *skb = emac_rx_alloc(priv);
2531 if (0 != rc) { 1541
2532 dev_err(emac_dev, "DaVinci EMAC: emac_init_txch() failed"); 1542 if (!skb)
2533 return rc; 1543 break;
2534 } 1544
2535 rc = emac_init_rxch(priv, EMAC_DEF_RX_CH, priv->mac_addr); 1545 ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
2536 if (0 != rc) { 1546 skb_tailroom(skb), GFP_KERNEL);
2537 dev_err(emac_dev, "DaVinci EMAC: emac_init_rxch() failed"); 1547 if (WARN_ON(ret < 0))
2538 return rc; 1548 break;
2539 } 1549 }
2540 1550
2541 /* Request IRQ */ 1551 /* Request IRQ */
@@ -2560,28 +1570,28 @@ static int emac_dev_open(struct net_device *ndev)
2560 emac_set_coalesce(ndev, &coal); 1570 emac_set_coalesce(ndev, &coal);
2561 } 1571 }
2562 1572
2563 /* find the first phy */ 1573 cpdma_ctlr_start(priv->dma);
1574
2564 priv->phydev = NULL; 1575 priv->phydev = NULL;
2565 if (priv->phy_mask) { 1576 /* use the first phy on the bus if pdata did not give us a phy id */
2566 emac_mii_reset(priv->mii_bus); 1577 if (!priv->phy_id) {
2567 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { 1578 struct device *phy;
2568 if (priv->mii_bus->phy_map[phy_addr]) {
2569 priv->phydev = priv->mii_bus->phy_map[phy_addr];
2570 break;
2571 }
2572 }
2573 1579
2574 if (!priv->phydev) { 1580 phy = bus_find_device(&mdio_bus_type, NULL, NULL,
2575 printk(KERN_ERR "%s: no PHY found\n", ndev->name); 1581 match_first_device);
2576 return -1; 1582 if (phy)
2577 } 1583 priv->phy_id = dev_name(phy);
1584 }
2578 1585
2579 priv->phydev = phy_connect(ndev, dev_name(&priv->phydev->dev), 1586 if (priv->phy_id && *priv->phy_id) {
2580 &emac_adjust_link, 0, PHY_INTERFACE_MODE_MII); 1587 priv->phydev = phy_connect(ndev, priv->phy_id,
1588 &emac_adjust_link, 0,
1589 PHY_INTERFACE_MODE_MII);
2581 1590
2582 if (IS_ERR(priv->phydev)) { 1591 if (IS_ERR(priv->phydev)) {
2583 printk(KERN_ERR "%s: Could not attach to PHY\n", 1592 dev_err(emac_dev, "could not connect to phy %s\n",
2584 ndev->name); 1593 priv->phy_id);
1594 priv->phydev = NULL;
2585 return PTR_ERR(priv->phydev); 1595 return PTR_ERR(priv->phydev);
2586 } 1596 }
2587 1597
@@ -2589,12 +1599,13 @@ static int emac_dev_open(struct net_device *ndev)
2589 priv->speed = 0; 1599 priv->speed = 0;
2590 priv->duplex = ~0; 1600 priv->duplex = ~0;
2591 1601
2592 printk(KERN_INFO "%s: attached PHY driver [%s] " 1602 dev_info(emac_dev, "attached PHY driver [%s] "
2593 "(mii_bus:phy_addr=%s, id=%x)\n", ndev->name, 1603 "(mii_bus:phy_addr=%s, id=%x)\n",
2594 priv->phydev->drv->name, dev_name(&priv->phydev->dev), 1604 priv->phydev->drv->name, dev_name(&priv->phydev->dev),
2595 priv->phydev->phy_id); 1605 priv->phydev->phy_id);
2596 } else{ 1606 } else {
2597 /* No PHY , fix the link, speed and duplex settings */ 1607 /* No PHY , fix the link, speed and duplex settings */
1608 dev_notice(emac_dev, "no phy, defaulting to 100/full\n");
2598 priv->link = 1; 1609 priv->link = 1;
2599 priv->speed = SPEED_100; 1610 priv->speed = SPEED_100;
2600 priv->duplex = DUPLEX_FULL; 1611 priv->duplex = DUPLEX_FULL;
@@ -2607,7 +1618,7 @@ static int emac_dev_open(struct net_device *ndev)
2607 if (netif_msg_drv(priv)) 1618 if (netif_msg_drv(priv))
2608 dev_notice(emac_dev, "DaVinci EMAC: Opened %s\n", ndev->name); 1619 dev_notice(emac_dev, "DaVinci EMAC: Opened %s\n", ndev->name);
2609 1620
2610 if (priv->phy_mask) 1621 if (priv->phydev)
2611 phy_start(priv->phydev); 1622 phy_start(priv->phydev);
2612 1623
2613 return 0; 1624 return 0;
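
The rewritten open path resolves a PHY by bus-id string and hands it to phylib. A minimal hedged use of the same API (the bus id "0:01" is purely illustrative):

	phydev = phy_connect(ndev, "0:01", &emac_adjust_link, 0,
			     PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);
	phy_start(phydev);	/* begin link monitoring; emac_adjust_link
				 * fires on state changes */
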
@@ -2648,10 +1659,7 @@ static int emac_dev_stop(struct net_device *ndev)
2648 1659
2649 netif_carrier_off(ndev); 1660 netif_carrier_off(ndev);
2650 emac_int_disable(priv); 1661 emac_int_disable(priv);
2651 emac_stop_txch(priv, EMAC_DEF_TX_CH); 1662 cpdma_ctlr_stop(priv->dma);
2652 emac_stop_rxch(priv, EMAC_DEF_RX_CH);
2653 emac_cleanup_txch(priv, EMAC_DEF_TX_CH);
2654 emac_cleanup_rxch(priv, EMAC_DEF_RX_CH);
2655 emac_write(EMAC_SOFTRESET, 1); 1663 emac_write(EMAC_SOFTRESET, 1);
2656 1664
2657 if (priv->phydev) 1665 if (priv->phydev)
@@ -2756,9 +1764,10 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
2756 struct resource *res; 1764 struct resource *res;
2757 struct net_device *ndev; 1765 struct net_device *ndev;
2758 struct emac_priv *priv; 1766 struct emac_priv *priv;
2759 unsigned long size; 1767 unsigned long size, hw_ram_addr;
2760 struct emac_platform_data *pdata; 1768 struct emac_platform_data *pdata;
2761 struct device *emac_dev; 1769 struct device *emac_dev;
1770 struct cpdma_params dma_params;
2762 1771
2763 /* obtain emac clock from kernel */ 1772 /* obtain emac clock from kernel */
2764 emac_clk = clk_get(&pdev->dev, NULL); 1773 emac_clk = clk_get(&pdev->dev, NULL);
@@ -2782,8 +1791,6 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
2782 priv->ndev = ndev; 1791 priv->ndev = ndev;
2783 priv->msg_enable = netif_msg_init(debug_level, DAVINCI_EMAC_DEBUG); 1792 priv->msg_enable = netif_msg_init(debug_level, DAVINCI_EMAC_DEBUG);
2784 1793
2785 spin_lock_init(&priv->tx_lock);
2786 spin_lock_init(&priv->rx_lock);
2787 spin_lock_init(&priv->lock); 1794 spin_lock_init(&priv->lock);
2788 1795
2789 pdata = pdev->dev.platform_data; 1796 pdata = pdev->dev.platform_data;
@@ -2794,7 +1801,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
2794 1801
2795 /* MAC addr and PHY mask , RMII enable info from platform_data */ 1802 /* MAC addr and PHY mask , RMII enable info from platform_data */
2796 memcpy(priv->mac_addr, pdata->mac_addr, 6); 1803 memcpy(priv->mac_addr, pdata->mac_addr, 6);
2797 priv->phy_mask = pdata->phy_mask; 1804 priv->phy_id = pdata->phy_id;
2798 priv->rmii_en = pdata->rmii_en; 1805 priv->rmii_en = pdata->rmii_en;
2799 priv->version = pdata->version; 1806 priv->version = pdata->version;
2800 priv->int_enable = pdata->interrupt_enable; 1807 priv->int_enable = pdata->interrupt_enable;
@@ -2831,14 +1838,41 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
2831 ndev->base_addr = (unsigned long)priv->remap_addr; 1838 ndev->base_addr = (unsigned long)priv->remap_addr;
2832 1839
2833 priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset; 1840 priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset;
2834 priv->ctrl_ram_size = pdata->ctrl_ram_size;
2835 priv->emac_ctrl_ram = priv->remap_addr + pdata->ctrl_ram_offset;
2836 1841
2837 if (pdata->hw_ram_addr) 1842 hw_ram_addr = pdata->hw_ram_addr;
2838 priv->hw_ram_addr = pdata->hw_ram_addr; 1843 if (!hw_ram_addr)
2839 else 1844 hw_ram_addr = (u32 __force)res->start + pdata->ctrl_ram_offset;
2840 priv->hw_ram_addr = (u32 __force)res->start + 1845
2841 pdata->ctrl_ram_offset; 1846 memset(&dma_params, 0, sizeof(dma_params));
1847 dma_params.dev = emac_dev;
1848 dma_params.dmaregs = priv->emac_base;
1849 dma_params.rxthresh = priv->emac_base + 0x120;
1850 dma_params.rxfree = priv->emac_base + 0x140;
1851 dma_params.txhdp = priv->emac_base + 0x600;
1852 dma_params.rxhdp = priv->emac_base + 0x620;
1853 dma_params.txcp = priv->emac_base + 0x640;
1854 dma_params.rxcp = priv->emac_base + 0x660;
1855 dma_params.num_chan = EMAC_MAX_TXRX_CHANNELS;
1856 dma_params.min_packet_size = EMAC_DEF_MIN_ETHPKTSIZE;
1857 dma_params.desc_mem_phys = hw_ram_addr;
1858 dma_params.desc_mem_size = pdata->ctrl_ram_size;
1859 dma_params.desc_align = 16;
1860
1861 priv->dma = cpdma_ctlr_create(&dma_params);
1862 if (!priv->dma) {
1863 dev_err(emac_dev, "DaVinci EMAC: Error initializing DMA\n");
1864 rc = -ENOMEM;
1865 goto no_dma;
1866 }
1867
1868 priv->txchan = cpdma_chan_create(priv->dma, tx_chan_num(EMAC_DEF_TX_CH),
1869 emac_tx_handler);
1870 priv->rxchan = cpdma_chan_create(priv->dma, rx_chan_num(EMAC_DEF_RX_CH),
1871 emac_rx_handler);
1872 if (WARN_ON(!priv->txchan || !priv->rxchan)) {
1873 rc = -ENOMEM;
1874 goto no_irq_res;
1875 }
2842 1876
2843 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 1877 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2844 if (!res) { 1878 if (!res) {
@@ -2871,32 +1905,6 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
2871 } 1905 }
2872 1906
2873 1907
2874 /* MII/PHY initialisation, mdio bus registration */
2875 emac_mii = mdiobus_alloc();
2876 if (emac_mii == NULL) {
2877 dev_err(emac_dev, "DaVinci EMAC: Error allocating mii_bus\n");
2878 rc = -ENOMEM;
2879 goto mdio_alloc_err;
2880 }
2881
2882 priv->mii_bus = emac_mii;
2883 emac_mii->name = "emac-mii",
2884 emac_mii->read = emac_mii_read,
2885 emac_mii->write = emac_mii_write,
2886 emac_mii->reset = emac_mii_reset,
2887 emac_mii->irq = mii_irqs,
2888 emac_mii->phy_mask = ~(priv->phy_mask);
2889 emac_mii->parent = &pdev->dev;
2890 emac_mii->priv = priv->remap_addr + pdata->mdio_reg_offset;
2891 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%x", priv->pdev->id);
2892 mdio_max_freq = pdata->mdio_max_freq;
2893 emac_mii->reset(emac_mii);
2894
2895 /* Register the MII bus */
2896 rc = mdiobus_register(emac_mii);
2897 if (rc)
2898 goto mdiobus_quit;
2899
2900 if (netif_msg_probe(priv)) { 1908 if (netif_msg_probe(priv)) {
2901 dev_notice(emac_dev, "DaVinci EMAC Probe found device "\ 1909 dev_notice(emac_dev, "DaVinci EMAC Probe found device "\
2902 "(regs: %p, irq: %d)\n", 1910 "(regs: %p, irq: %d)\n",
@@ -2904,13 +1912,15 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
2904 } 1912 }
2905 return 0; 1913 return 0;
2906 1914
2907mdiobus_quit:
2908 mdiobus_free(emac_mii);
2909
2910netdev_reg_err: 1915netdev_reg_err:
2911mdio_alloc_err:
2912 clk_disable(emac_clk); 1916 clk_disable(emac_clk);
2913no_irq_res: 1917no_irq_res:
1918 if (priv->txchan)
1919 cpdma_chan_destroy(priv->txchan);
1920 if (priv->rxchan)
1921 cpdma_chan_destroy(priv->rxchan);
1922 cpdma_ctlr_destroy(priv->dma);
1923no_dma:
2914 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1924 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2915 release_mem_region(res->start, res->end - res->start + 1); 1925 release_mem_region(res->start, res->end - res->start + 1);
2916 iounmap(priv->remap_addr); 1926 iounmap(priv->remap_addr);
@@ -2938,8 +1948,12 @@ static int __devexit davinci_emac_remove(struct platform_device *pdev)
2938 1948
2939 platform_set_drvdata(pdev, NULL); 1949 platform_set_drvdata(pdev, NULL);
2940 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1950 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2941 mdiobus_unregister(priv->mii_bus); 1951
2942 mdiobus_free(priv->mii_bus); 1952 if (priv->txchan)
1953 cpdma_chan_destroy(priv->txchan);
1954 if (priv->rxchan)
1955 cpdma_chan_destroy(priv->rxchan);
1956 cpdma_ctlr_destroy(priv->dma);
2943 1957
2944 release_mem_region(res->start, res->end - res->start + 1); 1958 release_mem_region(res->start, res->end - res->start + 1);
2945 1959
diff --git a/drivers/net/davinci_mdio.c b/drivers/net/davinci_mdio.c
new file mode 100644
index 000000000000..7615040df756
--- /dev/null
+++ b/drivers/net/davinci_mdio.c
@@ -0,0 +1,475 @@
1/*
2 * DaVinci MDIO Module driver
3 *
4 * Copyright (C) 2010 Texas Instruments.
5 *
6 * Shamelessly ripped out of davinci_emac.c, original copyrights follow:
7 *
8 * Copyright (C) 2009 Texas Instruments.
9 *
10 * ---------------------------------------------------------------------------
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 * ---------------------------------------------------------------------------
26 */
27#include <linux/module.h>
28#include <linux/kernel.h>
29#include <linux/platform_device.h>
30#include <linux/delay.h>
31#include <linux/sched.h>
32#include <linux/slab.h>
33#include <linux/phy.h>
34#include <linux/clk.h>
35#include <linux/err.h>
36#include <linux/io.h>
37#include <linux/davinci_emac.h>
38
39/*
40 * This timeout definition is a worst-case, ultra-defensive measure against
41 * unexpected controller lockups. Ideally, we should never hit this
42 * scenario in practice.
43 */
44#define MDIO_TIMEOUT 100 /* msecs */
45
46#define PHY_REG_MASK 0x1f
47#define PHY_ID_MASK 0x1f
48
49#define DEF_OUT_FREQ 2200000 /* 2.2 MHz */
50
51struct davinci_mdio_regs {
52 u32 version;
53 u32 control;
54#define CONTROL_IDLE BIT(31)
55#define CONTROL_ENABLE BIT(30)
56#define CONTROL_MAX_DIV (0xff)
57
58 u32 alive;
59 u32 link;
60 u32 linkintraw;
61 u32 linkintmasked;
62 u32 __reserved_0[2];
63 u32 userintraw;
64 u32 userintmasked;
65 u32 userintmaskset;
66 u32 userintmaskclr;
67 u32 __reserved_1[20];
68
69 struct {
70 u32 access;
71#define USERACCESS_GO BIT(31)
72#define USERACCESS_WRITE BIT(30)
73#define USERACCESS_ACK BIT(29)
74#define USERACCESS_READ (0)
75#define USERACCESS_DATA (0xffff)
76
77 u32 physel;
78 } user[0];
79};
80
81static struct mdio_platform_data default_pdata = {
82 .bus_freq = DEF_OUT_FREQ,
83};
84
85struct davinci_mdio_data {
86 struct mdio_platform_data pdata;
87 struct davinci_mdio_regs __iomem *regs;
88 spinlock_t lock;
89 struct clk *clk;
90 struct device *dev;
91 struct mii_bus *bus;
92 bool suspended;
93 unsigned long access_time; /* jiffies */
94};
95
96static void __davinci_mdio_reset(struct davinci_mdio_data *data)
97{
98 u32 mdio_in, div, mdio_out_khz, access_time;
99
100 mdio_in = clk_get_rate(data->clk);
101 div = (mdio_in / data->pdata.bus_freq) - 1;
102 if (div > CONTROL_MAX_DIV)
103 div = CONTROL_MAX_DIV;
104
105 /* set enable and clock divider */
106 __raw_writel(div | CONTROL_ENABLE, &data->regs->control);
107
108 /*
109 * One mdio transaction consists of:
110 * 32 bits of preamble
111 * 32 bits of transferred data
112 * 24 bits of bus yield (not needed unless shared?)
113 */
114 mdio_out_khz = mdio_in / (1000 * (div + 1));
115 access_time = (88 * 1000) / mdio_out_khz;
116
117 /*
118 * In the worst case, we could be kicking off a user-access immediately
119 * after the mdio bus scan state-machine triggered its own read. If
120 * so, our request could get deferred by one access cycle. We
121 * defensively allow for 4 access cycles.
122 */
123 data->access_time = usecs_to_jiffies(access_time * 4);
124 if (!data->access_time)
125 data->access_time = 1;
126}
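
The access-time arithmetic above is easy to sanity-check against the default 2.2 MHz output clock: mdio_out_khz = 2200, so one 88-bit transaction takes (88 * 1000) / 2200 = 40 us, and the defensive 4x allowance stored in data->access_time is usecs_to_jiffies(160). The result is floored at one jiffy so later sleeps and timeouts never degenerate to zero.
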
127
128static int davinci_mdio_reset(struct mii_bus *bus)
129{
130 struct davinci_mdio_data *data = bus->priv;
131 u32 phy_mask, ver;
132
133 __davinci_mdio_reset(data);
134
135 /* wait for scan logic to settle */
136 msleep(PHY_MAX_ADDR * jiffies_to_msecs(data->access_time));
137
138 /* dump hardware version info */
139 ver = __raw_readl(&data->regs->version);
140 dev_info(data->dev, "davinci mdio revision %d.%d\n",
141 (ver >> 8) & 0xff, ver & 0xff);
142
143 /* get phy mask from the alive register */
144 phy_mask = __raw_readl(&data->regs->alive);
145 if (phy_mask) {
146 /* restrict mdio bus to live phys only */
147 dev_info(data->dev, "detected phy mask %x\n", ~phy_mask);
148 phy_mask = ~phy_mask;
149 } else {
150 /* desperately scan all phys */
151 dev_warn(data->dev, "no live phy, scanning all\n");
152 phy_mask = 0;
153 }
154 data->bus->phy_mask = phy_mask;
155
156 return 0;
157}
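
A quick worked example of the alive-register masking above: if only the PHY at address 1 responds, alive reads 0x00000002, so phy_mask becomes ~0x2 = 0xfffffffd and phylib skips every address except 1.
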
158
159/* wait until hardware is ready for another user access */
160static inline int wait_for_user_access(struct davinci_mdio_data *data)
161{
162 struct davinci_mdio_regs __iomem *regs = data->regs;
163 unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT);
164 u32 reg;
165
166 while (time_after(timeout, jiffies)) {
167 reg = __raw_readl(&regs->user[0].access);
168 if ((reg & USERACCESS_GO) == 0)
169 return 0;
170
171 reg = __raw_readl(&regs->control);
172 if ((reg & CONTROL_IDLE) == 0)
173 continue;
174
175 /*
176 * An emac soft_reset may have clobbered the mdio controller's
177 * state machine. We need to reset and retry the current
178 * operation
179 */
180 dev_warn(data->dev, "resetting idled controller\n");
181 __davinci_mdio_reset(data);
182 return -EAGAIN;
183 }
184 dev_err(data->dev, "timed out waiting for user access\n");
185 return -ETIMEDOUT;
186}
187
188/* wait until hardware state machine is idle */
189static inline int wait_for_idle(struct davinci_mdio_data *data)
190{
191 struct davinci_mdio_regs __iomem *regs = data->regs;
192 unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT);
193
194 while (time_after(timeout, jiffies)) {
195 if (__raw_readl(&regs->control) & CONTROL_IDLE)
196 return 0;
197 }
198 dev_err(data->dev, "timed out waiting for idle\n");
199 return -ETIMEDOUT;
200}
201
202static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
203{
204 struct davinci_mdio_data *data = bus->priv;
205 u32 reg;
206 int ret;
207
208 if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
209 return -EINVAL;
210
211 spin_lock(&data->lock);
212
213 if (data->suspended) {
214 spin_unlock(&data->lock);
215 return -ENODEV;
216 }
217
218 reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
219 (phy_id << 16));
220
221 while (1) {
222 ret = wait_for_user_access(data);
223 if (ret == -EAGAIN)
224 continue;
225 if (ret < 0)
226 break;
227
228 __raw_writel(reg, &data->regs->user[0].access);
229
230 ret = wait_for_user_access(data);
231 if (ret == -EAGAIN)
232 continue;
233 if (ret < 0)
234 break;
235
236 reg = __raw_readl(&data->regs->user[0].access);
237 ret = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -EIO;
238 break;
239 }
240
241 spin_unlock(&data->lock);
242
243 return ret;
244}
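
Once the bus is registered, phylib drives this hook through mdiobus_read()/mdiobus_write(). A hedged usage example, polling the basic-status register of the PHY at address 1:

	int val = mdiobus_read(data->bus, 1, MII_BMSR);

	if (val >= 0 && (val & BMSR_LSTATUS))
		dev_info(data->dev, "phy 1: link is up\n");
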
245
246static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
247 int phy_reg, u16 phy_data)
248{
249 struct davinci_mdio_data *data = bus->priv;
250 u32 reg;
251 int ret;
252
253 if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
254 return -EINVAL;
255
256 spin_lock(&data->lock);
257
258 if (data->suspended) {
259 spin_unlock(&data->lock);
260 return -ENODEV;
261 }
262
263 reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
264 (phy_id << 16) | (phy_data & USERACCESS_DATA));
265
266 while (1) {
267 ret = wait_for_user_access(data);
268 if (ret == -EAGAIN)
269 continue;
270 if (ret < 0)
271 break;
272
273 __raw_writel(reg, &data->regs->user[0].access);
274
275 ret = wait_for_user_access(data);
276 if (ret == -EAGAIN)
277 continue;
278 break;
279 }
280
281 spin_unlock(&data->lock);
282
283 return ret;
284}
285
286static int __devinit davinci_mdio_probe(struct platform_device *pdev)
287{
288 struct mdio_platform_data *pdata = pdev->dev.platform_data;
289 struct device *dev = &pdev->dev;
290 struct davinci_mdio_data *data;
291 struct resource *res;
292 struct phy_device *phy;
293 int ret, addr;
294
295 data = kzalloc(sizeof(*data), GFP_KERNEL);
296 if (!data) {
297 dev_err(dev, "failed to alloc device data\n");
298 return -ENOMEM;
299 }
300
301 data->pdata = pdata ? (*pdata) : default_pdata;
302
303 data->bus = mdiobus_alloc();
304 if (!data->bus) {
305 dev_err(dev, "failed to alloc mii bus\n");
306 ret = -ENOMEM;
307 goto bail_out;
308 }
309
310 data->bus->name = dev_name(dev);
311 data->bus->read = davinci_mdio_read;
312 data->bus->write = davinci_mdio_write;
313 data->bus->reset = davinci_mdio_reset;
314 data->bus->parent = dev;
315 data->bus->priv = data;
316 snprintf(data->bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
317
318 data->clk = clk_get(dev, NULL);
319 if (IS_ERR(data->clk)) {
320 dev_err(dev, "failed to get device clock\n");
321 ret = PTR_ERR(data->clk);
322 data->clk = NULL;
323 goto bail_out;
324 }
325
326 clk_enable(data->clk);
327
328 dev_set_drvdata(dev, data);
329 data->dev = dev;
330 spin_lock_init(&data->lock);
331
332 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
333 if (!res) {
334 dev_err(dev, "could not find register map resource\n");
335 ret = -ENOENT;
336 goto bail_out;
337 }
338
339 res = devm_request_mem_region(dev, res->start, resource_size(res),
340 dev_name(dev));
341 if (!res) {
342 dev_err(dev, "could not allocate register map resource\n");
343 ret = -ENXIO;
344 goto bail_out;
345 }
346
347 data->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
348 if (!data->regs) {
349 dev_err(dev, "could not map mdio registers\n");
350 ret = -ENOMEM;
351 goto bail_out;
352 }
353
354 /* register the mii bus */
355 ret = mdiobus_register(data->bus);
356 if (ret)
357 goto bail_out;
358
359 /* scan and dump the bus */
360 for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
361 phy = data->bus->phy_map[addr];
362 if (phy) {
363 dev_info(dev, "phy[%d]: device %s, driver %s\n",
364 phy->addr, dev_name(&phy->dev),
365 phy->drv ? phy->drv->name : "unknown");
366 }
367 }
368
369 return 0;
370
371bail_out:
372 if (data->bus)
373 mdiobus_free(data->bus);
374
375 if (data->clk) {
376 clk_disable(data->clk);
377 clk_put(data->clk);
378 }
379
380 kfree(data);
381
382 return ret;
383}
384
385static int __devexit davinci_mdio_remove(struct platform_device *pdev)
386{
387 struct device *dev = &pdev->dev;
388 struct davinci_mdio_data *data = dev_get_drvdata(dev);
389
390 if (data->bus)
391 mdiobus_free(data->bus);
392
393 if (data->clk) {
394 clk_disable(data->clk);
395 clk_put(data->clk);
396 }
397
398 dev_set_drvdata(dev, NULL);
399
400 kfree(data);
401
402 return 0;
403}
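
One teardown subtlety: a mii_bus that mdiobus_register() accepted is ordinarily unregistered before it is freed, so a fuller remove path would plausibly look like this (a sketch under that assumption, not code from this commit):

	if (data->bus) {
		mdiobus_unregister(data->bus);	/* detach phy devices first */
		mdiobus_free(data->bus);
	}
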
404
405static int davinci_mdio_suspend(struct device *dev)
406{
407 struct davinci_mdio_data *data = dev_get_drvdata(dev);
408 u32 ctrl;
409
410 spin_lock(&data->lock);
411
412 /* shutdown the scan state machine */
413 ctrl = __raw_readl(&data->regs->control);
414 ctrl &= ~CONTROL_ENABLE;
415 __raw_writel(ctrl, &data->regs->control);
416 wait_for_idle(data);
417
418 if (data->clk)
419 clk_disable(data->clk);
420
421 data->suspended = true;
422 spin_unlock(&data->lock);
423
424 return 0;
425}
426
427static int davinci_mdio_resume(struct device *dev)
428{
429 struct davinci_mdio_data *data = dev_get_drvdata(dev);
430 u32 ctrl;
431
432 spin_lock(&data->lock);
433 if (data->clk)
434 clk_enable(data->clk);
435
436 /* restart the scan state machine */
437 ctrl = __raw_readl(&data->regs->control);
438 ctrl |= CONTROL_ENABLE;
439 __raw_writel(ctrl, &data->regs->control);
440
441 data->suspended = false;
442 spin_unlock(&data->lock);
443
444 return 0;
445}
446
447static const struct dev_pm_ops davinci_mdio_pm_ops = {
448 .suspend = davinci_mdio_suspend,
449 .resume = davinci_mdio_resume,
450};
451
452static struct platform_driver davinci_mdio_driver = {
453 .driver = {
454 .name = "davinci_mdio",
455 .owner = THIS_MODULE,
456 .pm = &davinci_mdio_pm_ops,
457 },
458 .probe = davinci_mdio_probe,
459 .remove = __devexit_p(davinci_mdio_remove),
460};
461
462static int __init davinci_mdio_init(void)
463{
464 return platform_driver_register(&davinci_mdio_driver);
465}
466device_initcall(davinci_mdio_init);
467
468static void __exit davinci_mdio_exit(void)
469{
470 platform_driver_unregister(&davinci_mdio_driver);
471}
472module_exit(davinci_mdio_exit);
473
474MODULE_LICENSE("GPL");
475MODULE_DESCRIPTION("DaVinci MDIO driver");
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index a117f2a0252e..4686c3983fc3 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -521,7 +521,7 @@ void e1000_down(struct e1000_adapter *adapter)
521 e1000_clean_all_rx_rings(adapter); 521 e1000_clean_all_rx_rings(adapter);
522} 522}
523 523
524void e1000_reinit_safe(struct e1000_adapter *adapter) 524static void e1000_reinit_safe(struct e1000_adapter *adapter)
525{ 525{
526 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) 526 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
527 msleep(1); 527 msleep(1);
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index ca663f19d7df..7236f1a53ba0 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -52,6 +52,10 @@
52 (ID_LED_DEF1_DEF2)) 52 (ID_LED_DEF1_DEF2))
53 53
54#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 54#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
55#define E1000_BASE1000T_STATUS 10
56#define E1000_IDLE_ERROR_COUNT_MASK 0xFF
57#define E1000_RECEIVE_ERROR_COUNTER 21
58#define E1000_RECEIVE_ERROR_MAX 0xFFFF
55 59
56#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */ 60#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */
57 61
@@ -1243,6 +1247,39 @@ static s32 e1000_led_on_82574(struct e1000_hw *hw)
1243} 1247}
1244 1248
1245/** 1249/**
1250 * e1000_check_phy_82574 - check 82574 phy hung state
1251 * @hw: pointer to the HW structure
1252 *
1253 * Returns whether phy is hung or not
1254 **/
1255bool e1000_check_phy_82574(struct e1000_hw *hw)
1256{
1257 u16 status_1kbt = 0;
1258 u16 receive_errors = 0;
1259 bool phy_hung = false;
1260 s32 ret_val = 0;
1261
1262 /*
1263 * Read the PHY Receive Error counter first; if it is at its max (all
1264 * F's), read the Base1000T status register. If both are max, the PHY is hung.
1265 */
1266 ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors);
1267
1268 if (ret_val)
1269 goto out;
1270 if (receive_errors == E1000_RECEIVE_ERROR_MAX) {
1271 ret_val = e1e_rphy(hw, E1000_BASE1000T_STATUS, &status_1kbt);
1272 if (ret_val)
1273 goto out;
1274 if ((status_1kbt & E1000_IDLE_ERROR_COUNT_MASK) ==
1275 E1000_IDLE_ERROR_COUNT_MASK)
1276 phy_hung = true;
1277 }
1278out:
1279 return phy_hung;
1280}
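
Spelled out, the heuristic reads: E1000_RECEIVE_ERROR_MAX (0xFFFF) means the 16-bit PHY receive-error counter is pegged, and E1000_IDLE_ERROR_COUNT_MASK keeps the low byte of the Base1000T status register, whose idle-error count saturates at 0xFF. Condensed (a sketch):

	hung = (receive_errors == 0xFFFF) &&
	       ((status_1kbt & 0xFF) == 0xFF);
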
1281
1282/**
1246 * e1000_setup_link_82571 - Setup flow control and link settings 1283 * e1000_setup_link_82571 - Setup flow control and link settings
1247 * @hw: pointer to the HW structure 1284 * @hw: pointer to the HW structure
1248 * 1285 *
@@ -1859,6 +1896,7 @@ struct e1000_info e1000_82574_info = {
1859 | FLAG_HAS_SMART_POWER_DOWN 1896 | FLAG_HAS_SMART_POWER_DOWN
1860 | FLAG_HAS_AMT 1897 | FLAG_HAS_AMT
1861 | FLAG_HAS_CTRLEXT_ON_LOAD, 1898 | FLAG_HAS_CTRLEXT_ON_LOAD,
1899 .flags2 = FLAG2_CHECK_PHY_HANG,
1862 .pba = 36, 1900 .pba = 36,
1863 .max_hw_frame_size = DEFAULT_JUMBO, 1901 .max_hw_frame_size = DEFAULT_JUMBO,
1864 .get_variants = e1000_get_variants_82571, 1902 .get_variants = e1000_get_variants_82571,
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index cee882dd67bf..fdc67fead4ea 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -397,6 +397,7 @@ struct e1000_adapter {
397 struct work_struct print_hang_task; 397 struct work_struct print_hang_task;
398 398
399 bool idle_check; 399 bool idle_check;
400 int phy_hang_count;
400}; 401};
401 402
402struct e1000_info { 403struct e1000_info {
@@ -454,6 +455,7 @@ struct e1000_info {
454#define FLAG2_HAS_EEE (1 << 5) 455#define FLAG2_HAS_EEE (1 << 5)
455#define FLAG2_DMA_BURST (1 << 6) 456#define FLAG2_DMA_BURST (1 << 6)
456#define FLAG2_DISABLE_AIM (1 << 8) 457#define FLAG2_DISABLE_AIM (1 << 8)
458#define FLAG2_CHECK_PHY_HANG (1 << 9)
457 459
458#define E1000_RX_DESC_PS(R, i) \ 460#define E1000_RX_DESC_PS(R, i) \
459 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) 461 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
@@ -631,6 +633,7 @@ extern s32 e1000_get_phy_info_ife(struct e1000_hw *hw);
631extern s32 e1000_check_polarity_ife(struct e1000_hw *hw); 633extern s32 e1000_check_polarity_ife(struct e1000_hw *hw);
632extern s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw); 634extern s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
633extern s32 e1000_check_polarity_igp(struct e1000_hw *hw); 635extern s32 e1000_check_polarity_igp(struct e1000_hw *hw);
636extern bool e1000_check_phy_82574(struct e1000_hw *hw);
634 637
635static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw) 638static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
636{ 639{
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index ec8cf3f51423..c4ca1629f532 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -4098,6 +4098,25 @@ static void e1000e_enable_receives(struct e1000_adapter *adapter)
4098 } 4098 }
4099} 4099}
4100 4100
4101static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
4102{
4103 struct e1000_hw *hw = &adapter->hw;
4104
4105 /*
4106 * With 82574 controllers, the PHY needs to be checked periodically
4107 * for a hung state, and reset if two consecutive calls return true
4108 */
4109 if (e1000_check_phy_82574(hw))
4110 adapter->phy_hang_count++;
4111 else
4112 adapter->phy_hang_count = 0;
4113
4114 if (adapter->phy_hang_count > 1) {
4115 adapter->phy_hang_count = 0;
4116 schedule_work(&adapter->reset_task);
4117 }
4118}
4119
4101/** 4120/**
4102 * e1000_watchdog - Timer Call-back 4121 * e1000_watchdog - Timer Call-back
4103 * @data: pointer to adapter cast into an unsigned long 4122 * @data: pointer to adapter cast into an unsigned long
@@ -4333,6 +4352,9 @@ link_up:
4333 if (e1000e_get_laa_state_82571(hw)) 4352 if (e1000e_get_laa_state_82571(hw))
4334 e1000e_rar_set(hw, adapter->hw.mac.addr, 0); 4353 e1000e_rar_set(hw, adapter->hw.mac.addr, 0);
4335 4354
4355 if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
4356 e1000e_check_82574_phy_workaround(adapter);
4357
4336 /* Reset the timer */ 4358 /* Reset the timer */
4337 if (!test_bit(__E1000_DOWN, &adapter->state)) 4359 if (!test_bit(__E1000_DOWN, &adapter->state))
4338 mod_timer(&adapter->watchdog_timer, 4360 mod_timer(&adapter->watchdog_timer,
@@ -4860,8 +4882,11 @@ static void e1000_reset_task(struct work_struct *work)
4860 struct e1000_adapter *adapter; 4882 struct e1000_adapter *adapter;
4861 adapter = container_of(work, struct e1000_adapter, reset_task); 4883 adapter = container_of(work, struct e1000_adapter, reset_task);
4862 4884
4863 e1000e_dump(adapter); 4885 if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
4864 e_err("Reset adapter\n"); 4886 (adapter->flags & FLAG_RX_RESTART_NOW))) {
4887 e1000e_dump(adapter);
4888 e_err("Reset adapter\n");
4889 }
4865 e1000e_reinit_locked(adapter); 4890 e1000e_reinit_locked(adapter);
4866} 4891}
4867 4892
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 1321cb6401cf..8e745e74828d 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -396,7 +396,9 @@ struct ehea_port_res {
396 int swqe_ll_count; 396 int swqe_ll_count;
397 u32 swqe_id_counter; 397 u32 swqe_id_counter;
398 u64 tx_packets; 398 u64 tx_packets;
399 u64 tx_bytes;
399 u64 rx_packets; 400 u64 rx_packets;
401 u64 rx_bytes;
400 u32 poll_counter; 402 u32 poll_counter;
401 struct net_lro_mgr lro_mgr; 403 struct net_lro_mgr lro_mgr;
402 struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS]; 404 struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index bb7d306fb446..182b2a7be8dc 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -330,7 +330,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
330 struct ehea_port *port = netdev_priv(dev); 330 struct ehea_port *port = netdev_priv(dev);
331 struct net_device_stats *stats = &port->stats; 331 struct net_device_stats *stats = &port->stats;
332 struct hcp_ehea_port_cb2 *cb2; 332 struct hcp_ehea_port_cb2 *cb2;
333 u64 hret, rx_packets, tx_packets; 333 u64 hret, rx_packets, tx_packets, rx_bytes = 0, tx_bytes = 0;
334 int i; 334 int i;
335 335
336 memset(stats, 0, sizeof(*stats)); 336 memset(stats, 0, sizeof(*stats));
@@ -353,18 +353,22 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
353 ehea_dump(cb2, sizeof(*cb2), "net_device_stats"); 353 ehea_dump(cb2, sizeof(*cb2), "net_device_stats");
354 354
355 rx_packets = 0; 355 rx_packets = 0;
356 for (i = 0; i < port->num_def_qps; i++) 356 for (i = 0; i < port->num_def_qps; i++) {
357 rx_packets += port->port_res[i].rx_packets; 357 rx_packets += port->port_res[i].rx_packets;
358 rx_bytes += port->port_res[i].rx_bytes;
359 }
358 360
359 tx_packets = 0; 361 tx_packets = 0;
360 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) 362 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
361 tx_packets += port->port_res[i].tx_packets; 363 tx_packets += port->port_res[i].tx_packets;
364 tx_bytes += port->port_res[i].tx_bytes;
365 }
362 366
363 stats->tx_packets = tx_packets; 367 stats->tx_packets = tx_packets;
364 stats->multicast = cb2->rxmcp; 368 stats->multicast = cb2->rxmcp;
365 stats->rx_errors = cb2->rxuerr; 369 stats->rx_errors = cb2->rxuerr;
366 stats->rx_bytes = cb2->rxo; 370 stats->rx_bytes = rx_bytes;
367 stats->tx_bytes = cb2->txo; 371 stats->tx_bytes = tx_bytes;
368 stats->rx_packets = rx_packets; 372 stats->rx_packets = rx_packets;
369 373
370out_herr: 374out_herr:
@@ -703,6 +707,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
703 int skb_arr_rq2_len = pr->rq2_skba.len; 707 int skb_arr_rq2_len = pr->rq2_skba.len;
704 int skb_arr_rq3_len = pr->rq3_skba.len; 708 int skb_arr_rq3_len = pr->rq3_skba.len;
705 int processed, processed_rq1, processed_rq2, processed_rq3; 709 int processed, processed_rq1, processed_rq2, processed_rq3;
710 u64 processed_bytes = 0;
706 int wqe_index, last_wqe_index, rq, port_reset; 711 int wqe_index, last_wqe_index, rq, port_reset;
707 712
708 processed = processed_rq1 = processed_rq2 = processed_rq3 = 0; 713 processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
@@ -760,6 +765,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
760 processed_rq3++; 765 processed_rq3++;
761 } 766 }
762 767
768 processed_bytes += skb->len;
763 ehea_proc_skb(pr, cqe, skb); 769 ehea_proc_skb(pr, cqe, skb);
764 } else { 770 } else {
765 pr->p_stats.poll_receive_errors++; 771 pr->p_stats.poll_receive_errors++;
@@ -775,6 +781,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
775 lro_flush_all(&pr->lro_mgr); 781 lro_flush_all(&pr->lro_mgr);
776 782
777 pr->rx_packets += processed; 783 pr->rx_packets += processed;
784 pr->rx_bytes += processed_bytes;
778 785
779 ehea_refill_rq1(pr, last_wqe_index, processed_rq1); 786 ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
780 ehea_refill_rq2(pr, processed_rq2); 787 ehea_refill_rq2(pr, processed_rq2);
@@ -1509,9 +1516,20 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
1509 enum ehea_eq_type eq_type = EHEA_EQ; 1516 enum ehea_eq_type eq_type = EHEA_EQ;
1510 struct ehea_qp_init_attr *init_attr = NULL; 1517 struct ehea_qp_init_attr *init_attr = NULL;
1511 int ret = -EIO; 1518 int ret = -EIO;
1519 u64 tx_bytes, rx_bytes, tx_packets, rx_packets;
1520
1521 tx_bytes = pr->tx_bytes;
1522 tx_packets = pr->tx_packets;
1523 rx_bytes = pr->rx_bytes;
1524 rx_packets = pr->rx_packets;
1512 1525
1513 memset(pr, 0, sizeof(struct ehea_port_res)); 1526 memset(pr, 0, sizeof(struct ehea_port_res));
1514 1527
1528 pr->tx_bytes = tx_bytes;
1529 pr->tx_packets = tx_packets;
1530 pr->rx_bytes = rx_bytes;
1531 pr->rx_packets = rx_packets;
1532
1515 pr->port = port; 1533 pr->port = port;
1516 spin_lock_init(&pr->xmit_lock); 1534 spin_lock_init(&pr->xmit_lock);
1517 spin_lock_init(&pr->netif_queue); 1535 spin_lock_init(&pr->netif_queue);
@@ -2249,6 +2267,14 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
2249 memset(swqe, 0, SWQE_HEADER_SIZE); 2267 memset(swqe, 0, SWQE_HEADER_SIZE);
2250 atomic_dec(&pr->swqe_avail); 2268 atomic_dec(&pr->swqe_avail);
2251 2269
2270 if (vlan_tx_tag_present(skb)) {
2271 swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
2272 swqe->vlan_tag = vlan_tx_tag_get(skb);
2273 }
2274
2275 pr->tx_packets++;
2276 pr->tx_bytes += skb->len;
2277
2252 if (skb->len <= SWQE3_MAX_IMM) { 2278 if (skb->len <= SWQE3_MAX_IMM) {
2253 u32 sig_iv = port->sig_comp_iv; 2279 u32 sig_iv = port->sig_comp_iv;
2254 u32 swqe_num = pr->swqe_id_counter; 2280 u32 swqe_num = pr->swqe_id_counter;
@@ -2279,11 +2305,6 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
2279 } 2305 }
2280 pr->swqe_id_counter += 1; 2306 pr->swqe_id_counter += 1;
2281 2307
2282 if (vlan_tx_tag_present(skb)) {
2283 swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
2284 swqe->vlan_tag = vlan_tx_tag_get(skb);
2285 }
2286
2287 if (netif_msg_tx_queued(port)) { 2308 if (netif_msg_tx_queued(port)) {
2288 ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr); 2309 ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
2289 ehea_dump(swqe, 512, "swqe"); 2310 ehea_dump(swqe, 512, "swqe");
@@ -2295,7 +2316,6 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
2295 } 2316 }
2296 2317
2297 ehea_post_swqe(pr->qp, swqe); 2318 ehea_post_swqe(pr->qp, swqe);
2298 pr->tx_packets++;
2299 2319
2300 if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) { 2320 if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
2301 spin_lock_irqsave(&pr->netif_queue, flags); 2321 spin_lock_irqsave(&pr->netif_queue, flags);
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 4c4cc80ec0a1..d1bec6269173 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -577,11 +577,10 @@ static int gfar_parse_group(struct device_node *np,
577 irq_of_parse_and_map(np, 1); 577 irq_of_parse_and_map(np, 1);
578 priv->gfargrp[priv->num_grps].interruptError = 578 priv->gfargrp[priv->num_grps].interruptError =
579 irq_of_parse_and_map(np,2); 579 irq_of_parse_and_map(np,2);
580 if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 || 580 if (priv->gfargrp[priv->num_grps].interruptTransmit == NO_IRQ ||
581 priv->gfargrp[priv->num_grps].interruptReceive < 0 || 581 priv->gfargrp[priv->num_grps].interruptReceive == NO_IRQ ||
582 priv->gfargrp[priv->num_grps].interruptError < 0) { 582 priv->gfargrp[priv->num_grps].interruptError == NO_IRQ)
583 return -EINVAL; 583 return -EINVAL;
584 }
585 } 584 }
586 585
587 priv->gfargrp[priv->num_grps].grp_id = priv->num_grps; 586 priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
@@ -2511,7 +2510,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2511 skb_recycle_check(skb, priv->rx_buffer_size + 2510 skb_recycle_check(skb, priv->rx_buffer_size +
2512 RXBUF_ALIGNMENT)) { 2511 RXBUF_ALIGNMENT)) {
2513 gfar_align_skb(skb); 2512 gfar_align_skb(skb);
2514 __skb_queue_head(&priv->rx_recycle, skb); 2513 skb_queue_head(&priv->rx_recycle, skb);
2515 } else 2514 } else
2516 dev_kfree_skb_any(skb); 2515 dev_kfree_skb_any(skb);
2517 2516
@@ -2594,7 +2593,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev)
2594 struct gfar_private *priv = netdev_priv(dev); 2593 struct gfar_private *priv = netdev_priv(dev);
2595 struct sk_buff *skb = NULL; 2594 struct sk_buff *skb = NULL;
2596 2595
2597 skb = __skb_dequeue(&priv->rx_recycle); 2596 skb = skb_dequeue(&priv->rx_recycle);
2598 if (!skb) 2597 if (!skb)
2599 skb = gfar_alloc_skb(dev); 2598 skb = gfar_alloc_skb(dev);
2600 2599
@@ -2750,7 +2749,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2750 if (unlikely(!newskb)) 2749 if (unlikely(!newskb))
2751 newskb = skb; 2750 newskb = skb;
2752 else if (skb) 2751 else if (skb)
2753 __skb_queue_head(&priv->rx_recycle, skb); 2752 skb_queue_head(&priv->rx_recycle, skb);
2754 } else { 2753 } else {
2755 /* Increment the number of packets */ 2754 /* Increment the number of packets */
2756 rx_queue->stats.rx_packets++; 2755 rx_queue->stats.rx_packets++;
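The gianfar conversion from __skb_queue_head()/__skb_dequeue() to the locked variants matters because rx_recycle is touched from both the TX clean-up and the RX refill paths, which can run concurrently on different CPUs. A minimal sketch of the difference, assuming a shared sk_buff_head:

    #include <linux/skbuff.h>

    static struct sk_buff_head recycle;     /* shared between softirqs */

    static void recycle_init(void)
    {
            skb_queue_head_init(&recycle);  /* also initializes recycle.lock */
    }

    static void recycle_produce(struct sk_buff *skb)
    {
            /* takes recycle.lock with IRQs disabled; __skb_queue_head()
             * would instead assume the caller serializes access */
            skb_queue_head(&recycle, skb);
    }

    static struct sk_buff *recycle_consume(void)
    {
            /* locked dequeue; returns NULL when the list is empty */
            return skb_dequeue(&recycle);
    }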
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 5c566ebc54b8..3bc8e276ba4d 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -635,9 +635,10 @@ static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
635 if (wol->wolopts & ~WAKE_MAGIC) 635 if (wol->wolopts & ~WAKE_MAGIC)
636 return -EINVAL; 636 return -EINVAL;
637 637
638 device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);
639
638 spin_lock_irqsave(&priv->bflock, flags); 640 spin_lock_irqsave(&priv->bflock, flags);
639 priv->wol_en = wol->wolopts & WAKE_MAGIC ? 1 : 0; 641 priv->wol_en = !!device_may_wakeup(&dev->dev);
640 device_set_wakeup_enable(&dev->dev, priv->wol_en);
641 spin_unlock_irqrestore(&priv->bflock, flags); 642 spin_unlock_irqrestore(&priv->bflock, flags);
642 643
643 return 0; 644 return 0;
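Reassembled for readability, the resulting gfar_set_wol() keeps the sleeping call outside the spinlock: device_set_wakeup_enable() may sleep, so it runs first, and the driver flag is then derived from device_may_wakeup() under bflock. A sketch under those assumptions (the _sketch suffix is not from the driver):

    static int gfar_set_wol_sketch(struct net_device *dev,
                                   struct ethtool_wolinfo *wol)
    {
            struct gfar_private *priv = netdev_priv(dev);
            unsigned long flags;

            if (wol->wolopts & ~WAKE_MAGIC)
                    return -EINVAL;

            /* may sleep: must not run under bflock */
            device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);

            spin_lock_irqsave(&priv->bflock, flags);
            priv->wol_en = !!device_may_wakeup(&dev->dev);
            spin_unlock_irqrestore(&priv->bflock, flags);

            return 0;
    }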
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 385dc3204cb7..06bb9b799458 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -2871,7 +2871,6 @@ static int __devinit emac_probe(struct platform_device *ofdev,
2871 SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops); 2871 SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
2872 2872
2873 netif_carrier_off(ndev); 2873 netif_carrier_off(ndev);
2874 netif_stop_queue(ndev);
2875 2874
2876 err = register_netdev(ndev); 2875 err = register_netdev(ndev);
2877 if (err) { 2876 if (err) {
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 14db09e2fa8b..892d196f17ac 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -4107,7 +4107,6 @@ static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
4107netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb, 4107netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
4108 struct igb_ring *tx_ring) 4108 struct igb_ring *tx_ring)
4109{ 4109{
4110 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
4111 int tso = 0, count; 4110 int tso = 0, count;
4112 u32 tx_flags = 0; 4111 u32 tx_flags = 0;
4113 u16 first; 4112 u16 first;
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index ebfaa68ee630..28af019c97bb 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -2783,15 +2783,15 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
2783 /* reset the hardware with the new settings */ 2783 /* reset the hardware with the new settings */
2784 igbvf_reset(adapter); 2784 igbvf_reset(adapter);
2785 2785
2786 /* tell the stack to leave us alone until igbvf_open() is called */
2787 netif_carrier_off(netdev);
2788 netif_stop_queue(netdev);
2789
2790 strcpy(netdev->name, "eth%d"); 2786 strcpy(netdev->name, "eth%d");
2791 err = register_netdev(netdev); 2787 err = register_netdev(netdev);
2792 if (err) 2788 if (err)
2793 goto err_hw_init; 2789 goto err_hw_init;
2794 2790
2791 /* tell the stack to leave us alone until igbvf_open() is called */
2792 netif_carrier_off(netdev);
2793 netif_stop_queue(netdev);
2794
2795 igbvf_print_device_info(adapter); 2795 igbvf_print_device_info(adapter);
2796 2796
2797 igbvf_initialize_last_counter_stats(adapter); 2797 igbvf_initialize_last_counter_stats(adapter);
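Several hunks in this series (ibm_newemac above, igbvf here, jme and netxen below) drop or move netif_stop_queue()/netif_carrier_off() around register_netdev(): an unregistered device's queue is not yet started, so stopping it beforehand adds nothing. The probe-tail pattern the igbvf hunk lands on, with error paths trimmed:

    strcpy(netdev->name, "eth%d");
    err = register_netdev(netdev);
    if (err)
            goto err_hw_init;

    /* tell the stack to leave us alone until ndo_open() is called;
     * only meaningful now that the netdev is registered */
    netif_carrier_off(netdev);
    netif_stop_queue(netdev);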
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index dc0198092343..aa93655c3aa7 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -88,16 +88,14 @@ static const char *ipg_brand_name[] = {
88 "IC PLUS IP1000 1000/100/10 based NIC", 88 "IC PLUS IP1000 1000/100/10 based NIC",
89 "Sundance Technology ST2021 based NIC", 89 "Sundance Technology ST2021 based NIC",
90 "Tamarack Microelectronics TC9020/9021 based NIC", 90 "Tamarack Microelectronics TC9020/9021 based NIC",
91 "Tamarack Microelectronics TC9020/9021 based NIC",
92 "D-Link NIC IP1000A" 91 "D-Link NIC IP1000A"
93}; 92};
94 93
95static DEFINE_PCI_DEVICE_TABLE(ipg_pci_tbl) = { 94static DEFINE_PCI_DEVICE_TABLE(ipg_pci_tbl) = {
96 { PCI_VDEVICE(SUNDANCE, 0x1023), 0 }, 95 { PCI_VDEVICE(SUNDANCE, 0x1023), 0 },
97 { PCI_VDEVICE(SUNDANCE, 0x2021), 1 }, 96 { PCI_VDEVICE(SUNDANCE, 0x2021), 1 },
98 { PCI_VDEVICE(SUNDANCE, 0x1021), 2 }, 97 { PCI_VDEVICE(DLINK, 0x9021), 2 },
99 { PCI_VDEVICE(DLINK, 0x9021), 3 }, 98 { PCI_VDEVICE(DLINK, 0x4020), 3 },
100 { PCI_VDEVICE(DLINK, 0x4020), 4 },
101 { 0, } 99 { 0, }
102}; 100};
103 101
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 666207a9c039..caa8192fff2a 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -533,6 +533,7 @@ ixgb_remove(struct pci_dev *pdev)
533 pci_release_regions(pdev); 533 pci_release_regions(pdev);
534 534
535 free_netdev(netdev); 535 free_netdev(netdev);
536 pci_disable_device(pdev);
536} 537}
537 538
538/** 539/**
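The one-line ixgb change completes the usual teardown order for a PCI network driver: everything acquired in probe is released in reverse, with pci_disable_device() last to balance the probe-time pci_enable_device(). A condensed sketch, assuming an ixgb-style probe (name illustrative):

    static void example_remove(struct pci_dev *pdev)
    {
            struct net_device *netdev = pci_get_drvdata(pdev);

            unregister_netdev(netdev);      /* detach from the stack first */
            /* ... iounmap() any mapped BARs here ... */
            pci_release_regions(pdev);      /* return I/O and memory regions */
            free_netdev(netdev);            /* drop the netdev itself ... */
            pci_disable_device(pdev);       /* ... then undo pci_enable_device() */
    }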
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c
index 8bb9ddb6dffe..0d44c6470ca3 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ixgbe/ixgbe_dcb.c
@@ -43,9 +43,12 @@
43 * ixgbe_dcb_check_config(). 43 * ixgbe_dcb_check_config().
44 */ 44 */
45s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config, 45s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
46 u8 direction) 46 int max_frame, u8 direction)
47{ 47{
48 struct tc_bw_alloc *p; 48 struct tc_bw_alloc *p;
49 int min_credit;
50 int min_multiplier;
51 int min_percent = 100;
49 s32 ret_val = 0; 52 s32 ret_val = 0;
50 /* Initialization values default for Tx settings */ 53 /* Initialization values default for Tx settings */
51 u32 credit_refill = 0; 54 u32 credit_refill = 0;
@@ -59,6 +62,31 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
59 goto out; 62 goto out;
60 } 63 }
61 64
65 min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) /
66 DCB_CREDIT_QUANTUM;
67
68 /* Find smallest link percentage */
69 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
70 p = &dcb_config->tc_config[i].path[direction];
71 bw_percent = dcb_config->bw_percentage[direction][p->bwg_id];
72 link_percentage = p->bwg_percent;
73
74 link_percentage = (link_percentage * bw_percent) / 100;
75
76 if (link_percentage && link_percentage < min_percent)
77 min_percent = link_percentage;
78 }
79
 80 /*
 81 * The ratio between traffic classes controls the bandwidth
 82 * percentages seen on the wire. To calculate this ratio we
 83 * use a multiplier. The refill credits must be larger than
 84 * the max frame size, so here we find the smallest multiplier
 85 * that makes every traffic class's refill credits exceed the
 86 * max frame size.
 87 */
88 min_multiplier = (min_credit / min_percent) + 1;
89
62 /* Find out the link percentage for each TC first */ 90 /* Find out the link percentage for each TC first */
63 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 91 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
64 p = &dcb_config->tc_config[i].path[direction]; 92 p = &dcb_config->tc_config[i].path[direction];
@@ -73,8 +101,9 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
73 /* Save link_percentage for reference */ 101 /* Save link_percentage for reference */
74 p->link_percent = (u8)link_percentage; 102 p->link_percent = (u8)link_percentage;
75 103
76 /* Calculate credit refill and save it */ 104 /* Calculate credit refill ratio using multiplier */
77 credit_refill = link_percentage * MINIMUM_CREDIT_REFILL; 105 credit_refill = min(link_percentage * min_multiplier,
106 MAX_CREDIT_REFILL);
78 p->data_credits_refill = (u16)credit_refill; 107 p->data_credits_refill = (u16)credit_refill;
79 108
80 /* Calculate maximum credit for the TC */ 109 /* Calculate maximum credit for the TC */
@@ -85,8 +114,8 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
85 * of a TC is too small, the maximum credit may not be 114 * of a TC is too small, the maximum credit may not be
86 * enough to send out a jumbo frame in data plane arbitration. 115 * enough to send out a jumbo frame in data plane arbitration.
87 */ 116 */
88 if (credit_max && (credit_max < MINIMUM_CREDIT_FOR_JUMBO)) 117 if (credit_max && (credit_max < min_credit))
89 credit_max = MINIMUM_CREDIT_FOR_JUMBO; 118 credit_max = min_credit;
90 119
91 if (direction == DCB_TX_CONFIG) { 120 if (direction == DCB_TX_CONFIG) {
92 /* 121 /*
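Worked through with the defines from ixgbe_dcb.h below, the new credit math behaves like this for a standard 1500-byte MTU (max_frame = 1518): min_credit = ceil((1518 / 2) / 64) = 12 quanta; with a smallest non-zero class share of, say, 10%, min_multiplier = 12 / 10 + 1 = 2, so that class refills min(10 * 2, 511) = 20 credits (1280 B), comfortably above half the max frame. A standalone restatement of that arithmetic (function name is illustrative):

    static int dcb_refill_credits(int max_frame, int link_percentage,
                                  int min_percent)
    {
            /* quantum-granular credit covering half the largest frame */
            int min_credit = (max_frame / 2 + DCB_CREDIT_QUANTUM - 1) /
                             DCB_CREDIT_QUANTUM;
            /* smallest scale keeping every class's refill above min_credit */
            int min_multiplier = min_credit / min_percent + 1;

            return min(link_percentage * min_multiplier, MAX_CREDIT_REFILL);
    }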
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h
index eb1059f09da0..0208a87b129e 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ixgbe/ixgbe_dcb.h
@@ -150,15 +150,14 @@ struct ixgbe_dcb_config {
150/* DCB driver APIs */ 150/* DCB driver APIs */
151 151
152/* DCB credits calculation */ 152/* DCB credits calculation */
153s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *, u8); 153s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *, int, u8);
154 154
155/* DCB hw initialization */ 155/* DCB hw initialization */
156s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *); 156s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
157 157
158/* DCB definitions for credit calculation */ 158/* DCB definitions for credit calculation */
159#define DCB_CREDIT_QUANTUM 64 /* DCB Quantum */
159#define MAX_CREDIT_REFILL 511 /* 0x1FF * 64B = 32704B */ 160#define MAX_CREDIT_REFILL 511 /* 0x1FF * 64B = 32704B */
160#define MINIMUM_CREDIT_REFILL 5 /* 5*64B = 320B */
161#define MINIMUM_CREDIT_FOR_JUMBO 145 /* 145= UpperBound((9*1024+54)/64B) for 9KB jumbo frame */
162#define DCB_MAX_TSO_SIZE (32*1024) /* MAX TSO packet size supported in DCB mode */ 161#define DCB_MAX_TSO_SIZE (32*1024) /* MAX TSO packet size supported in DCB mode */
163#define MINIMUM_CREDIT_FOR_TSO (DCB_MAX_TSO_SIZE/64 + 1) /* 513 for 32KB TSO packet */ 162#define MINIMUM_CREDIT_FOR_TSO (DCB_MAX_TSO_SIZE/64 + 1) /* 513 for 32KB TSO packet */
 164#define MAX_CREDIT 4095 /* Maximum credit supported: 256KB * 1024 / 64B */ 163#define MAX_CREDIT 4095 /* Maximum credit supported: 256KB * 1024 / 64B */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c
index 67c219f86c3a..05f224715073 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c
@@ -397,6 +397,11 @@ static s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw)
397 reg &= ~IXGBE_RTTDCS_ARBDIS; 397 reg &= ~IXGBE_RTTDCS_ARBDIS;
398 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); 398 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
399 399
400 /* Enable Security TX Buffer IFG for DCB */
401 reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
402 reg |= IXGBE_SECTX_DCB;
403 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
404
400 return 0; 405 return 0;
401} 406}
402 407
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ixgbe/ixgbe_dcb_82599.h
index 18d7fbf6c292..3841649fb954 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.h
@@ -95,6 +95,9 @@
95 95
96#define IXGBE_TXPBTHRESH_DCB 0xA /* THRESH value for DCB mode */ 96#define IXGBE_TXPBTHRESH_DCB 0xA /* THRESH value for DCB mode */
97 97
98/* SECTXMINIFG DCB */
99#define IXGBE_SECTX_DCB 0x00001F00 /* DCB TX Buffer IFG */
100
98 101
99/* DCB hardware-specific driver APIs */ 102/* DCB hardware-specific driver APIs */
100 103
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index f85631263af8..fbad4d819608 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -764,8 +764,9 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
764#ifdef IXGBE_FCOE 764#ifdef IXGBE_FCOE
765 /* adjust for FCoE Sequence Offload */ 765 /* adjust for FCoE Sequence Offload */
766 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) 766 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
767 && (skb->protocol == htons(ETH_P_FCOE)) && 767 && skb_is_gso(skb)
768 skb_is_gso(skb)) { 768 && vlan_get_protocol(skb) ==
769 htons(ETH_P_FCOE)) {
769 hlen = skb_transport_offset(skb) + 770 hlen = skb_transport_offset(skb) +
770 sizeof(struct fc_frame_header) + 771 sizeof(struct fc_frame_header) +
771 sizeof(struct fcoe_crc_eof); 772 sizeof(struct fcoe_crc_eof);
@@ -3347,6 +3348,7 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
3347static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) 3348static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3348{ 3349{
3349 struct ixgbe_hw *hw = &adapter->hw; 3350 struct ixgbe_hw *hw = &adapter->hw;
3351 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
3350 u32 txdctl; 3352 u32 txdctl;
3351 int i, j; 3353 int i, j;
3352 3354
@@ -3359,8 +3361,15 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3359 if (hw->mac.type == ixgbe_mac_82598EB) 3361 if (hw->mac.type == ixgbe_mac_82598EB)
3360 netif_set_gso_max_size(adapter->netdev, 32768); 3362 netif_set_gso_max_size(adapter->netdev, 32768);
3361 3363
3362 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG); 3364#ifdef CONFIG_FCOE
3363 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG); 3365 if (adapter->netdev->features & NETIF_F_FCOE_MTU)
3366 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
3367#endif
3368
3369 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame,
3370 DCB_TX_CONFIG);
3371 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame,
3372 DCB_RX_CONFIG);
3364 3373
3365 /* reconfigure the hardware */ 3374 /* reconfigure the hardware */
3366 ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg); 3375 ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
@@ -5815,7 +5824,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
5815 5824
5816static int ixgbe_tso(struct ixgbe_adapter *adapter, 5825static int ixgbe_tso(struct ixgbe_adapter *adapter,
5817 struct ixgbe_ring *tx_ring, struct sk_buff *skb, 5826 struct ixgbe_ring *tx_ring, struct sk_buff *skb,
5818 u32 tx_flags, u8 *hdr_len) 5827 u32 tx_flags, u8 *hdr_len, __be16 protocol)
5819{ 5828{
5820 struct ixgbe_adv_tx_context_desc *context_desc; 5829 struct ixgbe_adv_tx_context_desc *context_desc;
5821 unsigned int i; 5830 unsigned int i;
@@ -5833,7 +5842,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
5833 l4len = tcp_hdrlen(skb); 5842 l4len = tcp_hdrlen(skb);
5834 *hdr_len += l4len; 5843 *hdr_len += l4len;
5835 5844
5836 if (skb->protocol == htons(ETH_P_IP)) { 5845 if (protocol == htons(ETH_P_IP)) {
5837 struct iphdr *iph = ip_hdr(skb); 5846 struct iphdr *iph = ip_hdr(skb);
5838 iph->tot_len = 0; 5847 iph->tot_len = 0;
5839 iph->check = 0; 5848 iph->check = 0;
@@ -5872,7 +5881,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
5872 type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT | 5881 type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
5873 IXGBE_ADVTXD_DTYP_CTXT); 5882 IXGBE_ADVTXD_DTYP_CTXT);
5874 5883
5875 if (skb->protocol == htons(ETH_P_IP)) 5884 if (protocol == htons(ETH_P_IP))
5876 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; 5885 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
5877 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; 5886 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
5878 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); 5887 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
@@ -5898,16 +5907,10 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
5898 return false; 5907 return false;
5899} 5908}
5900 5909
5901static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb) 5910static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb,
5911 __be16 protocol)
5902{ 5912{
5903 u32 rtn = 0; 5913 u32 rtn = 0;
5904 __be16 protocol;
5905
5906 if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
5907 protocol = ((const struct vlan_ethhdr *)skb->data)->
5908 h_vlan_encapsulated_proto;
5909 else
5910 protocol = skb->protocol;
5911 5914
5912 switch (protocol) { 5915 switch (protocol) {
5913 case cpu_to_be16(ETH_P_IP): 5916 case cpu_to_be16(ETH_P_IP):
@@ -5935,7 +5938,7 @@ static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
5935 default: 5938 default:
5936 if (unlikely(net_ratelimit())) 5939 if (unlikely(net_ratelimit()))
5937 e_warn(probe, "partial checksum but proto=%x!\n", 5940 e_warn(probe, "partial checksum but proto=%x!\n",
5938 skb->protocol); 5941 protocol);
5939 break; 5942 break;
5940 } 5943 }
5941 5944
@@ -5944,7 +5947,8 @@ static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
5944 5947
5945static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, 5948static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
5946 struct ixgbe_ring *tx_ring, 5949 struct ixgbe_ring *tx_ring,
5947 struct sk_buff *skb, u32 tx_flags) 5950 struct sk_buff *skb, u32 tx_flags,
5951 __be16 protocol)
5948{ 5952{
5949 struct ixgbe_adv_tx_context_desc *context_desc; 5953 struct ixgbe_adv_tx_context_desc *context_desc;
5950 unsigned int i; 5954 unsigned int i;
@@ -5973,7 +5977,7 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
5973 IXGBE_ADVTXD_DTYP_CTXT); 5977 IXGBE_ADVTXD_DTYP_CTXT);
5974 5978
5975 if (skb->ip_summed == CHECKSUM_PARTIAL) 5979 if (skb->ip_summed == CHECKSUM_PARTIAL)
5976 type_tucmd_mlhl |= ixgbe_psum(adapter, skb); 5980 type_tucmd_mlhl |= ixgbe_psum(adapter, skb, protocol);
5977 5981
5978 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); 5982 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
5979 /* use index zero for tx checksum offload */ 5983 /* use index zero for tx checksum offload */
@@ -6171,7 +6175,7 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
6171} 6175}
6172 6176
6173static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, 6177static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
6174 int queue, u32 tx_flags) 6178 int queue, u32 tx_flags, __be16 protocol)
6175{ 6179{
6176 struct ixgbe_atr_input atr_input; 6180 struct ixgbe_atr_input atr_input;
6177 struct tcphdr *th; 6181 struct tcphdr *th;
@@ -6182,7 +6186,7 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
6182 u8 l4type = 0; 6186 u8 l4type = 0;
6183 6187
6184 /* Right now, we support IPv4 only */ 6188 /* Right now, we support IPv4 only */
6185 if (skb->protocol != htons(ETH_P_IP)) 6189 if (protocol != htons(ETH_P_IP))
6186 return; 6190 return;
6187 /* check if we're UDP or TCP */ 6191 /* check if we're UDP or TCP */
6188 if (iph->protocol == IPPROTO_TCP) { 6192 if (iph->protocol == IPPROTO_TCP) {
@@ -6249,10 +6253,13 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
6249{ 6253{
6250 struct ixgbe_adapter *adapter = netdev_priv(dev); 6254 struct ixgbe_adapter *adapter = netdev_priv(dev);
6251 int txq = smp_processor_id(); 6255 int txq = smp_processor_id();
6252
6253#ifdef IXGBE_FCOE 6256#ifdef IXGBE_FCOE
6254 if ((skb->protocol == htons(ETH_P_FCOE)) || 6257 __be16 protocol;
6255 (skb->protocol == htons(ETH_P_FIP))) { 6258
6259 protocol = vlan_get_protocol(skb);
6260
6261 if ((protocol == htons(ETH_P_FCOE)) ||
6262 (protocol == htons(ETH_P_FIP))) {
6256 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { 6263 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
6257 txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); 6264 txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
6258 txq += adapter->ring_feature[RING_F_FCOE].mask; 6265 txq += adapter->ring_feature[RING_F_FCOE].mask;
@@ -6295,6 +6302,9 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
6295 int tso; 6302 int tso;
6296 int count = 0; 6303 int count = 0;
6297 unsigned int f; 6304 unsigned int f;
6305 __be16 protocol;
6306
6307 protocol = vlan_get_protocol(skb);
6298 6308
6299 if (vlan_tx_tag_present(skb)) { 6309 if (vlan_tx_tag_present(skb)) {
6300 tx_flags |= vlan_tx_tag_get(skb); 6310 tx_flags |= vlan_tx_tag_get(skb);
@@ -6315,8 +6325,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
6315 /* for FCoE with DCB, we force the priority to what 6325 /* for FCoE with DCB, we force the priority to what
6316 * was specified by the switch */ 6326 * was specified by the switch */
6317 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED && 6327 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
6318 (skb->protocol == htons(ETH_P_FCOE) || 6328 (protocol == htons(ETH_P_FCOE) ||
6319 skb->protocol == htons(ETH_P_FIP))) { 6329 protocol == htons(ETH_P_FIP))) {
6320#ifdef CONFIG_IXGBE_DCB 6330#ifdef CONFIG_IXGBE_DCB
6321 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 6331 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
6322 tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK 6332 tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
@@ -6326,7 +6336,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
6326 } 6336 }
6327#endif 6337#endif
6328 /* flag for FCoE offloads */ 6338 /* flag for FCoE offloads */
6329 if (skb->protocol == htons(ETH_P_FCOE)) 6339 if (protocol == htons(ETH_P_FCOE))
6330 tx_flags |= IXGBE_TX_FLAGS_FCOE; 6340 tx_flags |= IXGBE_TX_FLAGS_FCOE;
6331 } 6341 }
6332#endif 6342#endif
@@ -6360,9 +6370,10 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
6360 tx_flags |= IXGBE_TX_FLAGS_FSO; 6370 tx_flags |= IXGBE_TX_FLAGS_FSO;
6361#endif /* IXGBE_FCOE */ 6371#endif /* IXGBE_FCOE */
6362 } else { 6372 } else {
6363 if (skb->protocol == htons(ETH_P_IP)) 6373 if (protocol == htons(ETH_P_IP))
6364 tx_flags |= IXGBE_TX_FLAGS_IPV4; 6374 tx_flags |= IXGBE_TX_FLAGS_IPV4;
6365 tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len); 6375 tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len,
6376 protocol);
6366 if (tso < 0) { 6377 if (tso < 0) {
6367 dev_kfree_skb_any(skb); 6378 dev_kfree_skb_any(skb);
6368 return NETDEV_TX_OK; 6379 return NETDEV_TX_OK;
@@ -6370,7 +6381,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
6370 6381
6371 if (tso) 6382 if (tso)
6372 tx_flags |= IXGBE_TX_FLAGS_TSO; 6383 tx_flags |= IXGBE_TX_FLAGS_TSO;
6373 else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) && 6384 else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags,
6385 protocol) &&
6374 (skb->ip_summed == CHECKSUM_PARTIAL)) 6386 (skb->ip_summed == CHECKSUM_PARTIAL))
6375 tx_flags |= IXGBE_TX_FLAGS_CSUM; 6387 tx_flags |= IXGBE_TX_FLAGS_CSUM;
6376 } 6388 }
@@ -6384,7 +6396,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
6384 test_bit(__IXGBE_FDIR_INIT_DONE, 6396 test_bit(__IXGBE_FDIR_INIT_DONE,
6385 &tx_ring->reinit_state)) { 6397 &tx_ring->reinit_state)) {
6386 ixgbe_atr(adapter, skb, tx_ring->queue_index, 6398 ixgbe_atr(adapter, skb, tx_ring->queue_index,
6387 tx_flags); 6399 tx_flags, protocol);
6388 tx_ring->atr_count = 0; 6400 tx_ring->atr_count = 0;
6389 } 6401 }
6390 } 6402 }
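All the skb->protocol comparisons above are converted to a protocol value fetched once via vlan_get_protocol(), so ETH_P_IP/ETH_P_FCOE/ETH_P_FIP checks still match when the frame carries an 802.1Q header. Its effect is roughly the open-coded logic that the ixgbe_psum() hunk deletes; a sketch for illustration only:

    #include <linux/if_vlan.h>

    static __be16 example_get_protocol(const struct sk_buff *skb)
    {
            /* for a VLAN-tagged frame, return the encapsulated EtherType
             * rather than ETH_P_8021Q */
            if (skb->protocol == htons(ETH_P_8021Q))
                    return ((const struct vlan_ethhdr *)skb->data)
                            ->h_vlan_encapsulated_proto;
            return skb->protocol;
    }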
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index d7a975ee2add..c57d9a43ceca 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -1623,12 +1623,12 @@ err_out:
1623 return rc; 1623 return rc;
1624} 1624}
1625 1625
1626#ifdef CONFIG_PM
1627static void 1626static void
1628jme_set_100m_half(struct jme_adapter *jme) 1627jme_set_100m_half(struct jme_adapter *jme)
1629{ 1628{
1630 u32 bmcr, tmp; 1629 u32 bmcr, tmp;
1631 1630
1631 jme_phy_on(jme);
1632 bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); 1632 bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
1633 tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | 1633 tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
1634 BMCR_SPEED1000 | BMCR_FULLDPLX); 1634 BMCR_SPEED1000 | BMCR_FULLDPLX);
@@ -1656,7 +1656,6 @@ jme_wait_link(struct jme_adapter *jme)
1656 phylink = jme_linkstat_from_phy(jme); 1656 phylink = jme_linkstat_from_phy(jme);
1657 } 1657 }
1658} 1658}
1659#endif
1660 1659
1661static inline void 1660static inline void
1662jme_phy_off(struct jme_adapter *jme) 1661jme_phy_off(struct jme_adapter *jme)
@@ -1664,6 +1663,21 @@ jme_phy_off(struct jme_adapter *jme)
1664 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN); 1663 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN);
1665} 1664}
1666 1665
1666static void
1667jme_powersave_phy(struct jme_adapter *jme)
1668{
1669 if (jme->reg_pmcs) {
1670 jme_set_100m_half(jme);
1671
1672 if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
1673 jme_wait_link(jme);
1674
1675 jwrite32(jme, JME_PMCS, jme->reg_pmcs);
1676 } else {
1677 jme_phy_off(jme);
1678 }
1679}
1680
1667static int 1681static int
1668jme_close(struct net_device *netdev) 1682jme_close(struct net_device *netdev)
1669{ 1683{
@@ -2941,11 +2955,7 @@ jme_init_one(struct pci_dev *pdev,
2941 * Tell stack that we are not ready to work until open() 2955 * Tell stack that we are not ready to work until open()
2942 */ 2956 */
2943 netif_carrier_off(netdev); 2957 netif_carrier_off(netdev);
2944 netif_stop_queue(netdev);
2945 2958
2946 /*
2947 * Register netdev
2948 */
2949 rc = register_netdev(netdev); 2959 rc = register_netdev(netdev);
2950 if (rc) { 2960 if (rc) {
2951 pr_err("Cannot register net device\n"); 2961 pr_err("Cannot register net device\n");
@@ -2991,6 +3001,16 @@ jme_remove_one(struct pci_dev *pdev)
2991 3001
2992} 3002}
2993 3003
3004static void
3005jme_shutdown(struct pci_dev *pdev)
3006{
3007 struct net_device *netdev = pci_get_drvdata(pdev);
3008 struct jme_adapter *jme = netdev_priv(netdev);
3009
3010 jme_powersave_phy(jme);
3011 pci_pme_active(pdev, true);
3012}
3013
2994#ifdef CONFIG_PM 3014#ifdef CONFIG_PM
2995static int 3015static int
2996jme_suspend(struct pci_dev *pdev, pm_message_t state) 3016jme_suspend(struct pci_dev *pdev, pm_message_t state)
@@ -3028,19 +3048,9 @@ jme_suspend(struct pci_dev *pdev, pm_message_t state)
3028 tasklet_hi_enable(&jme->rxempty_task); 3048 tasklet_hi_enable(&jme->rxempty_task);
3029 3049
3030 pci_save_state(pdev); 3050 pci_save_state(pdev);
3031 if (jme->reg_pmcs) { 3051 jme_powersave_phy(jme);
3032 jme_set_100m_half(jme); 3052 pci_enable_wake(jme->pdev, PCI_D3hot, true);
3033 3053 pci_set_power_state(pdev, PCI_D3hot);
3034 if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
3035 jme_wait_link(jme);
3036
3037 jwrite32(jme, JME_PMCS, jme->reg_pmcs);
3038
3039 pci_enable_wake(pdev, PCI_D3cold, true);
3040 } else {
3041 jme_phy_off(jme);
3042 }
3043 pci_set_power_state(pdev, PCI_D3cold);
3044 3054
3045 return 0; 3055 return 0;
3046} 3056}
@@ -3087,6 +3097,7 @@ static struct pci_driver jme_driver = {
3087 .suspend = jme_suspend, 3097 .suspend = jme_suspend,
3088 .resume = jme_resume, 3098 .resume = jme_resume,
3089#endif /* CONFIG_PM */ 3099#endif /* CONFIG_PM */
3100 .shutdown = jme_shutdown,
3090}; 3101};
3091 3102
3092static int __init 3103static int __init
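The jme changes factor the wake-on-LAN PHY sequence out of the suspend path so the new .shutdown hook can reuse it, keeping poweroff and kexec behavior consistent with suspend. The shutdown side, reassembled from the hunks above with commentary (the _sketch name is not from the driver):

    static void jme_shutdown_sketch(struct pci_dev *pdev)
    {
            struct net_device *netdev = pci_get_drvdata(pdev);
            struct jme_adapter *jme = netdev_priv(netdev);

            /* either park the PHY at 100M half-duplex with PMCS wake
             * events armed, or power it off entirely */
            jme_powersave_phy(jme);

            /* keep PME generation enabled so WOL works after shutdown */
            pci_pme_active(pdev, true);
    }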
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c
index 316bb70775b1..e7030ceb178b 100644
--- a/drivers/net/lib8390.c
+++ b/drivers/net/lib8390.c
@@ -1077,7 +1077,6 @@ static void __NS8390_init(struct net_device *dev, int startp)
1077 ei_outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG); 1077 ei_outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
1078 ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); 1078 ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
1079 1079
1080 netif_start_queue(dev);
1081 ei_local->tx1 = ei_local->tx2 = 0; 1080 ei_local->tx1 = ei_local->tx2 = 0;
1082 ei_local->txing = 0; 1081 ei_local->txing = 0;
1083 1082
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 4297f6e8c4bc..f69e73e2191e 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -515,14 +515,15 @@ static int macb_poll(struct napi_struct *napi, int budget)
515 (unsigned long)status, budget); 515 (unsigned long)status, budget);
516 516
517 work_done = macb_rx(bp, budget); 517 work_done = macb_rx(bp, budget);
518 if (work_done < budget) 518 if (work_done < budget) {
519 napi_complete(napi); 519 napi_complete(napi);
520 520
521 /* 521 /*
522 * We've done what we can to clean the buffers. Make sure we 522 * We've done what we can to clean the buffers. Make sure we
523 * get notified when new packets arrive. 523 * get notified when new packets arrive.
524 */ 524 */
525 macb_writel(bp, IER, MACB_RX_INT_FLAGS); 525 macb_writel(bp, IER, MACB_RX_INT_FLAGS);
526 }
526 527
527 /* TODO: Handle errors */ 528 /* TODO: Handle errors */
528 529
@@ -550,12 +551,16 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
550 } 551 }
551 552
552 if (status & MACB_RX_INT_FLAGS) { 553 if (status & MACB_RX_INT_FLAGS) {
554 /*
555 * There's no point taking any more interrupts
556 * until we have processed the buffers. The
557 * scheduling call may fail if the poll routine
558 * is already scheduled, so disable interrupts
559 * now.
560 */
561 macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
562
553 if (napi_schedule_prep(&bp->napi)) { 563 if (napi_schedule_prep(&bp->napi)) {
554 /*
555 * There's no point taking any more interrupts
556 * until we have processed the buffers
557 */
558 macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
559 dev_dbg(&bp->pdev->dev, 564 dev_dbg(&bp->pdev->dev,
560 "scheduling RX softirq\n"); 565 "scheduling RX softirq\n");
561 __napi_schedule(&bp->napi); 566 __napi_schedule(&bp->napi);
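Both macb hunks restore the canonical NAPI shape: the interrupt handler masks the RX sources unconditionally before trying to schedule (napi_schedule_prep() can return false when polling is already queued, but the mask must happen regardless), and the poll routine unmasks only when it completes under budget. A minimal sketch of the handler side, omitting the status loop of the real function:

    static irqreturn_t example_macb_irq(int irq, void *dev_id)
    {
            struct macb *bp = dev_id;
            u32 status = macb_readl(bp, ISR);

            if (status & MACB_RX_INT_FLAGS) {
                    /* mask first; macb_poll() re-enables via IER only
                     * when work_done < budget */
                    macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
                    if (napi_schedule_prep(&bp->napi))
                            __napi_schedule(&bp->napi);
            }

            return IRQ_HANDLED;
    }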
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
index 143906417048..f6e0d40cd876 100644
--- a/drivers/net/mlx4/en_main.c
+++ b/drivers/net/mlx4/en_main.c
@@ -124,6 +124,13 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
124 return 0; 124 return 0;
125} 125}
126 126
127static void *mlx4_en_get_netdev(struct mlx4_dev *dev, void *ctx, u8 port)
128{
129 struct mlx4_en_dev *endev = ctx;
130
131 return endev->pndev[port];
132}
133
127static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr, 134static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
128 enum mlx4_dev_event event, int port) 135 enum mlx4_dev_event event, int port)
129{ 136{
@@ -282,9 +289,11 @@ err_free_res:
282} 289}
283 290
284static struct mlx4_interface mlx4_en_interface = { 291static struct mlx4_interface mlx4_en_interface = {
285 .add = mlx4_en_add, 292 .add = mlx4_en_add,
286 .remove = mlx4_en_remove, 293 .remove = mlx4_en_remove,
287 .event = mlx4_en_event, 294 .event = mlx4_en_event,
295 .get_dev = mlx4_en_get_netdev,
296 .protocol = MLX4_PROTOCOL_EN,
288}; 297};
289 298
290static int __init mlx4_en_init(void) 299static int __init mlx4_en_init(void)
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 79478bd4211a..6d6806b361e3 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -69,6 +69,7 @@ static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
69 struct mlx4_en_priv *priv = netdev_priv(dev); 69 struct mlx4_en_priv *priv = netdev_priv(dev);
70 struct mlx4_en_dev *mdev = priv->mdev; 70 struct mlx4_en_dev *mdev = priv->mdev;
71 int err; 71 int err;
72 int idx;
72 73
73 if (!priv->vlgrp) 74 if (!priv->vlgrp)
74 return; 75 return;
@@ -83,7 +84,10 @@ static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
83 if (err) 84 if (err)
84 en_err(priv, "Failed configuring VLAN filter\n"); 85 en_err(priv, "Failed configuring VLAN filter\n");
85 } 86 }
87 if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
88 en_err(priv, "failed adding vlan %d\n", vid);
86 mutex_unlock(&mdev->state_lock); 89 mutex_unlock(&mdev->state_lock);
90
87} 91}
88 92
89static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) 93static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
@@ -91,6 +95,7 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
91 struct mlx4_en_priv *priv = netdev_priv(dev); 95 struct mlx4_en_priv *priv = netdev_priv(dev);
92 struct mlx4_en_dev *mdev = priv->mdev; 96 struct mlx4_en_dev *mdev = priv->mdev;
93 int err; 97 int err;
98 int idx;
94 99
95 if (!priv->vlgrp) 100 if (!priv->vlgrp)
96 return; 101 return;
@@ -101,6 +106,11 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
101 106
102 /* Remove VID from port VLAN filter */ 107 /* Remove VID from port VLAN filter */
103 mutex_lock(&mdev->state_lock); 108 mutex_lock(&mdev->state_lock);
109 if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx))
110 mlx4_unregister_vlan(mdev->dev, priv->port, idx);
111 else
112 en_err(priv, "could not find vid %d in cache\n", vid);
113
104 if (mdev->device_up && priv->port_up) { 114 if (mdev->device_up && priv->port_up) {
105 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp); 115 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
106 if (err) 116 if (err)
diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c
index aa3ef2aee5bf..7f5a3221e0c1 100644
--- a/drivers/net/mlx4/en_port.c
+++ b/drivers/net/mlx4/en_port.c
@@ -127,8 +127,8 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
127 memset(context, 0, sizeof *context); 127 memset(context, 0, sizeof *context);
128 128
129 context->base_qpn = cpu_to_be32(base_qpn); 129 context->base_qpn = cpu_to_be32(base_qpn);
130 context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | base_qpn); 130 context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_EN_SHIFT | base_qpn);
131 context->mcast = cpu_to_be32(1 << SET_PORT_PROMISC_SHIFT | base_qpn); 131 context->mcast = cpu_to_be32(1 << SET_PORT_PROMISC_MODE_SHIFT | base_qpn);
132 context->intra_no_vlan = 0; 132 context->intra_no_vlan = 0;
133 context->no_vlan = MLX4_NO_VLAN_IDX; 133 context->no_vlan = MLX4_NO_VLAN_IDX;
134 context->intra_vlan_miss = 0; 134 context->intra_vlan_miss = 0;
diff --git a/drivers/net/mlx4/en_port.h b/drivers/net/mlx4/en_port.h
index f6511aa2b7df..092e814b1981 100644
--- a/drivers/net/mlx4/en_port.h
+++ b/drivers/net/mlx4/en_port.h
@@ -36,7 +36,8 @@
36 36
37 37
38#define SET_PORT_GEN_ALL_VALID 0x7 38#define SET_PORT_GEN_ALL_VALID 0x7
39#define SET_PORT_PROMISC_SHIFT 31 39#define SET_PORT_PROMISC_EN_SHIFT 31
40#define SET_PORT_PROMISC_MODE_SHIFT 30
40 41
41enum { 42enum {
42 MLX4_CMD_SET_VLAN_FLTR = 0x47, 43 MLX4_CMD_SET_VLAN_FLTR = 0x47,
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index b716e1a1b298..b68eee2414c2 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -98,7 +98,8 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags)
98 [20] = "Address vector port checking support", 98 [20] = "Address vector port checking support",
99 [21] = "UD multicast support", 99 [21] = "UD multicast support",
100 [24] = "Demand paging support", 100 [24] = "Demand paging support",
101 [25] = "Router support" 101 [25] = "Router support",
102 [30] = "IBoE support"
102 }; 103 };
103 int i; 104 int i;
104 105
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c
index b07e4dee80aa..02393fdf44c1 100644
--- a/drivers/net/mlx4/icm.c
+++ b/drivers/net/mlx4/icm.c
@@ -210,38 +210,12 @@ static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
210 return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt); 210 return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
211} 211}
212 212
213int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count) 213static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
214{ 214{
215 return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM, 215 return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
216 MLX4_CMD_TIME_CLASS_B); 216 MLX4_CMD_TIME_CLASS_B);
217} 217}
218 218
219int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt)
220{
221 struct mlx4_cmd_mailbox *mailbox;
222 __be64 *inbox;
223 int err;
224
225 mailbox = mlx4_alloc_cmd_mailbox(dev);
226 if (IS_ERR(mailbox))
227 return PTR_ERR(mailbox);
228 inbox = mailbox->buf;
229
230 inbox[0] = cpu_to_be64(virt);
231 inbox[1] = cpu_to_be64(dma_addr);
232
233 err = mlx4_cmd(dev, mailbox->dma, 1, 0, MLX4_CMD_MAP_ICM,
234 MLX4_CMD_TIME_CLASS_B);
235
236 mlx4_free_cmd_mailbox(dev, mailbox);
237
238 if (!err)
239 mlx4_dbg(dev, "Mapped page at %llx to %llx for ICM.\n",
240 (unsigned long long) dma_addr, (unsigned long long) virt);
241
242 return err;
243}
244
245int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm) 219int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
246{ 220{
247 return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1); 221 return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
diff --git a/drivers/net/mlx4/icm.h b/drivers/net/mlx4/icm.h
index ab56a2f89b65..b10c07a1dc1a 100644
--- a/drivers/net/mlx4/icm.h
+++ b/drivers/net/mlx4/icm.h
@@ -128,8 +128,6 @@ static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
128 return sg_dma_len(&iter->chunk->mem[iter->page_idx]); 128 return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
129} 129}
130 130
131int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count);
132int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt);
133int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm); 131int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
134int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev); 132int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);
135 133
diff --git a/drivers/net/mlx4/intf.c b/drivers/net/mlx4/intf.c
index 555067802751..73c94fcdfddf 100644
--- a/drivers/net/mlx4/intf.c
+++ b/drivers/net/mlx4/intf.c
@@ -161,3 +161,24 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
161 161
162 mutex_unlock(&intf_mutex); 162 mutex_unlock(&intf_mutex);
163} 163}
164
165void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port)
166{
167 struct mlx4_priv *priv = mlx4_priv(dev);
168 struct mlx4_device_context *dev_ctx;
169 unsigned long flags;
170 void *result = NULL;
171
172 spin_lock_irqsave(&priv->ctx_lock, flags);
173
174 list_for_each_entry(dev_ctx, &priv->ctx_list, list)
175 if (dev_ctx->intf->protocol == proto && dev_ctx->intf->get_dev) {
176 result = dev_ctx->intf->get_dev(dev, dev_ctx->context, port);
177 break;
178 }
179
180 spin_unlock_irqrestore(&priv->ctx_lock, flags);
181
182 return result;
183}
184EXPORT_SYMBOL_GPL(mlx4_get_protocol_dev);
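mlx4_get_protocol_dev() gives sibling drivers a lock-protected way to resolve the device object another protocol driver registered for a port; with the .get_dev/.protocol fields added to mlx4_en_interface above, an RDMA-side caller could look up the Ethernet netdev like this (caller variables are hypothetical):

    struct net_device *ndev;

    /* returns mlx4_en's pndev[port], or NULL if no MLX4_PROTOCOL_EN
     * interface is currently bound to this core device */
    ndev = mlx4_get_protocol_dev(mdev->dev, MLX4_PROTOCOL_EN, port);
    if (!ndev)
            return -ENODEV;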
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 569fa3df381f..782f11d8fa71 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -103,7 +103,7 @@ MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
103 103
104static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG); 104static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
105module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444); 105module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
106MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)"); 106MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
107 107
108int mlx4_check_port_params(struct mlx4_dev *dev, 108int mlx4_check_port_params(struct mlx4_dev *dev,
109 enum mlx4_port_type *port_type) 109 enum mlx4_port_type *port_type)
@@ -1310,7 +1310,7 @@ static int __init mlx4_verify_params(void)
1310 return -1; 1310 return -1;
1311 } 1311 }
1312 1312
1313 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) { 1313 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
1314 pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg); 1314 pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
1315 return -1; 1315 return -1;
1316 } 1316 }
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index 1fc16ab7ad2f..dfed6a07c2d7 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -475,6 +475,7 @@ struct mlx4_en_priv {
475 char *mc_addrs; 475 char *mc_addrs;
476 int mc_addrs_cnt; 476 int mc_addrs_cnt;
477 struct mlx4_en_stat_out_mbox hw_stats; 477 struct mlx4_en_stat_out_mbox hw_stats;
478 int vids[128];
478}; 479};
479 480
480 481
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c
index 606aa58afdea..451339559bdc 100644
--- a/drivers/net/mlx4/port.c
+++ b/drivers/net/mlx4/port.c
@@ -111,6 +111,12 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index)
111 goto out; 111 goto out;
112 } 112 }
113 } 113 }
114
115 if (free < 0) {
116 err = -ENOMEM;
117 goto out;
118 }
119
114 mlx4_dbg(dev, "Free MAC index is %d\n", free); 120 mlx4_dbg(dev, "Free MAC index is %d\n", free);
115 121
116 if (table->total == table->max) { 122 if (table->total == table->max) {
@@ -182,6 +188,25 @@ static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
182 return err; 188 return err;
183} 189}
184 190
191int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
192{
193 struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
194 int i;
195
196 for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) {
197 if (table->refs[i] &&
198 (vid == (MLX4_VLAN_MASK &
199 be32_to_cpu(table->entries[i])))) {
 200 /* VLAN already registered, return its cached index */
201 *idx = i;
202 return 0;
203 }
204 }
205
206 return -ENOENT;
207}
208EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);
209
185int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index) 210int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
186{ 211{
187 struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; 212 struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
@@ -205,6 +230,11 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
205 } 230 }
206 } 231 }
207 232
233 if (free < 0) {
234 err = -ENOMEM;
235 goto out;
236 }
237
208 if (table->total == table->max) { 238 if (table->total == table->max) {
209 /* No free vlan entries */ 239 /* No free vlan entries */
210 err = -ENOSPC; 240 err = -ENOSPC;
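Together with the en_netdev.c hunks earlier, the port-table changes define the full VLAN life cycle: mlx4_register_vlan() on rx_add_vid (now failing with -ENOMEM instead of indexing with free == -1 when the scan finds no slot), and mlx4_find_cached_vlan() plus mlx4_unregister_vlan() on rx_kill_vid. A usage sketch, with the surrounding priv/mdev/vid variables assumed:

    int idx;

    /* add path: claim (or refcount) a table entry for this vid */
    if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
            en_err(priv, "failed adding vlan %d\n", vid);

    /* remove path: translate vid back to its table index first */
    if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx))
            mlx4_unregister_vlan(mdev->dev, priv->port, idx);
    else
            en_err(priv, "could not find vid %d in cache\n", vid);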
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
index 12612127a087..f7d06cbc70ae 100644
--- a/drivers/net/netxen/netxen_nic_ctx.c
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -255,19 +255,6 @@ out_free_rq:
255} 255}
256 256
257static void 257static void
258nx_fw_cmd_reset_ctx(struct netxen_adapter *adapter)
259{
260
261 netxen_issue_cmd(adapter, adapter->ahw.pci_func, NXHAL_VERSION,
262 adapter->ahw.pci_func, NX_DESTROY_CTX_RESET, 0,
263 NX_CDRP_CMD_DESTROY_RX_CTX);
264
265 netxen_issue_cmd(adapter, adapter->ahw.pci_func, NXHAL_VERSION,
266 adapter->ahw.pci_func, NX_DESTROY_CTX_RESET, 0,
267 NX_CDRP_CMD_DESTROY_TX_CTX);
268}
269
270static void
271nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter) 258nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter)
272{ 259{
273 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; 260 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
@@ -698,8 +685,6 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
698 if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) { 685 if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
699 if (test_and_set_bit(__NX_FW_ATTACHED, &adapter->state)) 686 if (test_and_set_bit(__NX_FW_ATTACHED, &adapter->state))
700 goto done; 687 goto done;
701 if (reset_devices)
702 nx_fw_cmd_reset_ctx(adapter);
703 err = nx_fw_cmd_create_rx_ctx(adapter); 688 err = nx_fw_cmd_create_rx_ctx(adapter);
704 if (err) 689 if (err)
705 goto err_out_free; 690 goto err_out_free;
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 50820beac3aa..e1d30d7f2071 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -41,9 +41,6 @@
41MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Converged Ethernet Driver"); 41MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Converged Ethernet Driver");
42MODULE_LICENSE("GPL"); 42MODULE_LICENSE("GPL");
43MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID); 43MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
44MODULE_FIRMWARE(NX_P2_MN_ROMIMAGE_NAME);
45MODULE_FIRMWARE(NX_P3_CT_ROMIMAGE_NAME);
46MODULE_FIRMWARE(NX_P3_MN_ROMIMAGE_NAME);
47MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME); 44MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME);
48 45
49char netxen_nic_driver_name[] = "netxen_nic"; 46char netxen_nic_driver_name[] = "netxen_nic";
@@ -1240,7 +1237,6 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
1240 dev_warn(&pdev->dev, "failed to read mac addr\n"); 1237 dev_warn(&pdev->dev, "failed to read mac addr\n");
1241 1238
1242 netif_carrier_off(netdev); 1239 netif_carrier_off(netdev);
1243 netif_stop_queue(netdev);
1244 1240
1245 err = register_netdev(netdev); 1241 err = register_netdev(netdev);
1246 if (err) { 1242 if (err) {
@@ -1356,6 +1352,13 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1356 break; 1352 break;
1357 } 1353 }
1358 1354
1355 if (reset_devices) {
1356 if (adapter->portnum == 0) {
1357 NXWR32(adapter, NX_CRB_DEV_REF_COUNT, 0);
1358 adapter->need_fw_reset = 1;
1359 }
1360 }
1361
1359 err = netxen_start_firmware(adapter); 1362 err = netxen_start_firmware(adapter);
1360 if (err) 1363 if (err)
1361 goto err_out_decr_ref; 1364 goto err_out_decr_ref;
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index d2e166e29dda..8a4d19e5de06 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -111,13 +111,14 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id);
111 111
112typedef struct axnet_dev_t { 112typedef struct axnet_dev_t {
113 struct pcmcia_device *p_dev; 113 struct pcmcia_device *p_dev;
114 caddr_t base; 114 caddr_t base;
115 struct timer_list watchdog; 115 struct timer_list watchdog;
116 int stale, fast_poll; 116 int stale, fast_poll;
117 u_short link_status; 117 u_short link_status;
118 u_char duplex_flag; 118 u_char duplex_flag;
119 int phy_id; 119 int phy_id;
120 int flags; 120 int flags;
121 int active_low;
121} axnet_dev_t; 122} axnet_dev_t;
122 123
123static inline axnet_dev_t *PRIV(struct net_device *dev) 124static inline axnet_dev_t *PRIV(struct net_device *dev)
@@ -322,6 +323,8 @@ static int axnet_config(struct pcmcia_device *link)
322 if (info->flags & IS_AX88790) 323 if (info->flags & IS_AX88790)
323 outb(0x10, dev->base_addr + AXNET_GPIO); /* select Internal PHY */ 324 outb(0x10, dev->base_addr + AXNET_GPIO); /* select Internal PHY */
324 325
326 info->active_low = 0;
327
325 for (i = 0; i < 32; i++) { 328 for (i = 0; i < 32; i++) {
326 j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1); 329 j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
327 j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2); 330 j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2);
@@ -329,15 +332,18 @@ static int axnet_config(struct pcmcia_device *link)
329 if ((j != 0) && (j != 0xffff)) break; 332 if ((j != 0) && (j != 0xffff)) break;
330 } 333 }
331 334
332 /* Maybe PHY is in power down mode. (PPD_SET = 1)
333 Bit 2 of CCSR is active low. */
334 if (i == 32) { 335 if (i == 32) {
 336 /* The PHY may be in power-down mode (PPD_SET = 1);
 337 bit 2 of CCSR is active low. */
335 pcmcia_write_config_byte(link, CISREG_CCSR, 0x04); 338 pcmcia_write_config_byte(link, CISREG_CCSR, 0x04);
336 for (i = 0; i < 32; i++) { 339 for (i = 0; i < 32; i++) {
337 j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1); 340 j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
338 j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2); 341 j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2);
339 if (j == j2) continue; 342 if (j == j2) continue;
340 if ((j != 0) && (j != 0xffff)) break; 343 if ((j != 0) && (j != 0xffff)) {
344 info->active_low = 1;
345 break;
346 }
341 } 347 }
342 } 348 }
343 349
@@ -383,8 +389,12 @@ static int axnet_suspend(struct pcmcia_device *link)
383static int axnet_resume(struct pcmcia_device *link) 389static int axnet_resume(struct pcmcia_device *link)
384{ 390{
385 struct net_device *dev = link->priv; 391 struct net_device *dev = link->priv;
392 axnet_dev_t *info = PRIV(dev);
386 393
387 if (link->open) { 394 if (link->open) {
395 if (info->active_low == 1)
396 pcmcia_write_config_byte(link, CISREG_CCSR, 0x04);
397
388 axnet_reset_8390(dev); 398 axnet_reset_8390(dev);
389 AX88190_init(dev, 1); 399 AX88190_init(dev, 1);
390 netif_device_attach(dev); 400 netif_device_attach(dev);
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 03096c80103d..d05c44692f08 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1536,6 +1536,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
1536 PCMCIA_DEVICE_PROD_ID12("COMPU-SHACK", "FASTline PCMCIA 10/100 Fast-Ethernet", 0xfa2e424d, 0x3953d9b9), 1536 PCMCIA_DEVICE_PROD_ID12("COMPU-SHACK", "FASTline PCMCIA 10/100 Fast-Ethernet", 0xfa2e424d, 0x3953d9b9),
1537 PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722), 1537 PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722),
1538 PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 0xa51564a2), 1538 PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 0xa51564a2),
1539 PCMCIA_DEVICE_PROD_ID12("corega", "Ether CF-TD", 0x0a21501a, 0x6589340a),
1539 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd), 1540 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd),
1540 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d), 1541 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d),
1541 PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d), 1542 PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d),
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index e2afdce0a437..f0bd1a1aba3a 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -74,8 +74,8 @@
74#define MII_88E1121_PHY_MSCR_TX_DELAY BIT(4) 74#define MII_88E1121_PHY_MSCR_TX_DELAY BIT(4)
75#define MII_88E1121_PHY_MSCR_DELAY_MASK (~(0x3 << 4)) 75#define MII_88E1121_PHY_MSCR_DELAY_MASK (~(0x3 << 4))
76 76
77#define MII_88EC048_PHY_MSCR1_REG 16 77#define MII_88E1318S_PHY_MSCR1_REG 16
78#define MII_88EC048_PHY_MSCR1_PAD_ODD BIT(6) 78#define MII_88E1318S_PHY_MSCR1_PAD_ODD BIT(6)
79 79
80#define MII_88E1121_PHY_LED_CTRL 16 80#define MII_88E1121_PHY_LED_CTRL 16
81#define MII_88E1121_PHY_LED_PAGE 3 81#define MII_88E1121_PHY_LED_PAGE 3
@@ -240,7 +240,7 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
240 return err; 240 return err;
241} 241}
242 242
243static int m88ec048_config_aneg(struct phy_device *phydev) 243static int m88e1318_config_aneg(struct phy_device *phydev)
244{ 244{
245 int err, oldpage, mscr; 245 int err, oldpage, mscr;
246 246
@@ -251,10 +251,10 @@ static int m88ec048_config_aneg(struct phy_device *phydev)
251 if (err < 0) 251 if (err < 0)
252 return err; 252 return err;
253 253
254 mscr = phy_read(phydev, MII_88EC048_PHY_MSCR1_REG); 254 mscr = phy_read(phydev, MII_88E1318S_PHY_MSCR1_REG);
255 mscr |= MII_88EC048_PHY_MSCR1_PAD_ODD; 255 mscr |= MII_88E1318S_PHY_MSCR1_PAD_ODD;
256 256
257 err = phy_write(phydev, MII_88E1121_PHY_MSCR_REG, mscr); 257 err = phy_write(phydev, MII_88E1318S_PHY_MSCR1_REG, mscr);
258 if (err < 0) 258 if (err < 0)
259 return err; 259 return err;
260 260
@@ -659,12 +659,12 @@ static struct phy_driver marvell_drivers[] = {
659 .driver = { .owner = THIS_MODULE }, 659 .driver = { .owner = THIS_MODULE },
660 }, 660 },
661 { 661 {
662 .phy_id = MARVELL_PHY_ID_88EC048, 662 .phy_id = MARVELL_PHY_ID_88E1318S,
663 .phy_id_mask = MARVELL_PHY_ID_MASK, 663 .phy_id_mask = MARVELL_PHY_ID_MASK,
664 .name = "Marvell 88EC048", 664 .name = "Marvell 88E1318S",
665 .features = PHY_GBIT_FEATURES, 665 .features = PHY_GBIT_FEATURES,
666 .flags = PHY_HAS_INTERRUPT, 666 .flags = PHY_HAS_INTERRUPT,
667 .config_aneg = &m88ec048_config_aneg, 667 .config_aneg = &m88e1318_config_aneg,
668 .read_status = &marvell_read_status, 668 .read_status = &marvell_read_status,
669 .ack_interrupt = &marvell_ack_interrupt, 669 .ack_interrupt = &marvell_ack_interrupt,
670 .config_intr = &marvell_config_intr, 670 .config_intr = &marvell_config_intr,
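Besides the 88EC048 to 88E1318S rename, this hunk fixes a real bug: the modified MSCR1 value was previously written back to MII_88E1121_PHY_MSCR_REG instead of MII_88E1318S_PHY_MSCR1_REG. The surrounding code (not shown) selects a register page before the access; a generic sketch of that paged read-modify-write, where page register 22 and page 2 are assumptions about these Marvell PHYs:

    static int example_set_pad_odd(struct phy_device *phydev)
    {
            int oldpage, mscr, err;

            oldpage = phy_read(phydev, 22);         /* assumed page register */
            if (oldpage < 0)
                    return oldpage;

            err = phy_write(phydev, 22, 2);         /* assumed MSCR page */
            if (err < 0)
                    return err;

            mscr = phy_read(phydev, MII_88E1318S_PHY_MSCR1_REG);
            if (mscr < 0) {
                    err = mscr;
                    goto restore;
            }

            /* read-modify-write: same register for read and write-back */
            err = phy_write(phydev, MII_88E1318S_PHY_MSCR1_REG,
                            mscr | MII_88E1318S_PHY_MSCR1_PAD_ODD);

    restore:
            phy_write(phydev, 22, oldpage);         /* restore saved page */
            return err;
    }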
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 1bb16cb79433..7670aac0e93f 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -65,7 +65,7 @@ EXPORT_SYMBOL(phy_print_status);
65 * 65 *
66 * Returns 0 on success on < 0 on error. 66 * Returns 0 on success on < 0 on error.
67 */ 67 */
68int phy_clear_interrupt(struct phy_device *phydev) 68static int phy_clear_interrupt(struct phy_device *phydev)
69{ 69{
70 int err = 0; 70 int err = 0;
71 71
@@ -82,7 +82,7 @@ int phy_clear_interrupt(struct phy_device *phydev)
82 * 82 *
83 * Returns 0 on success on < 0 on error. 83 * Returns 0 on success on < 0 on error.
84 */ 84 */
85int phy_config_interrupt(struct phy_device *phydev, u32 interrupts) 85static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
86{ 86{
87 int err = 0; 87 int err = 0;
88 88
@@ -208,7 +208,7 @@ static inline int phy_find_valid(int idx, u32 features)
208 * duplexes. Drop down by one in this order: 1000/FULL, 208 * duplexes. Drop down by one in this order: 1000/FULL,
209 * 1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF. 209 * 1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
210 */ 210 */
211void phy_sanitize_settings(struct phy_device *phydev) 211static void phy_sanitize_settings(struct phy_device *phydev)
212{ 212{
213 u32 features = phydev->supported; 213 u32 features = phydev->supported;
214 int idx; 214 int idx;
@@ -223,7 +223,6 @@ void phy_sanitize_settings(struct phy_device *phydev)
223 phydev->speed = settings[idx].speed; 223 phydev->speed = settings[idx].speed;
224 phydev->duplex = settings[idx].duplex; 224 phydev->duplex = settings[idx].duplex;
225} 225}
226EXPORT_SYMBOL(phy_sanitize_settings);
227 226
228/** 227/**
229 * phy_ethtool_sset - generic ethtool sset function, handles all the details 228 * phy_ethtool_sset - generic ethtool sset function, handles all the details
@@ -532,7 +531,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
532 * phy_enable_interrupts - Enable the interrupts from the PHY side 531 * phy_enable_interrupts - Enable the interrupts from the PHY side
533 * @phydev: target phy_device struct 532 * @phydev: target phy_device struct
534 */ 533 */
535int phy_enable_interrupts(struct phy_device *phydev) 534static int phy_enable_interrupts(struct phy_device *phydev)
536{ 535{
537 int err; 536 int err;
538 537
@@ -545,13 +544,12 @@ int phy_enable_interrupts(struct phy_device *phydev)
545 544
546 return err; 545 return err;
547} 546}
548EXPORT_SYMBOL(phy_enable_interrupts);
549 547
550/** 548/**
551 * phy_disable_interrupts - Disable the PHY interrupts from the PHY side 549 * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
552 * @phydev: target phy_device struct 550 * @phydev: target phy_device struct
553 */ 551 */
554int phy_disable_interrupts(struct phy_device *phydev) 552static int phy_disable_interrupts(struct phy_device *phydev)
555{ 553{
556 int err; 554 int err;
557 555
@@ -574,7 +572,6 @@ phy_err:
574 572
575 return err; 573 return err;
576} 574}
577EXPORT_SYMBOL(phy_disable_interrupts);
578 575
579/** 576/**
580 * phy_start_interrupts - request and enable interrupts for a PHY device 577 * phy_start_interrupts - request and enable interrupts for a PHY device
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 16ddc77313cb..993c52c82aeb 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -57,6 +57,9 @@ extern void mdio_bus_exit(void);
57static LIST_HEAD(phy_fixup_list); 57static LIST_HEAD(phy_fixup_list);
58static DEFINE_MUTEX(phy_fixup_lock); 58static DEFINE_MUTEX(phy_fixup_lock);
59 59
60static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
61 u32 flags, phy_interface_t interface);
62
60/* 63/*
61 * Creates a new phy_fixup and adds it to the list 64 * Creates a new phy_fixup and adds it to the list
62 * @bus_id: A string which matches phydev->dev.bus_id (or PHY_ANY_ID) 65 * @bus_id: A string which matches phydev->dev.bus_id (or PHY_ANY_ID)
@@ -146,7 +149,8 @@ int phy_scan_fixups(struct phy_device *phydev)
 }
 EXPORT_SYMBOL(phy_scan_fixups);
 
-struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
+static struct phy_device* phy_device_create(struct mii_bus *bus,
+					    int addr, int phy_id)
 {
 	struct phy_device *dev;
 
@@ -193,7 +197,6 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
 
 	return dev;
 }
-EXPORT_SYMBOL(phy_device_create);
 
 /**
  * get_phy_id - reads the specified addr for its ID.
@@ -316,7 +319,7 @@ EXPORT_SYMBOL(phy_find_first);
  * If you want to monitor your own link state, don't call
  * this function.
  */
-void phy_prepare_link(struct phy_device *phydev,
+static void phy_prepare_link(struct phy_device *phydev,
 		      void (*handler)(struct net_device *))
 {
 	phydev->adjust_link = handler;
@@ -435,8 +438,8 @@ int phy_init_hw(struct phy_device *phydev)
  * the attaching device, and given a callback for link status
  * change. The phy_device is returned to the attaching driver.
  */
-int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
-		      u32 flags, phy_interface_t interface)
+static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
+			     u32 flags, phy_interface_t interface)
 {
 	struct device *d = &phydev->dev;
 
@@ -473,7 +476,6 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
  * (dev_flags and interface) */
 	return phy_init_hw(phydev);
 }
-EXPORT_SYMBOL(phy_attach_direct);
 
 /**
  * phy_attach - attach a network device to a particular PHY device
@@ -540,7 +542,7 @@ EXPORT_SYMBOL(phy_detach);
  * what is supported. Returns < 0 on error, 0 if the PHY's advertisement
  * hasn't changed, and > 0 if it has changed.
  */
-int genphy_config_advert(struct phy_device *phydev)
+static int genphy_config_advert(struct phy_device *phydev)
 {
 	u32 advertise;
 	int oldadv, adv;
@@ -605,7 +607,6 @@ int genphy_config_advert(struct phy_device *phydev)
 
 	return changed;
 }
-EXPORT_SYMBOL(genphy_config_advert);
 
 /**
  * genphy_setup_forced - configures/forces speed/duplex from @phydev
@@ -615,7 +616,7 @@ EXPORT_SYMBOL(genphy_config_advert);
  * to the values in phydev. Assumes that the values are valid.
  * Please see phy_sanitize_settings().
  */
-int genphy_setup_forced(struct phy_device *phydev)
+static int genphy_setup_forced(struct phy_device *phydev)
 {
 	int err;
 	int ctl = 0;
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 26c37d3a5868..8ecc170c9b74 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -146,11 +146,13 @@
 #define MAX_CMD_DESCRIPTORS		1024
 #define MAX_RCV_DESCRIPTORS_1G		4096
 #define MAX_RCV_DESCRIPTORS_10G	8192
+#define MAX_RCV_DESCRIPTORS_VF		2048
 #define MAX_JUMBO_RCV_DESCRIPTORS_1G	512
 #define MAX_JUMBO_RCV_DESCRIPTORS_10G	1024
 
 #define DEFAULT_RCV_DESCRIPTORS_1G	2048
 #define DEFAULT_RCV_DESCRIPTORS_10G	4096
+#define DEFAULT_RCV_DESCRIPTORS_VF	1024
 #define MAX_RDS_RINGS			2
 
 #define get_next_index(index, length)	\
@@ -942,6 +944,7 @@ struct qlcnic_ipaddr {
 #define QLCNIC_LOOPBACK_TEST		2
 
 #define QLCNIC_FILTER_AGE	80
+#define QLCNIC_READD_AGE	20
 #define QLCNIC_LB_MAX_FILTERS	64
 
 struct qlcnic_filter {
@@ -970,6 +973,8 @@ struct qlcnic_adapter {
 	u16 num_txd;
 	u16 num_rxd;
 	u16 num_jumbo_rxd;
+	u16 max_rxd;
+	u16 max_jumbo_rxd;
 
 	u8 max_rds_rings;
 	u8 max_sds_rings;
@@ -1129,7 +1134,7 @@ struct qlcnic_eswitch {
 #define MAX_RX_QUEUES		4
 #define DEFAULT_MAC_LEARN	1
 
-#define IS_VALID_VLAN(vlan)	(vlan >= MIN_VLAN_ID && vlan <= MAX_VLAN_ID)
+#define IS_VALID_VLAN(vlan)	(vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID)
 #define IS_VALID_BW(bw)		(bw >= MIN_BW && bw <= MAX_BW)
 #define IS_VALID_TX_QUEUES(que)	(que > 0 && que <= MAX_TX_QUEUES)
 #define IS_VALID_RX_QUEUES(que)	(que > 0 && que <= MAX_RX_QUEUES)
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index 25e93a53fca0..ec21d24015c4 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -437,14 +437,8 @@ qlcnic_get_ringparam(struct net_device *dev,
 	ring->rx_jumbo_pending = adapter->num_jumbo_rxd;
 	ring->tx_pending = adapter->num_txd;
 
-	if (adapter->ahw.port_type == QLCNIC_GBE) {
-		ring->rx_max_pending = MAX_RCV_DESCRIPTORS_1G;
-		ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_1G;
-	} else {
-		ring->rx_max_pending = MAX_RCV_DESCRIPTORS_10G;
-		ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_10G;
-	}
-
+	ring->rx_max_pending = adapter->max_rxd;
+	ring->rx_jumbo_max_pending = adapter->max_jumbo_rxd;
 	ring->tx_max_pending = MAX_CMD_DESCRIPTORS;
 
 	ring->rx_mini_max_pending = 0;
@@ -472,24 +466,17 @@ qlcnic_set_ringparam(struct net_device *dev,
 		struct ethtool_ringparam *ring)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(dev);
-	u16 max_rcv_desc = MAX_RCV_DESCRIPTORS_10G;
-	u16 max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G;
 	u16 num_rxd, num_jumbo_rxd, num_txd;
 
-
 	if (ring->rx_mini_pending)
 		return -EOPNOTSUPP;
 
-	if (adapter->ahw.port_type == QLCNIC_GBE) {
-		max_rcv_desc = MAX_RCV_DESCRIPTORS_1G;
-		max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G;
-	}
-
 	num_rxd = qlcnic_validate_ringparam(ring->rx_pending,
-			MIN_RCV_DESCRIPTORS, max_rcv_desc, "rx");
+			MIN_RCV_DESCRIPTORS, adapter->max_rxd, "rx");
 
 	num_jumbo_rxd = qlcnic_validate_ringparam(ring->rx_jumbo_pending,
-			MIN_JUMBO_DESCRIPTORS, max_jumbo_desc, "rx jumbo");
+			MIN_JUMBO_DESCRIPTORS, adapter->max_jumbo_rxd,
+			"rx jumbo");
 
 	num_txd = qlcnic_validate_ringparam(ring->tx_pending,
 		MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx");
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index f047c7c48314..a3dcd04be22f 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -656,13 +656,23 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
 
 	dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
 			fw_major, fw_minor, fw_build);
-
 	if (adapter->ahw.port_type == QLCNIC_XGBE) {
-		adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
+		if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
+			adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
+			adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
+		} else {
+			adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
+			adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
+		}
+
 		adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+		adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+
 	} else if (adapter->ahw.port_type == QLCNIC_GBE) {
 		adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
 		adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+		adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+		adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
 	}
 
 	adapter->msix_supported = !!use_msi_x;
@@ -1440,7 +1450,6 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
 	netdev->irq = adapter->msix_entries[0].vector;
 
 	netif_carrier_off(netdev);
-	netif_stop_queue(netdev);
 
 	err = register_netdev(netdev);
 	if (err) {
@@ -1860,6 +1869,11 @@ qlcnic_send_filter(struct qlcnic_adapter *adapter,
 	hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
 		if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
 		    tmp_fil->vlan_id == vlan_id) {
+
+			if (jiffies >
+			    (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
+				qlcnic_change_filter(adapter, src_addr, vlan_id,
+								tx_ring);
 			tmp_fil->ftime = jiffies;
 			return;
 		}
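
The QLCNIC_READD_AGE logic above re-programs a MAC filter once its last refresh is older than a threshold. The raw comparison it uses (jiffies > age * HZ + ftime) works, though the kernel's overflow-safe idiom for comparing jiffies is time_after(); a small sketch of that variant, with an illustrative struct standing in for the driver's filter entry:

#include <linux/jiffies.h>
#include <linux/types.h>

#define ENTRY_READD_AGE	20	/* seconds; mirrors QLCNIC_READD_AGE */

struct lb_filter {
	unsigned long ftime;	/* jiffies at last refresh */
};

static bool filter_needs_readd(const struct lb_filter *fil)
{
	/* overflow-safe: true once the entry is older than the threshold */
	return time_after(jiffies, fil->ftime + ENTRY_READD_AGE * HZ);
}
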
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index a478786840a6..22821398fc63 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -2226,7 +2226,6 @@ int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
 int ql_core_dump(struct ql_adapter *qdev,
 		 struct ql_mpi_coredump *mpi_coredump);
 int ql_mb_about_fw(struct ql_adapter *qdev);
-int ql_wol(struct ql_adapter *qdev);
 int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
 int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
 int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config);
@@ -2243,16 +2242,13 @@ netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
 void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
 int ql_own_firmware(struct ql_adapter *qdev);
 int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
-void qlge_set_multicast_list(struct net_device *ndev);
 
-#if 1
-#define QL_ALL_DUMP
-#define QL_REG_DUMP
-#define QL_DEV_DUMP
-#define QL_CB_DUMP
+/* #define QL_ALL_DUMP */
+/* #define QL_REG_DUMP */
+/* #define QL_DEV_DUMP */
+/* #define QL_CB_DUMP */
 /* #define QL_IB_DUMP */
 /* #define QL_OB_DUMP */
-#endif
 
 #ifdef QL_REG_DUMP
 extern void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index ba0053d8515e..c30e0fe55a31 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -94,6 +94,9 @@ static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
 
 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
 
+static int ql_wol(struct ql_adapter *qdev);
+static void qlge_set_multicast_list(struct net_device *ndev);
+
 /* This hardware semaphore causes exclusive access to
  * resources shared between the NIC driver, MPI firmware,
  * FCOE firmware and the FC driver.
@@ -2382,6 +2385,20 @@ static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
 
 }
 
+static void qlge_restore_vlan(struct ql_adapter *qdev)
+{
+	qlge_vlan_rx_register(qdev->ndev, qdev->vlgrp);
+
+	if (qdev->vlgrp) {
+		u16 vid;
+		for (vid = 0; vid < VLAN_N_VID; vid++) {
+			if (!vlan_group_get_device(qdev->vlgrp, vid))
+				continue;
+			qlge_vlan_rx_add_vid(qdev->ndev, vid);
+		}
+	}
+}
+
 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
 {
@@ -3842,7 +3859,7 @@ static void ql_display_dev_info(struct net_device *ndev)
 		 "MAC address %pM\n", ndev->dev_addr);
 }
 
-int ql_wol(struct ql_adapter *qdev)
+static int ql_wol(struct ql_adapter *qdev)
 {
 	int status = 0;
 	u32 wol = MB_WOL_DISABLE;
@@ -3957,6 +3974,9 @@ static int ql_adapter_up(struct ql_adapter *qdev)
 	clear_bit(QL_PROMISCUOUS, &qdev->flags);
 	qlge_set_multicast_list(qdev->ndev);
 
+	/* Restore vlan setting. */
+	qlge_restore_vlan(qdev);
+
 	ql_enable_interrupts(qdev);
 	ql_enable_all_completion_interrupts(qdev);
 	netif_tx_start_all_queues(qdev->ndev);
@@ -4242,7 +4262,7 @@ static struct net_device_stats *qlge_get_stats(struct net_device
 	return &ndev->stats;
 }
 
-void qlge_set_multicast_list(struct net_device *ndev)
+static void qlge_set_multicast_list(struct net_device *ndev)
 {
 	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
 	struct netdev_hw_addr *ha;
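
qlge_restore_vlan() above replays the VLAN configuration into hardware after an adapter reset: the reset wipes the on-chip VLAN table while the stack's vlan_group still holds the registered ids. The probe-every-vid iteration generalises; a sketch with a hypothetical re-add callback (only vlan_group_get_device() and VLAN_N_VID are the real kernel API):

#include <linux/if_vlan.h>
#include <linux/netdevice.h>

static void restore_vlan_ids(struct net_device *ndev,
			     struct vlan_group *vlgrp,
			     void (*add_vid)(struct net_device *, u16))
{
	u16 vid;

	if (!vlgrp)
		return;

	for (vid = 0; vid < VLAN_N_VID; vid++) {
		if (!vlan_group_get_device(vlgrp, vid))
			continue;	/* this vid was never registered */
		add_vid(ndev, vid);	/* re-program it into the hardware */
	}
}
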
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index f84e8570c7cb..0e7c7c7ee164 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -87,7 +87,7 @@ exit:
 	return status;
 }
 
-int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
+static int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
 {
 	int status;
 	status = ql_write_mpi_reg(qdev, 0x00001010, 1);
@@ -681,7 +681,7 @@ int ql_mb_get_fw_state(struct ql_adapter *qdev)
 /* Send and ACK mailbox command to the firmware to
  * let it continue with the change.
  */
-int ql_mb_idc_ack(struct ql_adapter *qdev)
+static int ql_mb_idc_ack(struct ql_adapter *qdev)
 {
 	struct mbox_params mbc;
 	struct mbox_params *mbcp = &mbc;
@@ -744,7 +744,7 @@ int ql_mb_set_port_cfg(struct ql_adapter *qdev)
 	return status;
 }
 
-int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
+static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
 		   u32 size)
 {
 	int status = 0;
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index d88ce9fb1cbd..7d33ef4bcb4a 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -846,10 +846,10 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 	else
 		tp->features &= ~RTL_FEATURE_WOL;
 	__rtl8169_set_wol(tp, wol->wolopts);
-	device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
-
 	spin_unlock_irq(&tp->lock);
 
+	device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
+
 	return 0;
 }
 
@@ -2931,7 +2931,7 @@ static const struct rtl_cfg_info {
 		.hw_start	= rtl_hw_start_8168,
 		.region		= 2,
 		.align		= 8,
-		.intr_event	= SYSErr | RxFIFOOver | LinkChg | RxOverflow |
+		.intr_event	= SYSErr | LinkChg | RxOverflow |
 				  TxErr | TxOK | RxOK | RxErr,
 		.napi_event	= TxErr | TxOK | RxOK | RxOverflow,
 		.features	= RTL_FEATURE_GMII | RTL_FEATURE_MSI,
@@ -4440,8 +4440,7 @@ static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
 	u32 status = opts1 & RxProtoMask;
 
 	if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
-	    ((status == RxProtoUDP) && !(opts1 & UDPFail)) ||
-	    ((status == RxProtoIP) && !(opts1 & IPFail)))
+	    ((status == RxProtoUDP) && !(opts1 & UDPFail)))
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 	else
 		skb_checksum_none_assert(skb);
@@ -4588,7 +4587,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
 	}
 
 	/* Work around for rx fifo overflow */
-	if (unlikely(status & RxFIFOOver)) {
+	if (unlikely(status & RxFIFOOver) &&
+	    (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
 		netif_stop_queue(dev);
 		rtl8169_tx_timeout(dev);
 		break;
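
The rtl8169_rx_csum() hunk narrows CHECKSUM_UNNECESSARY to TCP/UDP results only, dropping the bare-IP case. The general receive-checksum pattern it follows -- trust the descriptor bits the hardware actually validated, otherwise leave the skb unchecked -- looks like this (the status bits are placeholders, not the r8169 descriptor layout):

#include <linux/skbuff.h>

#define RX_TCP_CSUM_OK	0x1	/* placeholder: hw verified TCP checksum */
#define RX_UDP_CSUM_OK	0x2	/* placeholder: hw verified UDP checksum */

static void my_rx_csum(struct sk_buff *skb, u32 status)
{
	if (status & (RX_TCP_CSUM_OK | RX_UDP_CSUM_OK))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);	/* fall back to sw csum */
}
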
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
index a9ae505e1baf..66c2f1a01963 100644
--- a/drivers/net/sb1000.c
+++ b/drivers/net/sb1000.c
@@ -961,9 +961,9 @@ sb1000_open(struct net_device *dev)
 	lp->rx_error_count = 0;
 	lp->rx_error_dpc_count = 0;
 	lp->rx_session_id[0] = 0x50;
-	lp->rx_session_id[0] = 0x48;
-	lp->rx_session_id[0] = 0x44;
-	lp->rx_session_id[0] = 0x42;
+	lp->rx_session_id[1] = 0x48;
+	lp->rx_session_id[2] = 0x44;
+	lp->rx_session_id[3] = 0x42;
 	lp->rx_frame_id[0] = 0;
 	lp->rx_frame_id[1] = 0;
 	lp->rx_frame_id[2] = 0;
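
The sb1000 fix above corrects a classic copy-paste bug: all four session-id bytes were written to rx_session_id[0]. Initialising from a table sidesteps the repeated subscripts entirely; a sketch with an illustrative private struct standing in for the driver's:

#include <linux/string.h>

struct my_cm_priv {
	unsigned char rx_session_id[4];	/* illustrative stand-in */
};

static void init_session_ids(struct my_cm_priv *lp)
{
	static const unsigned char ids[4] = { 0x50, 0x48, 0x44, 0x42 };

	memcpy(lp->rx_session_id, ids, sizeof(ids));	/* one write, no indices */
}
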
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index 9265315baa0b..3a0cc63428ee 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -531,7 +531,7 @@ static int sgiseeq_open(struct net_device *dev)
 
 	if (request_irq(irq, sgiseeq_interrupt, 0, sgiseeqstr, dev)) {
 		printk(KERN_ERR "Seeq8003: Can't get irq %d\n", dev->irq);
-		err = -EAGAIN;
+		return -EAGAIN;
 	}
 
 	err = init_seeq(dev, sp, sregs);
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index bfec2e0f5275..220e0398f1d5 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3858,7 +3858,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
 
 	/* device is off until link detection */
 	netif_carrier_off(dev);
-	netif_stop_queue(dev);
 
 	return dev;
 }
diff --git a/drivers/net/slhc.c b/drivers/net/slhc.c
index ac279fad9d45..ab9e3b785b5b 100644
--- a/drivers/net/slhc.c
+++ b/drivers/net/slhc.c
@@ -688,18 +688,8 @@ slhc_toss(struct slcompress *comp)
 	return 0;
 }
 
-
-/* VJ header compression */
-EXPORT_SYMBOL(slhc_init);
-EXPORT_SYMBOL(slhc_free);
-EXPORT_SYMBOL(slhc_remember);
-EXPORT_SYMBOL(slhc_compress);
-EXPORT_SYMBOL(slhc_uncompress);
-EXPORT_SYMBOL(slhc_toss);
-
 #else /* CONFIG_INET */
 
-
 int
 slhc_toss(struct slcompress *comp)
 {
@@ -738,6 +728,10 @@ slhc_init(int rslots, int tslots)
 	printk(KERN_DEBUG "Called IP function on non IP-system: slhc_init");
 	return NULL;
 }
+
+#endif /* CONFIG_INET */
+
+/* VJ header compression */
 EXPORT_SYMBOL(slhc_init);
 EXPORT_SYMBOL(slhc_free);
 EXPORT_SYMBOL(slhc_remember);
@@ -745,5 +739,4 @@ EXPORT_SYMBOL(slhc_compress);
 EXPORT_SYMBOL(slhc_uncompress);
 EXPORT_SYMBOL(slhc_toss);
 
-#endif /* CONFIG_INET */
 MODULE_LICENSE("Dual BSD/GPL");
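
The slhc.c reshuffle above moves the EXPORT_SYMBOL block past the #endif so that both the CONFIG_INET build and the stub build export the same symbols from one place, instead of only the CONFIG_INET branch exporting them. Reduced to a single hypothetical symbol, the shape is:

#include <linux/module.h>

#ifdef CONFIG_INET
int slhc_example(void)		/* hypothetical symbol standing in for slhc_*() */
{
	return 0;		/* real VJ compression work would live here */
}
#else
int slhc_example(void)
{
	return -1;		/* stub for non-IP configurations */
}
#endif /* CONFIG_INET */

EXPORT_SYMBOL(slhc_example);	/* one export covers both definitions */
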
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index a8e5856ce882..64bfdae5956f 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -2075,7 +2075,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
 	} else {
 		/* Try reading mac address from device. if EEPROM is present
 		 * it will already have been set */
-		smsc911x_read_mac_address(dev);
+		smsc_get_mac(dev);
 
 		if (is_valid_ether_addr(dev->dev_addr)) {
 			/* eeprom values are valid so use them */
@@ -2176,6 +2176,7 @@ static struct platform_driver smsc911x_driver = {
 /* Entry point for loading the module */
 static int __init smsc911x_init_module(void)
 {
+	SMSC_INITIALIZE();
 	return platform_driver_register(&smsc911x_driver);
 }
 
diff --git a/drivers/net/smsc911x.h b/drivers/net/smsc911x.h
index 016360c65ce2..50f712e99e96 100644
--- a/drivers/net/smsc911x.h
+++ b/drivers/net/smsc911x.h
@@ -22,7 +22,7 @@
 #define __SMSC911X_H__
 
 #define TX_FIFO_LOW_THRESHOLD	((u32)1600)
-#define SMSC911X_EEPROM_SIZE	((u32)7)
+#define SMSC911X_EEPROM_SIZE	((u32)128)
 #define USE_DEBUG		0
 
 /* This is the maximum number of packets to be received every
@@ -394,4 +394,15 @@
 #define LPA_PAUSE_ALL	(LPA_PAUSE_CAP | \
 			 LPA_PAUSE_ASYM)
 
+/*
+ * Provide hooks to let the arch add to the initialisation procedure
+ * and to override the source of the MAC address.
+ */
+#define SMSC_INITIALIZE()	do {} while (0)
+#define smsc_get_mac(dev)	smsc911x_read_mac_address((dev))
+
+#ifdef CONFIG_SMSC911X_ARCH_HOOKS
+#include <asm/smsc911x.h>
+#endif
+
 #endif /* __SMSC911X_H__ */
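
With CONFIG_SMSC911X_ARCH_HOOKS set, <asm/smsc911x.h> is pulled in after the no-op defaults, so a platform presumably overrides them by redefining the two macros. A purely hypothetical board-side header illustrating the idea (none of these names exist in the tree):

/* hypothetical <asm/smsc911x.h> */
#undef SMSC_INITIALIZE
#undef smsc_get_mac

void myboard_smsc911x_setup(void);			/* board init quirk */
int myboard_smsc911x_get_mac(struct net_device *dev);	/* MAC from board NVRAM */

#define SMSC_INITIALIZE()	myboard_smsc911x_setup()
#define smsc_get_mac(dev)	myboard_smsc911x_get_mac(dev)
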
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 823b9e6431d5..06bc6034ce81 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -337,33 +337,19 @@ static int stmmac_init_phy(struct net_device *dev)
 	return 0;
 }
 
-static inline void stmmac_mac_enable_rx(void __iomem *ioaddr)
+static inline void stmmac_enable_mac(void __iomem *ioaddr)
 {
 	u32 value = readl(ioaddr + MAC_CTRL_REG);
-	value |= MAC_RNABLE_RX;
-	/* Set the RE (receive enable bit into the MAC CTRL register). */
-	writel(value, ioaddr + MAC_CTRL_REG);
-}
 
-static inline void stmmac_mac_enable_tx(void __iomem *ioaddr)
-{
-	u32 value = readl(ioaddr + MAC_CTRL_REG);
-	value |= MAC_ENABLE_TX;
-	/* Set the TE (transmit enable bit into the MAC CTRL register). */
+	value |= MAC_RNABLE_RX | MAC_ENABLE_TX;
 	writel(value, ioaddr + MAC_CTRL_REG);
 }
 
-static inline void stmmac_mac_disable_rx(void __iomem *ioaddr)
+static inline void stmmac_disable_mac(void __iomem *ioaddr)
 {
 	u32 value = readl(ioaddr + MAC_CTRL_REG);
-	value &= ~MAC_RNABLE_RX;
-	writel(value, ioaddr + MAC_CTRL_REG);
-}
 
-static inline void stmmac_mac_disable_tx(void __iomem *ioaddr)
-{
-	u32 value = readl(ioaddr + MAC_CTRL_REG);
-	value &= ~MAC_ENABLE_TX;
+	value &= ~(MAC_ENABLE_TX | MAC_RNABLE_RX);
 	writel(value, ioaddr + MAC_CTRL_REG);
 }
 
@@ -857,8 +843,7 @@ static int stmmac_open(struct net_device *dev)
 	writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK);
 
 	/* Enable the MAC Rx/Tx */
-	stmmac_mac_enable_rx(priv->ioaddr);
-	stmmac_mac_enable_tx(priv->ioaddr);
+	stmmac_enable_mac(priv->ioaddr);
 
 	/* Set the HW DMA mode and the COE */
 	stmmac_dma_operation_mode(priv);
@@ -928,9 +913,8 @@ static int stmmac_release(struct net_device *dev)
 	/* Release and free the Rx/Tx resources */
 	free_dma_desc_resources(priv);
 
-	/* Disable the MAC core */
-	stmmac_mac_disable_tx(priv->ioaddr);
-	stmmac_mac_disable_rx(priv->ioaddr);
+	/* Disable the MAC Rx/Tx */
+	stmmac_disable_mac(priv->ioaddr);
 
 	netif_carrier_off(dev);
 
@@ -1787,8 +1771,7 @@ static int stmmac_dvr_remove(struct platform_device *pdev)
 	priv->hw->dma->stop_rx(priv->ioaddr);
 	priv->hw->dma->stop_tx(priv->ioaddr);
 
-	stmmac_mac_disable_rx(priv->ioaddr);
-	stmmac_mac_disable_tx(priv->ioaddr);
+	stmmac_disable_mac(priv->ioaddr);
 
 	netif_carrier_off(ndev);
 
@@ -1839,13 +1822,11 @@ static int stmmac_suspend(struct platform_device *pdev, pm_message_t state)
 				     dis_ic);
 		priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
 
-		stmmac_mac_disable_tx(priv->ioaddr);
-
 		/* Enable Power down mode by programming the PMT regs */
 		if (device_can_wakeup(priv->device))
 			priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
 		else
-			stmmac_mac_disable_rx(priv->ioaddr);
+			stmmac_disable_mac(priv->ioaddr);
 	} else {
 		priv->shutdown = 1;
 		/* Although this can appear slightly redundant it actually
@@ -1886,8 +1867,7 @@ static int stmmac_resume(struct platform_device *pdev)
 	netif_device_attach(dev);
 
 	/* Enable the MAC and DMA */
-	stmmac_mac_enable_rx(priv->ioaddr);
-	stmmac_mac_enable_tx(priv->ioaddr);
+	stmmac_enable_mac(priv->ioaddr);
 	priv->hw->dma->start_tx(priv->ioaddr);
 	priv->hw->dma->start_rx(priv->ioaddr);
 
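
The stmmac hunks collapse four single-bit helpers into one enable/disable pair; the underlying idiom is a read-modify-write of a MAC control register, done once for all the bits that change together. A generic sketch of that idiom (the register offset is a placeholder):

#include <linux/io.h>

#define MY_MAC_CTRL_REG	0x0000	/* placeholder control-register offset */

static inline void mac_ctrl_set(void __iomem *ioaddr, u32 bits)
{
	u32 value = readl(ioaddr + MY_MAC_CTRL_REG);

	writel(value | bits, ioaddr + MY_MAC_CTRL_REG);	/* one RMW for all bits */
}

static inline void mac_ctrl_clear(void __iomem *ioaddr, u32 bits)
{
	u32 value = readl(ioaddr + MY_MAC_CTRL_REG);

	writel(value & ~bits, ioaddr + MY_MAC_CTRL_REG);
}
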
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 852e917778f8..30ccbb6d097a 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -9948,16 +9948,16 @@ static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 	    !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
 		return -EINVAL;
 
+	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
+
 	spin_lock_bh(&tp->lock);
-	if (wol->wolopts & WAKE_MAGIC) {
+	if (device_may_wakeup(dp))
 		tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
-		device_set_wakeup_enable(dp, true);
-	} else {
+	else
 		tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
-		device_set_wakeup_enable(dp, false);
-	}
 	spin_unlock_bh(&tp->lock);
 
+
 	return 0;
 }
 
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index 663b8860a531..793020347e54 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -1220,7 +1220,7 @@ void tms380tr_wait(unsigned long time)
 		tmp = schedule_timeout_interruptible(tmp);
 	} while(time_after(tmp, jiffies));
 #else
-	udelay(time);
+	mdelay(time / 1000);
 #endif
 }
 
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 28e1ffb13db9..c78a50586c1d 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -2021,7 +2021,6 @@ static int __devinit de_init_one (struct pci_dev *pdev,
 	de->media_timer.data = (unsigned long) de;
 
 	netif_carrier_off(dev);
-	netif_stop_queue(dev);
 
 	/* wake up device, assign resources */
 	rc = pci_enable_device(pdev);
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 1cc67138adbf..5b83c3f35f47 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -24,10 +24,6 @@
 	3XP Processor. It has been tested on x86 and sparc64.
 
 	KNOWN ISSUES:
-	*) The current firmware always strips the VLAN tag off, even if
-	   we tell it not to. You should filter VLANs at the switch
-	   as a workaround (good practice in any event) until we can
-	   get this fixed.
 	*) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
 	   issue. Hopefully 3Com will fix it.
 	*) Waiting for a command response takes 8ms due to non-preemptable
@@ -280,8 +276,6 @@ struct typhoon {
 	struct pci_dev *	pdev;
 	struct net_device *	dev;
 	struct napi_struct	napi;
-	spinlock_t		state_lock;
-	struct vlan_group *	vlgrp;
 	struct basic_ring	rxHiRing;
 	struct basic_ring	rxBuffRing;
 	struct rxbuff_ent	rxbuffers[RXENT_ENTRIES];
@@ -695,44 +689,6 @@ out:
 	return err;
 }
 
-static void
-typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
-{
-	struct typhoon *tp = netdev_priv(dev);
-	struct cmd_desc xp_cmd;
-	int err;
-
-	spin_lock_bh(&tp->state_lock);
-	if(!tp->vlgrp != !grp) {
-		/* We've either been turned on for the first time, or we've
-		 * been turned off. Update the 3XP.
-		 */
-		if(grp)
-			tp->offload |= TYPHOON_OFFLOAD_VLAN;
-		else
-			tp->offload &= ~TYPHOON_OFFLOAD_VLAN;
-
-		/* If the interface is up, the runtime is running -- and we
-		 * must be up for the vlan core to call us.
-		 *
-		 * Do the command outside of the spin lock, as it is slow.
-		 */
-		INIT_COMMAND_WITH_RESPONSE(&xp_cmd,
-					TYPHOON_CMD_SET_OFFLOAD_TASKS);
-		xp_cmd.parm2 = tp->offload;
-		xp_cmd.parm3 = tp->offload;
-		spin_unlock_bh(&tp->state_lock);
-		err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
-		if(err < 0)
-			netdev_err(tp->dev, "vlan offload error %d\n", -err);
-		spin_lock_bh(&tp->state_lock);
-	}
-
-	/* now make the change visible */
-	tp->vlgrp = grp;
-	spin_unlock_bh(&tp->state_lock);
-}
-
 static inline void
 typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
 		u32 ring_dma)
@@ -818,7 +774,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
 		first_txd->processFlags |=
 		    TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
 		first_txd->processFlags |=
-		    cpu_to_le32(ntohs(vlan_tx_tag_get(skb)) <<
+		    cpu_to_le32(htons(vlan_tx_tag_get(skb)) <<
 				TYPHOON_TX_PF_VLAN_TAG_SHIFT);
 	}
 
@@ -936,7 +892,7 @@ typhoon_set_rx_mode(struct net_device *dev)
 		filter |= TYPHOON_RX_FILTER_MCAST_HASH;
 	}
 
-	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
+	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
 	xp_cmd.parm1 = filter;
 	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
 }
@@ -1198,6 +1154,20 @@ typhoon_get_rx_csum(struct net_device *dev)
 	return 1;
 }
 
+static int
+typhoon_set_flags(struct net_device *dev, u32 data)
+{
+	/* There's no way to turn off the RX VLAN offloading and stripping
+	 * on the current 3XP firmware -- it does not respect the offload
+	 * settings -- so we only allow the user to toggle the TX processing.
+	 */
+	if (!(data & ETH_FLAG_RXVLAN))
+		return -EINVAL;
+
+	return ethtool_op_set_flags(dev, data,
+				    ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN);
+}
+
 static void
 typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
 {
@@ -1224,6 +1194,8 @@ static const struct ethtool_ops typhoon_ethtool_ops = {
 	.set_sg			= ethtool_op_set_sg,
 	.set_tso		= ethtool_op_set_tso,
 	.get_ringparam		= typhoon_get_ringparam,
+	.set_flags		= typhoon_set_flags,
+	.get_flags		= ethtool_op_get_flags,
 };
 
 static int
@@ -1309,9 +1281,9 @@ typhoon_init_interface(struct typhoon *tp)
 
 	tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
 	tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;
+	tp->offload |= TYPHOON_OFFLOAD_VLAN;
 
 	spin_lock_init(&tp->command_lock);
-	spin_lock_init(&tp->state_lock);
 
 	/* Force the writes to the shared memory area out before continuing. */
 	wmb();
@@ -1328,7 +1300,7 @@ typhoon_init_rings(struct typhoon *tp)
 	tp->rxHiRing.lastWrite = 0;
 	tp->rxBuffRing.lastWrite = 0;
 	tp->cmdRing.lastWrite = 0;
-	tp->cmdRing.lastWrite = 0;
+	tp->respRing.lastWrite = 0;
 
 	tp->txLoRing.lastRead = 0;
 	tp->txHiRing.lastRead = 0;
@@ -1762,13 +1734,10 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * read
 		} else
 			skb_checksum_none_assert(new_skb);
 
-		spin_lock(&tp->state_lock);
-		if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
-			vlan_hwaccel_receive_skb(new_skb, tp->vlgrp,
-						 ntohl(rx->vlanTag) & 0xffff);
-		else
-			netif_receive_skb(new_skb);
-		spin_unlock(&tp->state_lock);
+		if (rx->rxStatus & TYPHOON_RX_VLAN)
+			__vlan_hwaccel_put_tag(new_skb,
+					       ntohl(rx->vlanTag) & 0xffff);
+		netif_receive_skb(new_skb);
 
 		received++;
 		budget--;
@@ -1989,11 +1958,9 @@ typhoon_start_runtime(struct typhoon *tp)
 		goto error_out;
 
 	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
-	spin_lock_bh(&tp->state_lock);
 	xp_cmd.parm2 = tp->offload;
 	xp_cmd.parm3 = tp->offload;
 	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
-	spin_unlock_bh(&tp->state_lock);
 	if(err < 0)
 		goto error_out;
 
@@ -2231,13 +2198,9 @@ typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
 	if(!netif_running(dev))
 		return 0;
 
-	spin_lock_bh(&tp->state_lock);
-	if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
-		spin_unlock_bh(&tp->state_lock);
-		netdev_err(dev, "cannot do WAKE_MAGIC with VLANS\n");
-		return -EBUSY;
-	}
-	spin_unlock_bh(&tp->state_lock);
+	/* TYPHOON_OFFLOAD_VLAN is always on now, so this doesn't work */
+	if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
+		netdev_warn(dev, "cannot do WAKE_MAGIC with VLAN offloading\n");
 
 	netif_device_detach(dev);
 
@@ -2338,7 +2301,6 @@ static const struct net_device_ops typhoon_netdev_ops = {
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_mac_address	= typhoon_set_mac_address,
 	.ndo_change_mtu		= eth_change_mtu,
-	.ndo_vlan_rx_register	= typhoon_vlan_rx_register,
 };
 
 static int __devinit
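
The typhoon receive path now tags the skb and delivers it unconditionally, instead of branching on a driver-held vlan_group under a spinlock. The two-argument __vlan_hwaccel_put_tag() shown in the hunk is the API of this kernel generation; a sketch of the same pattern against a made-up descriptor layout:

#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define MY_RX_VLAN	0x00010000	/* placeholder "tag present" status bit */

struct my_rx_desc {
	u32 status;
	__be32 vlan_tag;	/* tag as written by the NIC, big-endian */
};

static void my_rx_deliver(struct sk_buff *skb, const struct my_rx_desc *rx)
{
	if (rx->status & MY_RX_VLAN)
		__vlan_hwaccel_put_tag(skb, ntohl(rx->vlan_tag) & 0xffff);
	netif_receive_skb(skb);	/* one delivery path, tagged or not */
}
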
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index a4c3f5708246..acbdab3d66ca 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -2050,12 +2050,16 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth)
 
 	ugeth_vdbg("%s: IN", __func__);
 
+	/*
+	 * Tell the kernel the link is down.
+	 * Must be done before disabling the controller
+	 * or deadlock may happen.
+	 */
+	phy_stop(phydev);
+
 	/* Disable the controller */
 	ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
 
-	/* Tell the kernel the link is down */
-	phy_stop(phydev);
-
 	/* Mask all interrupts */
 	out_be32(ugeth->uccf->p_uccm, 0x00000000);
 
@@ -2065,9 +2069,6 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth)
 	/* Disable Rx and Tx */
 	clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
 
-	phy_disconnect(ugeth->phydev);
-	ugeth->phydev = NULL;
-
 	ucc_geth_memclean(ugeth);
 }
 
@@ -3550,7 +3551,10 @@ static int ucc_geth_close(struct net_device *dev)
 
 	napi_disable(&ugeth->napi);
 
+	cancel_work_sync(&ugeth->timeout_work);
 	ucc_geth_stop(ugeth);
+	phy_disconnect(ugeth->phydev);
+	ugeth->phydev = NULL;
 
 	free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev);
 
@@ -3579,8 +3583,12 @@ static void ucc_geth_timeout_work(struct work_struct *work)
 		 * Must reset MAC *and* PHY. This is done by reopening
 		 * the device.
 		 */
-		ucc_geth_close(dev);
-		ucc_geth_open(dev);
+		netif_tx_stop_all_queues(dev);
+		ucc_geth_stop(ugeth);
+		ucc_geth_init_mac(ugeth);
+		/* Must start PHY here */
+		phy_start(ugeth->phydev);
+		netif_tx_start_all_queues(dev);
 	}
 
 	netif_tx_schedule_all(dev);
@@ -3594,7 +3602,6 @@ static void ucc_geth_timeout(struct net_device *dev)
 {
 	struct ucc_geth_private *ugeth = netdev_priv(dev);
 
-	netif_carrier_off(dev);
 	schedule_work(&ugeth->timeout_work);
 }
 
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index ca7fc9df1ccf..c04d49e31f81 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -45,6 +45,7 @@
 #include <linux/usb/usbnet.h>
 #include <linux/slab.h>
 #include <linux/kernel.h>
+#include <linux/pm_runtime.h>
 
 #define DRIVER_VERSION		"22-Aug-2005"
50 51
@@ -1273,6 +1274,16 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1273 struct usb_device *xdev; 1274 struct usb_device *xdev;
1274 int status; 1275 int status;
1275 const char *name; 1276 const char *name;
1277 struct usb_driver *driver = to_usb_driver(udev->dev.driver);
1278
1279 /* usbnet already took usb runtime pm, so have to enable the feature
1280 * for usb interface, otherwise usb_autopm_get_interface may return
1281 * failure if USB_SUSPEND(RUNTIME_PM) is enabled.
1282 */
1283 if (!driver->supports_autosuspend) {
1284 driver->supports_autosuspend = 1;
1285 pm_runtime_enable(&udev->dev);
1286 }
1276 1287
1277 name = udev->dev.driver->name; 1288 name = udev->dev.driver->name;
1278 info = (struct driver_info *) prod->driver_info; 1289 info = (struct driver_info *) prod->driver_info;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index bb6b67f6b0cc..b6d402806ae6 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -986,9 +986,15 @@ static int virtnet_probe(struct virtio_device *vdev)
 		goto unregister;
 	}
 
-	vi->status = VIRTIO_NET_S_LINK_UP;
-	virtnet_update_status(vi);
-	netif_carrier_on(dev);
+	/* Assume link up if device can't report link status,
+	   otherwise get link status from config. */
+	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
+		netif_carrier_off(dev);
+		virtnet_update_status(vi);
+	} else {
+		vi->status = VIRTIO_NET_S_LINK_UP;
+		netif_carrier_on(dev);
+	}
 
 	pr_debug("virtnet: registered device %s\n", dev->name);
 	return 0;
diff --git a/drivers/net/vmxnet3/upt1_defs.h b/drivers/net/vmxnet3/upt1_defs.h
index 37108fb226d3..969c751ee404 100644
--- a/drivers/net/vmxnet3/upt1_defs.h
+++ b/drivers/net/vmxnet3/upt1_defs.h
@@ -88,9 +88,9 @@ struct UPT1_RSSConf {
 
 /* features */
 enum {
-	UPT1_F_RXCSUM		= 0x0001,   /* rx csum verification */
-	UPT1_F_RSS		= 0x0002,
-	UPT1_F_RXVLAN		= 0x0004,   /* VLAN tag stripping */
-	UPT1_F_LRO		= 0x0008,
+	UPT1_F_RXCSUM		= cpu_to_le64(0x0001),   /* rx csum verification */
+	UPT1_F_RSS		= cpu_to_le64(0x0002),
+	UPT1_F_RXVLAN		= cpu_to_le64(0x0004),   /* VLAN tag stripping */
+	UPT1_F_LRO		= cpu_to_le64(0x0008),
 };
 #endif
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
index ca7727b940ad..4d84912c99ba 100644
--- a/drivers/net/vmxnet3/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -523,9 +523,9 @@ struct Vmxnet3_RxFilterConf {
 #define VMXNET3_PM_MAX_PATTERN_SIZE	128
 #define VMXNET3_PM_MAX_MASK_SIZE	(VMXNET3_PM_MAX_PATTERN_SIZE / 8)
 
-#define VMXNET3_PM_WAKEUP_MAGIC		0x01	/* wake up on magic pkts */
-#define VMXNET3_PM_WAKEUP_FILTER	0x02	/* wake up on pkts matching
+#define VMXNET3_PM_WAKEUP_MAGIC		cpu_to_le16(0x01) /* wake up on magic pkts */
+#define VMXNET3_PM_WAKEUP_FILTER	cpu_to_le16(0x02) /* wake up on pkts matching
 						   * filters */
 
 
 struct Vmxnet3_PM_PktFilter {
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 3f60e0e3097b..21314e06e6d7 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -873,7 +873,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 	count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
 		skb_shinfo(skb)->nr_frags + 1;
 
-	ctx.ipv4 = (skb->protocol == __constant_ntohs(ETH_P_IP));
+	ctx.ipv4 = (skb->protocol == cpu_to_be16(ETH_P_IP));
 
 	ctx.mss = skb_shinfo(skb)->gso_size;
 	if (ctx.mss) {
@@ -1563,8 +1563,7 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 		adapter->vlan_grp = grp;
 
 		/* update FEATURES to device */
-		set_flag_le64(&devRead->misc.uptFeatures,
-			      UPT1_F_RXVLAN);
+		devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 				       VMXNET3_CMD_UPDATE_FEATURE);
 		/*
@@ -1587,7 +1586,7 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 		struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
 		adapter->vlan_grp = NULL;
 
-		if (le64_to_cpu(devRead->misc.uptFeatures) & UPT1_F_RXVLAN) {
+		if (devRead->misc.uptFeatures & UPT1_F_RXVLAN) {
 			int i;
 
 			for (i = 0; i < VMXNET3_VFT_SIZE; i++) {
@@ -1600,8 +1599,7 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 					VMXNET3_CMD_UPDATE_VLAN_FILTERS);
 
 		/* update FEATURES to device */
-		reset_flag_le64(&devRead->misc.uptFeatures,
-				UPT1_F_RXVLAN);
+		devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 				VMXNET3_CMD_UPDATE_FEATURE);
 	}
@@ -1762,15 +1760,15 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
 
 	/* set up feature flags */
 	if (adapter->rxcsum)
-		set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXCSUM);
+		devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
 
 	if (adapter->lro) {
-		set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_LRO);
+		devRead->misc.uptFeatures |= UPT1_F_LRO;
 		devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
 	}
 	if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) &&
 	    adapter->vlan_grp) {
-		set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXVLAN);
+		devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
 	}
 
 	devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
@@ -2577,7 +2575,7 @@ vmxnet3_suspend(struct device *device)
 	memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
 	pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
 
-	set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER);
+	pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
 	i++;
 	}
 
@@ -2619,13 +2617,13 @@ vmxnet3_suspend(struct device *device)
 		pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
 		in_dev_put(in_dev);
 
-		set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER);
+		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
 		i++;
 	}
 
 skip_arp:
 	if (adapter->wol & WAKE_MAGIC)
-		set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_MAGIC);
+		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
 
 	pmConf->numFilters = i;
 
@@ -2667,7 +2665,7 @@ vmxnet3_resume(struct device *device)
 	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
 	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
 								  *pmConf));
-	adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le32(virt_to_phys(
+	adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
 								 pmConf));
 
 	netif_device_attach(netdev);
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 7e4b5a89165a..b79070bcc92e 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -50,13 +50,11 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
 		adapter->rxcsum = val;
 		if (netif_running(netdev)) {
 			if (val)
-				set_flag_le64(
-				&adapter->shared->devRead.misc.uptFeatures,
-				UPT1_F_RXCSUM);
+				adapter->shared->devRead.misc.uptFeatures |=
+								UPT1_F_RXCSUM;
 			else
-				reset_flag_le64(
-				&adapter->shared->devRead.misc.uptFeatures,
-				UPT1_F_RXCSUM);
+				adapter->shared->devRead.misc.uptFeatures &=
+								~UPT1_F_RXCSUM;
 
 			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 					       VMXNET3_CMD_UPDATE_FEATURE);
@@ -292,10 +290,10 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data)
 		/* update harware LRO capability accordingly */
 		if (lro_requested)
 			adapter->shared->devRead.misc.uptFeatures |=
-							cpu_to_le64(UPT1_F_LRO);
+							UPT1_F_LRO;
 		else
 			adapter->shared->devRead.misc.uptFeatures &=
-							cpu_to_le64(~UPT1_F_LRO);
+							~UPT1_F_LRO;
 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 				       VMXNET3_CMD_UPDATE_FEATURE);
 	}
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index c88ea5cbba0d..edf228843afc 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -301,8 +301,8 @@ struct vmxnet3_adapter {
 	struct net_device		*netdev;
 	struct pci_dev			*pdev;
 
-	u8				*hw_addr0; /* for BAR 0 */
-	u8				*hw_addr1; /* for BAR 1 */
+	u8			__iomem *hw_addr0; /* for BAR 0 */
+	u8			__iomem *hw_addr1; /* for BAR 1 */
 
 	/* feature control */
 	bool				rxcsum;
@@ -330,14 +330,14 @@ struct vmxnet3_adapter {
 };
 
 #define VMXNET3_WRITE_BAR0_REG(adapter, reg, val)  \
-	writel(cpu_to_le32(val), (adapter)->hw_addr0 + (reg))
+	writel((val), (adapter)->hw_addr0 + (reg))
 #define VMXNET3_READ_BAR0_REG(adapter, reg)        \
-	le32_to_cpu(readl((adapter)->hw_addr0 + (reg)))
+	readl((adapter)->hw_addr0 + (reg))
 
 #define VMXNET3_WRITE_BAR1_REG(adapter, reg, val)  \
-	writel(cpu_to_le32(val), (adapter)->hw_addr1 + (reg))
+	writel((val), (adapter)->hw_addr1 + (reg))
 #define VMXNET3_READ_BAR1_REG(adapter, reg)        \
-	le32_to_cpu(readl((adapter)->hw_addr1 + (reg)))
+	readl((adapter)->hw_addr1 + (reg))
 
 #define VMXNET3_WAKE_QUEUE_THRESHOLD(tq)  (5)
 #define VMXNET3_RX_ALLOC_THRESHOLD(rq, ring_idx, adapter) \
@@ -353,21 +353,6 @@ struct vmxnet3_adapter {
 #define VMXNET3_MAX_ETH_HDR_SIZE    22
 #define VMXNET3_MAX_SKB_BUF_SIZE    (3*1024)
 
-static inline void set_flag_le16(__le16 *data, u16 flag)
-{
-	*data = cpu_to_le16(le16_to_cpu(*data) | flag);
-}
-
-static inline void set_flag_le64(__le64 *data, u64 flag)
-{
-	*data = cpu_to_le64(le64_to_cpu(*data) | flag);
-}
-
-static inline void reset_flag_le64(__le64 *data, u64 flag)
-{
-	*data = cpu_to_le64(le64_to_cpu(*data) & ~flag);
-}
-
 int
 vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter);
 
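
The vmxnet3 series deletes the set_flag_le64()-style helpers by baking the byte order into the flag constants themselves: each UPT1_F_* value is declared as cpu_to_le64(bit), so __le64 fields shared with the device can be updated with plain |= and &= ~ (bitwise NOT commutes with the byte swap, so the complement stays correct). A minimal sketch with a placeholder flag:

#include <linux/types.h>
#include <asm/byteorder.h>

#define MY_F_EXAMPLE	cpu_to_le64(0x0004)	/* placeholder feature bit */

struct my_shared_conf {
	__le64 features;	/* lives in memory the device reads directly */
};

static void my_feature_set(struct my_shared_conf *conf, bool on)
{
	if (on)
		conf->features |= MY_F_EXAMPLE;		/* no swap helpers needed */
	else
		conf->features &= ~MY_F_EXAMPLE;
}
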
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 0e6db5935609..906a3ca3676b 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -20,6 +20,179 @@
20#include "vxge-traffic.h" 20#include "vxge-traffic.h"
21#include "vxge-config.h" 21#include "vxge-config.h"
22 22
23static enum vxge_hw_status
24__vxge_hw_fifo_create(
25 struct __vxge_hw_vpath_handle *vpath_handle,
26 struct vxge_hw_fifo_attr *attr);
27
28static enum vxge_hw_status
29__vxge_hw_fifo_abort(
30 struct __vxge_hw_fifo *fifoh);
31
32static enum vxge_hw_status
33__vxge_hw_fifo_reset(
34 struct __vxge_hw_fifo *ringh);
35
36static enum vxge_hw_status
37__vxge_hw_fifo_delete(
38 struct __vxge_hw_vpath_handle *vpath_handle);
39
40static struct __vxge_hw_blockpool_entry *
41__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev,
42 u32 size);
43
44static void
45__vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev,
46 struct __vxge_hw_blockpool_entry *entry);
47
48static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
49 void *block_addr,
50 u32 length,
51 struct pci_dev *dma_h,
52 struct pci_dev *acc_handle);
53
54static enum vxge_hw_status
55__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
56 struct __vxge_hw_blockpool *blockpool,
57 u32 pool_size,
58 u32 pool_max);
59
60static void
61__vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool);
62
63static void *
64__vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev,
65 u32 size,
66 struct vxge_hw_mempool_dma *dma_object);
67
68static void
69__vxge_hw_blockpool_free(struct __vxge_hw_device *hldev,
70 void *memblock,
71 u32 size,
72 struct vxge_hw_mempool_dma *dma_object);
73
74
75static struct __vxge_hw_channel*
76__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
77 enum __vxge_hw_channel_type type, u32 length,
78 u32 per_dtr_space, void *userdata);
79
80static void
81__vxge_hw_channel_free(
82 struct __vxge_hw_channel *channel);
83
84static enum vxge_hw_status
85__vxge_hw_channel_initialize(
86 struct __vxge_hw_channel *channel);
87
88static enum vxge_hw_status
89__vxge_hw_channel_reset(
90 struct __vxge_hw_channel *channel);
91
92static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp);
93
94static enum vxge_hw_status
95__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config);
96
97static enum vxge_hw_status
98__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config);
99
100static void
101__vxge_hw_device_id_get(struct __vxge_hw_device *hldev);
102
103static void
104__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);
105
106static enum vxge_hw_status
107__vxge_hw_vpath_card_info_get(
108 u32 vp_id,
109 struct vxge_hw_vpath_reg __iomem *vpath_reg,
110 struct vxge_hw_device_hw_info *hw_info);
111
112static enum vxge_hw_status
113__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);
114
115static void
116__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);
117
118static enum vxge_hw_status
119__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev);
120
121static enum vxge_hw_status
122__vxge_hw_device_register_poll(
123 void __iomem *reg,
124 u64 mask, u32 max_millis);
125
126static inline enum vxge_hw_status
127__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
128 u64 mask, u32 max_millis)
129{
130 __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
131 wmb();
132
133 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
134 wmb();
135
136 return __vxge_hw_device_register_poll(addr, mask, max_millis);
137}
138
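
__vxge_hw_pio_mem_write64() above splits a 64-bit register write into two 32-bit stores with a write barrier after each, then polls until the device acknowledges. The wmb() matters because the hardware latches the full value only when the second half lands, so the first store must be globally visible before it. A standalone sketch of the bit-field split, with bVALn() mirroring the driver's MSB-relative vxge_bVALn() extraction (an assumption about its convention, inferred from the call sites):

#include <stdint.h>
#include <stdio.h>

/* extract n bits of v starting at MSB-relative bit position loc */
static uint64_t bVALn(uint64_t v, unsigned int loc, unsigned int n)
{
	return (v >> (64 - loc - n)) & ((1ULL << n) - 1);
}

int main(void)
{
	uint64_t val64 = 0x1122334455667788ULL;
	uint32_t lower = (uint32_t)bVALn(val64, 32, 32); /* bits 32..63 */
	uint32_t upper = (uint32_t)bVALn(val64, 0, 32);  /* bits  0..31 */

	/* in the driver: write lower, wmb(), write upper, wmb(), poll */
	printf("lower = %08x, upper = %08x\n", lower, upper);
	return 0;
}
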
139static struct vxge_hw_mempool*
140__vxge_hw_mempool_create(struct __vxge_hw_device *devh, u32 memblock_size,
141 u32 item_size, u32 private_size, u32 items_initial,
142 u32 items_max, struct vxge_hw_mempool_cbs *mp_callback,
143 void *userdata);
144static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool);
145
146static enum vxge_hw_status
147__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
148 struct vxge_hw_vpath_stats_hw_info *hw_stats);
149
150static enum vxge_hw_status
151vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vpath_handle);
152
153static enum vxge_hw_status
154__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);
155
156static u64
157__vxge_hw_vpath_pci_func_mode_get(u32 vp_id,
158 struct vxge_hw_vpath_reg __iomem *vpath_reg);
159
160static u32
161__vxge_hw_vpath_func_id_get(u32 vp_id, struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);
162
163static enum vxge_hw_status
164__vxge_hw_vpath_addr_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
165 u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN]);
166
167static enum vxge_hw_status
168__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);
169
170
171static enum vxge_hw_status
172__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *devh, u32 vp_id);
173
174static enum vxge_hw_status
175__vxge_hw_vpath_fw_ver_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
176 struct vxge_hw_device_hw_info *hw_info);
177
178static enum vxge_hw_status
179__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *devh, u32 vp_id);
180
181static void
182__vxge_hw_vp_terminate(struct __vxge_hw_device *devh, u32 vp_id);
183
184static enum vxge_hw_status
185__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
186 u32 operation, u32 offset, u64 *stat);
187
188static enum vxge_hw_status
189__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
190 struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
191
192static enum vxge_hw_status
193__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
194 struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
195
23/* 196/*
24 * __vxge_hw_channel_allocate - Allocate memory for channel 197 * __vxge_hw_channel_allocate - Allocate memory for channel
25 * This function allocates required memory for the channel and various arrays 198 * This function allocates required memory for the channel and various arrays
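
The long run of added lines above is a single pattern: every routine that is only called from within this file gains a static forward declaration near the top, so it can lose external linkage (and its prototype in the shared header, removed further down) without shuffling function definitions around. A minimal standalone sketch of the idiom:

#include <stdio.h>

static int helper(int x);   /* static forward declaration up top */

int public_entry(int x)     /* externally visible API stays exported */
{
	/* the caller may precede the definition thanks to the decl */
	return helper(x) + 1;
}

static int helper(int x)    /* definition keeps its original place */
{
	return x * 2;
}

int main(void)
{
	printf("%d\n", public_entry(20)); /* prints 41 */
	return 0;
}
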
@@ -190,7 +363,7 @@ __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
 190 * Will poll a certain register for a specified amount of time. 363 * Will poll a certain register for a specified amount of time.
 191 * Will poll until the masked bit is cleared. 364 * Will poll until the masked bit is cleared.
192 */ 365 */
193enum vxge_hw_status 366static enum vxge_hw_status
194__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis) 367__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
195{ 368{
196 u64 val64; 369 u64 val64;
@@ -221,7 +394,7 @@ __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
221 * in progress 394 * in progress
 222 * This routine checks that the vpath reset-in-progress register has turned zero 395 * This routine checks that the vpath reset-in-progress register has turned zero
223 */ 396 */
224enum vxge_hw_status 397static enum vxge_hw_status
225__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog) 398__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
226{ 399{
227 enum vxge_hw_status status; 400 enum vxge_hw_status status;
@@ -236,7 +409,7 @@ __vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
236 * This routine sets the swapper and reads the toc pointer and returns the 409 * This routine sets the swapper and reads the toc pointer and returns the
 237 * memory-mapped address of the toc 410 * memory-mapped address of the toc
238 */ 411 */
239struct vxge_hw_toc_reg __iomem * 412static struct vxge_hw_toc_reg __iomem *
240__vxge_hw_device_toc_get(void __iomem *bar0) 413__vxge_hw_device_toc_get(void __iomem *bar0)
241{ 414{
242 u64 val64; 415 u64 val64;
@@ -779,7 +952,7 @@ exit:
 779 * vxge_hw_device_xmac_aggr_stats_get - Get the statistics on the aggregate port 952 * vxge_hw_device_xmac_aggr_stats_get - Get the statistics on the aggregate port
 780 * Get the statistics on the aggregate port 953 * Get the statistics on the aggregate port
781 */ 954 */
782enum vxge_hw_status 955static enum vxge_hw_status
783vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port, 956vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
784 struct vxge_hw_xmac_aggr_stats *aggr_stats) 957 struct vxge_hw_xmac_aggr_stats *aggr_stats)
785{ 958{
@@ -814,7 +987,7 @@ exit:
 814 * vxge_hw_device_xmac_port_stats_get - Get the statistics on a port 987 * vxge_hw_device_xmac_port_stats_get - Get the statistics on a port
 815 * Get the statistics on the port 988 * Get the statistics on the port
816 */ 989 */
817enum vxge_hw_status 990static enum vxge_hw_status
818vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port, 991vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
819 struct vxge_hw_xmac_port_stats *port_stats) 992 struct vxge_hw_xmac_port_stats *port_stats)
820{ 993{
@@ -952,20 +1125,6 @@ u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
952 return 0; 1125 return 0;
953#endif 1126#endif
954} 1127}
955/*
956 * vxge_hw_device_debug_mask_get - Get the debug mask
957 * This routine returns the current debug mask set
958 */
959u32 vxge_hw_device_debug_mask_get(struct __vxge_hw_device *hldev)
960{
961#if defined(VXGE_DEBUG_TRACE_MASK) || defined(VXGE_DEBUG_ERR_MASK)
962 if (hldev == NULL)
963 return 0;
964 return hldev->debug_module_mask;
965#else
966 return 0;
967#endif
968}
969 1128
970/* 1129/*
 971 * vxge_hw_getpause_data - Pause frame generation and reception. 1130 * vxge_hw_getpause_data - Pause frame generation and reception.
@@ -1090,7 +1249,7 @@ __vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
1090 * first block 1249 * first block
1091 * Returns the dma address of the first RxD block 1250 * Returns the dma address of the first RxD block
1092 */ 1251 */
1093u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring) 1252static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
1094{ 1253{
1095 struct vxge_hw_mempool_dma *dma_object; 1254 struct vxge_hw_mempool_dma *dma_object;
1096 1255
@@ -1252,7 +1411,7 @@ exit:
1252 * This function creates Ring and initializes it. 1411 * This function creates Ring and initializes it.
1253 * 1412 *
1254 */ 1413 */
1255enum vxge_hw_status 1414static enum vxge_hw_status
1256__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp, 1415__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
1257 struct vxge_hw_ring_attr *attr) 1416 struct vxge_hw_ring_attr *attr)
1258{ 1417{
@@ -1363,7 +1522,7 @@ exit:
1363 * __vxge_hw_ring_abort - Returns the RxD 1522 * __vxge_hw_ring_abort - Returns the RxD
 1364 * This function terminates the RxDs of the ring 1523 * This function terminates the RxDs of the ring
1365 */ 1524 */
1366enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring) 1525static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
1367{ 1526{
1368 void *rxdh; 1527 void *rxdh;
1369 struct __vxge_hw_channel *channel; 1528 struct __vxge_hw_channel *channel;
@@ -1392,7 +1551,7 @@ enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
1392 * __vxge_hw_ring_reset - Resets the ring 1551 * __vxge_hw_ring_reset - Resets the ring
1393 * This function resets the ring during vpath reset operation 1552 * This function resets the ring during vpath reset operation
1394 */ 1553 */
1395enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring) 1554static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
1396{ 1555{
1397 enum vxge_hw_status status = VXGE_HW_OK; 1556 enum vxge_hw_status status = VXGE_HW_OK;
1398 struct __vxge_hw_channel *channel; 1557 struct __vxge_hw_channel *channel;
@@ -1419,7 +1578,7 @@ exit:
1419 * __vxge_hw_ring_delete - Removes the ring 1578 * __vxge_hw_ring_delete - Removes the ring
 1420 * This function frees up the memory pool and removes the ring 1579 * This function frees up the memory pool and removes the ring
1421 */ 1580 */
1422enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp) 1581static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
1423{ 1582{
1424 struct __vxge_hw_ring *ring = vp->vpath->ringh; 1583 struct __vxge_hw_ring *ring = vp->vpath->ringh;
1425 1584
@@ -1438,7 +1597,7 @@ enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
1438 * __vxge_hw_mempool_grow 1597 * __vxge_hw_mempool_grow
 1439 * Will resize the mempool up to %num_allocate items. 1598 * Will resize the mempool up to %num_allocate items.
1440 */ 1599 */
1441enum vxge_hw_status 1600static enum vxge_hw_status
1442__vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate, 1601__vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
1443 u32 *num_allocated) 1602 u32 *num_allocated)
1444{ 1603{
@@ -1527,7 +1686,7 @@ exit:
 1527 * with size enough to hold %items_initial items. Memory is 1686 * with size enough to hold %items_initial items. Memory is
 1528 * DMA-able, but the client must map/unmap it before interoperating with the device. 1687 * DMA-able, but the client must map/unmap it before interoperating with the device.
1529 */ 1688 */
1530struct vxge_hw_mempool* 1689static struct vxge_hw_mempool*
1531__vxge_hw_mempool_create( 1690__vxge_hw_mempool_create(
1532 struct __vxge_hw_device *devh, 1691 struct __vxge_hw_device *devh,
1533 u32 memblock_size, 1692 u32 memblock_size,
@@ -1644,7 +1803,7 @@ exit:
1644/* 1803/*
1645 * vxge_hw_mempool_destroy 1804 * vxge_hw_mempool_destroy
1646 */ 1805 */
1647void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool) 1806static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
1648{ 1807{
1649 u32 i, j; 1808 u32 i, j;
1650 struct __vxge_hw_device *devh = mempool->devh; 1809 struct __vxge_hw_device *devh = mempool->devh;
@@ -1700,7 +1859,7 @@ __vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
1700 * __vxge_hw_device_vpath_config_check - Check vpath configuration. 1859 * __vxge_hw_device_vpath_config_check - Check vpath configuration.
1701 * Check the vpath configuration 1860 * Check the vpath configuration
1702 */ 1861 */
1703enum vxge_hw_status 1862static enum vxge_hw_status
1704__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config) 1863__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
1705{ 1864{
1706 enum vxge_hw_status status; 1865 enum vxge_hw_status status;
@@ -1922,7 +2081,7 @@ vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
 1922 * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section. 2081 * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
 1923 * Set the swapper bits appropriately for the legacy section. 2082 * Set the swapper bits appropriately for the legacy section.
1924 */ 2083 */
1925enum vxge_hw_status 2084static enum vxge_hw_status
1926__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg) 2085__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
1927{ 2086{
1928 u64 val64; 2087 u64 val64;
@@ -1977,7 +2136,7 @@ __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
1977 * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath. 2136 * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
1978 * Set the swapper bits appropriately for the vpath. 2137 * Set the swapper bits appropriately for the vpath.
1979 */ 2138 */
1980enum vxge_hw_status 2139static enum vxge_hw_status
1981__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg) 2140__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
1982{ 2141{
1983#ifndef __BIG_ENDIAN 2142#ifndef __BIG_ENDIAN
@@ -1996,7 +2155,7 @@ __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
1996 * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc. 2155 * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
1997 * Set the swapper bits appropriately for the vpath. 2156 * Set the swapper bits appropriately for the vpath.
1998 */ 2157 */
1999enum vxge_hw_status 2158static enum vxge_hw_status
2000__vxge_hw_kdfc_swapper_set( 2159__vxge_hw_kdfc_swapper_set(
2001 struct vxge_hw_legacy_reg __iomem *legacy_reg, 2160 struct vxge_hw_legacy_reg __iomem *legacy_reg,
2002 struct vxge_hw_vpath_reg __iomem *vpath_reg) 2161 struct vxge_hw_vpath_reg __iomem *vpath_reg)
@@ -2021,28 +2180,6 @@ __vxge_hw_kdfc_swapper_set(
2021} 2180}
2022 2181
2023/* 2182/*
2024 * vxge_hw_mgmt_device_config - Retrieve device configuration.
2025 * Get device configuration. Permits to retrieve at run-time configuration
2026 * values that were used to initialize and configure the device.
2027 */
2028enum vxge_hw_status
2029vxge_hw_mgmt_device_config(struct __vxge_hw_device *hldev,
2030 struct vxge_hw_device_config *dev_config, int size)
2031{
2032
2033 if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC))
2034 return VXGE_HW_ERR_INVALID_DEVICE;
2035
2036 if (size != sizeof(struct vxge_hw_device_config))
2037 return VXGE_HW_ERR_VERSION_CONFLICT;
2038
2039 memcpy(dev_config, &hldev->config,
2040 sizeof(struct vxge_hw_device_config));
2041
2042 return VXGE_HW_OK;
2043}
2044
2045/*
2046 * vxge_hw_mgmt_reg_read - Read Titan register. 2183 * vxge_hw_mgmt_reg_read - Read Titan register.
2047 */ 2184 */
2048enum vxge_hw_status 2185enum vxge_hw_status
@@ -2438,7 +2575,7 @@ exit:
2438 * __vxge_hw_fifo_abort - Returns the TxD 2575 * __vxge_hw_fifo_abort - Returns the TxD
 2439 * This function terminates the TxDs of the fifo 2576 * This function terminates the TxDs of the fifo
2440 */ 2577 */
2441enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo) 2578static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
2442{ 2579{
2443 void *txdlh; 2580 void *txdlh;
2444 2581
@@ -2466,7 +2603,7 @@ enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
2466 * __vxge_hw_fifo_reset - Resets the fifo 2603 * __vxge_hw_fifo_reset - Resets the fifo
2467 * This function resets the fifo during vpath reset operation 2604 * This function resets the fifo during vpath reset operation
2468 */ 2605 */
2469enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo) 2606static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
2470{ 2607{
2471 enum vxge_hw_status status = VXGE_HW_OK; 2608 enum vxge_hw_status status = VXGE_HW_OK;
2472 2609
@@ -2501,7 +2638,7 @@ enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
2501 * in pci config space. 2638 * in pci config space.
2502 * Read from the vpath pci config space. 2639 * Read from the vpath pci config space.
2503 */ 2640 */
2504enum vxge_hw_status 2641static enum vxge_hw_status
2505__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath, 2642__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
2506 u32 phy_func_0, u32 offset, u32 *val) 2643 u32 phy_func_0, u32 offset, u32 *val)
2507{ 2644{
@@ -2542,7 +2679,7 @@ exit:
2542 * __vxge_hw_vpath_func_id_get - Get the function id of the vpath. 2679 * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
2543 * Returns the function number of the vpath. 2680 * Returns the function number of the vpath.
2544 */ 2681 */
2545u32 2682static u32
2546__vxge_hw_vpath_func_id_get(u32 vp_id, 2683__vxge_hw_vpath_func_id_get(u32 vp_id,
2547 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg) 2684 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
2548{ 2685{
@@ -2573,7 +2710,7 @@ __vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
2573 * __vxge_hw_vpath_card_info_get - Get the serial numbers, 2710 * __vxge_hw_vpath_card_info_get - Get the serial numbers,
2574 * part number and product description. 2711 * part number and product description.
2575 */ 2712 */
2576enum vxge_hw_status 2713static enum vxge_hw_status
2577__vxge_hw_vpath_card_info_get( 2714__vxge_hw_vpath_card_info_get(
2578 u32 vp_id, 2715 u32 vp_id,
2579 struct vxge_hw_vpath_reg __iomem *vpath_reg, 2716 struct vxge_hw_vpath_reg __iomem *vpath_reg,
@@ -2695,7 +2832,7 @@ __vxge_hw_vpath_card_info_get(
2695 * __vxge_hw_vpath_fw_ver_get - Get the fw version 2832 * __vxge_hw_vpath_fw_ver_get - Get the fw version
2696 * Returns FW Version 2833 * Returns FW Version
2697 */ 2834 */
2698enum vxge_hw_status 2835static enum vxge_hw_status
2699__vxge_hw_vpath_fw_ver_get( 2836__vxge_hw_vpath_fw_ver_get(
2700 u32 vp_id, 2837 u32 vp_id,
2701 struct vxge_hw_vpath_reg __iomem *vpath_reg, 2838 struct vxge_hw_vpath_reg __iomem *vpath_reg,
@@ -2789,7 +2926,7 @@ exit:
2789 * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode 2926 * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
2790 * Returns pci function mode 2927 * Returns pci function mode
2791 */ 2928 */
2792u64 2929static u64
2793__vxge_hw_vpath_pci_func_mode_get( 2930__vxge_hw_vpath_pci_func_mode_get(
2794 u32 vp_id, 2931 u32 vp_id,
2795 struct vxge_hw_vpath_reg __iomem *vpath_reg) 2932 struct vxge_hw_vpath_reg __iomem *vpath_reg)
@@ -2995,7 +3132,7 @@ exit:
2995 * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath 3132 * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
 2996 * from the MAC address table. 3133 * from the MAC address table.
2997 */ 3134 */
2998enum vxge_hw_status 3135static enum vxge_hw_status
2999__vxge_hw_vpath_addr_get( 3136__vxge_hw_vpath_addr_get(
3000 u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg, 3137 u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
3001 u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN]) 3138 u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN])
@@ -3347,7 +3484,7 @@ __vxge_hw_vpath_mgmt_read(
3347 * This routine checks the vpath_rst_in_prog register to see if 3484 * This routine checks the vpath_rst_in_prog register to see if
 3348 * the adapter has completed the reset process for the vpath 3485 * the adapter has completed the reset process for the vpath
3349 */ 3486 */
3350enum vxge_hw_status 3487static enum vxge_hw_status
3351__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath) 3488__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
3352{ 3489{
3353 enum vxge_hw_status status; 3490 enum vxge_hw_status status;
@@ -3365,7 +3502,7 @@ __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
3365 * __vxge_hw_vpath_reset 3502 * __vxge_hw_vpath_reset
3366 * This routine resets the vpath on the device 3503 * This routine resets the vpath on the device
3367 */ 3504 */
3368enum vxge_hw_status 3505static enum vxge_hw_status
3369__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id) 3506__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
3370{ 3507{
3371 u64 val64; 3508 u64 val64;
@@ -3383,7 +3520,7 @@ __vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
3383 * __vxge_hw_vpath_sw_reset 3520 * __vxge_hw_vpath_sw_reset
3384 * This routine resets the vpath structures 3521 * This routine resets the vpath structures
3385 */ 3522 */
3386enum vxge_hw_status 3523static enum vxge_hw_status
3387__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id) 3524__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
3388{ 3525{
3389 enum vxge_hw_status status = VXGE_HW_OK; 3526 enum vxge_hw_status status = VXGE_HW_OK;
@@ -3408,7 +3545,7 @@ exit:
 3408 * This routine configures the prc registers of the virtual path using the config 3545 * This routine configures the prc registers of the virtual path using the config
3409 * passed 3546 * passed
3410 */ 3547 */
3411void 3548static void
3412__vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id) 3549__vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3413{ 3550{
3414 u64 val64; 3551 u64 val64;
@@ -3480,7 +3617,7 @@ __vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
 3480 * This routine configures the kdfc registers of the virtual path using the 3617 * This routine configures the kdfc registers of the virtual path using the
3481 * config passed 3618 * config passed
3482 */ 3619 */
3483enum vxge_hw_status 3620static enum vxge_hw_status
3484__vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id) 3621__vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3485{ 3622{
3486 u64 val64; 3623 u64 val64;
@@ -3553,7 +3690,7 @@ exit:
3553 * __vxge_hw_vpath_mac_configure 3690 * __vxge_hw_vpath_mac_configure
 3554 * This routine configures the mac of the virtual path using the config passed 3691 * This routine configures the mac of the virtual path using the config passed
3555 */ 3692 */
3556enum vxge_hw_status 3693static enum vxge_hw_status
3557__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id) 3694__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3558{ 3695{
3559 u64 val64; 3696 u64 val64;
@@ -3621,7 +3758,7 @@ __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
 3621 * This routine configures the tim registers of the virtual path using the config 3758 * This routine configures the tim registers of the virtual path using the config
3622 * passed 3759 * passed
3623 */ 3760 */
3624enum vxge_hw_status 3761static enum vxge_hw_status
3625__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) 3762__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3626{ 3763{
3627 u64 val64; 3764 u64 val64;
@@ -3897,7 +4034,7 @@ vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
3897 * This routine is the final phase of init which initializes the 4034 * This routine is the final phase of init which initializes the
3898 * registers of the vpath using the configuration passed. 4035 * registers of the vpath using the configuration passed.
3899 */ 4036 */
3900enum vxge_hw_status 4037static enum vxge_hw_status
3901__vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id) 4038__vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
3902{ 4039{
3903 u64 val64; 4040 u64 val64;
@@ -3966,7 +4103,7 @@ exit:
3966 * This routine is the initial phase of init which resets the vpath and 4103 * This routine is the initial phase of init which resets the vpath and
3967 * initializes the software support structures. 4104 * initializes the software support structures.
3968 */ 4105 */
3969enum vxge_hw_status 4106static enum vxge_hw_status
3970__vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id, 4107__vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
3971 struct vxge_hw_vp_config *config) 4108 struct vxge_hw_vp_config *config)
3972{ 4109{
@@ -4022,7 +4159,7 @@ exit:
4022 * __vxge_hw_vp_terminate - Terminate Virtual Path structure 4159 * __vxge_hw_vp_terminate - Terminate Virtual Path structure
 4023 * This routine closes all the channels it opened and frees up memory 4160 * This routine closes all the channels it opened and frees up memory
4024 */ 4161 */
4025void 4162static void
4026__vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id) 4163__vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
4027{ 4164{
4028 struct __vxge_hw_virtualpath *vpath; 4165 struct __vxge_hw_virtualpath *vpath;
@@ -4384,7 +4521,7 @@ vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
4384 * Enable the DMA vpath statistics. The function is to be called to re-enable 4521 * Enable the DMA vpath statistics. The function is to be called to re-enable
 4385 * the adapter to update stats in host memory 4522 * the adapter to update stats in host memory
4386 */ 4523 */
4387enum vxge_hw_status 4524static enum vxge_hw_status
4388vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp) 4525vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
4389{ 4526{
4390 enum vxge_hw_status status = VXGE_HW_OK; 4527 enum vxge_hw_status status = VXGE_HW_OK;
@@ -4409,7 +4546,7 @@ exit:
4409 * __vxge_hw_vpath_stats_access - Get the statistics from the given location 4546 * __vxge_hw_vpath_stats_access - Get the statistics from the given location
4410 * and offset and perform an operation 4547 * and offset and perform an operation
4411 */ 4548 */
4412enum vxge_hw_status 4549static enum vxge_hw_status
4413__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath, 4550__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
4414 u32 operation, u32 offset, u64 *stat) 4551 u32 operation, u32 offset, u64 *stat)
4415{ 4552{
@@ -4445,7 +4582,7 @@ vpath_stats_access_exit:
4445/* 4582/*
4446 * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath 4583 * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
4447 */ 4584 */
4448enum vxge_hw_status 4585static enum vxge_hw_status
4449__vxge_hw_vpath_xmac_tx_stats_get( 4586__vxge_hw_vpath_xmac_tx_stats_get(
4450 struct __vxge_hw_virtualpath *vpath, 4587 struct __vxge_hw_virtualpath *vpath,
4451 struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats) 4588 struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
@@ -4478,9 +4615,9 @@ exit:
4478/* 4615/*
4479 * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath 4616 * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
4480 */ 4617 */
4481enum vxge_hw_status 4618static enum vxge_hw_status
4482__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath, 4619__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
4483 struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats) 4620 struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
4484{ 4621{
4485 u64 *val64; 4622 u64 *val64;
4486 enum vxge_hw_status status = VXGE_HW_OK; 4623 enum vxge_hw_status status = VXGE_HW_OK;
@@ -4509,9 +4646,9 @@ exit:
4509/* 4646/*
4510 * __vxge_hw_vpath_stats_get - Get the vpath hw statistics. 4647 * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
4511 */ 4648 */
4512enum vxge_hw_status __vxge_hw_vpath_stats_get( 4649static enum vxge_hw_status
4513 struct __vxge_hw_virtualpath *vpath, 4650__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
4514 struct vxge_hw_vpath_stats_hw_info *hw_stats) 4651 struct vxge_hw_vpath_stats_hw_info *hw_stats)
4515{ 4652{
4516 u64 val64; 4653 u64 val64;
4517 enum vxge_hw_status status = VXGE_HW_OK; 4654 enum vxge_hw_status status = VXGE_HW_OK;
@@ -4643,6 +4780,32 @@ exit:
4643 return status; 4780 return status;
4644} 4781}
4645 4782
4783
4784static void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh,
4785 unsigned long size)
4786{
4787 gfp_t flags;
4788 void *vaddr;
4789
4790 if (in_interrupt())
4791 flags = GFP_ATOMIC | GFP_DMA;
4792 else
4793 flags = GFP_KERNEL | GFP_DMA;
4794
4795 vaddr = kmalloc((size), flags);
4796
4797 vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
4798}
4799
4800static void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
4801 struct pci_dev **p_dma_acch)
4802{
4803 unsigned long misaligned = *(unsigned long *)p_dma_acch;
4804 u8 *tmp = (u8 *)vaddr;
4805 tmp -= misaligned;
4806 kfree((void *)tmp);
4807}
4808
4646/* 4809/*
4647 * __vxge_hw_blockpool_create - Create block pool 4810 * __vxge_hw_blockpool_create - Create block pool
4648 */ 4811 */
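
vxge_os_dma_malloc_async() and vxge_os_dma_free() move here from the header (they are deleted from vxge-config.h further down), which lets the vxge_hw_blockpool_block_add() callback they feed become static. The GFP choice inside is the classic context test: an allocation from interrupt context must not sleep, so it uses GFP_ATOMIC, while process context may block with GFP_KERNEL. A standalone model of that selection (in_interrupt() and the flag values here are stand-ins, not the kernel's):

#include <stdio.h>
#include <stdbool.h>

#define GFP_ATOMIC 0x1   /* may not sleep: interrupt context */
#define GFP_KERNEL 0x2   /* may block to reclaim memory      */
#define GFP_DMA    0x4   /* restrict to the DMA-capable zone */

static bool in_interrupt(void) { return false; } /* process context here */

int main(void)
{
	unsigned int flags;

	if (in_interrupt())
		flags = GFP_ATOMIC | GFP_DMA;
	else
		flags = GFP_KERNEL | GFP_DMA;

	printf("gfp flags = %#x\n", flags);
	return 0;
}
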
@@ -4845,12 +5008,11 @@ void __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
4845 * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async 5008 * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
4846 * Adds a block to block pool 5009 * Adds a block to block pool
4847 */ 5010 */
4848void vxge_hw_blockpool_block_add( 5011static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
4849 struct __vxge_hw_device *devh, 5012 void *block_addr,
4850 void *block_addr, 5013 u32 length,
4851 u32 length, 5014 struct pci_dev *dma_h,
4852 struct pci_dev *dma_h, 5015 struct pci_dev *acc_handle)
4853 struct pci_dev *acc_handle)
4854{ 5016{
4855 struct __vxge_hw_blockpool *blockpool; 5017 struct __vxge_hw_blockpool *blockpool;
4856 struct __vxge_hw_blockpool_entry *entry = NULL; 5018 struct __vxge_hw_blockpool_entry *entry = NULL;
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index 1a94343023cb..5c00861b6c2c 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -183,11 +183,6 @@ struct vxge_hw_device_version {
183 char version[VXGE_HW_FW_STRLEN]; 183 char version[VXGE_HW_FW_STRLEN];
184}; 184};
185 185
186u64
187__vxge_hw_vpath_pci_func_mode_get(
188 u32 vp_id,
189 struct vxge_hw_vpath_reg __iomem *vpath_reg);
190
191/** 186/**
192 * struct vxge_hw_fifo_config - Configuration of fifo. 187 * struct vxge_hw_fifo_config - Configuration of fifo.
193 * @enable: Is this fifo to be commissioned 188 * @enable: Is this fifo to be commissioned
@@ -1426,9 +1421,6 @@ struct vxge_hw_rth_hash_types {
1426 u8 hash_type_ipv6ex_en; 1421 u8 hash_type_ipv6ex_en;
1427}; 1422};
1428 1423
1429u32
1430vxge_hw_device_debug_mask_get(struct __vxge_hw_device *devh);
1431
1432void vxge_hw_device_debug_set( 1424void vxge_hw_device_debug_set(
1433 struct __vxge_hw_device *devh, 1425 struct __vxge_hw_device *devh,
1434 enum vxge_debug_level level, 1426 enum vxge_debug_level level,
@@ -1440,9 +1432,6 @@ vxge_hw_device_error_level_get(struct __vxge_hw_device *devh);
1440u32 1432u32
1441vxge_hw_device_trace_level_get(struct __vxge_hw_device *devh); 1433vxge_hw_device_trace_level_get(struct __vxge_hw_device *devh);
1442 1434
1443u32
1444vxge_hw_device_debug_mask_get(struct __vxge_hw_device *devh);
1445
1446/** 1435/**
1447 * vxge_hw_ring_rxd_size_get - Get the size of ring descriptor. 1436 * vxge_hw_ring_rxd_size_get - Get the size of ring descriptor.
1448 * @buf_mode: Buffer mode (1, 3 or 5) 1437 * @buf_mode: Buffer mode (1, 3 or 5)
@@ -1817,60 +1806,10 @@ struct vxge_hw_vpath_attr {
1817 struct vxge_hw_fifo_attr fifo_attr; 1806 struct vxge_hw_fifo_attr fifo_attr;
1818}; 1807};
1819 1808
1820enum vxge_hw_status
1821__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
1822 struct __vxge_hw_blockpool *blockpool,
1823 u32 pool_size,
1824 u32 pool_max);
1825
1826void
1827__vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool);
1828
1829struct __vxge_hw_blockpool_entry *
1830__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev,
1831 u32 size);
1832
1833void
1834__vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev,
1835 struct __vxge_hw_blockpool_entry *entry);
1836
1837void *
1838__vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev,
1839 u32 size,
1840 struct vxge_hw_mempool_dma *dma_object);
1841
1842void
1843__vxge_hw_blockpool_free(struct __vxge_hw_device *hldev,
1844 void *memblock,
1845 u32 size,
1846 struct vxge_hw_mempool_dma *dma_object);
1847
1848enum vxge_hw_status
1849__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config);
1850
1851enum vxge_hw_status
1852__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config);
1853
1854enum vxge_hw_status
1855vxge_hw_mgmt_device_config(struct __vxge_hw_device *devh,
1856 struct vxge_hw_device_config *dev_config, int size);
1857
1858enum vxge_hw_status __devinit vxge_hw_device_hw_info_get( 1809enum vxge_hw_status __devinit vxge_hw_device_hw_info_get(
1859 void __iomem *bar0, 1810 void __iomem *bar0,
1860 struct vxge_hw_device_hw_info *hw_info); 1811 struct vxge_hw_device_hw_info *hw_info);
1861 1812
1862enum vxge_hw_status
1863__vxge_hw_vpath_fw_ver_get(
1864 u32 vp_id,
1865 struct vxge_hw_vpath_reg __iomem *vpath_reg,
1866 struct vxge_hw_device_hw_info *hw_info);
1867
1868enum vxge_hw_status
1869__vxge_hw_vpath_card_info_get(
1870 u32 vp_id,
1871 struct vxge_hw_vpath_reg __iomem *vpath_reg,
1872 struct vxge_hw_device_hw_info *hw_info);
1873
1874enum vxge_hw_status __devinit vxge_hw_device_config_default_get( 1813enum vxge_hw_status __devinit vxge_hw_device_config_default_get(
1875 struct vxge_hw_device_config *device_config); 1814 struct vxge_hw_device_config *device_config);
1876 1815
@@ -1954,38 +1893,6 @@ out:
1954 return vaddr; 1893 return vaddr;
1955} 1894}
1956 1895
1957extern void vxge_hw_blockpool_block_add(
1958 struct __vxge_hw_device *devh,
1959 void *block_addr,
1960 u32 length,
1961 struct pci_dev *dma_h,
1962 struct pci_dev *acc_handle);
1963
1964static inline void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh,
1965 unsigned long size)
1966{
1967 gfp_t flags;
1968 void *vaddr;
1969
1970 if (in_interrupt())
1971 flags = GFP_ATOMIC | GFP_DMA;
1972 else
1973 flags = GFP_KERNEL | GFP_DMA;
1974
1975 vaddr = kmalloc((size), flags);
1976
1977 vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
1978}
1979
1980static inline void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
1981 struct pci_dev **p_dma_acch)
1982{
1983 unsigned long misaligned = *(unsigned long *)p_dma_acch;
1984 u8 *tmp = (u8 *)vaddr;
1985 tmp -= misaligned;
1986 kfree((void *)tmp);
1987}
1988
1989/* 1896/*
1990 * __vxge_hw_mempool_item_priv - will return pointer on per item private space 1897 * __vxge_hw_mempool_item_priv - will return pointer on per item private space
1991 */ 1898 */
@@ -2010,40 +1917,6 @@ __vxge_hw_mempool_item_priv(
2010 (*memblock_item_idx) * mempool->items_priv_size; 1917 (*memblock_item_idx) * mempool->items_priv_size;
2011} 1918}
2012 1919
2013enum vxge_hw_status
2014__vxge_hw_mempool_grow(
2015 struct vxge_hw_mempool *mempool,
2016 u32 num_allocate,
2017 u32 *num_allocated);
2018
2019struct vxge_hw_mempool*
2020__vxge_hw_mempool_create(
2021 struct __vxge_hw_device *devh,
2022 u32 memblock_size,
2023 u32 item_size,
2024 u32 private_size,
2025 u32 items_initial,
2026 u32 items_max,
2027 struct vxge_hw_mempool_cbs *mp_callback,
2028 void *userdata);
2029
2030struct __vxge_hw_channel*
2031__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
2032 enum __vxge_hw_channel_type type, u32 length,
2033 u32 per_dtr_space, void *userdata);
2034
2035void
2036__vxge_hw_channel_free(
2037 struct __vxge_hw_channel *channel);
2038
2039enum vxge_hw_status
2040__vxge_hw_channel_initialize(
2041 struct __vxge_hw_channel *channel);
2042
2043enum vxge_hw_status
2044__vxge_hw_channel_reset(
2045 struct __vxge_hw_channel *channel);
2046
2047/* 1920/*
2048 * __vxge_hw_fifo_txdl_priv - Return the max fragments allocated 1921 * __vxge_hw_fifo_txdl_priv - Return the max fragments allocated
2049 * for the fifo. 1922 * for the fifo.
@@ -2065,9 +1938,6 @@ enum vxge_hw_status vxge_hw_vpath_open(
2065 struct vxge_hw_vpath_attr *attr, 1938 struct vxge_hw_vpath_attr *attr,
2066 struct __vxge_hw_vpath_handle **vpath_handle); 1939 struct __vxge_hw_vpath_handle **vpath_handle);
2067 1940
2068enum vxge_hw_status
2069__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog);
2070
2071enum vxge_hw_status vxge_hw_vpath_close( 1941enum vxge_hw_status vxge_hw_vpath_close(
2072 struct __vxge_hw_vpath_handle *vpath_handle); 1942 struct __vxge_hw_vpath_handle *vpath_handle);
2073 1943
@@ -2089,54 +1959,9 @@ enum vxge_hw_status vxge_hw_vpath_mtu_set(
2089 struct __vxge_hw_vpath_handle *vpath_handle, 1959 struct __vxge_hw_vpath_handle *vpath_handle,
2090 u32 new_mtu); 1960 u32 new_mtu);
2091 1961
2092enum vxge_hw_status vxge_hw_vpath_stats_enable(
2093 struct __vxge_hw_vpath_handle *vpath_handle);
2094
2095enum vxge_hw_status
2096__vxge_hw_vpath_stats_access(
2097 struct __vxge_hw_virtualpath *vpath,
2098 u32 operation,
2099 u32 offset,
2100 u64 *stat);
2101
2102enum vxge_hw_status
2103__vxge_hw_vpath_xmac_tx_stats_get(
2104 struct __vxge_hw_virtualpath *vpath,
2105 struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
2106
2107enum vxge_hw_status
2108__vxge_hw_vpath_xmac_rx_stats_get(
2109 struct __vxge_hw_virtualpath *vpath,
2110 struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
2111
2112enum vxge_hw_status
2113__vxge_hw_vpath_stats_get(
2114 struct __vxge_hw_virtualpath *vpath,
2115 struct vxge_hw_vpath_stats_hw_info *hw_stats);
2116
2117void 1962void
2118vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp); 1963vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp);
2119 1964
2120enum vxge_hw_status
2121__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config);
2122
2123void
2124__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);
2125
2126enum vxge_hw_status
2127__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);
2128
2129enum vxge_hw_status
2130__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg);
2131
2132enum vxge_hw_status
2133__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
2134 struct vxge_hw_vpath_reg __iomem *vpath_reg);
2135
2136enum vxge_hw_status
2137__vxge_hw_device_register_poll(
2138 void __iomem *reg,
2139 u64 mask, u32 max_millis);
2140 1965
2141#ifndef readq 1966#ifndef readq
2142static inline u64 readq(void __iomem *addr) 1967static inline u64 readq(void __iomem *addr)
@@ -2168,62 +1993,12 @@ static inline void __vxge_hw_pio_mem_write32_lower(u32 val, void __iomem *addr)
2168 writel(val, addr); 1993 writel(val, addr);
2169} 1994}
2170 1995
2171static inline enum vxge_hw_status
2172__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
2173 u64 mask, u32 max_millis)
2174{
2175 enum vxge_hw_status status = VXGE_HW_OK;
2176
2177 __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
2178 wmb();
2179 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
2180 wmb();
2181
2182 status = __vxge_hw_device_register_poll(addr, mask, max_millis);
2183 return status;
2184}
2185
2186struct vxge_hw_toc_reg __iomem *
2187__vxge_hw_device_toc_get(void __iomem *bar0);
2188
2189enum vxge_hw_status
2190__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev);
2191
2192void
2193__vxge_hw_device_id_get(struct __vxge_hw_device *hldev);
2194
2195void
2196__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);
2197
2198enum vxge_hw_status 1996enum vxge_hw_status
2199vxge_hw_device_flick_link_led(struct __vxge_hw_device *devh, u64 on_off); 1997vxge_hw_device_flick_link_led(struct __vxge_hw_device *devh, u64 on_off);
2200 1998
2201enum vxge_hw_status 1999enum vxge_hw_status
2202__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);
2203
2204enum vxge_hw_status
2205__vxge_hw_vpath_pci_read(
2206 struct __vxge_hw_virtualpath *vpath,
2207 u32 phy_func_0,
2208 u32 offset,
2209 u32 *val);
2210
2211enum vxge_hw_status
2212__vxge_hw_vpath_addr_get(
2213 u32 vp_id,
2214 struct vxge_hw_vpath_reg __iomem *vpath_reg,
2215 u8 (macaddr)[ETH_ALEN],
2216 u8 (macaddr_mask)[ETH_ALEN]);
2217
2218u32
2219__vxge_hw_vpath_func_id_get(
2220 u32 vp_id, struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);
2221
2222enum vxge_hw_status
2223__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);
2224
2225enum vxge_hw_status
2226vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask); 2000vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
2001
2227/** 2002/**
2228 * vxge_debug 2003 * vxge_debug
2229 * @level: level of debug verbosity. 2004 * @level: level of debug verbosity.
diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c
index 05679e306fdd..b67746eef923 100644
--- a/drivers/net/vxge/vxge-ethtool.c
+++ b/drivers/net/vxge/vxge-ethtool.c
@@ -1142,7 +1142,7 @@ static const struct ethtool_ops vxge_ethtool_ops = {
1142 .get_ethtool_stats = vxge_get_ethtool_stats, 1142 .get_ethtool_stats = vxge_get_ethtool_stats,
1143}; 1143};
1144 1144
1145void initialize_ethtool_ops(struct net_device *ndev) 1145void vxge_initialize_ethtool_ops(struct net_device *ndev)
1146{ 1146{
1147 SET_ETHTOOL_OPS(ndev, &vxge_ethtool_ops); 1147 SET_ETHTOOL_OPS(ndev, &vxge_ethtool_ops);
1148} 1148}
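
The rename from initialize_ethtool_ops() to vxge_initialize_ethtool_ops() (matched by the extern declaration in vxge-main.h below) is namespace hygiene: every non-static symbol in the kernel image shares one flat namespace, so a generic name risks colliding with another driver. A standalone sketch of the convention, with illustrative names:

#include <stdio.h>

struct ethtool_ops_model { void (*report)(void); };

static void vxge_report(void) { printf("vxge ops installed\n"); }

static const struct ethtool_ops_model vxge_ops = { .report = vxge_report };

/* driver-prefixed, externally visible initializer */
void vxge_initialize_ops_model(const struct ethtool_ops_model **slot)
{
	*slot = &vxge_ops;
}

int main(void)
{
	const struct ethtool_ops_model *dev_ops = NULL;

	vxge_initialize_ops_model(&dev_ops);
	dev_ops->report();
	return 0;
}
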
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index a69542ecb68d..813829f3d024 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -82,6 +82,16 @@ module_param_array(bw_percentage, uint, NULL, 0);
82 82
83static struct vxge_drv_config *driver_config; 83static struct vxge_drv_config *driver_config;
84 84
85static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
86 struct macInfo *mac);
87static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
88 struct macInfo *mac);
89static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac);
90static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);
91static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);
92static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);
93static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
94
85static inline int is_vxge_card_up(struct vxgedev *vdev) 95static inline int is_vxge_card_up(struct vxgedev *vdev)
86{ 96{
87 return test_bit(__VXGE_STATE_CARD_UP, &vdev->state); 97 return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
@@ -138,7 +148,7 @@ static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
 138 * This function is called in interrupt context to notify link up state 148 * This function is called in interrupt context to notify link up state
139 * change. 149 * change.
140 */ 150 */
141void 151static void
142vxge_callback_link_up(struct __vxge_hw_device *hldev) 152vxge_callback_link_up(struct __vxge_hw_device *hldev)
143{ 153{
144 struct net_device *dev = hldev->ndev; 154 struct net_device *dev = hldev->ndev;
@@ -162,7 +172,7 @@ vxge_callback_link_up(struct __vxge_hw_device *hldev)
 162 * This function is called in interrupt context to notify link down state 172 * This function is called in interrupt context to notify link down state
163 * change. 173 * change.
164 */ 174 */
165void 175static void
166vxge_callback_link_down(struct __vxge_hw_device *hldev) 176vxge_callback_link_down(struct __vxge_hw_device *hldev)
167{ 177{
168 struct net_device *dev = hldev->ndev; 178 struct net_device *dev = hldev->ndev;
@@ -354,7 +364,7 @@ static inline void vxge_post(int *dtr_cnt, void **first_dtr,
354 * If the interrupt is because of a received frame or if the receive ring 364 * If the interrupt is because of a received frame or if the receive ring
 355 * contains fresh, as-yet-unprocessed frames, this function is called. 365 * contains fresh, as-yet-unprocessed frames, this function is called.
356 */ 366 */
357enum vxge_hw_status 367static enum vxge_hw_status
358vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr, 368vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
359 u8 t_code, void *userdata) 369 u8 t_code, void *userdata)
360{ 370{
@@ -531,7 +541,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
 531 * freed and frees all skbs whose data has already been DMA'ed into the NIC's 541 * freed and frees all skbs whose data has already been DMA'ed into the NIC's
532 * internal memory. 542 * internal memory.
533 */ 543 */
534enum vxge_hw_status 544static enum vxge_hw_status
535vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr, 545vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
536 enum vxge_hw_fifo_tcode t_code, void *userdata, 546 enum vxge_hw_fifo_tcode t_code, void *userdata,
537 struct sk_buff ***skb_ptr, int nr_skb, int *more) 547 struct sk_buff ***skb_ptr, int nr_skb, int *more)
@@ -1246,7 +1256,7 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
1246 * 1256 *
1247 * Enables the interrupts for the vpath 1257 * Enables the interrupts for the vpath
1248*/ 1258*/
1249void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id) 1259static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
1250{ 1260{
1251 struct vxge_vpath *vpath = &vdev->vpaths[vp_id]; 1261 struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
1252 int msix_id = 0; 1262 int msix_id = 0;
@@ -1279,7 +1289,7 @@ void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
1279 * 1289 *
1280 * Disables the interrupts for the vpath 1290 * Disables the interrupts for the vpath
1281*/ 1291*/
1282void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id) 1292static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
1283{ 1293{
1284 struct vxge_vpath *vpath = &vdev->vpaths[vp_id]; 1294 struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
1285 int msix_id; 1295 int msix_id;
@@ -1553,7 +1563,7 @@ out:
1553 * 1563 *
 1554 * the driver may reset the chip on events such as serr, eccerr, etc. 1564 * the driver may reset the chip on events such as serr, eccerr, etc.
1555 */ 1565 */
1556int vxge_reset(struct vxgedev *vdev) 1566static int vxge_reset(struct vxgedev *vdev)
1557{ 1567{
1558 return do_vxge_reset(vdev, VXGE_LL_FULL_RESET); 1568 return do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
1559} 1569}
@@ -1724,7 +1734,7 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
1724 return status; 1734 return status;
1725} 1735}
1726 1736
1727int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac) 1737static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
1728{ 1738{
1729 struct vxge_mac_addrs *new_mac_entry; 1739 struct vxge_mac_addrs *new_mac_entry;
1730 u8 *mac_address = NULL; 1740 u8 *mac_address = NULL;
@@ -1757,7 +1767,8 @@ int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
1757} 1767}
1758 1768
 1759/* Add a MAC address to the DA table */ 1769/* Add a MAC address to the DA table */
1760enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac) 1770static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
1771 struct macInfo *mac)
1761{ 1772{
1762 enum vxge_hw_status status = VXGE_HW_OK; 1773 enum vxge_hw_status status = VXGE_HW_OK;
1763 struct vxge_vpath *vpath; 1774 struct vxge_vpath *vpath;
@@ -1782,7 +1793,7 @@ enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
1782 return status; 1793 return status;
1783} 1794}
1784 1795
1785int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac) 1796static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
1786{ 1797{
1787 struct list_head *entry, *next; 1798 struct list_head *entry, *next;
1788 u64 del_mac = 0; 1799 u64 del_mac = 0;
@@ -1807,7 +1818,8 @@ int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
1807 return FALSE; 1818 return FALSE;
1808} 1819}
 1809/* Delete a MAC address from the DA table */ 1820/* Delete a MAC address from the DA table */
1810enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac) 1821static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
1822 struct macInfo *mac)
1811{ 1823{
1812 enum vxge_hw_status status = VXGE_HW_OK; 1824 enum vxge_hw_status status = VXGE_HW_OK;
1813 struct vxge_vpath *vpath; 1825 struct vxge_vpath *vpath;
@@ -1854,7 +1866,7 @@ static vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath,
1854} 1866}
1855 1867
 1856/* Store all VLAN IDs from the list to the VID table */ 1868/* Store all VLAN IDs from the list to the VID table */
1857enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath) 1869static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
1858{ 1870{
1859 enum vxge_hw_status status = VXGE_HW_OK; 1871 enum vxge_hw_status status = VXGE_HW_OK;
1860 struct vxgedev *vdev = vpath->vdev; 1872 struct vxgedev *vdev = vpath->vdev;
@@ -1874,7 +1886,7 @@ enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
1874} 1886}
1875 1887
 1876/* Store all MAC addresses from the list to the DA table */ 1888/* Store all MAC addresses from the list to the DA table */
1877enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath) 1889static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
1878{ 1890{
1879 enum vxge_hw_status status = VXGE_HW_OK; 1891 enum vxge_hw_status status = VXGE_HW_OK;
1880 struct macInfo mac_info; 1892 struct macInfo mac_info;
@@ -1916,7 +1928,7 @@ enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
1916} 1928}
1917 1929
1918/* reset vpaths */ 1930/* reset vpaths */
1919enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev) 1931static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
1920{ 1932{
1921 enum vxge_hw_status status = VXGE_HW_OK; 1933 enum vxge_hw_status status = VXGE_HW_OK;
1922 struct vxge_vpath *vpath; 1934 struct vxge_vpath *vpath;
@@ -1948,7 +1960,7 @@ enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
1948} 1960}
1949 1961
1950/* close vpaths */ 1962/* close vpaths */
1951void vxge_close_vpaths(struct vxgedev *vdev, int index) 1963static void vxge_close_vpaths(struct vxgedev *vdev, int index)
1952{ 1964{
1953 struct vxge_vpath *vpath; 1965 struct vxge_vpath *vpath;
1954 int i; 1966 int i;
@@ -1966,7 +1978,7 @@ void vxge_close_vpaths(struct vxgedev *vdev, int index)
1966} 1978}
1967 1979
1968/* open vpaths */ 1980/* open vpaths */
1969int vxge_open_vpaths(struct vxgedev *vdev) 1981static int vxge_open_vpaths(struct vxgedev *vdev)
1970{ 1982{
1971 struct vxge_hw_vpath_attr attr; 1983 struct vxge_hw_vpath_attr attr;
1972 enum vxge_hw_status status; 1984 enum vxge_hw_status status;
@@ -2517,7 +2529,7 @@ static void vxge_poll_vp_lockup(unsigned long data)
2517 * Return value: '0' on success and an appropriate (-)ve integer as 2529 * Return value: '0' on success and an appropriate (-)ve integer as
2518 * defined in errno.h file on failure. 2530 * defined in errno.h file on failure.
2519 */ 2531 */
2520int 2532static int
2521vxge_open(struct net_device *dev) 2533vxge_open(struct net_device *dev)
2522{ 2534{
2523 enum vxge_hw_status status; 2535 enum vxge_hw_status status;
@@ -2721,7 +2733,7 @@ out0:
2721} 2733}
2722 2734
 2723/* Loop through the MAC address list and delete all the entries */ 2735/* Loop through the MAC address list and delete all the entries */
2724void vxge_free_mac_add_list(struct vxge_vpath *vpath) 2736static void vxge_free_mac_add_list(struct vxge_vpath *vpath)
2725{ 2737{
2726 2738
2727 struct list_head *entry, *next; 2739 struct list_head *entry, *next;
@@ -2745,7 +2757,7 @@ static void vxge_napi_del_all(struct vxgedev *vdev)
2745 } 2757 }
2746} 2758}
2747 2759
2748int do_vxge_close(struct net_device *dev, int do_io) 2760static int do_vxge_close(struct net_device *dev, int do_io)
2749{ 2761{
2750 enum vxge_hw_status status; 2762 enum vxge_hw_status status;
2751 struct vxgedev *vdev; 2763 struct vxgedev *vdev;
@@ -2856,7 +2868,7 @@ int do_vxge_close(struct net_device *dev, int do_io)
 2856 * Return value: '0' on success and an appropriate negative integer as 2868 * Return value: '0' on success and an appropriate negative integer as
 2857 * defined in errno.h on failure. 2869 * defined in errno.h on failure.
2858 */ 2870 */
2859int 2871static int
2860vxge_close(struct net_device *dev) 2872vxge_close(struct net_device *dev)
2861{ 2873{
2862 do_vxge_close(dev, 1); 2874 do_vxge_close(dev, 1);
@@ -3113,10 +3125,10 @@ static const struct net_device_ops vxge_netdev_ops = {
3113#endif 3125#endif
3114}; 3126};
3115 3127
3116int __devinit vxge_device_register(struct __vxge_hw_device *hldev, 3128static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3117 struct vxge_config *config, 3129 struct vxge_config *config,
3118 int high_dma, int no_of_vpath, 3130 int high_dma, int no_of_vpath,
3119 struct vxgedev **vdev_out) 3131 struct vxgedev **vdev_out)
3120{ 3132{
3121 struct net_device *ndev; 3133 struct net_device *ndev;
3122 enum vxge_hw_status status = VXGE_HW_OK; 3134 enum vxge_hw_status status = VXGE_HW_OK;
@@ -3164,7 +3176,7 @@ int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3164 3176
3165 ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT; 3177 ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
3166 3178
3167 initialize_ethtool_ops(ndev); 3179 vxge_initialize_ethtool_ops(ndev);
3168 3180
3169 /* Allocate memory for vpath */ 3181 /* Allocate memory for vpath */
3170 vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) * 3182 vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
@@ -3249,7 +3261,7 @@ _out0:
3249 * 3261 *
3250 * This function will unregister and free network device 3262 * This function will unregister and free network device
3251 */ 3263 */
3252void 3264static void
3253vxge_device_unregister(struct __vxge_hw_device *hldev) 3265vxge_device_unregister(struct __vxge_hw_device *hldev)
3254{ 3266{
3255 struct vxgedev *vdev; 3267 struct vxgedev *vdev;
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index d4be07eaacd7..de64536cb7d0 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -396,64 +396,7 @@ struct vxge_tx_priv {
 	mod_timer(&timer, (jiffies + exp)); \
 } while (0);
 
-int __devinit vxge_device_register(struct __vxge_hw_device *devh,
-	struct vxge_config *config,
-	int high_dma, int no_of_vpath,
-	struct vxgedev **vdev);
-
-void vxge_device_unregister(struct __vxge_hw_device *devh);
-
-void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id);
-
-void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id);
-
-void vxge_callback_link_up(struct __vxge_hw_device *devh);
-
-void vxge_callback_link_down(struct __vxge_hw_device *devh);
-
-enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
-	struct macInfo *mac);
-
-int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);
-
-int vxge_reset(struct vxgedev *vdev);
-
-enum vxge_hw_status
-vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
-	u8 t_code, void *userdata);
-
-enum vxge_hw_status
-vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
-	enum vxge_hw_fifo_tcode t_code, void *userdata,
-	struct sk_buff ***skb_ptr, int nr_skbs, int *more);
-
-int vxge_close(struct net_device *dev);
-
-int vxge_open(struct net_device *dev);
-
-void vxge_close_vpaths(struct vxgedev *vdev, int index);
-
-int vxge_open_vpaths(struct vxgedev *vdev);
-
-enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
-
-enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
-	struct macInfo *mac);
-
-enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
-	struct macInfo *mac);
-
-int vxge_mac_list_add(struct vxge_vpath *vpath,
-	struct macInfo *mac);
-
-void vxge_free_mac_add_list(struct vxge_vpath *vpath);
-
-enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);
-
-enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);
-
-int do_vxge_close(struct net_device *dev, int do_io);
-extern void initialize_ethtool_ops(struct net_device *ndev);
+extern void vxge_initialize_ethtool_ops(struct net_device *ndev);
 /**
  * #define VXGE_DEBUG_INIT: debug for initialization functions
  * #define VXGE_DEBUG_TX : debug transmit related functions
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index cedf08f99cb3..4bdb611a6842 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -17,6 +17,13 @@
 #include "vxge-config.h"
 #include "vxge-main.h"
 
+static enum vxge_hw_status
+__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev,
+	u32 vp_id, enum vxge_hw_event type);
+static enum vxge_hw_status
+__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
+	u32 skip_alarms);
+
 /*
  * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
  * @vp: Virtual Path handle.
@@ -513,7 +520,7 @@ exit:
  * Link up indication handler. The function is invoked by HW when
  * Titan indicates that the link is up for programmable amount of time.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
 {
 	/*
@@ -538,7 +545,7 @@ exit:
  * Link down indication handler. The function is invoked by HW when
  * Titan indicates that the link is down.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
 {
 	/*
@@ -564,7 +571,7 @@ exit:
  *
  * Handle error.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_device_handle_error(
 	struct __vxge_hw_device *hldev,
 	u32 vp_id,
@@ -646,7 +653,7 @@ void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
  * it swaps the reserve and free arrays.
  *
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
 {
 	void **tmp_arr;
@@ -692,7 +699,8 @@ _alloc_after_swap:
  * Posts a dtr to work array.
  *
  */
-void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
+static void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel,
+	void *dtrh)
 {
 	vxge_assert(channel->work_arr[channel->post_index] == NULL);
 
@@ -1658,37 +1666,6 @@ exit:
 }
 
 /**
- * vxge_hw_vpath_vid_get_next - Get the next vid entry for this vpath
- *               from vlan id table.
- * @vp: Vpath handle.
- * @vid: Buffer to return vlan id
- *
- * Returns the next vlan id in the list for this vpath.
- * see also: vxge_hw_vpath_vid_get
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_vid_get_next(struct __vxge_hw_vpath_handle *vp, u64 *vid)
-{
-	u64 data;
-	enum vxge_hw_status status = VXGE_HW_OK;
-
-	if (vp == NULL) {
-		status = VXGE_HW_ERR_INVALID_HANDLE;
-		goto exit;
-	}
-
-	status = __vxge_hw_vpath_rts_table_get(vp,
-		VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
-		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
-		0, vid, &data);
-
-	*vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
-exit:
-	return status;
-}
-
-/**
  * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
  *               to vlan id table.
  * @vp: Vpath handle.
@@ -1898,9 +1875,9 @@ exit:
  * Process vpath alarms.
  *
  */
-enum vxge_hw_status __vxge_hw_vpath_alarm_process(
-	struct __vxge_hw_virtualpath *vpath,
-	u32 skip_alarms)
+static enum vxge_hw_status
+__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
+	u32 skip_alarms)
 {
 	u64 val64;
 	u64 alarm_status;
@@ -2265,36 +2242,6 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
 }
 
 /**
- * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
- * @vp: Virtual Path handle.
- * @msix_id: MSI ID
- *
- * The function clears the msix interrupt for the given msix_id
- *
- * Returns: 0,
- * Otherwise, VXGE_HW_ERR_WRONG_IRQ if the msix index is out of range
- * status.
- * See also:
- */
-void
-vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
-{
-	struct __vxge_hw_device *hldev = vp->vpath->hldev;
-	if (hldev->config.intr_mode ==
-			VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
-		__vxge_hw_pio_mem_write32_upper(
-			(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
-			&hldev->common_reg->
-				clr_msix_one_shot_vec[msix_id%4]);
-	} else {
-		__vxge_hw_pio_mem_write32_upper(
-			(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
-			&hldev->common_reg->
-				clear_msix_mask_vect[msix_id%4]);
-	}
-}
-
-/**
  * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
  * @vp: Virtual Path handle.
  * @msix_id: MSI ID
@@ -2316,22 +2263,6 @@ vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
 }
 
 /**
- * vxge_hw_vpath_msix_mask_all - Mask all MSIX vectors for the vpath.
- * @vp: Virtual Path handle.
- *
- * The function masks all msix interrupt for the given vpath
- *
- */
-void
-vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vp)
-{
-
-	__vxge_hw_pio_mem_write32_upper(
-		(u32)vxge_bVALn(vxge_mBIT(vp->vpath->vp_id), 0, 32),
-		&vp->vpath->hldev->common_reg->set_msix_mask_all_vect);
-}
-
-/**
  * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
  * @vp: Virtual Path handle.
  *
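
The two declarations added at the top of vxge-traffic.c exist because C requires a static function to be declared before its first use, and __vxge_hw_device_handle_error and __vxge_hw_vpath_alarm_process are called earlier in the file than they are defined. A standalone sketch of the same arrangement, with hypothetical names:

	#include <stdio.h>

	static int helper(int x);	/* forward declaration: defined later */

	static int caller(int x)
	{
		return helper(x) + 1;	/* call site precedes the definition */
	}

	static int helper(int x)
	{
		return x * 2;
	}

	int main(void)
	{
		printf("%d\n", caller(20));	/* prints 41 */
		return 0;
	}
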
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index 6fa07d13798e..9890d4d596d0 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -1749,14 +1749,6 @@ vxge_hw_mrpcim_stats_access(
 	u64 *stat);
 
 enum vxge_hw_status
-vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *devh, u32 port,
-	struct vxge_hw_xmac_aggr_stats *aggr_stats);
-
-enum vxge_hw_status
-vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *devh, u32 port,
-	struct vxge_hw_xmac_port_stats *port_stats);
-
-enum vxge_hw_status
 vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *devh,
 	struct vxge_hw_xmac_stats *xmac_stats);
 
@@ -2117,49 +2109,10 @@ struct __vxge_hw_ring_rxd_priv {
 #endif
 };
 
-/* ========================= RING PRIVATE API ============================= */
-u64
-__vxge_hw_ring_first_block_address_get(
-	struct __vxge_hw_ring *ringh);
-
-enum vxge_hw_status
-__vxge_hw_ring_create(
-	struct __vxge_hw_vpath_handle *vpath_handle,
-	struct vxge_hw_ring_attr *attr);
-
-enum vxge_hw_status
-__vxge_hw_ring_abort(
-	struct __vxge_hw_ring *ringh);
-
-enum vxge_hw_status
-__vxge_hw_ring_reset(
-	struct __vxge_hw_ring *ringh);
-
-enum vxge_hw_status
-__vxge_hw_ring_delete(
-	struct __vxge_hw_vpath_handle *vpath_handle);
-
 /* ========================= FIFO PRIVATE API ============================= */
 
 struct vxge_hw_fifo_attr;
 
-enum vxge_hw_status
-__vxge_hw_fifo_create(
-	struct __vxge_hw_vpath_handle *vpath_handle,
-	struct vxge_hw_fifo_attr *attr);
-
-enum vxge_hw_status
-__vxge_hw_fifo_abort(
-	struct __vxge_hw_fifo *fifoh);
-
-enum vxge_hw_status
-__vxge_hw_fifo_reset(
-	struct __vxge_hw_fifo *ringh);
-
-enum vxge_hw_status
-__vxge_hw_fifo_delete(
-	struct __vxge_hw_vpath_handle *vpath_handle);
-
 struct vxge_hw_mempool_cbs {
 	void (*item_func_alloc)(
 		struct vxge_hw_mempool *mempoolh,
@@ -2169,10 +2122,6 @@ struct vxge_hw_mempool_cbs {
 		u32 is_last);
 };
 
-void
-__vxge_hw_mempool_destroy(
-	struct vxge_hw_mempool *mempool);
-
 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
 	((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
 
@@ -2195,61 +2144,10 @@ __vxge_hw_vpath_rts_table_set(
 	u64 data2);
 
 enum vxge_hw_status
-__vxge_hw_vpath_reset(
-	struct __vxge_hw_device *devh,
-	u32 vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_sw_reset(
-	struct __vxge_hw_device *devh,
-	u32 vp_id);
-
-enum vxge_hw_status
 __vxge_hw_vpath_enable(
 	struct __vxge_hw_device *devh,
 	u32 vp_id);
 
-void
-__vxge_hw_vpath_prc_configure(
-	struct __vxge_hw_device *devh,
-	u32 vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_kdfc_configure(
-	struct __vxge_hw_device *devh,
-	u32 vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_mac_configure(
-	struct __vxge_hw_device *devh,
-	u32 vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_tim_configure(
-	struct __vxge_hw_device *devh,
-	u32 vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_initialize(
-	struct __vxge_hw_device *devh,
-	u32 vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vp_initialize(
-	struct __vxge_hw_device *devh,
-	u32 vp_id,
-	struct vxge_hw_vp_config *config);
-
-void
-__vxge_hw_vp_terminate(
-	struct __vxge_hw_device *devh,
-	u32 vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_alarm_process(
-	struct __vxge_hw_virtualpath *vpath,
-	u32 skip_alarms);
-
 void vxge_hw_device_intr_enable(
 	struct __vxge_hw_device *devh);
 
@@ -2321,11 +2219,6 @@ vxge_hw_vpath_vid_get(
 	u64 *vid);
 
 enum vxge_hw_status
-vxge_hw_vpath_vid_get_next(
-	struct __vxge_hw_vpath_handle *vpath_handle,
-	u64 *vid);
-
-enum vxge_hw_status
 vxge_hw_vpath_vid_delete(
 	struct __vxge_hw_vpath_handle *vpath_handle,
 	u64 vid);
@@ -2387,16 +2280,9 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle,
 void vxge_hw_device_flush_io(struct __vxge_hw_device *devh);
 
 void
-vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vpath_handle,
-	int msix_id);
-
-void
 vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vpath_handle,
 	int msix_id);
 
-void
-vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vpath_handle);
-
 enum vxge_hw_status vxge_hw_vpath_intr_enable(
 	struct __vxge_hw_vpath_handle *vpath_handle);
 
@@ -2415,12 +2301,6 @@ vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channelh, int msix_id);
 void
 vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id);
 
-enum vxge_hw_status
-vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh);
-
-void
-vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh);
-
 void
 vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel,
 	void **dtrh);
@@ -2436,18 +2316,4 @@ vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
 void
 vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id);
 
-/* ========================== PRIVATE API ================================= */
-
-enum vxge_hw_status
-__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev);
-
-enum vxge_hw_status
-__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev);
-
-enum vxge_hw_status
-__vxge_hw_device_handle_error(
-	struct __vxge_hw_device *hldev,
-	u32 vp_id,
-	enum vxge_hw_event type);
-
 #endif
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index eea1ef2f502b..4396d4b9bfb9 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -221,9 +221,6 @@ config RT2X00_LIB_LEDS
 	boolean
 	default y if (RT2X00_LIB=y && LEDS_CLASS=y) || (RT2X00_LIB=m && LEDS_CLASS!=n)
 
-comment "rt2x00 leds support disabled due to modularized LEDS_CLASS and built-in rt2x00"
-	depends on RT2X00_LIB=y && LEDS_CLASS=m
-
 config RT2X00_LIB_DEBUGFS
 	bool "Ralink debugfs support"
 	depends on RT2X00_LIB && MAC80211_DEBUGFS
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 630fb8664768..458bb57914a3 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1610,6 +1610,8 @@ static void netback_changed(struct xenbus_device *dev,
 	switch (backend_state) {
 	case XenbusStateInitialising:
 	case XenbusStateInitialised:
+	case XenbusStateReconfiguring:
+	case XenbusStateReconfigured:
 	case XenbusStateConnected:
 	case XenbusStateUnknown:
 	case XenbusStateClosed:
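
The two new labels join the existing group of states the frontend explicitly ignores: while the backend passes through XenbusStateReconfiguring and XenbusStateReconfigured, there is nothing for the frontend to do. A reduced sketch of the control flow, with invented state names:

	enum state { INIT, RECONFIGURING, RECONFIGURED, CONNECTED, CLOSING };

	static void backend_changed(enum state s)
	{
		switch (s) {
		case INIT:
		case RECONFIGURING:	/* backend is rewiring itself */
		case RECONFIGURED:	/* rewiring finished */
		case CONNECTED:
			break;		/* nothing for the frontend to do */
		case CLOSING:
			/* tear the connection down */
			break;
		}
	}
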
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index f3f8be5a35fa..14f0955eca68 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -430,8 +430,8 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
 	}
 
 	/* Get the protocol type of the ethernet frame that arrived */
-	proto_type = ((in_be32(addr + XEL_HEADER_OFFSET +
-			XEL_RXBUFF_OFFSET) >> XEL_HEADER_SHIFT) &
+	proto_type = ((ntohl(in_be32(addr + XEL_HEADER_OFFSET +
+			XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) &
 			XEL_RPLR_LENGTH_MASK);
 
 	/* Check if received ethernet frame is a raw ethernet frame
@@ -439,9 +439,9 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
 	if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
 
 		if (proto_type == ETH_P_IP) {
-			length = ((in_be32(addr +
+			length = ((ntohl(in_be32(addr +
 					XEL_HEADER_IP_LENGTH_OFFSET +
-					XEL_RXBUFF_OFFSET) >>
+					XEL_RXBUFF_OFFSET)) >>
 					XEL_HEADER_SHIFT) &
 					XEL_RPLR_LENGTH_MASK);
 			length += ETH_HLEN + ETH_FCS_LEN;
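
Both xilinx_emaclite hunks wrap the fetched word in ntohl() before the shift, so the EtherType and IP-length fields are isolated from a word in a defined byte order. A userspace sketch of the corrected extraction, with stand-in names for the XEL_* constants (assumptions, not the driver's values):

	#include <arpa/inet.h>	/* ntohl(), htonl() */
	#include <stdint.h>
	#include <stdio.h>

	#define HEADER_SHIFT	16	/* stand-in for XEL_HEADER_SHIFT */
	#define LENGTH_MASK	0xffff	/* stand-in for XEL_RPLR_LENGTH_MASK */

	static uint16_t proto_of(uint32_t word)
	{
		/* convert to host order first, then isolate the upper 16 bits */
		return (ntohl(word) >> HEADER_SHIFT) & LENGTH_MASK;
	}

	int main(void)
	{
		uint32_t word = htonl(0x08000000);	/* ETH_P_IP in the top half */
		printf("proto=0x%04x\n", proto_of(word));	/* prints 0x0800 */
		return 0;
	}
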