Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/3c59x.c                    |   77
-rw-r--r--  drivers/net/Kconfig                    |    7
-rw-r--r--  drivers/net/Makefile                   |    1
-rw-r--r--  drivers/net/au1000_eth.c               |    4
-rw-r--r--  drivers/net/bcm63xx_enet.c             |    2
-rw-r--r--  drivers/net/benet/be.h                 |    1
-rw-r--r--  drivers/net/benet/be_cmds.c            |    4
-rw-r--r--  drivers/net/benet/be_cmds.h            |    5
-rw-r--r--  drivers/net/benet/be_ethtool.c         |    2
-rw-r--r--  drivers/net/benet/be_main.c            |   29
-rw-r--r--  drivers/net/bonding/bond_sysfs.c       |    1
-rw-r--r--  drivers/net/cnic.c                     |    3
-rw-r--r--  drivers/net/cnic_if.h                  |    4
-rw-r--r--  drivers/net/e1000e/82571.c             |    4
-rw-r--r--  drivers/net/e1000e/netdev.c            |   13
-rw-r--r--  drivers/net/ethoc.c                    |   81
-rw-r--r--  drivers/net/hamradio/mkiss.c           |    4
-rw-r--r--  drivers/net/igb/igb_main.c             |   13
-rw-r--r--  drivers/net/iseries_veth.c             |    2
-rw-r--r--  drivers/net/ixgbe/ixgbe_82598.c        |    2
-rw-r--r--  drivers/net/ixgbe/ixgbe_82599.c        |    2
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.c       |  232
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c      |    4
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c         |   56
-rw-r--r--  drivers/net/ixgbe/ixgbe_type.h         |   11
-rw-r--r--  drivers/net/ks8851_mll.c               | 1697
-rw-r--r--  drivers/net/meth.c                     |    2
-rw-r--r--  drivers/net/mlx4/main.c                |    1
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c   |    2
-rw-r--r--  drivers/net/pasemi_mac_ethtool.c       |    3
-rw-r--r--  drivers/net/pcmcia/pcnet_cs.c          |   10
-rw-r--r--  drivers/net/pppol2tp.c                 |    2
-rw-r--r--  drivers/net/qlge/qlge.h                |   26
-rw-r--r--  drivers/net/qlge/qlge_ethtool.c        |    2
-rw-r--r--  drivers/net/qlge/qlge_main.c           |   44
-rw-r--r--  drivers/net/qlge/qlge_mpi.c            |   12
-rw-r--r--  drivers/net/sgiseeq.c                  |    2
-rw-r--r--  drivers/net/skge.c                     |   16
-rw-r--r--  drivers/net/skge.h                     |    2
-rw-r--r--  drivers/net/sky2.c                     |    7
-rw-r--r--  drivers/net/sky2.h                     |    2
-rw-r--r--  drivers/net/tg3.c                      |   41
-rw-r--r--  drivers/net/tg3.h                      |    2
-rw-r--r--  drivers/net/usb/rndis_host.c           |    1
-rw-r--r--  drivers/net/virtio_net.c               |    2
-rw-r--r--  drivers/net/wireless/Kconfig           |   13
-rw-r--r--  drivers/net/wireless/ath/ar9170/phy.c  |    6
-rw-r--r--  drivers/net/wireless/b43/pio.c         |   60
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c  |    3
-rw-r--r--  drivers/net/wireless/rt2x00/rt73usb.c  |    1
50 files changed, 2207 insertions(+), 316 deletions(-)
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index b9eeadf01b74..975e25b19ebe 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -805,52 +805,54 @@ static void poll_vortex(struct net_device *dev)
 
 #ifdef CONFIG_PM
 
-static int vortex_suspend(struct pci_dev *pdev, pm_message_t state)
+static int vortex_suspend(struct device *dev)
 {
-	struct net_device *dev = pci_get_drvdata(pdev);
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct net_device *ndev = pci_get_drvdata(pdev);
+
+	if (!ndev || !netif_running(ndev))
+		return 0;
+
+	netif_device_detach(ndev);
+	vortex_down(ndev, 1);
 
-	if (dev && netdev_priv(dev)) {
-		if (netif_running(dev)) {
-			netif_device_detach(dev);
-			vortex_down(dev, 1);
-			disable_irq(dev->irq);
-		}
-		pci_save_state(pdev);
-		pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
-		pci_disable_device(pdev);
-		pci_set_power_state(pdev, pci_choose_state(pdev, state));
-	}
 	return 0;
 }
 
-static int vortex_resume(struct pci_dev *pdev)
+static int vortex_resume(struct device *dev)
 {
-	struct net_device *dev = pci_get_drvdata(pdev);
-	struct vortex_private *vp = netdev_priv(dev);
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct net_device *ndev = pci_get_drvdata(pdev);
 	int err;
 
-	if (dev && vp) {
-		pci_set_power_state(pdev, PCI_D0);
-		pci_restore_state(pdev);
-		err = pci_enable_device(pdev);
-		if (err) {
-			pr_warning("%s: Could not enable device\n",
-				dev->name);
-			return err;
-		}
-		pci_set_master(pdev);
-		if (netif_running(dev)) {
-			err = vortex_up(dev);
-			if (err)
-				return err;
-			enable_irq(dev->irq);
-			netif_device_attach(dev);
-		}
-	}
+	if (!ndev || !netif_running(ndev))
+		return 0;
+
+	err = vortex_up(ndev);
+	if (err)
+		return err;
+
+	netif_device_attach(ndev);
+
 	return 0;
 }
 
-#endif /* CONFIG_PM */
+static struct dev_pm_ops vortex_pm_ops = {
+	.suspend = vortex_suspend,
+	.resume = vortex_resume,
+	.freeze = vortex_suspend,
+	.thaw = vortex_resume,
+	.poweroff = vortex_suspend,
+	.restore = vortex_resume,
+};
+
+#define VORTEX_PM_OPS (&vortex_pm_ops)
+
+#else /* !CONFIG_PM */
+
+#define VORTEX_PM_OPS NULL
+
+#endif /* !CONFIG_PM */
 
 #ifdef CONFIG_EISA
 static struct eisa_device_id vortex_eisa_ids[] = {
@@ -3199,10 +3201,7 @@ static struct pci_driver vortex_driver = {
 	.probe = vortex_init_one,
 	.remove = __devexit_p(vortex_remove_one),
 	.id_table = vortex_pci_tbl,
-#ifdef CONFIG_PM
-	.suspend = vortex_suspend,
-	.resume = vortex_resume,
-#endif
+	.driver.pm = VORTEX_PM_OPS,
 };
 
 
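
The 3c59x change above is a conversion from the legacy PCI .suspend/.resume driver hooks to the generic dev_pm_ops interface, where the PCI core saves config space and changes the device power state itself, so the callbacks only need to quiesce and revive the network device. A minimal sketch of the same pattern for a hypothetical PCI network driver ("foo" names are placeholders, not part of the patch):

/* Sketch of the dev_pm_ops pattern used above; the PCI core handles
 * pci_save_state()/pci_set_power_state(), so the callbacks only detach
 * and reattach the netdev. */
#include <linux/pci.h>
#include <linux/netdevice.h>

static int foo_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *ndev = pci_get_drvdata(pdev);

	if (ndev && netif_running(ndev))
		netif_device_detach(ndev);
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *ndev = pci_get_drvdata(pdev);

	if (ndev && netif_running(ndev))
		netif_device_attach(ndev);
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend  = foo_suspend,
	.resume   = foo_resume,
	.freeze   = foo_suspend,
	.thaw     = foo_resume,
	.poweroff = foo_suspend,
	.restore  = foo_resume,
};

static struct pci_driver foo_driver = {
	.name      = "foo",
	.driver.pm = &foo_pm_ops,
	/* .probe, .remove, .id_table omitted in this sketch */
};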
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 2bea67c134f0..712776089b46 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1738,6 +1738,13 @@ config KS8851
 	help
 	  SPI driver for Micrel KS8851 SPI attached network chip.
 
+config KS8851_MLL
+	tristate "Micrel KS8851 MLL"
+	depends on HAS_IOMEM
+	help
+	  This platform driver is for Micrel KS8851 Address/data bus
+	  multiplexed network chip.
+
 config VIA_RHINE
 	tristate "VIA Rhine support"
 	depends on NET_PCI && PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index ae8cd30f13d6..d866b8cf65d1 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -89,6 +89,7 @@ obj-$(CONFIG_SKY2) += sky2.o
 obj-$(CONFIG_SKFP) += skfp/
 obj-$(CONFIG_KS8842) += ks8842.o
 obj-$(CONFIG_KS8851) += ks8851.o
+obj-$(CONFIG_KS8851_MLL) += ks8851_mll.o
 obj-$(CONFIG_VIA_RHINE) += via-rhine.o
 obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o
 obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index fdf5937233fc..04f63c77071d 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -721,7 +721,7 @@ static inline void update_rx_stats(struct net_device *dev, u32 status)
 		ps->rx_errors++;
 		if (status & RX_MISSED_FRAME)
 			ps->rx_missed_errors++;
-		if (status & (RX_OVERLEN | RX_OVERLEN | RX_LEN_ERROR))
+		if (status & (RX_OVERLEN | RX_RUNT | RX_LEN_ERROR))
 			ps->rx_length_errors++;
 		if (status & RX_CRC_ERROR)
 			ps->rx_crc_errors++;
@@ -794,8 +794,6 @@ static int au1000_rx(struct net_device *dev)
 				printk("rx len error\n");
 			if (status & RX_U_CNTRL_FRAME)
 				printk("rx u control frame\n");
-			if (status & RX_MISSED_FRAME)
-				printk("rx miss\n");
 			}
 		}
 		prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index 09d270913c50..ba29dc319b34 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -90,7 +90,7 @@ static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
 		if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
 			break;
 		udelay(1);
-	} while (limit-- >= 0);
+	} while (limit-- > 0);
 
 	return (limit < 0) ? 1 : 0;
 }
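
The bcm63xx_enet fix above tightens a polling loop: with "limit-- >= 0" the body runs one pass more than intended, and limit is already negative on that pass, so a device that becomes ready at the very last moment is still reported as a timeout by the "limit < 0" check. A small sketch of the corrected idiom (hw_ready() is a hypothetical readiness test, not a real kernel API):

#include <linux/delay.h>

extern bool hw_ready(void);	/* placeholder for the real status check */

static int wait_for_ready(int limit)
{
	do {
		if (hw_ready())
			break;
		udelay(1);
	} while (limit-- > 0);	/* keeps the invariant: limit < 0 <=> timed out */

	return (limit < 0) ? 1 : 0;
}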
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 684c6fe24c8d..a80da0e14a52 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -258,6 +258,7 @@ struct be_adapter {
 	bool link_up;
 	u32 port_num;
 	bool promiscuous;
+	u32 cap;
 };
 
 extern const struct ethtool_ops be_ethtool_ops;
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 3dd76c4170bf..89876ade5e33 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1068,7 +1068,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
 }
 
 /* Uses mbox */
-int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num)
+int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_query_fw_cfg *req;
@@ -1088,6 +1088,7 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num)
 	if (!status) {
 		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
 		*port_num = le32_to_cpu(resp->phys_port);
+		*cap = le32_to_cpu(resp->function_cap);
 	}
 
 	spin_unlock(&adapter->mbox_lock);
@@ -1128,7 +1129,6 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
 	spin_lock_bh(&adapter->mcc_lock);
 
 	wrb = wrb_from_mccq(adapter);
-	req = embedded_payload(wrb);
 	sge = nonembedded_sgl(wrb);
 
 	be_wrb_hdr_prepare(wrb, cmd->size, false, 1);
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 93e432f3d926..a86f917f85f4 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -62,7 +62,7 @@ enum {
 	MCC_STATUS_QUEUE_FLUSHING = 0x4,
 /* The command is completing with a DMA error */
 	MCC_STATUS_DMA_FAILED = 0x5,
-	MCC_STATUS_NOT_SUPPORTED = 0x66
+	MCC_STATUS_NOT_SUPPORTED = 66
 };
 
 #define CQE_STATUS_COMPL_MASK	0xFFFF
@@ -760,7 +760,8 @@ extern int be_cmd_set_flow_control(struct be_adapter *adapter,
 			u32 tx_fc, u32 rx_fc);
 extern int be_cmd_get_flow_control(struct be_adapter *adapter,
 			u32 *tx_fc, u32 *rx_fc);
-extern int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num);
+extern int be_cmd_query_fw_cfg(struct be_adapter *adapter,
+			u32 *port_num, u32 *cap);
 extern int be_cmd_reset_function(struct be_adapter *adapter);
 extern int be_process_mcc(struct be_adapter *adapter);
 extern int be_cmd_write_flashrom(struct be_adapter *adapter,
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 11445df3dbc0..cda5bf2fc50a 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -358,7 +358,7 @@ const struct ethtool_ops be_ethtool_ops = {
 	.get_rx_csum = be_get_rx_csum,
 	.set_rx_csum = be_set_rx_csum,
 	.get_tx_csum = ethtool_op_get_tx_csum,
-	.set_tx_csum = ethtool_op_set_tx_csum,
+	.set_tx_csum = ethtool_op_set_tx_hw_csum,
 	.get_sg = ethtool_op_get_sg,
 	.set_sg = ethtool_op_set_sg,
 	.get_tso = ethtool_op_get_tso,
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 409cf0595903..6d5e81f7046f 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -197,7 +197,7 @@ void netdev_stats_update(struct be_adapter *adapter)
 	/* no space available in linux */
 	dev_stats->tx_dropped = 0;
 
-	dev_stats->multicast = port_stats->tx_multicastframes;
+	dev_stats->multicast = port_stats->rx_multicast_frames;
 	dev_stats->collisions = 0;
 
 	/* detailed tx_errors */
@@ -747,9 +747,16 @@ static void be_rx_compl_process(struct be_adapter *adapter,
 		struct be_eth_rx_compl *rxcp)
 {
 	struct sk_buff *skb;
-	u32 vtp, vid;
+	u32 vlanf, vid;
+	u8 vtm;
 
-	vtp = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
+	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
+	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
+
+	/* vlanf could be wrongly set in some cards.
+	 * ignore if vtm is not set */
+	if ((adapter->cap == 0x400) && !vtm)
+		vlanf = 0;
 
 	skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN);
 	if (!skb) {
@@ -772,7 +779,7 @@ static void be_rx_compl_process(struct be_adapter *adapter,
 	skb->protocol = eth_type_trans(skb, adapter->netdev);
 	skb->dev = adapter->netdev;
 
-	if (vtp) {
+	if (vlanf) {
 		if (!adapter->vlan_grp || adapter->num_vlans == 0) {
 			kfree_skb(skb);
 			return;
@@ -797,11 +804,18 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
 	struct be_eq_obj *eq_obj = &adapter->rx_eq;
 	u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
 	u16 i, rxq_idx = 0, vid, j;
+	u8 vtm;
 
 	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
 	pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
 	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
 	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
+	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
+
+	/* vlanf could be wrongly set in some cards.
+	 * ignore if vtm is not set */
+	if ((adapter->cap == 0x400) && !vtm)
+		vlanf = 0;
 
 	skb = napi_get_frags(&eq_obj->napi);
 	if (!skb) {
@@ -1885,8 +1899,8 @@ static void be_netdev_init(struct net_device *netdev)
 	struct be_adapter *adapter = netdev_priv(netdev);
 
 	netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
-		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_IP_CSUM |
-		NETIF_F_IPV6_CSUM | NETIF_F_GRO;
+		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
+		NETIF_F_GRO;
 
 	netdev->flags |= IFF_MULTICAST;
 
@@ -2045,7 +2059,8 @@ static int be_hw_up(struct be_adapter *adapter)
 	if (status)
 		return status;
 
-	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num);
+	status = be_cmd_query_fw_cfg(adapter,
+			&adapter->port_num, &adapter->cap);
 	return status;
 }
 
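
Two of the benet changes above belong together: be_netdev_init() now advertises NETIF_F_HW_CSUM instead of the IPv4/IPv6-specific checksum flags, and the ethtool set_tx_csum hook is switched to the helper that toggles that same bit. ethtool_op_set_tx_csum() flips NETIF_F_IP_CSUM, while ethtool_op_set_tx_hw_csum() flips NETIF_F_HW_CSUM, so the helper has to match the feature flag the driver actually sets. A tiny illustrative pairing (the "example_" names are hypothetical, not from the patch):

#include <linux/netdevice.h>
#include <linux/ethtool.h>

static void example_netdev_init(struct net_device *netdev)
{
	/* protocol-independent TX checksumming, as be_main.c now advertises */
	netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GRO;
}

static const struct ethtool_ops example_ethtool_ops = {
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_hw_csum,	/* matches NETIF_F_HW_CSUM */
};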
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 6044e12ff9fc..ff449de6f3c0 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1182,6 +1182,7 @@ static ssize_t bonding_store_primary(struct device *d,
 			       ": %s: Setting %s as primary slave.\n",
 			       bond->dev->name, slave->dev->name);
 			bond->primary_slave = slave;
+			strcpy(bond->params.primary, slave->dev->name);
 			bond_select_active_slave(bond);
 			goto out;
 		}
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 211c8e9182fc..46c87ec7960c 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -2733,7 +2733,8 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
 		cnic_ulp_init(dev);
 	else if (event == NETDEV_UNREGISTER)
 		cnic_ulp_exit(dev);
-	else if (event == NETDEV_UP) {
+
+	if (event == NETDEV_UP) {
 		if (cnic_register_netdev(dev) != 0) {
 			cnic_put(dev);
 			goto done;
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index a49235739eef..d8b09efdcb52 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -12,8 +12,8 @@
 #ifndef CNIC_IF_H
 #define CNIC_IF_H
 
-#define CNIC_MODULE_VERSION	"2.0.0"
-#define CNIC_MODULE_RELDATE	"May 21, 2009"
+#define CNIC_MODULE_VERSION	"2.0.1"
+#define CNIC_MODULE_RELDATE	"Oct 01, 2009"
 
 #define CNIC_ULP_RDMA		0
 #define CNIC_ULP_ISCSI		1
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index b53b40ba88a8..d1e0563a67df 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -1803,7 +1803,7 @@ struct e1000_info e1000_82574_info = {
 				  | FLAG_HAS_AMT
 				  | FLAG_HAS_CTRLEXT_ON_LOAD,
 	.pba			= 20,
-	.max_hw_frame_size	= ETH_FRAME_LEN + ETH_FCS_LEN,
+	.max_hw_frame_size	= DEFAULT_JUMBO,
 	.get_variants		= e1000_get_variants_82571,
 	.mac_ops		= &e82571_mac_ops,
 	.phy_ops		= &e82_phy_ops_bm,
@@ -1820,7 +1820,7 @@ struct e1000_info e1000_82583_info = {
 				  | FLAG_HAS_AMT
 				  | FLAG_HAS_CTRLEXT_ON_LOAD,
 	.pba			= 20,
-	.max_hw_frame_size	= DEFAULT_JUMBO,
+	.max_hw_frame_size	= ETH_FRAME_LEN + ETH_FCS_LEN,
 	.get_variants		= e1000_get_variants_82571,
 	.mac_ops		= &e82571_mac_ops,
 	.phy_ops		= &e82_phy_ops_bm,
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 16c193a6c95c..0687c6aa4e46 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -4982,12 +4982,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 		goto err_pci_reg;
 
 	/* AER (Advanced Error Reporting) hooks */
-	err = pci_enable_pcie_error_reporting(pdev);
-	if (err) {
-		dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
-		        "0x%x\n", err);
-		/* non-fatal, continue */
-	}
+	pci_enable_pcie_error_reporting(pdev);
 
 	pci_set_master(pdev);
 	/* PCI config space info */
@@ -5263,7 +5258,6 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev_priv(netdev);
-	int err;
 
 	/*
 	 * flush_scheduled work may reschedule our watchdog task, so
@@ -5299,10 +5293,7 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
 	free_netdev(netdev);
 
 	/* AER disable */
-	err = pci_disable_pcie_error_reporting(pdev);
-	if (err)
-		dev_err(&pdev->dev,
-			"pci_disable_pcie_error_reporting failed 0x%x\n", err);
+	pci_disable_pcie_error_reporting(pdev);
 
 	pci_disable_device(pdev);
 }
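
The e1000e hunks above (and the matching igb and ixgbe hunks below) drop the error checking around the AER calls: pci_enable_pcie_error_reporting() can fail harmlessly, for example when the platform exposes no AER capability, so it is now treated as best-effort. A condensed sketch of the resulting probe-side pattern (example_probe is a hypothetical driver function):

#include <linux/pci.h>
#include <linux/aer.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err = pci_enable_device_mem(pdev);

	if (err)
		return err;

	pci_enable_pcie_error_reporting(pdev);	/* failure is non-fatal, ignore it */
	pci_set_master(pdev);
	return 0;
}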
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index b7311bc00258..34d0c69e67f7 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -19,6 +19,10 @@
 #include <linux/platform_device.h>
 #include <net/ethoc.h>
 
+static int buffer_size = 0x8000; /* 32 KBytes */
+module_param(buffer_size, int, 0);
+MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
+
 /* register offsets */
 #define MODER		0x00
 #define INT_SOURCE	0x04
@@ -167,6 +171,7 @@
  * struct ethoc - driver-private device structure
  * @iobase:	pointer to I/O memory region
  * @membase:	pointer to buffer memory region
+ * @dma_alloc:	dma allocated buffer size
  * @num_tx:	number of send buffers
  * @cur_tx:	last send buffer written
  * @dty_tx:	last buffer actually sent
@@ -185,6 +190,7 @@
 struct ethoc {
 	void __iomem *iobase;
 	void __iomem *membase;
+	int dma_alloc;
 
 	unsigned int num_tx;
 	unsigned int cur_tx;
@@ -284,7 +290,7 @@ static int ethoc_init_ring(struct ethoc *dev)
 	dev->cur_rx = 0;
 
 	/* setup transmission buffers */
-	bd.addr = 0;
+	bd.addr = virt_to_phys(dev->membase);
 	bd.stat = TX_BD_IRQ | TX_BD_CRC;
 
 	for (i = 0; i < dev->num_tx; i++) {
@@ -295,7 +301,6 @@ static int ethoc_init_ring(struct ethoc *dev)
 		bd.addr += ETHOC_BUFSIZ;
 	}
 
-	bd.addr = dev->num_tx * ETHOC_BUFSIZ;
 	bd.stat = RX_BD_EMPTY | RX_BD_IRQ;
 
 	for (i = 0; i < dev->num_rx; i++) {
@@ -400,8 +405,12 @@ static int ethoc_rx(struct net_device *dev, int limit)
 		if (ethoc_update_rx_stats(priv, &bd) == 0) {
 			int size = bd.stat >> 16;
 			struct sk_buff *skb = netdev_alloc_skb(dev, size);
+
+			size -= 4; /* strip the CRC */
+			skb_reserve(skb, 2); /* align TCP/IP header */
+
 			if (likely(skb)) {
-				void *src = priv->membase + bd.addr;
+				void *src = phys_to_virt(bd.addr);
 				memcpy_fromio(skb_put(skb, size), src, size);
 				skb->protocol = eth_type_trans(skb, dev);
 				priv->stats.rx_packets++;
@@ -653,9 +662,9 @@ static int ethoc_open(struct net_device *dev)
 	if (ret)
 		return ret;
 
-	/* calculate the number of TX/RX buffers */
-	num_bd = (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ;
-	priv->num_tx = min(min_tx, num_bd / 4);
+	/* calculate the number of TX/RX buffers, maximum 128 supported */
+	num_bd = min(128, (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ);
+	priv->num_tx = max(min_tx, num_bd / 4);
 	priv->num_rx = num_bd - priv->num_tx;
 	ethoc_write(priv, TX_BD_NUM, priv->num_tx);
 
@@ -823,7 +832,7 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	else
 		bd.stat &= ~TX_BD_PAD;
 
-	dest = priv->membase + bd.addr;
+	dest = phys_to_virt(bd.addr);
 	memcpy_toio(dest, skb->data, skb->len);
 
 	bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK);
@@ -903,22 +912,19 @@ static int ethoc_probe(struct platform_device *pdev)
 
 	/* obtain buffer memory space */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	if (!res) {
-		dev_err(&pdev->dev, "cannot obtain memory space\n");
-		ret = -ENXIO;
-		goto free;
-	}
-
-	mem = devm_request_mem_region(&pdev->dev, res->start,
+	if (res) {
+		mem = devm_request_mem_region(&pdev->dev, res->start,
 			res->end - res->start + 1, res->name);
-	if (!mem) {
-		dev_err(&pdev->dev, "cannot request memory space\n");
-		ret = -ENXIO;
-		goto free;
+		if (!mem) {
+			dev_err(&pdev->dev, "cannot request memory space\n");
+			ret = -ENXIO;
+			goto free;
+		}
+
+		netdev->mem_start = mem->start;
+		netdev->mem_end = mem->end;
 	}
 
-	netdev->mem_start = mem->start;
-	netdev->mem_end = mem->end;
 
 	/* obtain device IRQ number */
 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -933,6 +939,7 @@ static int ethoc_probe(struct platform_device *pdev)
 	/* setup driver-private data */
 	priv = netdev_priv(netdev);
 	priv->netdev = netdev;
+	priv->dma_alloc = 0;
 
 	priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr,
 			mmio->end - mmio->start + 1);
@@ -942,12 +949,27 @@ static int ethoc_probe(struct platform_device *pdev)
 		goto error;
 	}
 
-	priv->membase = devm_ioremap_nocache(&pdev->dev, netdev->mem_start,
-			mem->end - mem->start + 1);
-	if (!priv->membase) {
-		dev_err(&pdev->dev, "cannot remap memory space\n");
-		ret = -ENXIO;
-		goto error;
+	if (netdev->mem_end) {
+		priv->membase = devm_ioremap_nocache(&pdev->dev,
+			netdev->mem_start, mem->end - mem->start + 1);
+		if (!priv->membase) {
+			dev_err(&pdev->dev, "cannot remap memory space\n");
+			ret = -ENXIO;
+			goto error;
+		}
+	} else {
+		/* Allocate buffer memory */
+		priv->membase = dma_alloc_coherent(NULL,
+			buffer_size, (void *)&netdev->mem_start,
+			GFP_KERNEL);
+		if (!priv->membase) {
+			dev_err(&pdev->dev, "cannot allocate %dB buffer\n",
+				buffer_size);
+			ret = -ENOMEM;
+			goto error;
+		}
+		netdev->mem_end = netdev->mem_start + buffer_size;
+		priv->dma_alloc = buffer_size;
 	}
 
 	/* Allow the platform setup code to pass in a MAC address. */
@@ -1034,6 +1056,9 @@ free_mdio:
 	kfree(priv->mdio->irq);
 	mdiobus_free(priv->mdio);
 free:
+	if (priv->dma_alloc)
+		dma_free_coherent(NULL, priv->dma_alloc, priv->membase,
+			netdev->mem_start);
 	free_netdev(netdev);
 out:
 	return ret;
@@ -1059,7 +1084,9 @@ static int ethoc_remove(struct platform_device *pdev)
 		kfree(priv->mdio->irq);
 		mdiobus_free(priv->mdio);
 	}
-
+		if (priv->dma_alloc)
+			dma_free_coherent(NULL, priv->dma_alloc, priv->membase,
+				netdev->mem_start);
 		unregister_netdev(netdev);
 		free_netdev(netdev);
 	}
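
The core of the ethoc change above is a fallback path for boards that do not provide a dedicated buffer RAM resource: the driver then allocates the packet buffer from system memory with dma_alloc_coherent() and records the size in dma_alloc so the remove and error paths know to free it. A condensed sketch of that probe-time decision (error handling trimmed, function name hypothetical, driver-internal headers assumed; buffer_size is the new module parameter):

static int example_setup_buffers(struct platform_device *pdev,
				 struct net_device *netdev, struct ethoc *priv)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 1);

	if (res) {
		/* platform supplied dedicated buffer RAM: just remap it */
		netdev->mem_start = res->start;
		netdev->mem_end = res->end;
		priv->membase = devm_ioremap_nocache(&pdev->dev, res->start,
				res->end - res->start + 1);
		return priv->membase ? 0 : -ENXIO;
	}

	/* no buffer RAM given: carve a DMA-coherent buffer out of system
	 * memory and remember its size for ethoc_remove()/error unwinding */
	priv->membase = dma_alloc_coherent(NULL, buffer_size,
			(void *)&netdev->mem_start, GFP_KERNEL);
	if (!priv->membase)
		return -ENOMEM;
	netdev->mem_end = netdev->mem_start + buffer_size;
	priv->dma_alloc = buffer_size;
	return 0;
}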
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 33b55f729742..db4b7f1603f6 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -258,7 +258,7 @@ static void ax_bump(struct mkiss *ax)
 			}
 			if (ax->crcmode != CRC_MODE_SMACK && ax->crcauto) {
 				printk(KERN_INFO
-				       "mkiss: %s: Switchting to crc-smack\n",
+				       "mkiss: %s: Switching to crc-smack\n",
 				       ax->dev->name);
 				ax->crcmode = CRC_MODE_SMACK;
 			}
@@ -272,7 +272,7 @@ static void ax_bump(struct mkiss *ax)
 			}
 			if (ax->crcmode != CRC_MODE_FLEX && ax->crcauto) {
 				printk(KERN_INFO
-				       "mkiss: %s: Switchting to crc-flexnet\n",
+				       "mkiss: %s: Switching to crc-flexnet\n",
 				       ax->dev->name);
 				ax->crcmode = CRC_MODE_FLEX;
 			}
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 5d6c1530a8c0..714c3a4a44ef 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -1246,12 +1246,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	if (err)
 		goto err_pci_reg;
 
-	err = pci_enable_pcie_error_reporting(pdev);
-	if (err) {
-		dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
-		        "0x%x\n", err);
-		/* non-fatal, continue */
-	}
+	pci_enable_pcie_error_reporting(pdev);
 
 	pci_set_master(pdev);
 	pci_save_state(pdev);
@@ -1628,7 +1623,6 @@ static void __devexit igb_remove(struct pci_dev *pdev)
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-	int err;
 
 	/* flush_scheduled work may reschedule our watchdog task, so
 	 * explicitly disable watchdog tasks from being rescheduled */
@@ -1682,10 +1676,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
 
 	free_netdev(netdev);
 
-	err = pci_disable_pcie_error_reporting(pdev);
-	if (err)
-		dev_err(&pdev->dev,
-			"pci_disable_pcie_error_reporting failed 0x%x\n", err);
+	pci_disable_pcie_error_reporting(pdev);
 
 	pci_disable_device(pdev);
 }
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index e36e951cbc65..aa7286bc4364 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -495,7 +495,7 @@ static void veth_take_cap_ack(struct veth_lpar_connection *cnx,
 			cnx->remote_lp);
 	} else {
 		memcpy(&cnx->cap_ack_event, event,
-		       sizeof(&cnx->cap_ack_event));
+		       sizeof(cnx->cap_ack_event));
 		cnx->state |= VETH_STATE_GOTCAPACK;
 		veth_kick_statemachine(cnx);
 	}
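
The iseries_veth fix above is the classic sizeof-of-a-pointer bug: sizeof(&cnx->cap_ack_event) evaluates to the size of a pointer (4 or 8 bytes), so only the first few bytes of the capability event were being copied; dropping the "&" makes memcpy() copy the whole structure. Illustrative shape of the bug and the fix (struct cap_event and struct connection are hypothetical stand-ins):

#include <linux/string.h>

struct cap_event { int payload[16]; };

struct connection { struct cap_event cap_ack_event; };

static void take_cap_ack(struct connection *cnx, const struct cap_event *event)
{
	/* old, broken: copies sizeof(a pointer) bytes only
	 * memcpy(&cnx->cap_ack_event, event, sizeof(&cnx->cap_ack_event)); */

	/* fixed: copies the full structure */
	memcpy(&cnx->cap_ack_event, event, sizeof(cnx->cap_ack_event));
}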
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 56b12f3192f1..e2d5343f1275 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -425,7 +425,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
 #endif /* CONFIG_DCB */
 	default:
 		hw_dbg(hw, "Flow control param set incorrectly\n");
-		ret_val = -IXGBE_ERR_CONFIG;
+		ret_val = IXGBE_ERR_CONFIG;
 		goto out;
 		break;
 	}
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 2ec58dcdb82b..34b04924c8a1 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -330,6 +330,8 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
 
 	switch (hw->device_id) {
 	case IXGBE_DEV_ID_82599_KX4:
+	case IXGBE_DEV_ID_82599_KX4_MEZZ:
+	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
 	case IXGBE_DEV_ID_82599_XAUI_LOM:
 		/* Default device ID is mezzanine card KX/KX4 */
 		media_type = ixgbe_media_type_backplane;
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 6621e172df3d..40ff120a9ad4 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1355,9 +1355,7 @@ static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
 /**
  *  ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
  *  @hw: pointer to hardware structure
- *  @addr_list: the list of new addresses
- *  @addr_count: number of addresses
- *  @next: iterator function to walk the address list
+ *  @uc_list: the list of new addresses
  *
  *  The given list replaces any existing list. Clears the secondary addrs from
  *  receive address registers. Uses unused receive address registers for the
@@ -1663,7 +1661,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
 #endif /* CONFIG_DCB */
 	default:
 		hw_dbg(hw, "Flow control param set incorrectly\n");
-		ret_val = -IXGBE_ERR_CONFIG;
+		ret_val = IXGBE_ERR_CONFIG;
 		goto out;
 		break;
 	}
@@ -1734,75 +1732,140 @@ s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
 	s32 ret_val = 0;
 	ixgbe_link_speed speed;
 	u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
+	u32 links2, anlp1_reg, autoc_reg, links;
 	bool link_up;
 
 	/*
 	 * AN should have completed when the cable was plugged in.
 	 * Look for reasons to bail out. Bail out if:
 	 * - FC autoneg is disabled, or if
-	 * - we don't have multispeed fiber, or if
-	 * - we're not running at 1G, or if
-	 * - link is not up, or if
-	 * - link is up but AN did not complete, or if
-	 * - link is up and AN completed but timed out
+	 * - link is not up.
 	 *
-	 * Since we're being called from an LSC, link is already know to be up.
+	 * Since we're being called from an LSC, link is already known to be up.
 	 * So use link_up_wait_to_complete=false.
 	 */
 	hw->mac.ops.check_link(hw, &speed, &link_up, false);
-	linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
-
-	if (hw->fc.disable_fc_autoneg ||
-	    !hw->phy.multispeed_fiber ||
-	    (speed != IXGBE_LINK_SPEED_1GB_FULL) ||
-	    !link_up ||
-	    ((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
-	    ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
+
+	if (hw->fc.disable_fc_autoneg || (!link_up)) {
 		hw->fc.fc_was_autonegged = false;
 		hw->fc.current_mode = hw->fc.requested_mode;
-		hw_dbg(hw, "Autoneg FC was skipped.\n");
 		goto out;
 	}
 
 	/*
+	 * On backplane, bail out if
+	 * - backplane autoneg was not completed, or if
+	 * - link partner is not AN enabled
+	 */
+	if (hw->phy.media_type == ixgbe_media_type_backplane) {
+		links = IXGBE_READ_REG(hw, IXGBE_LINKS);
+		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
+		if (((links & IXGBE_LINKS_KX_AN_COMP) == 0) ||
+		    ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)) {
+			hw->fc.fc_was_autonegged = false;
+			hw->fc.current_mode = hw->fc.requested_mode;
+			goto out;
+		}
+	}
+
+	/*
+	 * On multispeed fiber at 1g, bail out if
+	 * - link is up but AN did not complete, or if
+	 * - link is up and AN completed but timed out
+	 */
+	if (hw->phy.multispeed_fiber && (speed == IXGBE_LINK_SPEED_1GB_FULL)) {
+		linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
+		if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
+		    ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
+			hw->fc.fc_was_autonegged = false;
+			hw->fc.current_mode = hw->fc.requested_mode;
+			goto out;
+		}
+	}
+
+	/*
 	 * Read the AN advertisement and LP ability registers and resolve
 	 * local flow control settings accordingly
 	 */
-	pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
-	pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
-	if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
-	    (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) {
+	if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
+	    (hw->phy.media_type != ixgbe_media_type_backplane)) {
+		pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+		pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
+		if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
+		    (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) {
+			/*
+			 * Now we need to check if the user selected Rx ONLY
+			 * of pause frames. In this case, we had to advertise
+			 * FULL flow control because we could not advertise RX
+			 * ONLY. Hence, we must now check to see if we need to
+			 * turn OFF the TRANSMISSION of PAUSE frames.
+			 */
+			if (hw->fc.requested_mode == ixgbe_fc_full) {
+				hw->fc.current_mode = ixgbe_fc_full;
+				hw_dbg(hw, "Flow Control = FULL.\n");
+			} else {
+				hw->fc.current_mode = ixgbe_fc_rx_pause;
+				hw_dbg(hw, "Flow Control=RX PAUSE only\n");
+			}
+		} else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
+			   (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
+			   (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
+			   (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
+			hw->fc.current_mode = ixgbe_fc_tx_pause;
+			hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
+		} else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
+			   (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
+			   !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
+			   (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
+			hw->fc.current_mode = ixgbe_fc_rx_pause;
+			hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
+		} else {
+			hw->fc.current_mode = ixgbe_fc_none;
+			hw_dbg(hw, "Flow Control = NONE.\n");
+		}
+	}
+
+	if (hw->phy.media_type == ixgbe_media_type_backplane) {
 		/*
-		 * Now we need to check if the user selected Rx ONLY
-		 * of pause frames. In this case, we had to advertise
-		 * FULL flow control because we could not advertise RX
-		 * ONLY. Hence, we must now check to see if we need to
-		 * turn OFF the TRANSMISSION of PAUSE frames.
+		 * Read the 10g AN autoc and LP ability registers and resolve
+		 * local flow control settings accordingly
 		 */
-		if (hw->fc.requested_mode == ixgbe_fc_full) {
-			hw->fc.current_mode = ixgbe_fc_full;
-			hw_dbg(hw, "Flow Control = FULL.\n");
-		} else {
+		autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+		anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
+
+		if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
+		    (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE)) {
+			/*
+			 * Now we need to check if the user selected Rx ONLY
+			 * of pause frames. In this case, we had to advertise
+			 * FULL flow control because we could not advertise RX
+			 * ONLY. Hence, we must now check to see if we need to
+			 * turn OFF the TRANSMISSION of PAUSE frames.
+			 */
+			if (hw->fc.requested_mode == ixgbe_fc_full) {
+				hw->fc.current_mode = ixgbe_fc_full;
+				hw_dbg(hw, "Flow Control = FULL.\n");
+			} else {
+				hw->fc.current_mode = ixgbe_fc_rx_pause;
+				hw_dbg(hw, "Flow Control=RX PAUSE only\n");
+			}
+		} else if (!(autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
+			   (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
+			   (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
+			   (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
+			hw->fc.current_mode = ixgbe_fc_tx_pause;
+			hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
+		} else if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
+			   (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
+			   !(anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
+			   (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
 			hw->fc.current_mode = ixgbe_fc_rx_pause;
 			hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
+		} else {
+			hw->fc.current_mode = ixgbe_fc_none;
+			hw_dbg(hw, "Flow Control = NONE.\n");
 		}
-	} else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
-		   (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
-		   (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
-		   (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
-		hw->fc.current_mode = ixgbe_fc_tx_pause;
-		hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
-	} else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
-		   (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
-		   !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
-		   (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
-		hw->fc.current_mode = ixgbe_fc_rx_pause;
-		hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
-	} else {
-		hw->fc.current_mode = ixgbe_fc_none;
-		hw_dbg(hw, "Flow Control = NONE.\n");
 	}
-
+
 	/* Record that current_mode is the result of a successful autoneg */
 	hw->fc.fc_was_autonegged = true;
 
@@ -1919,7 +1982,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
 #endif /* CONFIG_DCB */
 	default:
 		hw_dbg(hw, "Flow control param set incorrectly\n");
-		ret_val = -IXGBE_ERR_CONFIG;
+		ret_val = IXGBE_ERR_CONFIG;
 		goto out;
 		break;
 	}
@@ -1927,9 +1990,6 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
 	IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
 	reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
 
-	/* Enable and restart autoneg to inform the link partner */
-	reg |= IXGBE_PCS1GLCTL_AN_ENABLE | IXGBE_PCS1GLCTL_AN_RESTART;
-
 	/* Disable AN timeout */
 	if (hw->fc.strict_ieee)
 		reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
@@ -1937,6 +1997,70 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
 	IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
 	hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
 
+	/*
+	 * Set up the 10G flow control advertisement registers so the HW
+	 * can do fc autoneg once the cable is plugged in. If we end up
+	 * using 1g instead, this is harmless.
+	 */
+	reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
+	/*
+	 * The possible values of fc.requested_mode are:
+	 * 0: Flow control is completely disabled
+	 * 1: Rx flow control is enabled (we can receive pause frames,
+	 *    but not send pause frames).
+	 * 2: Tx flow control is enabled (we can send pause frames but
+	 *    we do not support receiving pause frames).
+	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
+	 * other: Invalid.
+	 */
+	switch (hw->fc.requested_mode) {
+	case ixgbe_fc_none:
+		/* Flow control completely disabled by software override. */
+		reg &= ~(IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
+		break;
+	case ixgbe_fc_rx_pause:
+		/*
+		 * Rx Flow control is enabled and Tx Flow control is
+		 * disabled by software override. Since there really
+		 * isn't a way to advertise that we are capable of RX
+		 * Pause ONLY, we will advertise that we support both
+		 * symmetric and asymmetric Rx PAUSE. Later, we will
+		 * disable the adapter's ability to send PAUSE frames.
+		 */
+		reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
+		break;
+	case ixgbe_fc_tx_pause:
+		/*
+		 * Tx Flow control is enabled, and Rx Flow control is
+		 * disabled by software override.
+		 */
+		reg |= (IXGBE_AUTOC_ASM_PAUSE);
+		reg &= ~(IXGBE_AUTOC_SYM_PAUSE);
+		break;
+	case ixgbe_fc_full:
+		/* Flow control (both Rx and Tx) is enabled by SW override. */
+		reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
+		break;
+#ifdef CONFIG_DCB
+	case ixgbe_fc_pfc:
+		goto out;
+		break;
+#endif /* CONFIG_DCB */
+	default:
+		hw_dbg(hw, "Flow control param set incorrectly\n");
+		ret_val = IXGBE_ERR_CONFIG;
+		goto out;
+		break;
+	}
+	/*
+	 * AUTOC restart handles negotiation of 1G and 10G. There is
+	 * no need to set the PCS1GCTL register.
+	 */
+	reg |= IXGBE_AUTOC_AN_RESTART;
+	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg);
+	hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
+
 out:
 	return ret_val;
 }
@@ -2000,7 +2124,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
 
 	while (timeout) {
 		if (ixgbe_get_eeprom_semaphore(hw))
-			return -IXGBE_ERR_SWFW_SYNC;
+			return IXGBE_ERR_SWFW_SYNC;
 
 		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
 		if (!(gssr & (fwmask | swmask)))
@@ -2017,7 +2141,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
 
 	if (!timeout) {
 		hw_dbg(hw, "Driver can't access resource, GSSR timeout.\n");
-		return -IXGBE_ERR_SWFW_SYNC;
+		return IXGBE_ERR_SWFW_SYNC;
 	}
 
 	gssr |= swmask;
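
The reworked ixgbe_fc_autoneg() above resolves the negotiated flow-control mode twice with the same symmetric/asymmetric PAUSE truth table: once from the 1G PCS registers (PCS1GANA/PCS1GANLP) and once from the 10G backplane registers (AUTOC/ANLP1). The decision it implements can be condensed into a standalone helper like the following (hypothetical names; "sym"/"asm" are the symmetric and asymmetric PAUSE bits of the local advertisement and the link partner ability word):

#include <linux/types.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

static enum fc_mode resolve_fc(bool loc_sym, bool loc_asm,
			       bool lp_sym, bool lp_asm, bool requested_full)
{
	if (loc_sym && lp_sym)			/* both sides do symmetric PAUSE */
		return requested_full ? FC_FULL : FC_RX_PAUSE;
	if (!loc_sym && loc_asm && lp_sym && lp_asm)
		return FC_TX_PAUSE;		/* we only send, partner receives */
	if (loc_sym && loc_asm && !lp_sym && lp_asm)
		return FC_RX_PAUSE;		/* we only receive, partner sends */
	return FC_NONE;
}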
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 53b0a6680254..fa314cb005a4 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -53,6 +53,10 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
 	{"tx_packets", IXGBE_STAT(net_stats.tx_packets)},
 	{"rx_bytes", IXGBE_STAT(net_stats.rx_bytes)},
 	{"tx_bytes", IXGBE_STAT(net_stats.tx_bytes)},
+	{"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
+	{"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
+	{"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
+	{"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
 	{"lsc_int", IXGBE_STAT(lsc_int)},
 	{"tx_busy", IXGBE_STAT(tx_busy)},
 	{"non_eop_descs", IXGBE_STAT(non_eop_descs)},
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index c407bd9de0dd..cbb143ca1eb8 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -49,7 +49,7 @@ char ixgbe_driver_name[] = "ixgbe";
 static const char ixgbe_driver_string[] =
 	"Intel(R) 10 Gigabit PCI Express Network Driver";
 
-#define DRV_VERSION "2.0.37-k2"
+#define DRV_VERSION "2.0.44-k2"
 const char ixgbe_driver_version[] = DRV_VERSION;
 static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation.";
 
@@ -97,8 +97,12 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
 	 board_82599 },
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
 	 board_82599 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
+	 board_82599 },
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
 	 board_82599 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
+	 board_82599 },
 
 	/* required last entry */
 	{0, }
@@ -1885,12 +1889,29 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
 		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
 		adapter->tx_ring[i].head = IXGBE_TDH(j);
 		adapter->tx_ring[i].tail = IXGBE_TDT(j);
-		/* Disable Tx Head Writeback RO bit, since this hoses
+		/*
+		 * Disable Tx Head Writeback RO bit, since this hoses
 		 * bookkeeping if things aren't delivered in order.
 		 */
-		txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
+		switch (hw->mac.type) {
+		case ixgbe_mac_82598EB:
+			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
+			break;
+		case ixgbe_mac_82599EB:
+		default:
+			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
+			break;
+		}
 		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
-		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
+		switch (hw->mac.type) {
+		case ixgbe_mac_82598EB:
+			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
+			break;
+		case ixgbe_mac_82599EB:
+		default:
+			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
+			break;
+		}
 	}
 	if (hw->mac.type == ixgbe_mac_82599EB) {
 		/* We enable 8 traffic classes, DCB only */
@@ -4432,10 +4453,13 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 
 	/* 82598 hardware only has a 32 bit counter in the high register */
 	if (hw->mac.type == ixgbe_mac_82599EB) {
+		u64 tmp;
 		adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
-		IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
+		tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF; /* 4 high bits of GORC */
+		adapter->stats.gorc += (tmp << 32);
 		adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
-		IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
+		tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF; /* 4 high bits of GOTC */
+		adapter->stats.gotc += (tmp << 32);
 		adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL);
 		IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
 		adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
@@ -5071,7 +5095,6 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
 	/* Right now, we support IPv4 only */
 	struct ixgbe_atr_input atr_input;
 	struct tcphdr *th;
-	struct udphdr *uh;
 	struct iphdr *iph = ip_hdr(skb);
 	struct ethhdr *eth = (struct ethhdr *)skb->data;
 	u16 vlan_id, src_port, dst_port, flex_bytes;
@@ -5085,12 +5108,6 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
 		dst_port = th->dest;
 		l4type |= IXGBE_ATR_L4TYPE_TCP;
 		/* l4type IPv4 type is 0, no need to assign */
-	} else if(iph->protocol == IPPROTO_UDP) {
-		uh = udp_hdr(skb);
-		src_port = uh->source;
-		dst_port = uh->dest;
-		l4type |= IXGBE_ATR_L4TYPE_UDP;
-		/* l4type IPv4 type is 0, no need to assign */
 	} else {
 		/* Unsupported L4 header, just bail here */
 		return;
@@ -5494,12 +5511,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 		goto err_pci_reg;
 	}
 
-	err = pci_enable_pcie_error_reporting(pdev);
-	if (err) {
-		dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
-		        "0x%x\n", err);
-		/* non-fatal, continue */
-	}
+	pci_enable_pcie_error_reporting(pdev);
 
 	pci_set_master(pdev);
 	pci_save_state(pdev);
@@ -5808,7 +5820,6 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	int err;
 
 	set_bit(__IXGBE_DOWN, &adapter->state);
 	/* clear the module not found bit to make sure the worker won't
@@ -5859,10 +5870,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
 
 	free_netdev(netdev);
 
-	err = pci_disable_pcie_error_reporting(pdev);
-	if (err)
-		dev_err(&pdev->dev,
-			"pci_disable_pcie_error_reporting failed 0x%x\n", err);
+	pci_disable_pcie_error_reporting(pdev);
 
 	pci_disable_device(pdev);
 }
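
The ixgbe_update_stats() hunk above stops discarding the high half of the 82599 good-octets counters: GORCH/GOTCH carry 4 valid high bits on top of the 32-bit low registers, and reading the high register also clears the counter pair. A minimal sketch of that accumulation (rd32() is a placeholder register accessor standing in for IXGBE_READ_REG()):

#include <linux/types.h>

extern u32 rd32(u32 reg);	/* placeholder register accessor */

/* Fold a 36-bit hardware octet counter into a 64-bit software total. */
static void accumulate_octets(u64 *total, u32 lo_reg, u32 hi_reg)
{
	u64 hi;

	*total += rd32(lo_reg);		/* low 32 bits */
	hi = rd32(hi_reg) & 0xF;	/* only 4 valid high bits */
	*total += hi << 32;
}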
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 8761d7899f7d..ef4bdd58e016 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -49,9 +49,11 @@
 #define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM	0x10E1
 #define IXGBE_DEV_ID_82598EB_XF_LR		0x10F4
 #define IXGBE_DEV_ID_82599_KX4			0x10F7
+#define IXGBE_DEV_ID_82599_KX4_MEZZ		0x1514
 #define IXGBE_DEV_ID_82599_CX4			0x10F9
 #define IXGBE_DEV_ID_82599_SFP			0x10FB
 #define IXGBE_DEV_ID_82599_XAUI_LOM		0x10FC
+#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE	0x10F8
 
 /* General Registers */
 #define IXGBE_CTRL	0x00000
@@ -1336,6 +1338,8 @@
 #define IXGBE_AUTOC_KX4_SUPP	0x80000000
 #define IXGBE_AUTOC_KX_SUPP	0x40000000
 #define IXGBE_AUTOC_PAUSE	0x30000000
+#define IXGBE_AUTOC_ASM_PAUSE	0x20000000
+#define IXGBE_AUTOC_SYM_PAUSE	0x10000000
 #define IXGBE_AUTOC_RF		0x08000000
 #define IXGBE_AUTOC_PD_TMR	0x06000000
 #define IXGBE_AUTOC_AN_RX_LOOSE	0x01000000
@@ -1404,6 +1408,8 @@
 #define IXGBE_LINK_UP_TIME	90 /* 9.0 Seconds */
 #define IXGBE_AUTO_NEG_TIME	45 /* 4.5 Seconds */
 
+#define IXGBE_LINKS2_AN_SUPPORTED	0x00000040
+
 /* PCS1GLSTA Bit Masks */
 #define IXGBE_PCS1GLSTA_LINK_OK		1
 #define IXGBE_PCS1GLSTA_SYNK_OK		0x10
@@ -1424,6 +1430,11 @@
 #define IXGBE_PCS1GLCTL_AN_ENABLE	0x10000
 #define IXGBE_PCS1GLCTL_AN_RESTART	0x20000
 
+/* ANLP1 Bit Masks */
+#define IXGBE_ANLP1_PAUSE		0x0C00
+#define IXGBE_ANLP1_SYM_PAUSE		0x0400
+#define IXGBE_ANLP1_ASM_PAUSE		0x0800
+
 /* SW Semaphore Register bitmasks */
 #define IXGBE_SWSM_SMBI		0x00000001 /* Driver Semaphore bit */
 #define IXGBE_SWSM_SWESMBI	0x00000002 /* FW Semaphore bit */
diff --git a/drivers/net/ks8851_mll.c b/drivers/net/ks8851_mll.c
new file mode 100644
index 000000000000..0be14d702beb
--- /dev/null
+++ b/drivers/net/ks8851_mll.c
@@ -0,0 +1,1697 @@
1/**
2 * drivers/net/ks8851_mll.c
3 * Copyright (c) 2009 Micrel Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19/**
20 * Supports:
21 * KS8851 16bit MLL chip from Micrel Inc.
22 */
23
24#include <linux/module.h>
25#include <linux/kernel.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/ethtool.h>
29#include <linux/cache.h>
30#include <linux/crc32.h>
31#include <linux/mii.h>
32#include <linux/platform_device.h>
33#include <linux/delay.h>
34
35#define DRV_NAME "ks8851_mll"
36
37static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
38#define MAX_RECV_FRAMES 32
39#define MAX_BUF_SIZE 2048
40#define TX_BUF_SIZE 2000
41#define RX_BUF_SIZE 2000
42
43#define KS_CCR 0x08
44#define CCR_EEPROM (1 << 9)
45#define CCR_SPI (1 << 8)
46#define CCR_8BIT (1 << 7)
47#define CCR_16BIT (1 << 6)
48#define CCR_32BIT (1 << 5)
49#define CCR_SHARED (1 << 4)
50#define CCR_32PIN (1 << 0)
51
52/* MAC address registers */
53#define KS_MARL 0x10
54#define KS_MARM 0x12
55#define KS_MARH 0x14
56
57#define KS_OBCR 0x20
58#define OBCR_ODS_16MA (1 << 6)
59
60#define KS_EEPCR 0x22
61#define EEPCR_EESA (1 << 4)
62#define EEPCR_EESB (1 << 3)
63#define EEPCR_EEDO (1 << 2)
64#define EEPCR_EESCK (1 << 1)
65#define EEPCR_EECS (1 << 0)
66
67#define KS_MBIR 0x24
68#define MBIR_TXMBF (1 << 12)
69#define MBIR_TXMBFA (1 << 11)
70#define MBIR_RXMBF (1 << 4)
71#define MBIR_RXMBFA (1 << 3)
72
73#define KS_GRR 0x26
74#define GRR_QMU (1 << 1)
75#define GRR_GSR (1 << 0)
76
77#define KS_WFCR 0x2A
78#define WFCR_MPRXE (1 << 7)
79#define WFCR_WF3E (1 << 3)
80#define WFCR_WF2E (1 << 2)
81#define WFCR_WF1E (1 << 1)
82#define WFCR_WF0E (1 << 0)
83
84#define KS_WF0CRC0 0x30
85#define KS_WF0CRC1 0x32
86#define KS_WF0BM0 0x34
87#define KS_WF0BM1 0x36
88#define KS_WF0BM2 0x38
89#define KS_WF0BM3 0x3A
90
91#define KS_WF1CRC0 0x40
92#define KS_WF1CRC1 0x42
93#define KS_WF1BM0 0x44
94#define KS_WF1BM1 0x46
95#define KS_WF1BM2 0x48
96#define KS_WF1BM3 0x4A
97
98#define KS_WF2CRC0 0x50
99#define KS_WF2CRC1 0x52
100#define KS_WF2BM0 0x54
101#define KS_WF2BM1 0x56
102#define KS_WF2BM2 0x58
103#define KS_WF2BM3 0x5A
104
105#define KS_WF3CRC0 0x60
106#define KS_WF3CRC1 0x62
107#define KS_WF3BM0 0x64
108#define KS_WF3BM1 0x66
109#define KS_WF3BM2 0x68
110#define KS_WF3BM3 0x6A
111
112#define KS_TXCR 0x70
113#define TXCR_TCGICMP (1 << 8)
114#define TXCR_TCGUDP (1 << 7)
115#define TXCR_TCGTCP (1 << 6)
116#define TXCR_TCGIP (1 << 5)
117#define TXCR_FTXQ (1 << 4)
118#define TXCR_TXFCE (1 << 3)
119#define TXCR_TXPE (1 << 2)
120#define TXCR_TXCRC (1 << 1)
121#define TXCR_TXE (1 << 0)
122
123#define KS_TXSR 0x72
124#define TXSR_TXLC (1 << 13)
125#define TXSR_TXMC (1 << 12)
126#define TXSR_TXFID_MASK (0x3f << 0)
127#define TXSR_TXFID_SHIFT (0)
128#define TXSR_TXFID_GET(_v) (((_v) >> 0) & 0x3f)
129
130
131#define KS_RXCR1 0x74
132#define RXCR1_FRXQ (1 << 15)
133#define RXCR1_RXUDPFCC (1 << 14)
134#define RXCR1_RXTCPFCC (1 << 13)
135#define RXCR1_RXIPFCC (1 << 12)
136#define RXCR1_RXPAFMA (1 << 11)
137#define RXCR1_RXFCE (1 << 10)
138#define RXCR1_RXEFE (1 << 9)
139#define RXCR1_RXMAFMA (1 << 8)
140#define RXCR1_RXBE (1 << 7)
141#define RXCR1_RXME (1 << 6)
142#define RXCR1_RXUE (1 << 5)
143#define RXCR1_RXAE (1 << 4)
144#define RXCR1_RXINVF (1 << 1)
145#define RXCR1_RXE (1 << 0)
146#define RXCR1_FILTER_MASK (RXCR1_RXINVF | RXCR1_RXAE | \
147 RXCR1_RXMAFMA | RXCR1_RXPAFMA)
148
149#define KS_RXCR2 0x76
150#define RXCR2_SRDBL_MASK (0x7 << 5)
151#define RXCR2_SRDBL_SHIFT (5)
152#define RXCR2_SRDBL_4B (0x0 << 5)
153#define RXCR2_SRDBL_8B (0x1 << 5)
154#define RXCR2_SRDBL_16B (0x2 << 5)
155#define RXCR2_SRDBL_32B (0x3 << 5)
156/* #define RXCR2_SRDBL_FRAME (0x4 << 5) */
157#define RXCR2_IUFFP (1 << 4)
158#define RXCR2_RXIUFCEZ (1 << 3)
159#define RXCR2_UDPLFE (1 << 2)
160#define RXCR2_RXICMPFCC (1 << 1)
161#define RXCR2_RXSAF (1 << 0)
162
163#define KS_TXMIR 0x78
164
165#define KS_RXFHSR 0x7C
166#define RXFSHR_RXFV (1 << 15)
167#define RXFSHR_RXICMPFCS (1 << 13)
168#define RXFSHR_RXIPFCS (1 << 12)
169#define RXFSHR_RXTCPFCS (1 << 11)
170#define RXFSHR_RXUDPFCS (1 << 10)
171#define RXFSHR_RXBF (1 << 7)
172#define RXFSHR_RXMF (1 << 6)
173#define RXFSHR_RXUF (1 << 5)
174#define RXFSHR_RXMR (1 << 4)
175#define RXFSHR_RXFT (1 << 3)
176#define RXFSHR_RXFTL (1 << 2)
177#define RXFSHR_RXRF (1 << 1)
178#define RXFSHR_RXCE (1 << 0)
179#define RXFSHR_ERR (RXFSHR_RXCE | RXFSHR_RXRF |\
180 RXFSHR_RXFTL | RXFSHR_RXMR |\
181 RXFSHR_RXICMPFCS | RXFSHR_RXIPFCS |\
182 RXFSHR_RXTCPFCS)
183#define KS_RXFHBCR 0x7E
184#define RXFHBCR_CNT_MASK 0x0FFF
185
186#define KS_TXQCR 0x80
187#define TXQCR_AETFE (1 << 2)
188#define TXQCR_TXQMAM (1 << 1)
189#define TXQCR_METFE (1 << 0)
190
191#define KS_RXQCR 0x82
192#define RXQCR_RXDTTS (1 << 12)
193#define RXQCR_RXDBCTS (1 << 11)
194#define RXQCR_RXFCTS (1 << 10)
195#define RXQCR_RXIPHTOE (1 << 9)
196#define RXQCR_RXDTTE (1 << 7)
197#define RXQCR_RXDBCTE (1 << 6)
198#define RXQCR_RXFCTE (1 << 5)
199#define RXQCR_ADRFE (1 << 4)
200#define RXQCR_SDA (1 << 3)
201#define RXQCR_RRXEF (1 << 0)
202#define RXQCR_CMD_CNTL (RXQCR_RXFCTE|RXQCR_ADRFE)
203
204#define KS_TXFDPR 0x84
205#define TXFDPR_TXFPAI (1 << 14)
206#define TXFDPR_TXFP_MASK (0x7ff << 0)
207#define TXFDPR_TXFP_SHIFT (0)
208
209#define KS_RXFDPR 0x86
210#define RXFDPR_RXFPAI (1 << 14)
211
212#define KS_RXDTTR 0x8C
213#define KS_RXDBCTR 0x8E
214
215#define KS_IER 0x90
216#define KS_ISR 0x92
217#define IRQ_LCI (1 << 15)
218#define IRQ_TXI (1 << 14)
219#define IRQ_RXI (1 << 13)
220#define IRQ_RXOI (1 << 11)
221#define IRQ_TXPSI (1 << 9)
222#define IRQ_RXPSI (1 << 8)
223#define IRQ_TXSAI (1 << 6)
224#define IRQ_RXWFDI (1 << 5)
225#define IRQ_RXMPDI (1 << 4)
226#define IRQ_LDI (1 << 3)
227#define IRQ_EDI (1 << 2)
228#define IRQ_SPIBEI (1 << 1)
229#define IRQ_DEDI (1 << 0)
230
231#define KS_RXFCTR 0x9C
232#define RXFCTR_THRESHOLD_MASK 0x00FF
233
234#define KS_RXFC 0x9D
235#define RXFCTR_RXFC_MASK (0xff << 8)
236#define RXFCTR_RXFC_SHIFT (8)
237#define RXFCTR_RXFC_GET(_v) (((_v) >> 8) & 0xff)
238#define RXFCTR_RXFCT_MASK (0xff << 0)
239#define RXFCTR_RXFCT_SHIFT (0)
240
241#define KS_TXNTFSR 0x9E
242
243#define KS_MAHTR0 0xA0
244#define KS_MAHTR1 0xA2
245#define KS_MAHTR2 0xA4
246#define KS_MAHTR3 0xA6
247
248#define KS_FCLWR 0xB0
249#define KS_FCHWR 0xB2
250#define KS_FCOWR 0xB4
251
252#define KS_CIDER 0xC0
253#define CIDER_ID 0x8870
254#define CIDER_REV_MASK (0x7 << 1)
255#define CIDER_REV_SHIFT (1)
256#define CIDER_REV_GET(_v) (((_v) >> 1) & 0x7)
257
258#define KS_CGCR 0xC6
259#define KS_IACR 0xC8
260#define IACR_RDEN (1 << 12)
261#define IACR_TSEL_MASK (0x3 << 10)
262#define IACR_TSEL_SHIFT (10)
263#define IACR_TSEL_MIB (0x3 << 10)
264#define IACR_ADDR_MASK (0x1f << 0)
265#define IACR_ADDR_SHIFT (0)
266
267#define KS_IADLR 0xD0
268#define KS_IAHDR 0xD2
269
270#define KS_PMECR 0xD4
271#define PMECR_PME_DELAY (1 << 14)
272#define PMECR_PME_POL (1 << 12)
273#define PMECR_WOL_WAKEUP (1 << 11)
274#define PMECR_WOL_MAGICPKT (1 << 10)
275#define PMECR_WOL_LINKUP (1 << 9)
276#define PMECR_WOL_ENERGY (1 << 8)
277#define PMECR_AUTO_WAKE_EN (1 << 7)
278#define PMECR_WAKEUP_NORMAL (1 << 6)
279#define PMECR_WKEVT_MASK (0xf << 2)
280#define PMECR_WKEVT_SHIFT (2)
281#define PMECR_WKEVT_GET(_v) (((_v) >> 2) & 0xf)
282#define PMECR_WKEVT_ENERGY (0x1 << 2)
283#define PMECR_WKEVT_LINK (0x2 << 2)
284#define PMECR_WKEVT_MAGICPKT (0x4 << 2)
285#define PMECR_WKEVT_FRAME (0x8 << 2)
286#define PMECR_PM_MASK (0x3 << 0)
287#define PMECR_PM_SHIFT (0)
288#define PMECR_PM_NORMAL (0x0 << 0)
289#define PMECR_PM_ENERGY (0x1 << 0)
290#define PMECR_PM_SOFTDOWN (0x2 << 0)
291#define PMECR_PM_POWERSAVE (0x3 << 0)
292
293/* Standard MII PHY data */
294#define KS_P1MBCR 0xE4
295#define P1MBCR_FORCE_FDX (1 << 8)
296
297#define KS_P1MBSR 0xE6
298#define P1MBSR_AN_COMPLETE (1 << 5)
299#define P1MBSR_AN_CAPABLE (1 << 3)
300#define P1MBSR_LINK_UP (1 << 2)
301
302#define KS_PHY1ILR 0xE8
303#define KS_PHY1IHR 0xEA
304#define KS_P1ANAR 0xEC
305#define KS_P1ANLPR 0xEE
306
307#define KS_P1SCLMD 0xF4
308#define P1SCLMD_LEDOFF (1 << 15)
309#define P1SCLMD_TXIDS (1 << 14)
310#define P1SCLMD_RESTARTAN (1 << 13)
311#define P1SCLMD_DISAUTOMDIX (1 << 10)
312#define P1SCLMD_FORCEMDIX (1 << 9)
313#define P1SCLMD_AUTONEGEN (1 << 7)
314#define P1SCLMD_FORCE100 (1 << 6)
315#define P1SCLMD_FORCEFDX (1 << 5)
316#define P1SCLMD_ADV_FLOW (1 << 4)
317#define P1SCLMD_ADV_100BT_FDX (1 << 3)
318#define P1SCLMD_ADV_100BT_HDX (1 << 2)
319#define P1SCLMD_ADV_10BT_FDX (1 << 1)
320#define P1SCLMD_ADV_10BT_HDX (1 << 0)
321
322#define KS_P1CR 0xF6
323#define P1CR_HP_MDIX (1 << 15)
324#define P1CR_REV_POL (1 << 13)
325#define P1CR_OP_100M (1 << 10)
326#define P1CR_OP_FDX (1 << 9)
327#define P1CR_OP_MDI (1 << 7)
328#define P1CR_AN_DONE (1 << 6)
329#define P1CR_LINK_GOOD (1 << 5)
330#define P1CR_PNTR_FLOW (1 << 4)
331#define P1CR_PNTR_100BT_FDX (1 << 3)
332#define P1CR_PNTR_100BT_HDX (1 << 2)
333#define P1CR_PNTR_10BT_FDX (1 << 1)
334#define P1CR_PNTR_10BT_HDX (1 << 0)
335
336/* TX Frame control */
337
338#define TXFR_TXIC (1 << 15)
339#define TXFR_TXFID_MASK (0x3f << 0)
340#define TXFR_TXFID_SHIFT (0)
341
342#define KS_P1SR 0xF8
343#define P1SR_HP_MDIX (1 << 15)
344#define P1SR_REV_POL (1 << 13)
345#define P1SR_OP_100M (1 << 10)
346#define P1SR_OP_FDX (1 << 9)
347#define P1SR_OP_MDI (1 << 7)
348#define P1SR_AN_DONE (1 << 6)
349#define P1SR_LINK_GOOD (1 << 5)
350#define P1SR_PNTR_FLOW (1 << 4)
351#define P1SR_PNTR_100BT_FDX (1 << 3)
352#define P1SR_PNTR_100BT_HDX (1 << 2)
353#define P1SR_PNTR_10BT_FDX (1 << 1)
354#define P1SR_PNTR_10BT_HDX (1 << 0)
355
356#define ENUM_BUS_NONE 0
357#define ENUM_BUS_8BIT 1
358#define ENUM_BUS_16BIT 2
359#define ENUM_BUS_32BIT 3
360
361#define MAX_MCAST_LST 32
362#define HW_MCAST_SIZE 8
363#define MAC_ADDR_LEN 6
364
365/**
366 * union ks_tx_hdr - tx header data
367 * @txb: The header as bytes
368 * @txw: The header as 16bit, little-endian words
369 *
370 * A dual representation of the tx header data to allow
371 * access to individual bytes, and to allow 16bit accesses
372 * with 16bit alignment.
373 */
374union ks_tx_hdr {
375 u8 txb[4];
376 __le16 txw[2];
377};
378
379/**
380 * struct ks_net - KS8851 driver private data
381 * @netdev : The network device we're bound to.
382 * @hw_addr : start address of data register.
383 * @hw_addr_cmd : start address of command register.
384 * @txh : temporary buffer to save status/length.
385 * @lock : Lock to ensure that the device is not accessed when busy.
386 * @pdev : Pointer to platform device.
387 * @mii : The MII state information for the mii calls.
388 * @frame_head_info : frame header information for multi-pkt rx.
389 * @statelock : Lock on this structure for tx list.
390 * @msg_enable : The message flags controlling driver output (see ethtool).
391 * @frame_cnt : number of frames received.
392 * @bus_width : i/o bus width.
393 * @irq : irq number assigned to this device.
394 * @rc_rxqcr : Cached copy of KS_RXQCR.
395 * @rc_txcr : Cached copy of KS_TXCR.
396 * @rc_ier : Cached copy of KS_IER.
397 * @sharedbus : Multiplexed (addr and data bus) mode indicator.
398 * @cmd_reg_cache : Cached copy of the command register.
399 * @cmd_reg_cache_int : Cached copy of the command register used in the irq handler.
400 * @promiscuous : promiscuous mode indicator.
401 * @all_mcast : all-multicast mode indicator.
402 * @mcast_lst_size : size of multicast list.
403 * @mcast_lst : multicast list.
404 * @mcast_bits : multicast hash-table bits.
405 * @mac_addr : MAC address assigned to this device.
406 * @fid : frame id.
407 * @extra_byte : number of extra bytes prepended to each rx pkt.
408 * @enabled : indicates whether the device is enabled.
409 *
410 * The @lock ensures that the chip is protected when certain operations are
411 * in progress. When the read or write packet transfer is in progress, most
412 * of the chip registers are not accessible until the transfer is finished and
413 * the DMA has been de-asserted.
414 *
415 * The @statelock is used to protect information in the structure which may
416 * need to be accessed via several sources, such as the network driver layer
417 * or one of the work queues.
418 *
419 */
420
421/* Receive multiplex framer header info */
422struct type_frame_head {
423 u16 sts; /* Frame status */
424 u16 len; /* Byte count */
425};
426
427struct ks_net {
428 struct net_device *netdev;
429 void __iomem *hw_addr;
430 void __iomem *hw_addr_cmd;
431 union ks_tx_hdr txh ____cacheline_aligned;
432	struct mutex lock; /* lock to serialise chip register access */
433 struct platform_device *pdev;
434 struct mii_if_info mii;
435 struct type_frame_head *frame_head_info;
436 spinlock_t statelock;
437 u32 msg_enable;
438 u32 frame_cnt;
439 int bus_width;
440 int irq;
441
442 u16 rc_rxqcr;
443 u16 rc_txcr;
444 u16 rc_ier;
445 u16 sharedbus;
446 u16 cmd_reg_cache;
447 u16 cmd_reg_cache_int;
448 u16 promiscuous;
449 u16 all_mcast;
450 u16 mcast_lst_size;
451 u8 mcast_lst[MAX_MCAST_LST][MAC_ADDR_LEN];
452 u8 mcast_bits[HW_MCAST_SIZE];
453 u8 mac_addr[6];
454 u8 fid;
455 u8 extra_byte;
456 u8 enabled;
457};
458
459static int msg_enable;
460
461#define ks_info(_ks, _msg...) dev_info(&(_ks)->pdev->dev, _msg)
462#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->pdev->dev, _msg)
463#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->pdev->dev, _msg)
464#define ks_err(_ks, _msg...) dev_err(&(_ks)->pdev->dev, _msg)
465
466#define BE3 0x8000 /* Byte Enable 3 */
467#define BE2 0x4000 /* Byte Enable 2 */
468#define BE1 0x2000 /* Byte Enable 1 */
469#define BE0 0x1000 /* Byte Enable 0 */
470
471/**
472 * register read/write calls.
473 *
474 * All these calls issue transactions to access the chip's registers. They
475 * all require that the necessary lock is held to prevent accesses when the
476 * chip is busy transferring packet data (RX/TX FIFO accesses).
477 */
478
479/**
480 * ks_rdreg8 - read 8 bit register from device
481 * @ks : The chip information
482 * @offset: The register address
483 *
484 * Read an 8-bit register from the chip, returning the result
485 */
486static u8 ks_rdreg8(struct ks_net *ks, int offset)
487{
488 u16 data;
489 u8 shift_bit = offset & 0x03;
490 u8 shift_data = (offset & 1) << 3;
491 ks->cmd_reg_cache = (u16) offset | (u16)(BE0 << shift_bit);
492 iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
493 data = ioread16(ks->hw_addr);
494 return (u8)(data >> shift_data);
495}
496
497/**
498 * ks_rdreg16 - read 16 bit register from device
499 * @ks : The chip information
500 * @offset: The register address
501 *
502 * Read a 16bit register from the chip, returning the result
503 */
504
505static u16 ks_rdreg16(struct ks_net *ks, int offset)
506{
507 ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
508 iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
509 return ioread16(ks->hw_addr);
510}
511
512/**
513 * ks_wrreg8 - write 8bit register value to chip
514 * @ks: The chip information
515 * @offset: The register address
516 * @value: The value to write
517 *
518 */
519static void ks_wrreg8(struct ks_net *ks, int offset, u8 value)
520{
521 u8 shift_bit = (offset & 0x03);
522 u16 value_write = (u16)(value << ((offset & 1) << 3));
523 ks->cmd_reg_cache = (u16)offset | (BE0 << shift_bit);
524 iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
525 iowrite16(value_write, ks->hw_addr);
526}
527
528/**
529 * ks_wrreg16 - write 16bit register value to chip
530 * @ks: The chip information
531 * @offset: The register address
532 * @value: The value to write
533 *
534 */
535
536static void ks_wrreg16(struct ks_net *ks, int offset, u16 value)
537{
538 ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
539 iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
540 iowrite16(value, ks->hw_addr);
541}
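A worked example may make the addressing scheme above easier to follow. This is a minimal sketch added for illustration only, not part of the patch: the command word written to hw_addr_cmd is simply the register offset with the byte-enable bits for the lanes being accessed.

	/* illustration only: command word for a 16-bit access, mirroring
	 * ks_rdreg16()/ks_wrreg16() above.
	 */
	static u16 ks_cmd_word16(int offset)
	{
		/* BE1|BE0 for the even word of a dword, shifted up to
		 * BE3|BE2 for the odd word.
		 */
		return (u16)offset | ((BE1 | BE0) << (offset & 0x02));
	}
	/* e.g. ks_cmd_word16(KS_PMECR) == 0x30D4 (KS_PMECR is 0xD4),
	 *      ks_cmd_word16(0xD6)     == 0xC0D6
	 */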
542
543/**
544 * ks_inblk - read a block of data from the QMU. Called after pseudo-DMA mode is enabled.
545 * @ks: The chip state
546 * @wptr: buffer address to save data
547 * @len: length in bytes to read
548 *
549 */
550static inline void ks_inblk(struct ks_net *ks, u16 *wptr, u32 len)
551{
552 len >>= 1;
553 while (len--)
554 *wptr++ = (u16)ioread16(ks->hw_addr);
555}
556
557/**
558 * ks_outblk - write data to the QMU. Called after pseudo-DMA mode is enabled.
559 * @ks: The chip information
560 * @wptr: buffer address
561 * @len: length in bytes to write
562 *
563 */
564static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len)
565{
566 len >>= 1;
567 while (len--)
568 iowrite16(*wptr++, ks->hw_addr);
569}
570
571/**
572 * ks_tx_fifo_space - return the available hardware buffer size.
573 * @ks: The chip information
574 *
575 */
576static inline u16 ks_tx_fifo_space(struct ks_net *ks)
577{
578 return ks_rdreg16(ks, KS_TXMIR) & 0x1fff;
579}
580
581/**
582 * ks_save_cmd_reg - save the command register from the cache.
583 * @ks: The chip information
584 *
585 */
586static inline void ks_save_cmd_reg(struct ks_net *ks)
587{
588	/* The KS8851 MLL cannot reliably read back the command register,
589	 * so rely on software to cache its contents instead.
590	 */
591 ks->cmd_reg_cache_int = ks->cmd_reg_cache;
592}
593
594/**
595 * ks_restore_cmd_reg - restore the command register from the cache and
596 * write to hardware register.
597 * @ks: The chip information
598 *
599 */
600static inline void ks_restore_cmd_reg(struct ks_net *ks)
601{
602 ks->cmd_reg_cache = ks->cmd_reg_cache_int;
603 iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
604}
605
606/**
607 * ks_set_powermode - set power mode of the device
608 * @ks: The chip information
609 * @pwrmode: The power mode value to write to KS_PMECR.
610 *
611 * Change the power mode of the chip.
612 */
613static void ks_set_powermode(struct ks_net *ks, unsigned pwrmode)
614{
615 unsigned pmecr;
616
617 if (netif_msg_hw(ks))
618 ks_dbg(ks, "setting power mode %d\n", pwrmode);
619
620 ks_rdreg16(ks, KS_GRR);
621 pmecr = ks_rdreg16(ks, KS_PMECR);
622 pmecr &= ~PMECR_PM_MASK;
623 pmecr |= pwrmode;
624
625 ks_wrreg16(ks, KS_PMECR, pmecr);
626}
627
628/**
629 * ks_read_config - read chip configuration of bus width.
630 * @ks: The chip information
631 *
632 */
633static void ks_read_config(struct ks_net *ks)
634{
635 u16 reg_data = 0;
636
637 /* Regardless of bus width, 8 bit read should always work.*/
638 reg_data = ks_rdreg8(ks, KS_CCR) & 0x00FF;
639 reg_data |= ks_rdreg8(ks, KS_CCR+1) << 8;
640
641 /* addr/data bus are multiplexed */
642 ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED;
643
644 /* There are garbage data when reading data from QMU,
645 depending on bus-width.
646 */
647
648 if (reg_data & CCR_8BIT) {
649 ks->bus_width = ENUM_BUS_8BIT;
650 ks->extra_byte = 1;
651 } else if (reg_data & CCR_16BIT) {
652 ks->bus_width = ENUM_BUS_16BIT;
653 ks->extra_byte = 2;
654 } else {
655 ks->bus_width = ENUM_BUS_32BIT;
656 ks->extra_byte = 4;
657 }
658}
659
660/**
661 * ks_soft_reset - issue one of the soft resets to the device
662 * @ks: The device state.
663 * @op: The bit(s) to set in the GRR
664 *
665 * Issue the relevant soft-reset command to the device's GRR register
666 * specified by @op.
667 *
668 * Note, the delays are in there as a caution to ensure that the reset
669 * has time to take effect and then complete. Since the datasheet does
670 * not currently specify the exact sequence, we have chosen something
671 * that seems to work with our device.
672 */
673static void ks_soft_reset(struct ks_net *ks, unsigned op)
674{
675 /* Disable interrupt first */
676 ks_wrreg16(ks, KS_IER, 0x0000);
677 ks_wrreg16(ks, KS_GRR, op);
678 mdelay(10); /* wait a short time to effect reset */
679 ks_wrreg16(ks, KS_GRR, 0);
680 mdelay(1); /* wait for condition to clear */
681}
682
683
684/**
685 * ks_read_qmu - read 1 pkt data from the QMU.
686 * @ks: The chip information
687 * @buf: buffer address to save 1 pkt
688 * @len: Pkt length
689 * Here is the sequence to read 1 pkt:
690 * 1. set pseudo-DMA mode
691 * 2. read prepend data
692 * 3. read pkt data
693 * 4. reset pseudo-DMA mode
694 */
695static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
696{
697 u32 r = ks->extra_byte & 0x1 ;
698 u32 w = ks->extra_byte - r;
699
700	/* 1. set pseudo-DMA mode */
701 ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
702 ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
703
704 /* 2. read prepend data */
705 /**
706 * read 4 + extra bytes and discard them.
707 * extra bytes for dummy, 2 for status, 2 for len
708 */
709
710	/* a single dummy byte is only present on the 8-bit bus */
711 if (unlikely(r))
712 ioread8(ks->hw_addr);
713 ks_inblk(ks, buf, w + 2 + 2);
714
715 /* 3. read pkt data */
716 ks_inblk(ks, buf, ALIGN(len, 4));
717
718	/* 4. reset pseudo-DMA mode */
719 ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
720}
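For reference, a short note on the prepend arithmetic above (added for illustration, not part of the patch): the number of bytes discarded ahead of the frame data is always extra_byte + 4.

	/* illustration only: prepend bytes discarded by ks_read_qmu() before
	 * the frame data, i.e. extra_byte dummy bytes + 2 status + 2 length.
	 *
	 *   bus width   extra_byte   ioread8()   ks_inblk(.., w + 2 + 2)
	 *   8-bit            1         1 byte          4 bytes
	 *   16-bit           2           -             6 bytes
	 *   32-bit           4           -             8 bytes
	 */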
721
722/**
723 * ks_rcv - read multiple pkts data from the QMU.
724 * @ks: The chip information
725 * @netdev: The network device being opened.
726 *
727 * Read all of the header information before reading the pkt content.
728 * It is not allowed to fetch only part of the pkts in the QMU after
729 * issuing the interrupt ack.
730 */
731static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
732{
733 u32 i;
734 struct type_frame_head *frame_hdr = ks->frame_head_info;
735 struct sk_buff *skb;
736
737 ks->frame_cnt = ks_rdreg16(ks, KS_RXFCTR) >> 8;
738
739 /* read all header information */
740 for (i = 0; i < ks->frame_cnt; i++) {
741 /* Checking Received packet status */
742 frame_hdr->sts = ks_rdreg16(ks, KS_RXFHSR);
743 /* Get packet len from hardware */
744 frame_hdr->len = ks_rdreg16(ks, KS_RXFHBCR);
745 frame_hdr++;
746 }
747
748 frame_hdr = ks->frame_head_info;
749 while (ks->frame_cnt--) {
750 skb = dev_alloc_skb(frame_hdr->len + 16);
751 if (likely(skb && (frame_hdr->sts & RXFSHR_RXFV) &&
752 (frame_hdr->len < RX_BUF_SIZE) && frame_hdr->len)) {
753 skb_reserve(skb, 2);
754 /* read data block including CRC 4 bytes */
755 ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len + 4);
756 skb_put(skb, frame_hdr->len);
757 skb->dev = netdev;
758 skb->protocol = eth_type_trans(skb, netdev);
759 netif_rx(skb);
760 } else {
761 printk(KERN_ERR "%s: err:skb alloc\n", __func__);
762 ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
763 if (skb)
764 dev_kfree_skb_irq(skb);
765 }
766 frame_hdr++;
767 }
768}
769
770/**
771 * ks_update_link_status - link status update.
772 * @netdev: The network device being opened.
773 * @ks: The chip information
774 *
775 */
776
777static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks)
778{
779 /* check the status of the link */
780 u32 link_up_status;
781 if (ks_rdreg16(ks, KS_P1SR) & P1SR_LINK_GOOD) {
782 netif_carrier_on(netdev);
783 link_up_status = true;
784 } else {
785 netif_carrier_off(netdev);
786 link_up_status = false;
787 }
788 if (netif_msg_link(ks))
789 ks_dbg(ks, "%s: %s\n",
790 __func__, link_up_status ? "UP" : "DOWN");
791}
792
793/**
794 * ks_irq - device interrupt handler
795 * @irq: Interrupt number passed to the IRQ handler.
796 * @pw: The private word passed to request_irq(), our struct ks_net.
797 *
798 * This is the handler invoked to find out what happened
799 *
800 * Read the interrupt status, work out what needs to be done and then clear
801 * any of the interrupts that are not needed.
802 */
803
804static irqreturn_t ks_irq(int irq, void *pw)
805{
806 struct ks_net *ks = pw;
807 struct net_device *netdev = ks->netdev;
808 u16 status;
809
810	/* this should be the first thing done in the IRQ handler */
811 ks_save_cmd_reg(ks);
812
813 status = ks_rdreg16(ks, KS_ISR);
814 if (unlikely(!status)) {
815 ks_restore_cmd_reg(ks);
816 return IRQ_NONE;
817 }
818
819 ks_wrreg16(ks, KS_ISR, status);
820
821 if (likely(status & IRQ_RXI))
822 ks_rcv(ks, netdev);
823
824 if (unlikely(status & IRQ_LCI))
825 ks_update_link_status(netdev, ks);
826
827 if (unlikely(status & IRQ_TXI))
828 netif_wake_queue(netdev);
829
830 if (unlikely(status & IRQ_LDI)) {
831
832 u16 pmecr = ks_rdreg16(ks, KS_PMECR);
833 pmecr &= ~PMECR_WKEVT_MASK;
834 ks_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
835 }
836
837	/* this should be the last thing done in the IRQ handler */
838 ks_restore_cmd_reg(ks);
839 return IRQ_HANDLED;
840}
841
842
843/**
844 * ks_net_open - open network device
845 * @netdev: The network device being opened.
846 *
847 * Called when the network device is marked active, such as a user executing
848 * 'ifconfig up' on the device.
849 */
850static int ks_net_open(struct net_device *netdev)
851{
852 struct ks_net *ks = netdev_priv(netdev);
853 int err;
854
855#define KS_INT_FLAGS (IRQF_DISABLED|IRQF_TRIGGER_LOW)
856	/* the hardware has already been set up at probe time, so the
857	 * only thing left to do here is to hook up the interrupt.
858	 */
859
860 if (netif_msg_ifup(ks))
861 ks_dbg(ks, "%s - entry\n", __func__);
862
863	/* register the interrupt handler */
864 err = request_irq(ks->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, ks);
865
866 if (err) {
867 printk(KERN_ERR "Failed to request IRQ: %d: %d\n",
868 ks->irq, err);
869 return err;
870 }
871
872 if (netif_msg_ifup(ks))
873 ks_dbg(ks, "network device %s up\n", netdev->name);
874
875 return 0;
876}
877
878/**
879 * ks_net_stop - close network device
880 * @netdev: The device being closed.
881 *
882 * Called to close down a network device which has been active. Cancel any
883 * pending work, shut down the RX and TX processes and then place the chip
884 * into a low power state whilst it is not being used.
885 */
886static int ks_net_stop(struct net_device *netdev)
887{
888 struct ks_net *ks = netdev_priv(netdev);
889
890 if (netif_msg_ifdown(ks))
891 ks_info(ks, "%s: shutting down\n", netdev->name);
892
893 netif_stop_queue(netdev);
894
895 kfree(ks->frame_head_info);
896
897 mutex_lock(&ks->lock);
898
899 /* turn off the IRQs and ack any outstanding */
900 ks_wrreg16(ks, KS_IER, 0x0000);
901 ks_wrreg16(ks, KS_ISR, 0xffff);
902
903 /* shutdown RX process */
904 ks_wrreg16(ks, KS_RXCR1, 0x0000);
905
906 /* shutdown TX process */
907 ks_wrreg16(ks, KS_TXCR, 0x0000);
908
909 /* set powermode to soft power down to save power */
910 ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
911 free_irq(ks->irq, netdev);
912 mutex_unlock(&ks->lock);
913 return 0;
914}
915
916
917/**
918 * ks_write_qmu - write 1 pkt data to the QMU.
919 * @ks: The chip information
920 * @pdata: buffer address of the pkt to write
921 * @len: Pkt length in bytes
922 * Here is the sequence to write 1 pkt:
923 * 1. set pseudo-DMA mode
924 * 2. write status/length
925 * 3. write pkt data
926 * 4. reset pseudo-DMA mode
927 * 5. enqueue the pkt into the TXQ
928 * 6. wait until the pkt is out
929 */
930static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
931{
932 unsigned fid = ks->fid;
933
934 fid = ks->fid;
935 ks->fid = (ks->fid + 1) & TXFR_TXFID_MASK;
936
937	/* reduce the tx interrupt occurrences. */
938 if (!fid)
939 fid |= TXFR_TXIC; /* irq on completion */
940
941 /* start header at txb[0] to align txw entries */
942 ks->txh.txw[0] = cpu_to_le16(fid);
943 ks->txh.txw[1] = cpu_to_le16(len);
944
945	/* 1. set pseudo-DMA mode */
946 ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
947	/* 2. write status/length info */
948 ks_outblk(ks, ks->txh.txw, 4);
949 /* 3. write pkt data */
950 ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4));
951	/* 4. reset pseudo-DMA mode */
952 ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
953 /* 5. Enqueue Tx(move the pkt from TX buffer into TXQ) */
954 ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
955 /* 6. wait until TXQCR_METFE is auto-cleared */
956 while (ks_rdreg16(ks, KS_TXQCR) & TXQCR_METFE)
957 ;
958}
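The 4-byte header written in step 2 is worth spelling out. This is an illustrative note, not part of the patch:

	/* illustration only: layout of the header pushed by ks_write_qmu()
	 *
	 *   txh.txw[0] : control word - TXFR_TXIC (bit 15) requests an
	 *                interrupt on completion, frame ID in bits 5..0
	 *                (TXFR_TXFID_MASK)
	 *   txh.txw[1] : frame length in bytes
	 *
	 * both words are stored little-endian via cpu_to_le16().
	 */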
959
960static void ks_disable_int(struct ks_net *ks)
961{
962 ks_wrreg16(ks, KS_IER, 0x0000);
963} /* ks_disable_int */
964
965static void ks_enable_int(struct ks_net *ks)
966{
967 ks_wrreg16(ks, KS_IER, ks->rc_ier);
968} /* ks_enable_int */
969
970/**
971 * ks_start_xmit - transmit packet
972 * @skb : The buffer to transmit
973 * @netdev : The device used to transmit the packet.
974 *
975 * Called by the network layer to transmit the @skb.
976 * tx and rx must be mutually exclusive, so the chip interrupt is disabled
977 * and the state lock is held while a tx is in progress.
978 */
979static int ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
980{
981 int retv = NETDEV_TX_OK;
982 struct ks_net *ks = netdev_priv(netdev);
983
984 disable_irq(netdev->irq);
985 ks_disable_int(ks);
986 spin_lock(&ks->statelock);
987
988	/* Extra space is required:
989	 * 4 bytes for alignment, 4 for status/length, 4 for CRC
990 */
991
992 if (likely(ks_tx_fifo_space(ks) >= skb->len + 12)) {
993 ks_write_qmu(ks, skb->data, skb->len);
994 dev_kfree_skb(skb);
995 } else
996 retv = NETDEV_TX_BUSY;
997 spin_unlock(&ks->statelock);
998 ks_enable_int(ks);
999 enable_irq(netdev->irq);
1000 return retv;
1001}
1002
1003/**
1004 * ks_start_rx - start receiving pkts
1005 * @ks : The chip information
1006 *
1007 */
1008static void ks_start_rx(struct ks_net *ks)
1009{
1010 u16 cntl;
1011
1012 /* Enables QMU Receive (RXCR1). */
1013 cntl = ks_rdreg16(ks, KS_RXCR1);
1014 cntl |= RXCR1_RXE ;
1015 ks_wrreg16(ks, KS_RXCR1, cntl);
1016} /* ks_start_rx */
1017
1018/**
1019 * ks_stop_rx - stop receiving pkts
1020 * @ks : The chip information
1021 *
1022 */
1023static void ks_stop_rx(struct ks_net *ks)
1024{
1025 u16 cntl;
1026
1027 /* Disables QMU Receive (RXCR1). */
1028 cntl = ks_rdreg16(ks, KS_RXCR1);
1029 cntl &= ~RXCR1_RXE ;
1030 ks_wrreg16(ks, KS_RXCR1, cntl);
1031
1032} /* ks_stop_rx */
1033
1034static unsigned long const ethernet_polynomial = 0x04c11db7U;
1035
1036static unsigned long ether_gen_crc(int length, u8 *data)
1037{
1038 long crc = -1;
1039 while (--length >= 0) {
1040 u8 current_octet = *data++;
1041 int bit;
1042
1043 for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
1044 crc = (crc << 1) ^
1045 ((crc < 0) ^ (current_octet & 1) ?
1046 ethernet_polynomial : 0);
1047 }
1048 }
1049 return (unsigned long)crc;
1050} /* ether_gen_crc */
1051
1052/**
1053* ks_set_grpaddr - set multicast information
1054* @ks : The chip information
1055*/
1056
1057static void ks_set_grpaddr(struct ks_net *ks)
1058{
1059 u8 i;
1060 u32 index, position, value;
1061
1062 memset(ks->mcast_bits, 0, sizeof(u8) * HW_MCAST_SIZE);
1063
1064 for (i = 0; i < ks->mcast_lst_size; i++) {
1065 position = (ether_gen_crc(6, ks->mcast_lst[i]) >> 26) & 0x3f;
1066 index = position >> 3;
1067 value = 1 << (position & 7);
1068 ks->mcast_bits[index] |= (u8)value;
1069 }
1070
1071 for (i = 0; i < HW_MCAST_SIZE; i++) {
1072 if (i & 1) {
1073 ks_wrreg16(ks, (u16)((KS_MAHTR0 + i) & ~1),
1074 (ks->mcast_bits[i] << 8) |
1075 ks->mcast_bits[i - 1]);
1076 }
1077 }
1078} /* ks_set_grpaddr */
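A worked example of the hash mapping above (illustrative only, not part of the patch): suppose the top six CRC bits for a multicast address come out as 0x2d (45).

	/* illustration only:
	 *   position = 45              (top six bits of the CRC)
	 *   index    = 45 >> 3 = 5     -> mcast_bits[5]
	 *   value    = 1 << (45 & 7)   -> bit 5 of that byte (0x20)
	 * mcast_bits[5] forms the high byte of the 16-bit write to KS_MAHTR2,
	 * so bit 13 of that hash-table register ends up set.
	 */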
1079
1080/*
1081* ks_clear_mcast - clear multicast information
1082*
1083* @ks : The chip information
1084* This routine removes all mcast addresses set in the hardware.
1085*/
1086
1087static void ks_clear_mcast(struct ks_net *ks)
1088{
1089 u16 i, mcast_size;
1090 for (i = 0; i < HW_MCAST_SIZE; i++)
1091 ks->mcast_bits[i] = 0;
1092
1093 mcast_size = HW_MCAST_SIZE >> 2;
1094 for (i = 0; i < mcast_size; i++)
1095 ks_wrreg16(ks, KS_MAHTR0 + (2*i), 0);
1096}
1097
1098static void ks_set_promis(struct ks_net *ks, u16 promiscuous_mode)
1099{
1100 u16 cntl;
1101 ks->promiscuous = promiscuous_mode;
1102 ks_stop_rx(ks); /* Stop receiving for reconfiguration */
1103 cntl = ks_rdreg16(ks, KS_RXCR1);
1104
1105 cntl &= ~RXCR1_FILTER_MASK;
1106 if (promiscuous_mode)
1107 /* Enable Promiscuous mode */
1108 cntl |= RXCR1_RXAE | RXCR1_RXINVF;
1109 else
1110 /* Disable Promiscuous mode (default normal mode) */
1111 cntl |= RXCR1_RXPAFMA;
1112
1113 ks_wrreg16(ks, KS_RXCR1, cntl);
1114
1115 if (ks->enabled)
1116 ks_start_rx(ks);
1117
1118} /* ks_set_promis */
1119
1120static void ks_set_mcast(struct ks_net *ks, u16 mcast)
1121{
1122 u16 cntl;
1123
1124 ks->all_mcast = mcast;
1125 ks_stop_rx(ks); /* Stop receiving for reconfiguration */
1126 cntl = ks_rdreg16(ks, KS_RXCR1);
1127 cntl &= ~RXCR1_FILTER_MASK;
1128 if (mcast)
1129 /* Enable "Perfect with Multicast address passed mode" */
1130 cntl |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
1131 else
1132 /**
1133 * Disable "Perfect with Multicast address passed
1134 * mode" (normal mode).
1135 */
1136 cntl |= RXCR1_RXPAFMA;
1137
1138 ks_wrreg16(ks, KS_RXCR1, cntl);
1139
1140 if (ks->enabled)
1141 ks_start_rx(ks);
1142} /* ks_set_mcast */
1143
1144static void ks_set_rx_mode(struct net_device *netdev)
1145{
1146 struct ks_net *ks = netdev_priv(netdev);
1147 struct dev_mc_list *ptr;
1148
1149 /* Turn on/off promiscuous mode. */
1150 if ((netdev->flags & IFF_PROMISC) == IFF_PROMISC)
1151 ks_set_promis(ks,
1152 (u16)((netdev->flags & IFF_PROMISC) == IFF_PROMISC));
1153 /* Turn on/off all mcast mode. */
1154 else if ((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI)
1155 ks_set_mcast(ks,
1156 (u16)((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI));
1157 else
1158 ks_set_promis(ks, false);
1159
1160 if ((netdev->flags & IFF_MULTICAST) && netdev->mc_count) {
1161 if (netdev->mc_count <= MAX_MCAST_LST) {
1162 int i = 0;
1163 for (ptr = netdev->mc_list; ptr; ptr = ptr->next) {
1164 if (!(*ptr->dmi_addr & 1))
1165 continue;
1166 if (i >= MAX_MCAST_LST)
1167 break;
1168 memcpy(ks->mcast_lst[i++], ptr->dmi_addr,
1169 MAC_ADDR_LEN);
1170 }
1171 ks->mcast_lst_size = (u8)i;
1172 ks_set_grpaddr(ks);
1173 } else {
1174 /**
1175 * List too big to support so
1176 * turn on all mcast mode.
1177 */
1178 ks->mcast_lst_size = MAX_MCAST_LST;
1179 ks_set_mcast(ks, true);
1180 }
1181 } else {
1182 ks->mcast_lst_size = 0;
1183 ks_clear_mcast(ks);
1184 }
1185} /* ks_set_rx_mode */
1186
1187static void ks_set_mac(struct ks_net *ks, u8 *data)
1188{
1189 u16 *pw = (u16 *)data;
1190 u16 w, u;
1191
1192 ks_stop_rx(ks); /* Stop receiving for reconfiguration */
1193
1194 u = *pw++;
1195 w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
1196 ks_wrreg16(ks, KS_MARH, w);
1197
1198 u = *pw++;
1199 w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
1200 ks_wrreg16(ks, KS_MARM, w);
1201
1202 u = *pw;
1203 w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
1204 ks_wrreg16(ks, KS_MARL, w);
1205
1206 memcpy(ks->mac_addr, data, 6);
1207
1208 if (ks->enabled)
1209 ks_start_rx(ks);
1210}
1211
1212static int ks_set_mac_address(struct net_device *netdev, void *paddr)
1213{
1214 struct ks_net *ks = netdev_priv(netdev);
1215 struct sockaddr *addr = paddr;
1216 u8 *da;
1217
1218 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1219
1220 da = (u8 *)netdev->dev_addr;
1221
1222 ks_set_mac(ks, da);
1223 return 0;
1224}
1225
1226static int ks_net_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
1227{
1228 struct ks_net *ks = netdev_priv(netdev);
1229
1230 if (!netif_running(netdev))
1231 return -EINVAL;
1232
1233 return generic_mii_ioctl(&ks->mii, if_mii(req), cmd, NULL);
1234}
1235
1236static const struct net_device_ops ks_netdev_ops = {
1237 .ndo_open = ks_net_open,
1238 .ndo_stop = ks_net_stop,
1239 .ndo_do_ioctl = ks_net_ioctl,
1240 .ndo_start_xmit = ks_start_xmit,
1241 .ndo_set_mac_address = ks_set_mac_address,
1242 .ndo_set_rx_mode = ks_set_rx_mode,
1243 .ndo_change_mtu = eth_change_mtu,
1244 .ndo_validate_addr = eth_validate_addr,
1245};
1246
1247/* ethtool support */
1248
1249static void ks_get_drvinfo(struct net_device *netdev,
1250 struct ethtool_drvinfo *di)
1251{
1252 strlcpy(di->driver, DRV_NAME, sizeof(di->driver));
1253 strlcpy(di->version, "1.00", sizeof(di->version));
1254 strlcpy(di->bus_info, dev_name(netdev->dev.parent),
1255 sizeof(di->bus_info));
1256}
1257
1258static u32 ks_get_msglevel(struct net_device *netdev)
1259{
1260 struct ks_net *ks = netdev_priv(netdev);
1261 return ks->msg_enable;
1262}
1263
1264static void ks_set_msglevel(struct net_device *netdev, u32 to)
1265{
1266 struct ks_net *ks = netdev_priv(netdev);
1267 ks->msg_enable = to;
1268}
1269
1270static int ks_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
1271{
1272 struct ks_net *ks = netdev_priv(netdev);
1273 return mii_ethtool_gset(&ks->mii, cmd);
1274}
1275
1276static int ks_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
1277{
1278 struct ks_net *ks = netdev_priv(netdev);
1279 return mii_ethtool_sset(&ks->mii, cmd);
1280}
1281
1282static u32 ks_get_link(struct net_device *netdev)
1283{
1284 struct ks_net *ks = netdev_priv(netdev);
1285 return mii_link_ok(&ks->mii);
1286}
1287
1288static int ks_nway_reset(struct net_device *netdev)
1289{
1290 struct ks_net *ks = netdev_priv(netdev);
1291 return mii_nway_restart(&ks->mii);
1292}
1293
1294static const struct ethtool_ops ks_ethtool_ops = {
1295 .get_drvinfo = ks_get_drvinfo,
1296 .get_msglevel = ks_get_msglevel,
1297 .set_msglevel = ks_set_msglevel,
1298 .get_settings = ks_get_settings,
1299 .set_settings = ks_set_settings,
1300 .get_link = ks_get_link,
1301 .nway_reset = ks_nway_reset,
1302};
1303
1304/* MII interface controls */
1305
1306/**
1307 * ks_phy_reg - convert MII register into a KS8851 register
1308 * @reg: MII register number.
1309 *
1310 * Return the KS8851 register number for the corresponding MII PHY register
1311 * if possible. Return zero if the MII register has no direct mapping to the
1312 * KS8851 register set.
1313 */
1314static int ks_phy_reg(int reg)
1315{
1316 switch (reg) {
1317 case MII_BMCR:
1318 return KS_P1MBCR;
1319 case MII_BMSR:
1320 return KS_P1MBSR;
1321 case MII_PHYSID1:
1322 return KS_PHY1ILR;
1323 case MII_PHYSID2:
1324 return KS_PHY1IHR;
1325 case MII_ADVERTISE:
1326 return KS_P1ANAR;
1327 case MII_LPA:
1328 return KS_P1ANLPR;
1329 }
1330
1331 return 0x0;
1332}
1333
1334/**
1335 * ks_phy_read - MII interface PHY register read.
1336 * @netdev: The network device the PHY is on.
1337 * @phy_addr: Address of PHY (ignored as we only have one)
1338 * @reg: The register to read.
1339 *
1340 * This call reads data from the PHY register specified in @reg. Since the
1341 * device does not support all the MII registers, the non-existent values
1342 * are always returned as zero.
1343 *
1344 * We return zero for unsupported registers as the MII code does not check
1345 * the value returned for any error status, and simply returns it to the
1346 * caller. The mii-tool that the driver was tested with takes any -ve error
1347 * as real PHY capabilities, thus displaying incorrect data to the user.
1348 */
1349static int ks_phy_read(struct net_device *netdev, int phy_addr, int reg)
1350{
1351 struct ks_net *ks = netdev_priv(netdev);
1352 int ksreg;
1353 int result;
1354
1355 ksreg = ks_phy_reg(reg);
1356 if (!ksreg)
1357 return 0x0; /* no error return allowed, so use zero */
1358
1359 mutex_lock(&ks->lock);
1360 result = ks_rdreg16(ks, ksreg);
1361 mutex_unlock(&ks->lock);
1362
1363 return result;
1364}
1365
1366static void ks_phy_write(struct net_device *netdev,
1367 int phy, int reg, int value)
1368{
1369 struct ks_net *ks = netdev_priv(netdev);
1370 int ksreg;
1371
1372 ksreg = ks_phy_reg(reg);
1373 if (ksreg) {
1374 mutex_lock(&ks->lock);
1375 ks_wrreg16(ks, ksreg, value);
1376 mutex_unlock(&ks->lock);
1377 }
1378}
1379
1380/**
1381 * ks_read_selftest - read the selftest memory info.
1382 * @ks: The device state
1383 *
1384 * Read and check the TX/RX memory selftest information.
1385 */
1386static int ks_read_selftest(struct ks_net *ks)
1387{
1388 unsigned both_done = MBIR_TXMBF | MBIR_RXMBF;
1389 int ret = 0;
1390 unsigned rd;
1391
1392 rd = ks_rdreg16(ks, KS_MBIR);
1393
1394 if ((rd & both_done) != both_done) {
1395 ks_warn(ks, "Memory selftest not finished\n");
1396 return 0;
1397 }
1398
1399 if (rd & MBIR_TXMBFA) {
1400 ks_err(ks, "TX memory selftest fails\n");
1401 ret |= 1;
1402 }
1403
1404 if (rd & MBIR_RXMBFA) {
1405 ks_err(ks, "RX memory selftest fails\n");
1406 ret |= 2;
1407 }
1408
1409	ks_info(ks, "the memory selftest is done\n");
1410 return ret;
1411}
1412
1413static void ks_disable(struct ks_net *ks)
1414{
1415 u16 w;
1416
1417 w = ks_rdreg16(ks, KS_TXCR);
1418
1419 /* Disables QMU Transmit (TXCR). */
1420 w &= ~TXCR_TXE;
1421 ks_wrreg16(ks, KS_TXCR, w);
1422
1423 /* Disables QMU Receive (RXCR1). */
1424 w = ks_rdreg16(ks, KS_RXCR1);
1425 w &= ~RXCR1_RXE ;
1426 ks_wrreg16(ks, KS_RXCR1, w);
1427
1428 ks->enabled = false;
1429
1430} /* ks_disable */
1431
1432static void ks_setup(struct ks_net *ks)
1433{
1434 u16 w;
1435
1436 /**
1437 * Configure QMU Transmit
1438 */
1439
1440 /* Setup Transmit Frame Data Pointer Auto-Increment (TXFDPR) */
1441 ks_wrreg16(ks, KS_TXFDPR, TXFDPR_TXFPAI);
1442
1443 /* Setup Receive Frame Data Pointer Auto-Increment */
1444 ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
1445
1446 /* Setup Receive Frame Threshold - 1 frame (RXFCTFC) */
1447 ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_THRESHOLD_MASK);
1448
1449 /* Setup RxQ Command Control (RXQCR) */
1450 ks->rc_rxqcr = RXQCR_CMD_CNTL;
1451 ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
1452
1453 /**
1454	 * force the mode to half duplex; the default is full duplex,
1455	 * but if auto-negotiation fails, most switches fall back to
1456	 * half-duplex.
1457 */
1458
1459 w = ks_rdreg16(ks, KS_P1MBCR);
1460 w &= ~P1MBCR_FORCE_FDX;
1461 ks_wrreg16(ks, KS_P1MBCR, w);
1462
1463 w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP;
1464 ks_wrreg16(ks, KS_TXCR, w);
1465
1466 w = RXCR1_RXFCE | RXCR1_RXBE | RXCR1_RXUE;
1467
1468 if (ks->promiscuous) /* bPromiscuous */
1469 w |= (RXCR1_RXAE | RXCR1_RXINVF);
1470 else if (ks->all_mcast) /* Multicast address passed mode */
1471 w |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
1472 else /* Normal mode */
1473 w |= RXCR1_RXPAFMA;
1474
1475 ks_wrreg16(ks, KS_RXCR1, w);
1476} /*ks_setup */
1477
1478
1479static void ks_setup_int(struct ks_net *ks)
1480{
1481 ks->rc_ier = 0x00;
1482 /* Clear the interrupts status of the hardware. */
1483 ks_wrreg16(ks, KS_ISR, 0xffff);
1484
1485 /* Enables the interrupts of the hardware. */
1486 ks->rc_ier = (IRQ_LCI | IRQ_TXI | IRQ_RXI);
1487} /* ks_setup_int */
1488
1489void ks_enable(struct ks_net *ks)
1490{
1491 u16 w;
1492
1493 w = ks_rdreg16(ks, KS_TXCR);
1494 /* Enables QMU Transmit (TXCR). */
1495 ks_wrreg16(ks, KS_TXCR, w | TXCR_TXE);
1496
1497 /*
1498 * RX Frame Count Threshold Enable and Auto-Dequeue RXQ Frame
1499 * Enable
1500 */
1501
1502 w = ks_rdreg16(ks, KS_RXQCR);
1503 ks_wrreg16(ks, KS_RXQCR, w | RXQCR_RXFCTE);
1504
1505 /* Enables QMU Receive (RXCR1). */
1506 w = ks_rdreg16(ks, KS_RXCR1);
1507 ks_wrreg16(ks, KS_RXCR1, w | RXCR1_RXE);
1508 ks->enabled = true;
1509} /* ks_enable */
1510
1511static int ks_hw_init(struct ks_net *ks)
1512{
1513#define MHEADER_SIZE (sizeof(struct type_frame_head) * MAX_RECV_FRAMES)
1514 ks->promiscuous = 0;
1515 ks->all_mcast = 0;
1516 ks->mcast_lst_size = 0;
1517
1518	ks->frame_head_info = (struct type_frame_head *)
1519		kmalloc(MHEADER_SIZE, GFP_KERNEL);
1520 if (!ks->frame_head_info) {
1521		printk(KERN_ERR "Error: Failed to allocate frame memory\n");
1522 return false;
1523 }
1524
1525 ks_set_mac(ks, KS_DEFAULT_MAC_ADDRESS);
1526 return true;
1527}
1528
1529
1530static int __devinit ks8851_probe(struct platform_device *pdev)
1531{
1532 int err = -ENOMEM;
1533 struct resource *io_d, *io_c;
1534 struct net_device *netdev;
1535 struct ks_net *ks;
1536 u16 id, data;
1537
1538 io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1539 io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1540
1541 if (!request_mem_region(io_d->start, resource_size(io_d), DRV_NAME))
1542 goto err_mem_region;
1543
1544 if (!request_mem_region(io_c->start, resource_size(io_c), DRV_NAME))
1545 goto err_mem_region1;
1546
1547 netdev = alloc_etherdev(sizeof(struct ks_net));
1548 if (!netdev)
1549 goto err_alloc_etherdev;
1550
1551 SET_NETDEV_DEV(netdev, &pdev->dev);
1552
1553 ks = netdev_priv(netdev);
1554 ks->netdev = netdev;
1555 ks->hw_addr = ioremap(io_d->start, resource_size(io_d));
1556
1557 if (!ks->hw_addr)
1558 goto err_ioremap;
1559
1560 ks->hw_addr_cmd = ioremap(io_c->start, resource_size(io_c));
1561 if (!ks->hw_addr_cmd)
1562 goto err_ioremap1;
1563
1564 ks->irq = platform_get_irq(pdev, 0);
1565
1566 if (ks->irq < 0) {
1567 err = ks->irq;
1568 goto err_get_irq;
1569 }
1570
1571 ks->pdev = pdev;
1572
1573 mutex_init(&ks->lock);
1574 spin_lock_init(&ks->statelock);
1575
1576 netdev->netdev_ops = &ks_netdev_ops;
1577 netdev->ethtool_ops = &ks_ethtool_ops;
1578
1579 /* setup mii state */
1580 ks->mii.dev = netdev;
1581	ks->mii.phy_id = 1;
1582 ks->mii.phy_id_mask = 1;
1583 ks->mii.reg_num_mask = 0xf;
1584 ks->mii.mdio_read = ks_phy_read;
1585 ks->mii.mdio_write = ks_phy_write;
1586
1587 ks_info(ks, "message enable is %d\n", msg_enable);
1588 /* set the default message enable */
1589 ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV |
1590 NETIF_MSG_PROBE |
1591 NETIF_MSG_LINK));
1592 ks_read_config(ks);
1593
1594 /* simple check for a valid chip being connected to the bus */
1595 if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
1596 ks_err(ks, "failed to read device ID\n");
1597 err = -ENODEV;
1598 goto err_register;
1599 }
1600
1601 if (ks_read_selftest(ks)) {
1602		ks_err(ks, "failed memory selftest\n");
1603 err = -ENODEV;
1604 goto err_register;
1605 }
1606
1607 err = register_netdev(netdev);
1608 if (err)
1609 goto err_register;
1610
1611 platform_set_drvdata(pdev, netdev);
1612
1613 ks_soft_reset(ks, GRR_GSR);
1614 ks_hw_init(ks);
1615 ks_disable(ks);
1616 ks_setup(ks);
1617 ks_setup_int(ks);
1618 ks_enable_int(ks);
1619 ks_enable(ks);
1620 memcpy(netdev->dev_addr, ks->mac_addr, 6);
1621
1622 data = ks_rdreg16(ks, KS_OBCR);
1623 ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA);
1624
1625 /**
1626 * If you want to use the default MAC addr,
1627	 * comment out the two calls below.
1628 */
1629
1630 random_ether_addr(netdev->dev_addr);
1631 ks_set_mac(ks, netdev->dev_addr);
1632
1633 id = ks_rdreg16(ks, KS_CIDER);
1634
1635 printk(KERN_INFO DRV_NAME
1636 " Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
1637 (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
1638 return 0;
1639
1640err_register:
1641err_get_irq:
1642 iounmap(ks->hw_addr_cmd);
1643err_ioremap1:
1644 iounmap(ks->hw_addr);
1645err_ioremap:
1646 free_netdev(netdev);
1647err_alloc_etherdev:
1648 release_mem_region(io_c->start, resource_size(io_c));
1649err_mem_region1:
1650 release_mem_region(io_d->start, resource_size(io_d));
1651err_mem_region:
1652 return err;
1653}
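For context, the probe above expects two memory resources from the platform code (the data register window at index 0, the command register window at index 1) and one interrupt. A minimal board-file sketch is shown below; the base addresses and IRQ number are placeholders, not values taken from this patch.

	/* illustration only: example platform device for ks8851_probe() above.
	 * The addresses and IRQ are placeholders for whatever the board wires up.
	 */
	static struct resource ks8851_mll_resources[] = {
		[0] = {					/* data register window */
			.start	= 0x08000000,		/* placeholder address */
			.end	= 0x08000001,
			.flags	= IORESOURCE_MEM,
		},
		[1] = {					/* command register window */
			.start	= 0x08000002,		/* placeholder address */
			.end	= 0x08000003,
			.flags	= IORESOURCE_MEM,
		},
		[2] = {
			.start	= 99,			/* placeholder IRQ */
			.end	= 99,
			.flags	= IORESOURCE_IRQ,
		},
	};

	static struct platform_device ks8851_mll_device = {
		.name		= "ks8851_mll",
		.id		= -1,
		.num_resources	= ARRAY_SIZE(ks8851_mll_resources),
		.resource	= ks8851_mll_resources,
	};

	/* the board init code would then call
	 * platform_device_register(&ks8851_mll_device);
	 */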
1654
1655static int __devexit ks8851_remove(struct platform_device *pdev)
1656{
1657 struct net_device *netdev = platform_get_drvdata(pdev);
1658 struct ks_net *ks = netdev_priv(netdev);
1659 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1660
1661 unregister_netdev(netdev);
1662 iounmap(ks->hw_addr);
1663 free_netdev(netdev);
1664 release_mem_region(iomem->start, resource_size(iomem));
1665 platform_set_drvdata(pdev, NULL);
1666 return 0;
1667
1668}
1669
1670static struct platform_driver ks8851_platform_driver = {
1671 .driver = {
1672 .name = DRV_NAME,
1673 .owner = THIS_MODULE,
1674 },
1675 .probe = ks8851_probe,
1676 .remove = __devexit_p(ks8851_remove),
1677};
1678
1679static int __init ks8851_init(void)
1680{
1681 return platform_driver_register(&ks8851_platform_driver);
1682}
1683
1684static void __exit ks8851_exit(void)
1685{
1686 platform_driver_unregister(&ks8851_platform_driver);
1687}
1688
1689module_init(ks8851_init);
1690module_exit(ks8851_exit);
1691
1692MODULE_DESCRIPTION("KS8851 MLL Network driver");
1693MODULE_AUTHOR("David Choi <david.choi@micrel.com>");
1694MODULE_LICENSE("GPL");
1695module_param_named(message, msg_enable, int, 0);
1696MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");
1697
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index 92ceb689b4d4..2af81735386b 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -828,7 +828,7 @@ static int __exit meth_remove(struct platform_device *pdev)
828 828
829static struct platform_driver meth_driver = { 829static struct platform_driver meth_driver = {
830 .probe = meth_probe, 830 .probe = meth_probe,
831 .remove = __devexit_p(meth_remove), 831 .remove = __exit_p(meth_remove),
832 .driver = { 832 .driver = {
833 .name = "meth", 833 .name = "meth",
834 .owner = THIS_MODULE, 834 .owner = THIS_MODULE,
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 3dd481e77f92..5dd7225b178e 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -1282,6 +1282,7 @@ static struct pci_device_id mlx4_pci_table[] = {
1282 { PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */ 1282 { PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */
1283 { PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */ 1283 { PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
1284 { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/ 1284 { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/
1285 { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */
1285 { 0, } 1286 { 0, }
1286}; 1287};
1287 1288
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index b5aa974827e5..9b9eab107704 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -1714,7 +1714,7 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1714 /* 4 fragments per cmd des */ 1714 /* 4 fragments per cmd des */
1715 no_of_desc = (frag_count + 3) >> 2; 1715 no_of_desc = (frag_count + 3) >> 2;
1716 1716
1717 if (unlikely(no_of_desc + 2) > netxen_tx_avail(tx_ring)) { 1717 if (unlikely(no_of_desc + 2 > netxen_tx_avail(tx_ring))) {
1718 netif_stop_queue(netdev); 1718 netif_stop_queue(netdev);
1719 return NETDEV_TX_BUSY; 1719 return NETDEV_TX_BUSY;
1720 } 1720 }
diff --git a/drivers/net/pasemi_mac_ethtool.c b/drivers/net/pasemi_mac_ethtool.c
index 064a4fe1dd90..28a86224879d 100644
--- a/drivers/net/pasemi_mac_ethtool.c
+++ b/drivers/net/pasemi_mac_ethtool.c
@@ -71,6 +71,9 @@ pasemi_mac_ethtool_get_settings(struct net_device *netdev,
71 struct pasemi_mac *mac = netdev_priv(netdev); 71 struct pasemi_mac *mac = netdev_priv(netdev);
72 struct phy_device *phydev = mac->phydev; 72 struct phy_device *phydev = mac->phydev;
73 73
74 if (!phydev)
75 return -EOPNOTSUPP;
76
74 return phy_ethtool_gset(phydev, cmd); 77 return phy_ethtool_gset(phydev, cmd);
75} 78}
76 79
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 474876c879cb..bd3447f04902 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1754,14 +1754,14 @@ static struct pcmcia_device_id pcnet_ids[] = {
1754 PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"), 1754 PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"),
1755 PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"), 1755 PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"),
1756 PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"), 1756 PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"),
1757 PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "DP83903.cis"), 1757 PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"),
1758 PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "DP83903.cis"), 1758 PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"),
1759 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "DP83903.cis"), 1759 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "cis/DP83903.cis"),
1760 PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"), 1760 PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"),
1761 PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "PE520.cis"), 1761 PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "PE520.cis"),
1762 PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "NE2K.cis"), 1762 PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"),
1763 PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "PE-200.cis"), 1763 PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "PE-200.cis"),
1764 PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "tamarack.cis"), 1764 PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "cis/tamarack.cis"),
1765 PCMCIA_DEVICE_PROD_ID12("Ethernet", "CF Size PC Card", 0x00b2e941, 0x43ac239b), 1765 PCMCIA_DEVICE_PROD_ID12("Ethernet", "CF Size PC Card", 0x00b2e941, 0x43ac239b),
1766 PCMCIA_DEVICE_PROD_ID123("Fast Ethernet", "CF Size PC Card", "1.0", 1766 PCMCIA_DEVICE_PROD_ID123("Fast Ethernet", "CF Size PC Card", "1.0",
1767 0xb4be14e3, 0x43ac239b, 0x0877b627), 1767 0xb4be14e3, 0x43ac239b, 0x0877b627),
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index cc394d073755..5910df60c93e 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -2179,7 +2179,7 @@ static int pppol2tp_session_setsockopt(struct sock *sk,
2179 * session or the special tunnel type. 2179 * session or the special tunnel type.
2180 */ 2180 */
2181static int pppol2tp_setsockopt(struct socket *sock, int level, int optname, 2181static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
2182 char __user *optval, int optlen) 2182 char __user *optval, unsigned int optlen)
2183{ 2183{
2184 struct sock *sk = sock->sk; 2184 struct sock *sk = sock->sk;
2185 struct pppol2tp_session *session = sk->sk_user_data; 2185 struct pppol2tp_session *session = sk->sk_user_data;
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index a9845a2f243f..3ec6e85587a2 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -9,6 +9,7 @@
9 9
10#include <linux/pci.h> 10#include <linux/pci.h>
11#include <linux/netdevice.h> 11#include <linux/netdevice.h>
12#include <linux/rtnetlink.h>
12 13
13/* 14/*
14 * General definitions... 15 * General definitions...
@@ -135,9 +136,9 @@ enum {
135 RST_FO_TFO = (1 << 0), 136 RST_FO_TFO = (1 << 0),
136 RST_FO_RR_MASK = 0x00060000, 137 RST_FO_RR_MASK = 0x00060000,
137 RST_FO_RR_CQ_CAM = 0x00000000, 138 RST_FO_RR_CQ_CAM = 0x00000000,
138 RST_FO_RR_DROP = 0x00000001, 139 RST_FO_RR_DROP = 0x00000002,
139 RST_FO_RR_DQ = 0x00000002, 140 RST_FO_RR_DQ = 0x00000004,
140 RST_FO_RR_RCV_FUNC_CQ = 0x00000003, 141 RST_FO_RR_RCV_FUNC_CQ = 0x00000006,
141 RST_FO_FRB = (1 << 12), 142 RST_FO_FRB = (1 << 12),
142 RST_FO_MOP = (1 << 13), 143 RST_FO_MOP = (1 << 13),
143 RST_FO_REG = (1 << 14), 144 RST_FO_REG = (1 << 14),
@@ -1381,15 +1382,15 @@ struct intr_context {
1381 1382
1382/* adapter flags definitions. */ 1383/* adapter flags definitions. */
1383enum { 1384enum {
1384 QL_ADAPTER_UP = (1 << 0), /* Adapter has been brought up. */ 1385 QL_ADAPTER_UP = 0, /* Adapter has been brought up. */
1385 QL_LEGACY_ENABLED = (1 << 3), 1386 QL_LEGACY_ENABLED = 1,
1386 QL_MSI_ENABLED = (1 << 3), 1387 QL_MSI_ENABLED = 2,
1387 QL_MSIX_ENABLED = (1 << 4), 1388 QL_MSIX_ENABLED = 3,
1388 QL_DMA64 = (1 << 5), 1389 QL_DMA64 = 4,
1389 QL_PROMISCUOUS = (1 << 6), 1390 QL_PROMISCUOUS = 5,
1390 QL_ALLMULTI = (1 << 7), 1391 QL_ALLMULTI = 6,
1391 QL_PORT_CFG = (1 << 8), 1392 QL_PORT_CFG = 7,
1392 QL_CAM_RT_SET = (1 << 9), 1393 QL_CAM_RT_SET = 8,
1393}; 1394};
1394 1395
1395/* link_status bit definitions */ 1396/* link_status bit definitions */
@@ -1477,7 +1478,6 @@ struct ql_adapter {
1477 u32 mailbox_in; 1478 u32 mailbox_in;
1478 u32 mailbox_out; 1479 u32 mailbox_out;
1479 struct mbox_params idc_mbc; 1480 struct mbox_params idc_mbc;
1480 struct mutex mpi_mutex;
1481 1481
1482 int tx_ring_size; 1482 int tx_ring_size;
1483 int rx_ring_size; 1483 int rx_ring_size;
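Reviewer note: the adapter flag constants above move from (1 << n) masks to plain bit numbers (the old table even had QL_LEGACY_ENABLED and QL_MSI_ENABLED both defined as (1 << 3)). qdev->flags is manipulated with set_bit()/test_bit()/clear_bit(), which take a bit index, not a mask. A minimal sketch of that pattern, using hypothetical names:

/* Sketch only: atomic flag words want bit *numbers*, not masks. */
#include <linux/bitops.h>
#include <linux/kernel.h>

enum {
        EX_ADAPTER_UP  = 0,     /* bit index */
        EX_MSI_ENABLED = 2,
};

static unsigned long ex_flags;

static void ex_bring_up(void)
{
        set_bit(EX_ADAPTER_UP, &ex_flags);

        if (test_bit(EX_MSI_ENABLED, &ex_flags))
                pr_debug("MSI already enabled\n");

        clear_bit(EX_ADAPTER_UP, &ex_flags);
}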
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 68f9bd280f86..52073946bce3 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -45,7 +45,6 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
45 if (!netif_running(qdev->ndev)) 45 if (!netif_running(qdev->ndev))
46 return status; 46 return status;
47 47
48 spin_lock(&qdev->hw_lock);
49 /* Skip the default queue, and update the outbound handler 48 /* Skip the default queue, and update the outbound handler
50 * queues if they changed. 49 * queues if they changed.
51 */ 50 */
@@ -92,7 +91,6 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
92 } 91 }
93 } 92 }
94exit: 93exit:
95 spin_unlock(&qdev->hw_lock);
96 return status; 94 return status;
97} 95}
98 96
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 7783c5db81dc..61680715cde0 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -34,7 +34,6 @@
34#include <linux/etherdevice.h> 34#include <linux/etherdevice.h>
35#include <linux/ethtool.h> 35#include <linux/ethtool.h>
36#include <linux/skbuff.h> 36#include <linux/skbuff.h>
37#include <linux/rtnetlink.h>
38#include <linux/if_vlan.h> 37#include <linux/if_vlan.h>
39#include <linux/delay.h> 38#include <linux/delay.h>
40#include <linux/mm.h> 39#include <linux/mm.h>
@@ -1926,12 +1925,10 @@ static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
1926 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); 1925 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
1927 if (status) 1926 if (status)
1928 return; 1927 return;
1929 spin_lock(&qdev->hw_lock);
1930 if (ql_set_mac_addr_reg 1928 if (ql_set_mac_addr_reg
1931 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { 1929 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1932 QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n"); 1930 QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
1933 } 1931 }
1934 spin_unlock(&qdev->hw_lock);
1935 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 1932 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
1936} 1933}
1937 1934
@@ -1945,12 +1942,10 @@ static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
1945 if (status) 1942 if (status)
1946 return; 1943 return;
1947 1944
1948 spin_lock(&qdev->hw_lock);
1949 if (ql_set_mac_addr_reg 1945 if (ql_set_mac_addr_reg
1950 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { 1946 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1951 QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n"); 1947 QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
1952 } 1948 }
1953 spin_unlock(&qdev->hw_lock);
1954 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 1949 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
1955 1950
1956} 1951}
@@ -2001,15 +1996,17 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
2001 /* 1996 /*
2002 * Check MPI processor activity. 1997 * Check MPI processor activity.
2003 */ 1998 */
2004 if (var & STS_PI) { 1999 if ((var & STS_PI) &&
2000 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2005 /* 2001 /*
2006 * We've got an async event or mailbox completion. 2002 * We've got an async event or mailbox completion.
2007 * Handle it and clear the source of the interrupt. 2003 * Handle it and clear the source of the interrupt.
2008 */ 2004 */
2009 QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n"); 2005 QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
2010 ql_disable_completion_interrupt(qdev, intr_context->intr); 2006 ql_disable_completion_interrupt(qdev, intr_context->intr);
2011 queue_delayed_work_on(smp_processor_id(), qdev->workqueue, 2007 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2012 &qdev->mpi_work, 0); 2008 queue_delayed_work_on(smp_processor_id(),
2009 qdev->workqueue, &qdev->mpi_work, 0);
2013 work_done++; 2010 work_done++;
2014 } 2011 }
2015 2012
@@ -3142,14 +3139,14 @@ static int ql_route_initialize(struct ql_adapter *qdev)
3142{ 3139{
3143 int status = 0; 3140 int status = 0;
3144 3141
3145 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); 3142 /* Clear all the entries in the routing table. */
3143 status = ql_clear_routing_entries(qdev);
3146 if (status) 3144 if (status)
3147 return status; 3145 return status;
3148 3146
3149 /* Clear all the entries in the routing table. */ 3147 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3150 status = ql_clear_routing_entries(qdev);
3151 if (status) 3148 if (status)
3152 goto exit; 3149 return status;
3153 3150
3154 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1); 3151 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
3155 if (status) { 3152 if (status) {
@@ -3380,12 +3377,10 @@ static int ql_adapter_down(struct ql_adapter *qdev)
3380 3377
3381 ql_free_rx_buffers(qdev); 3378 ql_free_rx_buffers(qdev);
3382 3379
3383 spin_lock(&qdev->hw_lock);
3384 status = ql_adapter_reset(qdev); 3380 status = ql_adapter_reset(qdev);
3385 if (status) 3381 if (status)
3386 QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n", 3382 QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
3387 qdev->func); 3383 qdev->func);
3388 spin_unlock(&qdev->hw_lock);
3389 return status; 3384 return status;
3390} 3385}
3391 3386
@@ -3587,7 +3582,6 @@ static void qlge_set_multicast_list(struct net_device *ndev)
3587 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); 3582 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3588 if (status) 3583 if (status)
3589 return; 3584 return;
3590 spin_lock(&qdev->hw_lock);
3591 /* 3585 /*
3592 * Set or clear promiscuous mode if a 3586 * Set or clear promiscuous mode if a
3593 * transition is taking place. 3587 * transition is taking place.
@@ -3664,7 +3658,6 @@ static void qlge_set_multicast_list(struct net_device *ndev)
3664 } 3658 }
3665 } 3659 }
3666exit: 3660exit:
3667 spin_unlock(&qdev->hw_lock);
3668 ql_sem_unlock(qdev, SEM_RT_IDX_MASK); 3661 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3669} 3662}
3670 3663
@@ -3684,10 +3677,8 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
3684 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); 3677 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
3685 if (status) 3678 if (status)
3686 return status; 3679 return status;
3687 spin_lock(&qdev->hw_lock);
3688 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr, 3680 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
3689 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ); 3681 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
3690 spin_unlock(&qdev->hw_lock);
3691 if (status) 3682 if (status)
3692 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n"); 3683 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
3693 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 3684 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
@@ -3705,7 +3696,7 @@ static void ql_asic_reset_work(struct work_struct *work)
3705 struct ql_adapter *qdev = 3696 struct ql_adapter *qdev =
3706 container_of(work, struct ql_adapter, asic_reset_work.work); 3697 container_of(work, struct ql_adapter, asic_reset_work.work);
3707 int status; 3698 int status;
3708 3699 rtnl_lock();
3709 status = ql_adapter_down(qdev); 3700 status = ql_adapter_down(qdev);
3710 if (status) 3701 if (status)
3711 goto error; 3702 goto error;
@@ -3713,12 +3704,12 @@ static void ql_asic_reset_work(struct work_struct *work)
3713 status = ql_adapter_up(qdev); 3704 status = ql_adapter_up(qdev);
3714 if (status) 3705 if (status)
3715 goto error; 3706 goto error;
3716 3707 rtnl_unlock();
3717 return; 3708 return;
3718error: 3709error:
3719 QPRINTK(qdev, IFUP, ALERT, 3710 QPRINTK(qdev, IFUP, ALERT,
3720 "Driver up/down cycle failed, closing device\n"); 3711 "Driver up/down cycle failed, closing device\n");
3721 rtnl_lock(); 3712
3722 set_bit(QL_ADAPTER_UP, &qdev->flags); 3713 set_bit(QL_ADAPTER_UP, &qdev->flags);
3723 dev_close(qdev->ndev); 3714 dev_close(qdev->ndev);
3724 rtnl_unlock(); 3715 rtnl_unlock();
@@ -3834,11 +3825,14 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
3834 return err; 3825 return err;
3835 } 3826 }
3836 3827
3828 qdev->ndev = ndev;
3829 qdev->pdev = pdev;
3830 pci_set_drvdata(pdev, ndev);
3837 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); 3831 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
3838 if (pos <= 0) { 3832 if (pos <= 0) {
3839 dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, " 3833 dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
3840 "aborting.\n"); 3834 "aborting.\n");
3841 goto err_out; 3835 return pos;
3842 } else { 3836 } else {
3843 pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16); 3837 pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
3844 val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN; 3838 val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
@@ -3851,7 +3845,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
3851 err = pci_request_regions(pdev, DRV_NAME); 3845 err = pci_request_regions(pdev, DRV_NAME);
3852 if (err) { 3846 if (err) {
3853 dev_err(&pdev->dev, "PCI region request failed.\n"); 3847 dev_err(&pdev->dev, "PCI region request failed.\n");
3854 goto err_out; 3848 return err;
3855 } 3849 }
3856 3850
3857 pci_set_master(pdev); 3851 pci_set_master(pdev);
@@ -3869,7 +3863,6 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
3869 goto err_out; 3863 goto err_out;
3870 } 3864 }
3871 3865
3872 pci_set_drvdata(pdev, ndev);
3873 qdev->reg_base = 3866 qdev->reg_base =
3874 ioremap_nocache(pci_resource_start(pdev, 1), 3867 ioremap_nocache(pci_resource_start(pdev, 1),
3875 pci_resource_len(pdev, 1)); 3868 pci_resource_len(pdev, 1));
@@ -3889,8 +3882,6 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
3889 goto err_out; 3882 goto err_out;
3890 } 3883 }
3891 3884
3892 qdev->ndev = ndev;
3893 qdev->pdev = pdev;
3894 err = ql_get_board_info(qdev); 3885 err = ql_get_board_info(qdev);
3895 if (err) { 3886 if (err) {
3896 dev_err(&pdev->dev, "Register access failed.\n"); 3887 dev_err(&pdev->dev, "Register access failed.\n");
@@ -3930,7 +3921,6 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
3930 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work); 3921 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
3931 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work); 3922 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
3932 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work); 3923 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
3933 mutex_init(&qdev->mpi_mutex);
3934 init_completion(&qdev->ide_completion); 3924 init_completion(&qdev->ide_completion);
3935 3925
3936 if (!cards_found) { 3926 if (!cards_found) {
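Reviewer note: the hunks above drop the hw_lock spinlock around register writes that are already guarded by the hardware semaphore (ql_sem_spinlock()), and the ISR now reacts to STS_PI only when the MPI interrupt is actually unmasked, masks that source via the INTR_MASK write, and defers the mailbox handling to the delayed work item. A minimal sketch of that mask-then-defer pattern for a hypothetical device (names and register layout invented for illustration, not the qlge hardware):

/* Sketch only: mask a slow interrupt source in the ISR and hand the
 * real work to a delayed work item running in process context. */
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/io.h>

#define EX_STATUS       0x00
#define EX_INTR_MASK    0x04
#define EX_SLOW_EVENT   (1 << 3)

struct ex_adapter {
        void __iomem            *regs;
        struct workqueue_struct *wq;
        struct delayed_work      slow_work;
};

static irqreturn_t ex_isr(int irq, void *dev_id)
{
        struct ex_adapter *ad = dev_id;
        u32 status = readl(ad->regs + EX_STATUS);

        if (status & EX_SLOW_EVENT) {
                /* Mask the source so it cannot refire until re-enabled
                 * (upper half of the register is the write-enable mask,
                 * mirroring the INTR_MASK convention in the diff). */
                writel(EX_SLOW_EVENT << 16, ad->regs + EX_INTR_MASK);
                /* Let process context talk to the firmware. */
                queue_delayed_work_on(smp_processor_id(), ad->wq,
                                      &ad->slow_work, 0);
                return IRQ_HANDLED;
        }
        return IRQ_NONE;
}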
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index 6685bd97da91..c2e43073047e 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -472,7 +472,6 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
472{ 472{
473 int status, count; 473 int status, count;
474 474
475 mutex_lock(&qdev->mpi_mutex);
476 475
477 /* Begin polled mode for MPI */ 476 /* Begin polled mode for MPI */
478 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); 477 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
@@ -541,7 +540,6 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
541 status = -EIO; 540 status = -EIO;
542 } 541 }
543end: 542end:
544 mutex_unlock(&qdev->mpi_mutex);
545 /* End polled mode for MPI */ 543 /* End polled mode for MPI */
546 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); 544 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
547 return status; 545 return status;
@@ -776,7 +774,9 @@ static int ql_idc_wait(struct ql_adapter *qdev)
776static int ql_set_port_cfg(struct ql_adapter *qdev) 774static int ql_set_port_cfg(struct ql_adapter *qdev)
777{ 775{
778 int status; 776 int status;
777 rtnl_lock();
779 status = ql_mb_set_port_cfg(qdev); 778 status = ql_mb_set_port_cfg(qdev);
779 rtnl_unlock();
780 if (status) 780 if (status)
781 return status; 781 return status;
782 status = ql_idc_wait(qdev); 782 status = ql_idc_wait(qdev);
@@ -797,7 +797,9 @@ void ql_mpi_port_cfg_work(struct work_struct *work)
797 container_of(work, struct ql_adapter, mpi_port_cfg_work.work); 797 container_of(work, struct ql_adapter, mpi_port_cfg_work.work);
798 int status; 798 int status;
799 799
800 rtnl_lock();
800 status = ql_mb_get_port_cfg(qdev); 801 status = ql_mb_get_port_cfg(qdev);
802 rtnl_unlock();
801 if (status) { 803 if (status) {
802 QPRINTK(qdev, DRV, ERR, 804 QPRINTK(qdev, DRV, ERR,
803 "Bug: Failed to get port config data.\n"); 805 "Bug: Failed to get port config data.\n");
@@ -855,7 +857,9 @@ void ql_mpi_idc_work(struct work_struct *work)
855 * needs to be set. 857 * needs to be set.
856 * */ 858 * */
857 set_bit(QL_CAM_RT_SET, &qdev->flags); 859 set_bit(QL_CAM_RT_SET, &qdev->flags);
860 rtnl_lock();
858 status = ql_mb_idc_ack(qdev); 861 status = ql_mb_idc_ack(qdev);
862 rtnl_unlock();
859 if (status) { 863 if (status) {
860 QPRINTK(qdev, DRV, ERR, 864 QPRINTK(qdev, DRV, ERR,
861 "Bug: No pending IDC!\n"); 865 "Bug: No pending IDC!\n");
@@ -871,7 +875,7 @@ void ql_mpi_work(struct work_struct *work)
871 struct mbox_params *mbcp = &mbc; 875 struct mbox_params *mbcp = &mbc;
872 int err = 0; 876 int err = 0;
873 877
874 mutex_lock(&qdev->mpi_mutex); 878 rtnl_lock();
875 879
876 while (ql_read32(qdev, STS) & STS_PI) { 880 while (ql_read32(qdev, STS) & STS_PI) {
877 memset(mbcp, 0, sizeof(struct mbox_params)); 881 memset(mbcp, 0, sizeof(struct mbox_params));
@@ -884,7 +888,7 @@ void ql_mpi_work(struct work_struct *work)
884 break; 888 break;
885 } 889 }
886 890
887 mutex_unlock(&qdev->mpi_mutex); 891 rtnl_unlock();
888 ql_enable_completion_interrupt(qdev, 0); 892 ql_enable_completion_interrupt(qdev, 0);
889} 893}
890 894
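Reviewer note: qdev->mpi_mutex is gone; mailbox commands and the MPI work handlers are now serialized by the RTNL lock, which the core already holds across ndo_open/ndo_stop, so firmware traffic is ordered against up/down cycles. A minimal sketch of a work handler written that way (hypothetical names):

/* Sketch only: serialize a driver work handler against ifup/ifdown by
 * taking the RTNL lock instead of a private mutex. */
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>

struct ex_adapter {
        struct delayed_work mpi_work;
};

static void ex_process_mailbox(struct ex_adapter *ad)
{
        /* stand-in for the real mailbox polling loop */
}

static void ex_mpi_work(struct work_struct *work)
{
        struct ex_adapter *ad =
                container_of(work, struct ex_adapter, mpi_work.work);

        rtnl_lock();
        ex_process_mailbox(ad);
        rtnl_unlock();
}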
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index ecf3279fbef5..f4dfd1f679a9 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -826,7 +826,7 @@ static int __exit sgiseeq_remove(struct platform_device *pdev)
826 826
827static struct platform_driver sgiseeq_driver = { 827static struct platform_driver sgiseeq_driver = {
828 .probe = sgiseeq_probe, 828 .probe = sgiseeq_probe,
829 .remove = __devexit_p(sgiseeq_remove), 829 .remove = __exit_p(sgiseeq_remove),
830 .driver = { 830 .driver = {
831 .name = "sgiseeq", 831 .name = "sgiseeq",
832 .owner = THIS_MODULE, 832 .owner = THIS_MODULE,
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 55bad4081966..01f6811f1324 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3935,11 +3935,14 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3935#endif 3935#endif
3936 3936
3937 err = -ENOMEM; 3937 err = -ENOMEM;
3938 hw = kzalloc(sizeof(*hw), GFP_KERNEL); 3938 /* space for skge@pci:0000:04:00.0 */
3939 hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:" )
3940 + strlen(pci_name(pdev)) + 1, GFP_KERNEL);
3939 if (!hw) { 3941 if (!hw) {
3940 dev_err(&pdev->dev, "cannot allocate hardware struct\n"); 3942 dev_err(&pdev->dev, "cannot allocate hardware struct\n");
3941 goto err_out_free_regions; 3943 goto err_out_free_regions;
3942 } 3944 }
3945 sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));
3943 3946
3944 hw->pdev = pdev; 3947 hw->pdev = pdev;
3945 spin_lock_init(&hw->hw_lock); 3948 spin_lock_init(&hw->hw_lock);
@@ -3974,7 +3977,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3974 goto err_out_free_netdev; 3977 goto err_out_free_netdev;
3975 } 3978 }
3976 3979
3977 err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, dev->name, hw); 3980 err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, hw->irq_name, hw);
3978 if (err) { 3981 if (err) {
3979 dev_err(&pdev->dev, "%s: cannot assign irq %d\n", 3982 dev_err(&pdev->dev, "%s: cannot assign irq %d\n",
3980 dev->name, pdev->irq); 3983 dev->name, pdev->irq);
@@ -3982,14 +3985,17 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3982 } 3985 }
3983 skge_show_addr(dev); 3986 skge_show_addr(dev);
3984 3987
3985 if (hw->ports > 1 && (dev1 = skge_devinit(hw, 1, using_dac))) { 3988 if (hw->ports > 1) {
3986 if (register_netdev(dev1) == 0) 3989 dev1 = skge_devinit(hw, 1, using_dac);
3990 if (dev1 && register_netdev(dev1) == 0)
3987 skge_show_addr(dev1); 3991 skge_show_addr(dev1);
3988 else { 3992 else {
3989 /* Failure to register second port need not be fatal */ 3993 /* Failure to register second port need not be fatal */
3990 dev_warn(&pdev->dev, "register of second port failed\n"); 3994 dev_warn(&pdev->dev, "register of second port failed\n");
3991 hw->dev[1] = NULL; 3995 hw->dev[1] = NULL;
3992 free_netdev(dev1); 3996 hw->ports = 1;
3997 if (dev1)
3998 free_netdev(dev1);
3993 } 3999 }
3994 } 4000 }
3995 pci_set_drvdata(pdev, hw); 4001 pci_set_drvdata(pdev, hw);
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 17caccbb7685..831de1b6e96e 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -2423,6 +2423,8 @@ struct skge_hw {
2423 u16 phy_addr; 2423 u16 phy_addr;
2424 spinlock_t phy_lock; 2424 spinlock_t phy_lock;
2425 struct tasklet_struct phy_task; 2425 struct tasklet_struct phy_task;
2426
2427 char irq_name[0]; /* skge@pci:000:04:00.0 */
2426}; 2428};
2427 2429
2428enum pause_control { 2430enum pause_control {
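Reviewer note: skge (and sky2 below) stop passing dev->name to request_irq() and instead build a stable per-board name, "skge@pci:<bus id>", in a zero-length char array tacked onto the end of the hw struct; the allocation is simply grown by the length of that string. A minimal sketch of the allocation pattern, with a hypothetical driver name and struct:

/* Sketch only: a trailing zero-length array holding a unique IRQ name. */
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/string.h>

#define EX_DRV_NAME "exdrv"

struct ex_hw {
        struct pci_dev  *pdev;
        char            irq_name[0];    /* "exdrv@pci:0000:04:00.0" */
};

static struct ex_hw *ex_alloc_hw(struct pci_dev *pdev)
{
        struct ex_hw *hw;

        hw = kzalloc(sizeof(*hw) + strlen(EX_DRV_NAME "@pci:")
                     + strlen(pci_name(pdev)) + 1, GFP_KERNEL);
        if (!hw)
                return NULL;

        hw->pdev = pdev;
        sprintf(hw->irq_name, EX_DRV_NAME "@pci:%s", pci_name(pdev));
        return hw;
}

The resulting name is then handed to request_irq() in place of dev->name, so the entry in /proc/interrupts identifies the board by its PCI address rather than by the (renamable) netdev name.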
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index ef1165718dd7..2ab5c39f33ca 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -4487,13 +4487,16 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
4487 wol_default = device_may_wakeup(&pdev->dev) ? WAKE_MAGIC : 0; 4487 wol_default = device_may_wakeup(&pdev->dev) ? WAKE_MAGIC : 0;
4488 4488
4489 err = -ENOMEM; 4489 err = -ENOMEM;
4490 hw = kzalloc(sizeof(*hw), GFP_KERNEL); 4490
4491 hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:")
4492 + strlen(pci_name(pdev)) + 1, GFP_KERNEL);
4491 if (!hw) { 4493 if (!hw) {
4492 dev_err(&pdev->dev, "cannot allocate hardware struct\n"); 4494 dev_err(&pdev->dev, "cannot allocate hardware struct\n");
4493 goto err_out_free_regions; 4495 goto err_out_free_regions;
4494 } 4496 }
4495 4497
4496 hw->pdev = pdev; 4498 hw->pdev = pdev;
4499 sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));
4497 4500
4498 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); 4501 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
4499 if (!hw->regs) { 4502 if (!hw->regs) {
@@ -4539,7 +4542,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
4539 4542
4540 err = request_irq(pdev->irq, sky2_intr, 4543 err = request_irq(pdev->irq, sky2_intr,
4541 (hw->flags & SKY2_HW_USE_MSI) ? 0 : IRQF_SHARED, 4544 (hw->flags & SKY2_HW_USE_MSI) ? 0 : IRQF_SHARED,
4542 dev->name, hw); 4545 hw->irq_name, hw);
4543 if (err) { 4546 if (err) {
4544 dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq); 4547 dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
4545 goto err_out_unregister; 4548 goto err_out_unregister;
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index e0f23a101043..ed54129698b4 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2085,6 +2085,8 @@ struct sky2_hw {
2085 struct timer_list watchdog_timer; 2085 struct timer_list watchdog_timer;
2086 struct work_struct restart_work; 2086 struct work_struct restart_work;
2087 wait_queue_head_t msi_wait; 2087 wait_queue_head_t msi_wait;
2088
2089 char irq_name[0];
2088}; 2090};
2089 2091
2090static inline int sky2_is_copper(const struct sky2_hw *hw) 2092static inline int sky2_is_copper(const struct sky2_hw *hw)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index f09bc5dfe8b2..ba5d3fe753b6 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -902,11 +902,12 @@ static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
902 struct tg3 *tp = bp->priv; 902 struct tg3 *tp = bp->priv;
903 u32 val; 903 u32 val;
904 904
905 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED) 905 spin_lock_bh(&tp->lock);
906 return -EAGAIN;
907 906
908 if (tg3_readphy(tp, reg, &val)) 907 if (tg3_readphy(tp, reg, &val))
909 return -EIO; 908 val = -EIO;
909
910 spin_unlock_bh(&tp->lock);
910 911
911 return val; 912 return val;
912} 913}
@@ -914,14 +915,16 @@ static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
914static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val) 915static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
915{ 916{
916 struct tg3 *tp = bp->priv; 917 struct tg3 *tp = bp->priv;
918 u32 ret = 0;
917 919
918 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED) 920 spin_lock_bh(&tp->lock);
919 return -EAGAIN;
920 921
921 if (tg3_writephy(tp, reg, val)) 922 if (tg3_writephy(tp, reg, val))
922 return -EIO; 923 ret = -EIO;
923 924
924 return 0; 925 spin_unlock_bh(&tp->lock);
926
927 return ret;
925} 928}
926 929
927static int tg3_mdio_reset(struct mii_bus *bp) 930static int tg3_mdio_reset(struct mii_bus *bp)
@@ -1011,12 +1014,6 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
1011 1014
1012static void tg3_mdio_start(struct tg3 *tp) 1015static void tg3_mdio_start(struct tg3 *tp)
1013{ 1016{
1014 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1015 mutex_lock(&tp->mdio_bus->mdio_lock);
1016 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
1017 mutex_unlock(&tp->mdio_bus->mdio_lock);
1018 }
1019
1020 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL; 1017 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1021 tw32_f(MAC_MI_MODE, tp->mi_mode); 1018 tw32_f(MAC_MI_MODE, tp->mi_mode);
1022 udelay(80); 1019 udelay(80);
@@ -1041,15 +1038,6 @@ static void tg3_mdio_start(struct tg3 *tp)
1041 tg3_mdio_config_5785(tp); 1038 tg3_mdio_config_5785(tp);
1042} 1039}
1043 1040
1044static void tg3_mdio_stop(struct tg3 *tp)
1045{
1046 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1047 mutex_lock(&tp->mdio_bus->mdio_lock);
1048 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
1049 mutex_unlock(&tp->mdio_bus->mdio_lock);
1050 }
1051}
1052
1053static int tg3_mdio_init(struct tg3 *tp) 1041static int tg3_mdio_init(struct tg3 *tp)
1054{ 1042{
1055 int i; 1043 int i;
@@ -1141,7 +1129,6 @@ static void tg3_mdio_fini(struct tg3 *tp)
1141 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED; 1129 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
1142 mdiobus_unregister(tp->mdio_bus); 1130 mdiobus_unregister(tp->mdio_bus);
1143 mdiobus_free(tp->mdio_bus); 1131 mdiobus_free(tp->mdio_bus);
1144 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
1145 } 1132 }
1146} 1133}
1147 1134
@@ -1363,7 +1350,7 @@ static void tg3_adjust_link(struct net_device *dev)
1363 struct tg3 *tp = netdev_priv(dev); 1350 struct tg3 *tp = netdev_priv(dev);
1364 struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR]; 1351 struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1365 1352
1366 spin_lock(&tp->lock); 1353 spin_lock_bh(&tp->lock);
1367 1354
1368 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK | 1355 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1369 MAC_MODE_HALF_DUPLEX); 1356 MAC_MODE_HALF_DUPLEX);
@@ -1431,7 +1418,7 @@ static void tg3_adjust_link(struct net_device *dev)
1431 tp->link_config.active_speed = phydev->speed; 1418 tp->link_config.active_speed = phydev->speed;
1432 tp->link_config.active_duplex = phydev->duplex; 1419 tp->link_config.active_duplex = phydev->duplex;
1433 1420
1434 spin_unlock(&tp->lock); 1421 spin_unlock_bh(&tp->lock);
1435 1422
1436 if (linkmesg) 1423 if (linkmesg)
1437 tg3_link_report(tp); 1424 tg3_link_report(tp);
@@ -6392,8 +6379,6 @@ static int tg3_chip_reset(struct tg3 *tp)
6392 6379
6393 tg3_nvram_lock(tp); 6380 tg3_nvram_lock(tp);
6394 6381
6395 tg3_mdio_stop(tp);
6396
6397 tg3_ape_lock(tp, TG3_APE_LOCK_GRC); 6382 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
6398 6383
6399 /* No matching tg3_nvram_unlock() after this because 6384 /* No matching tg3_nvram_unlock() after this because
@@ -8698,6 +8683,8 @@ static int tg3_close(struct net_device *dev)
8698 8683
8699 del_timer_sync(&tp->timer); 8684 del_timer_sync(&tp->timer);
8700 8685
8686 tg3_phy_stop(tp);
8687
8701 tg3_full_lock(tp, 1); 8688 tg3_full_lock(tp, 1);
8702#if 0 8689#if 0
8703 tg3_dump_state(tp); 8690 tg3_dump_state(tp);
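Reviewer note: the tg3 MDIO accessors no longer bounce -EAGAIN off a TG3_FLG3_MDIOBUS_PAUSED flag; they simply take tp->lock with the BH-safe spinlock variants around the PHY register access, and tg3_adjust_link switches to spin_lock_bh() to match. A minimal sketch of an mii_bus read callback written in that style (hypothetical driver, not tg3):

/* Sketch only: an mii_bus read op serialized by a BH-safe spinlock. */
#include <linux/spinlock.h>
#include <linux/phy.h>
#include <linux/errno.h>

struct ex_priv {
        spinlock_t lock;
};

/* stand-in for the real register accessor */
static int ex_read_phy_reg(struct ex_priv *p, int reg, u32 *val)
{
        *val = 0;
        return 0;
}

static int ex_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
        struct ex_priv *p = bus->priv;
        u32 val;
        int err;

        spin_lock_bh(&p->lock);
        err = ex_read_phy_reg(p, reg, &val);
        spin_unlock_bh(&p->lock);

        return err ? -EIO : (int)val;
}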
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 82b45d8797b4..bab7940158e6 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2412,7 +2412,6 @@ struct ring_info {
2412 2412
2413struct tx_ring_info { 2413struct tx_ring_info {
2414 struct sk_buff *skb; 2414 struct sk_buff *skb;
2415 u32 prev_vlan_tag;
2416}; 2415};
2417 2416
2418struct tg3_config_info { 2417struct tg3_config_info {
@@ -2749,7 +2748,6 @@ struct tg3 {
2749#define TG3_FLG3_5701_DMA_BUG 0x00000008 2748#define TG3_FLG3_5701_DMA_BUG 0x00000008
2750#define TG3_FLG3_USE_PHYLIB 0x00000010 2749#define TG3_FLG3_USE_PHYLIB 0x00000010
2751#define TG3_FLG3_MDIOBUS_INITED 0x00000020 2750#define TG3_FLG3_MDIOBUS_INITED 0x00000020
2752#define TG3_FLG3_MDIOBUS_PAUSED 0x00000040
2753#define TG3_FLG3_PHY_CONNECTED 0x00000080 2751#define TG3_FLG3_PHY_CONNECTED 0x00000080
2754#define TG3_FLG3_RGMII_STD_IBND_DISABLE 0x00000100 2752#define TG3_FLG3_RGMII_STD_IBND_DISABLE 0x00000100
2755#define TG3_FLG3_RGMII_EXT_IBND_RX_EN 0x00000200 2753#define TG3_FLG3_RGMII_EXT_IBND_RX_EN 0x00000200
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index d032bba9bc4c..0caa8008c51c 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -418,6 +418,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
418 goto halt_fail_and_release; 418 goto halt_fail_and_release;
419 } 419 }
420 memcpy(net->dev_addr, bp, ETH_ALEN); 420 memcpy(net->dev_addr, bp, ETH_ALEN);
421 memcpy(net->perm_addr, bp, ETH_ALEN);
421 422
422 /* set a nonzero filter to enable data transfers */ 423 /* set a nonzero filter to enable data transfers */
423 memset(u.set, 0, sizeof *u.set); 424 memset(u.set, 0, sizeof *u.set);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d445845f2779..8d009760277c 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -948,7 +948,7 @@ free:
948 return err; 948 return err;
949} 949}
950 950
951static void virtnet_remove(struct virtio_device *vdev) 951static void __devexit virtnet_remove(struct virtio_device *vdev)
952{ 952{
953 struct virtnet_info *vi = vdev->priv; 953 struct virtnet_info *vi = vdev->priv;
954 struct sk_buff *skb; 954 struct sk_buff *skb;
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 49ea9c92b7e6..d7a764a2fc1a 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -31,13 +31,12 @@ config STRIP
31 ---help--- 31 ---help---
32 Say Y if you have a Metricom radio and intend to use Starmode Radio 32 Say Y if you have a Metricom radio and intend to use Starmode Radio
33 IP. STRIP is a radio protocol developed for the MosquitoNet project 33 IP. STRIP is a radio protocol developed for the MosquitoNet project
34 (on the WWW at <http://mosquitonet.stanford.edu/>) to send Internet 34 to send Internet traffic using Metricom radios. Metricom radios are
35 traffic using Metricom radios. Metricom radios are small, battery 35 small, battery powered, 100kbit/sec packet radio transceivers, about
36 powered, 100kbit/sec packet radio transceivers, about the size and 36 the size and weight of a cellular telephone. (You may also have heard
37 weight of a cellular telephone. (You may also have heard them called 37 them called "Metricom modems" but we avoid the term "modem" because
38 "Metricom modems" but we avoid the term "modem" because it misleads 38 it misleads many people into thinking that you can plug a Metricom
39 many people into thinking that you can plug a Metricom modem into a 39 modem into a phone line and use it as a modem.)
40 phone line and use it as a modem.)
41 40
42 You can use STRIP on any Linux machine with a serial port, although 41 You can use STRIP on any Linux machine with a serial port, although
43 it is obviously most useful for people with laptop computers. If you 42 it is obviously most useful for people with laptop computers. If you
diff --git a/drivers/net/wireless/ath/ar9170/phy.c b/drivers/net/wireless/ath/ar9170/phy.c
index b3e5cf3735b0..dbd488da18b1 100644
--- a/drivers/net/wireless/ath/ar9170/phy.c
+++ b/drivers/net/wireless/ath/ar9170/phy.c
@@ -1141,7 +1141,8 @@ static int ar9170_set_freq_cal_data(struct ar9170 *ar,
1141 u8 vpds[2][AR5416_PD_GAIN_ICEPTS]; 1141 u8 vpds[2][AR5416_PD_GAIN_ICEPTS];
1142 u8 pwrs[2][AR5416_PD_GAIN_ICEPTS]; 1142 u8 pwrs[2][AR5416_PD_GAIN_ICEPTS];
1143 int chain, idx, i; 1143 int chain, idx, i;
1144 u8 f; 1144 u32 phy_data = 0;
1145 u8 f, tmp;
1145 1146
1146 switch (channel->band) { 1147 switch (channel->band) {
1147 case IEEE80211_BAND_2GHZ: 1148 case IEEE80211_BAND_2GHZ:
@@ -1208,9 +1209,6 @@ static int ar9170_set_freq_cal_data(struct ar9170 *ar,
1208 } 1209 }
1209 1210
1210 for (i = 0; i < 76; i++) { 1211 for (i = 0; i < 76; i++) {
1211 u32 phy_data;
1212 u8 tmp;
1213
1214 if (i < 25) { 1212 if (i < 25) {
1215 tmp = ar9170_interpolate_val(i, &pwrs[0][0], 1213 tmp = ar9170_interpolate_val(i, &pwrs[0][0],
1216 &vpds[0][0]); 1214 &vpds[0][0]);
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index e96091b31499..9c1397996e0a 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -340,10 +340,15 @@ static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q,
340 q->mmio_base + B43_PIO_TXDATA, 340 q->mmio_base + B43_PIO_TXDATA,
341 sizeof(u16)); 341 sizeof(u16));
342 if (data_len & 1) { 342 if (data_len & 1) {
343 u8 tail[2] = { 0, };
344
343 /* Write the last byte. */ 345 /* Write the last byte. */
344 ctl &= ~B43_PIO_TXCTL_WRITEHI; 346 ctl &= ~B43_PIO_TXCTL_WRITEHI;
345 b43_piotx_write16(q, B43_PIO_TXCTL, ctl); 347 b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
346 b43_piotx_write16(q, B43_PIO_TXDATA, data[data_len - 1]); 348 tail[0] = data[data_len - 1];
349 ssb_block_write(dev->dev, tail, 2,
350 q->mmio_base + B43_PIO_TXDATA,
351 sizeof(u16));
347 } 352 }
348 353
349 return ctl; 354 return ctl;
@@ -386,26 +391,31 @@ static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q,
386 q->mmio_base + B43_PIO8_TXDATA, 391 q->mmio_base + B43_PIO8_TXDATA,
387 sizeof(u32)); 392 sizeof(u32));
388 if (data_len & 3) { 393 if (data_len & 3) {
389 u32 value = 0; 394 u8 tail[4] = { 0, };
390 395
391 /* Write the last few bytes. */ 396 /* Write the last few bytes. */
392 ctl &= ~(B43_PIO8_TXCTL_8_15 | B43_PIO8_TXCTL_16_23 | 397 ctl &= ~(B43_PIO8_TXCTL_8_15 | B43_PIO8_TXCTL_16_23 |
393 B43_PIO8_TXCTL_24_31); 398 B43_PIO8_TXCTL_24_31);
394 data = &(data[data_len - 1]);
395 switch (data_len & 3) { 399 switch (data_len & 3) {
396 case 3: 400 case 3:
397 ctl |= B43_PIO8_TXCTL_16_23; 401 ctl |= B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_8_15;
398 value |= (u32)(*data) << 16; 402 tail[0] = data[data_len - 3];
399 data--; 403 tail[1] = data[data_len - 2];
404 tail[2] = data[data_len - 1];
405 break;
400 case 2: 406 case 2:
401 ctl |= B43_PIO8_TXCTL_8_15; 407 ctl |= B43_PIO8_TXCTL_8_15;
402 value |= (u32)(*data) << 8; 408 tail[0] = data[data_len - 2];
403 data--; 409 tail[1] = data[data_len - 1];
410 break;
404 case 1: 411 case 1:
405 value |= (u32)(*data); 412 tail[0] = data[data_len - 1];
413 break;
406 } 414 }
407 b43_piotx_write32(q, B43_PIO8_TXCTL, ctl); 415 b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);
408 b43_piotx_write32(q, B43_PIO8_TXDATA, value); 416 ssb_block_write(dev->dev, tail, 4,
417 q->mmio_base + B43_PIO8_TXDATA,
418 sizeof(u32));
409 } 419 }
410 420
411 return ctl; 421 return ctl;
@@ -693,21 +703,25 @@ data_ready:
693 q->mmio_base + B43_PIO8_RXDATA, 703 q->mmio_base + B43_PIO8_RXDATA,
694 sizeof(u32)); 704 sizeof(u32));
695 if (len & 3) { 705 if (len & 3) {
696 u32 value; 706 u8 tail[4] = { 0, };
697 char *data;
698 707
699 /* Read the last few bytes. */ 708 /* Read the last few bytes. */
700 value = b43_piorx_read32(q, B43_PIO8_RXDATA); 709 ssb_block_read(dev->dev, tail, 4,
701 data = &(skb->data[len + padding - 1]); 710 q->mmio_base + B43_PIO8_RXDATA,
711 sizeof(u32));
702 switch (len & 3) { 712 switch (len & 3) {
703 case 3: 713 case 3:
704 *data = (value >> 16); 714 skb->data[len + padding - 3] = tail[0];
705 data--; 715 skb->data[len + padding - 2] = tail[1];
716 skb->data[len + padding - 1] = tail[2];
717 break;
706 case 2: 718 case 2:
707 *data = (value >> 8); 719 skb->data[len + padding - 2] = tail[0];
708 data--; 720 skb->data[len + padding - 1] = tail[1];
721 break;
709 case 1: 722 case 1:
710 *data = value; 723 skb->data[len + padding - 1] = tail[0];
724 break;
711 } 725 }
712 } 726 }
713 } else { 727 } else {
@@ -715,11 +729,13 @@ data_ready:
715 q->mmio_base + B43_PIO_RXDATA, 729 q->mmio_base + B43_PIO_RXDATA,
716 sizeof(u16)); 730 sizeof(u16));
717 if (len & 1) { 731 if (len & 1) {
718 u16 value; 732 u8 tail[2] = { 0, };
719 733
720 /* Read the last byte. */ 734 /* Read the last byte. */
721 value = b43_piorx_read16(q, B43_PIO_RXDATA); 735 ssb_block_read(dev->dev, tail, 2,
722 skb->data[len + padding - 1] = value; 736 q->mmio_base + B43_PIO_RXDATA,
737 sizeof(u16));
738 skb->data[len + padding - 1] = tail[0];
723 } 739 }
724 } 740 }
725 741
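Reviewer note: all three b43 PIO hunks follow the same idea: when the frame length is not a multiple of the FIFO word size, stage the leftover bytes in a small zero-padded scratch buffer and move that whole word with one block transfer (ssb_block_write()/ssb_block_read()) instead of poking the odd bytes into the data register by hand. A minimal, bus-agnostic sketch of the transmit side (names invented; the stub stands in for the real block write):

/* Sketch only: handle the unaligned tail of a FIFO transfer by padding
 * it out to a full word in a scratch buffer. */
#include <linux/string.h>
#include <linux/types.h>

static void ex_fifo_block_write(const void *buf, size_t len)
{
        /* stand-in for the real bus op, e.g. an ssb/PCI block write */
}

static void ex_fifo_send(const u8 *data, size_t len)
{
        size_t full = len & ~(size_t)3;         /* whole 32-bit words */

        if (full)
                ex_fifo_block_write(data, full);

        if (len & 3) {
                u8 tail[4] = { 0, };

                memcpy(tail, data + full, len & 3);
                ex_fifo_block_write(tail, sizeof(tail));
        }
}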
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 896f532182f0..38cfd79e0590 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -631,6 +631,9 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
631 data->beacon_int = 1024 * info->beacon_int / 1000 * HZ / 1000; 631 data->beacon_int = 1024 * info->beacon_int / 1000 * HZ / 1000;
632 if (WARN_ON(!data->beacon_int)) 632 if (WARN_ON(!data->beacon_int))
633 data->beacon_int = 1; 633 data->beacon_int = 1;
634 if (data->started)
635 mod_timer(&data->beacon_timer,
636 jiffies + data->beacon_int);
634 } 637 }
635 638
636 if (changed & BSS_CHANGED_ERP_CTS_PROT) { 639 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
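Reviewer note: mac80211_hwsim now re-arms its beacon timer as soon as the beacon interval changes on a running interface, so the new interval takes effect immediately rather than after the previously scheduled expiry. The sketch below shows that re-arm idiom in isolation (hypothetical helper, assuming a plain struct timer_list):

/* Sketch only: re-arm a running timer when its period changes. */
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/types.h>

static void ex_update_period(struct timer_list *timer,
                             unsigned long new_period, bool running)
{
        if (running)
                mod_timer(timer, jiffies + new_period);
}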
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 1cbd9b4a3efc..b8f5ee33445e 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2381,6 +2381,7 @@ static struct usb_device_id rt73usb_device_table[] = {
2381 /* Huawei-3Com */ 2381 /* Huawei-3Com */
2382 { USB_DEVICE(0x1472, 0x0009), USB_DEVICE_DATA(&rt73usb_ops) }, 2382 { USB_DEVICE(0x1472, 0x0009), USB_DEVICE_DATA(&rt73usb_ops) },
2383 /* Hercules */ 2383 /* Hercules */
2384 { USB_DEVICE(0x06f8, 0xe002), USB_DEVICE_DATA(&rt73usb_ops) },
2384 { USB_DEVICE(0x06f8, 0xe010), USB_DEVICE_DATA(&rt73usb_ops) }, 2385 { USB_DEVICE(0x06f8, 0xe010), USB_DEVICE_DATA(&rt73usb_ops) },
2385 { USB_DEVICE(0x06f8, 0xe020), USB_DEVICE_DATA(&rt73usb_ops) }, 2386 { USB_DEVICE(0x06f8, 0xe020), USB_DEVICE_DATA(&rt73usb_ops) },
2386 /* Linksys */ 2387 /* Linksys */