Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/adi/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/adi/bfin_mac.c | 259
-rw-r--r--  drivers/net/ethernet/adi/bfin_mac.h | 13
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 132
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 460
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 87
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | 21
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h | 29
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 837
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 16
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 59
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 12
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 17
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.h | 2
-rw-r--r--  drivers/net/ethernet/cadence/Kconfig | 9
-rw-r--r--  drivers/net/ethernet/cadence/at91_ether.c | 1276
-rw-r--r--  drivers/net/ethernet/cadence/at91_ether.h | 112
-rw-r--r--  drivers/net/ethernet/cadence/macb.c | 524
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 50
-rw-r--r--  drivers/net/ethernet/calxeda/xgmac.c | 59
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/common.h | 7
-rw-r--r--  drivers/net/ethernet/dlink/dl2k.c | 16
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 51
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 409
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h | 177
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_ethtool.c | 80
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_hw.h | 20
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 779
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_roce.c | 5
-rw-r--r--  drivers/net/ethernet/freescale/Kconfig | 9
-rw-r--r--  drivers/net/ethernet/freescale/Makefile | 1
-rw-r--r--  drivers/net/ethernet/freescale/fec.c | 161
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 119
-rw-r--r--  drivers/net/ethernet/freescale/fec_ptp.c | 383
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 14
-rw-r--r--  drivers/net/ethernet/ibm/emac/mal.c | 6
-rw-r--r--  drivers/net/ethernet/intel/Kconfig | 28
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_hw.c | 17
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 11
-rw-r--r--  drivers/net/ethernet/intel/igb/Makefile | 4
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c | 20
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.h | 3
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_defines.h | 14
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_i210.c | 94
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_i210.h | 11
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.c | 4
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.h | 1
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_nvm.c | 71
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_nvm.h | 16
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_phy.c | 49
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 76
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 55
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 1340
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 55
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 13
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/Makefile | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 14
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 158
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 73
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 104
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 22
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 277
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h | 31
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 124
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 469
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/defines.h | 7
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 225
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/mbx.h | 10
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.c | 61
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.h | 2
-rw-r--r--  drivers/net/ethernet/marvell/pxa168_eth.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 2
-rw-r--r--  drivers/net/ethernet/micrel/ksz884x.c | 13
-rw-r--r--  drivers/net/ethernet/neterion/s2io.c | 3
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/Kconfig | 16
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h | 2
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 14
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c | 93
-rw-r--r--  drivers/net/ethernet/qlogic/qla3xxx.c | 16
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_dbg.c | 16
-rw-r--r--  drivers/net/ethernet/realtek/atp.c | 58
-rw-r--r--  drivers/net/ethernet/realtek/atp.h | 2
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 100
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/Kconfig | 8
-rw-r--r--  drivers/net/ethernet/sfc/Makefile | 3
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 2
-rw-r--r--  drivers/net/ethernet/sfc/nic.h | 26
-rw-r--r--  drivers/net/ethernet/ti/Kconfig | 8
-rw-r--r--  drivers/net/ethernet/ti/Makefile | 2
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 412
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ale.c | 31
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ale.h | 1
-rw-r--r--  drivers/net/ethernet/ti/cpts.c | 427
-rw-r--r--  drivers/net/ethernet/ti/cpts.h | 146
104 files changed, 7238 insertions, 3876 deletions
diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig
index 49a30d37ae4a..e49c0eff040b 100644
--- a/drivers/net/ethernet/adi/Kconfig
+++ b/drivers/net/ethernet/adi/Kconfig
@@ -61,7 +61,7 @@ config BFIN_RX_DESC_NUM
 
 config BFIN_MAC_USE_HWSTAMP
 	bool "Use IEEE 1588 hwstamp"
-	depends on BFIN_MAC && BF518
+	select PTP_1588_CLOCK
 	default y
 	---help---
 	  To support the IEEE 1588 Precision Time Protocol (PTP), select y here
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index f816426e1085..f1c458dc039a 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -548,14 +548,17 @@ static int bfin_mac_ethtool_setwol(struct net_device *dev,
 	return 0;
 }
 
+#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
 static int bfin_mac_ethtool_get_ts_info(struct net_device *dev,
 	struct ethtool_ts_info *info)
 {
+	struct bfin_mac_local *lp = netdev_priv(dev);
+
 	info->so_timestamping =
 		SOF_TIMESTAMPING_TX_HARDWARE |
 		SOF_TIMESTAMPING_RX_HARDWARE |
-		SOF_TIMESTAMPING_SYS_HARDWARE;
-	info->phc_index = -1;
+		SOF_TIMESTAMPING_RAW_HARDWARE;
+	info->phc_index = lp->phc_index;
 	info->tx_types =
 		(1 << HWTSTAMP_TX_OFF) |
 		(1 << HWTSTAMP_TX_ON);
@@ -566,6 +569,7 @@ static int bfin_mac_ethtool_get_ts_info(struct net_device *dev,
 		(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
 	return 0;
 }
+#endif
 
 static const struct ethtool_ops bfin_mac_ethtool_ops = {
 	.get_settings = bfin_mac_ethtool_getsettings,
@@ -574,7 +578,9 @@ static const struct ethtool_ops bfin_mac_ethtool_ops = {
 	.get_drvinfo = bfin_mac_ethtool_getdrvinfo,
 	.get_wol = bfin_mac_ethtool_getwol,
 	.set_wol = bfin_mac_ethtool_setwol,
+#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
 	.get_ts_info = bfin_mac_ethtool_get_ts_info,
+#endif
 };
 
 /**************************************************************************/
@@ -649,6 +655,20 @@ static int bfin_mac_set_mac_address(struct net_device *dev, void *p)
 #ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
 #define bfin_mac_hwtstamp_is_none(cfg) ((cfg) == HWTSTAMP_FILTER_NONE)
 
+static u32 bfin_select_phc_clock(u32 input_clk, unsigned int *shift_result)
+{
+	u32 ipn = 1000000000UL / input_clk;
+	u32 ppn = 1;
+	unsigned int shift = 0;
+
+	while (ppn <= ipn) {
+		ppn <<= 1;
+		shift++;
+	}
+	*shift_result = shift;
+	return 1000000000UL / ppn;
+}
+
 static int bfin_mac_hwtstamp_ioctl(struct net_device *netdev,
 	struct ifreq *ifr, int cmd)
 {
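[Editor's note, not part of the patch: bfin_select_phc_clock() picks the coarsest power-of-two tick period (in ns) that still resolves one input-clock period, so a raw hardware counter value later converts to nanoseconds with a plain shift (ns = counter << shift) instead of a multiply. A standalone sketch of the same arithmetic, using a hypothetical 80 MHz system clock as input:

	/* Sketch only; select_phc_clock() mirrors bfin_select_phc_clock(). */
	#include <stdio.h>

	static unsigned int select_phc_clock(unsigned int input_clk,
					     unsigned int *shift_result)
	{
		unsigned int ipn = 1000000000u / input_clk; /* ns per input tick */
		unsigned int ppn = 1;                       /* ns per PHC tick */
		unsigned int shift = 0;

		while (ppn <= ipn) {    /* smallest power of two above ipn */
			ppn <<= 1;
			shift++;
		}
		*shift_result = shift;
		return 1000000000u / ppn;
	}

	int main(void)
	{
		unsigned int shift;

		/* 80 MHz -> 12 ns/tick -> ppn = 16, shift = 4, PHC = 62.5 MHz */
		printf("phc=%u Hz shift=%u\n",
		       select_phc_clock(80000000u, &shift), shift);
		return 0;
	}

With those example numbers, bfin_mac_hwtstamp_init() (added further down in this patch) would program EMAC_PTP_ADDEND with phc_clk * 2^32 / input_clk = 62500000 * 2^32 / 80000000 = 3355443200.]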
@@ -798,19 +818,7 @@ static int bfin_mac_hwtstamp_ioctl(struct net_device *netdev,
 		bfin_read_EMAC_PTP_TXSNAPLO();
 		bfin_read_EMAC_PTP_TXSNAPHI();
 
-		/*
-		 * Set registers so that rollover occurs soon to test this.
-		 */
-		bfin_write_EMAC_PTP_TIMELO(0x00000000);
-		bfin_write_EMAC_PTP_TIMEHI(0xFF800000);
-
 		SSYNC();
-
-		lp->compare.last_update = 0;
-		timecounter_init(&lp->clock,
-				&lp->cycles,
-				ktime_to_ns(ktime_get_real()));
-		timecompare_update(&lp->compare, 0);
 	}
 
 	lp->stamp_cfg = config;
@@ -818,15 +826,6 @@ static int bfin_mac_hwtstamp_ioctl(struct net_device *netdev,
 		-EFAULT : 0;
 }
 
-static void bfin_dump_hwtamp(char *s, ktime_t *hw, ktime_t *ts, struct timecompare *cmp)
-{
-	ktime_t sys = ktime_get_real();
-
-	pr_debug("%s %s hardware:%d,%d transform system:%d,%d system:%d,%d, cmp:%lld, %lld\n",
-			__func__, s, hw->tv.sec, hw->tv.nsec, ts->tv.sec, ts->tv.nsec, sys.tv.sec,
-			sys.tv.nsec, cmp->offset, cmp->skew);
-}
-
 static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
 {
 	struct bfin_mac_local *lp = netdev_priv(netdev);
@@ -857,15 +856,9 @@ static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
 			regval = bfin_read_EMAC_PTP_TXSNAPLO();
 			regval |= (u64)bfin_read_EMAC_PTP_TXSNAPHI() << 32;
 			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
-			ns = timecounter_cyc2time(&lp->clock,
-					regval);
-			timecompare_update(&lp->compare, ns);
+			ns = regval << lp->shift;
 			shhwtstamps.hwtstamp = ns_to_ktime(ns);
-			shhwtstamps.syststamp =
-				timecompare_transform(&lp->compare, ns);
 			skb_tstamp_tx(skb, &shhwtstamps);
-
-			bfin_dump_hwtamp("TX", &shhwtstamps.hwtstamp, &shhwtstamps.syststamp, &lp->compare);
 		}
 	}
 }
@@ -888,55 +881,184 @@ static void bfin_rx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
 
 	regval = bfin_read_EMAC_PTP_RXSNAPLO();
 	regval |= (u64)bfin_read_EMAC_PTP_RXSNAPHI() << 32;
-	ns = timecounter_cyc2time(&lp->clock, regval);
-	timecompare_update(&lp->compare, ns);
+	ns = regval << lp->shift;
 	memset(shhwtstamps, 0, sizeof(*shhwtstamps));
 	shhwtstamps->hwtstamp = ns_to_ktime(ns);
-	shhwtstamps->syststamp = timecompare_transform(&lp->compare, ns);
+}
+
+static void bfin_mac_hwtstamp_init(struct net_device *netdev)
+{
+	struct bfin_mac_local *lp = netdev_priv(netdev);
+	u64 addend, ppb;
+	u32 input_clk, phc_clk;
+
+	/* Initialize hardware timer */
+	input_clk = get_sclk();
+	phc_clk = bfin_select_phc_clock(input_clk, &lp->shift);
+	addend = phc_clk * (1ULL << 32);
+	do_div(addend, input_clk);
+	bfin_write_EMAC_PTP_ADDEND((u32)addend);
+
+	lp->addend = addend;
+	ppb = 1000000000ULL * input_clk;
+	do_div(ppb, phc_clk);
+	lp->max_ppb = ppb - 1000000000ULL - 1ULL;
 
-	bfin_dump_hwtamp("RX", &shhwtstamps->hwtstamp, &shhwtstamps->syststamp, &lp->compare);
+	/* Initialize hwstamp config */
+	lp->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+	lp->stamp_cfg.tx_type = HWTSTAMP_TX_OFF;
 }
 
-/*
- * bfin_read_clock - read raw cycle counter (to be used by time counter)
- */
-static cycle_t bfin_read_clock(const struct cyclecounter *tc)
+static u64 bfin_ptp_time_read(struct bfin_mac_local *lp)
 {
-	u64 stamp;
+	u64 ns;
+	u32 lo, hi;
+
+	lo = bfin_read_EMAC_PTP_TIMELO();
+	hi = bfin_read_EMAC_PTP_TIMEHI();
 
-	stamp = bfin_read_EMAC_PTP_TIMELO();
-	stamp |= (u64)bfin_read_EMAC_PTP_TIMEHI() << 32ULL;
+	ns = ((u64) hi) << 32;
+	ns |= lo;
+	ns <<= lp->shift;
 
-	return stamp;
+	return ns;
 }
 
-#define PTP_CLK 25000000
+static void bfin_ptp_time_write(struct bfin_mac_local *lp, u64 ns)
+{
+	u32 hi, lo;
 
-static void bfin_mac_hwtstamp_init(struct net_device *netdev)
+	ns >>= lp->shift;
+	hi = ns >> 32;
+	lo = ns & 0xffffffff;
+
+	bfin_write_EMAC_PTP_TIMELO(lo);
+	bfin_write_EMAC_PTP_TIMEHI(hi);
+}
+
+/* PTP Hardware Clock operations */
+
+static int bfin_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+	u64 adj;
+	u32 diff, addend;
+	int neg_adj = 0;
+	struct bfin_mac_local *lp =
+		container_of(ptp, struct bfin_mac_local, caps);
+
+	if (ppb < 0) {
+		neg_adj = 1;
+		ppb = -ppb;
+	}
+	addend = lp->addend;
+	adj = addend;
+	adj *= ppb;
+	diff = div_u64(adj, 1000000000ULL);
+
+	addend = neg_adj ? addend - diff : addend + diff;
+
+	bfin_write_EMAC_PTP_ADDEND(addend);
+
+	return 0;
+}
+
+static int bfin_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+	s64 now;
+	unsigned long flags;
+	struct bfin_mac_local *lp =
+		container_of(ptp, struct bfin_mac_local, caps);
+
+	spin_lock_irqsave(&lp->phc_lock, flags);
+
+	now = bfin_ptp_time_read(lp);
+	now += delta;
+	bfin_ptp_time_write(lp, now);
+
+	spin_unlock_irqrestore(&lp->phc_lock, flags);
+
+	return 0;
+}
+
+static int bfin_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+	u64 ns;
+	u32 remainder;
+	unsigned long flags;
+	struct bfin_mac_local *lp =
+		container_of(ptp, struct bfin_mac_local, caps);
+
+	spin_lock_irqsave(&lp->phc_lock, flags);
+
+	ns = bfin_ptp_time_read(lp);
+
+	spin_unlock_irqrestore(&lp->phc_lock, flags);
+
+	ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
+	ts->tv_nsec = remainder;
+	return 0;
+}
+
+static int bfin_ptp_settime(struct ptp_clock_info *ptp,
+			    const struct timespec *ts)
+{
+	u64 ns;
+	unsigned long flags;
+	struct bfin_mac_local *lp =
+		container_of(ptp, struct bfin_mac_local, caps);
+
+	ns = ts->tv_sec * 1000000000ULL;
+	ns += ts->tv_nsec;
+
+	spin_lock_irqsave(&lp->phc_lock, flags);
+
+	bfin_ptp_time_write(lp, ns);
+
+	spin_unlock_irqrestore(&lp->phc_lock, flags);
+
+	return 0;
+}
+
+static int bfin_ptp_enable(struct ptp_clock_info *ptp,
+			   struct ptp_clock_request *rq, int on)
+{
+	return -EOPNOTSUPP;
+}
+
+static struct ptp_clock_info bfin_ptp_caps = {
+	.owner		= THIS_MODULE,
+	.name		= "BF518 clock",
+	.max_adj	= 0,
+	.n_alarm	= 0,
+	.n_ext_ts	= 0,
+	.n_per_out	= 0,
+	.pps		= 0,
+	.adjfreq	= bfin_ptp_adjfreq,
+	.adjtime	= bfin_ptp_adjtime,
+	.gettime	= bfin_ptp_gettime,
+	.settime	= bfin_ptp_settime,
+	.enable		= bfin_ptp_enable,
+};
+
+static int bfin_phc_init(struct net_device *netdev, struct device *dev)
 {
 	struct bfin_mac_local *lp = netdev_priv(netdev);
-	u64 append;
 
-	/* Initialize hardware timer */
-	append = PTP_CLK * (1ULL << 32);
-	do_div(append, get_sclk());
-	bfin_write_EMAC_PTP_ADDEND((u32)append);
-
-	memset(&lp->cycles, 0, sizeof(lp->cycles));
-	lp->cycles.read = bfin_read_clock;
-	lp->cycles.mask = CLOCKSOURCE_MASK(64);
-	lp->cycles.mult = 1000000000 / PTP_CLK;
-	lp->cycles.shift = 0;
-
-	/* Synchronize our NIC clock against system wall clock */
-	memset(&lp->compare, 0, sizeof(lp->compare));
-	lp->compare.source = &lp->clock;
-	lp->compare.target = ktime_get_real;
-	lp->compare.num_samples = 10;
+	lp->caps = bfin_ptp_caps;
+	lp->caps.max_adj = lp->max_ppb;
+	lp->clock = ptp_clock_register(&lp->caps, dev);
+	if (IS_ERR(lp->clock))
+		return PTR_ERR(lp->clock);
 
-	/* Initialize hwstamp config */
-	lp->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
-	lp->stamp_cfg.tx_type = HWTSTAMP_TX_OFF;
+	lp->phc_index = ptp_clock_index(lp->clock);
+	spin_lock_init(&lp->phc_lock);
+
+	return 0;
+}
+
+static void bfin_phc_release(struct bfin_mac_local *lp)
+{
+	ptp_clock_unregister(lp->clock);
 }
 
 #else
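[Editor's note, not part of the patch: EMAC_PTP_ADDEND holds the fixed-point ratio phc_clk/input_clk scaled by 2^32, so bfin_ptp_adjfreq() trims the clock rate by scaling that ratio by (1 +/- ppb/10^9), i.e. diff = addend * |ppb| / 10^9. A standalone sketch using the assumed 80 MHz example values from the earlier note (both numbers are illustrative, not from the patch):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t addend = 3355443200u;  /* 62.5 MHz / 80 MHz * 2^32 */
		int32_t ppb = -122;             /* slow the PHC by 122 ppb */
		int neg = ppb < 0;
		uint64_t adj = (uint64_t)addend * (uint64_t)(neg ? -ppb : ppb);
		uint32_t diff = (uint32_t)(adj / 1000000000ull);

		addend = neg ? addend - diff : addend + diff;
		printf("new addend = %u (diff = %u)\n", addend, diff);
		return 0;
	}

This is also why bfin_mac_hwtstamp_init() derives lp->max_ppb: the scaled addend must stay below 2^32, which caps how far the ratio can be adjusted upward.]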
@@ -945,6 +1067,8 @@ static void bfin_mac_hwtstamp_init(struct net_device *netdev)
 # define bfin_mac_hwtstamp_ioctl(dev, ifr, cmd) (-EOPNOTSUPP)
 # define bfin_rx_hwtstamp(dev, skb)
 # define bfin_tx_hwtstamp(dev, skb)
+# define bfin_phc_init(netdev, dev) 0
+# define bfin_phc_release(lp)
 #endif
 
 static inline void _tx_reclaim_skb(void)
@@ -1579,12 +1703,17 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
 	}
 
 	bfin_mac_hwtstamp_init(ndev);
+	if (bfin_phc_init(ndev, &pdev->dev)) {
+		dev_err(&pdev->dev, "Cannot register PHC device!\n");
+		goto out_err_phc;
+	}
 
 	/* now, print out the card info, in a short format.. */
 	netdev_info(ndev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);
 
 	return 0;
 
+out_err_phc:
 out_err_reg_ndev:
 	free_irq(IRQ_MAC_RX, ndev);
 out_err_request_irq:
@@ -1603,6 +1732,8 @@ static int __devexit bfin_mac_remove(struct platform_device *pdev)
 	struct net_device *ndev = platform_get_drvdata(pdev);
 	struct bfin_mac_local *lp = netdev_priv(ndev);
 
+	bfin_phc_release(lp);
+
 	platform_set_drvdata(pdev, NULL);
 
 	lp->mii_bus->priv = NULL;
diff --git a/drivers/net/ethernet/adi/bfin_mac.h b/drivers/net/ethernet/adi/bfin_mac.h
index 960905c08223..7a07ee07906b 100644
--- a/drivers/net/ethernet/adi/bfin_mac.h
+++ b/drivers/net/ethernet/adi/bfin_mac.h
@@ -11,8 +11,7 @@
 #define _BFIN_MAC_H_
 
 #include <linux/net_tstamp.h>
-#include <linux/clocksource.h>
-#include <linux/timecompare.h>
+#include <linux/ptp_clock_kernel.h>
 #include <linux/timer.h>
 #include <linux/etherdevice.h>
 #include <linux/bfin_mac.h>
@@ -94,10 +93,14 @@ struct bfin_mac_local {
 	struct mii_bus *mii_bus;
 
 #if defined(CONFIG_BFIN_MAC_USE_HWSTAMP)
-	struct cyclecounter cycles;
-	struct timecounter clock;
-	struct timecompare compare;
+	u32 addend;
+	unsigned int shift;
+	s32 max_ppb;
 	struct hwtstamp_config stamp_cfg;
+	struct ptp_clock_info caps;
+	struct ptp_clock *clock;
+	int phc_index;
+	spinlock_t phc_lock; /* protects time lo/hi registers */
 #endif
 };
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 72897c47b8c8..de121ccd675e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -34,18 +34,10 @@
 
 #include "bnx2x_hsi.h"
 
-#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
-#define BCM_CNIC 1
 #include "../cnic_if.h"
-#endif
 
-#ifdef BCM_CNIC
-#define BNX2X_MIN_MSIX_VEC_CNT 3
-#define BNX2X_MSIX_VEC_FP_START 2
-#else
-#define BNX2X_MIN_MSIX_VEC_CNT 2
-#define BNX2X_MSIX_VEC_FP_START 1
-#endif
+
+#define BNX2X_MIN_MSIX_VEC_CNT(bp)	((bp)->min_msix_vec_cnt)
 
 #include <linux/mdio.h>
 
@@ -256,15 +248,10 @@ enum {
 	/* FCoE L2 */
 #define BNX2X_FCOE_ETH_CID(bp)	(BNX2X_CNIC_START_ETH_CID(bp) + 1)
 
-/** Additional rings budgeting */
-#ifdef BCM_CNIC
-#define CNIC_PRESENT		1
-#define FCOE_PRESENT		1
-#else
-#define CNIC_PRESENT		0
-#define FCOE_PRESENT		0
-#endif /* BCM_CNIC */
-#define NON_ETH_CONTEXT_USE	(FCOE_PRESENT)
+#define CNIC_SUPPORT(bp)	((bp)->cnic_support)
+#define CNIC_ENABLED(bp)	((bp)->cnic_enabled)
+#define CNIC_LOADED(bp)		((bp)->cnic_loaded)
+#define FCOE_INIT(bp)		((bp)->fcoe_init)
 
 #define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
 	AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
@@ -297,9 +284,7 @@ enum {
 	OOO_TXQ_IDX_OFFSET,
 };
 #define MAX_ETH_TXQ_IDX(bp)	(BNX2X_NUM_NON_CNIC_QUEUES(bp) * (bp)->max_cos)
-#ifdef BCM_CNIC
 #define FCOE_TXQ_IDX(bp)	(MAX_ETH_TXQ_IDX(bp) + FCOE_TXQ_IDX_OFFSET)
-#endif
 
 /* fast path */
 /*
@@ -585,15 +570,9 @@ struct bnx2x_fastpath {
 			->var)
 
 
-#define IS_ETH_FP(fp)		(fp->index < \
-				 BNX2X_NUM_ETH_QUEUES(fp->bp))
-#ifdef BCM_CNIC
-#define IS_FCOE_FP(fp)		(fp->index == FCOE_IDX(fp->bp))
-#define IS_FCOE_IDX(idx)	((idx) == FCOE_IDX(bp))
-#else
-#define IS_FCOE_FP(fp)		false
-#define IS_FCOE_IDX(idx)	false
-#endif
+#define IS_ETH_FP(fp)		((fp)->index < BNX2X_NUM_ETH_QUEUES((fp)->bp))
+#define IS_FCOE_FP(fp)		((fp)->index == FCOE_IDX((fp)->bp))
+#define IS_FCOE_IDX(idx)	((idx) == FCOE_IDX(bp))
 
 
 /* MC hsi */
@@ -886,6 +865,18 @@ struct bnx2x_common {
 			(CHIP_REV(bp) == CHIP_REV_Bx))
 #define CHIP_IS_E3A0(bp)	(CHIP_IS_E3(bp) && \
 				 (CHIP_REV(bp) == CHIP_REV_Ax))
+/* This define is used in two main places:
+ * 1. In the early stages of nic_load, to know whether to configure Parser /
+ * Searcher to nic-only mode or to offload mode. Offload mode is configured if
+ * either the chip is E1x (where the MIC_MODE register is not applicable), or
+ * if cnic already registered for this port (which means that the user wants
+ * storage services).
+ * 2. During cnic-related load, to know if offload mode is already configured
+ * in the HW or needs to be configured.
+ * Since the transition from nic-mode to offload-mode in HW causes traffic
+ * corruption, nic-mode is configured only on ports on which storage services
+ * were never requested.
+ */
+#define CONFIGURE_NIC_MODE(bp)	(!CHIP_IS_E1x(bp) && !CNIC_ENABLED(bp))
 
 	int flash_size;
 #define BNX2X_NVRAM_1MB_SIZE	0x20000	/* 1M bit in bytes */
@@ -1003,18 +994,15 @@ union cdu_context {
 #define CDU_ILT_PAGE_SZ		(8192 << CDU_ILT_PAGE_SZ_HW) /* 32K */
 #define ILT_PAGE_CIDS		(CDU_ILT_PAGE_SZ / sizeof(union cdu_context))
 
-#ifdef BCM_CNIC
 #define CNIC_ISCSI_CID_MAX	256
 #define CNIC_FCOE_CID_MAX	2048
 #define CNIC_CID_MAX		(CNIC_ISCSI_CID_MAX + CNIC_FCOE_CID_MAX)
 #define CNIC_ILT_LINES		DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS)
-#endif
 
 #define QM_ILT_PAGE_SZ_HW	0
 #define QM_ILT_PAGE_SZ		(4096 << QM_ILT_PAGE_SZ_HW) /* 4K */
 #define QM_CID_ROUND		1024
 
-#ifdef BCM_CNIC
 /* TM (timers) host DB constants */
 #define TM_ILT_PAGE_SZ_HW	0
 #define TM_ILT_PAGE_SZ		(4096 << TM_ILT_PAGE_SZ_HW) /* 4K */
@@ -1032,8 +1020,6 @@ union cdu_context {
 #define SRC_T2_SZ		SRC_ILT_SZ
 #define SRC_ILT_LINES		DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ)
 
-#endif
-
 #define MAX_DMAE_C		8
 
 /* DMA memory not used in fastpath */
@@ -1227,7 +1213,6 @@ struct bnx2x {
 	struct bnx2x_sp_objs	*sp_objs;
 	struct bnx2x_fp_stats	*fp_stats;
 	struct bnx2x_fp_txdata	*bnx2x_txq;
-	int			bnx2x_txq_size;
 	void __iomem		*regview;
 	void __iomem		*doorbells;
 	u16			db_size;
@@ -1350,6 +1335,15 @@ struct bnx2x {
 #define NO_ISCSI_OOO(bp)	((bp)->flags & NO_ISCSI_OOO_FLAG)
 #define NO_FCOE(bp)		((bp)->flags & NO_FCOE_FLAG)
 
+	u8			cnic_support;
+	bool			cnic_enabled;
+	bool			cnic_loaded;
+
+	/* Flag that indicates that we can start looking for FCoE L2 queue
+	 * completions in the default status block.
+	 */
+	bool			fcoe_init;
+
 	int			pm_cap;
 	int			mrrs;
 
@@ -1420,6 +1414,8 @@ struct bnx2x {
 #define BNX2X_MAX_COS		3
 #define BNX2X_MAX_TX_COS	2
 	int			num_queues;
+	uint			num_ethernet_queues;
+	uint			num_cnic_queues;
 	int			num_napi_queues;
 	int			disable_tpa;
 
@@ -1433,6 +1429,7 @@ struct bnx2x {
 	u8			igu_dsb_id;
 	u8			igu_base_sb;
 	u8			igu_sb_cnt;
+	u8			min_msix_vec_cnt;
 
 	dma_addr_t		def_status_blk_mapping;
 
@@ -1478,16 +1475,16 @@ struct bnx2x {
  * Maximum supported number of RSS queues: number of IGU SBs minus one that goes
  * to CNIC.
  */
-#define BNX2X_MAX_RSS_COUNT(bp)	((bp)->igu_sb_cnt - CNIC_PRESENT)
+#define BNX2X_MAX_RSS_COUNT(bp)	((bp)->igu_sb_cnt - CNIC_SUPPORT(bp))
 
 /*
  * Maximum CID count that might be required by the bnx2x:
  * Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI
 */
 #define BNX2X_L2_CID_COUNT(bp)	(BNX2X_NUM_ETH_QUEUES(bp) * BNX2X_MULTI_TX_COS \
-				+ NON_ETH_CONTEXT_USE + CNIC_PRESENT)
+				+ 2 * CNIC_SUPPORT(bp))
 #define BNX2X_L2_MAX_CID(bp)	(BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS \
-				+ NON_ETH_CONTEXT_USE + CNIC_PRESENT)
+				+ 2 * CNIC_SUPPORT(bp))
 #define L2_ILT_LINES(bp)	(DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\
 					ILT_PAGE_CIDS))
 
@@ -1495,9 +1492,6 @@ struct bnx2x {
 
 	int			dropless_fc;
 
-#ifdef BCM_CNIC
-	u32			cnic_flags;
-#define BNX2X_CNIC_FLAG_MAC_SET		1
 	void			*t2;
 	dma_addr_t		t2_mapping;
 	struct cnic_ops	__rcu	*cnic_ops;
@@ -1518,7 +1512,6 @@ struct bnx2x {
 
 	/* Start index of the "special" (CNIC related) L2 clients */
 	u8			cnic_base_cl_id;
-#endif
 
 	int			dmae_ready;
 	/* used to synchronize dmae accesses */
@@ -1647,9 +1640,9 @@ struct bnx2x {
/* Tx queues may be less or equal to Rx queues */
 extern int num_queues;
 #define BNX2X_NUM_QUEUES(bp)	(bp->num_queues)
-#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE)
+#define BNX2X_NUM_ETH_QUEUES(bp) ((bp)->num_ethernet_queues)
 #define BNX2X_NUM_NON_CNIC_QUEUES(bp)	(BNX2X_NUM_QUEUES(bp) - \
-					 NON_ETH_CONTEXT_USE)
+					 (bp)->num_cnic_queues)
 #define BNX2X_NUM_RX_QUEUES(bp)	BNX2X_NUM_QUEUES(bp)
 
 #define is_multi(bp)		(BNX2X_NUM_QUEUES(bp) > 1)
@@ -1689,6 +1682,13 @@ struct bnx2x_func_init_params {
 	u16		spq_prod;	/* valid iff FUNC_FLG_SPQ */
 };
 
+#define for_each_cnic_queue(bp, var) \
+	for ((var) = BNX2X_NUM_ETH_QUEUES(bp); (var) < BNX2X_NUM_QUEUES(bp); \
+	     (var)++) \
+		if (skip_queue(bp, var))	\
+			continue;		\
+		else
+
 #define for_each_eth_queue(bp, var) \
 	for ((var) = 0; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++)
 
@@ -1702,6 +1702,22 @@ struct bnx2x_func_init_params {
 		else
 
/* Skip forwarding FP */
+#define for_each_valid_rx_queue(bp, var) \
+	for ((var) = 0; \
+	     (var) < (CNIC_LOADED(bp) ? BNX2X_NUM_QUEUES(bp) : \
+		      BNX2X_NUM_ETH_QUEUES(bp)); \
+	     (var)++) \
+		if (skip_rx_queue(bp, var)) \
+			continue; \
+		else
+
+#define for_each_rx_queue_cnic(bp, var) \
+	for ((var) = BNX2X_NUM_ETH_QUEUES(bp); (var) < BNX2X_NUM_QUEUES(bp); \
+	     (var)++) \
+		if (skip_rx_queue(bp, var)) \
+			continue; \
+		else
+
 #define for_each_rx_queue(bp, var) \
 	for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
 		if (skip_rx_queue(bp, var)) \
@@ -1709,6 +1725,22 @@ struct bnx2x_func_init_params {
 		else
 
/* Skip OOO FP */
+#define for_each_valid_tx_queue(bp, var) \
+	for ((var) = 0; \
+	     (var) < (CNIC_LOADED(bp) ? BNX2X_NUM_QUEUES(bp) : \
+		      BNX2X_NUM_ETH_QUEUES(bp)); \
+	     (var)++) \
+		if (skip_tx_queue(bp, var)) \
+			continue; \
+		else
+
+#define for_each_tx_queue_cnic(bp, var) \
+	for ((var) = BNX2X_NUM_ETH_QUEUES(bp); (var) < BNX2X_NUM_QUEUES(bp); \
+	     (var)++) \
+		if (skip_tx_queue(bp, var)) \
+			continue; \
+		else
+
 #define for_each_tx_queue(bp, var) \
 	for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
 		if (skip_tx_queue(bp, var)) \
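[Editor's note, not part of the patch: all of these queue iterators end in "if (skip...(bp, var)) continue; else" so that the statement the caller writes after the macro becomes the else-branch, and skipped indices simply continue the loop. A minimal standalone illustration of the idiom (names here are hypothetical, not from bnx2x):

	#include <stdio.h>

	struct slot { int active; int val; };

	#define for_each_active_slot(arr, n, i)		\
		for ((i) = 0; (i) < (n); (i)++)		\
			if (!(arr)[i].active)		\
				continue;		\
			else

	int main(void)
	{
		struct slot s[4] = { {1, 10}, {0, 20}, {1, 30}, {0, 40} };
		int i;

		for_each_active_slot(s, 4, i)
			printf("slot %d = %d\n", i, s[i].val); /* prints 10 and 30 */
		return 0;
	}
]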
@@ -2179,7 +2211,6 @@ void bnx2x_notify_link_changed(struct bnx2x *bp);
 #define BNX2X_MF_SD_PROTOCOL(bp) \
	((bp)->mf_config[BP_VN(bp)] & FUNC_MF_CFG_PROTOCOL_MASK)
 
-#ifdef BCM_CNIC
 #define BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) \
	(BNX2X_MF_SD_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_ISCSI)
 
@@ -2196,9 +2227,12 @@ void bnx2x_notify_link_changed(struct bnx2x *bp);
 #define IS_MF_STORAGE_SD(bp) (IS_MF_SD(bp) && \
			      (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \
			       BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
-#else
-#define IS_MF_FCOE_AFEX(bp)	false
-#endif
 
+enum {
+	SWITCH_UPDATE,
+	AFEX_UPDATE,
+};
+
+#define NUM_MACS	8
 
 #endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4833b6a9031c..54d522da1aa7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1152,6 +1152,25 @@ static void bnx2x_free_tpa_pool(struct bnx2x *bp,
 	}
 }
 
+void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
+{
+	int j;
+
+	for_each_rx_queue_cnic(bp, j) {
+		struct bnx2x_fastpath *fp = &bp->fp[j];
+
+		fp->rx_bd_cons = 0;
+
+		/* Activate BD ring */
+		/* Warning!
+		 * this will generate an interrupt (to the TSTORM)
+		 * must only be done after chip is initialized
+		 */
+		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
+				     fp->rx_sge_prod);
+	}
+}
+
 void bnx2x_init_rx_rings(struct bnx2x *bp)
 {
 	int func = BP_FUNC(bp);
@@ -1159,7 +1178,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
 	int i, j;
 
 	/* Allocate TPA resources */
-	for_each_rx_queue(bp, j) {
+	for_each_eth_queue(bp, j) {
 		struct bnx2x_fastpath *fp = &bp->fp[j];
 
 		DP(NETIF_MSG_IFUP,
@@ -1217,7 +1236,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
 		}
 	}
 
-	for_each_rx_queue(bp, j) {
+	for_each_eth_queue(bp, j) {
 		struct bnx2x_fastpath *fp = &bp->fp[j];
 
 		fp->rx_bd_cons = 0;
1244 } 1263 }
1245} 1264}
1246 1265
1247static void bnx2x_free_tx_skbs(struct bnx2x *bp) 1266static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1248{ 1267{
1249 int i;
1250 u8 cos; 1268 u8 cos;
1269 struct bnx2x *bp = fp->bp;
1251 1270
1252 for_each_tx_queue(bp, i) { 1271 for_each_cos_in_tx_queue(fp, cos) {
1253 struct bnx2x_fastpath *fp = &bp->fp[i]; 1272 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1254 for_each_cos_in_tx_queue(fp, cos) { 1273 unsigned pkts_compl = 0, bytes_compl = 0;
1255 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1256 unsigned pkts_compl = 0, bytes_compl = 0;
1257 1274
1258 u16 sw_prod = txdata->tx_pkt_prod; 1275 u16 sw_prod = txdata->tx_pkt_prod;
1259 u16 sw_cons = txdata->tx_pkt_cons; 1276 u16 sw_cons = txdata->tx_pkt_cons;
1260 1277
1261 while (sw_cons != sw_prod) { 1278 while (sw_cons != sw_prod) {
1262 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons), 1279 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1263 &pkts_compl, &bytes_compl); 1280 &pkts_compl, &bytes_compl);
1264 sw_cons++; 1281 sw_cons++;
1265 }
1266 netdev_tx_reset_queue(
1267 netdev_get_tx_queue(bp->dev,
1268 txdata->txq_index));
1269 } 1282 }
1283
1284 netdev_tx_reset_queue(
1285 netdev_get_tx_queue(bp->dev,
1286 txdata->txq_index));
1287 }
1288}
1289
1290static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1291{
1292 int i;
1293
1294 for_each_tx_queue_cnic(bp, i) {
1295 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1296 }
1297}
1298
1299static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1300{
1301 int i;
1302
1303 for_each_eth_queue(bp, i) {
1304 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1270 } 1305 }
1271} 1306}
1272 1307
@@ -1294,11 +1329,20 @@ static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1294 } 1329 }
1295} 1330}
1296 1331
1332static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1333{
1334 int j;
1335
1336 for_each_rx_queue_cnic(bp, j) {
1337 bnx2x_free_rx_bds(&bp->fp[j]);
1338 }
1339}
1340
1297static void bnx2x_free_rx_skbs(struct bnx2x *bp) 1341static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1298{ 1342{
1299 int j; 1343 int j;
1300 1344
1301 for_each_rx_queue(bp, j) { 1345 for_each_eth_queue(bp, j) {
1302 struct bnx2x_fastpath *fp = &bp->fp[j]; 1346 struct bnx2x_fastpath *fp = &bp->fp[j];
1303 1347
1304 bnx2x_free_rx_bds(fp); 1348 bnx2x_free_rx_bds(fp);
@@ -1308,6 +1352,12 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1308 } 1352 }
1309} 1353}
1310 1354
1355void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1356{
1357 bnx2x_free_tx_skbs_cnic(bp);
1358 bnx2x_free_rx_skbs_cnic(bp);
1359}
1360
1311void bnx2x_free_skbs(struct bnx2x *bp) 1361void bnx2x_free_skbs(struct bnx2x *bp)
1312{ 1362{
1313 bnx2x_free_tx_skbs(bp); 1363 bnx2x_free_tx_skbs(bp);
@@ -1347,11 +1397,12 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1347 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n", 1397 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1348 bp->msix_table[offset].vector); 1398 bp->msix_table[offset].vector);
1349 offset++; 1399 offset++;
1350#ifdef BCM_CNIC 1400
1351 if (nvecs == offset) 1401 if (CNIC_SUPPORT(bp)) {
1352 return; 1402 if (nvecs == offset)
1353 offset++; 1403 return;
1354#endif 1404 offset++;
1405 }
1355 1406
1356 for_each_eth_queue(bp, i) { 1407 for_each_eth_queue(bp, i) {
1357 if (nvecs == offset) 1408 if (nvecs == offset)
@@ -1368,7 +1419,7 @@ void bnx2x_free_irq(struct bnx2x *bp)
1368 if (bp->flags & USING_MSIX_FLAG && 1419 if (bp->flags & USING_MSIX_FLAG &&
1369 !(bp->flags & USING_SINGLE_MSIX_FLAG)) 1420 !(bp->flags & USING_SINGLE_MSIX_FLAG))
1370 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) + 1421 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
1371 CNIC_PRESENT + 1); 1422 CNIC_SUPPORT(bp) + 1);
1372 else 1423 else
1373 free_irq(bp->dev->irq, bp->dev); 1424 free_irq(bp->dev->irq, bp->dev);
1374} 1425}
@@ -1382,12 +1433,14 @@ int bnx2x_enable_msix(struct bnx2x *bp)
1382 bp->msix_table[0].entry); 1433 bp->msix_table[0].entry);
1383 msix_vec++; 1434 msix_vec++;
1384 1435
1385#ifdef BCM_CNIC 1436 /* Cnic requires an msix vector for itself */
1386 bp->msix_table[msix_vec].entry = msix_vec; 1437 if (CNIC_SUPPORT(bp)) {
1387 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n", 1438 bp->msix_table[msix_vec].entry = msix_vec;
1388 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry); 1439 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1389 msix_vec++; 1440 msix_vec, bp->msix_table[msix_vec].entry);
1390#endif 1441 msix_vec++;
1442 }
1443
1391 /* We need separate vectors for ETH queues only (not FCoE) */ 1444 /* We need separate vectors for ETH queues only (not FCoE) */
1392 for_each_eth_queue(bp, i) { 1445 for_each_eth_queue(bp, i) {
1393 bp->msix_table[msix_vec].entry = msix_vec; 1446 bp->msix_table[msix_vec].entry = msix_vec;
@@ -1396,7 +1449,7 @@ int bnx2x_enable_msix(struct bnx2x *bp)
1396 msix_vec++; 1449 msix_vec++;
1397 } 1450 }
1398 1451
1399 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1; 1452 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp) + 1;
1400 1453
1401 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt); 1454 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1402 1455
@@ -1404,7 +1457,7 @@ int bnx2x_enable_msix(struct bnx2x *bp)
1404 * reconfigure number of tx/rx queues according to available 1457 * reconfigure number of tx/rx queues according to available
1405 * MSI-X vectors 1458 * MSI-X vectors
1406 */ 1459 */
1407 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) { 1460 if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
1408 /* how less vectors we will have? */ 1461 /* how less vectors we will have? */
1409 int diff = req_cnt - rc; 1462 int diff = req_cnt - rc;
1410 1463
@@ -1419,7 +1472,8 @@ int bnx2x_enable_msix(struct bnx2x *bp)
1419 /* 1472 /*
1420 * decrease number of queues by number of unallocated entries 1473 * decrease number of queues by number of unallocated entries
1421 */ 1474 */
1422 bp->num_queues -= diff; 1475 bp->num_ethernet_queues -= diff;
1476 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1423 1477
1424 BNX2X_DEV_INFO("New queue configuration set: %d\n", 1478 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1425 bp->num_queues); 1479 bp->num_queues);
@@ -1435,6 +1489,9 @@ int bnx2x_enable_msix(struct bnx2x *bp)
1435 BNX2X_DEV_INFO("Using single MSI-X vector\n"); 1489 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1436 bp->flags |= USING_SINGLE_MSIX_FLAG; 1490 bp->flags |= USING_SINGLE_MSIX_FLAG;
1437 1491
1492 BNX2X_DEV_INFO("set number of queues to 1\n");
1493 bp->num_ethernet_queues = 1;
1494 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1438 } else if (rc < 0) { 1495 } else if (rc < 0) {
1439 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc); 1496 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1440 goto no_msix; 1497 goto no_msix;
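[Editor's note, not part of the patch: the MSI-X budget is now computed per device instead of per build. The request is one slowpath vector + CNIC_SUPPORT(bp) + one vector per ethernet queue, and BNX2X_MIN_MSIX_VEC_CNT(bp) reads bp->min_msix_vec_cnt (presumably initialized elsewhere in this commit to match the old compile-time constants: 2, or 3 when CNIC is supported). A standalone sketch of the fallback arithmetic, with all numbers assumed for illustration:

	#include <stdio.h>

	int main(void)
	{
		int num_eth = 8, cnic_support = 1;
		int req_cnt = 1 + cnic_support + num_eth; /* sp + cnic + eth = 10 */
		int granted = 6;                /* what pci_enable_msix() could give */
		int min_cnt = 2 + cnic_support; /* assumed min_msix_vec_cnt */

		if (granted >= min_cnt) {
			int diff = req_cnt - granted;	/* 4 vectors short... */
			num_eth -= diff;		/* ...so run 4 ETH queues */
		}
		printf("req=%d granted=%d eth=%d\n", req_cnt, granted, num_eth);
		return 0;
	}
]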
@@ -1464,9 +1521,9 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
 		return -EBUSY;
 	}
 
-#ifdef BCM_CNIC
-	offset++;
-#endif
+	if (CNIC_SUPPORT(bp))
+		offset++;
+
 	for_each_eth_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
@@ -1485,7 +1542,7 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
 	}
 
 	i = BNX2X_NUM_ETH_QUEUES(bp);
-	offset = 1 + CNIC_PRESENT;
+	offset = 1 + CNIC_SUPPORT(bp);
 	netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
 		    bp->msix_table[0].vector,
 		    0, bp->msix_table[offset].vector,
@@ -1556,19 +1613,35 @@ static int bnx2x_setup_irqs(struct bnx2x *bp)
 	return 0;
 }
 
+static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_rx_queue_cnic(bp, i)
+		napi_enable(&bnx2x_fp(bp, i, napi));
+}
+
 static void bnx2x_napi_enable(struct bnx2x *bp)
 {
 	int i;
 
-	for_each_rx_queue(bp, i)
+	for_each_eth_queue(bp, i)
 		napi_enable(&bnx2x_fp(bp, i, napi));
 }
 
+static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_rx_queue_cnic(bp, i)
+		napi_disable(&bnx2x_fp(bp, i, napi));
+}
+
 static void bnx2x_napi_disable(struct bnx2x *bp)
 {
 	int i;
 
-	for_each_rx_queue(bp, i)
+	for_each_eth_queue(bp, i)
 		napi_disable(&bnx2x_fp(bp, i, napi));
 }
 
@@ -1576,6 +1649,8 @@ void bnx2x_netif_start(struct bnx2x *bp)
 {
 	if (netif_running(bp->dev)) {
 		bnx2x_napi_enable(bp);
+		if (CNIC_LOADED(bp))
+			bnx2x_napi_enable_cnic(bp);
 		bnx2x_int_enable(bp);
 		if (bp->state == BNX2X_STATE_OPEN)
 			netif_tx_wake_all_queues(bp->dev);
@@ -1586,14 +1661,15 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
 {
 	bnx2x_int_disable_sync(bp, disable_hw);
 	bnx2x_napi_disable(bp);
+	if (CNIC_LOADED(bp))
+		bnx2x_napi_disable_cnic(bp);
 }
 
 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 
-#ifdef BCM_CNIC
-	if (!NO_FCOE(bp)) {
+	if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
 		struct ethhdr *hdr = (struct ethhdr *)skb->data;
 		u16 ether_type = ntohs(hdr->h_proto);
 
@@ -1609,7 +1685,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
 		if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
 			return bnx2x_fcoe_tx(bp, txq_index);
 	}
-#endif
+
 	/* select a non-FCoE queue */
 	return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
 }
@@ -1618,15 +1694,15 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
 void bnx2x_set_num_queues(struct bnx2x *bp)
 {
 	/* RSS queues */
-	bp->num_queues = bnx2x_calc_num_queues(bp);
+	bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
 
-#ifdef BCM_CNIC
 	/* override in STORAGE SD modes */
 	if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
-		bp->num_queues = 1;
-#endif
+		bp->num_ethernet_queues = 1;
+
 	/* Add special queues */
-	bp->num_queues += NON_ETH_CONTEXT_USE;
+	bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
+	bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
 
 	BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
 }
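[Editor's note, not part of the patch: the resulting fastpath layout puts the RSS ethernet queues at indices 0 .. num_ethernet_queues-1 and the single CNIC (FCoE) queue, when supported, immediately after them; that trailing range is exactly what the for_each_*_cnic() iterators added above walk. A sketch with assumed counts:

	#include <stdio.h>

	int main(void)
	{
		int num_eth = 4, num_cnic = 1;	/* num_cnic = CNIC_SUPPORT(bp) */
		int num_queues = num_eth + num_cnic;
		int i;

		for (i = 0; i < num_eth; i++)
			printf("fp[%d]: ethernet (RSS)\n", i);
		for (i = num_eth; i < num_queues; i++)
			printf("fp[%d]: cnic/FCoE\n", i);
		return 0;
	}
]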
@@ -1653,20 +1729,18 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
 */
-static int bnx2x_set_real_num_queues(struct bnx2x *bp)
+static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
 {
 	int rc, tx, rx;
 
 	tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
-	rx = BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE;
+	rx = BNX2X_NUM_ETH_QUEUES(bp);
 
/* account for fcoe queue */
-#ifdef BCM_CNIC
-	if (!NO_FCOE(bp)) {
-		rx += FCOE_PRESENT;
-		tx += FCOE_PRESENT;
+	if (include_cnic && !NO_FCOE(bp)) {
+		rx++;
+		tx++;
 	}
-#endif
 
 	rc = netif_set_real_num_tx_queues(bp->dev, tx);
 	if (rc) {
@@ -1859,14 +1933,26 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
 		(bp)->state = BNX2X_STATE_ERROR; \
 		goto label; \
 	} while (0)
-#else
+
+#define LOAD_ERROR_EXIT_CNIC(bp, label) \
+	do { \
+		bp->cnic_loaded = false; \
+		goto label; \
+	} while (0)
+#else /*BNX2X_STOP_ON_ERROR*/
 #define LOAD_ERROR_EXIT(bp, label) \
 	do { \
 		(bp)->state = BNX2X_STATE_ERROR; \
 		(bp)->panic = 1; \
 		return -EBUSY; \
 	} while (0)
-#endif
+#define LOAD_ERROR_EXIT_CNIC(bp, label) \
+	do { \
+		bp->cnic_loaded = false; \
+		(bp)->panic = 1; \
+		return -EBUSY; \
+	} while (0)
+#endif /*BNX2X_STOP_ON_ERROR*/
 
 bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
 {
@@ -1959,10 +2045,8 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
 		fp->max_cos = 1;
 
 	/* Init txdata pointers */
-#ifdef BCM_CNIC
 	if (IS_FCOE_FP(fp))
 		fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
-#endif
 	if (IS_ETH_FP(fp))
 		for_each_cos_in_tx_queue(fp, cos)
 			fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
@@ -1980,11 +2064,95 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
 	else if (bp->flags & GRO_ENABLE_FLAG)
 		fp->mode = TPA_MODE_GRO;
 
-#ifdef BCM_CNIC
 	/* We don't want TPA on an FCoE L2 ring */
 	if (IS_FCOE_FP(fp))
 		fp->disable_tpa = 1;
-#endif
+}
+
+int bnx2x_load_cnic(struct bnx2x *bp)
+{
+	int i, rc, port = BP_PORT(bp);
+
+	DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
+
+	mutex_init(&bp->cnic_mutex);
+
+	rc = bnx2x_alloc_mem_cnic(bp);
+	if (rc) {
+		BNX2X_ERR("Unable to allocate bp memory for cnic\n");
+		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
+	}
+
+	rc = bnx2x_alloc_fp_mem_cnic(bp);
+	if (rc) {
+		BNX2X_ERR("Unable to allocate memory for cnic fps\n");
+		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
+	}
+
+	/* Update the number of queues with the cnic queues */
+	rc = bnx2x_set_real_num_queues(bp, 1);
+	if (rc) {
+		BNX2X_ERR("Unable to set real_num_queues including cnic\n");
+		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
+	}
+
+	/* Add all CNIC NAPI objects */
+	bnx2x_add_all_napi_cnic(bp);
+	DP(NETIF_MSG_IFUP, "cnic napi added\n");
+	bnx2x_napi_enable_cnic(bp);
+
+	rc = bnx2x_init_hw_func_cnic(bp);
+	if (rc)
+		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
+
+	bnx2x_nic_init_cnic(bp);
+
+	/* Enable Timer scan */
+	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
+
+	for_each_cnic_queue(bp, i) {
+		rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
+		if (rc) {
+			BNX2X_ERR("Queue setup failed\n");
+			LOAD_ERROR_EXIT(bp, load_error_cnic2);
+		}
+	}
+
+	/* Initialize Rx filter. */
+	netif_addr_lock_bh(bp->dev);
+	bnx2x_set_rx_mode(bp->dev);
+	netif_addr_unlock_bh(bp->dev);
+
+	/* re-read iscsi info */
+	bnx2x_get_iscsi_info(bp);
+	bnx2x_setup_cnic_irq_info(bp);
+	bnx2x_setup_cnic_info(bp);
+	bp->cnic_loaded = true;
+	if (bp->state == BNX2X_STATE_OPEN)
+		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
+
+
+	DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
+
+	return 0;
+
+#ifndef BNX2X_STOP_ON_ERROR
+load_error_cnic2:
+	/* Disable Timer scan */
+	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
+
+load_error_cnic1:
+	bnx2x_napi_disable_cnic(bp);
+	/* Update the number of queues without the cnic queues */
+	rc = bnx2x_set_real_num_queues(bp, 0);
+	if (rc)
+		BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
+load_error_cnic0:
+	BNX2X_ERR("CNIC-related load failed\n");
+	bnx2x_free_fp_mem_cnic(bp);
+	bnx2x_free_mem_cnic(bp);
+	return rc;
+#endif /* ! BNX2X_STOP_ON_ERROR */
 }
 
 
@@ -1995,6 +2163,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	u32 load_code;
 	int i, rc;
 
+	DP(NETIF_MSG_IFUP, "Starting NIC load\n");
+	DP(NETIF_MSG_IFUP,
+	   "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
+
 #ifdef BNX2X_STOP_ON_ERROR
 	if (unlikely(bp->panic)) {
 		BNX2X_ERR("Can't load NIC when there is panic\n");
@@ -2022,9 +2194,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
 	for_each_queue(bp, i)
 		bnx2x_bz_fp(bp, i);
-	memset(bp->bnx2x_txq, 0, bp->bnx2x_txq_size *
-	       sizeof(struct bnx2x_fp_txdata));
+	memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
+				  bp->num_cnic_queues) *
+				  sizeof(struct bnx2x_fp_txdata));
 
+	bp->fcoe_init = false;
 
 	/* Set the receive queues buffer size */
 	bnx2x_set_rx_buf_size(bp);
@@ -2034,9 +2208,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 
 	/* As long as bnx2x_alloc_mem() may possibly update
 	 * bp->num_queues, bnx2x_set_real_num_queues() should always
-	 * come after it.
+	 * come after it. At this stage cnic queues are not counted.
 	 */
-	rc = bnx2x_set_real_num_queues(bp);
+	rc = bnx2x_set_real_num_queues(bp, 0);
 	if (rc) {
 		BNX2X_ERR("Unable to set real_num_queues\n");
 		LOAD_ERROR_EXIT(bp, load_error0);
@@ -2050,6 +2224,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 
 	/* Add all NAPI objects */
 	bnx2x_add_all_napi(bp);
+	DP(NETIF_MSG_IFUP, "napi added\n");
 	bnx2x_napi_enable(bp);
 
 	/* set pf load just before approaching the MCP */
@@ -2191,23 +2366,18 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2191 LOAD_ERROR_EXIT(bp, load_error3); 2366 LOAD_ERROR_EXIT(bp, load_error3);
2192 } 2367 }
2193 2368
2194#ifdef BCM_CNIC 2369 for_each_nondefault_eth_queue(bp, i) {
2195 /* Enable Timer scan */
2196 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2197#endif
2198
2199 for_each_nondefault_queue(bp, i) {
2200 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0); 2370 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2201 if (rc) { 2371 if (rc) {
2202 BNX2X_ERR("Queue setup failed\n"); 2372 BNX2X_ERR("Queue setup failed\n");
2203 LOAD_ERROR_EXIT(bp, load_error4); 2373 LOAD_ERROR_EXIT(bp, load_error3);
2204 } 2374 }
2205 } 2375 }
2206 2376
2207 rc = bnx2x_init_rss_pf(bp); 2377 rc = bnx2x_init_rss_pf(bp);
2208 if (rc) { 2378 if (rc) {
2209 BNX2X_ERR("PF RSS init failed\n"); 2379 BNX2X_ERR("PF RSS init failed\n");
2210 LOAD_ERROR_EXIT(bp, load_error4); 2380 LOAD_ERROR_EXIT(bp, load_error3);
2211 } 2381 }
2212 2382
2213 /* Now when Clients are configured we are ready to work */ 2383 /* Now when Clients are configured we are ready to work */
@@ -2217,7 +2387,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2217 rc = bnx2x_set_eth_mac(bp, true); 2387 rc = bnx2x_set_eth_mac(bp, true);
2218 if (rc) { 2388 if (rc) {
2219 BNX2X_ERR("Setting Ethernet MAC failed\n"); 2389 BNX2X_ERR("Setting Ethernet MAC failed\n");
2220 LOAD_ERROR_EXIT(bp, load_error4); 2390 LOAD_ERROR_EXIT(bp, load_error3);
2221 } 2391 }
2222 2392
2223 if (bp->pending_max) { 2393 if (bp->pending_max) {
@@ -2264,14 +2434,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2264 /* start the timer */ 2434 /* start the timer */
2265 mod_timer(&bp->timer, jiffies + bp->current_interval); 2435 mod_timer(&bp->timer, jiffies + bp->current_interval);
2266 2436
2267#ifdef BCM_CNIC 2437 if (CNIC_ENABLED(bp))
2268 /* re-read iscsi info */ 2438 bnx2x_load_cnic(bp);
2269 bnx2x_get_iscsi_info(bp);
2270 bnx2x_setup_cnic_irq_info(bp);
2271 bnx2x_setup_cnic_info(bp);
2272 if (bp->state == BNX2X_STATE_OPEN)
2273 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2274#endif
2275 2439
2276 /* mark driver is loaded in shmem2 */ 2440 /* mark driver is loaded in shmem2 */
2277 if (SHMEM2_HAS(bp, drv_capabilities_flag)) { 2441 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
@@ -2293,14 +2457,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2293 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG)) 2457 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2294 bnx2x_dcbx_init(bp, false); 2458 bnx2x_dcbx_init(bp, false);
2295 2459
 2460 DP(NETIF_MSG_IFUP, "NIC load completed successfully\n");
2461
2296 return 0; 2462 return 0;
2297 2463
2298#ifndef BNX2X_STOP_ON_ERROR 2464#ifndef BNX2X_STOP_ON_ERROR
2299load_error4:
2300#ifdef BCM_CNIC
2301 /* Disable Timer scan */
2302 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2303#endif
2304load_error3: 2465load_error3:
2305 bnx2x_int_disable_sync(bp, 1); 2466 bnx2x_int_disable_sync(bp, 1);
2306 2467
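Where the old #ifdef BCM_CNIC block vanished entirely from non-CNIC builds, the patch gates the same work at run time instead. A hedged userspace model of the gate (the struct and function names are stand-ins, not driver symbols):

	#include <stdbool.h>
	#include <stdio.h>

	struct bp_model {
		bool cnic_enabled;  /* offload requested now, like CNIC_ENABLED(bp) */
		bool cnic_loaded;   /* cnic resources set up, like bp->cnic_loaded  */
	};

	static int load_cnic(struct bp_model *bp)
	{
		bp->cnic_loaded = true;
		puts("cnic resources loaded");
		return 0;
	}

	static int nic_load(struct bp_model *bp)
	{
		puts("ethernet bring-up (always)");
		if (bp->cnic_enabled)   /* run-time gate replaces the #ifdef */
			return load_cnic(bp);
		return 0;
	}

	int main(void)
	{
		struct bp_model bp = { .cnic_enabled = true };
		return nic_load(&bp);
	}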
@@ -2338,6 +2499,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2338 int i; 2499 int i;
2339 bool global = false; 2500 bool global = false;
2340 2501
2502 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2503
2341 /* mark driver is unloaded in shmem2 */ 2504 /* mark driver is unloaded in shmem2 */
2342 if (SHMEM2_HAS(bp, drv_capabilities_flag)) { 2505 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2343 u32 val; 2506 u32 val;
@@ -2373,14 +2536,13 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2373 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; 2536 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2374 smp_mb(); 2537 smp_mb();
2375 2538
2539 if (CNIC_LOADED(bp))
2540 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2541
2376 /* Stop Tx */ 2542 /* Stop Tx */
2377 bnx2x_tx_disable(bp); 2543 bnx2x_tx_disable(bp);
2378 netdev_reset_tc(bp->dev); 2544 netdev_reset_tc(bp->dev);
2379 2545
2380#ifdef BCM_CNIC
2381 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2382#endif
2383
2384 bp->rx_mode = BNX2X_RX_MODE_NONE; 2546 bp->rx_mode = BNX2X_RX_MODE_NONE;
2385 2547
2386 del_timer_sync(&bp->timer); 2548 del_timer_sync(&bp->timer);
@@ -2414,7 +2576,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2414 bnx2x_netif_stop(bp, 1); 2576 bnx2x_netif_stop(bp, 1);
2415 /* Delete all NAPI objects */ 2577 /* Delete all NAPI objects */
2416 bnx2x_del_all_napi(bp); 2578 bnx2x_del_all_napi(bp);
2417 2579 if (CNIC_LOADED(bp))
2580 bnx2x_del_all_napi_cnic(bp);
2418 /* Release IRQs */ 2581 /* Release IRQs */
2419 bnx2x_free_irq(bp); 2582 bnx2x_free_irq(bp);
2420 2583
@@ -2435,12 +2598,19 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2435 2598
2436 /* Free SKBs, SGEs, TPA pool and driver internals */ 2599 /* Free SKBs, SGEs, TPA pool and driver internals */
2437 bnx2x_free_skbs(bp); 2600 bnx2x_free_skbs(bp);
2601 if (CNIC_LOADED(bp))
2602 bnx2x_free_skbs_cnic(bp);
2438 for_each_rx_queue(bp, i) 2603 for_each_rx_queue(bp, i)
2439 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 2604 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2440 2605
2606 if (CNIC_LOADED(bp)) {
2607 bnx2x_free_fp_mem_cnic(bp);
2608 bnx2x_free_mem_cnic(bp);
2609 }
2441 bnx2x_free_mem(bp); 2610 bnx2x_free_mem(bp);
2442 2611
2443 bp->state = BNX2X_STATE_CLOSED; 2612 bp->state = BNX2X_STATE_CLOSED;
2613 bp->cnic_loaded = false;
2444 2614
2445 /* Check if there are pending parity attentions. If there are - set 2615 /* Check if there are pending parity attentions. If there are - set
2446 * RECOVERY_IN_PROGRESS. 2616 * RECOVERY_IN_PROGRESS.
@@ -2460,6 +2630,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2460 if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp))) 2630 if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
2461 bnx2x_disable_close_the_gate(bp); 2631 bnx2x_disable_close_the_gate(bp);
2462 2632
2633 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
2634
2463 return 0; 2635 return 0;
2464} 2636}
2465 2637
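Unload mirrors load: the L5 (cnic) driver is notified before the L2 rings are torn down, and cnic memory is freed only if it was ever brought up. A userspace model of that ordering, with prints standing in for the real teardown calls:

	#include <stdbool.h>
	#include <stdio.h>

	int main(void)
	{
		bool cnic_loaded = true;  /* example state */

		/* 1. tell the L5 driver to stop before its L2 rings vanish */
		if (cnic_loaded)
			puts("notify cnic: CNIC_CTL_STOP_CMD");
		puts("disable tx, del timer, stop napi, free irqs");

		/* 2. cnic buffers and memory are released only if they
		 *    were ever allocated */
		if (cnic_loaded) {
			puts("free cnic skbs, fastpath mem, cnic mem");
			cnic_loaded = false;
		}
		puts("free common mem; state = CLOSED");
		return 0;
	}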
@@ -2550,7 +2722,7 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
2550 2722
2551 /* Fall out from the NAPI loop if needed */ 2723 /* Fall out from the NAPI loop if needed */
2552 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { 2724 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2553#ifdef BCM_CNIC 2725
2554 /* No need to update SB for FCoE L2 ring as long as 2726 /* No need to update SB for FCoE L2 ring as long as
2555 * it's connected to the default SB and the SB 2727 * it's connected to the default SB and the SB
2556 * has been updated when NAPI was scheduled. 2728 * has been updated when NAPI was scheduled.
@@ -2559,8 +2731,6 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
2559 napi_complete(napi); 2731 napi_complete(napi);
2560 break; 2732 break;
2561 } 2733 }
2562#endif
2563
2564 bnx2x_update_fpsb_idx(fp); 2734 bnx2x_update_fpsb_idx(fp);
2565 /* bnx2x_has_rx_work() reads the status block, 2735 /* bnx2x_has_rx_work() reads the status block,
2566 * thus we need to ensure that status block indices 2736 * thus we need to ensure that status block indices
@@ -2940,7 +3110,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2940 txq_index = skb_get_queue_mapping(skb); 3110 txq_index = skb_get_queue_mapping(skb);
2941 txq = netdev_get_tx_queue(dev, txq_index); 3111 txq = netdev_get_tx_queue(dev, txq_index);
2942 3112
2943 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT); 3113 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
2944 3114
2945 txdata = &bp->bnx2x_txq[txq_index]; 3115 txdata = &bp->bnx2x_txq[txq_index];
2946 3116
@@ -3339,13 +3509,11 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3339 return -EINVAL; 3509 return -EINVAL;
3340 } 3510 }
3341 3511
3342#ifdef BCM_CNIC
3343 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) && 3512 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
3344 !is_zero_ether_addr(addr->sa_data)) { 3513 !is_zero_ether_addr(addr->sa_data)) {
3345 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n"); 3514 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
3346 return -EINVAL; 3515 return -EINVAL;
3347 } 3516 }
3348#endif
3349 3517
3350 if (netif_running(dev)) { 3518 if (netif_running(dev)) {
3351 rc = bnx2x_set_eth_mac(bp, false); 3519 rc = bnx2x_set_eth_mac(bp, false);
@@ -3369,13 +3537,11 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3369 u8 cos; 3537 u8 cos;
3370 3538
3371 /* Common */ 3539 /* Common */
3372#ifdef BCM_CNIC 3540
3373 if (IS_FCOE_IDX(fp_index)) { 3541 if (IS_FCOE_IDX(fp_index)) {
3374 memset(sb, 0, sizeof(union host_hc_status_block)); 3542 memset(sb, 0, sizeof(union host_hc_status_block));
3375 fp->status_blk_mapping = 0; 3543 fp->status_blk_mapping = 0;
3376
3377 } else { 3544 } else {
3378#endif
3379 /* status blocks */ 3545 /* status blocks */
3380 if (!CHIP_IS_E1x(bp)) 3546 if (!CHIP_IS_E1x(bp))
3381 BNX2X_PCI_FREE(sb->e2_sb, 3547 BNX2X_PCI_FREE(sb->e2_sb,
@@ -3387,9 +3553,8 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3387 bnx2x_fp(bp, fp_index, 3553 bnx2x_fp(bp, fp_index,
3388 status_blk_mapping), 3554 status_blk_mapping),
3389 sizeof(struct host_hc_status_block_e1x)); 3555 sizeof(struct host_hc_status_block_e1x));
3390#ifdef BCM_CNIC
3391 } 3556 }
3392#endif 3557
3393 /* Rx */ 3558 /* Rx */
3394 if (!skip_rx_queue(bp, fp_index)) { 3559 if (!skip_rx_queue(bp, fp_index)) {
3395 bnx2x_free_rx_bds(fp); 3560 bnx2x_free_rx_bds(fp);
@@ -3431,10 +3596,17 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3431 /* end of fastpath */ 3596 /* end of fastpath */
3432} 3597}
3433 3598
3599void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
3600{
3601 int i;
3602 for_each_cnic_queue(bp, i)
3603 bnx2x_free_fp_mem_at(bp, i);
3604}
3605
3434void bnx2x_free_fp_mem(struct bnx2x *bp) 3606void bnx2x_free_fp_mem(struct bnx2x *bp)
3435{ 3607{
3436 int i; 3608 int i;
3437 for_each_queue(bp, i) 3609 for_each_eth_queue(bp, i)
3438 bnx2x_free_fp_mem_at(bp, i); 3610 bnx2x_free_fp_mem_at(bp, i);
3439} 3611}
3440 3612
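The free path is now split between ethernet and cnic fastpath walkers. A small compilable model of the iterator-macro pattern (the counts and macro names here are illustrative; the real for_each_*_queue macros live in bnx2x.h):

	#include <stdio.h>

	/* cnic queues sit directly after the ethernet queues
	 * in the fastpath array */
	#define NUM_ETH  4
	#define NUM_CNIC 1

	#define for_each_eth_q(i)  for ((i) = 0; (i) < NUM_ETH; (i)++)
	#define for_each_cnic_q(i) for ((i) = NUM_ETH; (i) < NUM_ETH + NUM_CNIC; (i)++)

	int main(void)
	{
		int i;

		for_each_eth_q(i)
			printf("free eth fp %d\n", i);   /* bnx2x_free_fp_mem()      */
		for_each_cnic_q(i)
			printf("free cnic fp %d\n", i);  /* bnx2x_free_fp_mem_cnic() */
		return 0;
	}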
@@ -3519,14 +3691,11 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3519 u8 cos; 3691 u8 cos;
3520 int rx_ring_size = 0; 3692 int rx_ring_size = 0;
3521 3693
3522#ifdef BCM_CNIC
3523 if (!bp->rx_ring_size && 3694 if (!bp->rx_ring_size &&
3524 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) { 3695 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
3525 rx_ring_size = MIN_RX_SIZE_NONTPA; 3696 rx_ring_size = MIN_RX_SIZE_NONTPA;
3526 bp->rx_ring_size = rx_ring_size; 3697 bp->rx_ring_size = rx_ring_size;
3527 } else 3698 } else if (!bp->rx_ring_size) {
3528#endif
3529 if (!bp->rx_ring_size) {
3530 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp); 3699 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3531 3700
3532 if (CHIP_IS_E3(bp)) { 3701 if (CHIP_IS_E3(bp)) {
@@ -3550,9 +3719,8 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3550 3719
3551 /* Common */ 3720 /* Common */
3552 sb = &bnx2x_fp(bp, index, status_blk); 3721 sb = &bnx2x_fp(bp, index, status_blk);
3553#ifdef BCM_CNIC 3722
3554 if (!IS_FCOE_IDX(index)) { 3723 if (!IS_FCOE_IDX(index)) {
3555#endif
3556 /* status blocks */ 3724 /* status blocks */
3557 if (!CHIP_IS_E1x(bp)) 3725 if (!CHIP_IS_E1x(bp))
3558 BNX2X_PCI_ALLOC(sb->e2_sb, 3726 BNX2X_PCI_ALLOC(sb->e2_sb,
@@ -3562,9 +3730,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3562 BNX2X_PCI_ALLOC(sb->e1x_sb, 3730 BNX2X_PCI_ALLOC(sb->e1x_sb,
3563 &bnx2x_fp(bp, index, status_blk_mapping), 3731 &bnx2x_fp(bp, index, status_blk_mapping),
3564 sizeof(struct host_hc_status_block_e1x)); 3732 sizeof(struct host_hc_status_block_e1x));
3565#ifdef BCM_CNIC
3566 } 3733 }
3567#endif
3568 3734
3569 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to 3735 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3570 * set shortcuts for it. 3736 * set shortcuts for it.
@@ -3641,31 +3807,31 @@ alloc_mem_err:
3641 return 0; 3807 return 0;
3642} 3808}
3643 3809
3810int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
3811{
3812 if (!NO_FCOE(bp))
3813 /* FCoE */
3814 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
 3815 /* fail the load process instead of marking
 3816 * NO_FCOE_FLAG
3817 */
3818 return -ENOMEM;
3819
3820 return 0;
3821}
3822
3644int bnx2x_alloc_fp_mem(struct bnx2x *bp) 3823int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3645{ 3824{
3646 int i; 3825 int i;
3647 3826
3648 /** 3827 /* 1. Allocate FP for leading - fatal if error
3649 * 1. Allocate FP for leading - fatal if error 3828 * 2. Allocate RSS - fix number of queues if error
3650 * 2. {CNIC} Allocate FCoE FP - fatal if error
3651 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
3652 * 4. Allocate RSS - fix number of queues if error
3653 */ 3829 */
3654 3830
3655 /* leading */ 3831 /* leading */
3656 if (bnx2x_alloc_fp_mem_at(bp, 0)) 3832 if (bnx2x_alloc_fp_mem_at(bp, 0))
3657 return -ENOMEM; 3833 return -ENOMEM;
3658 3834
3659#ifdef BCM_CNIC
3660 if (!NO_FCOE(bp))
3661 /* FCoE */
3662 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
3663 /* we will fail load process instead of mark
3664 * NO_FCOE_FLAG
3665 */
3666 return -ENOMEM;
3667#endif
3668
3669 /* RSS */ 3835 /* RSS */
3670 for_each_nondefault_eth_queue(bp, i) 3836 for_each_nondefault_eth_queue(bp, i)
3671 if (bnx2x_alloc_fp_mem_at(bp, i)) 3837 if (bnx2x_alloc_fp_mem_at(bp, i))
@@ -3676,17 +3842,17 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3676 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i; 3842 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3677 3843
3678 WARN_ON(delta < 0); 3844 WARN_ON(delta < 0);
3679#ifdef BCM_CNIC 3845 if (CNIC_SUPPORT(bp))
3680 /** 3846 /* move non eth FPs next to last eth FP
3681 * move non eth FPs next to last eth FP 3847 * must be done in that order
3682 * must be done in that order 3848 * FCOE_IDX < FWD_IDX < OOO_IDX
3683 * FCOE_IDX < FWD_IDX < OOO_IDX 3849 */
3684 */
3685 3850
 3686 /* move FCoE fp even NO_FCOE_FLAG is on */ 3851 /* move FCoE fp even if NO_FCOE_FLAG is on */
 3687 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta); 3852 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
 3688#endif
 3689 bp->num_queues -= delta; 3853 bp->num_ethernet_queues -= delta;
 3854 bp->num_queues = bp->num_ethernet_queues +
3855 bp->num_cnic_queues;
3690 BNX2X_ERR("Adjusted num of queues from %d to %d\n", 3856 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3691 bp->num_queues + delta, bp->num_queues); 3857 bp->num_queues + delta, bp->num_queues);
3692 } 3858 }
@@ -3711,7 +3877,7 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3711 struct msix_entry *tbl; 3877 struct msix_entry *tbl;
3712 struct bnx2x_ilt *ilt; 3878 struct bnx2x_ilt *ilt;
3713 int msix_table_size = 0; 3879 int msix_table_size = 0;
3714 int fp_array_size; 3880 int fp_array_size, txq_array_size;
3715 int i; 3881 int i;
3716 3882
3717 /* 3883 /*
@@ -3721,7 +3887,7 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3721 msix_table_size = bp->igu_sb_cnt + 1; 3887 msix_table_size = bp->igu_sb_cnt + 1;
3722 3888
3723 /* fp array: RSS plus CNIC related L2 queues */ 3889 /* fp array: RSS plus CNIC related L2 queues */
3724 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE; 3890 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
3725 BNX2X_DEV_INFO("fp_array_size %d", fp_array_size); 3891 BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);
3726 3892
3727 fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL); 3893 fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
@@ -3750,12 +3916,12 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3750 goto alloc_err; 3916 goto alloc_err;
3751 3917
3752 /* Allocate memory for the transmission queues array */ 3918 /* Allocate memory for the transmission queues array */
3753 bp->bnx2x_txq_size = BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS; 3919 txq_array_size =
3754#ifdef BCM_CNIC 3920 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
3755 bp->bnx2x_txq_size++; 3921 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
3756#endif 3922
3757 bp->bnx2x_txq = kcalloc(bp->bnx2x_txq_size, 3923 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
3758 sizeof(struct bnx2x_fp_txdata), GFP_KERNEL); 3924 GFP_KERNEL);
3759 if (!bp->bnx2x_txq) 3925 if (!bp->bnx2x_txq)
3760 goto alloc_err; 3926 goto alloc_err;
3761 3927
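The tx-queue array sizing drops the #ifdef'd increment for a single expression. A sketch of the arithmetic with example values (the real counts come from BNX2X_MAX_RSS_COUNT(bp) and friends):

	#include <stdio.h>

	int main(void)
	{
		int max_rss      = 4;  /* stands in for BNX2X_MAX_RSS_COUNT(bp) */
		int multi_tx_cos = 3;  /* stands in for BNX2X_MULTI_TX_COS      */
		int cnic_support = 1;  /* CNIC_SUPPORT(bp) evaluates to 1 or 0  */

		/* one txdata slot per RSS queue per CoS, plus one cnic slot */
		int txq_array_size = max_rss * multi_tx_cos + cnic_support;

		printf("txq_array_size = %d\n", txq_array_size); /* prints 13 */
		return 0;
	}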
@@ -3838,7 +4004,7 @@ int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3838 return LINK_CONFIG_IDX(sel_phy_idx); 4004 return LINK_CONFIG_IDX(sel_phy_idx);
3839} 4005}
3840 4006
3841#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC) 4007#ifdef NETDEV_FCOE_WWNN
3842int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type) 4008int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
3843{ 4009{
3844 struct bnx2x *bp = netdev_priv(dev); 4010 struct bnx2x *bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 9c5ea6c5b4c7..ad280740b134 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -238,7 +238,6 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);
238 * @dev_instance: private instance 238 * @dev_instance: private instance
239 */ 239 */
240irqreturn_t bnx2x_interrupt(int irq, void *dev_instance); 240irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);
241#ifdef BCM_CNIC
242 241
243/** 242/**
244 * bnx2x_cnic_notify - send command to cnic driver 243 * bnx2x_cnic_notify - send command to cnic driver
@@ -262,8 +261,6 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
262 */ 261 */
263void bnx2x_setup_cnic_info(struct bnx2x *bp); 262void bnx2x_setup_cnic_info(struct bnx2x *bp);
264 263
265#endif
266
267/** 264/**
268 * bnx2x_int_enable - enable HW interrupts. 265 * bnx2x_int_enable - enable HW interrupts.
269 * 266 *
@@ -283,7 +280,7 @@ void bnx2x_int_enable(struct bnx2x *bp);
283void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw); 280void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
284 281
285/** 282/**
286 * bnx2x_nic_init - init driver internals. 283 * bnx2x_nic_init_cnic - init driver internals for cnic.
287 * 284 *
288 * @bp: driver handle 285 * @bp: driver handle
289 * @load_code: COMMON, PORT or FUNCTION 286 * @load_code: COMMON, PORT or FUNCTION
@@ -293,9 +290,26 @@ void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
293 * - status blocks 290 * - status blocks
294 * - etc. 291 * - etc.
295 */ 292 */
296void bnx2x_nic_init(struct bnx2x *bp, u32 load_code); 293void bnx2x_nic_init_cnic(struct bnx2x *bp);
297 294
298/** 295/**
296 * bnx2x_nic_init - init driver internals.
297 *
298 * @bp: driver handle
299 *
300 * Initializes:
301 * - rings
302 * - status blocks
303 * - etc.
304 */
305void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);
306/**
307 * bnx2x_alloc_mem_cnic - allocate driver's memory for cnic.
308 *
309 * @bp: driver handle
310 */
311int bnx2x_alloc_mem_cnic(struct bnx2x *bp);
312/**
299 * bnx2x_alloc_mem - allocate driver's memory. 313 * bnx2x_alloc_mem - allocate driver's memory.
300 * 314 *
301 * @bp: driver handle 315 * @bp: driver handle
@@ -303,6 +317,12 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);
303int bnx2x_alloc_mem(struct bnx2x *bp); 317int bnx2x_alloc_mem(struct bnx2x *bp);
304 318
305/** 319/**
320 * bnx2x_free_mem_cnic - release driver's memory for cnic.
321 *
322 * @bp: driver handle
323 */
324void bnx2x_free_mem_cnic(struct bnx2x *bp);
325/**
306 * bnx2x_free_mem - release driver's memory. 326 * bnx2x_free_mem - release driver's memory.
307 * 327 *
308 * @bp: driver handle 328 * @bp: driver handle
@@ -407,6 +427,7 @@ bool bnx2x_reset_is_done(struct bnx2x *bp, int engine);
407void bnx2x_set_reset_in_progress(struct bnx2x *bp); 427void bnx2x_set_reset_in_progress(struct bnx2x *bp);
408void bnx2x_set_reset_global(struct bnx2x *bp); 428void bnx2x_set_reset_global(struct bnx2x *bp);
409void bnx2x_disable_close_the_gate(struct bnx2x *bp); 429void bnx2x_disable_close_the_gate(struct bnx2x *bp);
430int bnx2x_init_hw_func_cnic(struct bnx2x *bp);
410 431
411/** 432/**
412 * bnx2x_sp_event - handle ramrods completion. 433 * bnx2x_sp_event - handle ramrods completion.
@@ -424,6 +445,14 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);
424void bnx2x_ilt_set_info(struct bnx2x *bp); 445void bnx2x_ilt_set_info(struct bnx2x *bp);
425 446
426/** 447/**
 448 * bnx2x_ilt_set_info_cnic - prepare ILT configurations for SRC
449 * and TM.
450 *
451 * @bp: driver handle
452 */
453void bnx2x_ilt_set_info_cnic(struct bnx2x *bp);
454
455/**
427 * bnx2x_dcbx_init - initialize dcbx protocol. 456 * bnx2x_dcbx_init - initialize dcbx protocol.
428 * 457 *
429 * @bp: driver handle 458 * @bp: driver handle
@@ -491,12 +520,17 @@ int bnx2x_resume(struct pci_dev *pdev);
491/* Release IRQ vectors */ 520/* Release IRQ vectors */
492void bnx2x_free_irq(struct bnx2x *bp); 521void bnx2x_free_irq(struct bnx2x *bp);
493 522
523void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
494void bnx2x_free_fp_mem(struct bnx2x *bp); 524void bnx2x_free_fp_mem(struct bnx2x *bp);
525int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
495int bnx2x_alloc_fp_mem(struct bnx2x *bp); 526int bnx2x_alloc_fp_mem(struct bnx2x *bp);
496void bnx2x_init_rx_rings(struct bnx2x *bp); 527void bnx2x_init_rx_rings(struct bnx2x *bp);
528void bnx2x_init_rx_rings_cnic(struct bnx2x *bp);
529void bnx2x_free_skbs_cnic(struct bnx2x *bp);
497void bnx2x_free_skbs(struct bnx2x *bp); 530void bnx2x_free_skbs(struct bnx2x *bp);
498void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw); 531void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
499void bnx2x_netif_start(struct bnx2x *bp); 532void bnx2x_netif_start(struct bnx2x *bp);
533int bnx2x_load_cnic(struct bnx2x *bp);
500 534
501/** 535/**
502 * bnx2x_enable_msix - set msix configuration. 536 * bnx2x_enable_msix - set msix configuration.
@@ -547,7 +581,7 @@ void bnx2x_free_mem_bp(struct bnx2x *bp);
547 */ 581 */
548int bnx2x_change_mtu(struct net_device *dev, int new_mtu); 582int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
549 583
550#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC) 584#ifdef NETDEV_FCOE_WWNN
551/** 585/**
552 * bnx2x_fcoe_get_wwn - return the requested WWN value for this port 586 * bnx2x_fcoe_get_wwn - return the requested WWN value for this port
553 * 587 *
@@ -793,23 +827,39 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
793 sge->addr_lo = 0; 827 sge->addr_lo = 0;
794} 828}
795 829
796static inline void bnx2x_add_all_napi(struct bnx2x *bp) 830static inline void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
797{ 831{
798 int i; 832 int i;
799 833
800 bp->num_napi_queues = bp->num_queues; 834 /* Add NAPI objects */
835 for_each_rx_queue_cnic(bp, i)
836 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
837 bnx2x_poll, BNX2X_NAPI_WEIGHT);
838}
839
840static inline void bnx2x_add_all_napi(struct bnx2x *bp)
841{
842 int i;
801 843
802 /* Add NAPI objects */ 844 /* Add NAPI objects */
803 for_each_rx_queue(bp, i) 845 for_each_eth_queue(bp, i)
804 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), 846 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
805 bnx2x_poll, BNX2X_NAPI_WEIGHT); 847 bnx2x_poll, BNX2X_NAPI_WEIGHT);
806} 848}
807 849
850static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp)
851{
852 int i;
853
854 for_each_rx_queue_cnic(bp, i)
855 netif_napi_del(&bnx2x_fp(bp, i, napi));
856}
857
808static inline void bnx2x_del_all_napi(struct bnx2x *bp) 858static inline void bnx2x_del_all_napi(struct bnx2x *bp)
809{ 859{
810 int i; 860 int i;
811 861
812 for_each_rx_queue(bp, i) 862 for_each_eth_queue(bp, i)
813 netif_napi_del(&bnx2x_fp(bp, i, napi)); 863 netif_napi_del(&bnx2x_fp(bp, i, napi));
814} 864}
815 865
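The cnic NAPI contexts are registered separately and later than the ethernet ones. A trivial model of the two-phase registration (stub functions, not the driver's):

	#include <stdio.h>

	static void add_all_napi(void)      { puts("register eth napi contexts");  }
	static void add_all_napi_cnic(void) { puts("register cnic napi contexts"); }

	int main(void)
	{
		add_all_napi();       /* done in bnx2x_nic_load()              */
		add_all_napi_cnic();  /* done later, only in bnx2x_load_cnic() */
		return 0;
	}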
@@ -979,11 +1029,9 @@ static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp)
979{ 1029{
980 struct bnx2x *bp = fp->bp; 1030 struct bnx2x *bp = fp->bp;
981 if (!CHIP_IS_E1x(bp)) { 1031 if (!CHIP_IS_E1x(bp)) {
982#ifdef BCM_CNIC
983 /* there are special statistics counters for FCoE 136..140 */ 1032 /* there are special statistics counters for FCoE 136..140 */
984 if (IS_FCOE_FP(fp)) 1033 if (IS_FCOE_FP(fp))
985 return bp->cnic_base_cl_id + (bp->pf_num >> 1); 1034 return bp->cnic_base_cl_id + (bp->pf_num >> 1);
986#endif
987 return fp->cl_id; 1035 return fp->cl_id;
988 } 1036 }
989 return fp->cl_id + BP_PORT(bp) * FP_SB_MAX_E1x; 1037 return fp->cl_id + BP_PORT(bp) * FP_SB_MAX_E1x;
@@ -1102,7 +1150,6 @@ static inline void bnx2x_init_txdata(struct bnx2x *bp,
1102 txdata->cid, txdata->txq_index); 1150 txdata->cid, txdata->txq_index);
1103} 1151}
1104 1152
1105#ifdef BCM_CNIC
1106static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx) 1153static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
1107{ 1154{
1108 return bp->cnic_base_cl_id + cl_idx + 1155 return bp->cnic_base_cl_id + cl_idx +
@@ -1162,7 +1209,6 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
1162 fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, 1209 fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
1163 fp->igu_sb_id); 1210 fp->igu_sb_id);
1164} 1211}
1165#endif
1166 1212
1167static inline int bnx2x_clean_tx_queue(struct bnx2x *bp, 1213static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
1168 struct bnx2x_fp_txdata *txdata) 1214 struct bnx2x_fp_txdata *txdata)
@@ -1280,7 +1326,7 @@ static inline bool bnx2x_mtu_allows_gro(int mtu)
1280 */ 1326 */
1281 return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS; 1327 return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
1282} 1328}
1283#ifdef BCM_CNIC 1329
1284/** 1330/**
1285 * bnx2x_get_iscsi_info - update iSCSI params according to licensing info. 1331 * bnx2x_get_iscsi_info - update iSCSI params according to licensing info.
1286 * 1332 *
@@ -1288,7 +1334,6 @@ static inline bool bnx2x_mtu_allows_gro(int mtu)
1288 * 1334 *
1289 */ 1335 */
1290void bnx2x_get_iscsi_info(struct bnx2x *bp); 1336void bnx2x_get_iscsi_info(struct bnx2x *bp);
1291#endif
1292 1337
1293/** 1338/**
1294 * bnx2x_link_sync_notify - send notification to other functions. 1339 * bnx2x_link_sync_notify - send notification to other functions.
@@ -1340,13 +1385,11 @@ static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
1340 1385
1341static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr) 1386static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
1342{ 1387{
1343 if (is_valid_ether_addr(addr)) 1388 if (is_valid_ether_addr(addr) ||
1389 (is_zero_ether_addr(addr) &&
1390 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))))
1344 return true; 1391 return true;
1345#ifdef BCM_CNIC 1392
1346 if (is_zero_ether_addr(addr) &&
1347 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)))
1348 return true;
1349#endif
1350 return false; 1393 return false;
1351} 1394}
1352 1395
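The predicate now folds the storage-only zero-MAC exception into one expression. A standalone model of the same logic (the helpers are stand-ins for the kernel's is_valid_ether_addr()/is_zero_ether_addr()):

	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	static bool is_zero(const unsigned char *a)
	{
		static const unsigned char z[6];
		return memcmp(a, z, 6) == 0;
	}

	static bool is_valid(const unsigned char *a)
	{
		return !is_zero(a) && !(a[0] & 1); /* non-zero, not multicast */
	}

	/* Storage-only (iSCSI/FCoE) functions may legitimately carry an
	 * all-zero MAC, so the predicate accepts that case too. */
	static bool bnx2x_like_valid(const unsigned char *a, bool storage_fn)
	{
		return is_valid(a) || (is_zero(a) && storage_fn);
	}

	int main(void)
	{
		unsigned char zero[6] = {0};
		printf("%d %d\n", bnx2x_like_valid(zero, false),
		       bnx2x_like_valid(zero, true)); /* prints: 0 1 */
		return 0;
	}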
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 2245c3895409..cba4a16ab86a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -1908,10 +1908,10 @@ static void bnx2x_dcbnl_get_perm_hw_addr(struct net_device *netdev,
1908 /* first the HW mac address */ 1908 /* first the HW mac address */
1909 memcpy(perm_addr, netdev->dev_addr, netdev->addr_len); 1909 memcpy(perm_addr, netdev->dev_addr, netdev->addr_len);
1910 1910
1911#ifdef BCM_CNIC 1911 if (CNIC_LOADED(bp))
1912 /* second SAN address */ 1912 /* second SAN address */
1913 memcpy(perm_addr+netdev->addr_len, bp->fip_mac, netdev->addr_len); 1913 memcpy(perm_addr+netdev->addr_len, bp->fip_mac,
1914#endif 1914 netdev->addr_len);
1915} 1915}
1916 1916
1917static void bnx2x_dcbnl_set_pg_tccfg_tx(struct net_device *netdev, int prio, 1917static void bnx2x_dcbnl_set_pg_tccfg_tx(struct net_device *netdev, int prio,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 6e5bdd1a31d9..c40c0253e105 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -2901,7 +2901,9 @@ static void bnx2x_get_channels(struct net_device *dev,
2901static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss) 2901static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss)
2902{ 2902{
2903 bnx2x_disable_msi(bp); 2903 bnx2x_disable_msi(bp);
2904 BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE; 2904 bp->num_ethernet_queues = num_rss;
2905 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
2906 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
2905 bnx2x_set_int_mode(bp); 2907 bnx2x_set_int_mode(bp);
2906} 2908}
2907 2909
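ethtool channel changes now recompute the total from the two bookkeeping fields instead of assigning it directly. A compilable sketch of the accounting with example values:

	#include <stdio.h>

	int main(void)
	{
		int num_cnic_queues = 1;  /* example */
		int num_rss         = 8;  /* requested via ethtool set_channels */

		int num_ethernet_queues = num_rss;
		int num_queues = num_ethernet_queues + num_cnic_queues;

		printf("queues: %d eth + %d cnic = %d total\n",
		       num_ethernet_queues, num_cnic_queues, num_queues);
		return 0;
	}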
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 18704929e642..7eaa74b78a5b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -4845,9 +4845,17 @@ struct vif_list_event_data {
4845 __le32 reserved2; 4845 __le32 reserved2;
4846}; 4846};
4847 4847
4848/* 4848/* function update event data */
4849 * union for all event ring message types 4849struct function_update_event_data {
4850 */ 4850 u8 echo;
4851 u8 reserved;
4852 __le16 reserved0;
4853 __le32 reserved1;
4854 __le32 reserved2;
4855};
4856
4857
4858/* union for all event ring message types */
4851union event_data { 4859union event_data {
4852 struct vf_pf_event_data vf_pf_event; 4860 struct vf_pf_event_data vf_pf_event;
4853 struct eth_event_data eth_event; 4861 struct eth_event_data eth_event;
@@ -4855,6 +4863,7 @@ union event_data {
4855 struct vf_flr_event_data vf_flr_event; 4863 struct vf_flr_event_data vf_flr_event;
4856 struct malicious_vf_event_data malicious_vf_event; 4864 struct malicious_vf_event_data malicious_vf_event;
4857 struct vif_list_event_data vif_list_event; 4865 struct vif_list_event_data vif_list_event;
4866 struct function_update_event_data function_update_event;
4858}; 4867};
4859 4868
4860 4869
@@ -4984,8 +4993,10 @@ struct function_update_data {
4984 u8 allowed_priorities; 4993 u8 allowed_priorities;
4985 u8 network_cos_mode; 4994 u8 network_cos_mode;
4986 u8 lb_mode_en; 4995 u8 lb_mode_en;
4987 u8 reserved0; 4996 u8 tx_switch_suspend_change_flg;
4988 __le32 reserved1; 4997 u8 tx_switch_suspend;
4998 u8 echo;
4999 __le16 reserved1;
4989}; 5000};
4990 5001
4991 5002
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
index fe66d902dc62..d755acfe7a40 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
@@ -648,15 +648,25 @@ static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num,
648 return rc; 648 return rc;
649} 649}
650 650
651static int bnx2x_ilt_mem_op_cnic(struct bnx2x *bp, u8 memop)
652{
653 int rc = 0;
654
655 if (CONFIGURE_NIC_MODE(bp))
656 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop);
657 if (!rc)
658 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_TM, memop);
659
660 return rc;
661}
662
651static int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop) 663static int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
652{ 664{
653 int rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop); 665 int rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop);
654 if (!rc) 666 if (!rc)
655 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_QM, memop); 667 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_QM, memop);
656 if (!rc) 668 if (!rc && CNIC_SUPPORT(bp) && !CONFIGURE_NIC_MODE(bp))
657 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop); 669 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop);
658 if (!rc)
659 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_TM, memop);
660 670
661 return rc; 671 return rc;
662} 672}
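SRC pages move under the cnic helper when the function starts in NIC mode and stay in the common path otherwise, so exactly one of the two walks touches SRC. A userspace model of that split (the boolean parameters stand in for CNIC_SUPPORT(bp) and CONFIGURE_NIC_MODE(bp)):

	#include <stdbool.h>
	#include <stdio.h>

	static int ilt_client_mem_op(const char *cli)
	{
		printf("memop %s\n", cli);
		return 0;
	}

	static int ilt_mem_op(bool cnic_support, bool nic_mode)
	{
		int rc = ilt_client_mem_op("CDU");
		if (!rc)
			rc = ilt_client_mem_op("QM");
		if (!rc && cnic_support && !nic_mode)
			rc = ilt_client_mem_op("SRC");
		return rc;
	}

	static int ilt_mem_op_cnic(bool nic_mode)
	{
		int rc = 0;
		if (nic_mode)                    /* SRC was deferred until now */
			rc = ilt_client_mem_op("SRC");
		if (!rc)
			rc = ilt_client_mem_op("TM");
		return rc;
	}

	int main(void)
	{
		ilt_mem_op(true, true);      /* common path, SRC skipped */
		ilt_mem_op_cnic(true);       /* cnic path, SRC + TM      */
		return 0;
	}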
@@ -781,12 +791,19 @@ static void bnx2x_ilt_client_id_init_op(struct bnx2x *bp,
781 bnx2x_ilt_client_init_op(bp, ilt_cli, initop); 791 bnx2x_ilt_client_init_op(bp, ilt_cli, initop);
782} 792}
783 793
794static void bnx2x_ilt_init_op_cnic(struct bnx2x *bp, u8 initop)
795{
796 if (CONFIGURE_NIC_MODE(bp))
797 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop);
798 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_TM, initop);
799}
800
784static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop) 801static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
785{ 802{
786 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop); 803 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop);
787 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop); 804 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop);
788 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop); 805 if (CNIC_SUPPORT(bp) && !CONFIGURE_NIC_MODE(bp))
789 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_TM, initop); 806 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop);
790} 807}
791 808
792static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num, 809static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num,
@@ -890,7 +907,6 @@ static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
890/**************************************************************************** 907/****************************************************************************
891* SRC initializations 908* SRC initializations
892****************************************************************************/ 909****************************************************************************/
893#ifdef BCM_CNIC
894/* called during init func stage */ 910/* called during init func stage */
895static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2, 911static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
896 dma_addr_t t2_mapping, int src_cid_count) 912 dma_addr_t t2_mapping, int src_cid_count)
@@ -915,5 +931,4 @@ static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
915 U64_HI((u64)t2_mapping + 931 U64_HI((u64)t2_mapping +
916 (src_cid_count-1) * sizeof(struct src_ent))); 932 (src_cid_count-1) * sizeof(struct src_ent)));
917} 933}
918#endif
919#endif /* BNX2X_INIT_OPS_H */ 934#endif /* BNX2X_INIT_OPS_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index f6cfdc6cf20f..cd002943fac8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -12066,7 +12066,7 @@ void bnx2x_init_xgxs_loopback(struct link_params *params,
12066 bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed); 12066 bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
12067} 12067}
12068 12068
12069static void bnx2x_set_rx_filter(struct link_params *params, u8 en) 12069void bnx2x_set_rx_filter(struct link_params *params, u8 en)
12070{ 12070{
12071 struct bnx2x *bp = params->bp; 12071 struct bnx2x *bp = params->bp;
12072 u8 val = en * 0x1F; 12072 u8 val = en * 0x1F;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 9165b89a4b19..ba981ced628b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -432,7 +432,8 @@ int bnx2x_phy_probe(struct link_params *params);
432u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base, 432u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base,
433 u32 shmem2_base, u8 port); 433 u32 shmem2_base, u8 port);
434 434
435 435/* Open / close the gate between the NIG and the BRB */
436void bnx2x_set_rx_filter(struct link_params *params, u8 en);
436 437
437/* DCBX structs */ 438/* DCBX structs */
438 439
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index bd1fd3d87c24..04b9f0ab183b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -791,10 +791,9 @@ void bnx2x_panic_dump(struct bnx2x *bp)
791 791
792 /* host sb data */ 792 /* host sb data */
793 793
794#ifdef BCM_CNIC
795 if (IS_FCOE_FP(fp)) 794 if (IS_FCOE_FP(fp))
796 continue; 795 continue;
797#endif 796
798 BNX2X_ERR(" run indexes ("); 797 BNX2X_ERR(" run indexes (");
799 for (j = 0; j < HC_SB_MAX_SM; j++) 798 for (j = 0; j < HC_SB_MAX_SM; j++)
800 pr_cont("0x%x%s", 799 pr_cont("0x%x%s",
@@ -859,7 +858,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
859#ifdef BNX2X_STOP_ON_ERROR 858#ifdef BNX2X_STOP_ON_ERROR
860 /* Rings */ 859 /* Rings */
861 /* Rx */ 860 /* Rx */
862 for_each_rx_queue(bp, i) { 861 for_each_valid_rx_queue(bp, i) {
863 struct bnx2x_fastpath *fp = &bp->fp[i]; 862 struct bnx2x_fastpath *fp = &bp->fp[i];
864 863
865 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10); 864 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
@@ -893,7 +892,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
893 } 892 }
894 893
895 /* Tx */ 894 /* Tx */
896 for_each_tx_queue(bp, i) { 895 for_each_valid_tx_queue(bp, i) {
897 struct bnx2x_fastpath *fp = &bp->fp[i]; 896 struct bnx2x_fastpath *fp = &bp->fp[i];
898 for_each_cos_in_tx_queue(fp, cos) { 897 for_each_cos_in_tx_queue(fp, cos) {
899 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; 898 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
@@ -1504,9 +1503,8 @@ void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
1504 if (msix) { 1503 if (msix) {
1505 synchronize_irq(bp->msix_table[0].vector); 1504 synchronize_irq(bp->msix_table[0].vector);
1506 offset = 1; 1505 offset = 1;
1507#ifdef BCM_CNIC 1506 if (CNIC_SUPPORT(bp))
1508 offset++; 1507 offset++;
1509#endif
1510 for_each_eth_queue(bp, i) 1508 for_each_eth_queue(bp, i)
1511 synchronize_irq(bp->msix_table[offset++].vector); 1509 synchronize_irq(bp->msix_table[offset++].vector);
1512 } else 1510 } else
@@ -1588,9 +1586,8 @@ static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
1588 return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp)); 1586 return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
1589} 1587}
1590 1588
1591#ifdef BCM_CNIC
1592static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err); 1589static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
1593#endif 1590
1594 1591
1595void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) 1592void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
1596{ 1593{
@@ -1720,7 +1717,7 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1720 for_each_eth_queue(bp, i) { 1717 for_each_eth_queue(bp, i) {
1721 struct bnx2x_fastpath *fp = &bp->fp[i]; 1718 struct bnx2x_fastpath *fp = &bp->fp[i];
1722 1719
1723 mask = 0x2 << (fp->index + CNIC_PRESENT); 1720 mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
1724 if (status & mask) { 1721 if (status & mask) {
1725 /* Handle Rx or Tx according to SB id */ 1722 /* Handle Rx or Tx according to SB id */
1726 prefetch(fp->rx_cons_sb); 1723 prefetch(fp->rx_cons_sb);
@@ -1732,22 +1729,23 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1732 } 1729 }
1733 } 1730 }
1734 1731
1735#ifdef BCM_CNIC 1732 if (CNIC_SUPPORT(bp)) {
1736 mask = 0x2; 1733 mask = 0x2;
1737 if (status & (mask | 0x1)) { 1734 if (status & (mask | 0x1)) {
1738 struct cnic_ops *c_ops = NULL; 1735 struct cnic_ops *c_ops = NULL;
1739 1736
1740 if (likely(bp->state == BNX2X_STATE_OPEN)) { 1737 if (likely(bp->state == BNX2X_STATE_OPEN)) {
1741 rcu_read_lock(); 1738 rcu_read_lock();
1742 c_ops = rcu_dereference(bp->cnic_ops); 1739 c_ops = rcu_dereference(bp->cnic_ops);
1743 if (c_ops) 1740 if (c_ops)
1744 c_ops->cnic_handler(bp->cnic_data, NULL); 1741 c_ops->cnic_handler(bp->cnic_data,
1745 rcu_read_unlock(); 1742 NULL);
1746 } 1743 rcu_read_unlock();
1744 }
1747 1745
1748 status &= ~mask; 1746 status &= ~mask;
1747 }
1749 } 1748 }
1750#endif
1751 1749
1752 if (unlikely(status & 0x1)) { 1750 if (unlikely(status & 0x1)) {
1753 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); 1751 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
@@ -3075,11 +3073,13 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
3075 3073
3076static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp) 3074static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
3077{ 3075{
3078#ifdef BCM_CNIC
3079 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app; 3076 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3080 struct fcoe_stats_info *fcoe_stat = 3077 struct fcoe_stats_info *fcoe_stat =
3081 &bp->slowpath->drv_info_to_mcp.fcoe_stat; 3078 &bp->slowpath->drv_info_to_mcp.fcoe_stat;
3082 3079
3080 if (!CNIC_LOADED(bp))
3081 return;
3082
3083 memcpy(fcoe_stat->mac_local + MAC_LEADING_ZERO_CNT, 3083 memcpy(fcoe_stat->mac_local + MAC_LEADING_ZERO_CNT,
3084 bp->fip_mac, ETH_ALEN); 3084 bp->fip_mac, ETH_ALEN);
3085 3085
@@ -3162,16 +3162,17 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
3162 3162
3163 /* ask L5 driver to add data to the struct */ 3163 /* ask L5 driver to add data to the struct */
3164 bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD); 3164 bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
3165#endif
3166} 3165}
3167 3166
3168static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp) 3167static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
3169{ 3168{
3170#ifdef BCM_CNIC
3171 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app; 3169 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3172 struct iscsi_stats_info *iscsi_stat = 3170 struct iscsi_stats_info *iscsi_stat =
3173 &bp->slowpath->drv_info_to_mcp.iscsi_stat; 3171 &bp->slowpath->drv_info_to_mcp.iscsi_stat;
3174 3172
3173 if (!CNIC_LOADED(bp))
3174 return;
3175
3175 memcpy(iscsi_stat->mac_local + MAC_LEADING_ZERO_CNT, 3176 memcpy(iscsi_stat->mac_local + MAC_LEADING_ZERO_CNT,
3176 bp->cnic_eth_dev.iscsi_mac, ETH_ALEN); 3177 bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
3177 3178
@@ -3180,7 +3181,6 @@ static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
3180 3181
3181 /* ask L5 driver to add data to the struct */ 3182 /* ask L5 driver to add data to the struct */
3182 bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD); 3183 bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
3183#endif
3184} 3184}
3185 3185
3186/* called due to MCP event (on pmf): 3186/* called due to MCP event (on pmf):
@@ -4572,7 +4572,6 @@ static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
4572 mmiowb(); /* keep prod updates ordered */ 4572 mmiowb(); /* keep prod updates ordered */
4573} 4573}
4574 4574
4575#ifdef BCM_CNIC
4576static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid, 4575static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
4577 union event_ring_elem *elem) 4576 union event_ring_elem *elem)
4578{ 4577{
@@ -4594,7 +4593,6 @@ static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
4594 bnx2x_cnic_cfc_comp(bp, cid, err); 4593 bnx2x_cnic_cfc_comp(bp, cid, err);
4595 return 0; 4594 return 0;
4596} 4595}
4597#endif
4598 4596
4599static void bnx2x_handle_mcast_eqe(struct bnx2x *bp) 4597static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
4600{ 4598{
@@ -4635,11 +4633,9 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
4635 switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) { 4633 switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
4636 case BNX2X_FILTER_MAC_PENDING: 4634 case BNX2X_FILTER_MAC_PENDING:
4637 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n"); 4635 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
4638#ifdef BCM_CNIC 4636 if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
4639 if (cid == BNX2X_ISCSI_ETH_CID(bp))
4640 vlan_mac_obj = &bp->iscsi_l2_mac_obj; 4637 vlan_mac_obj = &bp->iscsi_l2_mac_obj;
4641 else 4638 else
4642#endif
4643 vlan_mac_obj = &bp->sp_objs[cid].mac_obj; 4639 vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
4644 4640
4645 break; 4641 break;
@@ -4665,9 +4661,7 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
4665 4661
4666} 4662}
4667 4663
4668#ifdef BCM_CNIC
4669static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start); 4664static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
4670#endif
4671 4665
4672static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp) 4666static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
4673{ 4667{
@@ -4678,14 +4672,12 @@ static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
4678 /* Send rx_mode command again if was requested */ 4672 /* Send rx_mode command again if was requested */
4679 if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state)) 4673 if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
4680 bnx2x_set_storm_rx_mode(bp); 4674 bnx2x_set_storm_rx_mode(bp);
4681#ifdef BCM_CNIC
4682 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, 4675 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
4683 &bp->sp_state)) 4676 &bp->sp_state))
4684 bnx2x_set_iscsi_eth_rx_mode(bp, true); 4677 bnx2x_set_iscsi_eth_rx_mode(bp, true);
4685 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, 4678 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
4686 &bp->sp_state)) 4679 &bp->sp_state))
4687 bnx2x_set_iscsi_eth_rx_mode(bp, false); 4680 bnx2x_set_iscsi_eth_rx_mode(bp, false);
4688#endif
4689 4681
4690 netif_addr_unlock_bh(bp->dev); 4682 netif_addr_unlock_bh(bp->dev);
4691} 4683}
@@ -4747,7 +4739,6 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
4747 q); 4739 q);
4748 } 4740 }
4749 4741
4750#ifdef BCM_CNIC
4751 if (!NO_FCOE(bp)) { 4742 if (!NO_FCOE(bp)) {
4752 fp = &bp->fp[FCOE_IDX(bp)]; 4743 fp = &bp->fp[FCOE_IDX(bp)];
4753 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; 4744 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
@@ -4770,22 +4761,16 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
4770 bnx2x_link_report(bp); 4761 bnx2x_link_report(bp);
4771 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0); 4762 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
4772 } 4763 }
4773#else
4774 /* If no FCoE ring - ACK MCP now */
4775 bnx2x_link_report(bp);
4776 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
4777#endif /* BCM_CNIC */
4778} 4764}
4779 4765
4780static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj( 4766static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
4781 struct bnx2x *bp, u32 cid) 4767 struct bnx2x *bp, u32 cid)
4782{ 4768{
4783 DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid); 4769 DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
4784#ifdef BCM_CNIC 4770
4785 if (cid == BNX2X_FCOE_ETH_CID(bp)) 4771 if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp)))
4786 return &bnx2x_fcoe_sp_obj(bp, q_obj); 4772 return &bnx2x_fcoe_sp_obj(bp, q_obj);
4787 else 4773 else
4788#endif
4789 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj; 4774 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
4790} 4775}
4791 4776
@@ -4793,6 +4778,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
4793{ 4778{
4794 u16 hw_cons, sw_cons, sw_prod; 4779 u16 hw_cons, sw_cons, sw_prod;
4795 union event_ring_elem *elem; 4780 union event_ring_elem *elem;
4781 u8 echo;
4796 u32 cid; 4782 u32 cid;
4797 u8 opcode; 4783 u8 opcode;
4798 int spqe_cnt = 0; 4784 int spqe_cnt = 0;
@@ -4847,10 +4833,11 @@ static void bnx2x_eq_int(struct bnx2x *bp)
4847 */ 4833 */
4848 DP(BNX2X_MSG_SP, 4834 DP(BNX2X_MSG_SP,
4849 "got delete ramrod for MULTI[%d]\n", cid); 4835 "got delete ramrod for MULTI[%d]\n", cid);
4850#ifdef BCM_CNIC 4836
4851 if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem)) 4837 if (CNIC_LOADED(bp) &&
4838 !bnx2x_cnic_handle_cfc_del(bp, cid, elem))
4852 goto next_spqe; 4839 goto next_spqe;
4853#endif 4840
4854 q_obj = bnx2x_cid_to_q_obj(bp, cid); 4841 q_obj = bnx2x_cid_to_q_obj(bp, cid);
4855 4842
4856 if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL)) 4843 if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
@@ -4875,21 +4862,34 @@ static void bnx2x_eq_int(struct bnx2x *bp)
4875 break; 4862 break;
4876 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); 4863 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
4877 goto next_spqe; 4864 goto next_spqe;
4865
4878 case EVENT_RING_OPCODE_FUNCTION_UPDATE: 4866 case EVENT_RING_OPCODE_FUNCTION_UPDATE:
4879 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP, 4867 echo = elem->message.data.function_update_event.echo;
4880 "AFEX: ramrod completed FUNCTION_UPDATE\n"); 4868 if (echo == SWITCH_UPDATE) {
4881 f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_AFEX_UPDATE); 4869 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4870 "got FUNC_SWITCH_UPDATE ramrod\n");
4871 if (f_obj->complete_cmd(
4872 bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE))
4873 break;
4882 4874
4883 /* We will perform the Queues update from sp_rtnl task 4875 } else {
4884 * as all Queue SP operations should run under 4876 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
4885 * rtnl_lock. 4877 "AFEX: ramrod completed FUNCTION_UPDATE\n");
4886 */ 4878 f_obj->complete_cmd(bp, f_obj,
4887 smp_mb__before_clear_bit(); 4879 BNX2X_F_CMD_AFEX_UPDATE);
4888 set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, 4880
4889 &bp->sp_rtnl_state); 4881 /* We will perform the Queues update from
4890 smp_mb__after_clear_bit(); 4882 * sp_rtnl task as all Queue SP operations
4883 * should run under rtnl_lock.
4884 */
4885 smp_mb__before_clear_bit();
4886 set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
4887 &bp->sp_rtnl_state);
4888 smp_mb__after_clear_bit();
4889
4890 schedule_delayed_work(&bp->sp_rtnl_task, 0);
4891 }
4891 4892
4892 schedule_delayed_work(&bp->sp_rtnl_task, 0);
4893 goto next_spqe; 4893 goto next_spqe;
4894 4894
4895 case EVENT_RING_OPCODE_AFEX_VIF_LISTS: 4895 case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
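FUNCTION_UPDATE completions are now demultiplexed on the new echo field: SWITCH_UPDATE completes inline, while the AFEX path still defers queue updates to the sp_rtnl task because they must run under rtnl_lock. A minimal model of the dispatch (the enum values are invented; the real ones come from the HSI):

	#include <stdio.h>

	enum { SWITCH_UPDATE = 1, AFEX_UPDATE = 2 }; /* illustrative values */

	static void handle_function_update(unsigned char echo)
	{
		if (echo == SWITCH_UPDATE) {
			puts("complete SWITCH_UPDATE command inline");
		} else {
			/* AFEX: queue updates need rtnl_lock, so hand
			 * them off to deferred sp_rtnl work */
			puts("complete AFEX_UPDATE, schedule sp_rtnl work");
		}
	}

	int main(void)
	{
		handle_function_update(SWITCH_UPDATE);
		handle_function_update(AFEX_UPDATE);
		return 0;
	}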
@@ -4999,11 +4999,10 @@ static void bnx2x_sp_task(struct work_struct *work)
4999 4999
5000 /* SP events: STAT_QUERY and others */ 5000 /* SP events: STAT_QUERY and others */
5001 if (status & BNX2X_DEF_SB_IDX) { 5001 if (status & BNX2X_DEF_SB_IDX) {
5002#ifdef BCM_CNIC
5003 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); 5002 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
5004 5003
5005 if ((!NO_FCOE(bp)) && 5004 if (FCOE_INIT(bp) &&
5006 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { 5005 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
5007 /* 5006 /*
5008 * Prevent local bottom-halves from running as 5007 * Prevent local bottom-halves from running as
5009 * we are going to change the local NAPI list. 5008 * we are going to change the local NAPI list.
@@ -5012,7 +5011,7 @@ static void bnx2x_sp_task(struct work_struct *work)
5012 napi_schedule(&bnx2x_fcoe(bp, napi)); 5011 napi_schedule(&bnx2x_fcoe(bp, napi));
5013 local_bh_enable(); 5012 local_bh_enable();
5014 } 5013 }
5015#endif 5014
5016 /* Handle EQ completions */ 5015 /* Handle EQ completions */
5017 bnx2x_eq_int(bp); 5016 bnx2x_eq_int(bp);
5018 5017
@@ -5050,8 +5049,7 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
5050 return IRQ_HANDLED; 5049 return IRQ_HANDLED;
5051#endif 5050#endif
5052 5051
5053#ifdef BCM_CNIC 5052 if (CNIC_LOADED(bp)) {
5054 {
5055 struct cnic_ops *c_ops; 5053 struct cnic_ops *c_ops;
5056 5054
5057 rcu_read_lock(); 5055 rcu_read_lock();
@@ -5060,7 +5058,7 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
5060 c_ops->cnic_handler(bp->cnic_data, NULL); 5058 c_ops->cnic_handler(bp->cnic_data, NULL);
5061 rcu_read_unlock(); 5059 rcu_read_unlock();
5062 } 5060 }
5063#endif 5061
5064 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); 5062 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
5065 5063
5066 return IRQ_HANDLED; 5064 return IRQ_HANDLED;
@@ -5498,12 +5496,10 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5498 unsigned long rx_mode_flags = 0, ramrod_flags = 0; 5496 unsigned long rx_mode_flags = 0, ramrod_flags = 0;
5499 unsigned long rx_accept_flags = 0, tx_accept_flags = 0; 5497 unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
5500 5498
5501#ifdef BCM_CNIC
5502 if (!NO_FCOE(bp)) 5499 if (!NO_FCOE(bp))
5503 5500
5504 /* Configure rx_mode of FCoE Queue */ 5501 /* Configure rx_mode of FCoE Queue */
5505 __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags); 5502 __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
5506#endif
5507 5503
5508 switch (bp->rx_mode) { 5504 switch (bp->rx_mode) {
5509 case BNX2X_RX_MODE_NONE: 5505 case BNX2X_RX_MODE_NONE:
@@ -5624,12 +5620,12 @@ static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5624 5620
5625static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp) 5621static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
5626{ 5622{
5627 return fp->bp->igu_base_sb + fp->index + CNIC_PRESENT; 5623 return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp);
5628} 5624}
5629 5625
5630static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp) 5626static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
5631{ 5627{
5632 return fp->bp->base_fw_ndsb + fp->index + CNIC_PRESENT; 5628 return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp);
5633} 5629}
5634 5630
5635static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp) 5631static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
@@ -5720,23 +5716,25 @@ static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
5720 txdata->tx_pkt = 0; 5716 txdata->tx_pkt = 0;
5721} 5717}
5722 5718
5719static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp)
5720{
5721 int i;
5722
5723 for_each_tx_queue_cnic(bp, i)
5724 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]);
5725}
5723static void bnx2x_init_tx_rings(struct bnx2x *bp) 5726static void bnx2x_init_tx_rings(struct bnx2x *bp)
5724{ 5727{
5725 int i; 5728 int i;
5726 u8 cos; 5729 u8 cos;
5727 5730
5728 for_each_tx_queue(bp, i) 5731 for_each_eth_queue(bp, i)
5729 for_each_cos_in_tx_queue(&bp->fp[i], cos) 5732 for_each_cos_in_tx_queue(&bp->fp[i], cos)
5730 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]); 5733 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
5731} 5734}
5732 5735
5733void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) 5736void bnx2x_nic_init_cnic(struct bnx2x *bp)
5734{ 5737{
5735 int i;
5736
5737 for_each_eth_queue(bp, i)
5738 bnx2x_init_eth_fp(bp, i);
5739#ifdef BCM_CNIC
5740 if (!NO_FCOE(bp)) 5738 if (!NO_FCOE(bp))
5741 bnx2x_init_fcoe_fp(bp); 5739 bnx2x_init_fcoe_fp(bp);
5742 5740
@@ -5744,8 +5742,22 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5744 BNX2X_VF_ID_INVALID, false, 5742 BNX2X_VF_ID_INVALID, false,
5745 bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp)); 5743 bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
5746 5744
5747#endif 5745 /* ensure status block indices were read */
5746 rmb();
5747 bnx2x_init_rx_rings_cnic(bp);
5748 bnx2x_init_tx_rings_cnic(bp);
5749
5750 /* flush all */
5751 mb();
5752 mmiowb();
5753}
5748 5754
5755void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5756{
5757 int i;
5758
5759 for_each_eth_queue(bp, i)
5760 bnx2x_init_eth_fp(bp, i);
5749 /* Initialize MOD_ABS interrupts */ 5761 /* Initialize MOD_ABS interrupts */
5750 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id, 5762 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
5751 bp->common.shmem_base, bp->common.shmem2_base, 5763 bp->common.shmem_base, bp->common.shmem2_base,
@@ -6031,10 +6043,9 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
6031 msleep(50); 6043 msleep(50);
6032 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); 6044 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6033 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); 6045 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6034#ifndef BCM_CNIC 6046 if (!CNIC_SUPPORT(bp))
6035 /* set NIC mode */ 6047 /* set NIC mode */
6036 REG_WR(bp, PRS_REG_NIC_MODE, 1); 6048 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6037#endif
6038 6049
6039 /* Enable inputs of parser neighbor blocks */ 6050 /* Enable inputs of parser neighbor blocks */
6040 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff); 6051 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
@@ -6522,9 +6533,8 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
6522 REG_WR(bp, QM_REG_SOFT_RESET, 1); 6533 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6523 REG_WR(bp, QM_REG_SOFT_RESET, 0); 6534 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6524 6535
6525#ifdef BCM_CNIC 6536 if (CNIC_SUPPORT(bp))
6526 bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON); 6537 bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
6527#endif
6528 6538
6529 bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON); 6539 bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
6530 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT); 6540 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
@@ -6611,18 +6621,18 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
 
 	bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);
 
-#ifdef BCM_CNIC
-	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
-	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
-	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
-	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
-	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
-	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
-	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
-	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
-	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
-	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
-#endif
+	if (CNIC_SUPPORT(bp)) {
+		REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
+		REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
+		REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
+		REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
+		REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
+		REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
+		REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
+		REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
+		REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
+		REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
+	}
 	REG_WR(bp, SRC_REG_SOFT_RST, 0);
 
 	if (sizeof(union cdu_context) != 1024)
@@ -6786,11 +6796,11 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
 	/* QM cid (connection) count */
 	bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
 
-#ifdef BCM_CNIC
-	bnx2x_init_block(bp, BLOCK_TM, init_phase);
-	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
-	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
-#endif
+	if (CNIC_SUPPORT(bp)) {
+		bnx2x_init_block(bp, BLOCK_TM, init_phase);
+		REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
+		REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
+	}
 
 	bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
 
@@ -6877,9 +6887,9 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
 		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
 	}
 
-#ifdef BCM_CNIC
-	bnx2x_init_block(bp, BLOCK_SRC, init_phase);
-#endif
+	if (CNIC_SUPPORT(bp))
+		bnx2x_init_block(bp, BLOCK_SRC, init_phase);
+
 	bnx2x_init_block(bp, BLOCK_CDU, init_phase);
 	bnx2x_init_block(bp, BLOCK_CFC, init_phase);
 
@@ -7040,6 +7050,130 @@ static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
7040 bnx2x_ilt_wr(bp, i, 0); 7050 bnx2x_ilt_wr(bp, i, 0);
7041} 7051}
7042 7052
7053
7054void bnx2x_init_searcher(struct bnx2x *bp)
7055{
7056 int port = BP_PORT(bp);
7057 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
7058 /* T1 hash bits value determines the T1 number of entries */
7059 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
7060}
7061
7062static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend)
7063{
7064 int rc;
7065 struct bnx2x_func_state_params func_params = {NULL};
7066 struct bnx2x_func_switch_update_params *switch_update_params =
7067 &func_params.params.switch_update;
7068
7069 /* Prepare parameters for function state transitions */
7070 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
7071 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
7072
7073 func_params.f_obj = &bp->func_obj;
7074 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
7075
7076 /* Function parameters */
7077 switch_update_params->suspend = suspend;
7078
7079 rc = bnx2x_func_state_change(bp, &func_params);
7080
7081 return rc;
7082}
7083
7084int bnx2x_reset_nic_mode(struct bnx2x *bp)
7085{
7086 int rc, i, port = BP_PORT(bp);
7087 int vlan_en = 0, mac_en[NUM_MACS];
7088
7089
7090 /* Close input from network */
7091 if (bp->mf_mode == SINGLE_FUNCTION) {
7092 bnx2x_set_rx_filter(&bp->link_params, 0);
7093 } else {
7094 vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN :
7095 NIG_REG_LLH0_FUNC_EN);
7096 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7097 NIG_REG_LLH0_FUNC_EN, 0);
7098 for (i = 0; i < NUM_MACS; i++) {
7099 mac_en[i] = REG_RD(bp, port ?
7100 (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7101 4 * i) :
7102 (NIG_REG_LLH0_FUNC_MEM_ENABLE +
7103 4 * i));
7104 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7105 4 * i) :
7106 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 0);
7107 }
7108 }
7109
7110 /* Close BMC to host */
7111 REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
7112 NIG_REG_P1_TX_MNG_HOST_ENABLE, 0);
7113
7114 /* Suspend Tx switching to the PF. Completion of this ramrod
7115 * further guarantees that all the packets of that PF / child
7116 * VFs in BRB were processed by the Parser, so it is safe to
7117 * change the NIC_MODE register.
7118 */
7119 rc = bnx2x_func_switch_update(bp, 1);
7120 if (rc) {
7121 BNX2X_ERR("Can't suspend tx-switching!\n");
7122 return rc;
7123 }
7124
7125 /* Change NIC_MODE register */
7126 REG_WR(bp, PRS_REG_NIC_MODE, 0);
7127
7128 /* Open input from network */
7129 if (bp->mf_mode == SINGLE_FUNCTION) {
7130 bnx2x_set_rx_filter(&bp->link_params, 1);
7131 } else {
7132 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7133 NIG_REG_LLH0_FUNC_EN, vlan_en);
7134 for (i = 0; i < NUM_MACS; i++) {
7135 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7136 4 * i) :
7137 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i),
7138 mac_en[i]);
7139 }
7140 }
7141
7142 /* Enable BMC to host */
7143 REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
7144 NIG_REG_P1_TX_MNG_HOST_ENABLE, 1);
7145
7146 /* Resume Tx switching to the PF */
7147 rc = bnx2x_func_switch_update(bp, 0);
7148 if (rc) {
7149 BNX2X_ERR("Can't resume tx-switching!\n");
7150 return rc;
7151 }
7152
7153 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
7154 return 0;
7155}
7156
7157int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
7158{
7159 int rc;
7160
7161 bnx2x_ilt_init_op_cnic(bp, INITOP_SET);
7162
7163 if (CONFIGURE_NIC_MODE(bp)) {
7164 /* Configrue searcher as part of function hw init */
7165 bnx2x_init_searcher(bp);
7166
7167 /* Reset NIC mode */
7168 rc = bnx2x_reset_nic_mode(bp);
7169 if (rc)
7170 BNX2X_ERR("Can't change NIC mode!\n");
7171 return rc;
7172 }
7173
7174 return 0;
7175}
7176
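The two init paths cooperate around PRS_REG_NIC_MODE. A condensed paraphrase of the control flow added by this patch (not a verbatim copy of the driver):

	/* Paraphrase of the NIC-mode decision, using names from the patch. */
	if (!CONFIGURE_NIC_MODE(bp)) {
		bnx2x_init_searcher(bp);		/* searcher up from the start */
		REG_WR(bp, PRS_REG_NIC_MODE, 0);	/* L5 traffic allowed */
	} else {
		REG_WR(bp, PRS_REG_NIC_MODE, 1);	/* pure L2 until CNIC loads */
		/* later, bnx2x_init_hw_func_cnic() runs bnx2x_init_searcher()
		 * and bnx2x_reset_nic_mode(), which quiesces tx-switching via
		 * the new SWITCH_UPDATE ramrod before clearing NIC_MODE.
		 */
	}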
 static int bnx2x_init_hw_func(struct bnx2x *bp)
 {
	int port = BP_PORT(bp);
@@ -7082,17 +7216,16 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
 	}
 	bnx2x_ilt_init_op(bp, INITOP_SET);
 
-#ifdef BCM_CNIC
-	bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
-
-	/* T1 hash bits value determines the T1 number of entries */
-	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
-#endif
-
-#ifndef BCM_CNIC
-	/* set NIC mode */
-	REG_WR(bp, PRS_REG_NIC_MODE, 1);
-#endif /* BCM_CNIC */
+	if (!CONFIGURE_NIC_MODE(bp)) {
+		bnx2x_init_searcher(bp);
+		REG_WR(bp, PRS_REG_NIC_MODE, 0);
+		DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
+	} else {
+		/* Set NIC mode */
+		REG_WR(bp, PRS_REG_NIC_MODE, 1);
+		DP(NETIF_MSG_IFUP, "NIC MODE configured\n");
+	}
 
 	if (!CHIP_IS_E1x(bp)) {
		u32 pf_conf = IGU_PF_CONF_FUNC_EN;
@@ -7343,6 +7476,20 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
 }
 
 
+void bnx2x_free_mem_cnic(struct bnx2x *bp)
+{
+	bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE);
+
+	if (!CHIP_IS_E1x(bp))
+		BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
+			       sizeof(struct host_hc_status_block_e2));
+	else
+		BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
+			       sizeof(struct host_hc_status_block_e1x));
+
+	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
+}
+
 void bnx2x_free_mem(struct bnx2x *bp)
 {
	int i;
@@ -7367,17 +7514,6 @@ void bnx2x_free_mem(struct bnx2x *bp)
 
 	BNX2X_FREE(bp->ilt->lines);
 
-#ifdef BCM_CNIC
-	if (!CHIP_IS_E1x(bp))
-		BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
-			       sizeof(struct host_hc_status_block_e2));
-	else
-		BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
-			       sizeof(struct host_hc_status_block_e1x));
-
-	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
-#endif
-
 	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
 
 	BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
@@ -7445,24 +7581,44 @@ alloc_mem_err:
 	return -ENOMEM;
 }
 
-
-int bnx2x_alloc_mem(struct bnx2x *bp)
+int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
 {
-	int i, allocated, context_size;
-
-#ifdef BCM_CNIC
 	if (!CHIP_IS_E1x(bp))
 		/* size = the status block + ramrod buffers */
 		BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
 				sizeof(struct host_hc_status_block_e2));
 	else
 		BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
 				sizeof(struct host_hc_status_block_e1x));
 
-	/* allocate searcher T2 table */
-	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
-#endif
+	if (CONFIGURE_NIC_MODE(bp))
+		/* allocate searcher T2 table, as it wasn't allocated before */
+		BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
+
+	/* write address to which L5 should insert its values */
+	bp->cnic_eth_dev.addr_drv_info_to_mcp =
+		&bp->slowpath->drv_info_to_mcp;
+
+	if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC))
+		goto alloc_mem_err;
+
+	return 0;
+
+alloc_mem_err:
+	bnx2x_free_mem_cnic(bp);
+	BNX2X_ERR("Can't allocate memory\n");
+	return -ENOMEM;
+}
+
+int bnx2x_alloc_mem(struct bnx2x *bp)
+{
+	int i, allocated, context_size;
 
+	if (!CONFIGURE_NIC_MODE(bp))
+		/* allocate searcher T2 table */
+		BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
 
 	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
 			sizeof(struct host_sp_status_block));
@@ -7470,11 +7626,6 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
 	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
 			sizeof(struct bnx2x_slowpath));
 
-#ifdef BCM_CNIC
-	/* write address to which L5 should insert its values */
-	bp->cnic_eth_dev.addr_drv_info_to_mcp = &bp->slowpath->drv_info_to_mcp;
-#endif
-
 	/* Allocated memory for FW statistics */
 	if (bnx2x_alloc_fw_stats_mem(bp))
 		goto alloc_mem_err;
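Both alloc routines end with an alloc_mem_err label because BNX2X_PCI_ALLOC bails out on failure. The macro is defined in bnx2x.h, not in this diff; its assumed shape, for orientation only:

	/* Assumption: approximate shape of BNX2X_PCI_ALLOC from bnx2x.h. */
	#define BNX2X_PCI_ALLOC(x, y, size)				\
		do {							\
			x = dma_alloc_coherent(&bp->pdev->dev, size,	\
					       y, GFP_KERNEL);		\
			if (x == NULL)					\
				goto alloc_mem_err;			\
			memset(x, 0, size);				\
		} while (0)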
@@ -7596,14 +7747,12 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
 {
	unsigned long ramrod_flags = 0;
 
-#ifdef BCM_CNIC
 	if (is_zero_ether_addr(bp->dev->dev_addr) &&
 	    (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
 		DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN,
 		   "Ignoring Zero MAC for STORAGE SD mode\n");
 		return 0;
 	}
-#endif
 
 	DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
 
@@ -7632,7 +7781,8 @@ void bnx2x_set_int_mode(struct bnx2x *bp)
 		bnx2x_enable_msi(bp);
 		/* falling through... */
 	case INT_MODE_INTx:
-		bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
+		bp->num_ethernet_queues = 1;
+		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
 		BNX2X_DEV_INFO("set number of queues to 1\n");
 		break;
 	default:
@@ -7644,9 +7794,10 @@ void bnx2x_set_int_mode(struct bnx2x *bp)
 		    bp->flags & USING_SINGLE_MSIX_FLAG) {
 			/* failed to enable multiple MSI-X */
 			BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
-				       bp->num_queues, 1 + NON_ETH_CONTEXT_USE);
+				       bp->num_queues,
+				       1 + bp->num_cnic_queues);
 
-			bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
+			bp->num_queues = 1 + bp->num_cnic_queues;
 
 			/* Try to enable MSI */
 			if (!(bp->flags & USING_SINGLE_MSIX_FLAG) &&
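The queue count is now an explicit sum rather than the NON_ETH_CONTEXT_USE constant. Assuming the CNIC queue count is 1 on CNIC-capable devices, as the probe path below suggests (cnic_cnt = 1), the INTx fallback works out to:

	/* Worked example for a CNIC-capable device forced into INTx mode:
	 *
	 *	bp->num_ethernet_queues = 1;
	 *	bp->num_cnic_queues     = 1;
	 *	bp->num_queues          = 1 + 1 = 2;
	 *
	 * The old code hid the same arithmetic behind NON_ETH_CONTEXT_USE.
	 */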
@@ -7679,9 +7830,9 @@ void bnx2x_ilt_set_info(struct bnx2x *bp)
 	ilt_client->flags = ILT_CLIENT_SKIP_MEM;
 	ilt_client->start = line;
 	line += bnx2x_cid_ilt_lines(bp);
-#ifdef BCM_CNIC
-	line += CNIC_ILT_LINES;
-#endif
+
+	if (CNIC_SUPPORT(bp))
+		line += CNIC_ILT_LINES;
 	ilt_client->end = line - 1;
 
 	DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
@@ -7714,49 +7865,43 @@ void bnx2x_ilt_set_info(struct bnx2x *bp)
 		   ilog2(ilt_client->page_size >> 12));
 
 	}
-	/* SRC */
-	ilt_client = &ilt->clients[ILT_CLIENT_SRC];
-#ifdef BCM_CNIC
-	ilt_client->client_num = ILT_CLIENT_SRC;
-	ilt_client->page_size = SRC_ILT_PAGE_SZ;
-	ilt_client->flags = 0;
-	ilt_client->start = line;
-	line += SRC_ILT_LINES;
-	ilt_client->end = line - 1;
 
-	DP(NETIF_MSG_IFUP,
-	   "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
-	   ilt_client->start,
-	   ilt_client->end,
-	   ilt_client->page_size,
-	   ilt_client->flags,
-	   ilog2(ilt_client->page_size >> 12));
-
-#else
-	ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
-#endif
-
-	/* TM */
-	ilt_client = &ilt->clients[ILT_CLIENT_TM];
-#ifdef BCM_CNIC
-	ilt_client->client_num = ILT_CLIENT_TM;
-	ilt_client->page_size = TM_ILT_PAGE_SZ;
-	ilt_client->flags = 0;
-	ilt_client->start = line;
-	line += TM_ILT_LINES;
-	ilt_client->end = line - 1;
-
-	DP(NETIF_MSG_IFUP,
-	   "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
-	   ilt_client->start,
-	   ilt_client->end,
-	   ilt_client->page_size,
-	   ilt_client->flags,
-	   ilog2(ilt_client->page_size >> 12));
+	if (CNIC_SUPPORT(bp)) {
+		/* SRC */
+		ilt_client = &ilt->clients[ILT_CLIENT_SRC];
+		ilt_client->client_num = ILT_CLIENT_SRC;
+		ilt_client->page_size = SRC_ILT_PAGE_SZ;
+		ilt_client->flags = 0;
+		ilt_client->start = line;
+		line += SRC_ILT_LINES;
+		ilt_client->end = line - 1;
 
-#else
-	ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
-#endif
+		DP(NETIF_MSG_IFUP,
+		   "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
+		   ilt_client->start,
+		   ilt_client->end,
+		   ilt_client->page_size,
+		   ilt_client->flags,
+		   ilog2(ilt_client->page_size >> 12));
+
+		/* TM */
+		ilt_client = &ilt->clients[ILT_CLIENT_TM];
+		ilt_client->client_num = ILT_CLIENT_TM;
+		ilt_client->page_size = TM_ILT_PAGE_SZ;
+		ilt_client->flags = 0;
+		ilt_client->start = line;
+		line += TM_ILT_LINES;
+		ilt_client->end = line - 1;
+
+		DP(NETIF_MSG_IFUP,
+		   "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
+		   ilt_client->start,
+		   ilt_client->end,
+		   ilt_client->page_size,
+		   ilt_client->flags,
+		   ilog2(ilt_client->page_size >> 12));
+	}
 
 	BUG_ON(line > ILT_MAX_LINES);
 }
 
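The effect on ILT line accounting is cumulative: each client starts where the previous one ended, so skipping SRC and TM at runtime shortens the layout rather than leaving holes. Illustrative summary:

	/* Line accounting in bnx2x_ilt_set_info() (illustrative):
	 *
	 *	line += bnx2x_cid_ilt_lines(bp);	 L2 connections (CDU)
	 *	line += CNIC_ILT_LINES;			 only if CNIC_SUPPORT(bp)
	 *	...
	 *	line += SRC_ILT_LINES;			 only if CNIC_SUPPORT(bp)
	 *	line += TM_ILT_LINES;			 only if CNIC_SUPPORT(bp)
	 *
	 * A device probed without CNIC support therefore consumes strictly
	 * fewer lines before the BUG_ON(line > ILT_MAX_LINES) check.
	 */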
@@ -7924,6 +8069,9 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	/* Set the command */
 	q_params.cmd = BNX2X_Q_CMD_SETUP;
 
+	if (IS_FCOE_FP(fp))
+		bp->fcoe_init = true;
+
 	/* Change the state to SETUP */
 	rc = bnx2x_queue_state_change(bp, &q_params);
 	if (rc) {
@@ -8037,12 +8185,12 @@ static void bnx2x_reset_func(struct bnx2x *bp)
 			   SB_DISABLED);
 	}
 
-#ifdef BCM_CNIC
-	/* CNIC SB */
-	REG_WR8(bp, BAR_CSTRORM_INTMEM +
-		CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(bnx2x_cnic_fw_sb_id(bp)),
-		SB_DISABLED);
-#endif
+	if (CNIC_LOADED(bp))
+		/* CNIC SB */
+		REG_WR8(bp, BAR_CSTRORM_INTMEM +
+			CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET
+			(bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED);
+
 	/* SP SB */
 	REG_WR8(bp, BAR_CSTRORM_INTMEM +
		CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
@@ -8061,19 +8209,19 @@ static void bnx2x_reset_func(struct bnx2x *bp)
 		REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
 	}
 
-#ifdef BCM_CNIC
-	/* Disable Timer scan */
-	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
-	/*
-	 * Wait for at least 10ms and up to 2 second for the timers scan to
-	 * complete
-	 */
-	for (i = 0; i < 200; i++) {
-		msleep(10);
-		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
-			break;
+	if (CNIC_LOADED(bp)) {
+		/* Disable Timer scan */
+		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
+		/*
+		 * Wait for at least 10ms and up to 2 seconds for the timers
+		 * scan to complete
+		 */
+		for (i = 0; i < 200; i++) {
+			msleep(10);
+			if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
+				break;
+		}
 	}
-#endif
 	/* Clear ILT */
 	bnx2x_clear_func_ilt(bp, func);
 
@@ -8409,13 +8557,24 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
 	/* Close multi and leading connections
 	 * Completions for ramrods are collected in a synchronous way
 	 */
-	for_each_queue(bp, i)
+	for_each_eth_queue(bp, i)
 		if (bnx2x_stop_queue(bp, i))
 #ifdef BNX2X_STOP_ON_ERROR
 			return;
 #else
 			goto unload_error;
 #endif
+
+	if (CNIC_LOADED(bp)) {
+		for_each_cnic_queue(bp, i)
+			if (bnx2x_stop_queue(bp, i))
+#ifdef BNX2X_STOP_ON_ERROR
+				return;
+#else
+				goto unload_error;
+#endif
+	}
+
 	/* If SP settings didn't get completed so far - something
 	 * very wrong has happened.
 	 */
@@ -8437,6 +8596,8 @@ unload_error:
 	bnx2x_netif_stop(bp, 1);
 	/* Delete all NAPI objects */
 	bnx2x_del_all_napi(bp);
+	if (CNIC_LOADED(bp))
+		bnx2x_del_all_napi_cnic(bp);
 
 	/* Release IRQs */
 	bnx2x_free_irq(bp);
@@ -10224,12 +10385,15 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
 void bnx2x_get_iscsi_info(struct bnx2x *bp)
 {
	u32 no_flags = NO_ISCSI_FLAG;
-#ifdef BCM_CNIC
 	int port = BP_PORT(bp);
-
 	u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
				drv_lic_key[port].max_iscsi_conn);
 
+	if (!CNIC_SUPPORT(bp)) {
+		bp->flags |= no_flags;
+		return;
+	}
+
 	/* Get the number of maximum allowed iSCSI connections */
 	bp->cnic_eth_dev.max_iscsi_conn =
 		(max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
@@ -10244,12 +10408,9 @@ void bnx2x_get_iscsi_info(struct bnx2x *bp)
 	 */
 	if (!bp->cnic_eth_dev.max_iscsi_conn)
 		bp->flags |= no_flags;
-#else
-	bp->flags |= no_flags;
-#endif
 }
 
-#ifdef BCM_CNIC
 static void __devinit bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
 {
	/* Port info */
@@ -10264,16 +10425,18 @@ static void __devinit bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
 	bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
 		MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
 }
-#endif
 static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp)
 {
-#ifdef BCM_CNIC
 	int port = BP_PORT(bp);
 	int func = BP_ABS_FUNC(bp);
-
 	u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
				drv_lic_key[port].max_fcoe_conn);
 
+	if (!CNIC_SUPPORT(bp)) {
+		bp->flags |= NO_FCOE_FLAG;
+		return;
+	}
+
 	/* Get the number of maximum allowed FCoE connections */
 	bp->cnic_eth_dev.max_fcoe_conn =
 		(max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
@@ -10319,9 +10482,6 @@ static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp)
 	 */
 	if (!bp->cnic_eth_dev.max_fcoe_conn)
 		bp->flags |= NO_FCOE_FLAG;
-#else
-	bp->flags |= NO_FCOE_FLAG;
-#endif
 }
 
 static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
@@ -10335,132 +10495,133 @@ static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
 	bnx2x_get_fcoe_info(bp);
 }
 
-static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
+static void __devinit bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
 {
	u32 val, val2;
	int func = BP_ABS_FUNC(bp);
	int port = BP_PORT(bp);
-#ifdef BCM_CNIC
 	u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
 	u8 *fip_mac = bp->fip_mac;
-#endif
 
-	/* Zero primary MAC configuration */
-	memset(bp->dev->dev_addr, 0, ETH_ALEN);
-
-	if (BP_NOMCP(bp)) {
-		BNX2X_ERROR("warning: random MAC workaround active\n");
-		eth_hw_addr_random(bp->dev);
-	} else if (IS_MF(bp)) {
-		val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
-		val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
-		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
-		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
-			bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
-
-#ifdef BCM_CNIC
-		/*
-		 * iSCSI and FCoE NPAR MACs: if there is no either iSCSI or
+	if (IS_MF(bp)) {
+		/* iSCSI and FCoE NPAR MACs: if there is no either iSCSI or
 		 * FCoE MAC then the appropriate feature should be disabled.
-		 *
-		 * In non SD mode features configuration comes from
-		 * struct func_ext_config.
+		 * In non SD mode features configuration comes from struct
+		 * func_ext_config.
 		 */
-		if (!IS_MF_SD(bp)) {
+		if (!IS_MF_SD(bp) && !CHIP_IS_E1x(bp)) {
 			u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
 			if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
 				val2 = MF_CFG_RD(bp, func_ext_config[func].
 						 iscsi_mac_addr_upper);
 				val = MF_CFG_RD(bp, func_ext_config[func].
 						iscsi_mac_addr_lower);
 				bnx2x_set_mac_buf(iscsi_mac, val, val2);
-				BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n",
-					       iscsi_mac);
-			} else
+				BNX2X_DEV_INFO
+					("Read iSCSI MAC: %pM\n", iscsi_mac);
+			} else {
 				bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
+			}
 
 			if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
 				val2 = MF_CFG_RD(bp, func_ext_config[func].
 						 fcoe_mac_addr_upper);
 				val = MF_CFG_RD(bp, func_ext_config[func].
 						fcoe_mac_addr_lower);
 				bnx2x_set_mac_buf(fip_mac, val, val2);
-				BNX2X_DEV_INFO("Read FCoE L2 MAC: %pM\n",
-					       fip_mac);
-
-			} else
+				BNX2X_DEV_INFO
					("Read FCoE L2 MAC: %pM\n", fip_mac);
+			} else {
 				bp->flags |= NO_FCOE_FLAG;
+			}
 
 			bp->mf_ext_config = cfg;
 
 		} else { /* SD MODE */
-			if (IS_MF_STORAGE_SD(bp)) {
-				if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
-					/* use primary mac as iscsi mac */
-					memcpy(iscsi_mac, bp->dev->dev_addr,
-					       ETH_ALEN);
-
-					BNX2X_DEV_INFO("SD ISCSI MODE\n");
-					BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n",
-						       iscsi_mac);
-				} else { /* FCoE */
-					memcpy(fip_mac, bp->dev->dev_addr,
-					       ETH_ALEN);
-					BNX2X_DEV_INFO("SD FCoE MODE\n");
-					BNX2X_DEV_INFO("Read FIP MAC: %pM\n",
-						       fip_mac);
-				}
-				/* Zero primary MAC configuration */
-				memset(bp->dev->dev_addr, 0, ETH_ALEN);
+			if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
+				/* use primary mac as iscsi mac */
+				memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
+
+				BNX2X_DEV_INFO("SD ISCSI MODE\n");
+				BNX2X_DEV_INFO
+					("Read iSCSI MAC: %pM\n", iscsi_mac);
+			} else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
+				/* use primary mac as fip mac */
+				memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
+				BNX2X_DEV_INFO("SD FCoE MODE\n");
+				BNX2X_DEV_INFO
					("Read FIP MAC: %pM\n", fip_mac);
 			}
 		}
 
+		if (IS_MF_STORAGE_SD(bp))
+			/* Zero primary MAC configuration */
+			memset(bp->dev->dev_addr, 0, ETH_ALEN);
+
 		if (IS_MF_FCOE_AFEX(bp))
 			/* use FIP MAC as primary MAC */
 			memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
 
-#endif
 	} else {
-		/* in SF read MACs from port configuration */
-		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
-		val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
-		bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
-
-#ifdef BCM_CNIC
 		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
				iscsi_mac_upper);
 		val = SHMEM_RD(bp, dev_info.port_hw_config[port].
			       iscsi_mac_lower);
 		bnx2x_set_mac_buf(iscsi_mac, val, val2);
 
 		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
				fcoe_fip_mac_upper);
 		val = SHMEM_RD(bp, dev_info.port_hw_config[port].
			       fcoe_fip_mac_lower);
 		bnx2x_set_mac_buf(fip_mac, val, val2);
-#endif
 	}
 
-	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
-	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
-
-#ifdef BCM_CNIC
-	/* Disable iSCSI if MAC configuration is
-	 * invalid.
-	 */
+	/* Disable iSCSI OOO if MAC configuration is invalid. */
 	if (!is_valid_ether_addr(iscsi_mac)) {
-		bp->flags |= NO_ISCSI_FLAG;
+		bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
 		memset(iscsi_mac, 0, ETH_ALEN);
 	}
 
-	/* Disable FCoE if MAC configuration is
-	 * invalid.
-	 */
+	/* Disable FCoE if MAC configuration is invalid. */
 	if (!is_valid_ether_addr(fip_mac)) {
 		bp->flags |= NO_FCOE_FLAG;
 		memset(bp->fip_mac, 0, ETH_ALEN);
 	}
-#endif
+}
+
+static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
+{
+	u32 val, val2;
+	int func = BP_ABS_FUNC(bp);
+	int port = BP_PORT(bp);
+
+	/* Zero primary MAC configuration */
+	memset(bp->dev->dev_addr, 0, ETH_ALEN);
+
+	if (BP_NOMCP(bp)) {
+		BNX2X_ERROR("warning: random MAC workaround active\n");
+		eth_hw_addr_random(bp->dev);
+	} else if (IS_MF(bp)) {
+		val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
+		val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
+		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
+		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
+			bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
+
+		if (CNIC_SUPPORT(bp))
+			bnx2x_get_cnic_mac_hwinfo(bp);
+	} else {
+		/* in SF read MACs from port configuration */
+		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
+		val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
+		bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
+
+		if (CNIC_SUPPORT(bp))
+			bnx2x_get_cnic_mac_hwinfo(bp);
+	}
+
+	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
+	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
 
 	if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
 		dev_err(&bp->pdev->dev,
@@ -10837,9 +10998,7 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
 	mutex_init(&bp->port.phy_mutex);
 	mutex_init(&bp->fw_mb_mutex);
 	spin_lock_init(&bp->stats_lock);
-#ifdef BCM_CNIC
-	mutex_init(&bp->cnic_mutex);
-#endif
 
 	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
 	INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
@@ -10877,10 +11036,7 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
 		dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
 
 	bp->disable_tpa = disable_tpa;
-
-#ifdef BCM_CNIC
 	bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp);
-#endif
 
 	/* Set TPA flags */
 	if (bp->disable_tpa) {
@@ -10914,12 +11070,10 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
 	bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
 	bnx2x_dcbx_init_params(bp);
 
-#ifdef BCM_CNIC
 	if (CHIP_IS_E1x(bp))
 		bp->cnic_base_cl_id = FP_SB_MAX_E1x;
 	else
 		bp->cnic_base_cl_id = FP_SB_MAX_E2;
-#endif
 
 	/* multiple tx priority */
 	if (CHIP_IS_E1x(bp))
@@ -10929,6 +11083,16 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
 	if (CHIP_IS_E3B0(bp))
 		bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
 
+	/* We need at least one default status block for slow-path events,
+	 * a second status block for the L2 queue, and a third status block
+	 * for CNIC if supported.
+	 */
+	if (CNIC_SUPPORT(bp))
+		bp->min_msix_vec_cnt = 3;
+	else
+		bp->min_msix_vec_cnt = 2;
+	BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d\n", bp->min_msix_vec_cnt);
+
 	return rc;
 }
 
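The minimum vector count follows directly from the status-block inventory described in the comment above. A worked summary of the budget:

	/* Vector budget implied by min_msix_vec_cnt:
	 *
	 *	default SB (slow path)   -> 1 vector
	 *	one L2 fastpath queue    -> 1 vector
	 *	CNIC status block        -> 1 vector, only if CNIC_SUPPORT(bp)
	 *
	 * An MSI-X allocation trimmed below this count is unusable and the
	 * driver has to fall back to MSI or INTx.
	 */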
@@ -11165,11 +11329,9 @@ void bnx2x_set_rx_mode(struct net_device *dev)
 	}
 
	bp->rx_mode = rx_mode;
-#ifdef BCM_CNIC
 	/* handle ISCSI SD mode */
 	if (IS_MF_ISCSI_SD(bp))
 		bp->rx_mode = BNX2X_RX_MODE_NONE;
-#endif
 
 	/* Schedule the rx_mode command */
 	if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
@@ -11281,7 +11443,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
 #endif
	.ndo_setup_tc		= bnx2x_setup_tc,
 
-#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
+#ifdef NETDEV_FCOE_WWNN
	.ndo_fcoe_get_wwn	= bnx2x_fcoe_get_wwn,
 #endif
 };
@@ -11747,9 +11909,8 @@ static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
 {
	int cid_count = BNX2X_L2_MAX_CID(bp);
 
-#ifdef BCM_CNIC
-	cid_count += CNIC_CID_MAX;
-#endif
+	if (CNIC_SUPPORT(bp))
+		cid_count += CNIC_CID_MAX;
 	return roundup(cid_count, QM_CID_ROUND);
 }
 
@@ -11759,7 +11920,8 @@ static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
  * @dev: pci device
  *
  */
-static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev)
+static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev,
+				     int cnic_cnt)
 {
	int pos;
	u16 control;
@@ -11771,7 +11933,7 @@ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev)
 	 * one fast path queue: one FP queue + SB for CNIC
 	 */
 	if (!pos)
-		return 1 + CNIC_PRESENT;
+		return 1 + cnic_cnt;
 
 	/*
 	 * The value in the PCI configuration space is the index of the last
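What this helper is reading is the MSI-X capability's table-size field, which encodes "number of entries minus one". A generic, self-contained sketch of the same lookup using the standard PCI accessors:

	#include <linux/pci.h>

	/* Sketch of reading the MSI-X table size; not the driver's exact code. */
	static int msix_table_size(struct pci_dev *pdev)
	{
		int pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
		u16 control;

		if (!pos)
			return 0;	/* no MSI-X capability at all */
		pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, &control);
		return (control & PCI_MSIX_FLAGS_QSIZE) + 1;
	}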
@@ -11791,6 +11953,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
	int pcie_width, pcie_speed;
	int rc, max_non_def_sbs;
	int rx_count, tx_count, rss_count, doorbell_size;
+	int cnic_cnt;
 	/*
 	 * An estimated maximum supported CoS number according to the chip
 	 * version.
@@ -11834,21 +11997,22 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
 		return -ENODEV;
 	}
 
-	max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev);
+	cnic_cnt = 1;
+	max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);
 
 	WARN_ON(!max_non_def_sbs);
 
 	/* Maximum number of RSS queues: one IGU SB goes to CNIC */
-	rss_count = max_non_def_sbs - CNIC_PRESENT;
+	rss_count = max_non_def_sbs - cnic_cnt;
 
 	/* Maximum number of netdev Rx queues: RSS + FCoE L2 */
-	rx_count = rss_count + FCOE_PRESENT;
+	rx_count = rss_count + cnic_cnt;
 
 	/*
 	 * Maximum number of netdev Tx queues:
 	 * Maximum TSS queues * Maximum supported number of CoS + FCoE L2
 	 */
-	tx_count = rss_count * max_cos_est + FCOE_PRESENT;
+	tx_count = rss_count * max_cos_est + cnic_cnt;
 
 	/* dev zeroed in init_etherdev */
 	dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
@@ -11859,6 +12023,8 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
 
	bp->igu_sb_cnt = max_non_def_sbs;
	bp->msg_enable = debug;
+	bp->cnic_support = cnic_cnt;
+
 	pci_set_drvdata(pdev, dev);
 
 	rc = bnx2x_init_dev(pdev, dev, ent->driver_data);
@@ -11867,6 +12033,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
 		return rc;
 	}
 
+	BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off");
 	BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs);
 
 	BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
@@ -11899,10 +12066,10 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
 	/* calc qm_cid_count */
	bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
 
-#ifdef BCM_CNIC
-	/* disable FCOE L2 queue for E1x */
+	/* disable FCOE L2 queue for E1x */
 	if (CHIP_IS_E1x(bp))
 		bp->flags |= NO_FCOE_FLAG;
+
 	/* disable FCOE for 57840 device, until FW supports it */
 	switch (ent->driver_data) {
 	case BCM57840_O:
@@ -11912,8 +12079,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
 	case BCM57840_MF:
 		bp->flags |= NO_FCOE_FLAG;
 	}
-#endif
-
 
 	/* Set bp->num_queues for MSI-X mode*/
 	bnx2x_set_num_queues(bp);
@@ -11929,14 +12094,13 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
 		goto init_one_exit;
 	}
 
-#ifdef BCM_CNIC
 	if (!NO_FCOE(bp)) {
 		/* Add storage MAC address */
 		rtnl_lock();
 		dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
 		rtnl_unlock();
 	}
-#endif
 
 	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
 
@@ -11981,14 +12145,12 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
 	}
	bp = netdev_priv(dev);
 
-#ifdef BCM_CNIC
 	/* Delete storage MAC address */
 	if (!NO_FCOE(bp)) {
 		rtnl_lock();
 		dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
 		rtnl_unlock();
 	}
-#endif
 
 #ifdef BCM_DCBNL
 	/* Delete app tlvs from dcbnl */
@@ -12036,15 +12198,17 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
 
	bp->rx_mode = BNX2X_RX_MODE_NONE;
 
-#ifdef BCM_CNIC
-	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
-#endif
+	if (CNIC_LOADED(bp))
+		bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
+
 	/* Stop Tx */
 	bnx2x_tx_disable(bp);
 
 	bnx2x_netif_stop(bp, 0);
 	/* Delete all NAPI objects */
 	bnx2x_del_all_napi(bp);
+	if (CNIC_LOADED(bp))
+		bnx2x_del_all_napi_cnic(bp);
 
 	del_timer_sync(&bp->timer);
 
@@ -12235,7 +12399,6 @@ void bnx2x_notify_link_changed(struct bnx2x *bp)
 module_init(bnx2x_init);
 module_exit(bnx2x_cleanup);
 
-#ifdef BCM_CNIC
 /**
  * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
  *
@@ -12688,12 +12851,31 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
 {
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+	int rc;
+
+	DP(NETIF_MSG_IFUP, "Register_cnic called\n");
 
 	if (ops == NULL) {
 		BNX2X_ERR("NULL ops received\n");
 		return -EINVAL;
 	}
 
+	if (!CNIC_SUPPORT(bp)) {
+		BNX2X_ERR("Can't register CNIC when not supported\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (!CNIC_LOADED(bp)) {
+		rc = bnx2x_load_cnic(bp);
+		if (rc) {
+			BNX2X_ERR("CNIC-related load failed\n");
+			return rc;
+		}
+	}
+
+	bp->cnic_enabled = true;
+
	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
 	if (!bp->cnic_kwq)
 		return -ENOMEM;
@@ -12785,5 +12967,4 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
 }
 EXPORT_SYMBOL(bnx2x_cnic_probe);
 
-#endif /* BCM_CNIC */
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 1b1999d34c71..7d93adb57f31 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -2107,6 +2107,7 @@
 #define NIG_REG_LLH1_ERROR_MASK 0x10090
 /* [RW 8] event id for llh1 */
 #define NIG_REG_LLH1_EVENT_ID 0x10088
+#define NIG_REG_LLH1_FUNC_EN 0x16104
 #define NIG_REG_LLH1_FUNC_MEM 0x161c0
 #define NIG_REG_LLH1_FUNC_MEM_ENABLE 0x16160
 #define NIG_REG_LLH1_FUNC_MEM_SIZE 16
@@ -2302,6 +2303,15 @@
  * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
  * accommodate the 9 input clients to ETS arbiter. */
 #define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB 0x18684
+/* [RW 1] MCP-to-host path enable. Set this bit to enable the routing of MCP
+ * packets to the BRB LB interface, which forwards the packet to the host.
+ * All packets from the MCP are forwarded to the network when this bit is
+ * cleared, regardless of the configured destination in the
+ * tx_mng_destination register. When the MCP-to-host paths for both ports 0
+ * and 1 are disabled, the arbiter for the BRB LB interface is bypassed and
+ * PBF LB traffic is always selected to be sent to BRB LB.
+ */
+#define NIG_REG_P0_TX_MNG_HOST_ENABLE 0x182f4
 #define NIG_REG_P1_HWPFC_ENABLE 0x181d0
 #define NIG_REG_P1_MAC_IN_EN 0x185c0
 /* [RW 1] Output enable for TX MAC interface */
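These new definitions back the BMC-to-host fencing in bnx2x_reset_nic_mode() (bnx2x_main.c, earlier in this same patch), which writes them verbatim around the NIC_MODE flip:

	/* Usage excerpted from bnx2x_reset_nic_mode() in this series: */
	REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
	       NIG_REG_P1_TX_MNG_HOST_ENABLE, 0);	/* close BMC to host */
	/* ... suspend tx-switching, write PRS_REG_NIC_MODE ... */
	REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
	       NIG_REG_P1_TX_MNG_HOST_ENABLE, 1);	/* reopen BMC to host */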
@@ -2418,6 +2428,12 @@
 #define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB 0x186e4
 /* [R 1] TX FIFO for transmitting data to MAC is empty. */
 #define NIG_REG_P1_TX_MACFIFO_EMPTY 0x18594
+/* [RW 1] MCP-to-host path enable. Set this bit to enable the routing of MCP
+ * packets to the BRB LB interface, which forwards the packet to the host.
+ * All packets from the MCP are forwarded to the network when this bit is
+ * cleared, regardless of the configured destination in the
+ * tx_mng_destination register.
+ */
+#define NIG_REG_P1_TX_MNG_HOST_ENABLE 0x182f8
 /* [R 1] FIFO empty status of the MCP TX FIFO used for storing MCP packets
    forwarded to the host. */
 #define NIG_REG_P1_TX_MNG_HOST_FIFO_EMPTY 0x182b8
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 614981c02264..b8b4b749daab 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -5350,12 +5350,24 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp,
 		else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
 			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
 			next_state = BNX2X_F_STATE_STARTED;
+
+		/* Switch_update ramrod can be sent in either started or
+		 * tx_stopped state, and it doesn't change the state.
+		 */
+		else if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
+			 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
+			next_state = BNX2X_F_STATE_STARTED;
+
 		else if (cmd == BNX2X_F_CMD_TX_STOP)
 			next_state = BNX2X_F_STATE_TX_STOPPED;
 
 		break;
 	case BNX2X_F_STATE_TX_STOPPED:
-		if (cmd == BNX2X_F_CMD_TX_START)
+		if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
+		    (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
+			next_state = BNX2X_F_STATE_TX_STOPPED;
+
+		else if (cmd == BNX2X_F_CMD_TX_START)
 			next_state = BNX2X_F_STATE_STARTED;
 
 		break;
@@ -5637,6 +5649,28 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
 			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
 }
 
+static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
+					struct bnx2x_func_state_params *params)
+{
+	struct bnx2x_func_sp_obj *o = params->f_obj;
+	struct function_update_data *rdata =
+		(struct function_update_data *)o->rdata;
+	dma_addr_t data_mapping = o->rdata_mapping;
+	struct bnx2x_func_switch_update_params *switch_update_params =
+		&params->params.switch_update;
+
+	memset(rdata, 0, sizeof(*rdata));
+
+	/* Fill the ramrod data with provided parameters */
+	rdata->tx_switch_suspend_change_flg = 1;
+	rdata->tx_switch_suspend = switch_update_params->suspend;
+	rdata->echo = SWITCH_UPDATE;
+
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
+			     U64_HI(data_mapping),
+			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
+}
+
 static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
 					 struct bnx2x_func_state_params *params)
 {
@@ -5657,6 +5691,7 @@ static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
 		cpu_to_le16(afex_update_params->afex_default_vlan);
 	rdata->allowed_priorities_change_flg = 1;
 	rdata->allowed_priorities = afex_update_params->allowed_priorities;
+	rdata->echo = AFEX_UPDATE;
 
 	/* No need for an explicit memory barrier here as long we would
 	 * need to ensure the ordering of writing to the SPQ element
@@ -5773,6 +5808,8 @@ static int bnx2x_func_send_cmd(struct bnx2x *bp,
 		return bnx2x_func_send_tx_stop(bp, params);
 	case BNX2X_F_CMD_TX_START:
 		return bnx2x_func_send_tx_start(bp, params);
+	case BNX2X_F_CMD_SWITCH_UPDATE:
+		return bnx2x_func_send_switch_update(bp, params);
 	default:
 		BNX2X_ERR("Unknown command: %d\n", params->cmd);
 		return -EINVAL;
@@ -5818,16 +5855,30 @@ int bnx2x_func_state_change(struct bnx2x *bp,
 			    struct bnx2x_func_state_params *params)
 {
	struct bnx2x_func_sp_obj *o = params->f_obj;
-	int rc;
+	int rc, cnt = 300;
	enum bnx2x_func_cmd cmd = params->cmd;
	unsigned long *pending = &o->pending;
 
 	mutex_lock(&o->one_pending_mutex);
 
 	/* Check that the requested transition is legal */
-	if (o->check_transition(bp, o, params)) {
+	rc = o->check_transition(bp, o, params);
+	if ((rc == -EBUSY) &&
+	    (test_bit(RAMROD_RETRY, &params->ramrod_flags))) {
+		while ((rc == -EBUSY) && (--cnt > 0)) {
+			mutex_unlock(&o->one_pending_mutex);
+			msleep(10);
+			mutex_lock(&o->one_pending_mutex);
+			rc = o->check_transition(bp, o, params);
+		}
+		if (rc == -EBUSY) {
+			mutex_unlock(&o->one_pending_mutex);
+			BNX2X_ERR("timeout waiting for previous ramrod completion\n");
+			return rc;
+		}
+	} else if (rc) {
 		mutex_unlock(&o->one_pending_mutex);
-		return -EINVAL;
+		return rc;
 	}
 
 	/* Set "pending" bit */
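The retry loop puts a hard bound on how long a SWITCH_UPDATE caller can block behind an earlier ramrod:

	/* Worst-case wait in the retry path above:
	 *
	 *	cnt = 300 iterations * msleep(10) = roughly 3000 ms
	 *
	 * msleep() may oversleep by a jiffy or more, so treat 3 s as a
	 * floor rather than an exact bound.
	 */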
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index acf2fe4ca608..adbd91b1bdfc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -40,6 +40,12 @@ enum {
 	 * pending commands list.
 	 */
	RAMROD_CONT,
+	/* If there is another pending ramrod, wait until it finishes and
+	 * re-try to submit this one. This flag can be set only in a
+	 * sleepable context, and must not be set from the context that
+	 * completes the ramrods, as a deadlock would occur.
+	 */
+	RAMROD_RETRY,
 };
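Typical use of the new flag, taken from bnx2x_func_switch_update() in bnx2x_main.c (part of this same series): combine it with RAMROD_COMP_WAIT so the caller both retries a busy state machine and waits for completion.

	struct bnx2x_func_state_params func_params = {NULL};

	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
	__set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
	func_params.params.switch_update.suspend = suspend;

	rc = bnx2x_func_state_change(bp, &func_params);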
 
 typedef enum {
@@ -1061,6 +1067,7 @@ enum bnx2x_func_cmd {
	BNX2X_F_CMD_AFEX_VIFLISTS,
	BNX2X_F_CMD_TX_STOP,
	BNX2X_F_CMD_TX_START,
+	BNX2X_F_CMD_SWITCH_UPDATE,
	BNX2X_F_CMD_MAX,
 };
 
@@ -1103,6 +1110,10 @@ struct bnx2x_func_start_params {
	u8 network_cos_mode;
 };
 
+struct bnx2x_func_switch_update_params {
+	u8 suspend;
+};
+
 struct bnx2x_func_afex_update_params {
	u16 vif_id;
	u16 afex_default_vlan;
@@ -1136,6 +1147,7 @@ struct bnx2x_func_state_params {
	struct bnx2x_func_hw_init_params hw_init;
	struct bnx2x_func_hw_reset_params hw_reset;
	struct bnx2x_func_start_params start;
+	struct bnx2x_func_switch_update_params switch_update;
	struct bnx2x_func_afex_update_params afex_update;
	struct bnx2x_func_afex_viflists_params afex_viflists;
	struct bnx2x_func_tx_start_params tx_start;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index a8800ac10df9..038ce0215e3e 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -90,10 +90,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
 
 #define DRV_MODULE_NAME "tg3"
 #define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 125
+#define TG3_MIN_NUM 126
 #define DRV_MODULE_VERSION \
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "September 26, 2012"
+#define DRV_MODULE_RELDATE "November 05, 2012"
 
 #define RESET_KIND_SHUTDOWN 0
 #define RESET_KIND_INIT 1
@@ -291,6 +291,7 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
@@ -10429,10 +10430,8 @@ static void tg3_stop(struct tg3 *tp)
 {
	int i;
 
-	tg3_napi_disable(tp);
 	tg3_reset_task_cancel(tp);
-
-	netif_tx_disable(tp->dev);
+	tg3_netif_stop(tp);
 
 	tg3_timer_stop(tp);
 
@@ -14026,7 +14025,8 @@ out_not_found:
 
 out_no_vpd:
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
-		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
+		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
+		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
 			strcpy(tp->board_part_number, "BCM5717");
 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
 			strcpy(tp->board_part_number, "BCM5718");
@@ -14397,6 +14397,7 @@ static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
 		tg3_flag_set(tp, CPMU_PRESENT);
 
 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
+		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
@@ -14424,6 +14425,9 @@ static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
 	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
 		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
 
+	if (tp->pci_chip_rev_id == CHIPREV_ID_5717_C0)
+		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
+
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
@@ -16013,6 +16017,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
16013 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S || 16017 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
16014 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE || 16018 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
16015 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || 16019 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16020 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16016 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || 16021 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16017 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || 16022 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16018 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) { 16023 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index d9308c32102e..b3c2bf2c082f 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -50,6 +50,7 @@
50#define TG3PCI_DEVICE_TIGON3_5785_G 0x1699 /* GPHY */ 50#define TG3PCI_DEVICE_TIGON3_5785_G 0x1699 /* GPHY */
51#define TG3PCI_DEVICE_TIGON3_5785_F 0x16a0 /* 10/100 only */ 51#define TG3PCI_DEVICE_TIGON3_5785_F 0x16a0 /* 10/100 only */
52#define TG3PCI_DEVICE_TIGON3_5717 0x1655 52#define TG3PCI_DEVICE_TIGON3_5717 0x1655
53#define TG3PCI_DEVICE_TIGON3_5717_C 0x1665
53#define TG3PCI_DEVICE_TIGON3_5718 0x1656 54#define TG3PCI_DEVICE_TIGON3_5718 0x1656
54#define TG3PCI_DEVICE_TIGON3_57781 0x16b1 55#define TG3PCI_DEVICE_TIGON3_57781 0x16b1
55#define TG3PCI_DEVICE_TIGON3_57785 0x16b5 56#define TG3PCI_DEVICE_TIGON3_57785 0x16b5
@@ -149,6 +150,7 @@
149#define CHIPREV_ID_57780_A0 0x57780000 150#define CHIPREV_ID_57780_A0 0x57780000
150#define CHIPREV_ID_57780_A1 0x57780001 151#define CHIPREV_ID_57780_A1 0x57780001
151#define CHIPREV_ID_5717_A0 0x05717000 152#define CHIPREV_ID_5717_A0 0x05717000
153#define CHIPREV_ID_5717_C0 0x05717200
152#define CHIPREV_ID_57765_A0 0x57785000 154#define CHIPREV_ID_57765_A0 0x57785000
153#define CHIPREV_ID_5719_A0 0x05719000 155#define CHIPREV_ID_5719_A0 0x05719000
154#define CHIPREV_ID_5720_A0 0x05720000 156#define CHIPREV_ID_5720_A0 0x05720000
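The two new tg3.h constants tie into the remap in tg3_detect_asic_rev() above: 0x05717200 decodes to ASIC rev 0x5717, but the C0 stepping is really a 5720-class core, so the driver rewrites it to CHIPREV_ID_5720_A0 and reuses the existing 5720 code paths. A small harness showing how the IDs decompose, assuming tg3.h's usual shift macros (quoted from memory):

#include <stdio.h>

#define GET_ASIC_REV(CHIP_REV_ID)	((CHIP_REV_ID) >> 12)
#define GET_CHIP_REV(CHIP_REV_ID)	((CHIP_REV_ID) >> 8)

int main(void)
{
	unsigned int id = 0x05717200;	/* CHIPREV_ID_5717_C0 */

	/* Prints: asic rev 0x5717, chip rev 0x57172 */
	printf("asic rev 0x%x, chip rev 0x%x\n",
	       GET_ASIC_REV(id), GET_CHIP_REV(id));
	return 0;
}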
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index db931916da08..ceb0de0cf62c 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -2,13 +2,10 @@
2# Atmel device configuration 2# Atmel device configuration
3# 3#
4 4
5config HAVE_NET_MACB
6 bool
7
8config NET_CADENCE 5config NET_CADENCE
9 bool "Cadence devices" 6 bool "Cadence devices"
7 depends on HAS_IOMEM
10 default y 8 default y
11 depends on HAVE_NET_MACB || (ARM && ARCH_AT91RM9200)
12 ---help--- 9 ---help---
13 If you have a network (Ethernet) card belonging to this class, say Y. 10 If you have a network (Ethernet) card belonging to this class, say Y.
14 Make sure you know the name of your card. Read the Ethernet-HOWTO, 11 Make sure you know the name of your card. Read the Ethernet-HOWTO,
@@ -25,16 +22,14 @@ if NET_CADENCE
25 22
26config ARM_AT91_ETHER 23config ARM_AT91_ETHER
27 tristate "AT91RM9200 Ethernet support" 24 tristate "AT91RM9200 Ethernet support"
28 depends on ARM && ARCH_AT91RM9200
29 select NET_CORE 25 select NET_CORE
30 select MII 26 select MACB
31 ---help--- 27 ---help---
32 If you wish to compile a kernel for the AT91RM9200 and enable 28 If you wish to compile a kernel for the AT91RM9200 and enable
33 ethernet support, then you should always answer Y to this. 29 ethernet support, then you should always answer Y to this.
34 30
35config MACB 31config MACB
36 tristate "Cadence MACB/GEM support" 32 tristate "Cadence MACB/GEM support"
37 depends on HAVE_NET_MACB
38 select PHYLIB 33 select PHYLIB
39 ---help--- 34 ---help---
40 The Cadence MACB ethernet interface is found on many Atmel AT32 and 35 The Cadence MACB ethernet interface is found on many Atmel AT32 and
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index 4e980a7886fb..e7a476cff6c5 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -6,11 +6,6 @@
6 * Based on an earlier Atmel EMAC macrocell driver by Atmel and Lineo Inc. 6 * Based on an earlier Atmel EMAC macrocell driver by Atmel and Lineo Inc.
7 * Initial version by Rick Bronson 01/11/2003 7 * Initial version by Rick Bronson 01/11/2003
8 * 8 *
9 * Intel LXT971A PHY support by Christopher Bahns & David Knickerbocker
10 * (Polaroid Corporation)
11 *
12 * Realtek RTL8201(B)L PHY support by Roman Avramenko <roman@imsystems.ru>
13 *
14 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License 10 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version 11 * as published by the Free Software Foundation; either version
@@ -20,7 +15,6 @@
20#include <linux/module.h> 15#include <linux/module.h>
21#include <linux/init.h> 16#include <linux/init.h>
22#include <linux/interrupt.h> 17#include <linux/interrupt.h>
23#include <linux/mii.h>
24#include <linux/netdevice.h> 18#include <linux/netdevice.h>
25#include <linux/etherdevice.h> 19#include <linux/etherdevice.h>
26#include <linux/skbuff.h> 20#include <linux/skbuff.h>
@@ -31,956 +25,251 @@
31#include <linux/clk.h> 25#include <linux/clk.h>
32#include <linux/gfp.h> 26#include <linux/gfp.h>
33#include <linux/phy.h> 27#include <linux/phy.h>
28#include <linux/io.h>
29#include <linux/of.h>
30#include <linux/of_device.h>
31#include <linux/of_net.h>
32#include <linux/pinctrl/consumer.h>
34 33
35#include <asm/io.h> 34#include "macb.h"
36#include <asm/uaccess.h>
37#include <asm/mach-types.h>
38
39#include <mach/at91rm9200_emac.h>
40#include <asm/gpio.h>
41#include <mach/board.h>
42
43#include "at91_ether.h"
44
45#define DRV_NAME "at91_ether"
46#define DRV_VERSION "1.0"
47
48#define LINK_POLL_INTERVAL (HZ)
49
50/* ..................................................................... */
51
52/*
53 * Read from a EMAC register.
54 */
55static inline unsigned long at91_emac_read(struct at91_private *lp, unsigned int reg)
56{
57 return __raw_readl(lp->emac_base + reg);
58}
59
60/*
61 * Write to a EMAC register.
62 */
63static inline void at91_emac_write(struct at91_private *lp, unsigned int reg, unsigned long value)
64{
65 __raw_writel(value, lp->emac_base + reg);
66}
67
68/* ........................... PHY INTERFACE ........................... */
69
70/*
71 * Enable the MDIO bit in MAC control register
72 * When not called from an interrupt-handler, access to the PHY must be
73 * protected by a spinlock.
74 */
75static void enable_mdi(struct at91_private *lp)
76{
77 unsigned long ctl;
78
79 ctl = at91_emac_read(lp, AT91_EMAC_CTL);
80 at91_emac_write(lp, AT91_EMAC_CTL, ctl | AT91_EMAC_MPE); /* enable management port */
81}
82
83/*
84 * Disable the MDIO bit in the MAC control register
85 */
86static void disable_mdi(struct at91_private *lp)
87{
88 unsigned long ctl;
89
90 ctl = at91_emac_read(lp, AT91_EMAC_CTL);
91 at91_emac_write(lp, AT91_EMAC_CTL, ctl & ~AT91_EMAC_MPE); /* disable management port */
92}
93
94/*
95 * Wait until the PHY operation is complete.
96 */
97static inline void at91_phy_wait(struct at91_private *lp)
98{
99 unsigned long timeout = jiffies + 2;
100
101 while (!(at91_emac_read(lp, AT91_EMAC_SR) & AT91_EMAC_SR_IDLE)) {
102 if (time_after(jiffies, timeout)) {
103 printk("at91_ether: MDIO timeout\n");
104 break;
105 }
106 cpu_relax();
107 }
108}
109
110/*
111 * Write value to the a PHY register
112 * Note: MDI interface is assumed to already have been enabled.
113 */
114static void write_phy(struct at91_private *lp, unsigned char phy_addr, unsigned char address, unsigned int value)
115{
116 at91_emac_write(lp, AT91_EMAC_MAN, AT91_EMAC_MAN_802_3 | AT91_EMAC_RW_W
117 | ((phy_addr & 0x1f) << 23) | (address << 18) | (value & AT91_EMAC_DATA));
118
119 /* Wait until IDLE bit in Network Status register is cleared */
120 at91_phy_wait(lp);
121}
122
123/*
124 * Read value stored in a PHY register.
125 * Note: MDI interface is assumed to already have been enabled.
126 */
127static void read_phy(struct at91_private *lp, unsigned char phy_addr, unsigned char address, unsigned int *value)
128{
129 at91_emac_write(lp, AT91_EMAC_MAN, AT91_EMAC_MAN_802_3 | AT91_EMAC_RW_R
130 | ((phy_addr & 0x1f) << 23) | (address << 18));
131
132 /* Wait until IDLE bit in Network Status register is cleared */
133 at91_phy_wait(lp);
134
135 *value = at91_emac_read(lp, AT91_EMAC_MAN) & AT91_EMAC_DATA;
136}
137
138/* ........................... PHY MANAGEMENT .......................... */
139
140/*
141 * Access the PHY to determine the current link speed and mode, and update the
142 * MAC accordingly.
143 * If no link or auto-negotiation is busy, then no changes are made.
144 */
145static void update_linkspeed(struct net_device *dev, int silent)
146{
147 struct at91_private *lp = netdev_priv(dev);
148 unsigned int bmsr, bmcr, lpa, mac_cfg;
149 unsigned int speed, duplex;
150
151 if (!mii_link_ok(&lp->mii)) { /* no link */
152 netif_carrier_off(dev);
153 if (!silent)
154 printk(KERN_INFO "%s: Link down.\n", dev->name);
155 return;
156 }
157
158 /* Link up, or auto-negotiation still in progress */
159 read_phy(lp, lp->phy_address, MII_BMSR, &bmsr);
160 read_phy(lp, lp->phy_address, MII_BMCR, &bmcr);
161 if (bmcr & BMCR_ANENABLE) { /* AutoNegotiation is enabled */
162 if (!(bmsr & BMSR_ANEGCOMPLETE))
163 return; /* Do nothing - another interrupt generated when negotiation complete */
164
165 read_phy(lp, lp->phy_address, MII_LPA, &lpa);
166 if ((lpa & LPA_100FULL) || (lpa & LPA_100HALF)) speed = SPEED_100;
167 else speed = SPEED_10;
168 if ((lpa & LPA_100FULL) || (lpa & LPA_10FULL)) duplex = DUPLEX_FULL;
169 else duplex = DUPLEX_HALF;
170 } else {
171 speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
172 duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
173 }
174
175 /* Update the MAC */
176 mac_cfg = at91_emac_read(lp, AT91_EMAC_CFG) & ~(AT91_EMAC_SPD | AT91_EMAC_FD);
177 if (speed == SPEED_100) {
178 if (duplex == DUPLEX_FULL) /* 100 Full Duplex */
179 mac_cfg |= AT91_EMAC_SPD | AT91_EMAC_FD;
180 else /* 100 Half Duplex */
181 mac_cfg |= AT91_EMAC_SPD;
182 } else {
183 if (duplex == DUPLEX_FULL) /* 10 Full Duplex */
184 mac_cfg |= AT91_EMAC_FD;
185 else {} /* 10 Half Duplex */
186 }
187 at91_emac_write(lp, AT91_EMAC_CFG, mac_cfg);
188
189 if (!silent)
190 printk(KERN_INFO "%s: Link now %i-%s\n", dev->name, speed, (duplex == DUPLEX_FULL) ? "FullDuplex" : "HalfDuplex");
191 netif_carrier_on(dev);
192}
193
194/*
195 * Handle interrupts from the PHY
196 */
197static irqreturn_t at91ether_phy_interrupt(int irq, void *dev_id)
198{
199 struct net_device *dev = (struct net_device *) dev_id;
200 struct at91_private *lp = netdev_priv(dev);
201 unsigned int phy;
202
203 /*
204 * This handler is triggered on both edges, but the PHY chips expect
205 * level-triggering. We therefore have to check if the PHY actually has
206 * an IRQ pending.
207 */
208 enable_mdi(lp);
209 if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) {
210 read_phy(lp, lp->phy_address, MII_DSINTR_REG, &phy); /* ack interrupt in Davicom PHY */
211 if (!(phy & (1 << 0)))
212 goto done;
213 }
214 else if (lp->phy_type == MII_LXT971A_ID) {
215 read_phy(lp, lp->phy_address, MII_ISINTS_REG, &phy); /* ack interrupt in Intel PHY */
216 if (!(phy & (1 << 2)))
217 goto done;
218 }
219 else if (lp->phy_type == MII_BCM5221_ID) {
220 read_phy(lp, lp->phy_address, MII_BCMINTR_REG, &phy); /* ack interrupt in Broadcom PHY */
221 if (!(phy & (1 << 0)))
222 goto done;
223 }
224 else if (lp->phy_type == MII_KS8721_ID) {
225 read_phy(lp, lp->phy_address, MII_TPISTATUS, &phy); /* ack interrupt in Micrel PHY */
226 if (!(phy & ((1 << 2) | 1)))
227 goto done;
228 }
229 else if (lp->phy_type == MII_T78Q21x3_ID) { /* ack interrupt in Teridian PHY */
230 read_phy(lp, lp->phy_address, MII_T78Q21INT_REG, &phy);
231 if (!(phy & ((1 << 2) | 1)))
232 goto done;
233 }
234 else if (lp->phy_type == MII_DP83848_ID) {
235 read_phy(lp, lp->phy_address, MII_DPPHYSTS_REG, &phy); /* ack interrupt in DP83848 PHY */
236 if (!(phy & (1 << 7)))
237 goto done;
238 }
239
240 update_linkspeed(dev, 0);
241
242done:
243 disable_mdi(lp);
244
245 return IRQ_HANDLED;
246}
247
248/*
249 * Initialize and enable the PHY interrupt for link-state changes
250 */
251static void enable_phyirq(struct net_device *dev)
252{
253 struct at91_private *lp = netdev_priv(dev);
254 unsigned int dsintr, irq_number;
255 int status;
256
257 if (!gpio_is_valid(lp->board_data.phy_irq_pin)) {
258 /*
259 * PHY doesn't have an IRQ pin (RTL8201, DP83847, AC101L),
260 * or board does not have it connected.
261 */
262 mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL);
263 return;
264 }
265
266 irq_number = gpio_to_irq(lp->board_data.phy_irq_pin);
267 status = request_irq(irq_number, at91ether_phy_interrupt, 0, dev->name, dev);
268 if (status) {
269 printk(KERN_ERR "at91_ether: PHY IRQ %d request failed - status %d!\n", irq_number, status);
270 return;
271 }
272
273 spin_lock_irq(&lp->lock);
274 enable_mdi(lp);
275
276 if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { /* for Davicom PHY */
277 read_phy(lp, lp->phy_address, MII_DSINTR_REG, &dsintr);
278 dsintr = dsintr & ~0xf00; /* clear bits 8..11 */
279 write_phy(lp, lp->phy_address, MII_DSINTR_REG, dsintr);
280 }
281 else if (lp->phy_type == MII_LXT971A_ID) { /* for Intel PHY */
282 read_phy(lp, lp->phy_address, MII_ISINTE_REG, &dsintr);
283 dsintr = dsintr | 0xf2; /* set bits 1, 4..7 */
284 write_phy(lp, lp->phy_address, MII_ISINTE_REG, dsintr);
285 }
286 else if (lp->phy_type == MII_BCM5221_ID) { /* for Broadcom PHY */
287 dsintr = (1 << 15) | ( 1 << 14);
288 write_phy(lp, lp->phy_address, MII_BCMINTR_REG, dsintr);
289 }
290 else if (lp->phy_type == MII_KS8721_ID) { /* for Micrel PHY */
291 dsintr = (1 << 10) | ( 1 << 8);
292 write_phy(lp, lp->phy_address, MII_TPISTATUS, dsintr);
293 }
294 else if (lp->phy_type == MII_T78Q21x3_ID) { /* for Teridian PHY */
295 read_phy(lp, lp->phy_address, MII_T78Q21INT_REG, &dsintr);
296 dsintr = dsintr | 0x500; /* set bits 8, 10 */
297 write_phy(lp, lp->phy_address, MII_T78Q21INT_REG, dsintr);
298 }
299 else if (lp->phy_type == MII_DP83848_ID) { /* National Semiconductor DP83848 PHY */
300 read_phy(lp, lp->phy_address, MII_DPMISR_REG, &dsintr);
301 dsintr = dsintr | 0x3c; /* set bits 2..5 */
302 write_phy(lp, lp->phy_address, MII_DPMISR_REG, dsintr);
303 read_phy(lp, lp->phy_address, MII_DPMICR_REG, &dsintr);
304 dsintr = dsintr | 0x3; /* set bits 0,1 */
305 write_phy(lp, lp->phy_address, MII_DPMICR_REG, dsintr);
306 }
307
308 disable_mdi(lp);
309 spin_unlock_irq(&lp->lock);
310}
311
312/*
313 * Disable the PHY interrupt
314 */
315static void disable_phyirq(struct net_device *dev)
316{
317 struct at91_private *lp = netdev_priv(dev);
318 unsigned int dsintr;
319 unsigned int irq_number;
320
321 if (!gpio_is_valid(lp->board_data.phy_irq_pin)) {
322 del_timer_sync(&lp->check_timer);
323 return;
324 }
325
326 spin_lock_irq(&lp->lock);
327 enable_mdi(lp);
328
329 if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { /* for Davicom PHY */
330 read_phy(lp, lp->phy_address, MII_DSINTR_REG, &dsintr);
331 dsintr = dsintr | 0xf00; /* set bits 8..11 */
332 write_phy(lp, lp->phy_address, MII_DSINTR_REG, dsintr);
333 }
334 else if (lp->phy_type == MII_LXT971A_ID) { /* for Intel PHY */
335 read_phy(lp, lp->phy_address, MII_ISINTE_REG, &dsintr);
336 dsintr = dsintr & ~0xf2; /* clear bits 1, 4..7 */
337 write_phy(lp, lp->phy_address, MII_ISINTE_REG, dsintr);
338 }
339 else if (lp->phy_type == MII_BCM5221_ID) { /* for Broadcom PHY */
340 read_phy(lp, lp->phy_address, MII_BCMINTR_REG, &dsintr);
341 dsintr = ~(1 << 14);
342 write_phy(lp, lp->phy_address, MII_BCMINTR_REG, dsintr);
343 }
344 else if (lp->phy_type == MII_KS8721_ID) { /* for Micrel PHY */
345 read_phy(lp, lp->phy_address, MII_TPISTATUS, &dsintr);
346 dsintr = ~((1 << 10) | (1 << 8));
347 write_phy(lp, lp->phy_address, MII_TPISTATUS, dsintr);
348 }
349 else if (lp->phy_type == MII_T78Q21x3_ID) { /* for Teridian PHY */
350 read_phy(lp, lp->phy_address, MII_T78Q21INT_REG, &dsintr);
351 dsintr = dsintr & ~0x500; /* clear bits 8, 10 */
352 write_phy(lp, lp->phy_address, MII_T78Q21INT_REG, dsintr);
353 }
354 else if (lp->phy_type == MII_DP83848_ID) { /* National Semiconductor DP83848 PHY */
355 read_phy(lp, lp->phy_address, MII_DPMICR_REG, &dsintr);
356 dsintr = dsintr & ~0x3; /* clear bits 0, 1 */
357 write_phy(lp, lp->phy_address, MII_DPMICR_REG, dsintr);
358 read_phy(lp, lp->phy_address, MII_DPMISR_REG, &dsintr);
359 dsintr = dsintr & ~0x3c; /* clear bits 2..5 */
360 write_phy(lp, lp->phy_address, MII_DPMISR_REG, dsintr);
361 }
362
363 disable_mdi(lp);
364 spin_unlock_irq(&lp->lock);
365
366 irq_number = gpio_to_irq(lp->board_data.phy_irq_pin);
367 free_irq(irq_number, dev); /* Free interrupt handler */
368}
369
370/*
371 * Perform a software reset of the PHY.
372 */
373#if 0
374static void reset_phy(struct net_device *dev)
375{
376 struct at91_private *lp = netdev_priv(dev);
377 unsigned int bmcr;
378
379 spin_lock_irq(&lp->lock);
380 enable_mdi(lp);
381
382 /* Perform PHY reset */
383 write_phy(lp, lp->phy_address, MII_BMCR, BMCR_RESET);
384
385 /* Wait until PHY reset is complete */
386 do {
387 read_phy(lp, lp->phy_address, MII_BMCR, &bmcr);
388 } while (!(bmcr & BMCR_RESET));
389
390 disable_mdi(lp);
391 spin_unlock_irq(&lp->lock);
392}
393#endif
394
395static void at91ether_check_link(unsigned long dev_id)
396{
397 struct net_device *dev = (struct net_device *) dev_id;
398 struct at91_private *lp = netdev_priv(dev);
399
400 enable_mdi(lp);
401 update_linkspeed(dev, 1);
402 disable_mdi(lp);
403
404 mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL);
405}
406
407/*
408 * Perform any PHY-specific initialization.
409 */
410static void __init initialize_phy(struct at91_private *lp)
411{
412 unsigned int val;
413
414 spin_lock_irq(&lp->lock);
415 enable_mdi(lp);
416
417 if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) {
418 read_phy(lp, lp->phy_address, MII_DSCR_REG, &val);
419 if ((val & (1 << 10)) == 0) /* DSCR bit 10 is 0 -- fiber mode */
420 lp->phy_media = PORT_FIBRE;
421 } else if (machine_is_csb337()) {
422 /* mix link activity status into LED2 link state */
423 write_phy(lp, lp->phy_address, MII_LEDCTRL_REG, 0x0d22);
424 } else if (machine_is_ecbat91())
425 write_phy(lp, lp->phy_address, MII_LEDCTRL_REG, 0x156A);
426
427 disable_mdi(lp);
428 spin_unlock_irq(&lp->lock);
429}
430
431/* ......................... ADDRESS MANAGEMENT ........................ */
432
433/*
434 * NOTE: Your bootloader must always set the MAC address correctly before
435 * booting into Linux.
436 *
437 * - It must always set the MAC address after reset, even if it doesn't
438 * happen to access the Ethernet while it's booting. Some versions of
439 * U-Boot on the AT91RM9200-DK do not do this.
440 *
441 * - Likewise it must store the addresses in the correct byte order.
442 * MicroMonitor (uMon) on the CSB337 does this incorrectly (and
443 * continues to do so, for bug-compatibility).
444 */
445
446static short __init unpack_mac_address(struct net_device *dev, unsigned int hi, unsigned int lo)
447{
448 char addr[6];
449
450 if (machine_is_csb337()) {
451 addr[5] = (lo & 0xff); /* The CSB337 bootloader stores the MAC the wrong-way around */
452 addr[4] = (lo & 0xff00) >> 8;
453 addr[3] = (lo & 0xff0000) >> 16;
454 addr[2] = (lo & 0xff000000) >> 24;
455 addr[1] = (hi & 0xff);
456 addr[0] = (hi & 0xff00) >> 8;
457 }
458 else {
459 addr[0] = (lo & 0xff);
460 addr[1] = (lo & 0xff00) >> 8;
461 addr[2] = (lo & 0xff0000) >> 16;
462 addr[3] = (lo & 0xff000000) >> 24;
463 addr[4] = (hi & 0xff);
464 addr[5] = (hi & 0xff00) >> 8;
465 }
466
467 if (is_valid_ether_addr(addr)) {
468 memcpy(dev->dev_addr, &addr, 6);
469 return 1;
470 }
471 return 0;
472}
473
474/*
475 * Set the ethernet MAC address in dev->dev_addr
476 */
477static void __init get_mac_address(struct net_device *dev)
478{
479 struct at91_private *lp = netdev_priv(dev);
480
481 /* Check Specific-Address 1 */
482 if (unpack_mac_address(dev, at91_emac_read(lp, AT91_EMAC_SA1H), at91_emac_read(lp, AT91_EMAC_SA1L)))
483 return;
484 /* Check Specific-Address 2 */
485 if (unpack_mac_address(dev, at91_emac_read(lp, AT91_EMAC_SA2H), at91_emac_read(lp, AT91_EMAC_SA2L)))
486 return;
487 /* Check Specific-Address 3 */
488 if (unpack_mac_address(dev, at91_emac_read(lp, AT91_EMAC_SA3H), at91_emac_read(lp, AT91_EMAC_SA3L)))
489 return;
490 /* Check Specific-Address 4 */
491 if (unpack_mac_address(dev, at91_emac_read(lp, AT91_EMAC_SA4H), at91_emac_read(lp, AT91_EMAC_SA4L)))
492 return;
493
494 printk(KERN_ERR "at91_ether: Your bootloader did not configure a MAC address.\n");
495}
496
497/*
498 * Program the hardware MAC address from dev->dev_addr.
499 */
500static void update_mac_address(struct net_device *dev)
501{
502 struct at91_private *lp = netdev_priv(dev);
503
504 at91_emac_write(lp, AT91_EMAC_SA1L, (dev->dev_addr[3] << 24) | (dev->dev_addr[2] << 16) | (dev->dev_addr[1] << 8) | (dev->dev_addr[0]));
505 at91_emac_write(lp, AT91_EMAC_SA1H, (dev->dev_addr[5] << 8) | (dev->dev_addr[4]));
506
507 at91_emac_write(lp, AT91_EMAC_SA2L, 0);
508 at91_emac_write(lp, AT91_EMAC_SA2H, 0);
509}
510
511/*
512 * Store the new hardware address in dev->dev_addr, and update the MAC.
513 */
514static int set_mac_address(struct net_device *dev, void* addr)
515{
516 struct sockaddr *address = addr;
517
518 if (!is_valid_ether_addr(address->sa_data))
519 return -EADDRNOTAVAIL;
520
521 memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
522 update_mac_address(dev);
523 35
524 printk("%s: Setting MAC address to %pM\n", dev->name, 36/* 1518 rounded up */
525 dev->dev_addr); 37#define MAX_RBUFF_SZ 0x600
38/* max number of receive buffers */
39#define MAX_RX_DESCR 9
526 40
527 return 0; 41/* Initialize and start the Receiver and Transmit subsystems */
528} 42static int at91ether_start(struct net_device *dev)
529
530static int inline hash_bit_value(int bitnr, __u8 *addr)
531{
532 if (addr[bitnr / 8] & (1 << (bitnr % 8)))
533 return 1;
534 return 0;
535}
536
537/*
538 * The hash address register is 64 bits long and takes up two locations in the memory map.
539 * The least significant bits are stored in EMAC_HSL and the most significant
540 * bits in EMAC_HSH.
541 *
542 * The unicast hash enable and the multicast hash enable bits in the network configuration
543 * register enable the reception of hash matched frames. The destination address is
544 * reduced to a 6 bit index into the 64 bit hash register using the following hash function.
545 * The hash function is an exclusive or of every sixth bit of the destination address.
546 * hash_index[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
547 * hash_index[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
548 * hash_index[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
549 * hash_index[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
550 * hash_index[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
551 * hash_index[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
552 * da[0] represents the least significant bit of the first byte received, that is, the multicast/
553 * unicast indicator, and da[47] represents the most significant bit of the last byte
554 * received.
555 * If the hash index points to a bit that is set in the hash register then the frame will be
556 * matched according to whether the frame is multicast or unicast.
557 * A multicast match will be signalled if the multicast hash enable bit is set, da[0] is 1 and
558 * the hash index points to a bit set in the hash register.
559 * A unicast match will be signalled if the unicast hash enable bit is set, da[0] is 0 and the
560 * hash index points to a bit set in the hash register.
561 * To receive all multicast frames, the hash register should be set with all ones and the
562 * multicast hash enable bit should be set in the network configuration register.
563 */
564
565/*
566 * Return the hash index value for the specified address.
567 */
568static int hash_get_index(__u8 *addr)
569{
570 int i, j, bitval;
571 int hash_index = 0;
572
573 for (j = 0; j < 6; j++) {
574 for (i = 0, bitval = 0; i < 8; i++)
575 bitval ^= hash_bit_value(i*6 + j, addr);
576
577 hash_index |= (bitval << j);
578 }
579
580 return hash_index;
581}
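A worked example for the removed hash helpers above: compute the 6-bit hash index of the multicast address 01:00:5e:00:00:01 and set it in a 64-bit filter, as at91ether_sethashtable() did (filter[0] goes to EMAC_HSL, filter[1] to EMAC_HSH). Standalone harness, not driver code:

#include <stdio.h>

static int hash_bit_value(int bitnr, const unsigned char *addr)
{
	return (addr[bitnr / 8] >> (bitnr % 8)) & 1;
}

static int hash_get_index(const unsigned char *addr)
{
	int i, j, bitval, hash_index = 0;

	for (j = 0; j < 6; j++) {
		for (i = 0, bitval = 0; i < 8; i++)
			bitval ^= hash_bit_value(i * 6 + j, addr);
		hash_index |= bitval << j;
	}
	return hash_index;
}

int main(void)
{
	const unsigned char mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	unsigned int filter[2] = { 0, 0 };
	int bitnr = hash_get_index(mc);

	filter[bitnr >> 5] |= 1u << (bitnr & 31);

	/* Prints: hash index 38 -> HSH bit 6 (filter[1]=0x00000040) */
	printf("hash index %d -> %s bit %d (filter[1]=0x%08x)\n",
	       bitnr, bitnr > 31 ? "HSH" : "HSL", bitnr & 31, filter[1]);
	return 0;
}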
582
583/*
584 * Add multicast addresses to the internal multicast-hash table.
585 */
586static void at91ether_sethashtable(struct net_device *dev)
587{ 43{
588 struct at91_private *lp = netdev_priv(dev); 44 struct macb *lp = netdev_priv(dev);
589 struct netdev_hw_addr *ha; 45 dma_addr_t addr;
590 unsigned long mc_filter[2]; 46 u32 ctl;
591 unsigned int bitnr; 47 int i;
592
593 mc_filter[0] = mc_filter[1] = 0;
594
595 netdev_for_each_mc_addr(ha, dev) {
596 bitnr = hash_get_index(ha->addr);
597 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
598 }
599
600 at91_emac_write(lp, AT91_EMAC_HSL, mc_filter[0]);
601 at91_emac_write(lp, AT91_EMAC_HSH, mc_filter[1]);
602}
603 48
604/* 49 lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
605 * Enable/Disable promiscuous and multicast modes. 50 MAX_RX_DESCR * sizeof(struct macb_dma_desc),
606 */ 51 &lp->rx_ring_dma, GFP_KERNEL);
607static void at91ether_set_multicast_list(struct net_device *dev) 52 if (!lp->rx_ring) {
608{ 53 netdev_err(dev, "unable to alloc rx ring DMA buffer\n");
609 struct at91_private *lp = netdev_priv(dev); 54 return -ENOMEM;
610 unsigned long cfg;
611
612 cfg = at91_emac_read(lp, AT91_EMAC_CFG);
613
614 if (dev->flags & IFF_PROMISC) /* Enable promiscuous mode */
615 cfg |= AT91_EMAC_CAF;
616 else if (dev->flags & (~IFF_PROMISC)) /* Disable promiscuous mode */
617 cfg &= ~AT91_EMAC_CAF;
618
619 if (dev->flags & IFF_ALLMULTI) { /* Enable all multicast mode */
620 at91_emac_write(lp, AT91_EMAC_HSH, -1);
621 at91_emac_write(lp, AT91_EMAC_HSL, -1);
622 cfg |= AT91_EMAC_MTI;
623 } else if (!netdev_mc_empty(dev)) { /* Enable specific multicasts */
624 at91ether_sethashtable(dev);
625 cfg |= AT91_EMAC_MTI;
626 } else if (dev->flags & (~IFF_ALLMULTI)) { /* Disable all multicast mode */
627 at91_emac_write(lp, AT91_EMAC_HSH, 0);
628 at91_emac_write(lp, AT91_EMAC_HSL, 0);
629 cfg &= ~AT91_EMAC_MTI;
630 } 55 }
631 56
632 at91_emac_write(lp, AT91_EMAC_CFG, cfg); 57 lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
633} 58 MAX_RX_DESCR * MAX_RBUFF_SZ,
634 59 &lp->rx_buffers_dma, GFP_KERNEL);
635/* ......................... ETHTOOL SUPPORT ........................... */ 60 if (!lp->rx_buffers) {
636 61 netdev_err(dev, "unable to alloc rx data DMA buffer\n");
637static int mdio_read(struct net_device *dev, int phy_id, int location)
638{
639 struct at91_private *lp = netdev_priv(dev);
640 unsigned int value;
641
642 read_phy(lp, phy_id, location, &value);
643 return value;
644}
645
646static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
647{
648 struct at91_private *lp = netdev_priv(dev);
649
650 write_phy(lp, phy_id, location, value);
651}
652
653static int at91ether_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
654{
655 struct at91_private *lp = netdev_priv(dev);
656 int ret;
657
658 spin_lock_irq(&lp->lock);
659 enable_mdi(lp);
660
661 ret = mii_ethtool_gset(&lp->mii, cmd);
662
663 disable_mdi(lp);
664 spin_unlock_irq(&lp->lock);
665 62
666 if (lp->phy_media == PORT_FIBRE) { /* override media type since mii.c doesn't know */ 63 dma_free_coherent(&lp->pdev->dev,
667 cmd->supported = SUPPORTED_FIBRE; 64 MAX_RX_DESCR * sizeof(struct macb_dma_desc),
668 cmd->port = PORT_FIBRE; 65 lp->rx_ring, lp->rx_ring_dma);
66 lp->rx_ring = NULL;
67 return -ENOMEM;
669 } 68 }
670 69
671 return ret; 70 addr = lp->rx_buffers_dma;
672}
673
674static int at91ether_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
675{
676 struct at91_private *lp = netdev_priv(dev);
677 int ret;
678
679 spin_lock_irq(&lp->lock);
680 enable_mdi(lp);
681
682 ret = mii_ethtool_sset(&lp->mii, cmd);
683
684 disable_mdi(lp);
685 spin_unlock_irq(&lp->lock);
686
687 return ret;
688}
689
690static int at91ether_nwayreset(struct net_device *dev)
691{
692 struct at91_private *lp = netdev_priv(dev);
693 int ret;
694
695 spin_lock_irq(&lp->lock);
696 enable_mdi(lp);
697
698 ret = mii_nway_restart(&lp->mii);
699
700 disable_mdi(lp);
701 spin_unlock_irq(&lp->lock);
702
703 return ret;
704}
705
706static void at91ether_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
707{
708 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
709 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
710 strlcpy(info->bus_info, dev_name(dev->dev.parent), sizeof(info->bus_info));
711}
712
713static const struct ethtool_ops at91ether_ethtool_ops = {
714 .get_settings = at91ether_get_settings,
715 .set_settings = at91ether_set_settings,
716 .get_drvinfo = at91ether_get_drvinfo,
717 .nway_reset = at91ether_nwayreset,
718 .get_link = ethtool_op_get_link,
719};
720
721static int at91ether_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
722{
723 struct at91_private *lp = netdev_priv(dev);
724 int res;
725
726 if (!netif_running(dev))
727 return -EINVAL;
728
729 spin_lock_irq(&lp->lock);
730 enable_mdi(lp);
731 res = generic_mii_ioctl(&lp->mii, if_mii(rq), cmd, NULL);
732 disable_mdi(lp);
733 spin_unlock_irq(&lp->lock);
734
735 return res;
736}
737
738/* ................................ MAC ................................ */
739
740/*
741 * Initialize and start the Receiver and Transmit subsystems
742 */
743static void at91ether_start(struct net_device *dev)
744{
745 struct at91_private *lp = netdev_priv(dev);
746 struct recv_desc_bufs *dlist, *dlist_phys;
747 int i;
748 unsigned long ctl;
749
750 dlist = lp->dlist;
751 dlist_phys = lp->dlist_phys;
752
753 for (i = 0; i < MAX_RX_DESCR; i++) { 71 for (i = 0; i < MAX_RX_DESCR; i++) {
754 dlist->descriptors[i].addr = (unsigned int) &dlist_phys->recv_buf[i][0]; 72 lp->rx_ring[i].addr = addr;
755 dlist->descriptors[i].size = 0; 73 lp->rx_ring[i].ctrl = 0;
74 addr += MAX_RBUFF_SZ;
756 } 75 }
757 76
758 /* Set the Wrap bit on the last descriptor */ 77 /* Set the Wrap bit on the last descriptor */
759 dlist->descriptors[i-1].addr |= EMAC_DESC_WRAP; 78 lp->rx_ring[MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP);
760 79
761 /* Reset buffer index */ 80 /* Reset buffer index */
762 lp->rxBuffIndex = 0; 81 lp->rx_tail = 0;
763 82
764 /* Program address of descriptor list in Rx Buffer Queue register */ 83 /* Program address of descriptor list in Rx Buffer Queue register */
765 at91_emac_write(lp, AT91_EMAC_RBQP, (unsigned long) dlist_phys); 84 macb_writel(lp, RBQP, lp->rx_ring_dma);
766 85
767 /* Enable Receive and Transmit */ 86 /* Enable Receive and Transmit */
768 ctl = at91_emac_read(lp, AT91_EMAC_CTL); 87 ctl = macb_readl(lp, NCR);
769 at91_emac_write(lp, AT91_EMAC_CTL, ctl | AT91_EMAC_RE | AT91_EMAC_TE); 88 macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
89
90 return 0;
770} 91}
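The new at91ether_start() carves one coherent allocation into MAX_RX_DESCR two-word descriptors spaced MAX_RBUFF_SZ (0x600) apart and tags the last one with the wrap bit, so the DMA engine circles back to descriptor 0 without needing a ring-size register. A standalone sketch of that layout and the walk order; the bit position and addresses are illustrative, the real driver uses MACB_BIT(RX_WRAP):

#include <stdio.h>

#define N_DESCR   9
#define BIT_WRAP  (1u << 1)	/* illustrative wrap flag in the addr word */

struct desc {
	unsigned int addr;	/* buffer address | status bits */
	unsigned int ctrl;
};

int main(void)
{
	struct desc ring[N_DESCR] = { { 0 } };
	unsigned int buf = 0x20000000;	/* illustrative DMA base address */
	int i, cur;

	for (i = 0; i < N_DESCR; i++) {
		ring[i].addr = buf;
		buf += 0x600;		/* MAX_RBUFF_SZ-sized slots */
	}
	ring[N_DESCR - 1].addr |= BIT_WRAP;

	/* Walk the ring the way the DMA engine would, twice around. */
	for (i = 0, cur = 0; i < 2 * N_DESCR; i++)
		cur = (ring[cur].addr & BIT_WRAP) ? 0 : cur + 1;

	printf("ended on descriptor %d\n", cur);
	return 0;
}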
771 92
772/* 93/* Open the ethernet interface */
773 * Open the ethernet interface
774 */
775static int at91ether_open(struct net_device *dev) 94static int at91ether_open(struct net_device *dev)
776{ 95{
777 struct at91_private *lp = netdev_priv(dev); 96 struct macb *lp = netdev_priv(dev);
778 unsigned long ctl; 97 u32 ctl;
98 int ret;
779 99
780 if (!is_valid_ether_addr(dev->dev_addr)) 100 if (!is_valid_ether_addr(dev->dev_addr))
781 return -EADDRNOTAVAIL; 101 return -EADDRNOTAVAIL;
782 102
783 clk_enable(lp->ether_clk); /* Re-enable Peripheral clock */
784
785 /* Clear internal statistics */ 103 /* Clear internal statistics */
786 ctl = at91_emac_read(lp, AT91_EMAC_CTL); 104 ctl = macb_readl(lp, NCR);
787 at91_emac_write(lp, AT91_EMAC_CTL, ctl | AT91_EMAC_CSR); 105 macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
788 106
789 /* Update the MAC address (incase user has changed it) */ 107 macb_set_hwaddr(lp);
790 update_mac_address(dev);
791 108
792 /* Enable PHY interrupt */ 109 ret = at91ether_start(dev);
793 enable_phyirq(dev); 110 if (ret)
111 return ret;
794 112
795 /* Enable MAC interrupts */ 113 /* Enable MAC interrupts */
796 at91_emac_write(lp, AT91_EMAC_IER, AT91_EMAC_RCOM | AT91_EMAC_RBNA 114 macb_writel(lp, IER, MACB_BIT(RCOMP) |
797 | AT91_EMAC_TUND | AT91_EMAC_RTRY | AT91_EMAC_TCOM 115 MACB_BIT(RXUBR) |
798 | AT91_EMAC_ROVR | AT91_EMAC_ABT); 116 MACB_BIT(ISR_TUND) |
799 117 MACB_BIT(ISR_RLE) |
800 /* Determine current link speed */ 118 MACB_BIT(TCOMP) |
801 spin_lock_irq(&lp->lock); 119 MACB_BIT(ISR_ROVR) |
802 enable_mdi(lp); 120 MACB_BIT(HRESP));
803 update_linkspeed(dev, 0); 121
804 disable_mdi(lp); 122 /* schedule a link state check */
805 spin_unlock_irq(&lp->lock); 123 phy_start(lp->phy_dev);
806 124
807 at91ether_start(dev);
808 netif_start_queue(dev); 125 netif_start_queue(dev);
126
809 return 0; 127 return 0;
810} 128}
811 129
812/* 130/* Close the interface */
813 * Close the interface
814 */
815static int at91ether_close(struct net_device *dev) 131static int at91ether_close(struct net_device *dev)
816{ 132{
817 struct at91_private *lp = netdev_priv(dev); 133 struct macb *lp = netdev_priv(dev);
818 unsigned long ctl; 134 u32 ctl;
819 135
820 /* Disable Receiver and Transmitter */ 136 /* Disable Receiver and Transmitter */
821 ctl = at91_emac_read(lp, AT91_EMAC_CTL); 137 ctl = macb_readl(lp, NCR);
822 at91_emac_write(lp, AT91_EMAC_CTL, ctl & ~(AT91_EMAC_TE | AT91_EMAC_RE)); 138 macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
823
824 /* Disable PHY interrupt */
825 disable_phyirq(dev);
826 139
827 /* Disable MAC interrupts */ 140 /* Disable MAC interrupts */
828 at91_emac_write(lp, AT91_EMAC_IDR, AT91_EMAC_RCOM | AT91_EMAC_RBNA 141 macb_writel(lp, IDR, MACB_BIT(RCOMP) |
829 | AT91_EMAC_TUND | AT91_EMAC_RTRY | AT91_EMAC_TCOM 142 MACB_BIT(RXUBR) |
830 | AT91_EMAC_ROVR | AT91_EMAC_ABT); 143 MACB_BIT(ISR_TUND) |
144 MACB_BIT(ISR_RLE) |
145 MACB_BIT(TCOMP) |
146 MACB_BIT(ISR_ROVR) |
147 MACB_BIT(HRESP));
831 148
832 netif_stop_queue(dev); 149 netif_stop_queue(dev);
833 150
834 clk_disable(lp->ether_clk); /* Disable Peripheral clock */ 151 dma_free_coherent(&lp->pdev->dev,
152 MAX_RX_DESCR * sizeof(struct macb_dma_desc),
153 lp->rx_ring, lp->rx_ring_dma);
154 lp->rx_ring = NULL;
155
156 dma_free_coherent(&lp->pdev->dev,
157 MAX_RX_DESCR * MAX_RBUFF_SZ,
158 lp->rx_buffers, lp->rx_buffers_dma);
159 lp->rx_buffers = NULL;
835 160
836 return 0; 161 return 0;
837} 162}
838 163
839/* 164/* Transmit packet */
840 * Transmit packet.
841 */
842static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev) 165static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
843{ 166{
844 struct at91_private *lp = netdev_priv(dev); 167 struct macb *lp = netdev_priv(dev);
845 168
846 if (at91_emac_read(lp, AT91_EMAC_TSR) & AT91_EMAC_TSR_BNQ) { 169 if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
847 netif_stop_queue(dev); 170 netif_stop_queue(dev);
848 171
849 /* Store packet information (to free when Tx completed) */ 172 /* Store packet information (to free when Tx completed) */
850 lp->skb = skb; 173 lp->skb = skb;
851 lp->skb_length = skb->len; 174 lp->skb_length = skb->len;
852 lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE); 175 lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
853 dev->stats.tx_bytes += skb->len; 176 DMA_TO_DEVICE);
854 177
855 /* Set address of the data in the Transmit Address register */ 178 /* Set address of the data in the Transmit Address register */
856 at91_emac_write(lp, AT91_EMAC_TAR, lp->skb_physaddr); 179 macb_writel(lp, TAR, lp->skb_physaddr);
857 /* Set length of the packet in the Transmit Control register */ 180 /* Set length of the packet in the Transmit Control register */
858 at91_emac_write(lp, AT91_EMAC_TCR, skb->len); 181 macb_writel(lp, TCR, skb->len);
859 182
860 } else { 183 } else {
861 printk(KERN_ERR "at91_ether.c: at91ether_start_xmit() called, but device is busy!\n"); 184 netdev_err(dev, "%s called, but device is busy!\n", __func__);
862 return NETDEV_TX_BUSY; /* if we return anything but zero, dev.c:1055 calls kfree_skb(skb) 185 return NETDEV_TX_BUSY;
863 on this skb, he also reports -ENETDOWN and printk's, so either
864 we free and return(0) or don't free and return 1 */
865 } 186 }
866 187
867 return NETDEV_TX_OK; 188 return NETDEV_TX_OK;
868} 189}
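The RM9200 EMAC has a single transmit buffer register, so the rewritten xmit path allows exactly one frame in flight: TSR's BNQ bit reports the slot free, the queue is stopped as soon as a frame is handed over, and only the TCOMP interrupt (see at91ether_interrupt() below) frees the skb and wakes the queue. A toy model of that one-slot flow-control policy, illustrative only:

#include <stdbool.h>
#include <stdio.h>

static bool bnq = true;		/* Transmit Status: Buffer Not Queued */
static bool queue_stopped;

static int start_xmit(int len)
{
	if (queue_stopped || !bnq)
		return -1;	/* NETDEV_TX_BUSY: stack should not get here */
	bnq = false;
	queue_stopped = true;	/* netif_stop_queue() */
	printf("queued %d bytes\n", len);
	return 0;
}

static void tx_complete_irq(void)
{
	bnq = true;
	queue_stopped = false;	/* netif_wake_queue() */
}

int main(void)
{
	start_xmit(60);
	if (start_xmit(60) < 0)
		printf("busy while frame in flight\n");
	tx_complete_irq();
	return start_xmit(60);
}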
869 190
870/* 191/* Extract received frames from the buffer descriptors and send them to the upper layers.
871 * Update the current statistics from the internal statistics registers.
872 */
873static struct net_device_stats *at91ether_stats(struct net_device *dev)
874{
875 struct at91_private *lp = netdev_priv(dev);
876 int ale, lenerr, seqe, lcol, ecol;
877
878 if (netif_running(dev)) {
879 dev->stats.rx_packets += at91_emac_read(lp, AT91_EMAC_OK); /* Good frames received */
880 ale = at91_emac_read(lp, AT91_EMAC_ALE);
881 dev->stats.rx_frame_errors += ale; /* Alignment errors */
882 lenerr = at91_emac_read(lp, AT91_EMAC_ELR) + at91_emac_read(lp, AT91_EMAC_USF);
883 dev->stats.rx_length_errors += lenerr; /* Excessive Length or Undersize Frame error */
884 seqe = at91_emac_read(lp, AT91_EMAC_SEQE);
885 dev->stats.rx_crc_errors += seqe; /* CRC error */
886 dev->stats.rx_fifo_errors += at91_emac_read(lp, AT91_EMAC_DRFC);/* Receive buffer not available */
887 dev->stats.rx_errors += (ale + lenerr + seqe
888 + at91_emac_read(lp, AT91_EMAC_CDE) + at91_emac_read(lp, AT91_EMAC_RJB));
889
890 dev->stats.tx_packets += at91_emac_read(lp, AT91_EMAC_FRA); /* Frames successfully transmitted */
891 dev->stats.tx_fifo_errors += at91_emac_read(lp, AT91_EMAC_TUE); /* Transmit FIFO underruns */
892 dev->stats.tx_carrier_errors += at91_emac_read(lp, AT91_EMAC_CSE); /* Carrier Sense errors */
893 dev->stats.tx_heartbeat_errors += at91_emac_read(lp, AT91_EMAC_SQEE);/* Heartbeat error */
894
895 lcol = at91_emac_read(lp, AT91_EMAC_LCOL);
896 ecol = at91_emac_read(lp, AT91_EMAC_ECOL);
897 dev->stats.tx_window_errors += lcol; /* Late collisions */
898 dev->stats.tx_aborted_errors += ecol; /* 16 collisions */
899
900 dev->stats.collisions += (at91_emac_read(lp, AT91_EMAC_SCOL) + at91_emac_read(lp, AT91_EMAC_MCOL) + lcol + ecol);
901 }
902 return &dev->stats;
903}
904
905/*
906 * Extract received frames from the buffer descriptors and send them to the upper layers. 192 * (Called from interrupt context)
907 * (Called from interrupt context) 192 * (Called from interrupt context)
908 */ 193 */
909static void at91ether_rx(struct net_device *dev) 194static void at91ether_rx(struct net_device *dev)
910{ 195{
911 struct at91_private *lp = netdev_priv(dev); 196 struct macb *lp = netdev_priv(dev);
912 struct recv_desc_bufs *dlist;
913 unsigned char *p_recv; 197 unsigned char *p_recv;
914 struct sk_buff *skb; 198 struct sk_buff *skb;
915 unsigned int pktlen; 199 unsigned int pktlen;
916 200
917 dlist = lp->dlist; 201 while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
918 while (dlist->descriptors[lp->rxBuffIndex].addr & EMAC_DESC_DONE) { 202 p_recv = lp->rx_buffers + lp->rx_tail * MAX_RBUFF_SZ;
919 p_recv = dlist->recv_buf[lp->rxBuffIndex]; 203 pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
920 pktlen = dlist->descriptors[lp->rxBuffIndex].size & 0x7ff; /* Length of frame including FCS */
921 skb = netdev_alloc_skb(dev, pktlen + 2); 204 skb = netdev_alloc_skb(dev, pktlen + 2);
922 if (skb != NULL) { 205 if (skb) {
923 skb_reserve(skb, 2); 206 skb_reserve(skb, 2);
924 memcpy(skb_put(skb, pktlen), p_recv, pktlen); 207 memcpy(skb_put(skb, pktlen), p_recv, pktlen);
925 208
926 skb->protocol = eth_type_trans(skb, dev); 209 skb->protocol = eth_type_trans(skb, dev);
927 dev->stats.rx_bytes += pktlen; 210 lp->stats.rx_packets++;
211 lp->stats.rx_bytes += pktlen;
928 netif_rx(skb); 212 netif_rx(skb);
213 } else {
214 lp->stats.rx_dropped++;
215 netdev_notice(dev, "Memory squeeze, dropping packet.\n");
929 } 216 }
930 else {
931 dev->stats.rx_dropped += 1;
932 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
933 }
934 217
935 if (dlist->descriptors[lp->rxBuffIndex].size & EMAC_MULTICAST) 218 if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
936 dev->stats.multicast++; 219 lp->stats.multicast++;
220
221 /* reset ownership bit */
222 lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);
937 223
938 dlist->descriptors[lp->rxBuffIndex].addr &= ~EMAC_DESC_DONE; /* reset ownership bit */ 224 /* wrap after last buffer */
939 if (lp->rxBuffIndex == MAX_RX_DESCR-1) /* wrap after last buffer */ 225 if (lp->rx_tail == MAX_RX_DESCR - 1)
940 lp->rxBuffIndex = 0; 226 lp->rx_tail = 0;
941 else 227 else
942 lp->rxBuffIndex++; 228 lp->rx_tail++;
943 } 229 }
944} 230}
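The RX loop above is driven entirely by macb.h accessors: MACB_BIT(RX_USED) is the descriptor ownership bit and MACB_BF(RX_FRMLEN, ...) masks out the frame-length field of the control word. A compressed sketch of how those macros expand, with offset/size values quoted from memory:

/* #define MACB_RX_USED_OFFSET    0        (addr word, bit 0)
 * #define MACB_RX_WRAP_OFFSET    1        (addr word, bit 1)
 * #define MACB_RX_FRMLEN_OFFSET  0        (ctrl word)
 * #define MACB_RX_FRMLEN_SIZE    12
 *
 * #define MACB_BIT(name) (1 << MACB_##name##_OFFSET)
 * #define MACB_BF(name, value) \
 *         (((value) & ((1 << MACB_##name##_SIZE) - 1)) \
 *          << MACB_##name##_OFFSET)
 *
 * Because RX_FRMLEN sits at offset 0, MACB_BF(RX_FRMLEN, ctrl) reduces
 * to ctrl & 0xfff, so the field builder doubles as an extractor here.
 */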
945 231
946/* 232/* MAC interrupt handler */
947 * MAC interrupt handler
948 */
949static irqreturn_t at91ether_interrupt(int irq, void *dev_id) 233static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
950{ 234{
951 struct net_device *dev = (struct net_device *) dev_id; 235 struct net_device *dev = dev_id;
952 struct at91_private *lp = netdev_priv(dev); 236 struct macb *lp = netdev_priv(dev);
953 unsigned long intstatus, ctl; 237 u32 intstatus, ctl;
954 238
955 /* MAC Interrupt Status register indicates what interrupts are pending. 239 /* MAC Interrupt Status register indicates what interrupts are pending.
956 It is automatically cleared once read. */ 240 * It is automatically cleared once read.
957 intstatus = at91_emac_read(lp, AT91_EMAC_ISR); 241 */
242 intstatus = macb_readl(lp, ISR);
958 243
959 if (intstatus & AT91_EMAC_RCOM) /* Receive complete */ 244 /* Receive complete */
245 if (intstatus & MACB_BIT(RCOMP))
960 at91ether_rx(dev); 246 at91ether_rx(dev);
961 247
962 if (intstatus & AT91_EMAC_TCOM) { /* Transmit complete */ 248 /* Transmit complete */
963 /* The TCOM bit is set even if the transmission failed. */ 249 if (intstatus & MACB_BIT(TCOMP)) {
964 if (intstatus & (AT91_EMAC_TUND | AT91_EMAC_RTRY)) 250 /* The TCOM bit is set even if the transmission failed */
965 dev->stats.tx_errors += 1; 251 if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
252 lp->stats.tx_errors++;
966 253
967 if (lp->skb) { 254 if (lp->skb) {
968 dev_kfree_skb_irq(lp->skb); 255 dev_kfree_skb_irq(lp->skb);
969 lp->skb = NULL; 256 lp->skb = NULL;
970 dma_unmap_single(NULL, lp->skb_physaddr, lp->skb_length, DMA_TO_DEVICE); 257 dma_unmap_single(NULL, lp->skb_physaddr, lp->skb_length, DMA_TO_DEVICE);
258 lp->stats.tx_packets++;
259 lp->stats.tx_bytes += lp->skb_length;
971 } 260 }
972 netif_wake_queue(dev); 261 netif_wake_queue(dev);
973 } 262 }
974 263
975 /* Work-around for Errata #11 */ 264 /* Work-around for EMAC Errata section 41.3.1 */
976 if (intstatus & AT91_EMAC_RBNA) { 265 if (intstatus & MACB_BIT(RXUBR)) {
977 ctl = at91_emac_read(lp, AT91_EMAC_CTL); 266 ctl = macb_readl(lp, NCR);
978 at91_emac_write(lp, AT91_EMAC_CTL, ctl & ~AT91_EMAC_RE); 267 macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
979 at91_emac_write(lp, AT91_EMAC_CTL, ctl | AT91_EMAC_RE); 268 macb_writel(lp, NCR, ctl | MACB_BIT(RE));
980 } 269 }
981 270
982 if (intstatus & AT91_EMAC_ROVR) 271 if (intstatus & MACB_BIT(ISR_ROVR))
983 printk("%s: ROVR error\n", dev->name); 272 netdev_err(dev, "ROVR error\n");
984 273
985 return IRQ_HANDLED; 274 return IRQ_HANDLED;
986} 275}
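Note the recovery sequence for the receive-buffer-not-available condition: the handler toggles the receive enable bit off and back on in NCR, which resets the EMAC's internal receive state machine, the same workaround the removed driver applied for "Errata #11" but now expressed through the macb register accessors.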
@@ -1000,10 +289,10 @@ static const struct net_device_ops at91ether_netdev_ops = {
1000 .ndo_open = at91ether_open, 289 .ndo_open = at91ether_open,
1001 .ndo_stop = at91ether_close, 290 .ndo_stop = at91ether_close,
1002 .ndo_start_xmit = at91ether_start_xmit, 291 .ndo_start_xmit = at91ether_start_xmit,
1003 .ndo_get_stats = at91ether_stats, 292 .ndo_get_stats = macb_get_stats,
1004 .ndo_set_rx_mode = at91ether_set_multicast_list, 293 .ndo_set_rx_mode = macb_set_rx_mode,
1005 .ndo_set_mac_address = set_mac_address, 294 .ndo_set_mac_address = eth_mac_addr,
1006 .ndo_do_ioctl = at91ether_ioctl, 295 .ndo_do_ioctl = macb_ioctl,
1007 .ndo_validate_addr = eth_validate_addr, 296 .ndo_validate_addr = eth_validate_addr,
1008 .ndo_change_mtu = eth_change_mtu, 297 .ndo_change_mtu = eth_change_mtu,
1009#ifdef CONFIG_NET_POLL_CONTROLLER 298#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1011,197 +300,160 @@ static const struct net_device_ops at91ether_netdev_ops = {
1011#endif 300#endif
1012}; 301};
1013 302
1014/* 303#if defined(CONFIG_OF)
1015 * Detect the PHY type, and its address. 304static const struct of_device_id at91ether_dt_ids[] = {
1016 */ 305 { .compatible = "cdns,at91rm9200-emac" },
1017static int __init at91ether_phy_detect(struct at91_private *lp) 306 { .compatible = "cdns,emac" },
307 { /* sentinel */ }
308};
309
310MODULE_DEVICE_TABLE(of, at91ether_dt_ids);
311
312static int at91ether_get_phy_mode_dt(struct platform_device *pdev)
1018{ 313{
1019 unsigned int phyid1, phyid2; 314 struct device_node *np = pdev->dev.of_node;
1020 unsigned long phy_id;
1021 unsigned short phy_address = 0;
1022
1023 while (phy_address < PHY_MAX_ADDR) {
1024 /* Read the PHY ID registers */
1025 enable_mdi(lp);
1026 read_phy(lp, phy_address, MII_PHYSID1, &phyid1);
1027 read_phy(lp, phy_address, MII_PHYSID2, &phyid2);
1028 disable_mdi(lp);
1029
1030 phy_id = (phyid1 << 16) | (phyid2 & 0xfff0);
1031 switch (phy_id) {
1032 case MII_DM9161_ID: /* Davicom 9161: PHY_ID1 = 0x181, PHY_ID2 = B881 */
1033 case MII_DM9161A_ID: /* Davicom 9161A: PHY_ID1 = 0x181, PHY_ID2 = B8A0 */
1034 case MII_LXT971A_ID: /* Intel LXT971A: PHY_ID1 = 0x13, PHY_ID2 = 78E0 */
1035 case MII_RTL8201_ID: /* Realtek RTL8201: PHY_ID1 = 0, PHY_ID2 = 0x8201 */
1036 case MII_BCM5221_ID: /* Broadcom BCM5221: PHY_ID1 = 0x40, PHY_ID2 = 0x61e0 */
1037 case MII_DP83847_ID: /* National Semiconductor DP83847: */
1038 case MII_DP83848_ID: /* National Semiconductor DP83848: */
1039 case MII_AC101L_ID: /* Altima AC101L: PHY_ID1 = 0x22, PHY_ID2 = 0x5520 */
1040 case MII_KS8721_ID: /* Micrel KS8721: PHY_ID1 = 0x22, PHY_ID2 = 0x1610 */
1041 case MII_T78Q21x3_ID: /* Teridian 78Q21x3: PHY_ID1 = 0x0E, PHY_ID2 = 7237 */
1042 case MII_LAN83C185_ID: /* SMSC LAN83C185: PHY_ID1 = 0x0007, PHY_ID2 = 0xC0A1 */
1043 /* store detected values */
1044 lp->phy_type = phy_id; /* Type of PHY connected */
1045 lp->phy_address = phy_address; /* MDI address of PHY */
1046 return 1;
1047 }
1048 315
1049 phy_address++; 316 if (np)
1050 } 317 return of_get_phy_mode(np);
1051 318
1052 return 0; /* not detected */ 319 return -ENODEV;
1053} 320}
1054 321
322static int at91ether_get_hwaddr_dt(struct macb *bp)
323{
324 struct device_node *np = bp->pdev->dev.of_node;
1055 325
1056/* 326 if (np) {
1057 * Detect MAC & PHY and perform ethernet interface initialization 327 const char *mac = of_get_mac_address(np);
1058 */ 328 if (mac) {
329 memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
330 return 0;
331 }
332 }
333
334 return -ENODEV;
335}
336#else
337static int at91ether_get_phy_mode_dt(struct platform_device *pdev)
338{
339 return -ENODEV;
340}
341static int at91ether_get_hwaddr_dt(struct macb *bp)
342{
343 return -ENODEV;
344}
345#endif
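These helpers consume standard bindings: at91ether_dt_ids supplies the compatible strings, of_get_phy_mode() reads the "phy-mode" property, and of_get_mac_address() reads the MAC address property (typically "local-mac-address"). A hedged example of a device tree node they would parse; the unit address, register window, interrupt number, and MAC value are illustrative:

ethernet@fffbc000 {
	compatible = "cdns,at91rm9200-emac";
	reg = <0xfffbc000 0x4000>;
	interrupts = <24>;
	phy-mode = "rmii";
	local-mac-address = [00 04 25 21 43 65];
};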
346
347/* Detect MAC & PHY and perform ethernet interface initialization */
1059static int __init at91ether_probe(struct platform_device *pdev) 348static int __init at91ether_probe(struct platform_device *pdev)
1060{ 349{
1061 struct macb_platform_data *board_data = pdev->dev.platform_data; 350 struct macb_platform_data *board_data = pdev->dev.platform_data;
1062 struct resource *regs; 351 struct resource *regs;
1063 struct net_device *dev; 352 struct net_device *dev;
1064 struct at91_private *lp; 353 struct phy_device *phydev;
354 struct pinctrl *pinctrl;
355 struct macb *lp;
1065 int res; 356 int res;
357 u32 reg;
1066 358
1067 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 359 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1068 if (!regs) 360 if (!regs)
1069 return -ENOENT; 361 return -ENOENT;
1070 362
1071 dev = alloc_etherdev(sizeof(struct at91_private)); 363 pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
364 if (IS_ERR(pinctrl)) {
365 res = PTR_ERR(pinctrl);
366 if (res == -EPROBE_DEFER)
367 return res;
368
369 dev_warn(&pdev->dev, "No pinctrl provided\n");
370 }
371
372 dev = alloc_etherdev(sizeof(struct macb));
1072 if (!dev) 373 if (!dev)
1073 return -ENOMEM; 374 return -ENOMEM;
1074 375
1075 lp = netdev_priv(dev); 376 lp = netdev_priv(dev);
1076 lp->board_data = *board_data; 377 lp->pdev = pdev;
378 lp->dev = dev;
1077 spin_lock_init(&lp->lock); 379 spin_lock_init(&lp->lock);
1078 380
1079 dev->base_addr = regs->start; /* physical base address */ 381 /* physical base address */
1080 lp->emac_base = ioremap(regs->start, regs->end - regs->start + 1); 382 dev->base_addr = regs->start;
1081 if (!lp->emac_base) { 383 lp->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
384 if (!lp->regs) {
1082 res = -ENOMEM; 385 res = -ENOMEM;
1083 goto err_free_dev; 386 goto err_free_dev;
1084 } 387 }
1085 388
1086 /* Clock */ 389 /* Clock */
1087 lp->ether_clk = clk_get(&pdev->dev, "ether_clk"); 390 lp->pclk = devm_clk_get(&pdev->dev, "ether_clk");
1088 if (IS_ERR(lp->ether_clk)) { 391 if (IS_ERR(lp->pclk)) {
1089 res = PTR_ERR(lp->ether_clk); 392 res = PTR_ERR(lp->pclk);
1090 goto err_ioumap; 393 goto err_free_dev;
1091 } 394 }
1092 clk_enable(lp->ether_clk); 395 clk_enable(lp->pclk);
1093 396
1094 /* Install the interrupt handler */ 397 /* Install the interrupt handler */
1095 dev->irq = platform_get_irq(pdev, 0); 398 dev->irq = platform_get_irq(pdev, 0);
1096 if (request_irq(dev->irq, at91ether_interrupt, 0, dev->name, dev)) { 399 res = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt, 0, dev->name, dev);
1097 res = -EBUSY; 400 if (res)
1098 goto err_disable_clock; 401 goto err_disable_clock;
1099 }
1100
1101 /* Allocate memory for DMA Receive descriptors */
1102 lp->dlist = (struct recv_desc_bufs *) dma_alloc_coherent(NULL, sizeof(struct recv_desc_bufs), (dma_addr_t *) &lp->dlist_phys, GFP_KERNEL);
1103 if (lp->dlist == NULL) {
1104 res = -ENOMEM;
1105 goto err_free_irq;
1106 }
1107 402
1108 ether_setup(dev); 403 ether_setup(dev);
1109 dev->netdev_ops = &at91ether_netdev_ops; 404 dev->netdev_ops = &at91ether_netdev_ops;
1110 dev->ethtool_ops = &at91ether_ethtool_ops; 405 dev->ethtool_ops = &macb_ethtool_ops;
1111 platform_set_drvdata(pdev, dev); 406 platform_set_drvdata(pdev, dev);
1112 SET_NETDEV_DEV(dev, &pdev->dev); 407 SET_NETDEV_DEV(dev, &pdev->dev);
1113 408
1114 get_mac_address(dev); /* Get ethernet address and store it in dev->dev_addr */ 409 res = at91ether_get_hwaddr_dt(lp);
1115 update_mac_address(dev); /* Program ethernet address into MAC */ 410 if (res < 0)
1116 411 macb_get_hwaddr(lp);
1117 at91_emac_write(lp, AT91_EMAC_CTL, 0);
1118 412
1119 if (board_data->is_rmii) 413 res = at91ether_get_phy_mode_dt(pdev);
1120 at91_emac_write(lp, AT91_EMAC_CFG, AT91_EMAC_CLK_DIV32 | AT91_EMAC_BIG | AT91_EMAC_RMII); 414 if (res < 0) {
1121 else 415 if (board_data && board_data->is_rmii)
1122 at91_emac_write(lp, AT91_EMAC_CFG, AT91_EMAC_CLK_DIV32 | AT91_EMAC_BIG); 416 lp->phy_interface = PHY_INTERFACE_MODE_RMII;
1123 417 else
1124 /* Detect PHY */ 418 lp->phy_interface = PHY_INTERFACE_MODE_MII;
1125 if (!at91ether_phy_detect(lp)) { 419 } else {
1126 printk(KERN_ERR "at91_ether: Could not detect ethernet PHY\n"); 420 lp->phy_interface = res;
1127 res = -ENODEV;
1128 goto err_free_dmamem;
1129 } 421 }
1130 422
1131 initialize_phy(lp); 423 macb_writel(lp, NCR, 0);
424
425 reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
426 if (lp->phy_interface == PHY_INTERFACE_MODE_RMII)
427 reg |= MACB_BIT(RM9200_RMII);
1132 428
1133 lp->mii.dev = dev; /* Support for ethtool */ 429 macb_writel(lp, NCFGR, reg);
1134 lp->mii.mdio_read = mdio_read;
1135 lp->mii.mdio_write = mdio_write;
1136 lp->mii.phy_id = lp->phy_address;
1137 lp->mii.phy_id_mask = 0x1f;
1138 lp->mii.reg_num_mask = 0x1f;
1139 430
1140 /* Register the network interface */ 431 /* Register the network interface */
1141 res = register_netdev(dev); 432 res = register_netdev(dev);
1142 if (res) 433 if (res)
1143 goto err_free_dmamem; 434 goto err_disable_clock;
1144 435
1145 /* Determine current link speed */ 436 if (macb_mii_init(lp) != 0)
1146 spin_lock_irq(&lp->lock); 437 goto err_out_unregister_netdev;
1147 enable_mdi(lp); 438
1148 update_linkspeed(dev, 0); 439 /* will be enabled in open() */
1149 disable_mdi(lp); 440 netif_carrier_off(dev);
1150 spin_unlock_irq(&lp->lock); 441
1151 netif_carrier_off(dev); /* will be enabled in open() */ 442 phydev = lp->phy_dev;
1152 443 netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
1153 /* Board has a PHY IRQ: request its GPIO pin */ 444 phydev->drv->name, dev_name(&phydev->dev),
1154 if (gpio_is_valid(lp->board_data.phy_irq_pin)) { 445 phydev->irq);
1155 gpio_request(board_data->phy_irq_pin, "ethernet_phy");
1156 } else {
1157 /* If board has no PHY IRQ, use a timer to poll the PHY */
1158 init_timer(&lp->check_timer);
1159 lp->check_timer.data = (unsigned long)dev;
1160 lp->check_timer.function = at91ether_check_link;
1161 }
1162 446
1163 /* Display ethernet banner */ 447 /* Display ethernet banner */
1164 printk(KERN_INFO "%s: AT91 ethernet at 0x%08x int=%d %s%s (%pM)\n", 448 netdev_info(dev, "AT91 ethernet at 0x%08lx int=%d (%pM)\n",
1165 dev->name, (uint) dev->base_addr, dev->irq, 449 dev->base_addr, dev->irq, dev->dev_addr);
1166 at91_emac_read(lp, AT91_EMAC_CFG) & AT91_EMAC_SPD ? "100-" : "10-",
1167 at91_emac_read(lp, AT91_EMAC_CFG) & AT91_EMAC_FD ? "FullDuplex" : "HalfDuplex",
1168 dev->dev_addr);
1169 if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID))
1170 printk(KERN_INFO "%s: Davicom 9161 PHY %s\n", dev->name, (lp->phy_media == PORT_FIBRE) ? "(Fiber)" : "(Copper)");
1171 else if (lp->phy_type == MII_LXT971A_ID)
1172 printk(KERN_INFO "%s: Intel LXT971A PHY\n", dev->name);
1173 else if (lp->phy_type == MII_RTL8201_ID)
1174 printk(KERN_INFO "%s: Realtek RTL8201(B)L PHY\n", dev->name);
1175 else if (lp->phy_type == MII_BCM5221_ID)
1176 printk(KERN_INFO "%s: Broadcom BCM5221 PHY\n", dev->name);
1177 else if (lp->phy_type == MII_DP83847_ID)
1178 printk(KERN_INFO "%s: National Semiconductor DP83847 PHY\n", dev->name);
1179 else if (lp->phy_type == MII_DP83848_ID)
1180 printk(KERN_INFO "%s: National Semiconductor DP83848 PHY\n", dev->name);
1181 else if (lp->phy_type == MII_AC101L_ID)
1182 printk(KERN_INFO "%s: Altima AC101L PHY\n", dev->name);
1183 else if (lp->phy_type == MII_KS8721_ID)
1184 printk(KERN_INFO "%s: Micrel KS8721 PHY\n", dev->name);
1185 else if (lp->phy_type == MII_T78Q21x3_ID)
1186 printk(KERN_INFO "%s: Teridian 78Q21x3 PHY\n", dev->name);
1187 else if (lp->phy_type == MII_LAN83C185_ID)
1188 printk(KERN_INFO "%s: SMSC LAN83C185 PHY\n", dev->name);
1189
1190 clk_disable(lp->ether_clk); /* Disable Peripheral clock */
1191 450
1192 return 0; 451 return 0;
1193 452
1194 453err_out_unregister_netdev:
1195err_free_dmamem: 454 unregister_netdev(dev);
1196 platform_set_drvdata(pdev, NULL);
1197 dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys);
1198err_free_irq:
1199 free_irq(dev->irq, dev);
1200err_disable_clock: 455err_disable_clock:
1201 clk_disable(lp->ether_clk); 456 clk_disable(lp->pclk);
1202 clk_put(lp->ether_clk);
1203err_ioumap:
1204 iounmap(lp->emac_base);
1205err_free_dev: 457err_free_dev:
1206 free_netdev(dev); 458 free_netdev(dev);
1207 return res; 459 return res;
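The probe conversion above leans on managed resources: an IRQ taken with devm_request_irq() is released by the driver core when the device is unbound, which is why the old err_free_irq label and its free_irq() call vanish from the unwind path. A minimal sketch of the pattern (at91ether_interrupt is the handler named in the diff; the wrapper function itself is illustrative):

	/* devm_* resources need no matching free in the error path */
	static int example_request_irq(struct platform_device *pdev,
				       struct net_device *dev)
	{
		int res;

		res = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
				       0, dev->name, dev);
		if (res)
			return res;	/* nothing to unwind by hand */

		return 0;
	}

Only the resources that are still acquired manually, the clock and the netdev itself, keep their error labels.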
@@ -1210,38 +462,33 @@ err_free_dev:
1210static int __devexit at91ether_remove(struct platform_device *pdev) 462static int __devexit at91ether_remove(struct platform_device *pdev)
1211{ 463{
1212 struct net_device *dev = platform_get_drvdata(pdev); 464 struct net_device *dev = platform_get_drvdata(pdev);
1213 struct at91_private *lp = netdev_priv(dev); 465 struct macb *lp = netdev_priv(dev);
1214 466
1215 if (gpio_is_valid(lp->board_data.phy_irq_pin)) 467 if (lp->phy_dev)
1216 gpio_free(lp->board_data.phy_irq_pin); 468 phy_disconnect(lp->phy_dev);
1217 469
470 mdiobus_unregister(lp->mii_bus);
471 kfree(lp->mii_bus->irq);
472 mdiobus_free(lp->mii_bus);
1218 unregister_netdev(dev); 473 unregister_netdev(dev);
1219 free_irq(dev->irq, dev); 474 clk_disable(lp->pclk);
1220 dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys);
1221 clk_put(lp->ether_clk);
1222
1223 platform_set_drvdata(pdev, NULL);
1224 free_netdev(dev); 475 free_netdev(dev);
476 platform_set_drvdata(pdev, NULL);
477
1225 return 0; 478 return 0;
1226} 479}
1227 480
1228#ifdef CONFIG_PM 481#ifdef CONFIG_PM
1229
1230static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg) 482static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg)
1231{ 483{
1232 struct net_device *net_dev = platform_get_drvdata(pdev); 484 struct net_device *net_dev = platform_get_drvdata(pdev);
1233 struct at91_private *lp = netdev_priv(net_dev); 485 struct macb *lp = netdev_priv(net_dev);
1234 486
1235 if (netif_running(net_dev)) { 487 if (netif_running(net_dev)) {
1236 if (gpio_is_valid(lp->board_data.phy_irq_pin)) {
1237 int phy_irq = gpio_to_irq(lp->board_data.phy_irq_pin);
1238 disable_irq(phy_irq);
1239 }
1240
1241 netif_stop_queue(net_dev); 488 netif_stop_queue(net_dev);
1242 netif_device_detach(net_dev); 489 netif_device_detach(net_dev);
1243 490
1244 clk_disable(lp->ether_clk); 491 clk_disable(lp->pclk);
1245 } 492 }
1246 return 0; 493 return 0;
1247} 494}
@@ -1249,22 +496,16 @@ static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg)
1249static int at91ether_resume(struct platform_device *pdev) 496static int at91ether_resume(struct platform_device *pdev)
1250{ 497{
1251 struct net_device *net_dev = platform_get_drvdata(pdev); 498 struct net_device *net_dev = platform_get_drvdata(pdev);
1252 struct at91_private *lp = netdev_priv(net_dev); 499 struct macb *lp = netdev_priv(net_dev);
1253 500
1254 if (netif_running(net_dev)) { 501 if (netif_running(net_dev)) {
1255 clk_enable(lp->ether_clk); 502 clk_enable(lp->pclk);
1256 503
1257 netif_device_attach(net_dev); 504 netif_device_attach(net_dev);
1258 netif_start_queue(net_dev); 505 netif_start_queue(net_dev);
1259
1260 if (gpio_is_valid(lp->board_data.phy_irq_pin)) {
1261 int phy_irq = gpio_to_irq(lp->board_data.phy_irq_pin);
1262 enable_irq(phy_irq);
1263 }
1264 } 506 }
1265 return 0; 507 return 0;
1266} 508}
1267
1268#else 509#else
1269#define at91ether_suspend NULL 510#define at91ether_suspend NULL
1270#define at91ether_resume NULL 511#define at91ether_resume NULL
@@ -1275,8 +516,9 @@ static struct platform_driver at91ether_driver = {
1275 .suspend = at91ether_suspend, 516 .suspend = at91ether_suspend,
1276 .resume = at91ether_resume, 517 .resume = at91ether_resume,
1277 .driver = { 518 .driver = {
1278 .name = DRV_NAME, 519 .name = "at91_ether",
1279 .owner = THIS_MODULE, 520 .owner = THIS_MODULE,
521 .of_match_table = of_match_ptr(at91ether_dt_ids),
1280 }, 522 },
1281}; 523};
1282 524
@@ -1296,4 +538,4 @@ module_exit(at91ether_exit)
1296MODULE_LICENSE("GPL"); 538MODULE_LICENSE("GPL");
1297MODULE_DESCRIPTION("AT91RM9200 EMAC Ethernet driver"); 539MODULE_DESCRIPTION("AT91RM9200 EMAC Ethernet driver");
1298MODULE_AUTHOR("Andrew Victor"); 540MODULE_AUTHOR("Andrew Victor");
1299MODULE_ALIAS("platform:" DRV_NAME); 541MODULE_ALIAS("platform:at91_ether");
diff --git a/drivers/net/ethernet/cadence/at91_ether.h b/drivers/net/ethernet/cadence/at91_ether.h
deleted file mode 100644
index 0ef6328fa7f8..000000000000
--- a/drivers/net/ethernet/cadence/at91_ether.h
+++ /dev/null
@@ -1,112 +0,0 @@
1/*
2 * Ethernet driver for the Atmel AT91RM9200 (Thunder)
3 *
4 * Copyright (C) SAN People (Pty) Ltd
5 *
6 * Based on an earlier Atmel EMAC macrocell driver by Atmel and Lineo Inc.
7 * Initial version by Rick Bronson.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#ifndef AT91_ETHERNET
16#define AT91_ETHERNET
17
18
19/* Davicom 9161 PHY */
20#define MII_DM9161_ID 0x0181b880
21#define MII_DM9161A_ID 0x0181b8a0
22#define MII_DSCR_REG 16
23#define MII_DSCSR_REG 17
24#define MII_DSINTR_REG 21
25
26/* Intel LXT971A PHY */
27#define MII_LXT971A_ID 0x001378E0
28#define MII_ISINTE_REG 18
29#define MII_ISINTS_REG 19
30#define MII_LEDCTRL_REG 20
31
32/* Realtek RTL8201 PHY */
33#define MII_RTL8201_ID 0x00008200
34
35/* Broadcom BCM5221 PHY */
36#define MII_BCM5221_ID 0x004061e0
37#define MII_BCMINTR_REG 26
38
39/* National Semiconductor DP83847 */
40#define MII_DP83847_ID 0x20005c30
41
42/* National Semiconductor DP83848 */
43#define MII_DP83848_ID 0x20005c90
44#define MII_DPPHYSTS_REG 16
45#define MII_DPMICR_REG 17
46#define MII_DPMISR_REG 18
47
48/* Altima AC101L PHY */
49#define MII_AC101L_ID 0x00225520
50
51/* Micrel KS8721 PHY */
52#define MII_KS8721_ID 0x00221610
53
54/* Teridian 78Q2123/78Q2133 */
55#define MII_T78Q21x3_ID 0x000e7230
56#define MII_T78Q21INT_REG 17
57
58/* SMSC LAN83C185 */
59#define MII_LAN83C185_ID 0x0007C0A0
60
61/* ........................................................................ */
62
63#define MAX_RBUFF_SZ 0x600 /* 1518 rounded up */
64#define MAX_RX_DESCR 9 /* max number of receive buffers */
65
66#define EMAC_DESC_DONE 0x00000001 /* bit for if DMA is done */
67#define EMAC_DESC_WRAP 0x00000002 /* bit for wrap */
68
69#define EMAC_BROADCAST 0x80000000 /* broadcast address */
70#define EMAC_MULTICAST 0x40000000 /* multicast address */
71#define EMAC_UNICAST 0x20000000 /* unicast address */
72
73struct rbf_t
74{
75 unsigned int addr;
76 unsigned long size;
77};
78
79struct recv_desc_bufs
80{
81 struct rbf_t descriptors[MAX_RX_DESCR]; /* must be on sizeof (rbf_t) boundary */
82 char recv_buf[MAX_RX_DESCR][MAX_RBUFF_SZ]; /* must be on long boundary */
83};
84
85struct at91_private
86{
87 struct mii_if_info mii; /* ethtool support */
88 struct macb_platform_data board_data; /* board-specific
89 * configuration (shared with
90	 * configuration (shared with
91 void __iomem *emac_base; /* base register address */
92 struct clk *ether_clk; /* clock */
93
94 /* PHY */
95 unsigned long phy_type; /* type of PHY (PHY_ID) */
96 spinlock_t lock; /* lock for MDI interface */
97 short phy_media; /* media interface type */
98 unsigned short phy_address; /* 5-bit MDI address of PHY (0..31) */
99 struct timer_list check_timer; /* Poll link status */
100
101 /* Transmit */
102 struct sk_buff *skb; /* holds skb until xmit interrupt completes */
103 dma_addr_t skb_physaddr; /* phys addr from pci_map_single */
104 int skb_length; /* saved skb length for pci_unmap_single */
105
106 /* Receive */
107 int rxBuffIndex; /* index into receive descriptor list */
108 struct recv_desc_bufs *dlist; /* descriptor list address */
109 struct recv_desc_bufs *dlist_phys; /* descriptor list physical address */
110};
111
112#endif
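The deleted header also documents the RM9200 receive model that the rewrite replaces: a fixed ring of rbf_t descriptors whose addr word doubles as status, with EMAC_DESC_DONE set by hardware on completion and EMAC_DESC_WRAP marking the final slot. A hypothetical poll loop over such a ring, using only the definitions above (the buffer hand-off to the stack is elided):

	static void example_rx_poll(struct recv_desc_bufs *dlist, int *index)
	{
		while (dlist->descriptors[*index].addr & EMAC_DESC_DONE) {
			/* pass recv_buf[*index] up the stack here ... */

			/* give the slot back to the MAC */
			dlist->descriptors[*index].addr &= ~EMAC_DESC_DONE;

			if (dlist->descriptors[*index].addr & EMAC_DESC_WRAP)
				*index = 0;
			else
				(*index)++;
		}
	}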
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 033064b7b576..1fac769989ad 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -26,37 +26,79 @@
26#include <linux/of.h> 26#include <linux/of.h>
27#include <linux/of_device.h> 27#include <linux/of_device.h>
28#include <linux/of_net.h> 28#include <linux/of_net.h>
29#include <linux/pinctrl/consumer.h>
29 30
30#include "macb.h" 31#include "macb.h"
31 32
32#define RX_BUFFER_SIZE 128 33#define RX_BUFFER_SIZE 128
33#define RX_RING_SIZE 512 34#define RX_RING_SIZE 512 /* must be power of 2 */
34#define RX_RING_BYTES (sizeof(struct dma_desc) * RX_RING_SIZE) 35#define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE)
35 36
36/* Make the IP header word-aligned (the ethernet header is 14 bytes) */ 37#define TX_RING_SIZE 128 /* must be power of 2 */
37#define RX_OFFSET 2 38#define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE)
38
39#define TX_RING_SIZE 128
40#define DEF_TX_RING_PENDING (TX_RING_SIZE - 1)
41#define TX_RING_BYTES (sizeof(struct dma_desc) * TX_RING_SIZE)
42
43#define TX_RING_GAP(bp) \
44 (TX_RING_SIZE - (bp)->tx_pending)
45#define TX_BUFFS_AVAIL(bp) \
46 (((bp)->tx_tail <= (bp)->tx_head) ? \
47 (bp)->tx_tail + (bp)->tx_pending - (bp)->tx_head : \
48 (bp)->tx_tail - (bp)->tx_head - TX_RING_GAP(bp))
49#define NEXT_TX(n) (((n) + 1) & (TX_RING_SIZE - 1))
50
51#define NEXT_RX(n) (((n) + 1) & (RX_RING_SIZE - 1))
52 39
53/* minimum number of free TX descriptors before waking up TX process */ 40/* minimum number of free TX descriptors before waking up TX process */
54#define MACB_TX_WAKEUP_THRESH (TX_RING_SIZE / 4) 41#define MACB_TX_WAKEUP_THRESH (TX_RING_SIZE / 4)
55 42
56#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ 43#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \
57 | MACB_BIT(ISR_ROVR)) 44 | MACB_BIT(ISR_ROVR))
45#define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \
46 | MACB_BIT(ISR_RLE) \
47 | MACB_BIT(TXERR))
48#define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
49
50/*
51 * Graceful stop timeouts in us. We should allow up to
52 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
53 */
54#define MACB_HALT_TIMEOUT 1230
55
56/* Ring buffer accessors */
57static unsigned int macb_tx_ring_wrap(unsigned int index)
58{
59 return index & (TX_RING_SIZE - 1);
60}
61
62static unsigned int macb_tx_ring_avail(struct macb *bp)
63{
64 return (bp->tx_tail - bp->tx_head) & (TX_RING_SIZE - 1);
65}
66
67static struct macb_dma_desc *macb_tx_desc(struct macb *bp, unsigned int index)
68{
69 return &bp->tx_ring[macb_tx_ring_wrap(index)];
70}
71
72static struct macb_tx_skb *macb_tx_skb(struct macb *bp, unsigned int index)
73{
74 return &bp->tx_skb[macb_tx_ring_wrap(index)];
75}
76
77static dma_addr_t macb_tx_dma(struct macb *bp, unsigned int index)
78{
79 dma_addr_t offset;
80
81 offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc);
82
83 return bp->tx_ring_dma + offset;
84}
85
86static unsigned int macb_rx_ring_wrap(unsigned int index)
87{
88 return index & (RX_RING_SIZE - 1);
89}
90
91static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
92{
93 return &bp->rx_ring[macb_rx_ring_wrap(index)];
94}
58 95
59static void __macb_set_hwaddr(struct macb *bp) 96static void *macb_rx_buffer(struct macb *bp, unsigned int index)
97{
98 return bp->rx_buffers + RX_BUFFER_SIZE * macb_rx_ring_wrap(index);
99}
100
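All of these accessors depend on the ring sizes being powers of two (now called out in the RX_RING_SIZE/TX_RING_SIZE comments): an unbounded index is reduced with a mask rather than a modulo, and tx_head/tx_tail can run free as full-range counters whose difference gives the ring occupancy even across wrap-around. The idiom in isolation, as a runnable sketch with an invented ring size:

	#include <assert.h>

	#define RING_SIZE 128			/* must be a power of 2 */

	static unsigned int ring_wrap(unsigned int index)
	{
		return index & (RING_SIZE - 1);	/* cheap modulo */
	}

	int main(void)
	{
		unsigned int head = 130, tail = 125;	/* free-running */

		assert(head - tail == 5);	/* occupancy survives wrap */
		assert(ring_wrap(head) == 2);	/* slot actually in use */
		return 0;
	}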
101void macb_set_hwaddr(struct macb *bp)
60{ 102{
61 u32 bottom; 103 u32 bottom;
62 u16 top; 104 u16 top;
@@ -66,30 +108,49 @@ static void __macb_set_hwaddr(struct macb *bp)
66 top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4))); 108 top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
67 macb_or_gem_writel(bp, SA1T, top); 109 macb_or_gem_writel(bp, SA1T, top);
68} 110}
111EXPORT_SYMBOL_GPL(macb_set_hwaddr);
69 112
70static void __init macb_get_hwaddr(struct macb *bp) 113void macb_get_hwaddr(struct macb *bp)
71{ 114{
115 struct macb_platform_data *pdata;
72 u32 bottom; 116 u32 bottom;
73 u16 top; 117 u16 top;
74 u8 addr[6]; 118 u8 addr[6];
119 int i;
75 120
76 bottom = macb_or_gem_readl(bp, SA1B); 121 pdata = bp->pdev->dev.platform_data;
77 top = macb_or_gem_readl(bp, SA1T);
78 122
79	 addr[0] = bottom & 0xff;	123	 /* Check all 4 address registers for a valid address */
80 addr[1] = (bottom >> 8) & 0xff; 124 for (i = 0; i < 4; i++) {
81 addr[2] = (bottom >> 16) & 0xff; 125 bottom = macb_or_gem_readl(bp, SA1B + i * 8);
82 addr[3] = (bottom >> 24) & 0xff; 126 top = macb_or_gem_readl(bp, SA1T + i * 8);
83 addr[4] = top & 0xff; 127
84 addr[5] = (top >> 8) & 0xff; 128 if (pdata && pdata->rev_eth_addr) {
129 addr[5] = bottom & 0xff;
130 addr[4] = (bottom >> 8) & 0xff;
131 addr[3] = (bottom >> 16) & 0xff;
132 addr[2] = (bottom >> 24) & 0xff;
133 addr[1] = top & 0xff;
134 addr[0] = (top & 0xff00) >> 8;
135 } else {
136 addr[0] = bottom & 0xff;
137 addr[1] = (bottom >> 8) & 0xff;
138 addr[2] = (bottom >> 16) & 0xff;
139 addr[3] = (bottom >> 24) & 0xff;
140 addr[4] = top & 0xff;
141 addr[5] = (top >> 8) & 0xff;
142 }
85 143
86 if (is_valid_ether_addr(addr)) { 144 if (is_valid_ether_addr(addr)) {
87 memcpy(bp->dev->dev_addr, addr, sizeof(addr)); 145 memcpy(bp->dev->dev_addr, addr, sizeof(addr));
88 } else { 146 return;
89 netdev_info(bp->dev, "invalid hw address, using random\n"); 147 }
90 eth_hw_addr_random(bp->dev);
91 } 148 }
149
150 netdev_info(bp->dev, "invalid hw address, using random\n");
151 eth_hw_addr_random(bp->dev);
92} 152}
153EXPORT_SYMBOL_GPL(macb_get_hwaddr);
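SA1B carries the low four bytes of the station address and SA1T the top two, so recovery is plain shift-and-mask work; the rev_eth_addr platform flag merely selects the mirrored byte order for boards whose boot firmware programmed the registers reversed. The forward unpacking as a standalone sketch (register values invented):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t bottom = 0x78563412;	/* example SA1B value */
		uint16_t top = 0xbc9a;		/* example SA1T value */
		uint8_t addr[6];

		addr[0] = bottom & 0xff;
		addr[1] = (bottom >> 8) & 0xff;
		addr[2] = (bottom >> 16) & 0xff;
		addr[3] = (bottom >> 24) & 0xff;
		addr[4] = top & 0xff;
		addr[5] = (top >> 8) & 0xff;

		/* prints 12:34:56:78:9a:bc */
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n", addr[0], addr[1],
		       addr[2], addr[3], addr[4], addr[5]);
		return 0;
	}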
93 154
94static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum) 155static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
95{ 156{
@@ -152,13 +213,17 @@ static void macb_handle_link_change(struct net_device *dev)
152 213
153 reg = macb_readl(bp, NCFGR); 214 reg = macb_readl(bp, NCFGR);
154 reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD)); 215 reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
216 if (macb_is_gem(bp))
217 reg &= ~GEM_BIT(GBE);
155 218
156 if (phydev->duplex) 219 if (phydev->duplex)
157 reg |= MACB_BIT(FD); 220 reg |= MACB_BIT(FD);
158 if (phydev->speed == SPEED_100) 221 if (phydev->speed == SPEED_100)
159 reg |= MACB_BIT(SPD); 222 reg |= MACB_BIT(SPD);
223 if (phydev->speed == SPEED_1000)
224 reg |= GEM_BIT(GBE);
160 225
161 macb_writel(bp, NCFGR, reg); 226 macb_or_gem_writel(bp, NCFGR, reg);
162 227
163 bp->speed = phydev->speed; 228 bp->speed = phydev->speed;
164 bp->duplex = phydev->duplex; 229 bp->duplex = phydev->duplex;
@@ -216,7 +281,10 @@ static int macb_mii_probe(struct net_device *dev)
216 } 281 }
217 282
218 /* mask with MAC supported features */ 283 /* mask with MAC supported features */
219 phydev->supported &= PHY_BASIC_FEATURES; 284 if (macb_is_gem(bp))
285 phydev->supported &= PHY_GBIT_FEATURES;
286 else
287 phydev->supported &= PHY_BASIC_FEATURES;
220 288
221 phydev->advertising = phydev->supported; 289 phydev->advertising = phydev->supported;
222 290
@@ -228,7 +296,7 @@ static int macb_mii_probe(struct net_device *dev)
228 return 0; 296 return 0;
229} 297}
230 298
231static int macb_mii_init(struct macb *bp) 299int macb_mii_init(struct macb *bp)
232{ 300{
233 struct macb_platform_data *pdata; 301 struct macb_platform_data *pdata;
234 int err = -ENXIO, i; 302 int err = -ENXIO, i;
@@ -284,6 +352,7 @@ err_out_free_mdiobus:
284err_out: 352err_out:
285 return err; 353 return err;
286} 354}
355EXPORT_SYMBOL_GPL(macb_mii_init);
287 356
288static void macb_update_stats(struct macb *bp) 357static void macb_update_stats(struct macb *bp)
289{ 358{
@@ -297,93 +366,147 @@ static void macb_update_stats(struct macb *bp)
297 *p += __raw_readl(reg); 366 *p += __raw_readl(reg);
298} 367}
299 368
300static void macb_tx(struct macb *bp) 369static int macb_halt_tx(struct macb *bp)
301{ 370{
302 unsigned int tail; 371 unsigned long halt_time, timeout;
303 unsigned int head; 372 u32 status;
304 u32 status;
305 373
306 status = macb_readl(bp, TSR); 374 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
307 macb_writel(bp, TSR, status);
308 375
309 netdev_dbg(bp->dev, "macb_tx status = %02lx\n", (unsigned long)status); 376 timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
377 do {
378 halt_time = jiffies;
379 status = macb_readl(bp, TSR);
380 if (!(status & MACB_BIT(TGO)))
381 return 0;
310 382
311 if (status & (MACB_BIT(UND) | MACB_BIT(TSR_RLE))) { 383 usleep_range(10, 250);
312 int i; 384 } while (time_before(halt_time, timeout));
313 netdev_err(bp->dev, "TX %s, resetting buffers\n",
314 status & MACB_BIT(UND) ?
315 "underrun" : "retry limit exceeded");
316 385
317 /* Transfer ongoing, disable transmitter, to avoid confusion */ 386 return -ETIMEDOUT;
318 if (status & MACB_BIT(TGO)) 387}
319 macb_writel(bp, NCR, macb_readl(bp, NCR) & ~MACB_BIT(TE));
320 388
321 head = bp->tx_head; 389static void macb_tx_error_task(struct work_struct *work)
390{
391 struct macb *bp = container_of(work, struct macb, tx_error_task);
392 struct macb_tx_skb *tx_skb;
393 struct sk_buff *skb;
394 unsigned int tail;
322 395
323 /*Mark all the buffer as used to avoid sending a lost buffer*/ 396 netdev_vdbg(bp->dev, "macb_tx_error_task: t = %u, h = %u\n",
324 for (i = 0; i < TX_RING_SIZE; i++) 397 bp->tx_tail, bp->tx_head);
325 bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
326 398
327 /* Add wrap bit */ 399 /* Make sure nobody is trying to queue up new packets */
328 bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); 400 netif_stop_queue(bp->dev);
329 401
330 /* free transmit buffer in upper layer*/ 402 /*
331 for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) { 403 * Stop transmission now
332 struct ring_info *rp = &bp->tx_skb[tail]; 404 * (in case we have just queued new packets)
333 struct sk_buff *skb = rp->skb; 405 */
406 if (macb_halt_tx(bp))
407 /* Just complain for now, reinitializing TX path can be good */
408 netdev_err(bp->dev, "BUG: halt tx timed out\n");
334 409
335 BUG_ON(skb == NULL); 410 /* No need for the lock here as nobody will interrupt us anymore */
336 411
337 rmb(); 412 /*
413 * Treat frames in TX queue including the ones that caused the error.
414 * Free transmit buffers in upper layer.
415 */
416 for (tail = bp->tx_tail; tail != bp->tx_head; tail++) {
417 struct macb_dma_desc *desc;
418 u32 ctrl;
419
420 desc = macb_tx_desc(bp, tail);
421 ctrl = desc->ctrl;
422 tx_skb = macb_tx_skb(bp, tail);
423 skb = tx_skb->skb;
424
425 if (ctrl & MACB_BIT(TX_USED)) {
426 netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
427 macb_tx_ring_wrap(tail), skb->data);
428 bp->stats.tx_packets++;
429 bp->stats.tx_bytes += skb->len;
430 } else {
431 /*
432 * "Buffers exhausted mid-frame" errors may only happen
433 * if the driver is buggy, so complain loudly about those.
434 * Statistics are updated by hardware.
435 */
436 if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
437 netdev_err(bp->dev,
438 "BUG: TX buffers exhausted mid-frame\n");
338 439
339 dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len, 440 desc->ctrl = ctrl | MACB_BIT(TX_USED);
340 DMA_TO_DEVICE);
341 rp->skb = NULL;
342 dev_kfree_skb_irq(skb);
343 } 441 }
344 442
345 bp->tx_head = bp->tx_tail = 0; 443 dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
346 444 DMA_TO_DEVICE);
347 /* Enable the transmitter again */ 445 tx_skb->skb = NULL;
348 if (status & MACB_BIT(TGO)) 446 dev_kfree_skb(skb);
349 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TE));
350 } 447 }
351 448
352 if (!(status & MACB_BIT(COMP))) 449 /* Make descriptor updates visible to hardware */
353 /* 450 wmb();
354 * This may happen when a buffer becomes complete 451
355 * between reading the ISR and scanning the 452 /* Reinitialize the TX desc queue */
356 * descriptors. Nothing to worry about. 453 macb_writel(bp, TBQP, bp->tx_ring_dma);
357 */ 454 /* Make TX ring reflect state of hardware */
358 return; 455 bp->tx_head = bp->tx_tail = 0;
456
457 /* Now we are ready to start transmission again */
458 netif_wake_queue(bp->dev);
459
460 /* Housework before enabling TX IRQ */
461 macb_writel(bp, TSR, macb_readl(bp, TSR));
462 macb_writel(bp, IER, MACB_TX_INT_FLAGS);
463}
464
465static void macb_tx_interrupt(struct macb *bp)
466{
467 unsigned int tail;
468 unsigned int head;
469 u32 status;
470
471 status = macb_readl(bp, TSR);
472 macb_writel(bp, TSR, status);
473
474 netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
475 (unsigned long)status);
359 476
360 head = bp->tx_head; 477 head = bp->tx_head;
361 for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) { 478 for (tail = bp->tx_tail; tail != head; tail++) {
362 struct ring_info *rp = &bp->tx_skb[tail]; 479 struct macb_tx_skb *tx_skb;
363 struct sk_buff *skb = rp->skb; 480 struct sk_buff *skb;
364 u32 bufstat; 481 struct macb_dma_desc *desc;
482 u32 ctrl;
365 483
366 BUG_ON(skb == NULL); 484 desc = macb_tx_desc(bp, tail);
367 485
486 /* Make hw descriptor updates visible to CPU */
368 rmb(); 487 rmb();
369 bufstat = bp->tx_ring[tail].ctrl;
370 488
371 if (!(bufstat & MACB_BIT(TX_USED))) 489 ctrl = desc->ctrl;
490
491 if (!(ctrl & MACB_BIT(TX_USED)))
372 break; 492 break;
373 493
374 netdev_dbg(bp->dev, "skb %u (data %p) TX complete\n", 494 tx_skb = macb_tx_skb(bp, tail);
375 tail, skb->data); 495 skb = tx_skb->skb;
376 dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len, 496
497 netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
498 macb_tx_ring_wrap(tail), skb->data);
499 dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
377 DMA_TO_DEVICE); 500 DMA_TO_DEVICE);
378 bp->stats.tx_packets++; 501 bp->stats.tx_packets++;
379 bp->stats.tx_bytes += skb->len; 502 bp->stats.tx_bytes += skb->len;
380 rp->skb = NULL; 503 tx_skb->skb = NULL;
381 dev_kfree_skb_irq(skb); 504 dev_kfree_skb_irq(skb);
382 } 505 }
383 506
384 bp->tx_tail = tail; 507 bp->tx_tail = tail;
385 if (netif_queue_stopped(bp->dev) && 508 if (netif_queue_stopped(bp->dev)
386 TX_BUFFS_AVAIL(bp) > MACB_TX_WAKEUP_THRESH) 509 && macb_tx_ring_avail(bp) > MACB_TX_WAKEUP_THRESH)
387 netif_wake_queue(bp->dev); 510 netif_wake_queue(bp->dev);
388} 511}
389 512
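Two techniques in this hunk are worth isolating. macb_halt_tx() is a bounded poll: request the halt, then re-read status until the busy bit clears or a deadline computed with usecs_to_jiffies() expires, sleeping between reads instead of spinning. A condensed sketch, with the MMIO accesses hidden behind hypothetical helpers:

	/* hypothetical stand-ins for the register accesses */
	extern void request_halt(void);
	extern u32 read_status(void);
	#define STATUS_GOING BIT(3)		/* illustrative busy bit */

	static int example_wait_for_halt(void)
	{
		unsigned long timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);

		request_halt();
		do {
			if (!(read_status() & STATUS_GOING))
				return 0;	/* halted in time */
			usleep_range(10, 250);	/* yield, don't busy-wait */
		} while (time_before(jiffies, timeout));

		return -ETIMEDOUT;
	}

Note that the real function samples jiffies into halt_time before each read, so the deadline test uses the time of the last sample and a final read that actually saw the halt is never misreported as a timeout.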
@@ -392,31 +515,48 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
392{ 515{
393 unsigned int len; 516 unsigned int len;
394 unsigned int frag; 517 unsigned int frag;
395 unsigned int offset = 0; 518 unsigned int offset;
396 struct sk_buff *skb; 519 struct sk_buff *skb;
520 struct macb_dma_desc *desc;
397 521
398 len = MACB_BFEXT(RX_FRMLEN, bp->rx_ring[last_frag].ctrl); 522 desc = macb_rx_desc(bp, last_frag);
523 len = MACB_BFEXT(RX_FRMLEN, desc->ctrl);
399 524
400 netdev_dbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n", 525 netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
401 first_frag, last_frag, len); 526 macb_rx_ring_wrap(first_frag),
527 macb_rx_ring_wrap(last_frag), len);
402 528
403 skb = netdev_alloc_skb(bp->dev, len + RX_OFFSET); 529 /*
530 * The ethernet header starts NET_IP_ALIGN bytes into the
531 * first buffer. Since the header is 14 bytes, this makes the
532 * payload word-aligned.
533 *
534 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
535 * the two padding bytes into the skb so that we avoid hitting
536 * the slowpath in memcpy(), and pull them off afterwards.
537 */
538 skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
404 if (!skb) { 539 if (!skb) {
405 bp->stats.rx_dropped++; 540 bp->stats.rx_dropped++;
406 for (frag = first_frag; ; frag = NEXT_RX(frag)) { 541 for (frag = first_frag; ; frag++) {
407 bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED); 542 desc = macb_rx_desc(bp, frag);
543 desc->addr &= ~MACB_BIT(RX_USED);
408 if (frag == last_frag) 544 if (frag == last_frag)
409 break; 545 break;
410 } 546 }
547
548 /* Make descriptor updates visible to hardware */
411 wmb(); 549 wmb();
550
412 return 1; 551 return 1;
413 } 552 }
414 553
415 skb_reserve(skb, RX_OFFSET); 554 offset = 0;
555 len += NET_IP_ALIGN;
416 skb_checksum_none_assert(skb); 556 skb_checksum_none_assert(skb);
417 skb_put(skb, len); 557 skb_put(skb, len);
418 558
419 for (frag = first_frag; ; frag = NEXT_RX(frag)) { 559 for (frag = first_frag; ; frag++) {
420 unsigned int frag_len = RX_BUFFER_SIZE; 560 unsigned int frag_len = RX_BUFFER_SIZE;
421 561
422 if (offset + frag_len > len) { 562 if (offset + frag_len > len) {
@@ -424,22 +564,24 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
424 frag_len = len - offset; 564 frag_len = len - offset;
425 } 565 }
426 skb_copy_to_linear_data_offset(skb, offset, 566 skb_copy_to_linear_data_offset(skb, offset,
427 (bp->rx_buffers + 567 macb_rx_buffer(bp, frag), frag_len);
428 (RX_BUFFER_SIZE * frag)),
429 frag_len);
430 offset += RX_BUFFER_SIZE; 568 offset += RX_BUFFER_SIZE;
431 bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED); 569 desc = macb_rx_desc(bp, frag);
432 wmb(); 570 desc->addr &= ~MACB_BIT(RX_USED);
433 571
434 if (frag == last_frag) 572 if (frag == last_frag)
435 break; 573 break;
436 } 574 }
437 575
576 /* Make descriptor updates visible to hardware */
577 wmb();
578
579 __skb_pull(skb, NET_IP_ALIGN);
438 skb->protocol = eth_type_trans(skb, bp->dev); 580 skb->protocol = eth_type_trans(skb, bp->dev);
439 581
440 bp->stats.rx_packets++; 582 bp->stats.rx_packets++;
441 bp->stats.rx_bytes += len; 583 bp->stats.rx_bytes += skb->len;
442 netdev_dbg(bp->dev, "received skb of length %u, csum: %08x\n", 584 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
443 skb->len, skb->csum); 585 skb->len, skb->csum);
444 netif_receive_skb(skb); 586 netif_receive_skb(skb);
445 587
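The alignment comment above deserves a second look: because NCFGR's RBOF field (set to NET_IP_ALIGN in macb_init_hw() below) makes the MAC store each frame two bytes into its buffer, the driver can copy pad-plus-frame in one pass, keeping every memcpy() word-aligned, and then discard the pad with __skb_pull(). A sketch of that receive-copy shape, assuming hwbuf already holds the two pad bytes ahead of the frame:

	/* not a drop-in: single-buffer version of the aligned copy */
	static struct sk_buff *example_rx_copy(struct net_device *dev,
					       void *hwbuf, unsigned int len)
	{
		struct sk_buff *skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);

		if (!skb)
			return NULL;

		skb_put(skb, len + NET_IP_ALIGN);
		/* copy pad + frame together: source and dest stay aligned */
		skb_copy_to_linear_data(skb, hwbuf, len + NET_IP_ALIGN);

		__skb_pull(skb, NET_IP_ALIGN);	/* IP header now aligned */
		return skb;
	}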
@@ -452,8 +594,12 @@ static void discard_partial_frame(struct macb *bp, unsigned int begin,
452{ 594{
453 unsigned int frag; 595 unsigned int frag;
454 596
455 for (frag = begin; frag != end; frag = NEXT_RX(frag)) 597 for (frag = begin; frag != end; frag++) {
456 bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED); 598 struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
599 desc->addr &= ~MACB_BIT(RX_USED);
600 }
601
602 /* Make descriptor updates visible to hardware */
457 wmb(); 603 wmb();
458 604
459 /* 605 /*
@@ -466,15 +612,18 @@ static void discard_partial_frame(struct macb *bp, unsigned int begin,
466static int macb_rx(struct macb *bp, int budget) 612static int macb_rx(struct macb *bp, int budget)
467{ 613{
468 int received = 0; 614 int received = 0;
469 unsigned int tail = bp->rx_tail; 615 unsigned int tail;
470 int first_frag = -1; 616 int first_frag = -1;
471 617
472 for (; budget > 0; tail = NEXT_RX(tail)) { 618 for (tail = bp->rx_tail; budget > 0; tail++) {
619 struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
473 u32 addr, ctrl; 620 u32 addr, ctrl;
474 621
622 /* Make hw descriptor updates visible to CPU */
475 rmb(); 623 rmb();
476 addr = bp->rx_ring[tail].addr; 624
477 ctrl = bp->rx_ring[tail].ctrl; 625 addr = desc->addr;
626 ctrl = desc->ctrl;
478 627
479 if (!(addr & MACB_BIT(RX_USED))) 628 if (!(addr & MACB_BIT(RX_USED)))
480 break; 629 break;
@@ -517,7 +666,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
517 666
518 work_done = 0; 667 work_done = 0;
519 668
520 netdev_dbg(bp->dev, "poll: status = %08lx, budget = %d\n", 669 netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
521 (unsigned long)status, budget); 670 (unsigned long)status, budget);
522 671
523 work_done = macb_rx(bp, budget); 672 work_done = macb_rx(bp, budget);
@@ -552,10 +701,12 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
552 while (status) { 701 while (status) {
553 /* close possible race with dev_close */ 702 /* close possible race with dev_close */
554 if (unlikely(!netif_running(dev))) { 703 if (unlikely(!netif_running(dev))) {
555 macb_writel(bp, IDR, ~0UL); 704 macb_writel(bp, IDR, -1);
556 break; 705 break;
557 } 706 }
558 707
708 netdev_vdbg(bp->dev, "isr = 0x%08lx\n", (unsigned long)status);
709
559 if (status & MACB_RX_INT_FLAGS) { 710 if (status & MACB_RX_INT_FLAGS) {
560 /* 711 /*
561 * There's no point taking any more interrupts 712 * There's no point taking any more interrupts
@@ -567,14 +718,19 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
567 macb_writel(bp, IDR, MACB_RX_INT_FLAGS); 718 macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
568 719
569 if (napi_schedule_prep(&bp->napi)) { 720 if (napi_schedule_prep(&bp->napi)) {
570 netdev_dbg(bp->dev, "scheduling RX softirq\n"); 721 netdev_vdbg(bp->dev, "scheduling RX softirq\n");
571 __napi_schedule(&bp->napi); 722 __napi_schedule(&bp->napi);
572 } 723 }
573 } 724 }
574 725
575 if (status & (MACB_BIT(TCOMP) | MACB_BIT(ISR_TUND) | 726 if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
576 MACB_BIT(ISR_RLE))) 727 macb_writel(bp, IDR, MACB_TX_INT_FLAGS);
577 macb_tx(bp); 728 schedule_work(&bp->tx_error_task);
729 break;
730 }
731
732 if (status & MACB_BIT(TCOMP))
733 macb_tx_interrupt(bp);
578 734
579 /* 735 /*
580 * Link change detection isn't possible with RMII, so we'll 736 * Link change detection isn't possible with RMII, so we'll
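The reworked interrupt handler now splits its work three ways: RX completions are batched through NAPI, ordinary TX completions are reaped inline in macb_tx_interrupt(), and TX error bits mask further TX interrupts and defer to tx_error_task, since ring recovery (halting the MAC, unmapping skbs, rewriting TBQP) can sleep and is far too heavy for hard-IRQ context. The deferral shape, reduced to a sketch with illustrative names:

	struct example_priv {
		struct work_struct tx_error_task;
	};

	/* hypothetical helpers for the MMIO side */
	extern u32 read_and_ack_irq_status(struct example_priv *p);
	extern void mask_tx_irqs(struct example_priv *p);
	#define EXAMPLE_ERROR_BITS 0x70		/* illustrative */

	static irqreturn_t example_isr(int irq, void *dev_id)
	{
		struct example_priv *p = dev_id;
		u32 status = read_and_ack_irq_status(p);

		if (status & EXAMPLE_ERROR_BITS) {
			mask_tx_irqs(p);	/* keep the line quiet ... */
			schedule_work(&p->tx_error_task); /* ... fix it later */
		}
		return IRQ_HANDLED;
	}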
@@ -626,11 +782,13 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
626 struct macb *bp = netdev_priv(dev); 782 struct macb *bp = netdev_priv(dev);
627 dma_addr_t mapping; 783 dma_addr_t mapping;
628 unsigned int len, entry; 784 unsigned int len, entry;
785 struct macb_dma_desc *desc;
786 struct macb_tx_skb *tx_skb;
629 u32 ctrl; 787 u32 ctrl;
630 unsigned long flags; 788 unsigned long flags;
631 789
632#ifdef DEBUG 790#if defined(DEBUG) && defined(VERBOSE_DEBUG)
633 netdev_dbg(bp->dev, 791 netdev_vdbg(bp->dev,
634 "start_xmit: len %u head %p data %p tail %p end %p\n", 792 "start_xmit: len %u head %p data %p tail %p end %p\n",
635 skb->len, skb->head, skb->data, 793 skb->len, skb->head, skb->data,
636 skb_tail_pointer(skb), skb_end_pointer(skb)); 794 skb_tail_pointer(skb), skb_end_pointer(skb));
@@ -642,7 +800,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
642 spin_lock_irqsave(&bp->lock, flags); 800 spin_lock_irqsave(&bp->lock, flags);
643 801
644 /* This is a hard error, log it. */ 802 /* This is a hard error, log it. */
645 if (TX_BUFFS_AVAIL(bp) < 1) { 803 if (macb_tx_ring_avail(bp) < 1) {
646 netif_stop_queue(dev); 804 netif_stop_queue(dev);
647 spin_unlock_irqrestore(&bp->lock, flags); 805 spin_unlock_irqrestore(&bp->lock, flags);
648 netdev_err(bp->dev, "BUG! Tx Ring full when queue awake!\n"); 806 netdev_err(bp->dev, "BUG! Tx Ring full when queue awake!\n");
@@ -651,13 +809,16 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
651 return NETDEV_TX_BUSY; 809 return NETDEV_TX_BUSY;
652 } 810 }
653 811
654 entry = bp->tx_head; 812 entry = macb_tx_ring_wrap(bp->tx_head);
655 netdev_dbg(bp->dev, "Allocated ring entry %u\n", entry); 813 bp->tx_head++;
814 netdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry);
656 mapping = dma_map_single(&bp->pdev->dev, skb->data, 815 mapping = dma_map_single(&bp->pdev->dev, skb->data,
657 len, DMA_TO_DEVICE); 816 len, DMA_TO_DEVICE);
658 bp->tx_skb[entry].skb = skb; 817
659 bp->tx_skb[entry].mapping = mapping; 818 tx_skb = &bp->tx_skb[entry];
660 netdev_dbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n", 819 tx_skb->skb = skb;
820 tx_skb->mapping = mapping;
821 netdev_vdbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n",
661 skb->data, (unsigned long)mapping); 822 skb->data, (unsigned long)mapping);
662 823
663 ctrl = MACB_BF(TX_FRMLEN, len); 824 ctrl = MACB_BF(TX_FRMLEN, len);
@@ -665,18 +826,18 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
665 if (entry == (TX_RING_SIZE - 1)) 826 if (entry == (TX_RING_SIZE - 1))
666 ctrl |= MACB_BIT(TX_WRAP); 827 ctrl |= MACB_BIT(TX_WRAP);
667 828
668 bp->tx_ring[entry].addr = mapping; 829 desc = &bp->tx_ring[entry];
669 bp->tx_ring[entry].ctrl = ctrl; 830 desc->addr = mapping;
670 wmb(); 831 desc->ctrl = ctrl;
671 832
672 entry = NEXT_TX(entry); 833 /* Make newly initialized descriptor visible to hardware */
673 bp->tx_head = entry; 834 wmb();
674 835
675 skb_tx_timestamp(skb); 836 skb_tx_timestamp(skb);
676 837
677 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); 838 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
678 839
679 if (TX_BUFFS_AVAIL(bp) < 1) 840 if (macb_tx_ring_avail(bp) < 1)
680 netif_stop_queue(dev); 841 netif_stop_queue(dev);
681 842
682 spin_unlock_irqrestore(&bp->lock, flags); 843 spin_unlock_irqrestore(&bp->lock, flags);
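On the transmit side the queue control forms a hysteresis loop: start_xmit stops the queue the moment fewer than one descriptor is free, while the completion path only wakes it once macb_tx_ring_avail() climbs past MACB_TX_WAKEUP_THRESH (a quarter of the ring), so the queue does not flap on and off with every reclaimed frame. The two halves side by side (the wrapper functions are illustrative):

	static void example_xmit_tail(struct net_device *dev, unsigned int avail)
	{
		if (avail < 1)
			netif_stop_queue(dev);	/* full: stop at once */
	}

	static void example_completion_tail(struct net_device *dev,
					    unsigned int avail)
	{
		/* wake only with real headroom, not one free slot */
		if (netif_queue_stopped(dev) && avail > MACB_TX_WAKEUP_THRESH)
			netif_wake_queue(dev);
	}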
@@ -712,7 +873,7 @@ static int macb_alloc_consistent(struct macb *bp)
712{ 873{
713 int size; 874 int size;
714 875
715 size = TX_RING_SIZE * sizeof(struct ring_info); 876 size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
716 bp->tx_skb = kmalloc(size, GFP_KERNEL); 877 bp->tx_skb = kmalloc(size, GFP_KERNEL);
717 if (!bp->tx_skb) 878 if (!bp->tx_skb)
718 goto out_err; 879 goto out_err;
@@ -775,9 +936,6 @@ static void macb_init_rings(struct macb *bp)
775 936
776static void macb_reset_hw(struct macb *bp) 937static void macb_reset_hw(struct macb *bp)
777{ 938{
778 /* Make sure we have the write buffer for ourselves */
779 wmb();
780
781 /* 939 /*
782 * Disable RX and TX (XXX: Should we halt the transmission 940 * Disable RX and TX (XXX: Should we halt the transmission
783 * more gracefully?) 941 * more gracefully?)
@@ -788,11 +946,11 @@ static void macb_reset_hw(struct macb *bp)
788 macb_writel(bp, NCR, MACB_BIT(CLRSTAT)); 946 macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
789 947
790 /* Clear all status flags */ 948 /* Clear all status flags */
791 macb_writel(bp, TSR, ~0UL); 949 macb_writel(bp, TSR, -1);
792 macb_writel(bp, RSR, ~0UL); 950 macb_writel(bp, RSR, -1);
793 951
794 /* Disable all interrupts */ 952 /* Disable all interrupts */
795 macb_writel(bp, IDR, ~0UL); 953 macb_writel(bp, IDR, -1);
796 macb_readl(bp, ISR); 954 macb_readl(bp, ISR);
797} 955}
798 956
@@ -879,9 +1037,10 @@ static void macb_init_hw(struct macb *bp)
879 u32 config; 1037 u32 config;
880 1038
881 macb_reset_hw(bp); 1039 macb_reset_hw(bp);
882 __macb_set_hwaddr(bp); 1040 macb_set_hwaddr(bp);
883 1041
884 config = macb_mdc_clk_div(bp); 1042 config = macb_mdc_clk_div(bp);
1043 config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */
885 config |= MACB_BIT(PAE); /* PAuse Enable */ 1044 config |= MACB_BIT(PAE); /* PAuse Enable */
886 config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ 1045 config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
887 config |= MACB_BIT(BIG); /* Receive oversized frames */ 1046 config |= MACB_BIT(BIG); /* Receive oversized frames */
@@ -891,6 +1050,8 @@ static void macb_init_hw(struct macb *bp)
891 config |= MACB_BIT(NBC); /* No BroadCast */ 1050 config |= MACB_BIT(NBC); /* No BroadCast */
892 config |= macb_dbw(bp); 1051 config |= macb_dbw(bp);
893 macb_writel(bp, NCFGR, config); 1052 macb_writel(bp, NCFGR, config);
1053 bp->speed = SPEED_10;
1054 bp->duplex = DUPLEX_HALF;
894 1055
895 macb_configure_dma(bp); 1056 macb_configure_dma(bp);
896 1057
@@ -902,13 +1063,8 @@ static void macb_init_hw(struct macb *bp)
902 macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE)); 1063 macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
903 1064
904 /* Enable interrupts */ 1065 /* Enable interrupts */
905 macb_writel(bp, IER, (MACB_BIT(RCOMP) 1066 macb_writel(bp, IER, (MACB_RX_INT_FLAGS
906 | MACB_BIT(RXUBR) 1067 | MACB_TX_INT_FLAGS
907 | MACB_BIT(ISR_TUND)
908 | MACB_BIT(ISR_RLE)
909 | MACB_BIT(TXERR)
910 | MACB_BIT(TCOMP)
911 | MACB_BIT(ISR_ROVR)
912 | MACB_BIT(HRESP))); 1068 | MACB_BIT(HRESP)));
913 1069
914} 1070}
@@ -996,7 +1152,7 @@ static void macb_sethashtable(struct net_device *dev)
996/* 1152/*
997 * Enable/Disable promiscuous and multicast modes. 1153 * Enable/Disable promiscuous and multicast modes.
998 */ 1154 */
999static void macb_set_rx_mode(struct net_device *dev) 1155void macb_set_rx_mode(struct net_device *dev)
1000{ 1156{
1001 unsigned long cfg; 1157 unsigned long cfg;
1002 struct macb *bp = netdev_priv(dev); 1158 struct macb *bp = netdev_priv(dev);
@@ -1028,6 +1184,7 @@ static void macb_set_rx_mode(struct net_device *dev)
1028 1184
1029 macb_writel(bp, NCFGR, cfg); 1185 macb_writel(bp, NCFGR, cfg);
1030} 1186}
1187EXPORT_SYMBOL_GPL(macb_set_rx_mode);
1031 1188
1032static int macb_open(struct net_device *dev) 1189static int macb_open(struct net_device *dev)
1033{ 1190{
@@ -1135,7 +1292,7 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
1135 return nstat; 1292 return nstat;
1136} 1293}
1137 1294
1138static struct net_device_stats *macb_get_stats(struct net_device *dev) 1295struct net_device_stats *macb_get_stats(struct net_device *dev)
1139{ 1296{
1140 struct macb *bp = netdev_priv(dev); 1297 struct macb *bp = netdev_priv(dev);
1141 struct net_device_stats *nstat = &bp->stats; 1298 struct net_device_stats *nstat = &bp->stats;
@@ -1181,6 +1338,7 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
1181 1338
1182 return nstat; 1339 return nstat;
1183} 1340}
1341EXPORT_SYMBOL_GPL(macb_get_stats);
1184 1342
1185static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1343static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1186{ 1344{
@@ -1204,25 +1362,55 @@ static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1204 return phy_ethtool_sset(phydev, cmd); 1362 return phy_ethtool_sset(phydev, cmd);
1205} 1363}
1206 1364
1207static void macb_get_drvinfo(struct net_device *dev, 1365static int macb_get_regs_len(struct net_device *netdev)
1208 struct ethtool_drvinfo *info) 1366{
1367 return MACB_GREGS_NBR * sizeof(u32);
1368}
1369
1370static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1371 void *p)
1209{ 1372{
1210 struct macb *bp = netdev_priv(dev); 1373 struct macb *bp = netdev_priv(dev);
1374 unsigned int tail, head;
1375 u32 *regs_buff = p;
1376
1377 regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
1378 | MACB_GREGS_VERSION;
1379
1380 tail = macb_tx_ring_wrap(bp->tx_tail);
1381 head = macb_tx_ring_wrap(bp->tx_head);
1382
1383 regs_buff[0] = macb_readl(bp, NCR);
1384 regs_buff[1] = macb_or_gem_readl(bp, NCFGR);
1385 regs_buff[2] = macb_readl(bp, NSR);
1386 regs_buff[3] = macb_readl(bp, TSR);
1387 regs_buff[4] = macb_readl(bp, RBQP);
1388 regs_buff[5] = macb_readl(bp, TBQP);
1389 regs_buff[6] = macb_readl(bp, RSR);
1390 regs_buff[7] = macb_readl(bp, IMR);
1211 1391
1212 strcpy(info->driver, bp->pdev->dev.driver->name); 1392 regs_buff[8] = tail;
1213 strcpy(info->version, "$Revision: 1.14 $"); 1393 regs_buff[9] = head;
1214 strcpy(info->bus_info, dev_name(&bp->pdev->dev)); 1394 regs_buff[10] = macb_tx_dma(bp, tail);
1395 regs_buff[11] = macb_tx_dma(bp, head);
1396
1397 if (macb_is_gem(bp)) {
1398 regs_buff[12] = gem_readl(bp, USRIO);
1399 regs_buff[13] = gem_readl(bp, DMACFG);
1400 }
1215} 1401}
1216 1402
1217static const struct ethtool_ops macb_ethtool_ops = { 1403const struct ethtool_ops macb_ethtool_ops = {
1218 .get_settings = macb_get_settings, 1404 .get_settings = macb_get_settings,
1219 .set_settings = macb_set_settings, 1405 .set_settings = macb_set_settings,
1220 .get_drvinfo = macb_get_drvinfo, 1406 .get_regs_len = macb_get_regs_len,
1407 .get_regs = macb_get_regs,
1221 .get_link = ethtool_op_get_link, 1408 .get_link = ethtool_op_get_link,
1222 .get_ts_info = ethtool_op_get_ts_info, 1409 .get_ts_info = ethtool_op_get_ts_info,
1223}; 1410};
1411EXPORT_SYMBOL_GPL(macb_ethtool_ops);
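With get_regs_len()/get_regs() wired up, the snapshot becomes reachable from userspace through the ETHTOOL_GREGS ioctl (ethtool -d on the command line); the version word, which combines MACB_GREGS_VERSION with the hardware revision read from MID, is what lets a decoder tell the layouts apart.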
1224 1412
1225static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1413int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1226{ 1414{
1227 struct macb *bp = netdev_priv(dev); 1415 struct macb *bp = netdev_priv(dev);
1228 struct phy_device *phydev = bp->phy_dev; 1416 struct phy_device *phydev = bp->phy_dev;
@@ -1235,6 +1423,7 @@ static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1235 1423
1236 return phy_mii_ioctl(phydev, rq, cmd); 1424 return phy_mii_ioctl(phydev, rq, cmd);
1237} 1425}
1426EXPORT_SYMBOL_GPL(macb_ioctl);
1238 1427
1239static const struct net_device_ops macb_netdev_ops = { 1428static const struct net_device_ops macb_netdev_ops = {
1240 .ndo_open = macb_open, 1429 .ndo_open = macb_open,
@@ -1306,6 +1495,7 @@ static int __init macb_probe(struct platform_device *pdev)
1306 struct phy_device *phydev; 1495 struct phy_device *phydev;
1307 u32 config; 1496 u32 config;
1308 int err = -ENXIO; 1497 int err = -ENXIO;
1498 struct pinctrl *pinctrl;
1309 1499
1310 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1500 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1311 if (!regs) { 1501 if (!regs) {
@@ -1313,6 +1503,15 @@ static int __init macb_probe(struct platform_device *pdev)
1313 goto err_out; 1503 goto err_out;
1314 } 1504 }
1315 1505
1506 pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
1507 if (IS_ERR(pinctrl)) {
1508 err = PTR_ERR(pinctrl);
1509 if (err == -EPROBE_DEFER)
1510 goto err_out;
1511
1512 dev_warn(&pdev->dev, "No pinctrl provided\n");
1513 }
1514
1316 err = -ENOMEM; 1515 err = -ENOMEM;
1317 dev = alloc_etherdev(sizeof(*bp)); 1516 dev = alloc_etherdev(sizeof(*bp));
1318 if (!dev) 1517 if (!dev)
@@ -1328,6 +1527,7 @@ static int __init macb_probe(struct platform_device *pdev)
1328 bp->dev = dev; 1527 bp->dev = dev;
1329 1528
1330 spin_lock_init(&bp->lock); 1529 spin_lock_init(&bp->lock);
1530 INIT_WORK(&bp->tx_error_task, macb_tx_error_task);
1331 1531
1332 bp->pclk = clk_get(&pdev->dev, "pclk"); 1532 bp->pclk = clk_get(&pdev->dev, "pclk");
1333 if (IS_ERR(bp->pclk)) { 1533 if (IS_ERR(bp->pclk)) {
@@ -1384,7 +1584,9 @@ static int __init macb_probe(struct platform_device *pdev)
1384 bp->phy_interface = err; 1584 bp->phy_interface = err;
1385 } 1585 }
1386 1586
1387 if (bp->phy_interface == PHY_INTERFACE_MODE_RMII) 1587 if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
1588 macb_or_gem_writel(bp, USRIO, GEM_BIT(RGMII));
1589 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
1388#if defined(CONFIG_ARCH_AT91) 1590#if defined(CONFIG_ARCH_AT91)
1389 macb_or_gem_writel(bp, USRIO, (MACB_BIT(RMII) | 1591 macb_or_gem_writel(bp, USRIO, (MACB_BIT(RMII) |
1390 MACB_BIT(CLKEN))); 1592 MACB_BIT(CLKEN)));
@@ -1398,8 +1600,6 @@ static int __init macb_probe(struct platform_device *pdev)
1398 macb_or_gem_writel(bp, USRIO, MACB_BIT(MII)); 1600 macb_or_gem_writel(bp, USRIO, MACB_BIT(MII));
1399#endif 1601#endif
1400 1602
1401 bp->tx_pending = DEF_TX_RING_PENDING;
1402
1403 err = register_netdev(dev); 1603 err = register_netdev(dev);
1404 if (err) { 1604 if (err) {
1405 dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); 1605 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 335e288f5314..864e38042b2d 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -10,10 +10,15 @@
10#ifndef _MACB_H 10#ifndef _MACB_H
11#define _MACB_H 11#define _MACB_H
12 12
13#define MACB_GREGS_NBR 16
14#define MACB_GREGS_VERSION 1
15
13/* MACB register offsets */ 16/* MACB register offsets */
14#define MACB_NCR 0x0000 17#define MACB_NCR 0x0000
15#define MACB_NCFGR 0x0004 18#define MACB_NCFGR 0x0004
16#define MACB_NSR 0x0008 19#define MACB_NSR 0x0008
20#define MACB_TAR 0x000c /* AT91RM9200 only */
21#define MACB_TCR 0x0010 /* AT91RM9200 only */
17#define MACB_TSR 0x0014 22#define MACB_TSR 0x0014
18#define MACB_RBQP 0x0018 23#define MACB_RBQP 0x0018
19#define MACB_TBQP 0x001c 24#define MACB_TBQP 0x001c
@@ -133,6 +138,8 @@
133#define MACB_RTY_SIZE 1 138#define MACB_RTY_SIZE 1
134#define MACB_PAE_OFFSET 13 139#define MACB_PAE_OFFSET 13
135#define MACB_PAE_SIZE 1 140#define MACB_PAE_SIZE 1
141#define MACB_RM9200_RMII_OFFSET 13 /* AT91RM9200 only */
142#define MACB_RM9200_RMII_SIZE 1 /* AT91RM9200 only */
136#define MACB_RBOF_OFFSET 14 143#define MACB_RBOF_OFFSET 14
137#define MACB_RBOF_SIZE 2 144#define MACB_RBOF_SIZE 2
138#define MACB_RLCE_OFFSET 16 145#define MACB_RLCE_OFFSET 16
@@ -145,6 +152,8 @@
145#define MACB_IRXFCS_SIZE 1 152#define MACB_IRXFCS_SIZE 1
146 153
147/* GEM specific NCFGR bitfields. */ 154/* GEM specific NCFGR bitfields. */
155#define GEM_GBE_OFFSET 10
156#define GEM_GBE_SIZE 1
148#define GEM_CLK_OFFSET 18 157#define GEM_CLK_OFFSET 18
149#define GEM_CLK_SIZE 3 158#define GEM_CLK_SIZE 3
150#define GEM_DBW_OFFSET 21 159#define GEM_DBW_OFFSET 21
@@ -178,6 +187,8 @@
178#define MACB_TGO_SIZE 1 187#define MACB_TGO_SIZE 1
179#define MACB_BEX_OFFSET 4 188#define MACB_BEX_OFFSET 4
180#define MACB_BEX_SIZE 1 189#define MACB_BEX_SIZE 1
190#define MACB_RM9200_BNQ_OFFSET 4 /* AT91RM9200 only */
191#define MACB_RM9200_BNQ_SIZE 1 /* AT91RM9200 only */
181#define MACB_COMP_OFFSET 5 192#define MACB_COMP_OFFSET 5
182#define MACB_COMP_SIZE 1 193#define MACB_COMP_SIZE 1
183#define MACB_UND_OFFSET 6 194#define MACB_UND_OFFSET 6
@@ -246,6 +257,8 @@
246/* Bitfields in USRIO (AT91) */ 257/* Bitfields in USRIO (AT91) */
247#define MACB_RMII_OFFSET 0 258#define MACB_RMII_OFFSET 0
248#define MACB_RMII_SIZE 1 259#define MACB_RMII_SIZE 1
260#define GEM_RGMII_OFFSET 0 /* GEM gigabit mode */
261#define GEM_RGMII_SIZE 1
249#define MACB_CLKEN_OFFSET 1 262#define MACB_CLKEN_OFFSET 1
250#define MACB_CLKEN_SIZE 1 263#define MACB_CLKEN_SIZE 1
251 264
@@ -352,7 +365,12 @@
352 __v; \ 365 __v; \
353 }) 366 })
354 367
355struct dma_desc { 368/**
369 * struct macb_dma_desc - Hardware DMA descriptor
370 * @addr: DMA address of data buffer
371 * @ctrl: Control and status bits
372 */
373struct macb_dma_desc {
356 u32 addr; 374 u32 addr;
357 u32 ctrl; 375 u32 ctrl;
358}; 376};
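The new kernel-doc describes the descriptor itself; the surrounding *_OFFSET/*_SIZE constants feed the MACB_BIT/MACB_BF/MACB_BFEXT macro family used throughout the diff (MACB_BF(CLK, ...), MACB_BFEXT(RX_FRMLEN, ...) and so on). The pattern is generic; a runnable rendering under invented names:

	#include <assert.h>
	#include <stdint.h>

	/* a field is described by its offset and width, as in macb.h */
	#define EX_CLK_OFFSET 18
	#define EX_CLK_SIZE   3

	#define EX_BF(name, value) (((value) & ((1u << EX_##name##_SIZE) - 1)) \
				    << EX_##name##_OFFSET)
	#define EX_BFEXT(name, word) (((word) >> EX_##name##_OFFSET) \
				      & ((1u << EX_##name##_SIZE) - 1))

	int main(void)
	{
		uint32_t reg = EX_BF(CLK, 5);		/* insert the field */

		assert(EX_BFEXT(CLK, reg) == 5);	/* extract it back */
		return 0;
	}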
@@ -417,7 +435,12 @@ struct dma_desc {
417#define MACB_TX_USED_OFFSET 31 435#define MACB_TX_USED_OFFSET 31
418#define MACB_TX_USED_SIZE 1 436#define MACB_TX_USED_SIZE 1
419 437
420struct ring_info { 438/**
439 * struct macb_tx_skb - data about an skb which is being transmitted
440 * @skb: skb currently being transmitted
441 * @mapping: DMA address of the skb's data buffer
442 */
443struct macb_tx_skb {
421 struct sk_buff *skb; 444 struct sk_buff *skb;
422 dma_addr_t mapping; 445 dma_addr_t mapping;
423}; 446};
@@ -502,12 +525,12 @@ struct macb {
502 void __iomem *regs; 525 void __iomem *regs;
503 526
504 unsigned int rx_tail; 527 unsigned int rx_tail;
505 struct dma_desc *rx_ring; 528 struct macb_dma_desc *rx_ring;
506 void *rx_buffers; 529 void *rx_buffers;
507 530
508 unsigned int tx_head, tx_tail; 531 unsigned int tx_head, tx_tail;
509 struct dma_desc *tx_ring; 532 struct macb_dma_desc *tx_ring;
510 struct ring_info *tx_skb; 533 struct macb_tx_skb *tx_skb;
511 534
512 spinlock_t lock; 535 spinlock_t lock;
513 struct platform_device *pdev; 536 struct platform_device *pdev;
@@ -515,6 +538,7 @@ struct macb {
515 struct clk *hclk; 538 struct clk *hclk;
516 struct net_device *dev; 539 struct net_device *dev;
517 struct napi_struct napi; 540 struct napi_struct napi;
541 struct work_struct tx_error_task;
518 struct net_device_stats stats; 542 struct net_device_stats stats;
519 union { 543 union {
520 struct macb_stats macb; 544 struct macb_stats macb;
@@ -525,8 +549,6 @@ struct macb {
525 dma_addr_t tx_ring_dma; 549 dma_addr_t tx_ring_dma;
526 dma_addr_t rx_buffers_dma; 550 dma_addr_t rx_buffers_dma;
527 551
528 unsigned int rx_pending, tx_pending;
529
530 struct mii_bus *mii_bus; 552 struct mii_bus *mii_bus;
531 struct phy_device *phy_dev; 553 struct phy_device *phy_dev;
532 unsigned int link; 554 unsigned int link;
@@ -534,8 +556,22 @@ struct macb {
534 unsigned int duplex; 556 unsigned int duplex;
535 557
536 phy_interface_t phy_interface; 558 phy_interface_t phy_interface;
559
560 /* AT91RM9200 transmit */
561 struct sk_buff *skb; /* holds skb until xmit interrupt completes */
562 dma_addr_t skb_physaddr; /* phys addr from pci_map_single */
563 int skb_length; /* saved skb length for pci_unmap_single */
537}; 564};
538 565
566extern const struct ethtool_ops macb_ethtool_ops;
567
568int macb_mii_init(struct macb *bp);
569int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
570struct net_device_stats *macb_get_stats(struct net_device *dev);
571void macb_set_rx_mode(struct net_device *dev);
572void macb_set_hwaddr(struct macb *bp);
573void macb_get_hwaddr(struct macb *bp);
574
539static inline bool macb_is_gem(struct macb *bp) 575static inline bool macb_is_gem(struct macb *bp)
540{ 576{
541 return MACB_BFEXT(IDNUM, macb_readl(bp, MID)) == 0x2; 577 return MACB_BFEXT(IDNUM, macb_readl(bp, MID)) == 0x2;
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 16814b34d4b6..b407043ce9b0 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -191,6 +191,7 @@
191#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */ 191#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
192#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */ 192#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
193#define DMA_CONTROL_DFF 0x01000000 /* Disable flush of rx frames */ 193#define DMA_CONTROL_DFF 0x01000000 /* Disable flush of rx frames */
194#define DMA_CONTROL_OSF 0x00000004 /* Operate on 2nd tx frame */
194 195
195/* DMA Normal interrupt */ 196/* DMA Normal interrupt */
196#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */ 197#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
@@ -210,7 +211,7 @@
210#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */ 211#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
211 212
212#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \ 213#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
213 DMA_INTR_ENA_TUE) 214 DMA_INTR_ENA_TUE | DMA_INTR_ENA_TIE)
214 215
215#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \ 216#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
216 DMA_INTR_ENA_RWE | DMA_INTR_ENA_RSE | \ 217 DMA_INTR_ENA_RWE | DMA_INTR_ENA_RSE | \
@@ -373,6 +374,7 @@ struct xgmac_priv {
373 struct sk_buff **tx_skbuff; 374 struct sk_buff **tx_skbuff;
374 unsigned int tx_head; 375 unsigned int tx_head;
375 unsigned int tx_tail; 376 unsigned int tx_tail;
377 int tx_irq_cnt;
376 378
377 void __iomem *base; 379 void __iomem *base;
378 unsigned int dma_buf_sz; 380 unsigned int dma_buf_sz;
@@ -663,6 +665,7 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
663{ 665{
664 struct xgmac_dma_desc *p; 666 struct xgmac_dma_desc *p;
665 dma_addr_t paddr; 667 dma_addr_t paddr;
668 int bufsz = priv->dev->mtu + ETH_HLEN + ETH_FCS_LEN;
666 669
667 while (dma_ring_space(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ) > 1) { 670 while (dma_ring_space(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ) > 1) {
668 int entry = priv->rx_head; 671 int entry = priv->rx_head;
@@ -671,13 +674,13 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
671 p = priv->dma_rx + entry; 674 p = priv->dma_rx + entry;
672 675
673 if (priv->rx_skbuff[entry] == NULL) { 676 if (priv->rx_skbuff[entry] == NULL) {
674 skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz); 677 skb = netdev_alloc_skb_ip_align(priv->dev, bufsz);
675 if (unlikely(skb == NULL)) 678 if (unlikely(skb == NULL))
676 break; 679 break;
677 680
678 priv->rx_skbuff[entry] = skb; 681 priv->rx_skbuff[entry] = skb;
679 paddr = dma_map_single(priv->device, skb->data, 682 paddr = dma_map_single(priv->device, skb->data,
680 priv->dma_buf_sz, DMA_FROM_DEVICE); 683 bufsz, DMA_FROM_DEVICE);
681 desc_set_buf_addr(p, paddr, priv->dma_buf_sz); 684 desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
682 } 685 }
683 686
@@ -701,10 +704,10 @@ static int xgmac_dma_desc_rings_init(struct net_device *dev)
701 unsigned int bfsize; 704 unsigned int bfsize;
702 705
703 /* Set the Buffer size according to the MTU; 706 /* Set the Buffer size according to the MTU;
704 * indeed, in case of jumbo we need to bump-up the buffer sizes. 707 * The total buffer size including any IP offset must be a multiple
708 * of 8 bytes.
705 */ 709 */
706 bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN + 64, 710 bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, 8);
707 64);
708 711
709 netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize); 712 netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize);
710 713
@@ -845,9 +848,6 @@ static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv)
845static void xgmac_tx_complete(struct xgmac_priv *priv) 848static void xgmac_tx_complete(struct xgmac_priv *priv)
846{ 849{
847 int i; 850 int i;
848 void __iomem *ioaddr = priv->base;
849
850 writel(DMA_STATUS_TU | DMA_STATUS_NIS, ioaddr + XGMAC_DMA_STATUS);
851 851
852 while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) { 852 while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
853 unsigned int entry = priv->tx_tail; 853 unsigned int entry = priv->tx_tail;
@@ -888,7 +888,7 @@ static void xgmac_tx_complete(struct xgmac_priv *priv)
888 } 888 }
889 889
890 if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) > 890 if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
891 TX_THRESH) 891 MAX_SKB_FRAGS)
892 netif_wake_queue(priv->dev); 892 netif_wake_queue(priv->dev);
893} 893}
894 894
@@ -965,8 +965,7 @@ static int xgmac_hw_init(struct net_device *dev)
965 ctrl |= XGMAC_CONTROL_IPC; 965 ctrl |= XGMAC_CONTROL_IPC;
966 writel(ctrl, ioaddr + XGMAC_CONTROL); 966 writel(ctrl, ioaddr + XGMAC_CONTROL);
967 967
968 value = DMA_CONTROL_DFF; 968 writel(DMA_CONTROL_OSF, ioaddr + XGMAC_DMA_CONTROL);
969 writel(value, ioaddr + XGMAC_DMA_CONTROL);
970 969
971 /* Set the HW DMA mode and the COE */ 970 /* Set the HW DMA mode and the COE */
972 writel(XGMAC_OMR_TSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA | 971 writel(XGMAC_OMR_TSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA |
@@ -1060,19 +1059,15 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
1060 struct xgmac_priv *priv = netdev_priv(dev); 1059 struct xgmac_priv *priv = netdev_priv(dev);
1061 unsigned int entry; 1060 unsigned int entry;
1062 int i; 1061 int i;
1062 u32 irq_flag;
1063 int nfrags = skb_shinfo(skb)->nr_frags; 1063 int nfrags = skb_shinfo(skb)->nr_frags;
1064 struct xgmac_dma_desc *desc, *first; 1064 struct xgmac_dma_desc *desc, *first;
1065 unsigned int desc_flags; 1065 unsigned int desc_flags;
1066 unsigned int len; 1066 unsigned int len;
1067 dma_addr_t paddr; 1067 dma_addr_t paddr;
1068 1068
1069 if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) < 1069 priv->tx_irq_cnt = (priv->tx_irq_cnt + 1) & (DMA_TX_RING_SZ/4 - 1);
1070 (nfrags + 1)) { 1070 irq_flag = priv->tx_irq_cnt ? 0 : TXDESC_INTERRUPT;
1071 writel(DMA_INTR_DEFAULT_MASK | DMA_INTR_ENA_TIE,
1072 priv->base + XGMAC_DMA_INTR_ENA);
1073 netif_stop_queue(dev);
1074 return NETDEV_TX_BUSY;
1075 }
1076 1071
1077 desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ? 1072 desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ?
1078 TXDESC_CSUM_ALL : 0; 1073 TXDESC_CSUM_ALL : 0;
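The tx_irq_cnt counter introduced here is straightforward interrupt mitigation: only every (DMA_TX_RING_SZ/4)-th descriptor carries TXDESC_INTERRUPT, so completions are reaped in batches rather than one IRQ per frame. The counter arithmetic in isolation, runnable:

	#include <assert.h>

	#define RING_SZ 256		/* power of 2, like the TX ring */

	int main(void)
	{
		unsigned int cnt = 0;
		int irqs = 0, i;

		for (i = 0; i < 1024; i++) {
			cnt = (cnt + 1) & (RING_SZ / 4 - 1);
			if (cnt == 0)
				irqs++;	/* this descriptor requests an IRQ */
		}
		assert(irqs == 1024 / (RING_SZ / 4));	/* 16 IRQs, not 1024 */
		return 0;
	}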
@@ -1113,9 +1108,9 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
1113	 /* Interrupt on completion only for the last segment */	1108	 /* Interrupt on completion only for the last segment */
1114	 if (desc != first)	1109	 if (desc != first)
1115	 desc_set_tx_owner(desc, desc_flags |	1110	 desc_set_tx_owner(desc, desc_flags |
1116	 TXDESC_LAST_SEG | TXDESC_INTERRUPT);	1111	 TXDESC_LAST_SEG | irq_flag);
1117	 else	1112	 else
1118	 desc_flags |= TXDESC_LAST_SEG | TXDESC_INTERRUPT;	1113	 desc_flags |= TXDESC_LAST_SEG | irq_flag;
1119	1114
1120	 /* Set owner on first desc last to avoid race condition */	1115	 /* Set owner on first desc last to avoid race condition */
1121	 wmb();	1116	 wmb();
@@ -1124,6 +1119,9 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
1124 priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ); 1119 priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);
1125 1120
1126 writel(1, priv->base + XGMAC_DMA_TX_POLL); 1121 writel(1, priv->base + XGMAC_DMA_TX_POLL);
1122 if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) <
1123 MAX_SKB_FRAGS)
1124 netif_stop_queue(dev);
1127 1125
1128 return NETDEV_TX_OK; 1126 return NETDEV_TX_OK;
1129} 1127}
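TX completion interrupts are also mitigated now: tx_irq_cnt wraps modulo DMA_TX_RING_SZ/4, and only the frame that wraps it to zero sets TXDESC_INTERRUPT. A worked example, assuming a ring of 256 descriptors (a power of two, as the mask trick requires):

	/* DMA_TX_RING_SZ = 256 (assumed) -> counter cycles 0..63 */
	priv->tx_irq_cnt = (priv->tx_irq_cnt + 1) & (DMA_TX_RING_SZ / 4 - 1);
	irq_flag = priv->tx_irq_cnt ? 0 : TXDESC_INTERRUPT;
	/* every 64th frame raises an interrupt; the rest are reaped in the
	 * same xgmac_tx_complete() pass, roughly a 64x cut in TX IRQs */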
@@ -1139,9 +1137,6 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit)
1139 struct sk_buff *skb; 1137 struct sk_buff *skb;
1140 int frame_len; 1138 int frame_len;
1141 1139
1142 writel(DMA_STATUS_RI | DMA_STATUS_NIS,
1143 priv->base + XGMAC_DMA_STATUS);
1144
1145 entry = priv->rx_tail; 1140 entry = priv->rx_tail;
1146 p = priv->dma_rx + entry; 1141 p = priv->dma_rx + entry;
1147 if (desc_get_owner(p)) 1142 if (desc_get_owner(p))
@@ -1180,8 +1175,6 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit)
1180 1175
1181 xgmac_rx_refill(priv); 1176 xgmac_rx_refill(priv);
1182 1177
1183 writel(1, priv->base + XGMAC_DMA_RX_POLL);
1184
1185 return count; 1178 return count;
1186} 1179}
1187 1180
@@ -1205,7 +1198,7 @@ static int xgmac_poll(struct napi_struct *napi, int budget)
1205 1198
1206 if (work_done < budget) { 1199 if (work_done < budget) {
1207 napi_complete(napi); 1200 napi_complete(napi);
1208 writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA); 1201 __raw_writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
1209 } 1202 }
1210 return work_done; 1203 return work_done;
1211} 1204}
@@ -1350,7 +1343,7 @@ static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id)
1350 struct xgmac_priv *priv = netdev_priv(dev); 1343 struct xgmac_priv *priv = netdev_priv(dev);
1351 void __iomem *ioaddr = priv->base; 1344 void __iomem *ioaddr = priv->base;
1352 1345
1353 intr_status = readl(ioaddr + XGMAC_INT_STAT); 1346 intr_status = __raw_readl(ioaddr + XGMAC_INT_STAT);
1354 if (intr_status & XGMAC_INT_STAT_PMT) { 1347 if (intr_status & XGMAC_INT_STAT_PMT) {
1355 netdev_dbg(priv->dev, "received Magic frame\n"); 1348 netdev_dbg(priv->dev, "received Magic frame\n");
1356 /* clear the PMT bits 5 and 6 by reading the PMT */ 1349 /* clear the PMT bits 5 and 6 by reading the PMT */
@@ -1368,9 +1361,9 @@ static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
1368 struct xgmac_extra_stats *x = &priv->xstats; 1361 struct xgmac_extra_stats *x = &priv->xstats;
1369 1362
1370 /* read the status register (CSR5) */ 1363 /* read the status register (CSR5) */
1371 intr_status = readl(priv->base + XGMAC_DMA_STATUS); 1364 intr_status = __raw_readl(priv->base + XGMAC_DMA_STATUS);
1372 intr_status &= readl(priv->base + XGMAC_DMA_INTR_ENA); 1365 intr_status &= __raw_readl(priv->base + XGMAC_DMA_INTR_ENA);
1373 writel(intr_status, priv->base + XGMAC_DMA_STATUS); 1366 __raw_writel(intr_status, priv->base + XGMAC_DMA_STATUS);
1374 1367
1375 /* It displays the DMA process states (CSR5 register) */ 1368 /* It displays the DMA process states (CSR5 register) */
1376 /* ABNORMAL interrupts */ 1369 /* ABNORMAL interrupts */
@@ -1405,8 +1398,8 @@ static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
1405 } 1398 }
1406 1399
1407 /* TX/RX NORMAL interrupts */ 1400 /* TX/RX NORMAL interrupts */
1408 if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU)) { 1401 if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU | DMA_STATUS_TI)) {
1409 writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA); 1402 __raw_writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA);
1410 napi_schedule(&priv->napi); 1403 napi_schedule(&priv->napi);
1411 } 1404 }
1412 1405
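The readl()/writel() calls in the interrupt and NAPI paths become __raw variants. On an ARM platform such as this one the relationship is roughly (a sketch; the exact definitions are per-architecture):

	/* readl(a)     ~ le32_to_cpu(__raw_readl(a)) plus a read barrier
	 * writel(v, a) ~ write barrier, then __raw_writel(cpu_to_le32(v), a)
	 */
	u32 status = __raw_readl(priv->base + XGMAC_DMA_STATUS); /* no barrier, no swap */

Skipping the barrier and byte swap is presumably safe here because the device registers are little-endian on this SoC and descriptor ordering is still enforced by the explicit wmb() in the xmit path.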
diff --git a/drivers/net/ethernet/chelsio/cxgb3/common.h b/drivers/net/ethernet/chelsio/cxgb3/common.h
index df01b6343241..8c82248ce416 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/common.h
@@ -42,10 +42,9 @@
42#include <linux/mdio.h> 42#include <linux/mdio.h>
43#include "version.h" 43#include "version.h"
44 44
45#define CH_ERR(adap, fmt, ...) dev_err(&adap->pdev->dev, fmt, ## __VA_ARGS__) 45#define CH_ERR(adap, fmt, ...) dev_err(&adap->pdev->dev, fmt, ##__VA_ARGS__)
46#define CH_WARN(adap, fmt, ...) dev_warn(&adap->pdev->dev, fmt, ## __VA_ARGS__) 46#define CH_WARN(adap, fmt, ...) dev_warn(&adap->pdev->dev, fmt, ##__VA_ARGS__)
47#define CH_ALERT(adap, fmt, ...) \ 47#define CH_ALERT(adap, fmt, ...) dev_alert(&adap->pdev->dev, fmt, ##__VA_ARGS__)
48 dev_printk(KERN_ALERT, &adap->pdev->dev, fmt, ## __VA_ARGS__)
49 48
50/* 49/*
51 * More powerful macro that selectively prints messages based on msg_enable. 50 * More powerful macro that selectively prints messages based on msg_enable.
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index a059f0c27e28..2fb01bf18155 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -1758,21 +1758,7 @@ static struct pci_driver rio_driver = {
1758 .remove = __devexit_p(rio_remove1), 1758 .remove = __devexit_p(rio_remove1),
1759}; 1759};
1760 1760
1761static int __init 1761module_pci_driver(rio_driver);
1762rio_init (void)
1763{
1764 return pci_register_driver(&rio_driver);
1765}
1766
1767static void __exit
1768rio_exit (void)
1769{
1770 pci_unregister_driver (&rio_driver);
1771}
1772
1773module_init (rio_init);
1774module_exit (rio_exit);
1775
1776/* 1762/*
1777 1763
1778Compile command: 1764Compile command:
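For reference, module_pci_driver() generates exactly the boilerplate deleted above. Roughly what the macro expands to for this driver (a sketch of the generic module_driver() helper; see include/linux/pci.h for the real definition):

static int __init rio_driver_init(void)
{
	return pci_register_driver(&rio_driver);
}
module_init(rio_driver_init);

static void __exit rio_driver_exit(void)
{
	pci_unregister_driver(&rio_driver);
}
module_exit(rio_driver_exit);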
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index cf4c05bdf5fe..abf26c7c1d19 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -34,7 +34,7 @@
34#include "be_hw.h" 34#include "be_hw.h"
35#include "be_roce.h" 35#include "be_roce.h"
36 36
37#define DRV_VER "4.4.31.0u" 37#define DRV_VER "4.4.161.0u"
38#define DRV_NAME "be2net" 38#define DRV_NAME "be2net"
39#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC" 39#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
40#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC" 40#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
@@ -53,6 +53,7 @@
53#define OC_DEVICE_ID3 0xe220 /* Device id for Lancer cards */ 53#define OC_DEVICE_ID3 0xe220 /* Device id for Lancer cards */
54#define OC_DEVICE_ID4 0xe228 /* Device id for VF in Lancer */ 54#define OC_DEVICE_ID4 0xe228 /* Device id for VF in Lancer */
55#define OC_DEVICE_ID5 0x720 /* Device Id for Skyhawk cards */ 55#define OC_DEVICE_ID5 0x720 /* Device Id for Skyhawk cards */
56#define OC_DEVICE_ID6 0x728 /* Device id for VF in SkyHawk */
56#define OC_SUBSYS_DEVICE_ID1 0xE602 57#define OC_SUBSYS_DEVICE_ID1 0xE602
57#define OC_SUBSYS_DEVICE_ID2 0xE642 58#define OC_SUBSYS_DEVICE_ID2 0xE642
58#define OC_SUBSYS_DEVICE_ID3 0xE612 59#define OC_SUBSYS_DEVICE_ID3 0xE612
@@ -71,6 +72,7 @@ static inline char *nic_name(struct pci_dev *pdev)
71 case BE_DEVICE_ID2: 72 case BE_DEVICE_ID2:
72 return BE3_NAME; 73 return BE3_NAME;
73 case OC_DEVICE_ID5: 74 case OC_DEVICE_ID5:
75 case OC_DEVICE_ID6:
74 return OC_NAME_SH; 76 return OC_NAME_SH;
75 default: 77 default:
76 return BE_NAME; 78 return BE_NAME;
@@ -346,7 +348,6 @@ struct be_adapter {
346 struct pci_dev *pdev; 348 struct pci_dev *pdev;
347 struct net_device *netdev; 349 struct net_device *netdev;
348 350
349 u8 __iomem *csr;
350 u8 __iomem *db; /* Door Bell */ 351 u8 __iomem *db; /* Door Bell */
351 352
352 struct mutex mbox_lock; /* For serializing mbox cmds to BE card */ 353 struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
@@ -374,11 +375,8 @@ struct be_adapter {
374 struct be_rx_obj rx_obj[MAX_RX_QS]; 375 struct be_rx_obj rx_obj[MAX_RX_QS];
375 u32 big_page_size; /* Compounded page size shared by rx wrbs */ 376 u32 big_page_size; /* Compounded page size shared by rx wrbs */
376 377
377 u8 eq_next_idx;
378 struct be_drv_stats drv_stats; 378 struct be_drv_stats drv_stats;
379
380 u16 vlans_added; 379 u16 vlans_added;
381 u16 max_vlans; /* Number of vlans supported */
382 u8 vlan_tag[VLAN_N_VID]; 380 u8 vlan_tag[VLAN_N_VID];
383 u8 vlan_prio_bmap; /* Available Priority BitMap */ 381 u8 vlan_prio_bmap; /* Available Priority BitMap */
384 u16 recommended_prio; /* Recommended Priority */ 382 u16 recommended_prio; /* Recommended Priority */
@@ -391,6 +389,7 @@ struct be_adapter {
391 389
392 struct delayed_work func_recovery_work; 390 struct delayed_work func_recovery_work;
393 u32 flags; 391 u32 flags;
392 u32 cmd_privileges;
394 /* Ethtool knobs and info */ 393 /* Ethtool knobs and info */
395 char fw_ver[FW_VER_LEN]; 394 char fw_ver[FW_VER_LEN];
396 int if_handle; /* Used to configure filtering */ 395 int if_handle; /* Used to configure filtering */
@@ -408,10 +407,8 @@ struct be_adapter {
408 u32 rx_fc; /* Rx flow control */ 407 u32 rx_fc; /* Rx flow control */
409 u32 tx_fc; /* Tx flow control */ 408 u32 tx_fc; /* Tx flow control */
410 bool stats_cmd_sent; 409 bool stats_cmd_sent;
411 u8 generation; /* BladeEngine ASIC generation */
412 u32 if_type; 410 u32 if_type;
413 struct { 411 struct {
414 u8 __iomem *base; /* Door Bell */
415 u32 size; 412 u32 size;
416 u32 total_size; 413 u32 total_size;
417 u64 io_addr; 414 u64 io_addr;
@@ -434,10 +431,18 @@ struct be_adapter {
434 struct phy_info phy; 431 struct phy_info phy;
435 u8 wol_cap; 432 u8 wol_cap;
436 bool wol; 433 bool wol;
437 u32 max_pmac_cnt; /* Max secondary UC MACs programmable */
438 u32 uc_macs; /* Count of secondary UC MAC programmed */ 434 u32 uc_macs; /* Count of secondary UC MAC programmed */
439 u32 msg_enable; 435 u32 msg_enable;
440 int be_get_temp_freq; 436 int be_get_temp_freq;
437 u16 max_mcast_mac;
438 u16 max_tx_queues;
439 u16 max_rss_queues;
440 u16 max_rx_queues;
441 u16 max_pmac_cnt;
442 u16 max_vlans;
443 u16 max_event_queues;
444 u32 if_cap_flags;
445 u8 pf_number;
441}; 446};
442 447
443#define be_physfn(adapter) (!adapter->virtfn) 448#define be_physfn(adapter) (!adapter->virtfn)
@@ -448,21 +453,25 @@ struct be_adapter {
448 for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \ 453 for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \
449 i++, vf_cfg++) 454 i++, vf_cfg++)
450 455
451/* BladeEngine Generation numbers */
452#define BE_GEN2 2
453#define BE_GEN3 3
454
455#define ON 1 456#define ON 1
456#define OFF 0 457#define OFF 0
457#define lancer_chip(adapter) ((adapter->pdev->device == OC_DEVICE_ID3) || \
458 (adapter->pdev->device == OC_DEVICE_ID4))
459 458
460#define skyhawk_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID5) 459#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3 || \
460 adapter->pdev->device == OC_DEVICE_ID4)
461
462#define skyhawk_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID5 || \
463 adapter->pdev->device == OC_DEVICE_ID6)
464
465#define BE3_chip(adapter) (adapter->pdev->device == BE_DEVICE_ID2 || \
466 adapter->pdev->device == OC_DEVICE_ID2)
461 467
468#define BE2_chip(adapter) (adapter->pdev->device == BE_DEVICE_ID1 || \
469 adapter->pdev->device == OC_DEVICE_ID1)
462 470
463#define be_roce_supported(adapter) ((adapter->if_type == SLI_INTF_TYPE_3 || \ 471#define BEx_chip(adapter) (BE3_chip(adapter) || BE2_chip(adapter))
464 adapter->sli_family == SKYHAWK_SLI_FAMILY) && \ 472
465 (adapter->function_mode & RDMA_ENABLED)) 473#define be_roce_supported(adapter) (skyhawk_chip(adapter) && \
474 (adapter->function_mode & RDMA_ENABLED))
466 475
467extern const struct ethtool_ops be_ethtool_ops; 476extern const struct ethtool_ops be_ethtool_ops;
468 477
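These identity macros classify the ASIC purely by PCI device ID, which is what lets later hunks drop the cached generation, sli_family, and if_type checks. A hypothetical call site showing the intended idiom (it mirrors the stats hunks in be_cmds.c further down):

	/* hypothetical example: pick the stats command version by ASIC */
	if (BE2_chip(adapter))
		hdr->version = 0;	/* BE2 only speaks v0 */
	else
		hdr->version = 1;	/* BE3, Lancer and Skyhawk take v1 */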
@@ -637,12 +646,6 @@ static inline bool be_is_wol_excluded(struct be_adapter *adapter)
637 } 646 }
638} 647}
639 648
640static inline bool be_type_2_3(struct be_adapter *adapter)
641{
642 return (adapter->if_type == SLI_INTF_TYPE_2 ||
643 adapter->if_type == SLI_INTF_TYPE_3) ? true : false;
644}
645
646extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, 649extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
647 u16 num_popped); 650 u16 num_popped);
648extern void be_link_status_update(struct be_adapter *adapter, u8 link_status); 651extern void be_link_status_update(struct be_adapter *adapter, u8 link_status);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index af60bb26e330..f2875aa47661 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -19,6 +19,55 @@
19#include "be.h" 19#include "be.h"
20#include "be_cmds.h" 20#include "be_cmds.h"
21 21
22static struct be_cmd_priv_map cmd_priv_map[] = {
23 {
24 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
25 CMD_SUBSYSTEM_ETH,
26 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
27 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
28 },
29 {
30 OPCODE_COMMON_GET_FLOW_CONTROL,
31 CMD_SUBSYSTEM_COMMON,
32 BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
33 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
34 },
35 {
36 OPCODE_COMMON_SET_FLOW_CONTROL,
37 CMD_SUBSYSTEM_COMMON,
38 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
39 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
40 },
41 {
42 OPCODE_ETH_GET_PPORT_STATS,
43 CMD_SUBSYSTEM_ETH,
44 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
45 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
46 },
47 {
48 OPCODE_COMMON_GET_PHY_DETAILS,
49 CMD_SUBSYSTEM_COMMON,
50 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
51 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
52 }
53};
54
55static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode,
56 u8 subsystem)
57{
58 int i;
 59 int num_entries = ARRAY_SIZE(cmd_priv_map);
60 u32 cmd_privileges = adapter->cmd_privileges;
61
62 for (i = 0; i < num_entries; i++)
63 if (opcode == cmd_priv_map[i].opcode &&
64 subsystem == cmd_priv_map[i].subsystem)
65 if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
66 return false;
67
68 return true;
69}
70
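be_cmd_allowed() denies a command only when it is listed in cmd_priv_map and the function holds none of the privileges in that entry's mask; opcodes not in the table always pass. A worked example using the masks above:

	/* a function granted only BE_PRIV_LNKQUERY (0x2) */
	adapter->cmd_privileges = BE_PRIV_LNKQUERY;

	be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
		       CMD_SUBSYSTEM_COMMON);	/* true: mask contains LNKQUERY */
	be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
		       CMD_SUBSYSTEM_COMMON);	/* false: caller returns -EPERM */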
22static inline void *embedded_payload(struct be_mcc_wrb *wrb) 71static inline void *embedded_payload(struct be_mcc_wrb *wrb)
23{ 72{
24 return wrb->payload.embedded_payload; 73 return wrb->payload.embedded_payload;
@@ -419,14 +468,13 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
419static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage) 468static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
420{ 469{
421 u32 sem; 470 u32 sem;
471 u32 reg = skyhawk_chip(adapter) ? SLIPORT_SEMAPHORE_OFFSET_SH :
472 SLIPORT_SEMAPHORE_OFFSET_BE;
422 473
423 if (lancer_chip(adapter)) 474 pci_read_config_dword(adapter->pdev, reg, &sem);
424 sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET); 475 *stage = sem & POST_STAGE_MASK;
425 else
426 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
427 476
428 *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK; 477 if ((sem >> POST_ERR_SHIFT) & POST_ERR_MASK)
429 if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
430 return -1; 478 return -1;
431 else 479 else
432 return 0; 480 return 0;
@@ -452,10 +500,33 @@ int lancer_wait_ready(struct be_adapter *adapter)
452 return status; 500 return status;
453} 501}
454 502
503static bool lancer_provisioning_error(struct be_adapter *adapter)
504{
505 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
506 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
507 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
508 sliport_err1 = ioread32(adapter->db +
509 SLIPORT_ERROR1_OFFSET);
510 sliport_err2 = ioread32(adapter->db +
511 SLIPORT_ERROR2_OFFSET);
512
513 if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
514 sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
515 return true;
516 }
517 return false;
518}
519
455int lancer_test_and_set_rdy_state(struct be_adapter *adapter) 520int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
456{ 521{
457 int status; 522 int status;
458 u32 sliport_status, err, reset_needed; 523 u32 sliport_status, err, reset_needed;
524 bool resource_error;
525
526 resource_error = lancer_provisioning_error(adapter);
527 if (resource_error)
528 return -1;
529
459 status = lancer_wait_ready(adapter); 530 status = lancer_wait_ready(adapter);
460 if (!status) { 531 if (!status) {
461 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); 532 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
@@ -477,6 +548,14 @@ int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
477 status = -1; 548 status = -1;
478 } 549 }
479 } 550 }
 551 /* Stop error recovery if the error is not recoverable.
 552 * A "no resource" error is temporary and will go away
 553 * once the PF provisions resources.
554 */
555 resource_error = lancer_provisioning_error(adapter);
556 if (status == -1 && !resource_error)
557 adapter->eeh_error = true;
558
480 return status; 559 return status;
481} 560}
482 561
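The two lancer_provisioning_error() calls gate the recovery loop from both ends: a VF still waiting for the PF to provision resources neither attempts a reset nor latches an error, while any other persistent failure now sets eeh_error and stops further retries. Condensed control flow (a sketch of the function as patched):

	if (lancer_provisioning_error(adapter))
		return -1;		/* transient: just retry later */
	status = lancer_wait_ready(adapter);
	/* ... optional SLIPORT firmware reset and re-poll, as above ... */
	if (status == -1 && !lancer_provisioning_error(adapter))
		adapter->eeh_error = true;	/* hard failure: stop recovery */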
@@ -601,6 +680,9 @@ static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
601 struct be_queue_info *mccq = &adapter->mcc_obj.q; 680 struct be_queue_info *mccq = &adapter->mcc_obj.q;
602 struct be_mcc_wrb *wrb; 681 struct be_mcc_wrb *wrb;
603 682
683 if (!mccq->created)
684 return NULL;
685
604 if (atomic_read(&mccq->used) >= mccq->len) { 686 if (atomic_read(&mccq->used) >= mccq->len) {
605 dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n"); 687 dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
606 return NULL; 688 return NULL;
@@ -1155,8 +1237,7 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
1155 req->id = cpu_to_le16(q->id); 1237 req->id = cpu_to_le16(q->id);
1156 1238
1157 status = be_mbox_notify_wait(adapter); 1239 status = be_mbox_notify_wait(adapter);
1158 if (!status) 1240 q->created = false;
1159 q->created = false;
1160 1241
1161 mutex_unlock(&adapter->mbox_lock); 1242 mutex_unlock(&adapter->mbox_lock);
1162 return status; 1243 return status;
@@ -1183,8 +1264,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
1183 req->id = cpu_to_le16(q->id); 1264 req->id = cpu_to_le16(q->id);
1184 1265
1185 status = be_mcc_notify_wait(adapter); 1266 status = be_mcc_notify_wait(adapter);
1186 if (!status) 1267 q->created = false;
1187 q->created = false;
1188 1268
1189err: 1269err:
1190 spin_unlock_bh(&adapter->mcc_lock); 1270 spin_unlock_bh(&adapter->mcc_lock);
@@ -1281,7 +1361,8 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
1281 be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH, 1361 be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
1282 OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd); 1362 OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);
1283 1363
 1284 if (adapter->generation == BE_GEN3) 1364 /* version 1 of the cmd is supported by all chips except BE2 */
1365 if (!BE2_chip(adapter))
1285 hdr->version = 1; 1366 hdr->version = 1;
1286 1367
1287 be_mcc_notify(adapter); 1368 be_mcc_notify(adapter);
@@ -1301,6 +1382,10 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
1301 struct lancer_cmd_req_pport_stats *req; 1382 struct lancer_cmd_req_pport_stats *req;
1302 int status = 0; 1383 int status = 0;
1303 1384
1385 if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
1386 CMD_SUBSYSTEM_ETH))
1387 return -EPERM;
1388
1304 spin_lock_bh(&adapter->mcc_lock); 1389 spin_lock_bh(&adapter->mcc_lock);
1305 1390
1306 wrb = wrb_from_mccq(adapter); 1391 wrb = wrb_from_mccq(adapter);
@@ -1367,7 +1452,8 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
1367 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1452 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1368 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL); 1453 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
1369 1454
 1370 if (adapter->generation == BE_GEN3 || lancer_chip(adapter)) 1455 /* version 1 of the cmd is supported by all chips except BE2 */
1456 if (!BE2_chip(adapter))
1371 req->hdr.version = 1; 1457 req->hdr.version = 1;
1372 1458
1373 req->hdr.domain = dom; 1459 req->hdr.domain = dom;
@@ -1658,9 +1744,9 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1658 /* Reset mcast promisc mode if already set by setting mask 1744 /* Reset mcast promisc mode if already set by setting mask
1659 * and not setting flags field 1745 * and not setting flags field
1660 */ 1746 */
1661 if (!lancer_chip(adapter) || be_physfn(adapter)) 1747 req->if_flags_mask |=
1662 req->if_flags_mask |= 1748 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
1663 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS); 1749 adapter->if_cap_flags);
1664 1750
1665 req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev)); 1751 req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
1666 netdev_for_each_mc_addr(ha, adapter->netdev) 1752 netdev_for_each_mc_addr(ha, adapter->netdev)
@@ -1680,6 +1766,10 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
1680 struct be_cmd_req_set_flow_control *req; 1766 struct be_cmd_req_set_flow_control *req;
1681 int status; 1767 int status;
1682 1768
1769 if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
1770 CMD_SUBSYSTEM_COMMON))
1771 return -EPERM;
1772
1683 spin_lock_bh(&adapter->mcc_lock); 1773 spin_lock_bh(&adapter->mcc_lock);
1684 1774
1685 wrb = wrb_from_mccq(adapter); 1775 wrb = wrb_from_mccq(adapter);
@@ -1709,6 +1799,10 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
1709 struct be_cmd_req_get_flow_control *req; 1799 struct be_cmd_req_get_flow_control *req;
1710 int status; 1800 int status;
1711 1801
1802 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
1803 CMD_SUBSYSTEM_COMMON))
1804 return -EPERM;
1805
1712 spin_lock_bh(&adapter->mcc_lock); 1806 spin_lock_bh(&adapter->mcc_lock);
1713 1807
1714 wrb = wrb_from_mccq(adapter); 1808 wrb = wrb_from_mccq(adapter);
@@ -2067,7 +2161,7 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2067 int offset) 2161 int offset)
2068{ 2162{
2069 struct be_mcc_wrb *wrb; 2163 struct be_mcc_wrb *wrb;
2070 struct be_cmd_write_flashrom *req; 2164 struct be_cmd_read_flash_crc *req;
2071 int status; 2165 int status;
2072 2166
2073 spin_lock_bh(&adapter->mcc_lock); 2167 spin_lock_bh(&adapter->mcc_lock);
@@ -2080,7 +2174,8 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2080 req = embedded_payload(wrb); 2174 req = embedded_payload(wrb);
2081 2175
2082 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2176 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2083 OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4, wrb, NULL); 2177 OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
2178 wrb, NULL);
2084 2179
2085 req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT); 2180 req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT);
2086 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT); 2181 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
@@ -2089,7 +2184,7 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2089 2184
2090 status = be_mcc_notify_wait(adapter); 2185 status = be_mcc_notify_wait(adapter);
2091 if (!status) 2186 if (!status)
2092 memcpy(flashed_crc, req->params.data_buf, 4); 2187 memcpy(flashed_crc, req->crc, 4);
2093 2188
2094err: 2189err:
2095 spin_unlock_bh(&adapter->mcc_lock); 2190 spin_unlock_bh(&adapter->mcc_lock);
@@ -2275,6 +2370,10 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
2275 struct be_dma_mem cmd; 2370 struct be_dma_mem cmd;
2276 int status; 2371 int status;
2277 2372
2373 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
2374 CMD_SUBSYSTEM_COMMON))
2375 return -EPERM;
2376
2278 spin_lock_bh(&adapter->mcc_lock); 2377 spin_lock_bh(&adapter->mcc_lock);
2279 2378
2280 wrb = wrb_from_mccq(adapter); 2379 wrb = wrb_from_mccq(adapter);
@@ -2434,6 +2533,42 @@ err:
2434 return status; 2533 return status;
2435} 2534}
2436 2535
2536/* Get privilege(s) for a function */
2537int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
2538 u32 domain)
2539{
2540 struct be_mcc_wrb *wrb;
2541 struct be_cmd_req_get_fn_privileges *req;
2542 int status;
2543
2544 spin_lock_bh(&adapter->mcc_lock);
2545
2546 wrb = wrb_from_mccq(adapter);
2547 if (!wrb) {
2548 status = -EBUSY;
2549 goto err;
2550 }
2551
2552 req = embedded_payload(wrb);
2553
2554 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2555 OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
2556 wrb, NULL);
2557
2558 req->hdr.domain = domain;
2559
2560 status = be_mcc_notify_wait(adapter);
2561 if (!status) {
2562 struct be_cmd_resp_get_fn_privileges *resp =
2563 embedded_payload(wrb);
2564 *privilege = le32_to_cpu(resp->privilege_mask);
2565 }
2566
2567err:
2568 spin_unlock_bh(&adapter->mcc_lock);
2569 return status;
2570}
2571
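The returned mask is the value consulted by be_cmd_allowed() and check_privilege(). A hypothetical call site caching it once at setup time (the fallback choice is an assumption, not from this patch):

	/* hypothetical: populate adapter->cmd_privileges during setup */
	if (be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0))
		adapter->cmd_privileges = MIN_PRIVILEGES; /* conservative default */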
2437/* Uses synchronous MCCQ */ 2572/* Uses synchronous MCCQ */
2438int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, 2573int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
2439 bool *pmac_id_active, u32 *pmac_id, u8 domain) 2574 bool *pmac_id_active, u32 *pmac_id, u8 domain)
@@ -2651,6 +2786,10 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
2651 int payload_len = sizeof(*req); 2786 int payload_len = sizeof(*req);
2652 struct be_dma_mem cmd; 2787 struct be_dma_mem cmd;
2653 2788
2789 if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
2790 CMD_SUBSYSTEM_ETH))
2791 return -EPERM;
2792
2654 memset(&cmd, 0, sizeof(struct be_dma_mem)); 2793 memset(&cmd, 0, sizeof(struct be_dma_mem));
2655 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1); 2794 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
2656 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, 2795 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
@@ -2792,6 +2931,240 @@ err:
2792 return status; 2931 return status;
2793} 2932}
2794 2933
2934static struct be_nic_resource_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
2935 u32 max_buf_size)
2936{
2937 struct be_nic_resource_desc *desc = (struct be_nic_resource_desc *)buf;
2938 int i;
2939
2940 for (i = 0; i < desc_count; i++) {
2941 desc->desc_len = RESOURCE_DESC_SIZE;
2942 if (((void *)desc + desc->desc_len) >
2943 (void *)(buf + max_buf_size)) {
2944 desc = NULL;
2945 break;
2946 }
2947
2948 if (desc->desc_type == NIC_RESOURCE_DESC_TYPE_ID)
2949 break;
2950
2951 desc = (void *)desc + desc->desc_len;
2952 }
2953
2954 if (!desc || i == MAX_RESOURCE_DESC)
2955 return NULL;
2956
2957 return desc;
2958}
2959
2960/* Uses Mbox */
2961int be_cmd_get_func_config(struct be_adapter *adapter)
2962{
2963 struct be_mcc_wrb *wrb;
2964 struct be_cmd_req_get_func_config *req;
2965 int status;
2966 struct be_dma_mem cmd;
2967
2968 memset(&cmd, 0, sizeof(struct be_dma_mem));
2969 cmd.size = sizeof(struct be_cmd_resp_get_func_config);
2970 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2971 &cmd.dma);
2972 if (!cmd.va) {
2973 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2974 return -ENOMEM;
2975 }
2976 if (mutex_lock_interruptible(&adapter->mbox_lock))
2977 return -1;
2978
2979 wrb = wrb_from_mbox(adapter);
2980 if (!wrb) {
2981 status = -EBUSY;
2982 goto err;
2983 }
2984
2985 req = cmd.va;
2986
2987 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2988 OPCODE_COMMON_GET_FUNC_CONFIG,
2989 cmd.size, wrb, &cmd);
2990
2991 status = be_mbox_notify_wait(adapter);
2992 if (!status) {
2993 struct be_cmd_resp_get_func_config *resp = cmd.va;
2994 u32 desc_count = le32_to_cpu(resp->desc_count);
2995 struct be_nic_resource_desc *desc;
2996
2997 desc = be_get_nic_desc(resp->func_param, desc_count,
2998 sizeof(resp->func_param));
2999 if (!desc) {
3000 status = -EINVAL;
3001 goto err;
3002 }
3003
3004 adapter->pf_number = desc->pf_num;
3005 adapter->max_pmac_cnt = le16_to_cpu(desc->unicast_mac_count);
3006 adapter->max_vlans = le16_to_cpu(desc->vlan_count);
3007 adapter->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
3008 adapter->max_tx_queues = le16_to_cpu(desc->txq_count);
3009 adapter->max_rss_queues = le16_to_cpu(desc->rssq_count);
3010 adapter->max_rx_queues = le16_to_cpu(desc->rq_count);
3011
3012 adapter->max_event_queues = le16_to_cpu(desc->eq_count);
3013 adapter->if_cap_flags = le32_to_cpu(desc->cap_flags);
3014 }
3015err:
3016 mutex_unlock(&adapter->mbox_lock);
3017 pci_free_consistent(adapter->pdev, cmd.size,
3018 cmd.va, cmd.dma);
3019 return status;
3020}
3021
 3022/* Uses sync mcc */
3023int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
3024 u8 domain)
3025{
3026 struct be_mcc_wrb *wrb;
3027 struct be_cmd_req_get_profile_config *req;
3028 int status;
3029 struct be_dma_mem cmd;
3030
3031 memset(&cmd, 0, sizeof(struct be_dma_mem));
3032 cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
3033 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
3034 &cmd.dma);
3035 if (!cmd.va) {
3036 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
3037 return -ENOMEM;
3038 }
3039
3040 spin_lock_bh(&adapter->mcc_lock);
3041
3042 wrb = wrb_from_mccq(adapter);
3043 if (!wrb) {
3044 status = -EBUSY;
3045 goto err;
3046 }
3047
3048 req = cmd.va;
3049
3050 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3051 OPCODE_COMMON_GET_PROFILE_CONFIG,
3052 cmd.size, wrb, &cmd);
3053
3054 req->type = ACTIVE_PROFILE_TYPE;
3055 req->hdr.domain = domain;
3056
3057 status = be_mcc_notify_wait(adapter);
3058 if (!status) {
3059 struct be_cmd_resp_get_profile_config *resp = cmd.va;
3060 u32 desc_count = le32_to_cpu(resp->desc_count);
3061 struct be_nic_resource_desc *desc;
3062
3063 desc = be_get_nic_desc(resp->func_param, desc_count,
3064 sizeof(resp->func_param));
3065
3066 if (!desc) {
3067 status = -EINVAL;
3068 goto err;
3069 }
3070 *cap_flags = le32_to_cpu(desc->cap_flags);
3071 }
3072err:
3073 spin_unlock_bh(&adapter->mcc_lock);
3074 pci_free_consistent(adapter->pdev, cmd.size,
3075 cmd.va, cmd.dma);
3076 return status;
3077}
3078
3079/* Uses sync mcc */
3080int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
3081 u8 domain)
3082{
3083 struct be_mcc_wrb *wrb;
3084 struct be_cmd_req_set_profile_config *req;
3085 int status;
3086
3087 spin_lock_bh(&adapter->mcc_lock);
3088
3089 wrb = wrb_from_mccq(adapter);
3090 if (!wrb) {
3091 status = -EBUSY;
3092 goto err;
3093 }
3094
3095 req = embedded_payload(wrb);
3096
3097 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3098 OPCODE_COMMON_SET_PROFILE_CONFIG, sizeof(*req),
3099 wrb, NULL);
3100
3101 req->hdr.domain = domain;
3102 req->desc_count = cpu_to_le32(1);
3103
3104 req->nic_desc.desc_type = NIC_RESOURCE_DESC_TYPE_ID;
3105 req->nic_desc.desc_len = RESOURCE_DESC_SIZE;
3106 req->nic_desc.flags = (1 << QUN) | (1 << IMM) | (1 << NOSV);
3107 req->nic_desc.pf_num = adapter->pf_number;
3108 req->nic_desc.vf_num = domain;
3109
3110 /* Mark fields invalid */
3111 req->nic_desc.unicast_mac_count = 0xFFFF;
3112 req->nic_desc.mcc_count = 0xFFFF;
3113 req->nic_desc.vlan_count = 0xFFFF;
3114 req->nic_desc.mcast_mac_count = 0xFFFF;
3115 req->nic_desc.txq_count = 0xFFFF;
3116 req->nic_desc.rq_count = 0xFFFF;
3117 req->nic_desc.rssq_count = 0xFFFF;
3118 req->nic_desc.lro_count = 0xFFFF;
3119 req->nic_desc.cq_count = 0xFFFF;
3120 req->nic_desc.toe_conn_count = 0xFFFF;
3121 req->nic_desc.eq_count = 0xFFFF;
3122 req->nic_desc.link_param = 0xFF;
3123 req->nic_desc.bw_min = 0xFFFFFFFF;
3124 req->nic_desc.acpi_params = 0xFF;
3125 req->nic_desc.wol_param = 0x0F;
3126
3127 /* Change BW */
3128 req->nic_desc.bw_min = cpu_to_le32(bps);
3129 req->nic_desc.bw_max = cpu_to_le32(bps);
3130 status = be_mcc_notify_wait(adapter);
3131err:
3132 spin_unlock_bh(&adapter->mcc_lock);
3133 return status;
3134}
3135
3136/* Uses sync mcc */
3137int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
3138{
3139 struct be_mcc_wrb *wrb;
3140 struct be_cmd_enable_disable_vf *req;
3141 int status;
3142
3143 if (!lancer_chip(adapter))
3144 return 0;
3145
3146 spin_lock_bh(&adapter->mcc_lock);
3147
3148 wrb = wrb_from_mccq(adapter);
3149 if (!wrb) {
3150 status = -EBUSY;
3151 goto err;
3152 }
3153
3154 req = embedded_payload(wrb);
3155
3156 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3157 OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
3158 wrb, NULL);
3159
3160 req->hdr.domain = domain;
3161 req->enable = 1;
3162 status = be_mcc_notify_wait(adapter);
3163err:
3164 spin_unlock_bh(&adapter->mcc_lock);
3165 return status;
3166}
3167
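In be_cmd_set_profile_config() every nic_desc field except the bandwidth pair is written as all-ones, evidently a firmware convention for "leave unchanged" (assumed from this hunk; only bw_min/bw_max carry real values). Note the units: the be_main.c hunk below passes rate / 10 with rate in Mbps, so one unit here appears to represent 10 Mbps.

	req->nic_desc.txq_count = 0xFFFF;		/* sentinel: ignored */
	req->nic_desc.bw_min = cpu_to_le32(bps);	/* applied */
	req->nic_desc.bw_max = cpu_to_le32(bps);	/* applied, min == max */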
2795int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload, 3168int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
2796 int wrb_payload_size, u16 *cmd_status, u16 *ext_status) 3169 int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
2797{ 3170{
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 0936e21e3cff..d6552e19ffee 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -196,9 +196,14 @@ struct be_mcc_mailbox {
196#define OPCODE_COMMON_GET_MAC_LIST 147 196#define OPCODE_COMMON_GET_MAC_LIST 147
197#define OPCODE_COMMON_SET_MAC_LIST 148 197#define OPCODE_COMMON_SET_MAC_LIST 148
198#define OPCODE_COMMON_GET_HSW_CONFIG 152 198#define OPCODE_COMMON_GET_HSW_CONFIG 152
199#define OPCODE_COMMON_GET_FUNC_CONFIG 160
200#define OPCODE_COMMON_GET_PROFILE_CONFIG 164
201#define OPCODE_COMMON_SET_PROFILE_CONFIG 165
199#define OPCODE_COMMON_SET_HSW_CONFIG 153 202#define OPCODE_COMMON_SET_HSW_CONFIG 153
203#define OPCODE_COMMON_GET_FN_PRIVILEGES 170
200#define OPCODE_COMMON_READ_OBJECT 171 204#define OPCODE_COMMON_READ_OBJECT 171
201#define OPCODE_COMMON_WRITE_OBJECT 172 205#define OPCODE_COMMON_WRITE_OBJECT 172
206#define OPCODE_COMMON_ENABLE_DISABLE_VF 196
202 207
203#define OPCODE_ETH_RSS_CONFIG 1 208#define OPCODE_ETH_RSS_CONFIG 1
204#define OPCODE_ETH_ACPI_CONFIG 2 209#define OPCODE_ETH_ACPI_CONFIG 2
@@ -1151,14 +1156,22 @@ struct flashrom_params {
1151 u32 op_type; 1156 u32 op_type;
1152 u32 data_buf_size; 1157 u32 data_buf_size;
1153 u32 offset; 1158 u32 offset;
1154 u8 data_buf[4];
1155}; 1159};
1156 1160
1157struct be_cmd_write_flashrom { 1161struct be_cmd_write_flashrom {
1158 struct be_cmd_req_hdr hdr; 1162 struct be_cmd_req_hdr hdr;
1159 struct flashrom_params params; 1163 struct flashrom_params params;
1160}; 1164 u8 data_buf[32768];
1165 u8 rsvd[4];
1166} __packed;
1161 1167
1168/* cmd to read flash crc */
1169struct be_cmd_read_flash_crc {
1170 struct be_cmd_req_hdr hdr;
1171 struct flashrom_params params;
1172 u8 crc[4];
1173 u8 rsvd[4];
1174};
1162/**************** Lancer Firmware Flash ************/ 1175/**************** Lancer Firmware Flash ************/
1163struct amap_lancer_write_obj_context { 1176struct amap_lancer_write_obj_context {
1164 u8 write_length[24]; 1177 u8 write_length[24];
@@ -1429,6 +1442,41 @@ struct be_cmd_resp_set_func_cap {
1429 u8 rsvd[212]; 1442 u8 rsvd[212];
1430}; 1443};
1431 1444
1445/*********************** Function Privileges ***********************/
1446enum {
1447 BE_PRIV_DEFAULT = 0x1,
1448 BE_PRIV_LNKQUERY = 0x2,
1449 BE_PRIV_LNKSTATS = 0x4,
1450 BE_PRIV_LNKMGMT = 0x8,
1451 BE_PRIV_LNKDIAG = 0x10,
1452 BE_PRIV_UTILQUERY = 0x20,
1453 BE_PRIV_FILTMGMT = 0x40,
1454 BE_PRIV_IFACEMGMT = 0x80,
1455 BE_PRIV_VHADM = 0x100,
1456 BE_PRIV_DEVCFG = 0x200,
1457 BE_PRIV_DEVSEC = 0x400
1458};
1459#define MAX_PRIVILEGES (BE_PRIV_VHADM | BE_PRIV_DEVCFG | \
1460 BE_PRIV_DEVSEC)
1461#define MIN_PRIVILEGES BE_PRIV_DEFAULT
1462
1463struct be_cmd_priv_map {
1464 u8 opcode;
1465 u8 subsystem;
1466 u32 priv_mask;
1467};
1468
1469struct be_cmd_req_get_fn_privileges {
1470 struct be_cmd_req_hdr hdr;
1471 u32 rsvd;
1472};
1473
1474struct be_cmd_resp_get_fn_privileges {
1475 struct be_cmd_resp_hdr hdr;
1476 u32 privilege_mask;
1477};
1478
1479
1432/******************** GET/SET_MACLIST **************************/ 1480/******************** GET/SET_MACLIST **************************/
1433#define BE_MAX_MAC 64 1481#define BE_MAX_MAC 64
1434struct be_cmd_req_get_mac_list { 1482struct be_cmd_req_get_mac_list {
@@ -1608,33 +1656,6 @@ struct be_cmd_resp_get_stats_v1 {
1608 struct be_hw_stats_v1 hw_stats; 1656 struct be_hw_stats_v1 hw_stats;
1609}; 1657};
1610 1658
1611static inline void *hw_stats_from_cmd(struct be_adapter *adapter)
1612{
1613 if (adapter->generation == BE_GEN3) {
1614 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
1615
1616 return &cmd->hw_stats;
1617 } else {
1618 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
1619
1620 return &cmd->hw_stats;
1621 }
1622}
1623
1624static inline void *be_erx_stats_from_cmd(struct be_adapter *adapter)
1625{
1626 if (adapter->generation == BE_GEN3) {
1627 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
1628
1629 return &hw_stats->erx;
1630 } else {
1631 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
1632
1633 return &hw_stats->erx;
1634 }
1635}
1636
1637
1638/************** get fat capabilities *******************/ 1659/************** get fat capabilities *******************/
1639#define MAX_MODULES 27 1660#define MAX_MODULES 27
1640#define MAX_MODES 4 1661#define MAX_MODES 4
@@ -1684,6 +1705,96 @@ struct be_cmd_req_set_ext_fat_caps {
1684 struct be_fat_conf_params set_params; 1705 struct be_fat_conf_params set_params;
1685}; 1706};
1686 1707
1708#define RESOURCE_DESC_SIZE 72
1709#define NIC_RESOURCE_DESC_TYPE_ID 0x41
1710#define MAX_RESOURCE_DESC 4
1711
1712/* QOS unit number */
1713#define QUN 4
1714/* Immediate */
1715#define IMM 6
1716/* No save */
1717#define NOSV 7
1718
1719struct be_nic_resource_desc {
1720 u8 desc_type;
1721 u8 desc_len;
1722 u8 rsvd1;
1723 u8 flags;
1724 u8 vf_num;
1725 u8 rsvd2;
1726 u8 pf_num;
1727 u8 rsvd3;
1728 u16 unicast_mac_count;
1729 u8 rsvd4[6];
1730 u16 mcc_count;
1731 u16 vlan_count;
1732 u16 mcast_mac_count;
1733 u16 txq_count;
1734 u16 rq_count;
1735 u16 rssq_count;
1736 u16 lro_count;
1737 u16 cq_count;
1738 u16 toe_conn_count;
1739 u16 eq_count;
1740 u32 rsvd5;
1741 u32 cap_flags;
1742 u8 link_param;
1743 u8 rsvd6[3];
1744 u32 bw_min;
1745 u32 bw_max;
1746 u8 acpi_params;
1747 u8 wol_param;
1748 u16 rsvd7;
1749 u32 rsvd8[3];
1750};
1751
1752struct be_cmd_req_get_func_config {
1753 struct be_cmd_req_hdr hdr;
1754};
1755
1756struct be_cmd_resp_get_func_config {
1757 struct be_cmd_req_hdr hdr;
1758 u32 desc_count;
1759 u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE];
1760};
1761
1762#define ACTIVE_PROFILE_TYPE 0x2
1763struct be_cmd_req_get_profile_config {
1764 struct be_cmd_req_hdr hdr;
1765 u8 rsvd;
1766 u8 type;
1767 u16 rsvd1;
1768};
1769
1770struct be_cmd_resp_get_profile_config {
1771 struct be_cmd_req_hdr hdr;
1772 u32 desc_count;
1773 u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE];
1774};
1775
1776struct be_cmd_req_set_profile_config {
1777 struct be_cmd_req_hdr hdr;
1778 u32 rsvd;
1779 u32 desc_count;
1780 struct be_nic_resource_desc nic_desc;
1781};
1782
1783struct be_cmd_resp_set_profile_config {
1784 struct be_cmd_req_hdr hdr;
1785};
1786
1787struct be_cmd_enable_disable_vf {
1788 struct be_cmd_req_hdr hdr;
1789 u8 enable;
1790 u8 rsvd[3];
1791};
1792
1793static inline bool check_privilege(struct be_adapter *adapter, u32 flags)
1794{
1795 return flags & adapter->cmd_privileges ? true : false;
1796}
1797
1687extern int be_pci_fnum_get(struct be_adapter *adapter); 1798extern int be_pci_fnum_get(struct be_adapter *adapter);
1688extern int be_fw_wait_ready(struct be_adapter *adapter); 1799extern int be_fw_wait_ready(struct be_adapter *adapter);
1689extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, 1800extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
@@ -1780,6 +1891,8 @@ extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
1780extern int be_cmd_req_native_mode(struct be_adapter *adapter); 1891extern int be_cmd_req_native_mode(struct be_adapter *adapter);
1781extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size); 1892extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
1782extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf); 1893extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
1894extern int be_cmd_get_fn_privileges(struct be_adapter *adapter,
1895 u32 *privilege, u32 domain);
1783extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, 1896extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
1784 bool *pmac_id_active, u32 *pmac_id, 1897 bool *pmac_id_active, u32 *pmac_id,
1785 u8 domain); 1898 u8 domain);
@@ -1798,4 +1911,10 @@ extern int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
1798extern int lancer_wait_ready(struct be_adapter *adapter); 1911extern int lancer_wait_ready(struct be_adapter *adapter);
1799extern int lancer_test_and_set_rdy_state(struct be_adapter *adapter); 1912extern int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
1800extern int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name); 1913extern int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
1914extern int be_cmd_get_func_config(struct be_adapter *adapter);
1915extern int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
1916 u8 domain);
1801 1917
1918extern int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
1919 u8 domain);
1920extern int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 8e6fb0ba6aa9..00454a10f88d 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -261,6 +261,9 @@ be_get_reg_len(struct net_device *netdev)
261 struct be_adapter *adapter = netdev_priv(netdev); 261 struct be_adapter *adapter = netdev_priv(netdev);
262 u32 log_size = 0; 262 u32 log_size = 0;
263 263
264 if (!check_privilege(adapter, MAX_PRIVILEGES))
265 return 0;
266
264 if (be_physfn(adapter)) { 267 if (be_physfn(adapter)) {
265 if (lancer_chip(adapter)) 268 if (lancer_chip(adapter))
266 log_size = lancer_cmd_get_file_len(adapter, 269 log_size = lancer_cmd_get_file_len(adapter,
@@ -525,6 +528,10 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
525 u8 link_status; 528 u8 link_status;
526 u16 link_speed = 0; 529 u16 link_speed = 0;
527 int status; 530 int status;
531 u32 auto_speeds;
532 u32 fixed_speeds;
533 u32 dac_cable_len;
534 u16 interface_type;
528 535
529 if (adapter->phy.link_speed < 0) { 536 if (adapter->phy.link_speed < 0) {
530 status = be_cmd_link_status_query(adapter, &link_speed, 537 status = be_cmd_link_status_query(adapter, &link_speed,
@@ -534,39 +541,46 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
534 ethtool_cmd_speed_set(ecmd, link_speed); 541 ethtool_cmd_speed_set(ecmd, link_speed);
535 542
536 status = be_cmd_get_phy_info(adapter); 543 status = be_cmd_get_phy_info(adapter);
537 if (status) 544 if (!status) {
538 return status; 545 interface_type = adapter->phy.interface_type;
539 546 auto_speeds = adapter->phy.auto_speeds_supported;
540 ecmd->supported = 547 fixed_speeds = adapter->phy.fixed_speeds_supported;
541 convert_to_et_setting(adapter->phy.interface_type, 548 dac_cable_len = adapter->phy.dac_cable_len;
542 adapter->phy.auto_speeds_supported | 549
543 adapter->phy.fixed_speeds_supported); 550 ecmd->supported =
544 ecmd->advertising = 551 convert_to_et_setting(interface_type,
545 convert_to_et_setting(adapter->phy.interface_type, 552 auto_speeds |
546 adapter->phy.auto_speeds_supported); 553 fixed_speeds);
547 554 ecmd->advertising =
548 ecmd->port = be_get_port_type(adapter->phy.interface_type, 555 convert_to_et_setting(interface_type,
549 adapter->phy.dac_cable_len); 556 auto_speeds);
550 557
551 if (adapter->phy.auto_speeds_supported) { 558 ecmd->port = be_get_port_type(interface_type,
552 ecmd->supported |= SUPPORTED_Autoneg; 559 dac_cable_len);
553 ecmd->autoneg = AUTONEG_ENABLE; 560
554 ecmd->advertising |= ADVERTISED_Autoneg; 561 if (adapter->phy.auto_speeds_supported) {
555 } 562 ecmd->supported |= SUPPORTED_Autoneg;
563 ecmd->autoneg = AUTONEG_ENABLE;
564 ecmd->advertising |= ADVERTISED_Autoneg;
565 }
556 566
557 if (be_pause_supported(adapter)) {
558 ecmd->supported |= SUPPORTED_Pause; 567 ecmd->supported |= SUPPORTED_Pause;
559 ecmd->advertising |= ADVERTISED_Pause; 568 if (be_pause_supported(adapter))
560 } 569 ecmd->advertising |= ADVERTISED_Pause;
561 570
562 switch (adapter->phy.interface_type) { 571 switch (adapter->phy.interface_type) {
563 case PHY_TYPE_KR_10GB: 572 case PHY_TYPE_KR_10GB:
564 case PHY_TYPE_KX4_10GB: 573 case PHY_TYPE_KX4_10GB:
565 ecmd->transceiver = XCVR_INTERNAL; 574 ecmd->transceiver = XCVR_INTERNAL;
566 break; 575 break;
567 default: 576 default:
568 ecmd->transceiver = XCVR_EXTERNAL; 577 ecmd->transceiver = XCVR_EXTERNAL;
569 break; 578 break;
579 }
580 } else {
581 ecmd->port = PORT_OTHER;
582 ecmd->autoneg = AUTONEG_DISABLE;
583 ecmd->transceiver = XCVR_DUMMY1;
570 } 584 }
571 585
572 /* Save for future use */ 586 /* Save for future use */
@@ -787,6 +801,10 @@ static int
787be_get_eeprom_len(struct net_device *netdev) 801be_get_eeprom_len(struct net_device *netdev)
788{ 802{
789 struct be_adapter *adapter = netdev_priv(netdev); 803 struct be_adapter *adapter = netdev_priv(netdev);
804
805 if (!check_privilege(adapter, MAX_PRIVILEGES))
806 return 0;
807
790 if (lancer_chip(adapter)) { 808 if (lancer_chip(adapter)) {
791 if (be_physfn(adapter)) 809 if (be_physfn(adapter))
792 return lancer_cmd_get_file_len(adapter, 810 return lancer_cmd_get_file_len(adapter,
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index b755f7061dce..541d4530d5bf 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -31,12 +31,12 @@
31 31
32#define MPU_EP_CONTROL 0 32#define MPU_EP_CONTROL 0
33 33
34/********** MPU semaphore ******************/ 34/********** MPU semaphore: used for SH & BE *************/
35#define MPU_EP_SEMAPHORE_OFFSET 0xac 35#define SLIPORT_SEMAPHORE_OFFSET_BE 0x7c
36#define MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET 0x400 36#define SLIPORT_SEMAPHORE_OFFSET_SH 0x94
37#define EP_SEMAPHORE_POST_STAGE_MASK 0x0000FFFF 37#define POST_STAGE_MASK 0x0000FFFF
38#define EP_SEMAPHORE_POST_ERR_MASK 0x1 38#define POST_ERR_MASK 0x1
39#define EP_SEMAPHORE_POST_ERR_SHIFT 31 39#define POST_ERR_SHIFT 31
40 40
41/* MPU semaphore POST stage values */ 41/* MPU semaphore POST stage values */
42#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting goahead from host */ 42#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting goahead from host */
@@ -59,6 +59,9 @@
59#define PHYSDEV_CONTROL_FW_RESET_MASK 0x00000002 59#define PHYSDEV_CONTROL_FW_RESET_MASK 0x00000002
60#define PHYSDEV_CONTROL_INP_MASK 0x40000000 60#define PHYSDEV_CONTROL_INP_MASK 0x40000000
61 61
62#define SLIPORT_ERROR_NO_RESOURCE1 0x2
63#define SLIPORT_ERROR_NO_RESOURCE2 0x9
64
62/********* Memory BAR register ************/ 65/********* Memory BAR register ************/
63#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc 66#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
64/* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt 67/* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt
@@ -102,11 +105,6 @@
102#define SLI_INTF_TYPE_2 2 105#define SLI_INTF_TYPE_2 2
103#define SLI_INTF_TYPE_3 3 106#define SLI_INTF_TYPE_3 3
104 107
105/* SLI family */
106#define BE_SLI_FAMILY 0x0
107#define LANCER_A0_SLI_FAMILY 0xA
108#define SKYHAWK_SLI_FAMILY 0x2
109
110/********* ISR0 Register offset **********/ 108/********* ISR0 Register offset **********/
111#define CEV_ISR0_OFFSET 0xC18 109#define CEV_ISR0_OFFSET 0xC18
112#define CEV_ISR_SIZE 4 110#define CEV_ISR_SIZE 4
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index d1b6cc587639..c365722218ff 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -44,6 +44,7 @@ static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
44 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)}, 44 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
45 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)}, 45 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
46 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)}, 46 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
47 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
47 { 0 } 48 { 0 }
48}; 49};
49MODULE_DEVICE_TABLE(pci, be_dev_ids); 50MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -237,23 +238,46 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
237 int status = 0; 238 int status = 0;
238 u8 current_mac[ETH_ALEN]; 239 u8 current_mac[ETH_ALEN];
239 u32 pmac_id = adapter->pmac_id[0]; 240 u32 pmac_id = adapter->pmac_id[0];
241 bool active_mac = true;
240 242
241 if (!is_valid_ether_addr(addr->sa_data)) 243 if (!is_valid_ether_addr(addr->sa_data))
242 return -EADDRNOTAVAIL; 244 return -EADDRNOTAVAIL;
243 245
 244 status = be_cmd_mac_addr_query(adapter, current_mac, 246 /* For a BE VF, the MAC address is already activated by the PF.
 245 adapter->if_handle, 0); 247 * Hence the only operation left is updating netdev->dev_addr.
 248 * Update it only if the user passes the same MAC that was used
 249 * when configuring the VF MAC from the PF (hypervisor).
250 */
251 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
252 status = be_cmd_mac_addr_query(adapter, current_mac,
253 false, adapter->if_handle, 0);
254 if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
255 goto done;
256 else
257 goto err;
258 }
259
260 if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
261 goto done;
262
263 /* For Lancer check if any MAC is active.
264 * If active, get its mac id.
265 */
266 if (lancer_chip(adapter) && !be_physfn(adapter))
267 be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
268 &pmac_id, 0);
269
270 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
271 adapter->if_handle,
272 &adapter->pmac_id[0], 0);
273
246 if (status) 274 if (status)
247 goto err; 275 goto err;
248 276
249 if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) { 277 if (active_mac)
250 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data, 278 be_cmd_pmac_del(adapter, adapter->if_handle,
251 adapter->if_handle, &adapter->pmac_id[0], 0); 279 pmac_id, 0);
252 if (status) 280done:
253 goto err;
254
255 be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
256 }
257 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 281 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
258 return 0; 282 return 0;
259err: 283err:
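The rework also changes the ordering for the non-BE-VF cases: the new MAC is programmed before the old one is deleted, so the RX filter never has a window with no unicast address. In outline (a sketch; the branch labels are descriptive, not driver names):

	/* New be_mac_addr_set() flow (sketch):
	 *  BE2/BE3 VF : MAC is owned by the PF; accept the request only if
	 *               it matches the MAC the PF already programmed.
	 *  Lancer VF  : look up the currently active MAC's pmac_id first.
	 *  otherwise  : be_cmd_pmac_add(new) *before* be_cmd_pmac_del(old),
	 *               then copy the address into netdev->dev_addr.
	 */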
@@ -261,7 +285,35 @@ err:
261 return status; 285 return status;
262} 286}
263 287
264static void populate_be2_stats(struct be_adapter *adapter) 288/* BE2 supports only v0 cmd */
289static void *hw_stats_from_cmd(struct be_adapter *adapter)
290{
291 if (BE2_chip(adapter)) {
292 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
293
294 return &cmd->hw_stats;
295 } else {
296 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
297
298 return &cmd->hw_stats;
299 }
300}
301
302/* BE2 supports only v0 cmd */
303static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
304{
305 if (BE2_chip(adapter)) {
306 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
307
308 return &hw_stats->erx;
309 } else {
310 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
311
312 return &hw_stats->erx;
313 }
314}
315
316static void populate_be_v0_stats(struct be_adapter *adapter)
265{ 317{
266 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter); 318 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
267 struct be_pmem_stats *pmem_sts = &hw_stats->pmem; 319 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
@@ -310,7 +362,7 @@ static void populate_be2_stats(struct be_adapter *adapter)
310 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops; 362 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
311} 363}
312 364
313static void populate_be3_stats(struct be_adapter *adapter) 365static void populate_be_v1_stats(struct be_adapter *adapter)
314{ 366{
315 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter); 367 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
316 struct be_pmem_stats *pmem_sts = &hw_stats->pmem; 368 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
@@ -412,28 +464,25 @@ void be_parse_stats(struct be_adapter *adapter)
412 struct be_rx_obj *rxo; 464 struct be_rx_obj *rxo;
413 int i; 465 int i;
414 466
415 if (adapter->generation == BE_GEN3) { 467 if (lancer_chip(adapter)) {
416 if (lancer_chip(adapter)) 468 populate_lancer_stats(adapter);
417 populate_lancer_stats(adapter);
418 else
419 populate_be3_stats(adapter);
420 } else { 469 } else {
421 populate_be2_stats(adapter); 470 if (BE2_chip(adapter))
422 } 471 populate_be_v0_stats(adapter);
423 472 else
424 if (lancer_chip(adapter)) 473 /* for BE3 and Skyhawk */
425 goto done; 474 populate_be_v1_stats(adapter);
426 475
427 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */ 476 /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
428 for_all_rx_queues(adapter, rxo, i) { 477 for_all_rx_queues(adapter, rxo, i) {
429 /* below erx HW counter can actually wrap around after 478 /* below erx HW counter can actually wrap around after
430 * 65535. Driver accumulates a 32-bit value 479 * 65535. Driver accumulates a 32-bit value
431 */ 480 */
432 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags, 481 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
433 (u16)erx->rx_drops_no_fragments[rxo->q.id]); 482 (u16)erx->rx_drops_no_fragments \
483 [rxo->q.id]);
484 }
434 } 485 }
435done:
436 return;
437} 486}
438 487
439static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev, 488static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
@@ -597,16 +646,6 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
597 hdr, skb_shinfo(skb)->gso_size); 646 hdr, skb_shinfo(skb)->gso_size);
598 if (skb_is_gso_v6(skb) && !lancer_chip(adapter)) 647 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
599 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1); 648 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
600 if (lancer_chip(adapter) && adapter->sli_family ==
601 LANCER_A0_SLI_FAMILY) {
602 AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
603 if (is_tcp_pkt(skb))
604 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
605 tcpcs, hdr, 1);
606 else if (is_udp_pkt(skb))
607 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
608 udpcs, hdr, 1);
609 }
610 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 649 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
611 if (is_tcp_pkt(skb)) 650 if (is_tcp_pkt(skb))
612 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1); 651 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
@@ -856,11 +895,15 @@ static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
856 struct be_adapter *adapter = netdev_priv(netdev); 895 struct be_adapter *adapter = netdev_priv(netdev);
857 int status = 0; 896 int status = 0;
858 897
859 if (!be_physfn(adapter)) { 898 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
860 status = -EINVAL; 899 status = -EINVAL;
861 goto ret; 900 goto ret;
862 } 901 }
863 902
903 /* Packets with VID 0 are always received by Lancer by default */
904 if (lancer_chip(adapter) && vid == 0)
905 goto ret;
906
864 adapter->vlan_tag[vid] = 1; 907 adapter->vlan_tag[vid] = 1;
865 if (adapter->vlans_added <= (adapter->max_vlans + 1)) 908 if (adapter->vlans_added <= (adapter->max_vlans + 1))
866 status = be_vid_config(adapter); 909 status = be_vid_config(adapter);
@@ -878,11 +921,15 @@ static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
878 struct be_adapter *adapter = netdev_priv(netdev); 921 struct be_adapter *adapter = netdev_priv(netdev);
879 int status = 0; 922 int status = 0;
880 923
881 if (!be_physfn(adapter)) { 924 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
882 status = -EINVAL; 925 status = -EINVAL;
883 goto ret; 926 goto ret;
884 } 927 }
885 928
929 /* Packets with VID 0 are always received by Lancer by default */
930 if (lancer_chip(adapter) && vid == 0)
931 goto ret;
932
886 adapter->vlan_tag[vid] = 0; 933 adapter->vlan_tag[vid] = 0;
887 if (adapter->vlans_added <= adapter->max_vlans) 934 if (adapter->vlans_added <= adapter->max_vlans)
888 status = be_vid_config(adapter); 935 status = be_vid_config(adapter);
@@ -917,7 +964,7 @@ static void be_set_rx_mode(struct net_device *netdev)
917 964
918 /* Enable multicast promisc if num configured exceeds what we support */ 965 /* Enable multicast promisc if num configured exceeds what we support */
919 if (netdev->flags & IFF_ALLMULTI || 966 if (netdev->flags & IFF_ALLMULTI ||
920 netdev_mc_count(netdev) > BE_MAX_MC) { 967 netdev_mc_count(netdev) > adapter->max_mcast_mac) {
921 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON); 968 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
922 goto done; 969 goto done;
923 } 970 }
@@ -962,6 +1009,9 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
962 struct be_adapter *adapter = netdev_priv(netdev); 1009 struct be_adapter *adapter = netdev_priv(netdev);
963 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; 1010 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
964 int status; 1011 int status;
1012 bool active_mac = false;
1013 u32 pmac_id;
1014 u8 old_mac[ETH_ALEN];
965 1015
966 if (!sriov_enabled(adapter)) 1016 if (!sriov_enabled(adapter))
967 return -EPERM; 1017 return -EPERM;
@@ -970,6 +1020,12 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
970 return -EINVAL; 1020 return -EINVAL;
971 1021
972 if (lancer_chip(adapter)) { 1022 if (lancer_chip(adapter)) {
1023 status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
1024 &pmac_id, vf + 1);
1025 if (!status && active_mac)
1026 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
1027 pmac_id, vf + 1);
1028
973 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1); 1029 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
974 } else { 1030 } else {
975 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle, 1031 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
@@ -1062,7 +1118,10 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
1062 return -EINVAL; 1118 return -EINVAL;
1063 } 1119 }
1064 1120
1065 status = be_cmd_set_qos(adapter, rate / 10, vf + 1); 1121 if (lancer_chip(adapter))
1122 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1123 else
1124 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
1066 1125
1067 if (status) 1126 if (status)
1068 dev_err(&adapter->pdev->dev, 1127 dev_err(&adapter->pdev->dev,
@@ -1837,12 +1896,13 @@ static void be_tx_queues_destroy(struct be_adapter *adapter)
1837 1896
1838static int be_num_txqs_want(struct be_adapter *adapter) 1897static int be_num_txqs_want(struct be_adapter *adapter)
1839{ 1898{
1840 if (sriov_want(adapter) || be_is_mc(adapter) || 1899 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1841 lancer_chip(adapter) || !be_physfn(adapter) || 1900 be_is_mc(adapter) ||
1842 adapter->generation == BE_GEN2) 1901 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
1902 BE2_chip(adapter))
1843 return 1; 1903 return 1;
1844 else 1904 else
1845 return MAX_TX_QS; 1905 return adapter->max_tx_queues;
1846} 1906}
1847 1907
1848static int be_tx_cqs_create(struct be_adapter *adapter) 1908static int be_tx_cqs_create(struct be_adapter *adapter)
@@ -2177,9 +2237,11 @@ static void be_msix_disable(struct be_adapter *adapter)
2177static uint be_num_rss_want(struct be_adapter *adapter) 2237static uint be_num_rss_want(struct be_adapter *adapter)
2178{ 2238{
2179 u32 num = 0; 2239 u32 num = 0;
2240
2180 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) && 2241 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2181 !sriov_want(adapter) && be_physfn(adapter)) { 2242 (lancer_chip(adapter) ||
2182 num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS; 2243 (!sriov_want(adapter) && be_physfn(adapter)))) {
2244 num = adapter->max_rss_queues;
2183 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues()); 2245 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2184 } 2246 }
2185 return num; 2247 return num;
@@ -2579,10 +2641,30 @@ static int be_clear(struct be_adapter *adapter)
2579 be_tx_queues_destroy(adapter); 2641 be_tx_queues_destroy(adapter);
2580 be_evt_queues_destroy(adapter); 2642 be_evt_queues_destroy(adapter);
2581 2643
2644 kfree(adapter->pmac_id);
2645 adapter->pmac_id = NULL;
2646
2582 be_msix_disable(adapter); 2647 be_msix_disable(adapter);
2583 return 0; 2648 return 0;
2584} 2649}
2585 2650
2651static void be_get_vf_if_cap_flags(struct be_adapter *adapter,
2652 u32 *cap_flags, u8 domain)
2653{
2654 bool profile_present = false;
2655 int status;
2656
2657 if (lancer_chip(adapter)) {
2658 status = be_cmd_get_profile_config(adapter, cap_flags, domain);
2659 if (!status)
2660 profile_present = true;
2661 }
2662
2663 if (!profile_present)
2664 *cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2665 BE_IF_FLAGS_MULTICAST;
2666}
2667
2586static int be_vf_setup_init(struct be_adapter *adapter) 2668static int be_vf_setup_init(struct be_adapter *adapter)
2587{ 2669{
2588 struct be_vf_cfg *vf_cfg; 2670 struct be_vf_cfg *vf_cfg;
@@ -2634,9 +2716,13 @@ static int be_vf_setup(struct be_adapter *adapter)
2634 if (status) 2716 if (status)
2635 goto err; 2717 goto err;
2636 2718
2637 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2638 BE_IF_FLAGS_MULTICAST;
2639 for_all_vfs(adapter, vf_cfg, vf) { 2719 for_all_vfs(adapter, vf_cfg, vf) {
2720 be_get_vf_if_cap_flags(adapter, &cap_flags, vf + 1);
2721
2722 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2723 BE_IF_FLAGS_BROADCAST |
2724 BE_IF_FLAGS_MULTICAST);
2725
2640 status = be_cmd_if_create(adapter, cap_flags, en_flags, 2726 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2641 &vf_cfg->if_handle, vf + 1); 2727 &vf_cfg->if_handle, vf + 1);
2642 if (status) 2728 if (status)
@@ -2661,6 +2747,8 @@ static int be_vf_setup(struct be_adapter *adapter)
2661 if (status) 2747 if (status)
2662 goto err; 2748 goto err;
2663 vf_cfg->def_vid = def_vlan; 2749 vf_cfg->def_vid = def_vlan;
2750
2751 be_cmd_enable_vf(adapter, vf + 1);
2664 } 2752 }
2665 return 0; 2753 return 0;
2666err: 2754err:
@@ -2674,7 +2762,10 @@ static void be_setup_init(struct be_adapter *adapter)
2674 adapter->if_handle = -1; 2762 adapter->if_handle = -1;
2675 adapter->be3_native = false; 2763 adapter->be3_native = false;
2676 adapter->promiscuous = false; 2764 adapter->promiscuous = false;
2677 adapter->eq_next_idx = 0; 2765 if (be_physfn(adapter))
2766 adapter->cmd_privileges = MAX_PRIVILEGES;
2767 else
2768 adapter->cmd_privileges = MIN_PRIVILEGES;
2678} 2769}
2679 2770
2680static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle, 2771static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
@@ -2712,12 +2803,93 @@ static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2712 return status; 2803 return status;
2713} 2804}
2714 2805
2806static void be_get_resources(struct be_adapter *adapter)
2807{
2808 int status;
2809 bool profile_present = false;
2810
2811 if (lancer_chip(adapter)) {
2812 status = be_cmd_get_func_config(adapter);
2813
2814 if (!status)
2815 profile_present = true;
2816 }
2817
2818 if (profile_present) {
2819 /* Sanity fixes for Lancer */
2820 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
2821 BE_UC_PMAC_COUNT);
2822 adapter->max_vlans = min_t(u16, adapter->max_vlans,
2823 BE_NUM_VLANS_SUPPORTED);
2824 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
2825 BE_MAX_MC);
2826 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
2827 MAX_TX_QS);
2828 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
2829 BE3_MAX_RSS_QS);
2830 adapter->max_event_queues = min_t(u16,
2831 adapter->max_event_queues,
2832 BE3_MAX_RSS_QS);
2833
2834 if (adapter->max_rss_queues &&
2835 adapter->max_rss_queues == adapter->max_rx_queues)
2836 adapter->max_rss_queues -= 1;
2837
2838 if (adapter->max_event_queues < adapter->max_rss_queues)
2839 adapter->max_rss_queues = adapter->max_event_queues;
2840
2841 } else {
2842 if (be_physfn(adapter))
2843 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
2844 else
2845 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
2846
2847 if (adapter->function_mode & FLEX10_MODE)
2848 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
2849 else
2850 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2851
2852 adapter->max_mcast_mac = BE_MAX_MC;
2853 adapter->max_tx_queues = MAX_TX_QS;
2854 adapter->max_rss_queues = (adapter->be3_native) ?
2855 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2856 adapter->max_event_queues = BE3_MAX_RSS_QS;
2857
2858 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
2859 BE_IF_FLAGS_BROADCAST |
2860 BE_IF_FLAGS_MULTICAST |
2861 BE_IF_FLAGS_PASS_L3L4_ERRORS |
2862 BE_IF_FLAGS_MCAST_PROMISCUOUS |
2863 BE_IF_FLAGS_VLAN_PROMISCUOUS |
2864 BE_IF_FLAGS_PROMISCUOUS;
2865
2866 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
2867 adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
2868 }
2869}
2870
2715/* Routine to query per function resource limits */ 2871/* Routine to query per function resource limits */
2716static int be_get_config(struct be_adapter *adapter) 2872static int be_get_config(struct be_adapter *adapter)
2717{ 2873{
2718 int pos; 2874 int pos, status;
2719 u16 dev_num_vfs; 2875 u16 dev_num_vfs;
2720 2876
2877 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2878 &adapter->function_mode,
2879 &adapter->function_caps);
2880 if (status)
2881 goto err;
2882
2883 be_get_resources(adapter);
2884
2885 /* primary mac needs 1 pmac entry */
2886 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
2887 sizeof(u32), GFP_KERNEL);
2888 if (!adapter->pmac_id) {
2889 status = -ENOMEM;
2890 goto err;
2891 }
2892
2721 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV); 2893 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2722 if (pos) { 2894 if (pos) {
2723 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF, 2895 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
@@ -2726,13 +2898,14 @@ static int be_get_config(struct be_adapter *adapter)
2726 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS); 2898 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
2727 adapter->dev_num_vfs = dev_num_vfs; 2899 adapter->dev_num_vfs = dev_num_vfs;
2728 } 2900 }
2729 return 0; 2901err:
2902 return status;
2730} 2903}
2731 2904
2732static int be_setup(struct be_adapter *adapter) 2905static int be_setup(struct be_adapter *adapter)
2733{ 2906{
2734 struct device *dev = &adapter->pdev->dev; 2907 struct device *dev = &adapter->pdev->dev;
2735 u32 cap_flags, en_flags; 2908 u32 en_flags;
2736 u32 tx_fc, rx_fc; 2909 u32 tx_fc, rx_fc;
2737 int status; 2910 int status;
2738 u8 mac[ETH_ALEN]; 2911 u8 mac[ETH_ALEN];
@@ -2740,9 +2913,12 @@ static int be_setup(struct be_adapter *adapter)
2740 2913
2741 be_setup_init(adapter); 2914 be_setup_init(adapter);
2742 2915
2743 be_get_config(adapter); 2916 if (!lancer_chip(adapter))
2917 be_cmd_req_native_mode(adapter);
2744 2918
2745 be_cmd_req_native_mode(adapter); 2919 status = be_get_config(adapter);
2920 if (status)
2921 goto err;
2746 2922
2747 be_msix_enable(adapter); 2923 be_msix_enable(adapter);
2748 2924
@@ -2762,24 +2938,22 @@ static int be_setup(struct be_adapter *adapter)
2762 if (status) 2938 if (status)
2763 goto err; 2939 goto err;
2764 2940
2941 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
2942 /* In UMC mode FW does not return right privileges.
2943 * Override with correct privilege equivalent to PF.
2944 */
2945 if (be_is_mc(adapter))
2946 adapter->cmd_privileges = MAX_PRIVILEGES;
2947
2765 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | 2948 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2766 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS; 2949 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2767 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2768 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2769 2950
2770 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) { 2951 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
2771 cap_flags |= BE_IF_FLAGS_RSS;
2772 en_flags |= BE_IF_FLAGS_RSS; 2952 en_flags |= BE_IF_FLAGS_RSS;
2773 }
2774 2953
2775 if (lancer_chip(adapter) && !be_physfn(adapter)) { 2954 en_flags = en_flags & adapter->if_cap_flags;
2776 en_flags = BE_IF_FLAGS_UNTAGGED |
2777 BE_IF_FLAGS_BROADCAST |
2778 BE_IF_FLAGS_MULTICAST;
2779 cap_flags = en_flags;
2780 }
2781 2955
2782 status = be_cmd_if_create(adapter, cap_flags, en_flags, 2956 status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
2783 &adapter->if_handle, 0); 2957 &adapter->if_handle, 0);
2784 if (status != 0) 2958 if (status != 0)
2785 goto err; 2959 goto err;
@@ -2827,8 +3001,8 @@ static int be_setup(struct be_adapter *adapter)
2827 dev_warn(dev, "device doesn't support SRIOV\n"); 3001 dev_warn(dev, "device doesn't support SRIOV\n");
2828 } 3002 }
2829 3003
2830 be_cmd_get_phy_info(adapter); 3004 status = be_cmd_get_phy_info(adapter);
2831 if (be_pause_supported(adapter)) 3005 if (!status && be_pause_supported(adapter))
2832 adapter->phy.fc_autoneg = 1; 3006 adapter->phy.fc_autoneg = 1;
2833 3007
2834 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); 3008 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
@@ -2895,7 +3069,7 @@ static bool is_comp_in_ufi(struct be_adapter *adapter,
2895 int i = 0, img_type = 0; 3069 int i = 0, img_type = 0;
2896 struct flash_section_info_g2 *fsec_g2 = NULL; 3070 struct flash_section_info_g2 *fsec_g2 = NULL;
2897 3071
2898 if (adapter->generation != BE_GEN3) 3072 if (BE2_chip(adapter))
2899 fsec_g2 = (struct flash_section_info_g2 *)fsec; 3073 fsec_g2 = (struct flash_section_info_g2 *)fsec;
2900 3074
2901 for (i = 0; i < MAX_FLASH_COMP; i++) { 3075 for (i = 0; i < MAX_FLASH_COMP; i++) {
@@ -2928,7 +3102,49 @@ struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
2928 return NULL; 3102 return NULL;
2929} 3103}
2930 3104
2931static int be_flash_data(struct be_adapter *adapter, 3105static int be_flash(struct be_adapter *adapter, const u8 *img,
3106 struct be_dma_mem *flash_cmd, int optype, int img_size)
3107{
3108 u32 total_bytes = 0, flash_op, num_bytes = 0;
3109 int status = 0;
3110 struct be_cmd_write_flashrom *req = flash_cmd->va;
3111
3112 total_bytes = img_size;
3113 while (total_bytes) {
3114 num_bytes = min_t(u32, 32*1024, total_bytes);
3115
3116 total_bytes -= num_bytes;
3117
3118 if (!total_bytes) {
3119 if (optype == OPTYPE_PHY_FW)
3120 flash_op = FLASHROM_OPER_PHY_FLASH;
3121 else
3122 flash_op = FLASHROM_OPER_FLASH;
3123 } else {
3124 if (optype == OPTYPE_PHY_FW)
3125 flash_op = FLASHROM_OPER_PHY_SAVE;
3126 else
3127 flash_op = FLASHROM_OPER_SAVE;
3128 }
3129
3130 memcpy(req->data_buf, img, num_bytes);
3131 img += num_bytes;
3132 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3133 flash_op, num_bytes);
3134 if (status) {
3135 if (status == ILLEGAL_IOCTL_REQ &&
3136 optype == OPTYPE_PHY_FW)
3137 break;
3138 dev_err(&adapter->pdev->dev,
3139 "cmd to write to flash rom failed.\n");
3140 return status;
3141 }
3142 }
3143 return 0;
3144}
3145
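The new be_flash() helper above streams an image in 32 KB pieces, issuing a SAVE-type op for every chunk except the last, which gets the real FLASH (or PHY_FLASH) op that commits the write. A hedged sketch of that save-then-commit pattern, with an invented write_op callback standing in for be_cmd_write_flashrom():

#include <stddef.h>
#include <stdint.h>

#define CHUNK_SZ (32 * 1024)

/* Stream 'total' bytes in CHUNK_SZ pieces; 'commit' is nonzero only
 * on the final chunk, mirroring FLASHROM_OPER_SAVE vs _FLASH above. */
static int flash_chunked(const uint8_t *img, size_t total,
			 int (*write_op)(const uint8_t *buf, size_t len,
					 int commit))
{
	while (total) {
		size_t n = total < CHUNK_SZ ? total : CHUNK_SZ;

		total -= n;
		if (write_op(img, n, total == 0))
			return -1;	/* device reported an error */
		img += n;
	}
	return 0;
}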
3146/* For BE2 and BE3 */
3147static int be_flash_BEx(struct be_adapter *adapter,
2932 const struct firmware *fw, 3148 const struct firmware *fw,
2933 struct be_dma_mem *flash_cmd, 3149 struct be_dma_mem *flash_cmd,
2934 int num_of_images) 3150 int num_of_images)
@@ -2936,12 +3152,9 @@ static int be_flash_data(struct be_adapter *adapter,
2936{ 3152{
2937 int status = 0, i, filehdr_size = 0; 3153 int status = 0, i, filehdr_size = 0;
2938 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr)); 3154 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
2939 u32 total_bytes = 0, flash_op;
2940 int num_bytes;
2941 const u8 *p = fw->data; 3155 const u8 *p = fw->data;
2942 struct be_cmd_write_flashrom *req = flash_cmd->va;
2943 const struct flash_comp *pflashcomp; 3156 const struct flash_comp *pflashcomp;
2944 int num_comp, hdr_size; 3157 int num_comp, redboot;
2945 struct flash_section_info *fsec = NULL; 3158 struct flash_section_info *fsec = NULL;
2946 3159
2947 struct flash_comp gen3_flash_types[] = { 3160 struct flash_comp gen3_flash_types[] = {
@@ -2986,7 +3199,7 @@ static int be_flash_data(struct be_adapter *adapter,
2986 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE} 3199 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
2987 }; 3200 };
2988 3201
2989 if (adapter->generation == BE_GEN3) { 3202 if (BE3_chip(adapter)) {
2990 pflashcomp = gen3_flash_types; 3203 pflashcomp = gen3_flash_types;
2991 filehdr_size = sizeof(struct flash_file_hdr_g3); 3204 filehdr_size = sizeof(struct flash_file_hdr_g3);
2992 num_comp = ARRAY_SIZE(gen3_flash_types); 3205 num_comp = ARRAY_SIZE(gen3_flash_types);
@@ -2995,6 +3208,7 @@ static int be_flash_data(struct be_adapter *adapter,
2995 filehdr_size = sizeof(struct flash_file_hdr_g2); 3208 filehdr_size = sizeof(struct flash_file_hdr_g2);
2996 num_comp = ARRAY_SIZE(gen2_flash_types); 3209 num_comp = ARRAY_SIZE(gen2_flash_types);
2997 } 3210 }
3211
2998 /* Get flash section info*/ 3212 /* Get flash section info*/
2999 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw); 3213 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3000 if (!fsec) { 3214 if (!fsec) {
@@ -3010,70 +3224,105 @@ static int be_flash_data(struct be_adapter *adapter,
3010 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0) 3224 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3011 continue; 3225 continue;
3012 3226
3013 if (pflashcomp[i].optype == OPTYPE_PHY_FW) { 3227 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3014 if (!phy_flashing_required(adapter)) 3228 !phy_flashing_required(adapter))
3015 continue; 3229 continue;
3016 }
3017
3018 hdr_size = filehdr_size +
3019 (num_of_images * sizeof(struct image_hdr));
3020 3230
3021 if ((pflashcomp[i].optype == OPTYPE_REDBOOT) && 3231 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3022 (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset, 3232 redboot = be_flash_redboot(adapter, fw->data,
3023 pflashcomp[i].size, hdr_size))) 3233 pflashcomp[i].offset, pflashcomp[i].size,
3024 continue; 3234 filehdr_size + img_hdrs_size);
3235 if (!redboot)
3236 continue;
3237 }
3025 3238
3026 /* Flash the component */
3027 p = fw->data; 3239 p = fw->data;
3028 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size; 3240 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3029 if (p + pflashcomp[i].size > fw->data + fw->size) 3241 if (p + pflashcomp[i].size > fw->data + fw->size)
3030 return -1; 3242 return -1;
3031 total_bytes = pflashcomp[i].size; 3243
3032 while (total_bytes) { 3244 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3033 if (total_bytes > 32*1024) 3245 pflashcomp[i].size);
3034 num_bytes = 32*1024; 3246 if (status) {
3035 else 3247 dev_err(&adapter->pdev->dev,
3036 num_bytes = total_bytes; 3248 "Flashing section type %d failed.\n",
3037 total_bytes -= num_bytes; 3249 pflashcomp[i].img_type);
3038 if (!total_bytes) { 3250 return status;
3039 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
3040 flash_op = FLASHROM_OPER_PHY_FLASH;
3041 else
3042 flash_op = FLASHROM_OPER_FLASH;
3043 } else {
3044 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
3045 flash_op = FLASHROM_OPER_PHY_SAVE;
3046 else
3047 flash_op = FLASHROM_OPER_SAVE;
3048 }
3049 memcpy(req->params.data_buf, p, num_bytes);
3050 p += num_bytes;
3051 status = be_cmd_write_flashrom(adapter, flash_cmd,
3052 pflashcomp[i].optype, flash_op, num_bytes);
3053 if (status) {
3054 if ((status == ILLEGAL_IOCTL_REQ) &&
3055 (pflashcomp[i].optype ==
3056 OPTYPE_PHY_FW))
3057 break;
3058 dev_err(&adapter->pdev->dev,
3059 "cmd to write to flash rom failed.\n");
3060 return -1;
3061 }
3062 } 3251 }
3063 } 3252 }
3064 return 0; 3253 return 0;
3065} 3254}
3066 3255
3067static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr) 3256static int be_flash_skyhawk(struct be_adapter *adapter,
3257 const struct firmware *fw,
3258 struct be_dma_mem *flash_cmd, int num_of_images)
3068{ 3259{
3069 if (fhdr == NULL) 3260 int status = 0, i, filehdr_size = 0;
3070 return 0; 3261 int img_offset, img_size, img_optype, redboot;
3071 if (fhdr->build[0] == '3') 3262 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3072 return BE_GEN3; 3263 const u8 *p = fw->data;
3073 else if (fhdr->build[0] == '2') 3264 struct flash_section_info *fsec = NULL;
3074 return BE_GEN2; 3265
3075 else 3266 filehdr_size = sizeof(struct flash_file_hdr_g3);
3076 return 0; 3267 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3268 if (!fsec) {
3269 dev_err(&adapter->pdev->dev,
 3270 "Invalid Cookie. UFI corrupted?\n");
3271 return -1;
3272 }
3273
3274 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3275 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3276 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3277
3278 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3279 case IMAGE_FIRMWARE_iSCSI:
3280 img_optype = OPTYPE_ISCSI_ACTIVE;
3281 break;
3282 case IMAGE_BOOT_CODE:
3283 img_optype = OPTYPE_REDBOOT;
3284 break;
3285 case IMAGE_OPTION_ROM_ISCSI:
3286 img_optype = OPTYPE_BIOS;
3287 break;
3288 case IMAGE_OPTION_ROM_PXE:
3289 img_optype = OPTYPE_PXE_BIOS;
3290 break;
3291 case IMAGE_OPTION_ROM_FCoE:
3292 img_optype = OPTYPE_FCOE_BIOS;
3293 break;
3294 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3295 img_optype = OPTYPE_ISCSI_BACKUP;
3296 break;
3297 case IMAGE_NCSI:
3298 img_optype = OPTYPE_NCSI_FW;
3299 break;
3300 default:
3301 continue;
3302 }
3303
3304 if (img_optype == OPTYPE_REDBOOT) {
3305 redboot = be_flash_redboot(adapter, fw->data,
3306 img_offset, img_size,
3307 filehdr_size + img_hdrs_size);
3308 if (!redboot)
3309 continue;
3310 }
3311
3312 p = fw->data;
3313 p += filehdr_size + img_offset + img_hdrs_size;
3314 if (p + img_size > fw->data + fw->size)
3315 return -1;
3316
3317 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3318 if (status) {
3319 dev_err(&adapter->pdev->dev,
3320 "Flashing section type %d failed.\n",
 3321 le32_to_cpu(fsec->fsec_entry[i].type));
3322 return status;
3323 }
3324 }
3325 return 0;
3077} 3326}
3078 3327
3079static int lancer_wait_idle(struct be_adapter *adapter) 3328static int lancer_wait_idle(struct be_adapter *adapter)
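be_flash_skyhawk() above maps each flash-section entry type to an op type with a switch, skipping unknown types via the default label. The same mapping could instead live in a lookup table, which scales better as section types grow; a sketch with invented numeric stand-ins for the driver-private IMAGE_*/OPTYPE_* constants:

#include <stddef.h>
#include <stdint.h>

/* Illustrative table-driven equivalent of the switch above; the
 * img_type/optype values are made-up placeholders. */
struct img_map {
	uint32_t img_type;
	int optype;
};

static const struct img_map skyhawk_img_map[] = {
	{ 1 /* IMAGE_FIRMWARE_iSCSI */,   10 /* OPTYPE_ISCSI_ACTIVE */ },
	{ 2 /* IMAGE_BOOT_CODE */,        11 /* OPTYPE_REDBOOT */ },
	{ 3 /* IMAGE_OPTION_ROM_ISCSI */, 12 /* OPTYPE_BIOS */ },
};

static int img_type_to_optype(uint32_t img_type)
{
	size_t i;

	for (i = 0; i < sizeof(skyhawk_img_map) / sizeof(skyhawk_img_map[0]); i++)
		if (skyhawk_img_map[i].img_type == img_type)
			return skyhawk_img_map[i].optype;
	return -1;	/* unknown section: caller skips it */
}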
@@ -3207,6 +3456,28 @@ lancer_fw_exit:
3207 return status; 3456 return status;
3208} 3457}
3209 3458
3459#define UFI_TYPE2 2
3460#define UFI_TYPE3 3
3461#define UFI_TYPE4 4
3462static int be_get_ufi_type(struct be_adapter *adapter,
3463 struct flash_file_hdr_g2 *fhdr)
3464{
3465 if (fhdr == NULL)
3466 goto be_get_ufi_exit;
3467
3468 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3469 return UFI_TYPE4;
3470 else if (BE3_chip(adapter) && fhdr->build[0] == '3')
3471 return UFI_TYPE3;
3472 else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3473 return UFI_TYPE2;
3474
3475be_get_ufi_exit:
3476 dev_err(&adapter->pdev->dev,
3477 "UFI and Interface are not compatible for flashing\n");
3478 return -1;
3479}
3480
3210static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw) 3481static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3211{ 3482{
3212 struct flash_file_hdr_g2 *fhdr; 3483 struct flash_file_hdr_g2 *fhdr;
@@ -3214,12 +3485,9 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3214 struct image_hdr *img_hdr_ptr = NULL; 3485 struct image_hdr *img_hdr_ptr = NULL;
3215 struct be_dma_mem flash_cmd; 3486 struct be_dma_mem flash_cmd;
3216 const u8 *p; 3487 const u8 *p;
3217 int status = 0, i = 0, num_imgs = 0; 3488 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3218 3489
3219 p = fw->data; 3490 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3220 fhdr = (struct flash_file_hdr_g2 *) p;
3221
3222 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
3223 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size, 3491 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3224 &flash_cmd.dma, GFP_KERNEL); 3492 &flash_cmd.dma, GFP_KERNEL);
3225 if (!flash_cmd.va) { 3493 if (!flash_cmd.va) {
@@ -3229,27 +3497,32 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3229 goto be_fw_exit; 3497 goto be_fw_exit;
3230 } 3498 }
3231 3499
3232 if ((adapter->generation == BE_GEN3) && 3500 p = fw->data;
3233 (get_ufigen_type(fhdr) == BE_GEN3)) { 3501 fhdr = (struct flash_file_hdr_g2 *)p;
3234 fhdr3 = (struct flash_file_hdr_g3 *) fw->data; 3502
3235 num_imgs = le32_to_cpu(fhdr3->num_imgs); 3503 ufi_type = be_get_ufi_type(adapter, fhdr);
3236 for (i = 0; i < num_imgs; i++) { 3504
3237 img_hdr_ptr = (struct image_hdr *) (fw->data + 3505 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
3238 (sizeof(struct flash_file_hdr_g3) + 3506 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3239 i * sizeof(struct image_hdr))); 3507 for (i = 0; i < num_imgs; i++) {
3240 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) 3508 img_hdr_ptr = (struct image_hdr *)(fw->data +
3241 status = be_flash_data(adapter, fw, &flash_cmd, 3509 (sizeof(struct flash_file_hdr_g3) +
3242 num_imgs); 3510 i * sizeof(struct image_hdr)));
3511 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3512 if (ufi_type == UFI_TYPE4)
3513 status = be_flash_skyhawk(adapter, fw,
3514 &flash_cmd, num_imgs);
3515 else if (ufi_type == UFI_TYPE3)
3516 status = be_flash_BEx(adapter, fw, &flash_cmd,
3517 num_imgs);
3243 } 3518 }
3244 } else if ((adapter->generation == BE_GEN2) &&
3245 (get_ufigen_type(fhdr) == BE_GEN2)) {
3246 status = be_flash_data(adapter, fw, &flash_cmd, 0);
3247 } else {
3248 dev_err(&adapter->pdev->dev,
3249 "UFI and Interface are not compatible for flashing\n");
3250 status = -1;
3251 } 3519 }
3252 3520
3521 if (ufi_type == UFI_TYPE2)
3522 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3523 else if (ufi_type == -1)
3524 status = -1;
3525
3253 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va, 3526 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3254 flash_cmd.dma); 3527 flash_cmd.dma);
3255 if (status) { 3528 if (status) {
@@ -3344,80 +3617,47 @@ static void be_netdev_init(struct net_device *netdev)
3344 3617
3345static void be_unmap_pci_bars(struct be_adapter *adapter) 3618static void be_unmap_pci_bars(struct be_adapter *adapter)
3346{ 3619{
3347 if (adapter->csr)
3348 iounmap(adapter->csr);
3349 if (adapter->db) 3620 if (adapter->db)
3350 iounmap(adapter->db); 3621 pci_iounmap(adapter->pdev, adapter->db);
3351 if (adapter->roce_db.base)
3352 pci_iounmap(adapter->pdev, adapter->roce_db.base);
3353} 3622}
3354 3623
3355static int lancer_roce_map_pci_bars(struct be_adapter *adapter) 3624static int db_bar(struct be_adapter *adapter)
3356{ 3625{
3357 struct pci_dev *pdev = adapter->pdev; 3626 if (lancer_chip(adapter) || !be_physfn(adapter))
3358 u8 __iomem *addr; 3627 return 0;
3359 3628 else
3360 addr = pci_iomap(pdev, 2, 0); 3629 return 4;
3361 if (addr == NULL) 3630}
3362 return -ENOMEM;
3363 3631
3364 adapter->roce_db.base = addr; 3632static int be_roce_map_pci_bars(struct be_adapter *adapter)
3365 adapter->roce_db.io_addr = pci_resource_start(pdev, 2); 3633{
3366 adapter->roce_db.size = 8192; 3634 if (skyhawk_chip(adapter)) {
3367 adapter->roce_db.total_size = pci_resource_len(pdev, 2); 3635 adapter->roce_db.size = 4096;
3636 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3637 db_bar(adapter));
3638 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3639 db_bar(adapter));
3640 }
3368 return 0; 3641 return 0;
3369} 3642}
3370 3643
3371static int be_map_pci_bars(struct be_adapter *adapter) 3644static int be_map_pci_bars(struct be_adapter *adapter)
3372{ 3645{
3373 u8 __iomem *addr; 3646 u8 __iomem *addr;
3374 int db_reg; 3647 u32 sli_intf;
3375 3648
3376 if (lancer_chip(adapter)) { 3649 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3377 if (be_type_2_3(adapter)) { 3650 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3378 addr = ioremap_nocache( 3651 SLI_INTF_IF_TYPE_SHIFT;
3379 pci_resource_start(adapter->pdev, 0),
3380 pci_resource_len(adapter->pdev, 0));
3381 if (addr == NULL)
3382 return -ENOMEM;
3383 adapter->db = addr;
3384 }
3385 if (adapter->if_type == SLI_INTF_TYPE_3) {
3386 if (lancer_roce_map_pci_bars(adapter))
3387 goto pci_map_err;
3388 }
3389 return 0;
3390 }
3391
3392 if (be_physfn(adapter)) {
3393 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3394 pci_resource_len(adapter->pdev, 2));
3395 if (addr == NULL)
3396 return -ENOMEM;
3397 adapter->csr = addr;
3398 }
3399 3652
3400 if (adapter->generation == BE_GEN2) { 3653 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
3401 db_reg = 4;
3402 } else {
3403 if (be_physfn(adapter))
3404 db_reg = 4;
3405 else
3406 db_reg = 0;
3407 }
3408 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3409 pci_resource_len(adapter->pdev, db_reg));
3410 if (addr == NULL) 3654 if (addr == NULL)
3411 goto pci_map_err; 3655 goto pci_map_err;
3412 adapter->db = addr; 3656 adapter->db = addr;
3413 if (adapter->sli_family == SKYHAWK_SLI_FAMILY) { 3657
3414 adapter->roce_db.size = 4096; 3658 be_roce_map_pci_bars(adapter);
3415 adapter->roce_db.io_addr =
3416 pci_resource_start(adapter->pdev, db_reg);
3417 adapter->roce_db.total_size =
3418 pci_resource_len(adapter->pdev, db_reg);
3419 }
3420 return 0; 3659 return 0;
3660
3421pci_map_err: 3661pci_map_err:
3422 be_unmap_pci_bars(adapter); 3662 be_unmap_pci_bars(adapter);
3423 return -ENOMEM; 3663 return -ENOMEM;
@@ -3437,7 +3677,6 @@ static void be_ctrl_cleanup(struct be_adapter *adapter)
3437 if (mem->va) 3677 if (mem->va)
3438 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va, 3678 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3439 mem->dma); 3679 mem->dma);
3440 kfree(adapter->pmac_id);
3441} 3680}
3442 3681
3443static int be_ctrl_init(struct be_adapter *adapter) 3682static int be_ctrl_init(struct be_adapter *adapter)
@@ -3445,8 +3684,14 @@ static int be_ctrl_init(struct be_adapter *adapter)
3445 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced; 3684 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3446 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem; 3685 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3447 struct be_dma_mem *rx_filter = &adapter->rx_filter; 3686 struct be_dma_mem *rx_filter = &adapter->rx_filter;
3687 u32 sli_intf;
3448 int status; 3688 int status;
3449 3689
3690 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3691 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3692 SLI_INTF_FAMILY_SHIFT;
3693 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3694
3450 status = be_map_pci_bars(adapter); 3695 status = be_map_pci_bars(adapter);
3451 if (status) 3696 if (status)
3452 goto done; 3697 goto done;
@@ -3473,13 +3718,6 @@ static int be_ctrl_init(struct be_adapter *adapter)
3473 goto free_mbox; 3718 goto free_mbox;
3474 } 3719 }
3475 memset(rx_filter->va, 0, rx_filter->size); 3720 memset(rx_filter->va, 0, rx_filter->size);
3476
3477 /* primary mac needs 1 pmac entry */
3478 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3479 sizeof(*adapter->pmac_id), GFP_KERNEL);
3480 if (!adapter->pmac_id)
3481 return -ENOMEM;
3482
3483 mutex_init(&adapter->mbox_lock); 3721 mutex_init(&adapter->mbox_lock);
3484 spin_lock_init(&adapter->mcc_lock); 3722 spin_lock_init(&adapter->mcc_lock);
3485 spin_lock_init(&adapter->mcc_cq_lock); 3723 spin_lock_init(&adapter->mcc_cq_lock);
@@ -3512,14 +3750,14 @@ static int be_stats_init(struct be_adapter *adapter)
3512{ 3750{
3513 struct be_dma_mem *cmd = &adapter->stats_cmd; 3751 struct be_dma_mem *cmd = &adapter->stats_cmd;
3514 3752
3515 if (adapter->generation == BE_GEN2) { 3753 if (lancer_chip(adapter))
3754 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3755 else if (BE2_chip(adapter))
3516 cmd->size = sizeof(struct be_cmd_req_get_stats_v0); 3756 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3517 } else { 3757 else
3518 if (lancer_chip(adapter)) 3758 /* BE3 and Skyhawk */
3519 cmd->size = sizeof(struct lancer_cmd_req_pport_stats); 3759 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3520 else 3760
3521 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3522 }
3523 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma, 3761 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3524 GFP_KERNEL); 3762 GFP_KERNEL);
3525 if (cmd->va == NULL) 3763 if (cmd->va == NULL)
@@ -3573,6 +3811,9 @@ u32 be_get_fw_log_level(struct be_adapter *adapter)
3573 u32 level = 0; 3811 u32 level = 0;
3574 int j; 3812 int j;
3575 3813
3814 if (lancer_chip(adapter))
3815 return 0;
3816
3576 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); 3817 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3577 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); 3818 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3578 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, 3819 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
@@ -3598,26 +3839,12 @@ u32 be_get_fw_log_level(struct be_adapter *adapter)
3598err: 3839err:
3599 return level; 3840 return level;
3600} 3841}
3842
3601static int be_get_initial_config(struct be_adapter *adapter) 3843static int be_get_initial_config(struct be_adapter *adapter)
3602{ 3844{
3603 int status; 3845 int status;
3604 u32 level; 3846 u32 level;
3605 3847
3606 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3607 &adapter->function_mode, &adapter->function_caps);
3608 if (status)
3609 return status;
3610
3611 if (adapter->function_mode & FLEX10_MODE)
3612 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3613 else
3614 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3615
3616 if (be_physfn(adapter))
3617 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3618 else
3619 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3620
3621 status = be_cmd_get_cntl_attributes(adapter); 3848 status = be_cmd_get_cntl_attributes(adapter);
3622 if (status) 3849 if (status)
3623 return status; 3850 return status;
@@ -3642,55 +3869,6 @@ static int be_get_initial_config(struct be_adapter *adapter)
3642 return 0; 3869 return 0;
3643} 3870}
3644 3871
3645static int be_dev_type_check(struct be_adapter *adapter)
3646{
3647 struct pci_dev *pdev = adapter->pdev;
3648 u32 sli_intf = 0, if_type;
3649
3650 switch (pdev->device) {
3651 case BE_DEVICE_ID1:
3652 case OC_DEVICE_ID1:
3653 adapter->generation = BE_GEN2;
3654 break;
3655 case BE_DEVICE_ID2:
3656 case OC_DEVICE_ID2:
3657 adapter->generation = BE_GEN3;
3658 break;
3659 case OC_DEVICE_ID3:
3660 case OC_DEVICE_ID4:
3661 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3662 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3663 SLI_INTF_IF_TYPE_SHIFT;
3664 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3665 SLI_INTF_IF_TYPE_SHIFT;
3666 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3667 !be_type_2_3(adapter)) {
3668 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3669 return -EINVAL;
3670 }
3671 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3672 SLI_INTF_FAMILY_SHIFT);
3673 adapter->generation = BE_GEN3;
3674 break;
3675 case OC_DEVICE_ID5:
3676 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3677 if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
3678 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3679 return -EINVAL;
3680 }
3681 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3682 SLI_INTF_FAMILY_SHIFT);
3683 adapter->generation = BE_GEN3;
3684 break;
3685 default:
3686 adapter->generation = 0;
3687 }
3688
3689 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3690 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3691 return 0;
3692}
3693
3694static int lancer_recover_func(struct be_adapter *adapter) 3872static int lancer_recover_func(struct be_adapter *adapter)
3695{ 3873{
3696 int status; 3874 int status;
@@ -3721,8 +3899,9 @@ static int lancer_recover_func(struct be_adapter *adapter)
3721 "Adapter SLIPORT recovery succeeded\n"); 3899 "Adapter SLIPORT recovery succeeded\n");
3722 return 0; 3900 return 0;
3723err: 3901err:
3724 dev_err(&adapter->pdev->dev, 3902 if (adapter->eeh_error)
3725 "Adapter SLIPORT recovery failed\n"); 3903 dev_err(&adapter->pdev->dev,
3904 "Adapter SLIPORT recovery failed\n");
3726 3905
3727 return status; 3906 return status;
3728} 3907}
@@ -3845,11 +4024,6 @@ static int __devinit be_probe(struct pci_dev *pdev,
3845 adapter = netdev_priv(netdev); 4024 adapter = netdev_priv(netdev);
3846 adapter->pdev = pdev; 4025 adapter->pdev = pdev;
3847 pci_set_drvdata(pdev, adapter); 4026 pci_set_drvdata(pdev, adapter);
3848
3849 status = be_dev_type_check(adapter);
3850 if (status)
3851 goto free_netdev;
3852
3853 adapter->netdev = netdev; 4027 adapter->netdev = netdev;
3854 SET_NETDEV_DEV(netdev, &pdev->dev); 4028 SET_NETDEV_DEV(netdev, &pdev->dev);
3855 4029
@@ -4023,9 +4197,6 @@ static void be_shutdown(struct pci_dev *pdev)
4023 4197
4024 netif_device_detach(adapter->netdev); 4198 netif_device_detach(adapter->netdev);
4025 4199
4026 if (adapter->wol)
4027 be_setup_wol(adapter, true);
4028
4029 be_cmd_reset_function(adapter); 4200 be_cmd_reset_function(adapter);
4030 4201
4031 pci_disable_device(pdev); 4202 pci_disable_device(pdev);
@@ -4061,9 +4232,13 @@ static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4061 4232
4062 /* The error could cause the FW to trigger a flash debug dump. 4233 /* The error could cause the FW to trigger a flash debug dump.
4063 * Resetting the card while flash dump is in progress 4234 * Resetting the card while flash dump is in progress
4064 * can cause it not to recover; wait for it to finish 4235 * can cause it not to recover; wait for it to finish.
4236 * Wait only for first function as it is needed only once per
4237 * adapter.
4065 */ 4238 */
4066 ssleep(30); 4239 if (pdev->devfn == 0)
4240 ssleep(30);
4241
4067 return PCI_ERS_RESULT_NEED_RESET; 4242 return PCI_ERS_RESULT_NEED_RESET;
4068} 4243}
4069 4244
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c
index deecc44b3617..55d32aa0a093 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.c
+++ b/drivers/net/ethernet/emulex/benet/be_roce.c
@@ -47,10 +47,7 @@ static void _be_roce_dev_add(struct be_adapter *adapter)
47 dev_info.dpp_unmapped_len = 0; 47 dev_info.dpp_unmapped_len = 0;
48 } 48 }
49 dev_info.pdev = adapter->pdev; 49 dev_info.pdev = adapter->pdev;
50 if (adapter->sli_family == SKYHAWK_SLI_FAMILY) 50 dev_info.db = adapter->db;
51 dev_info.db = adapter->db;
52 else
53 dev_info.db = adapter->roce_db.base;
54 dev_info.unmapped_db = adapter->roce_db.io_addr; 51 dev_info.unmapped_db = adapter->roce_db.io_addr;
55 dev_info.db_page_size = adapter->roce_db.size; 52 dev_info.db_page_size = adapter->roce_db.size;
56 dev_info.db_total_size = adapter->roce_db.total_size; 53 dev_info.db_total_size = adapter->roce_db.total_size;
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index feff51664dcf..5ba6e1cbd346 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -92,4 +92,13 @@ config GIANFAR
92 This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx, 92 This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
93 and MPC86xx family of chips, and the FEC on the 8540. 93 and MPC86xx family of chips, and the FEC on the 8540.
94 94
95config FEC_PTP
96 bool "PTP Hardware Clock (PHC)"
97 depends on FEC && ARCH_MXC
98 select PTP_1588_CLOCK
99 default y if SOC_IMX6Q
 100 ---help---
101 Say Y here if you want to use PTP Hardware Clock (PHC) in the
102 driver. Only the basic clock operations have been implemented.
103
95endif # NET_VENDOR_FREESCALE 104endif # NET_VENDOR_FREESCALE
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index 3d1839afff65..d4d19b3d00ae 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -3,6 +3,7 @@
3# 3#
4 4
5obj-$(CONFIG_FEC) += fec.o 5obj-$(CONFIG_FEC) += fec.o
6obj-$(CONFIG_FEC_PTP) += fec_ptp.o
6obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o 7obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
7ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y) 8ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
8 obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o 9 obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index fffd20528b5d..2665162ff4e5 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -140,21 +140,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
140#endif 140#endif
141#endif /* CONFIG_M5272 */ 141#endif /* CONFIG_M5272 */
142 142
143/* The number of Tx and Rx buffers. These are allocated from the page
 144 * pool. The code may assume these are power of two, so it is best
145 * to keep them that size.
146 * We don't need to allocate pages for the transmitter. We just use
147 * the skbuffer directly.
148 */
149#define FEC_ENET_RX_PAGES 8
150#define FEC_ENET_RX_FRSIZE 2048
151#define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE)
152#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
153#define FEC_ENET_TX_FRSIZE 2048
154#define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE)
155#define TX_RING_SIZE 16 /* Must be power of two */
156#define TX_RING_MOD_MASK 15 /* for this to work */
157
158#if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE) 143#if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE)
159#error "FEC: descriptor ring size constants too large" 144#error "FEC: descriptor ring size constants too large"
160#endif 145#endif
@@ -179,9 +164,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
179#define PKT_MINBUF_SIZE 64 164#define PKT_MINBUF_SIZE 64
180#define PKT_MAXBLR_SIZE 1520 165#define PKT_MAXBLR_SIZE 1520
181 166
182/* This device has up to three irqs on some platforms */
183#define FEC_IRQ_NUM 3
184
185/* 167/*
186 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame 168 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
187 * size bits. Other FEC hardware does not, so we need to take that into 169 * size bits. Other FEC hardware does not, so we need to take that into
@@ -194,61 +176,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
194#define OPT_FRAME_SIZE 0 176#define OPT_FRAME_SIZE 0
195#endif 177#endif
196 178
197/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
198 * tx_bd_base always point to the base of the buffer descriptors. The
199 * cur_rx and cur_tx point to the currently available buffer.
200 * The dirty_tx tracks the current buffer that is being sent by the
201 * controller. The cur_tx and dirty_tx are equal under both completely
202 * empty and completely full conditions. The empty/ready indicator in
203 * the buffer descriptor determines the actual condition.
204 */
205struct fec_enet_private {
206 /* Hardware registers of the FEC device */
207 void __iomem *hwp;
208
209 struct net_device *netdev;
210
211 struct clk *clk_ipg;
212 struct clk *clk_ahb;
213
214 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
215 unsigned char *tx_bounce[TX_RING_SIZE];
216 struct sk_buff* tx_skbuff[TX_RING_SIZE];
217 struct sk_buff* rx_skbuff[RX_RING_SIZE];
218 ushort skb_cur;
219 ushort skb_dirty;
220
221 /* CPM dual port RAM relative addresses */
222 dma_addr_t bd_dma;
223 /* Address of Rx and Tx buffers */
224 struct bufdesc *rx_bd_base;
225 struct bufdesc *tx_bd_base;
226 /* The next free ring entry */
227 struct bufdesc *cur_rx, *cur_tx;
228 /* The ring entries to be free()ed */
229 struct bufdesc *dirty_tx;
230
231 uint tx_full;
232 /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
233 spinlock_t hw_lock;
234
235 struct platform_device *pdev;
236
237 int opened;
238 int dev_id;
239
240 /* Phylib and MDIO interface */
241 struct mii_bus *mii_bus;
242 struct phy_device *phy_dev;
243 int mii_timeout;
244 uint phy_speed;
245 phy_interface_t phy_interface;
246 int link;
247 int full_duplex;
248 struct completion mdio_done;
249 int irq[FEC_IRQ_NUM];
250};
251
252/* FEC MII MMFR bits definition */ 179/* FEC MII MMFR bits definition */
253#define FEC_MMFR_ST (1 << 30) 180#define FEC_MMFR_ST (1 << 30)
254#define FEC_MMFR_OP_READ (2 << 28) 181#define FEC_MMFR_OP_READ (2 << 28)
@@ -353,6 +280,17 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
353 | BD_ENET_TX_LAST | BD_ENET_TX_TC); 280 | BD_ENET_TX_LAST | BD_ENET_TX_TC);
354 bdp->cbd_sc = status; 281 bdp->cbd_sc = status;
355 282
283#ifdef CONFIG_FEC_PTP
284 bdp->cbd_bdu = 0;
285 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
286 fep->hwts_tx_en)) {
287 bdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT);
288 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
289 } else {
290
291 bdp->cbd_esc = BD_ENET_TX_INT;
292 }
293#endif
356 /* Trigger transmission start */ 294 /* Trigger transmission start */
357 writel(0, fep->hwp + FEC_X_DES_ACTIVE); 295 writel(0, fep->hwp + FEC_X_DES_ACTIVE);
358 296
@@ -510,10 +448,17 @@ fec_restart(struct net_device *ndev, int duplex)
510 writel(1 << 8, fep->hwp + FEC_X_WMRK); 448 writel(1 << 8, fep->hwp + FEC_X_WMRK);
511 } 449 }
512 450
451#ifdef CONFIG_FEC_PTP
452 ecntl |= (1 << 4);
453#endif
454
513 /* And last, enable the transmit and receive processing */ 455 /* And last, enable the transmit and receive processing */
514 writel(ecntl, fep->hwp + FEC_ECNTRL); 456 writel(ecntl, fep->hwp + FEC_ECNTRL);
515 writel(0, fep->hwp + FEC_R_DES_ACTIVE); 457 writel(0, fep->hwp + FEC_R_DES_ACTIVE);
516 458
459#ifdef CONFIG_FEC_PTP
460 fec_ptp_start_cyclecounter(ndev);
461#endif
517 /* Enable interrupts we wish to service */ 462 /* Enable interrupts we wish to service */
518 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); 463 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
519} 464}
@@ -599,6 +544,19 @@ fec_enet_tx(struct net_device *ndev)
599 ndev->stats.tx_packets++; 544 ndev->stats.tx_packets++;
600 } 545 }
601 546
547#ifdef CONFIG_FEC_PTP
548 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
549 struct skb_shared_hwtstamps shhwtstamps;
550 unsigned long flags;
551
552 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
553 spin_lock_irqsave(&fep->tmreg_lock, flags);
554 shhwtstamps.hwtstamp = ns_to_ktime(
555 timecounter_cyc2time(&fep->tc, bdp->ts));
556 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
557 skb_tstamp_tx(skb, &shhwtstamps);
558 }
559#endif
602 if (status & BD_ENET_TX_READY) 560 if (status & BD_ENET_TX_READY)
603 printk("HEY! Enet xmit interrupt and TX_READY.\n"); 561 printk("HEY! Enet xmit interrupt and TX_READY.\n");
604 562
@@ -725,6 +683,21 @@ fec_enet_rx(struct net_device *ndev)
725 skb_put(skb, pkt_len - 4); /* Make room */ 683 skb_put(skb, pkt_len - 4); /* Make room */
726 skb_copy_to_linear_data(skb, data, pkt_len - 4); 684 skb_copy_to_linear_data(skb, data, pkt_len - 4);
727 skb->protocol = eth_type_trans(skb, ndev); 685 skb->protocol = eth_type_trans(skb, ndev);
686#ifdef CONFIG_FEC_PTP
687 /* Get receive timestamp from the skb */
688 if (fep->hwts_rx_en) {
689 struct skb_shared_hwtstamps *shhwtstamps =
690 skb_hwtstamps(skb);
691 unsigned long flags;
692
693 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
694
695 spin_lock_irqsave(&fep->tmreg_lock, flags);
696 shhwtstamps->hwtstamp = ns_to_ktime(
697 timecounter_cyc2time(&fep->tc, bdp->ts));
698 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
699 }
700#endif
728 if (!skb_defer_rx_timestamp(skb)) 701 if (!skb_defer_rx_timestamp(skb))
729 netif_rx(skb); 702 netif_rx(skb);
730 } 703 }
@@ -739,6 +712,12 @@ rx_processing_done:
739 status |= BD_ENET_RX_EMPTY; 712 status |= BD_ENET_RX_EMPTY;
740 bdp->cbd_sc = status; 713 bdp->cbd_sc = status;
741 714
715#ifdef CONFIG_FEC_PTP
716 bdp->cbd_esc = BD_ENET_RX_INT;
717 bdp->cbd_prot = 0;
718 bdp->cbd_bdu = 0;
719#endif
720
742 /* Update BD pointer to next entry */ 721 /* Update BD pointer to next entry */
743 if (status & BD_ENET_RX_WRAP) 722 if (status & BD_ENET_RX_WRAP)
744 bdp = fep->rx_bd_base; 723 bdp = fep->rx_bd_base;
@@ -1178,6 +1157,10 @@ static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
1178 if (!phydev) 1157 if (!phydev)
1179 return -ENODEV; 1158 return -ENODEV;
1180 1159
1160#ifdef CONFIG_FEC_PTP
1161 if (cmd == SIOCSHWTSTAMP)
1162 return fec_ptp_ioctl(ndev, rq, cmd);
1163#endif
1181 return phy_mii_ioctl(phydev, rq, cmd); 1164 return phy_mii_ioctl(phydev, rq, cmd);
1182} 1165}
1183 1166
@@ -1224,6 +1207,9 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
1224 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data, 1207 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
1225 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); 1208 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
1226 bdp->cbd_sc = BD_ENET_RX_EMPTY; 1209 bdp->cbd_sc = BD_ENET_RX_EMPTY;
1210#ifdef CONFIG_FEC_PTP
1211 bdp->cbd_esc = BD_ENET_RX_INT;
1212#endif
1227 bdp++; 1213 bdp++;
1228 } 1214 }
1229 1215
@@ -1237,6 +1223,10 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
1237 1223
1238 bdp->cbd_sc = 0; 1224 bdp->cbd_sc = 0;
1239 bdp->cbd_bufaddr = 0; 1225 bdp->cbd_bufaddr = 0;
1226
1227#ifdef CONFIG_FEC_PTP
1228 bdp->cbd_esc = BD_ENET_RX_INT;
1229#endif
1240 bdp++; 1230 bdp++;
1241 } 1231 }
1242 1232
@@ -1638,9 +1628,19 @@ fec_probe(struct platform_device *pdev)
1638 goto failed_clk; 1628 goto failed_clk;
1639 } 1629 }
1640 1630
1631#ifdef CONFIG_FEC_PTP
1632 fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
1633 if (IS_ERR(fep->clk_ptp)) {
1634 ret = PTR_ERR(fep->clk_ptp);
1635 goto failed_clk;
1636 }
1637#endif
1638
1641 clk_prepare_enable(fep->clk_ahb); 1639 clk_prepare_enable(fep->clk_ahb);
1642 clk_prepare_enable(fep->clk_ipg); 1640 clk_prepare_enable(fep->clk_ipg);
1643 1641#ifdef CONFIG_FEC_PTP
1642 clk_prepare_enable(fep->clk_ptp);
1643#endif
1644 reg_phy = devm_regulator_get(&pdev->dev, "phy"); 1644 reg_phy = devm_regulator_get(&pdev->dev, "phy");
1645 if (!IS_ERR(reg_phy)) { 1645 if (!IS_ERR(reg_phy)) {
1646 ret = regulator_enable(reg_phy); 1646 ret = regulator_enable(reg_phy);
@@ -1668,6 +1668,10 @@ fec_probe(struct platform_device *pdev)
1668 if (ret) 1668 if (ret)
1669 goto failed_register; 1669 goto failed_register;
1670 1670
1671#ifdef CONFIG_FEC_PTP
1672 fec_ptp_init(ndev, pdev);
1673#endif
1674
1671 return 0; 1675 return 0;
1672 1676
1673failed_register: 1677failed_register:
@@ -1677,6 +1681,9 @@ failed_init:
1677failed_regulator: 1681failed_regulator:
1678 clk_disable_unprepare(fep->clk_ahb); 1682 clk_disable_unprepare(fep->clk_ahb);
1679 clk_disable_unprepare(fep->clk_ipg); 1683 clk_disable_unprepare(fep->clk_ipg);
1684#ifdef CONFIG_FEC_PTP
1685 clk_disable_unprepare(fep->clk_ptp);
1686#endif
1680failed_pin: 1687failed_pin:
1681failed_clk: 1688failed_clk:
1682 for (i = 0; i < FEC_IRQ_NUM; i++) { 1689 for (i = 0; i < FEC_IRQ_NUM; i++) {
@@ -1709,6 +1716,12 @@ fec_drv_remove(struct platform_device *pdev)
1709 if (irq > 0) 1716 if (irq > 0)
1710 free_irq(irq, ndev); 1717 free_irq(irq, ndev);
1711 } 1718 }
1719#ifdef CONFIG_FEC_PTP
1720 del_timer_sync(&fep->time_keep);
1721 clk_disable_unprepare(fep->clk_ptp);
1722 if (fep->ptp_clock)
1723 ptp_clock_unregister(fep->ptp_clock);
1724#endif
1712 clk_disable_unprepare(fep->clk_ahb); 1725 clk_disable_unprepare(fep->clk_ahb);
1713 clk_disable_unprepare(fep->clk_ipg); 1726 clk_disable_unprepare(fep->clk_ipg);
1714 iounmap(fep->hwp); 1727 iounmap(fep->hwp);
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 8408c627b195..c5a3bc1475c7 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -13,6 +13,12 @@
13#define FEC_H 13#define FEC_H
14/****************************************************************************/ 14/****************************************************************************/
15 15
16#ifdef CONFIG_FEC_PTP
17#include <linux/clocksource.h>
18#include <linux/net_tstamp.h>
19#include <linux/ptp_clock_kernel.h>
20#endif
21
16#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 22#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
17 defined(CONFIG_M520x) || defined(CONFIG_M532x) || \ 23 defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
18 defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28) 24 defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
@@ -88,6 +94,13 @@ struct bufdesc {
88 unsigned short cbd_datlen; /* Data length */ 94 unsigned short cbd_datlen; /* Data length */
89 unsigned short cbd_sc; /* Control and status info */ 95 unsigned short cbd_sc; /* Control and status info */
90 unsigned long cbd_bufaddr; /* Buffer address */ 96 unsigned long cbd_bufaddr; /* Buffer address */
97#ifdef CONFIG_FEC_PTP
98 unsigned long cbd_esc;
99 unsigned long cbd_prot;
100 unsigned long cbd_bdu;
101 unsigned long ts;
102 unsigned short res0[4];
103#endif
91}; 104};
92#else 105#else
93struct bufdesc { 106struct bufdesc {
@@ -147,6 +160,112 @@ struct bufdesc {
147#define BD_ENET_TX_CSL ((ushort)0x0001) 160#define BD_ENET_TX_CSL ((ushort)0x0001)
148#define BD_ENET_TX_STATS ((ushort)0x03ff) /* All status bits */ 161#define BD_ENET_TX_STATS ((ushort)0x03ff) /* All status bits */
149 162
 163/* Enhanced buffer descriptor control/status, used by Ethernet transmit */
164#define BD_ENET_TX_INT 0x40000000
165#define BD_ENET_TX_TS 0x20000000
166
167
168/* This device has up to three irqs on some platforms */
169#define FEC_IRQ_NUM 3
170
171/* The number of Tx and Rx buffers. These are allocated from the page
 172 * pool. The code may assume these are power of two, so it is best
173 * to keep them that size.
174 * We don't need to allocate pages for the transmitter. We just use
175 * the skbuffer directly.
176 */
177
178#define FEC_ENET_RX_PAGES 8
179#define FEC_ENET_RX_FRSIZE 2048
180#define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE)
181#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
182#define FEC_ENET_TX_FRSIZE 2048
183#define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE)
184#define TX_RING_SIZE 16 /* Must be power of two */
185#define TX_RING_MOD_MASK 15 /* for this to work */
186
187#define BD_ENET_RX_INT 0x00800000
188#define BD_ENET_RX_PTP ((ushort)0x0400)
189
190/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
191 * tx_bd_base always point to the base of the buffer descriptors. The
192 * cur_rx and cur_tx point to the currently available buffer.
193 * The dirty_tx tracks the current buffer that is being sent by the
194 * controller. The cur_tx and dirty_tx are equal under both completely
195 * empty and completely full conditions. The empty/ready indicator in
196 * the buffer descriptor determines the actual condition.
197 */
198struct fec_enet_private {
199 /* Hardware registers of the FEC device */
200 void __iomem *hwp;
201
202 struct net_device *netdev;
203
204 struct clk *clk_ipg;
205 struct clk *clk_ahb;
206#ifdef CONFIG_FEC_PTP
207 struct clk *clk_ptp;
208#endif
209
210 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
211 unsigned char *tx_bounce[TX_RING_SIZE];
212 struct sk_buff *tx_skbuff[TX_RING_SIZE];
213 struct sk_buff *rx_skbuff[RX_RING_SIZE];
214 ushort skb_cur;
215 ushort skb_dirty;
216
217 /* CPM dual port RAM relative addresses */
218 dma_addr_t bd_dma;
219 /* Address of Rx and Tx buffers */
220 struct bufdesc *rx_bd_base;
221 struct bufdesc *tx_bd_base;
222 /* The next free ring entry */
223 struct bufdesc *cur_rx, *cur_tx;
224 /* The ring entries to be free()ed */
225 struct bufdesc *dirty_tx;
226
227 uint tx_full;
228 /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
229 spinlock_t hw_lock;
230
231 struct platform_device *pdev;
232
233 int opened;
234 int dev_id;
235
236 /* Phylib and MDIO interface */
237 struct mii_bus *mii_bus;
238 struct phy_device *phy_dev;
239 int mii_timeout;
240 uint phy_speed;
241 phy_interface_t phy_interface;
242 int link;
243 int full_duplex;
244 struct completion mdio_done;
245 int irq[FEC_IRQ_NUM];
246
247#ifdef CONFIG_FEC_PTP
248 struct ptp_clock *ptp_clock;
249 struct ptp_clock_info ptp_caps;
250 unsigned long last_overflow_check;
251 spinlock_t tmreg_lock;
252 struct cyclecounter cc;
253 struct timecounter tc;
254 int rx_hwtstamp_filter;
255 u32 base_incval;
256 u32 cycle_speed;
257 int hwts_rx_en;
258 int hwts_tx_en;
259 struct timer_list time_keep;
260#endif
261
262};
263
264#ifdef CONFIG_FEC_PTP
265void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev);
266void fec_ptp_start_cyclecounter(struct net_device *ndev);
267int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd);
268#endif
150 269
151/****************************************************************************/ 270/****************************************************************************/
152#endif /* FEC_H */ 271#endif /* FEC_H */
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
new file mode 100644
index 000000000000..c40526c78c20
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -0,0 +1,383 @@
1/*
2 * Fast Ethernet Controller (ENET) PTP driver for MX6x.
3 *
4 * Copyright (C) 2012 Freescale Semiconductor, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 */
19
20#include <linux/module.h>
21#include <linux/kernel.h>
22#include <linux/string.h>
23#include <linux/ptrace.h>
24#include <linux/errno.h>
25#include <linux/ioport.h>
26#include <linux/slab.h>
27#include <linux/interrupt.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/delay.h>
31#include <linux/netdevice.h>
32#include <linux/etherdevice.h>
33#include <linux/skbuff.h>
34#include <linux/spinlock.h>
35#include <linux/workqueue.h>
36#include <linux/bitops.h>
37#include <linux/io.h>
38#include <linux/irq.h>
39#include <linux/clk.h>
40#include <linux/platform_device.h>
41#include <linux/phy.h>
42#include <linux/fec.h>
43#include <linux/of.h>
44#include <linux/of_device.h>
45#include <linux/of_gpio.h>
46#include <linux/of_net.h>
47
48#include "fec.h"
49
50/* FEC 1588 register bits */
51#define FEC_T_CTRL_SLAVE 0x00002000
52#define FEC_T_CTRL_CAPTURE 0x00000800
53#define FEC_T_CTRL_RESTART 0x00000200
54#define FEC_T_CTRL_PERIOD_RST 0x00000030
55#define FEC_T_CTRL_PERIOD_EN 0x00000010
56#define FEC_T_CTRL_ENABLE 0x00000001
57
58#define FEC_T_INC_MASK 0x0000007f
59#define FEC_T_INC_OFFSET 0
60#define FEC_T_INC_CORR_MASK 0x00007f00
61#define FEC_T_INC_CORR_OFFSET 8
62
63#define FEC_ATIME_CTRL 0x400
64#define FEC_ATIME 0x404
65#define FEC_ATIME_EVT_OFFSET 0x408
66#define FEC_ATIME_EVT_PERIOD 0x40c
67#define FEC_ATIME_CORR 0x410
68#define FEC_ATIME_INC 0x414
69#define FEC_TS_TIMESTAMP 0x418
70
71#define FEC_CC_MULT (1 << 31)
72/**
73 * fec_ptp_read - read raw cycle counter (to be used by time counter)
74 * @cc: the cyclecounter structure
75 *
76 * This function reads the cycle counter registers; it is called through
77 * the cyclecounter structure, which uses it to construct a ns counter
78 * from the arbitrary fixed-point hardware registers.
79 */
80static cycle_t fec_ptp_read(const struct cyclecounter *cc)
81{
82 struct fec_enet_private *fep =
83 container_of(cc, struct fec_enet_private, cc);
84 u32 tempval;
85
86 tempval = readl(fep->hwp + FEC_ATIME_CTRL);
87 tempval |= FEC_T_CTRL_CAPTURE;
88 writel(tempval, fep->hwp + FEC_ATIME_CTRL);
89
90 return readl(fep->hwp + FEC_ATIME);
91}
92
93/**
94 * fec_ptp_start_cyclecounter - create the cycle counter from hw
95 * @ndev: network device
96 *
97 * This function initializes the timecounter and cyclecounter
98 * structures for use in generating a ns counter from the arbitrary
99 * fixed-point cycles registers in the hardware.
100 */
101void fec_ptp_start_cyclecounter(struct net_device *ndev)
102{
103 struct fec_enet_private *fep = netdev_priv(ndev);
104 unsigned long flags;
105 int inc;
106
107 inc = 1000000000 / clk_get_rate(fep->clk_ptp);
108
109 /* grab the ptp lock */
110 spin_lock_irqsave(&fep->tmreg_lock, flags);
111
112 /* 1ns counter */
113 writel(inc << FEC_T_INC_OFFSET, fep->hwp + FEC_ATIME_INC);
114
115 /* use free running count */
116 writel(0, fep->hwp + FEC_ATIME_EVT_PERIOD);
117
118 writel(FEC_T_CTRL_ENABLE, fep->hwp + FEC_ATIME_CTRL);
119
120 memset(&fep->cc, 0, sizeof(fep->cc));
121 fep->cc.read = fec_ptp_read;
122 fep->cc.mask = CLOCKSOURCE_MASK(32);
123 fep->cc.shift = 31;
124 fep->cc.mult = FEC_CC_MULT;
125
126 /* reset the ns time counter */
127 timecounter_init(&fep->tc, &fep->cc, ktime_to_ns(ktime_get_real()));
128
129 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
130}
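
With a 1 ns tick programmed into FEC_ATIME_INC, the mult/shift pair chosen here makes the timecounter conversion exactly 1:1, since cyclecounter_cyc2ns() computes ns = (cycles * mult) >> shift. A worked sketch of that arithmetic (values illustrative):

	/* Worked example of the cyclecounter scaling chosen above. */
	u64 cycles = 1000;                      /* 1000 hardware ticks of 1 ns */
	u64 ns = (cycles * (1ULL << 31)) >> 31; /* cycles * mult >> shift */
	/* ns == 1000: the 1:1 mapping survives the fixed-point round trip. */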
131
132/**
133 * fec_ptp_adjfreq - adjust ptp cycle frequency
134 * @ptp: the ptp clock structure
135 * @ppb: parts per billion adjustment from base
136 *
137 * Adjust the frequency of the ptp cycle counter by the
138 * indicated ppb from the base frequency.
139 *
140 * Because ENET hardware frequency adjustment is complex,
141 * a software method (scaling the cyclecounter mult) is used instead.
142 */
143static int fec_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
144{
145 u64 diff;
146 unsigned long flags;
147 int neg_adj = 0;
148 u32 mult = FEC_CC_MULT;
149
150 struct fec_enet_private *fep =
151 container_of(ptp, struct fec_enet_private, ptp_caps);
152
153 if (ppb < 0) {
154 ppb = -ppb;
155 neg_adj = 1;
156 }
157
158 diff = mult;
159 diff *= ppb;
160 diff = div_u64(diff, 1000000000ULL);
161
162 spin_lock_irqsave(&fep->tmreg_lock, flags);
163	/*
164	 * Dummy read to update cycle_last in the timecounter to now,
165	 * so the adjusted mult takes effect on the next call to
166	 * timecounter_read().
167	 */
168 timecounter_read(&fep->tc);
169
170 fep->cc.mult = neg_adj ? mult - diff : mult + diff;
171
172 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
173
174 return 0;
175}
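
The mult scaling above is plain fixed-point arithmetic; for example, a request of ppb = +100 works out as follows (illustrative values):

	/* Worked example for ppb = +100, per the arithmetic in fec_ptp_adjfreq: */
	u64 diff = ((u64)(1UL << 31) * 100) / 1000000000ULL; /* = 214 */
	u32 mult = (1UL << 31) + 214;                        /* new fep->cc.mult */
	/* The synthesized ns count now runs ~100 ppb fast vs. the raw counter. */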
176
177/**
178 * fec_ptp_adjtime
179 * @ptp: the ptp clock structure
180 * @delta: offset to adjust the cycle counter by
181 *
182 * adjust the timer by resetting the timecounter structure.
183 */
184static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
185{
186 struct fec_enet_private *fep =
187 container_of(ptp, struct fec_enet_private, ptp_caps);
188 unsigned long flags;
189 u64 now;
190
191 spin_lock_irqsave(&fep->tmreg_lock, flags);
192
193 now = timecounter_read(&fep->tc);
194 now += delta;
195
196 /* reset the timecounter */
197 timecounter_init(&fep->tc, &fep->cc, now);
198
199 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
200
201 return 0;
202}
203
204/**
205 * fec_ptp_gettime
206 * @ptp: the ptp clock structure
207 * @ts: timespec structure to hold the current time value
208 *
209 * read the timecounter and return the current value in ns,
210 * after converting it into a struct timespec.
211 */
212static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
213{
214	struct fec_enet_private *fep =
215	    container_of(ptp, struct fec_enet_private, ptp_caps);
216	u64 ns;
217	u32 remainder;
218	unsigned long flags;
219
220	spin_lock_irqsave(&fep->tmreg_lock, flags);
221	ns = timecounter_read(&fep->tc);
222	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
223
224 ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
225 ts->tv_nsec = remainder;
226
227 return 0;
228}
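
div_u64_rem() performs the 64-bit division and hands back the remainder in one call, which is exactly the second/nanosecond split a timespec needs; for example (value illustrative):

	/* Splitting a raw ns value into a timespec: */
	u64 ns = 5000000123ULL;
	u32 rem;
	u64 sec = div_u64_rem(ns, 1000000000ULL, &rem);
	/* sec == 5, rem == 123: tv_sec = 5, tv_nsec = 123 */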
229
230/**
231 * fec_ptp_settime
232 * @ptp: the ptp clock structure
233 * @ts: the timespec containing the new time for the cycle counter
234 *
235 * reset the timecounter to use a new base value instead of the kernel
236 * wall timer value.
237 */
238static int fec_ptp_settime(struct ptp_clock_info *ptp,
239 const struct timespec *ts)
240{
241 struct fec_enet_private *fep =
242 container_of(ptp, struct fec_enet_private, ptp_caps);
243
244 u64 ns;
245 unsigned long flags;
246
247 ns = ts->tv_sec * 1000000000ULL;
248 ns += ts->tv_nsec;
249
250 spin_lock_irqsave(&fep->tmreg_lock, flags);
251 timecounter_init(&fep->tc, &fep->cc, ns);
252 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
253 return 0;
254}
255
256/**
257 * fec_ptp_enable
258 * @ptp: the ptp clock structure
259 * @rq: the requested feature to change
260 * @on: whether to enable or disable the feature
261 *
262 */
263static int fec_ptp_enable(struct ptp_clock_info *ptp,
264 struct ptp_clock_request *rq, int on)
265{
266 return -EOPNOTSUPP;
267}
268
269/**
270 * fec_ptp_ioctl - control hardware time stamping
271 * @ndev: pointer to net_device
272 * @ifr: ioctl data
273 * @cmd: particular ioctl requested
274 */
275int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
276{
277 struct fec_enet_private *fep = netdev_priv(ndev);
278
279 struct hwtstamp_config config;
280
281 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
282 return -EFAULT;
283
284 /* reserved for future extensions */
285 if (config.flags)
286 return -EINVAL;
287
288 switch (config.tx_type) {
289 case HWTSTAMP_TX_OFF:
290 fep->hwts_tx_en = 0;
291 break;
292 case HWTSTAMP_TX_ON:
293 fep->hwts_tx_en = 1;
294 break;
295 default:
296 return -ERANGE;
297 }
298
299 switch (config.rx_filter) {
300 case HWTSTAMP_FILTER_NONE:
301 if (fep->hwts_rx_en)
302 fep->hwts_rx_en = 0;
303 config.rx_filter = HWTSTAMP_FILTER_NONE;
304 break;
305
306 default:
307	/*
308	 * The FEC cannot filter timestamps by PTP message type, so
309	 * any request other than HWTSTAMP_FILTER_NONE falls back to
310	 * timestamping every received packet; report FILTER_ALL back
311	 * to the caller rather than returning an error.
312	 */
313 fep->hwts_rx_en = 1;
314 config.rx_filter = HWTSTAMP_FILTER_ALL;
315 break;
316 }
317
318 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
319 -EFAULT : 0;
320}
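
For context, a minimal userspace sketch of driving this ioctl path through SIOCSHWTSTAMP; the interface name, socket setup and error handling are illustrative:

	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>

	/* Enable TX timestamps and all-packet RX timestamps (illustrative). */
	static int enable_hwtstamp(const char *ifname)
	{
		struct hwtstamp_config cfg = {
			.flags = 0,                       /* reserved, must be zero */
			.tx_type = HWTSTAMP_TX_ON,
			.rx_filter = HWTSTAMP_FILTER_ALL, /* FEC: all or nothing */
		};
		struct ifreq ifr;
		int sock = socket(AF_INET, SOCK_DGRAM, 0);
		int ret;

		if (sock < 0)
			return -1;
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&cfg;

		ret = ioctl(sock, SIOCSHWTSTAMP, &ifr); /* cfg may be adjusted */
		close(sock);
		return ret;
	}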
321
322/**
323 * fec_time_keep - sample timecounter_read() every second to avoid overflow
324 * of the 32-bit ENET counter, which wraps every 2^32 ns (about 4.3 s)
325 */
326static void fec_time_keep(unsigned long _data)
327{
328 struct fec_enet_private *fep = (struct fec_enet_private *)_data;
329 u64 ns;
330 unsigned long flags;
331
332 spin_lock_irqsave(&fep->tmreg_lock, flags);
333 ns = timecounter_read(&fep->tc);
334 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
335
336 mod_timer(&fep->time_keep, jiffies + HZ);
337}
338
339/**
340 * fec_ptp_init
341 * @ndev: The FEC network adapter
342 * @pdev: the platform device backing the FEC
343 *
344 * Performs the required steps for enabling ptp support: initializes
345 * the cyclecounter, starts the keep-alive timer and registers the PHC.
346 */
347
348void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev)
349{
350 struct fec_enet_private *fep = netdev_priv(ndev);
351
352 fep->ptp_caps.owner = THIS_MODULE;
353 snprintf(fep->ptp_caps.name, 16, "fec ptp");
354
355 fep->ptp_caps.max_adj = 250000000;
356 fep->ptp_caps.n_alarm = 0;
357 fep->ptp_caps.n_ext_ts = 0;
358 fep->ptp_caps.n_per_out = 0;
359 fep->ptp_caps.pps = 0;
360 fep->ptp_caps.adjfreq = fec_ptp_adjfreq;
361 fep->ptp_caps.adjtime = fec_ptp_adjtime;
362 fep->ptp_caps.gettime = fec_ptp_gettime;
363 fep->ptp_caps.settime = fec_ptp_settime;
364 fep->ptp_caps.enable = fec_ptp_enable;
365
366 spin_lock_init(&fep->tmreg_lock);
367
368 fec_ptp_start_cyclecounter(ndev);
369
370 init_timer(&fep->time_keep);
371 fep->time_keep.data = (unsigned long)fep;
372 fep->time_keep.function = fec_time_keep;
373 fep->time_keep.expires = jiffies + HZ;
374 add_timer(&fep->time_keep);
375
376 fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev);
377 if (IS_ERR(fep->ptp_clock)) {
378 fep->ptp_clock = NULL;
379 pr_err("ptp_clock_register failed\n");
380 } else {
381 pr_info("registered PHC device on %s\n", ndev->name);
382 }
383}
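
Once registration succeeds the clock appears as a /dev/ptpN character device and can be read with clock_gettime(); a minimal sketch following the FD_TO_CLOCKID convention from the kernel's testptp example (the device index depends on probe order and is illustrative):

	#include <stdio.h>
	#include <fcntl.h>
	#include <time.h>

	#define CLOCKFD 3
	#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | CLOCKFD)

	int main(void)
	{
		int fd = open("/dev/ptp0", O_RDONLY); /* index is illustrative */
		struct timespec ts;

		if (fd < 0)
			return 1;
		if (clock_gettime(FD_TO_CLOCKID(fd), &ts))
			return 1;
		printf("PHC time: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
		return 0;
	}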
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 19ac096cb07b..bffb2edd6858 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -210,7 +210,7 @@ static int gfar_init_bds(struct net_device *ndev)
210 skb = gfar_new_skb(ndev); 210 skb = gfar_new_skb(ndev);
211 if (!skb) { 211 if (!skb) {
212 netdev_err(ndev, "Can't allocate RX buffers\n"); 212 netdev_err(ndev, "Can't allocate RX buffers\n");
213 goto err_rxalloc_fail; 213 return -ENOMEM;
214 } 214 }
215 rx_queue->rx_skbuff[j] = skb; 215 rx_queue->rx_skbuff[j] = skb;
216 216
@@ -223,10 +223,6 @@ static int gfar_init_bds(struct net_device *ndev)
223 } 223 }
224 224
225 return 0; 225 return 0;
226
227err_rxalloc_fail:
228 free_skb_resources(priv);
229 return -ENOMEM;
230} 226}
231 227
232static int gfar_alloc_skb_resources(struct net_device *ndev) 228static int gfar_alloc_skb_resources(struct net_device *ndev)
@@ -1359,7 +1355,11 @@ static int gfar_restore(struct device *dev)
1359 return 0; 1355 return 0;
1360 } 1356 }
1361 1357
1362 gfar_init_bds(ndev); 1358 if (gfar_init_bds(ndev)) {
1359 free_skb_resources(priv);
1360 return -ENOMEM;
1361 }
1362
1363 init_registers(ndev); 1363 init_registers(ndev);
1364 gfar_set_mac_address(ndev); 1364 gfar_set_mac_address(ndev);
1365 gfar_init_mac(ndev); 1365 gfar_init_mac(ndev);
@@ -1712,6 +1712,7 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
1712 tx_queue->tx_skbuff[i] = NULL; 1712 tx_queue->tx_skbuff[i] = NULL;
1713 } 1713 }
1714 kfree(tx_queue->tx_skbuff); 1714 kfree(tx_queue->tx_skbuff);
1715 tx_queue->tx_skbuff = NULL;
1715} 1716}
1716 1717
1717static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) 1718static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
@@ -1735,6 +1736,7 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1735 rxbdp++; 1736 rxbdp++;
1736 } 1737 }
1737 kfree(rx_queue->rx_skbuff); 1738 kfree(rx_queue->rx_skbuff);
1739 rx_queue->rx_skbuff = NULL;
1738} 1740}
1739 1741
1740/* If there are any tx skbs or rx skbs still around, free them. 1742/* If there are any tx skbs or rx skbs still around, free them.
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index 479e43e2f1ef..84c6b6cf9c14 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -738,13 +738,11 @@ static int __devexit mal_remove(struct platform_device *ofdev)
738 /* Synchronize with scheduled polling */ 738 /* Synchronize with scheduled polling */
739 napi_disable(&mal->napi); 739 napi_disable(&mal->napi);
740 740
741 if (!list_empty(&mal->list)) { 741 if (!list_empty(&mal->list))
742 /* This is *very* bad */ 742 /* This is *very* bad */
743 printk(KERN_EMERG 743 WARN(1, KERN_EMERG
744 "mal%d: commac list is not empty on remove!\n", 744 "mal%d: commac list is not empty on remove!\n",
745 mal->index); 745 mal->index);
746 WARN_ON(1);
747 }
748 746
749 dev_set_drvdata(&ofdev->dev, NULL); 747 dev_set_drvdata(&ofdev->dev, NULL);
750 748
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 0cafe4fe9406..73d28d51b5d9 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -93,6 +93,7 @@ config E1000E
93config IGB 93config IGB
94 tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support" 94 tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
95 depends on PCI 95 depends on PCI
96 select PTP_1588_CLOCK
96 ---help--- 97 ---help---
97 This driver supports Intel(R) 82575/82576 gigabit ethernet family of 98 This driver supports Intel(R) 82575/82576 gigabit ethernet family of
98 adapters. For more information on how to identify your adapter, go 99 adapters. For more information on how to identify your adapter, go
@@ -120,19 +121,6 @@ config IGB_DCA
120 driver. DCA is a method for warming the CPU cache before data 121 driver. DCA is a method for warming the CPU cache before data
121 is used, with the intent of lessening the impact of cache misses. 122 is used, with the intent of lessening the impact of cache misses.
122 123
123config IGB_PTP
124 bool "PTP Hardware Clock (PHC)"
125 default n
126 depends on IGB && EXPERIMENTAL
127 select PPS
128 select PTP_1588_CLOCK
129 ---help---
130 Say Y here if you want to use PTP Hardware Clock (PHC) in the
131 driver. Only the basic clock operations have been implemented.
132
133 Every timestamp and clock read operations must consult the
134 overflow counter to form a correct time value.
135
136config IGBVF 124config IGBVF
137 tristate "Intel(R) 82576 Virtual Function Ethernet support" 125 tristate "Intel(R) 82576 Virtual Function Ethernet support"
138 depends on PCI 126 depends on PCI
@@ -180,6 +168,7 @@ config IXGBE
180 tristate "Intel(R) 10GbE PCI Express adapters support" 168 tristate "Intel(R) 10GbE PCI Express adapters support"
181 depends on PCI && INET 169 depends on PCI && INET
182 select MDIO 170 select MDIO
171 select PTP_1588_CLOCK
183 ---help--- 172 ---help---
184 This driver supports Intel(R) 10GbE PCI Express family of 173 This driver supports Intel(R) 10GbE PCI Express family of
185 adapters. For more information on how to identify your adapter, go 174 adapters. For more information on how to identify your adapter, go
@@ -222,19 +211,6 @@ config IXGBE_DCB
222 211
223 If unsure, say N. 212 If unsure, say N.
224 213
225config IXGBE_PTP
226 bool "PTP Clock Support"
227 default n
228 depends on IXGBE && EXPERIMENTAL
229 select PPS
230 select PTP_1588_CLOCK
231 ---help---
232 Say Y here if you want support for 1588 Timestamping with a
233 PHC device, using the PTP 1588 Clock support. This is
234 required to enable timestamping support for the device.
235
236 If unsure, say N.
237
238config IXGBEVF 214config IXGBEVF
239 tristate "Intel(R) 82599 Virtual Function Ethernet support" 215 tristate "Intel(R) 82599 Virtual Function Ethernet support"
240 depends on PCI_MSI 216 depends on PCI_MSI
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 3d6839528761..8fedd2451538 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -107,6 +107,7 @@ u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = {
107}; 107};
108 108
109static DEFINE_SPINLOCK(e1000_eeprom_lock); 109static DEFINE_SPINLOCK(e1000_eeprom_lock);
110static DEFINE_SPINLOCK(e1000_phy_lock);
110 111
111/** 112/**
112 * e1000_set_phy_type - Set the phy type member in the hw struct. 113 * e1000_set_phy_type - Set the phy type member in the hw struct.
@@ -2830,19 +2831,25 @@ static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw)
2830s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data) 2831s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data)
2831{ 2832{
2832 u32 ret_val; 2833 u32 ret_val;
2834 unsigned long flags;
2833 2835
2834 e_dbg("e1000_read_phy_reg"); 2836 e_dbg("e1000_read_phy_reg");
2835 2837
2838 spin_lock_irqsave(&e1000_phy_lock, flags);
2839
2836 if ((hw->phy_type == e1000_phy_igp) && 2840 if ((hw->phy_type == e1000_phy_igp) &&
2837 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { 2841 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
2838 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, 2842 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
2839 (u16) reg_addr); 2843 (u16) reg_addr);
2840 if (ret_val) 2844 if (ret_val) {
2845 spin_unlock_irqrestore(&e1000_phy_lock, flags);
2841 return ret_val; 2846 return ret_val;
2847 }
2842 } 2848 }
2843 2849
2844 ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, 2850 ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
2845 phy_data); 2851 phy_data);
2852 spin_unlock_irqrestore(&e1000_phy_lock, flags);
2846 2853
2847 return ret_val; 2854 return ret_val;
2848} 2855}
@@ -2965,19 +2972,25 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
2965s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data) 2972s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data)
2966{ 2973{
2967 u32 ret_val; 2974 u32 ret_val;
2975 unsigned long flags;
2968 2976
2969 e_dbg("e1000_write_phy_reg"); 2977 e_dbg("e1000_write_phy_reg");
2970 2978
2979 spin_lock_irqsave(&e1000_phy_lock, flags);
2980
2971 if ((hw->phy_type == e1000_phy_igp) && 2981 if ((hw->phy_type == e1000_phy_igp) &&
2972 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { 2982 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
2973 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, 2983 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
2974 (u16) reg_addr); 2984 (u16) reg_addr);
2975 if (ret_val) 2985 if (ret_val) {
2986 spin_unlock_irqrestore(&e1000_phy_lock, flags);
2976 return ret_val; 2987 return ret_val;
2988 }
2977 } 2989 }
2978 2990
2979 ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, 2991 ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
2980 phy_data); 2992 phy_data);
2993 spin_unlock_irqrestore(&e1000_phy_lock, flags);
2981 2994
2982 return ret_val; 2995 return ret_val;
2983} 2996}
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index f444eb0b76d8..dadb13be479a 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -5067,6 +5067,17 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5067 return NETDEV_TX_OK; 5067 return NETDEV_TX_OK;
5068 } 5068 }
5069 5069
5070 /*
5071 * The minimum packet size with TCTL.PSP set is 17 bytes so
5072 * pad skb in order to meet this minimum size requirement
5073 */
5074 if (unlikely(skb->len < 17)) {
5075 if (skb_pad(skb, 17 - skb->len))
5076 return NETDEV_TX_OK;
5077 skb->len = 17;
5078 skb_set_tail_pointer(skb, 17);
5079 }
5080
5070 mss = skb_shinfo(skb)->gso_size; 5081 mss = skb_shinfo(skb)->gso_size;
5071 if (mss) { 5082 if (mss) {
5072 u8 hdr_len; 5083 u8 hdr_len;
diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
index 97c197fd4a8e..624476cfa727 100644
--- a/drivers/net/ethernet/intel/igb/Makefile
+++ b/drivers/net/ethernet/intel/igb/Makefile
@@ -34,6 +34,4 @@ obj-$(CONFIG_IGB) += igb.o
34 34
35igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \ 35igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \
36 e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \ 36 e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \
37 e1000_i210.o 37 e1000_i210.o igb_ptp.o
38
39igb-$(CONFIG_IGB_PTP) += igb_ptp.o
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index ca4641e2f748..deb05970b9f1 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -319,6 +319,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
319 nvm->ops.acquire = igb_acquire_nvm_i210; 319 nvm->ops.acquire = igb_acquire_nvm_i210;
320 nvm->ops.release = igb_release_nvm_i210; 320 nvm->ops.release = igb_release_nvm_i210;
321 nvm->ops.read = igb_read_nvm_srrd_i210; 321 nvm->ops.read = igb_read_nvm_srrd_i210;
322 nvm->ops.write = igb_write_nvm_srwr_i210;
322 nvm->ops.valid_led_default = igb_valid_led_default_i210; 323 nvm->ops.valid_led_default = igb_valid_led_default_i210;
323 break; 324 break;
324 case e1000_i211: 325 case e1000_i211:
@@ -2233,19 +2234,16 @@ s32 igb_set_eee_i350(struct e1000_hw *hw)
2233 2234
2234 /* enable or disable per user setting */ 2235 /* enable or disable per user setting */
2235 if (!(hw->dev_spec._82575.eee_disable)) { 2236 if (!(hw->dev_spec._82575.eee_disable)) {
2236 ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | 2237 u32 eee_su = rd32(E1000_EEE_SU);
2237 E1000_IPCNFG_EEE_100M_AN); 2238
2238 eeer |= (E1000_EEER_TX_LPI_EN | 2239 ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
2239 E1000_EEER_RX_LPI_EN | 2240 eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
2240 E1000_EEER_LPI_FC); 2241 E1000_EEER_LPI_FC);
2241 2242
2242 /* keep the LPI clock running before EEE is enabled */ 2243 /* This bit should not be set in normal operation. */
2243 if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) { 2244 if (eee_su & E1000_EEE_SU_LPI_CLK_STP)
2244 u32 eee_su; 2245 hw_dbg("LPI Clock Stop Bit should not be set!\n");
2245 eee_su = rd32(E1000_EEE_SU); 2246
2246 eee_su &= ~E1000_EEE_SU_LPI_CLK_STP;
2247 wr32(E1000_EEE_SU, eee_su);
2248 }
2249 2247
2250 } else { 2248 } else {
2251 ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | 2249 ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index e85c453f5428..44b76b3b6816 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -172,10 +172,13 @@ struct e1000_adv_tx_context_desc {
172#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ 172#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
173#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */ 173#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
174#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */ 174#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
175#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
175 176
176#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ 177#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
177#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ 178#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
179#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
178#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ 180#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
181#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
179 182
180/* Additional DCA related definitions, note change in position of CPUID */ 183/* Additional DCA related definitions, note change in position of CPUID */
181#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */ 184#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index de4b41ec3c40..e647cff9a5e3 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -636,6 +636,7 @@
636/* NVM Word Offsets */ 636/* NVM Word Offsets */
637#define NVM_COMPAT 0x0003 637#define NVM_COMPAT 0x0003
638#define NVM_ID_LED_SETTINGS 0x0004 /* SERDES output amplitude */ 638#define NVM_ID_LED_SETTINGS 0x0004 /* SERDES output amplitude */
639#define NVM_VERSION 0x0005
639#define NVM_INIT_CONTROL2_REG 0x000F 640#define NVM_INIT_CONTROL2_REG 0x000F
640#define NVM_INIT_CONTROL3_PORT_B 0x0014 641#define NVM_INIT_CONTROL3_PORT_B 0x0014
641#define NVM_INIT_CONTROL3_PORT_A 0x0024 642#define NVM_INIT_CONTROL3_PORT_A 0x0024
@@ -653,6 +654,19 @@
653#define NVM_LED_1_CFG 0x001C 654#define NVM_LED_1_CFG 0x001C
654#define NVM_LED_0_2_CFG 0x001F 655#define NVM_LED_0_2_CFG 0x001F
655 656
657/* NVM version defines */
658#define NVM_ETRACK_WORD 0x0042
659#define NVM_COMB_VER_OFF 0x0083
660#define NVM_COMB_VER_PTR 0x003d
661#define NVM_MAJOR_MASK 0xF000
662#define NVM_MINOR_MASK 0x0FF0
663#define NVM_BUILD_MASK 0x000F
664#define NVM_COMB_VER_MASK 0x00FF
665#define NVM_MAJOR_SHIFT 12
666#define NVM_MINOR_SHIFT 4
667#define NVM_COMB_VER_SHFT 8
668#define NVM_VER_INVALID 0xFFFF
669#define NVM_ETRACK_SHIFT 16
656 670
657#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */ 671#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */
658#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */ 672#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 77a5f939bc74..41474298d365 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -423,6 +423,100 @@ s32 igb_read_invm_i211(struct e1000_hw *hw, u16 address, u16 *data)
423} 423}
424 424
425/** 425/**
426 * igb_read_invm_version - Reads iNVM version and image type
427 * @hw: pointer to the HW structure
428 * @invm_ver: version structure for the version read
429 *
430 * Reads iNVM version and image type.
431 **/
432s32 igb_read_invm_version(struct e1000_hw *hw,
433 struct e1000_fw_version *invm_ver) {
434 u32 *record = NULL;
435 u32 *next_record = NULL;
436 u32 i = 0;
437 u32 invm_dword = 0;
438 u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
439 E1000_INVM_RECORD_SIZE_IN_BYTES);
440 u32 buffer[E1000_INVM_SIZE];
441 s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
442 u16 version = 0;
443
444 /* Read iNVM memory */
445 for (i = 0; i < E1000_INVM_SIZE; i++) {
446 invm_dword = rd32(E1000_INVM_DATA_REG(i));
447 buffer[i] = invm_dword;
448 }
449
450 /* Read version number */
451 for (i = 1; i < invm_blocks; i++) {
452 record = &buffer[invm_blocks - i];
453 next_record = &buffer[invm_blocks - i + 1];
454
455 /* Check if we have first version location used */
456 if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
457 version = 0;
458 status = E1000_SUCCESS;
459 break;
460 }
461 /* Check if we have second version location used */
462 else if ((i == 1) &&
463 ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
464 version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
465 status = E1000_SUCCESS;
466 break;
467 }
468 /* Check if we have odd version location
469 * used and it is the last one used
470 */
471 else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
472 ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
473 (i != 1))) {
474 version = (*next_record & E1000_INVM_VER_FIELD_TWO)
475 >> 13;
476 status = E1000_SUCCESS;
477 break;
478 }
479 /* Check if we have even version location
480 * used and it is the last one used
481 */
482 else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
483 ((*record & 0x3) == 0)) {
484 version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
485 status = E1000_SUCCESS;
486 break;
487 }
488 }
489
490 if (status == E1000_SUCCESS) {
491 invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
492 >> E1000_INVM_MAJOR_SHIFT;
493 invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
494 }
495 /* Read Image Type */
496 for (i = 1; i < invm_blocks; i++) {
497 record = &buffer[invm_blocks - i];
498 next_record = &buffer[invm_blocks - i + 1];
499
500 /* Check if we have image type in first location used */
501 if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
502 invm_ver->invm_img_type = 0;
503 status = E1000_SUCCESS;
504 break;
505 }
506		/* Check if we have image type in the last location used */
507 else if ((((*record & 0x3) == 0) &&
508 ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
509 ((((*record & 0x3) != 0) && (i != 1)))) {
510 invm_ver->invm_img_type =
511 (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
512 status = E1000_SUCCESS;
513 break;
514 }
515 }
516 return status;
517}
518
519/**
426 * igb_validate_nvm_checksum_i210 - Validate EEPROM checksum 520 * igb_validate_nvm_checksum_i210 - Validate EEPROM checksum
427 * @hw: pointer to the HW structure 521 * @hw: pointer to the HW structure
428 * 522 *
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 5dc2bd3f50bc..974d23584d70 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -43,6 +43,8 @@ extern void igb_release_nvm_i210(struct e1000_hw *hw);
43extern s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data); 43extern s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
44extern s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words, 44extern s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
45 u16 *data); 45 u16 *data);
46extern s32 igb_read_invm_version(struct e1000_hw *hw,
47 struct e1000_fw_version *invm_ver);
46 48
47#define E1000_STM_OPCODE 0xDB00 49#define E1000_STM_OPCODE 0xDB00
48#define E1000_EEPROM_FLASH_SIZE_WORD 0x11 50#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
@@ -65,6 +67,15 @@ enum E1000_INVM_STRUCTURE_TYPE {
65 67
66#define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8 68#define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8
67#define E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1 69#define E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1
70#define E1000_INVM_ULT_BYTES_SIZE 8
71#define E1000_INVM_RECORD_SIZE_IN_BYTES 4
72#define E1000_INVM_VER_FIELD_ONE 0x1FF8
73#define E1000_INVM_VER_FIELD_TWO 0x7FE000
74#define E1000_INVM_IMGTYPE_FIELD 0x1F800000
75
76#define E1000_INVM_MAJOR_MASK 0x3F0
77#define E1000_INVM_MINOR_MASK 0xF
78#define E1000_INVM_MAJOR_SHIFT 4
68 79
69#define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \ 80#define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \
70 (ID_LED_OFF1_OFF2 << 4) | \ 81 (ID_LED_OFF1_OFF2 << 4) | \
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 819c145ac762..7acddfe9e6d5 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -1391,6 +1391,10 @@ s32 igb_validate_mdi_setting(struct e1000_hw *hw)
1391{ 1391{
1392 s32 ret_val = 0; 1392 s32 ret_val = 0;
1393 1393
1394 /* All MDI settings are supported on 82580 and newer. */
1395 if (hw->mac.type >= e1000_82580)
1396 goto out;
1397
1394 if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) { 1398 if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
1395 hw_dbg("Invalid MDI setting detected\n"); 1399 hw_dbg("Invalid MDI setting detected\n");
1396 hw->phy.mdix = 1; 1400 hw->phy.mdix = 1;
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index cbddc4e51e30..e2b2c4b9c951 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -33,6 +33,7 @@
33#include "e1000_phy.h" 33#include "e1000_phy.h"
34#include "e1000_nvm.h" 34#include "e1000_nvm.h"
35#include "e1000_defines.h" 35#include "e1000_defines.h"
36#include "e1000_i210.h"
36 37
37/* 38/*
38 * Functions that should not be called directly from drivers but can be used 39 * Functions that should not be called directly from drivers but can be used
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index aa5fcdf3f357..7db3f80bcd57 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -710,3 +710,74 @@ s32 igb_update_nvm_checksum(struct e1000_hw *hw)
710out: 710out:
711 return ret_val; 711 return ret_val;
712} 712}
713
714/**
715 * igb_get_fw_version - Get firmware version information
716 * @hw: pointer to the HW structure
717 * @fw_vers: pointer to output structure
718 *
719 * Unsupported MAC types return an all-zero version structure.
720 **/
721void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
722{
723 u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset;
724 u16 fw_version;
725
726 memset(fw_vers, 0, sizeof(struct e1000_fw_version));
727
728 switch (hw->mac.type) {
729 case e1000_i211:
730 igb_read_invm_version(hw, fw_vers);
731 return;
732 case e1000_82575:
733 case e1000_82576:
734 case e1000_82580:
735 case e1000_i350:
736 case e1000_i210:
737 break;
738 default:
739 return;
740 }
741 /* basic eeprom version numbers */
742 hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
743 fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
744 fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK);
745
746 /* etrack id */
747 hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
748 hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
749 fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) | eeprom_verl;
750
751 switch (hw->mac.type) {
752 case e1000_i210:
753 case e1000_i350:
754 /* find combo image version */
755 hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
756 if ((comb_offset != 0x0) && (comb_offset != NVM_VER_INVALID)) {
757
758 hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
759 + 1), 1, &comb_verh);
760 hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
761 1, &comb_verl);
762
763 /* get Option Rom version if it exists and is valid */
764 if ((comb_verh && comb_verl) &&
765 ((comb_verh != NVM_VER_INVALID) &&
766 (comb_verl != NVM_VER_INVALID))) {
767
768 fw_vers->or_valid = true;
769 fw_vers->or_major =
770 comb_verl >> NVM_COMB_VER_SHFT;
771 fw_vers->or_build =
772 ((comb_verl << NVM_COMB_VER_SHFT)
773 | (comb_verh >> NVM_COMB_VER_SHFT));
774 fw_vers->or_patch =
775 comb_verh & NVM_COMB_VER_MASK;
776 }
777 }
778 break;
779 default:
780 break;
781 }
782 return;
783}
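
For reference, a worked decode of a hypothetical NVM_VERSION word (0x3142) using the masks introduced in e1000_defines.h above; note that igb_get_fw_version() stores eep_minor masked but unshifted:

	/* Hypothetical NVM_VERSION word, decoded with the new masks: */
	u16 fw_version = 0x3142;
	u16 major = (fw_version & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT; /* 0x3 -> 3 */
	u16 minor = (fw_version & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT; /* 0x14 -> 20 */
	u16 build = fw_version & NVM_BUILD_MASK;                      /* 0x2 -> 2 */
	/* igb_get_fw_version() keeps eep_minor masked but unshifted (0x140). */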
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
index 825b0228cac0..7012d458c6f7 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
@@ -40,4 +40,20 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
40s32 igb_validate_nvm_checksum(struct e1000_hw *hw); 40s32 igb_validate_nvm_checksum(struct e1000_hw *hw);
41s32 igb_update_nvm_checksum(struct e1000_hw *hw); 41s32 igb_update_nvm_checksum(struct e1000_hw *hw);
42 42
43struct e1000_fw_version {
44 u32 etrack_id;
45 u16 eep_major;
46 u16 eep_minor;
47
48 u8 invm_major;
49 u8 invm_minor;
50 u8 invm_img_type;
51
52 bool or_valid;
53 u16 or_major;
54 u16 or_build;
55 u16 or_patch;
56};
57void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers);
58
43#endif 59#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 3404bc79f4ca..fe76004aca4e 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1207,20 +1207,25 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
1207 u16 phy_data; 1207 u16 phy_data;
1208 bool link; 1208 bool link;
1209 1209
1210 /* 1210 /* I210 and I211 devices support Auto-Crossover in forced operation. */
1211 * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI 1211 if (phy->type != e1000_phy_i210) {
1212 * forced whenever speed and duplex are forced. 1212 /*
1213 */ 1213 * Clear Auto-Crossover to force MDI manually. M88E1000
1214 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); 1214 * requires MDI forced whenever speed and duplex are forced.
1215 if (ret_val) 1215 */
1216 goto out; 1216 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL,
1217 &phy_data);
1218 if (ret_val)
1219 goto out;
1217 1220
1218 phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; 1221 phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
1219 ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); 1222 ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL,
1220 if (ret_val) 1223 phy_data);
1221 goto out; 1224 if (ret_val)
1225 goto out;
1222 1226
1223 hw_dbg("M88E1000 PSCR: %X\n", phy_data); 1227 hw_dbg("M88E1000 PSCR: %X\n", phy_data);
1228 }
1224 1229
1225 ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); 1230 ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
1226 if (ret_val) 1231 if (ret_val)
@@ -1710,6 +1715,26 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
1710 1715
1711 switch (hw->phy.id) { 1716 switch (hw->phy.id) {
1712 case I210_I_PHY_ID: 1717 case I210_I_PHY_ID:
1718 /* Get cable length from PHY Cable Diagnostics Control Reg */
1719 ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) +
1720 (I347AT4_PCDL + phy->addr),
1721 &phy_data);
1722 if (ret_val)
1723 return ret_val;
1724
1725 /* Check if the unit of cable length is meters or cm */
1726 ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) +
1727 I347AT4_PCDC, &phy_data2);
1728 if (ret_val)
1729 return ret_val;
1730
1731 is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT);
1732
1733 /* Populate the phy structure with cable length in meters */
1734 phy->min_cable_length = phy_data / (is_cm ? 100 : 1);
1735 phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
1736 phy->cable_length = phy_data / (is_cm ? 100 : 1);
1737 break;
1713 case I347AT4_E_PHY_ID: 1738 case I347AT4_E_PHY_ID:
1714 /* Remember the original page select and set it to 7 */ 1739 /* Remember the original page select and set it to 7 */
1715 ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, 1740 ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 8aad230c0592..796db53954d9 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -34,11 +34,9 @@
34#include "e1000_mac.h" 34#include "e1000_mac.h"
35#include "e1000_82575.h" 35#include "e1000_82575.h"
36 36
37#ifdef CONFIG_IGB_PTP
38#include <linux/clocksource.h> 37#include <linux/clocksource.h>
39#include <linux/net_tstamp.h> 38#include <linux/net_tstamp.h>
40#include <linux/ptp_clock_kernel.h> 39#include <linux/ptp_clock_kernel.h>
41#endif /* CONFIG_IGB_PTP */
42#include <linux/bitops.h> 40#include <linux/bitops.h>
43#include <linux/if_vlan.h> 41#include <linux/if_vlan.h>
44 42
@@ -132,9 +130,10 @@ struct vf_data_storage {
132#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 130#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
133 131
134/* Supported Rx Buffer Sizes */ 132/* Supported Rx Buffer Sizes */
135#define IGB_RXBUFFER_256 256 133#define IGB_RXBUFFER_256 256
136#define IGB_RXBUFFER_16384 16384 134#define IGB_RXBUFFER_2048 2048
137#define IGB_RX_HDR_LEN IGB_RXBUFFER_256 135#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
136#define IGB_RX_BUFSZ IGB_RXBUFFER_2048
138 137
139/* How many Tx Descriptors do we need to call netif_wake_queue ? */ 138/* How many Tx Descriptors do we need to call netif_wake_queue ? */
140#define IGB_TX_QUEUE_WAKE 16 139#define IGB_TX_QUEUE_WAKE 16
@@ -174,11 +173,9 @@ struct igb_tx_buffer {
174}; 173};
175 174
176struct igb_rx_buffer { 175struct igb_rx_buffer {
177 struct sk_buff *skb;
178 dma_addr_t dma; 176 dma_addr_t dma;
179 struct page *page; 177 struct page *page;
180 dma_addr_t page_dma; 178 unsigned int page_offset;
181 u32 page_offset;
182}; 179};
183 180
184struct igb_tx_queue_stats { 181struct igb_tx_queue_stats {
@@ -205,22 +202,6 @@ struct igb_ring_container {
205 u8 itr; /* current ITR setting for ring */ 202 u8 itr; /* current ITR setting for ring */
206}; 203};
207 204
208struct igb_q_vector {
209 struct igb_adapter *adapter; /* backlink */
210 int cpu; /* CPU for DCA */
211 u32 eims_value; /* EIMS mask value */
212
213 struct igb_ring_container rx, tx;
214
215 struct napi_struct napi;
216
217 u16 itr_val;
218 u8 set_itr;
219 void __iomem *itr_register;
220
221 char name[IFNAMSIZ + 9];
222};
223
224struct igb_ring { 205struct igb_ring {
225 struct igb_q_vector *q_vector; /* backlink to q_vector */ 206 struct igb_q_vector *q_vector; /* backlink to q_vector */
226 struct net_device *netdev; /* back pointer to net_device */ 207 struct net_device *netdev; /* back pointer to net_device */
@@ -232,15 +213,17 @@ struct igb_ring {
232 void *desc; /* descriptor ring memory */ 213 void *desc; /* descriptor ring memory */
233 unsigned long flags; /* ring specific flags */ 214 unsigned long flags; /* ring specific flags */
234 void __iomem *tail; /* pointer to ring tail register */ 215 void __iomem *tail; /* pointer to ring tail register */
216 dma_addr_t dma; /* phys address of the ring */
217 unsigned int size; /* length of desc. ring in bytes */
235 218
236 u16 count; /* number of desc. in the ring */ 219 u16 count; /* number of desc. in the ring */
237 u8 queue_index; /* logical index of the ring*/ 220 u8 queue_index; /* logical index of the ring*/
238 u8 reg_idx; /* physical index of the ring */ 221 u8 reg_idx; /* physical index of the ring */
239 u32 size; /* length of desc. ring in bytes */
240 222
241 /* everything past this point are written often */ 223 /* everything past this point are written often */
242 u16 next_to_clean ____cacheline_aligned_in_smp; 224 u16 next_to_clean;
243 u16 next_to_use; 225 u16 next_to_use;
226 u16 next_to_alloc;
244 227
245 union { 228 union {
246 /* TX */ 229 /* TX */
@@ -251,12 +234,30 @@ struct igb_ring {
251 }; 234 };
252 /* RX */ 235 /* RX */
253 struct { 236 struct {
237 struct sk_buff *skb;
254 struct igb_rx_queue_stats rx_stats; 238 struct igb_rx_queue_stats rx_stats;
255 struct u64_stats_sync rx_syncp; 239 struct u64_stats_sync rx_syncp;
256 }; 240 };
257 }; 241 };
258 /* Items past this point are only used during ring alloc / free */ 242} ____cacheline_internodealigned_in_smp;
259 dma_addr_t dma; /* phys address of the ring */ 243
244struct igb_q_vector {
245 struct igb_adapter *adapter; /* backlink */
246 int cpu; /* CPU for DCA */
247 u32 eims_value; /* EIMS mask value */
248
249 u16 itr_val;
250 u8 set_itr;
251 void __iomem *itr_register;
252
253 struct igb_ring_container rx, tx;
254
255 struct napi_struct napi;
256 struct rcu_head rcu; /* to avoid race with update stats on free */
257 char name[IFNAMSIZ + 9];
258
259 /* for dynamic allocation of rings associated with this q_vector */
260 struct igb_ring ring[0] ____cacheline_internodealigned_in_smp;
260}; 261};
261 262
262enum e1000_ring_flags_t { 263enum e1000_ring_flags_t {
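
With the rings now trailing struct igb_q_vector as a zero-length array, a single allocation can cover the vector and its rings; a sketch of the pattern this layout implies (ring_count and the field wiring are illustrative):

	/* One allocation covers the vector plus its rings (illustrative). */
	size_t size = sizeof(struct igb_q_vector) +
		      ring_count * sizeof(struct igb_ring);
	struct igb_q_vector *q_vector = kzalloc(size, GFP_KERNEL);

	if (q_vector) {
		/* ring[0] is the first element of the trailing array */
		struct igb_ring *ring = q_vector->ring;
		ring->q_vector = q_vector;
	}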
@@ -373,7 +374,6 @@ struct igb_adapter {
373 u32 wvbr; 374 u32 wvbr;
374 u32 *shadow_vfta; 375 u32 *shadow_vfta;
375 376
376#ifdef CONFIG_IGB_PTP
377 struct ptp_clock *ptp_clock; 377 struct ptp_clock *ptp_clock;
378 struct ptp_clock_info ptp_caps; 378 struct ptp_clock_info ptp_caps;
379 struct delayed_work ptp_overflow_work; 379 struct delayed_work ptp_overflow_work;
@@ -382,7 +382,6 @@ struct igb_adapter {
382 spinlock_t tmreg_lock; 382 spinlock_t tmreg_lock;
383 struct cyclecounter cc; 383 struct cyclecounter cc;
384 struct timecounter tc; 384 struct timecounter tc;
385#endif /* CONFIG_IGB_PTP */
386 385
387 char fw_version[32]; 386 char fw_version[32];
388}; 387};
@@ -436,18 +435,27 @@ extern bool igb_has_link(struct igb_adapter *adapter);
436extern void igb_set_ethtool_ops(struct net_device *); 435extern void igb_set_ethtool_ops(struct net_device *);
437extern void igb_power_up_link(struct igb_adapter *); 436extern void igb_power_up_link(struct igb_adapter *);
438extern void igb_set_fw_version(struct igb_adapter *); 437extern void igb_set_fw_version(struct igb_adapter *);
439#ifdef CONFIG_IGB_PTP
440extern void igb_ptp_init(struct igb_adapter *adapter); 438extern void igb_ptp_init(struct igb_adapter *adapter);
441extern void igb_ptp_stop(struct igb_adapter *adapter); 439extern void igb_ptp_stop(struct igb_adapter *adapter);
442extern void igb_ptp_reset(struct igb_adapter *adapter); 440extern void igb_ptp_reset(struct igb_adapter *adapter);
443extern void igb_ptp_tx_work(struct work_struct *work); 441extern void igb_ptp_tx_work(struct work_struct *work);
444extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter); 442extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
445extern void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector, 443extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
446 union e1000_adv_rx_desc *rx_desc,
447 struct sk_buff *skb); 444 struct sk_buff *skb);
445extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
446 unsigned char *va,
447 struct sk_buff *skb);
448static inline void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
449 union e1000_adv_rx_desc *rx_desc,
450 struct sk_buff *skb)
451{
452 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
453 !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
454 igb_ptp_rx_rgtstamp(q_vector, skb);
455}
456
448extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, 457extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
449 struct ifreq *ifr, int cmd); 458 struct ifreq *ifr, int cmd);
450#endif /* CONFIG_IGB_PTP */
451 459
452static inline s32 igb_reset_phy(struct e1000_hw *hw) 460static inline s32 igb_reset_phy(struct e1000_hw *hw)
453{ 461{
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 2ea012849825..d8b1bee606c0 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -37,6 +37,7 @@
37#include <linux/sched.h> 37#include <linux/sched.h>
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/pm_runtime.h> 39#include <linux/pm_runtime.h>
40#include <linux/highmem.h>
40 41
41#include "igb.h" 42#include "igb.h"
42 43
@@ -1685,16 +1686,24 @@ static void igb_create_lbtest_frame(struct sk_buff *skb,
1685 memset(&skb->data[frame_size + 12], 0xAF, 1); 1686 memset(&skb->data[frame_size + 12], 0xAF, 1);
1686} 1687}
1687 1688
1688static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) 1689static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer,
1690 unsigned int frame_size)
1689{ 1691{
1690 frame_size /= 2; 1692 unsigned char *data;
1691 if (*(skb->data + 3) == 0xFF) { 1693 bool match = true;
1692 if ((*(skb->data + frame_size + 10) == 0xBE) && 1694
1693 (*(skb->data + frame_size + 12) == 0xAF)) { 1695 frame_size >>= 1;
1694 return 0; 1696
1695 } 1697 data = kmap(rx_buffer->page);
1696 } 1698
1697 return 13; 1699 if (data[3] != 0xFF ||
1700 data[frame_size + 10] != 0xBE ||
1701 data[frame_size + 12] != 0xAF)
1702 match = false;
1703
1704 kunmap(rx_buffer->page);
1705
1706 return match;
1698} 1707}
1699 1708
1700static int igb_clean_test_rings(struct igb_ring *rx_ring, 1709static int igb_clean_test_rings(struct igb_ring *rx_ring,
@@ -1704,9 +1713,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
1704 union e1000_adv_rx_desc *rx_desc; 1713 union e1000_adv_rx_desc *rx_desc;
1705 struct igb_rx_buffer *rx_buffer_info; 1714 struct igb_rx_buffer *rx_buffer_info;
1706 struct igb_tx_buffer *tx_buffer_info; 1715 struct igb_tx_buffer *tx_buffer_info;
1707 struct netdev_queue *txq;
1708 u16 rx_ntc, tx_ntc, count = 0; 1716 u16 rx_ntc, tx_ntc, count = 0;
1709 unsigned int total_bytes = 0, total_packets = 0;
1710 1717
1711 /* initialize next to clean and descriptor values */ 1718 /* initialize next to clean and descriptor values */
1712 rx_ntc = rx_ring->next_to_clean; 1719 rx_ntc = rx_ring->next_to_clean;
@@ -1717,21 +1724,24 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
1717 /* check rx buffer */ 1724 /* check rx buffer */
1718 rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc]; 1725 rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
1719 1726
1720 /* unmap rx buffer, will be remapped by alloc_rx_buffers */ 1727 /* sync Rx buffer for CPU read */
1721 dma_unmap_single(rx_ring->dev, 1728 dma_sync_single_for_cpu(rx_ring->dev,
1722 rx_buffer_info->dma, 1729 rx_buffer_info->dma,
1723 IGB_RX_HDR_LEN, 1730 IGB_RX_BUFSZ,
1724 DMA_FROM_DEVICE); 1731 DMA_FROM_DEVICE);
1725 rx_buffer_info->dma = 0;
1726 1732
1727 /* verify contents of skb */ 1733 /* verify contents of skb */
1728 if (!igb_check_lbtest_frame(rx_buffer_info->skb, size)) 1734 if (igb_check_lbtest_frame(rx_buffer_info, size))
1729 count++; 1735 count++;
1730 1736
1737 /* sync Rx buffer for device write */
1738 dma_sync_single_for_device(rx_ring->dev,
1739 rx_buffer_info->dma,
1740 IGB_RX_BUFSZ,
1741 DMA_FROM_DEVICE);
1742
1731 /* unmap buffer on tx side */ 1743 /* unmap buffer on tx side */
1732 tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc]; 1744 tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
1733 total_bytes += tx_buffer_info->bytecount;
1734 total_packets += tx_buffer_info->gso_segs;
1735 igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); 1745 igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
1736 1746
1737 /* increment rx/tx next to clean counters */ 1747 /* increment rx/tx next to clean counters */
@@ -1746,8 +1756,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
1746 rx_desc = IGB_RX_DESC(rx_ring, rx_ntc); 1756 rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
1747 } 1757 }
1748 1758
1749 txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); 1759 netdev_tx_reset_queue(txring_txq(tx_ring));
1750 netdev_tx_completed_queue(txq, total_packets, total_bytes);
1751 1760
1752 /* re-map buffers to ring, store next to clean values */ 1761 /* re-map buffers to ring, store next to clean values */
1753 igb_alloc_rx_buffers(rx_ring, count); 1762 igb_alloc_rx_buffers(rx_ring, count);
@@ -2301,7 +2310,6 @@ static int igb_get_ts_info(struct net_device *dev,
2301 struct igb_adapter *adapter = netdev_priv(dev); 2310 struct igb_adapter *adapter = netdev_priv(dev);
2302 2311
2303 switch (adapter->hw.mac.type) { 2312 switch (adapter->hw.mac.type) {
2304#ifdef CONFIG_IGB_PTP
2305 case e1000_82576: 2313 case e1000_82576:
2306 case e1000_82580: 2314 case e1000_82580:
2307 case e1000_i350: 2315 case e1000_i350:
@@ -2337,7 +2345,6 @@ static int igb_get_ts_info(struct net_device *dev,
2337 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); 2345 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
2338 2346
2339 return 0; 2347 return 0;
2340#endif /* CONFIG_IGB_PTP */
2341 default: 2348 default:
2342 return -EOPNOTSUPP; 2349 return -EOPNOTSUPP;
2343 } 2350 }
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index e1ceb37ef12e..082ce73dc627 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -61,7 +61,7 @@
61 61
62#define MAJ 4 62#define MAJ 4
63#define MIN 0 63#define MIN 0
64#define BUILD 1 64#define BUILD 17
65#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ 65#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
66__stringify(BUILD) "-k" 66__stringify(BUILD) "-k"
67char igb_driver_name[] = "igb"; 67char igb_driver_name[] = "igb";
@@ -534,31 +534,27 @@ rx_ring_summary:
534 534
535 if (staterr & E1000_RXD_STAT_DD) { 535 if (staterr & E1000_RXD_STAT_DD) {
536 /* Descriptor Done */ 536 /* Descriptor Done */
537 pr_info("%s[0x%03X] %016llX %016llX -------" 537 pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n",
538 "--------- %p%s\n", "RWB", i, 538 "RWB", i,
539 le64_to_cpu(u0->a), 539 le64_to_cpu(u0->a),
540 le64_to_cpu(u0->b), 540 le64_to_cpu(u0->b),
541 buffer_info->skb, next_desc); 541 next_desc);
542 } else { 542 } else {
543 pr_info("%s[0x%03X] %016llX %016llX %016llX" 543 pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n",
544 " %p%s\n", "R ", i, 544 "R ", i,
545 le64_to_cpu(u0->a), 545 le64_to_cpu(u0->a),
546 le64_to_cpu(u0->b), 546 le64_to_cpu(u0->b),
547 (u64)buffer_info->dma, 547 (u64)buffer_info->dma,
548 buffer_info->skb, next_desc); 548 next_desc);
549 549
550 if (netif_msg_pktdata(adapter) && 550 if (netif_msg_pktdata(adapter) &&
551 buffer_info->dma && buffer_info->skb) { 551 buffer_info->dma && buffer_info->page) {
552 print_hex_dump(KERN_INFO, "",
553 DUMP_PREFIX_ADDRESS,
554 16, 1, buffer_info->skb->data,
555 IGB_RX_HDR_LEN, true);
556 print_hex_dump(KERN_INFO, "", 552 print_hex_dump(KERN_INFO, "",
557 DUMP_PREFIX_ADDRESS, 553 DUMP_PREFIX_ADDRESS,
558 16, 1, 554 16, 1,
559 page_address(buffer_info->page) + 555 page_address(buffer_info->page) +
560 buffer_info->page_offset, 556 buffer_info->page_offset,
561 PAGE_SIZE/2, true); 557 IGB_RX_BUFSZ, true);
562 } 558 }
563 } 559 }
564 } 560 }
@@ -656,80 +652,6 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
656 } 652 }
657} 653}
658 654
659static void igb_free_queues(struct igb_adapter *adapter)
660{
661 int i;
662
663 for (i = 0; i < adapter->num_tx_queues; i++) {
664 kfree(adapter->tx_ring[i]);
665 adapter->tx_ring[i] = NULL;
666 }
667 for (i = 0; i < adapter->num_rx_queues; i++) {
668 kfree(adapter->rx_ring[i]);
669 adapter->rx_ring[i] = NULL;
670 }
671 adapter->num_rx_queues = 0;
672 adapter->num_tx_queues = 0;
673}
674
675/**
676 * igb_alloc_queues - Allocate memory for all rings
677 * @adapter: board private structure to initialize
678 *
679 * We allocate one ring per queue at run-time since we don't know the
680 * number of queues at compile-time.
681 **/
682static int igb_alloc_queues(struct igb_adapter *adapter)
683{
684 struct igb_ring *ring;
685 int i;
686
687 for (i = 0; i < adapter->num_tx_queues; i++) {
688 ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
689 if (!ring)
690 goto err;
691 ring->count = adapter->tx_ring_count;
692 ring->queue_index = i;
693 ring->dev = &adapter->pdev->dev;
694 ring->netdev = adapter->netdev;
695 /* For 82575, context index must be unique per ring. */
696 if (adapter->hw.mac.type == e1000_82575)
697 set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
698 adapter->tx_ring[i] = ring;
699 }
700
701 for (i = 0; i < adapter->num_rx_queues; i++) {
702 ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
703 if (!ring)
704 goto err;
705 ring->count = adapter->rx_ring_count;
706 ring->queue_index = i;
707 ring->dev = &adapter->pdev->dev;
708 ring->netdev = adapter->netdev;
709 /* set flag indicating ring supports SCTP checksum offload */
710 if (adapter->hw.mac.type >= e1000_82576)
711 set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
712
713 /*
714 * On i350, i210, and i211, loopback VLAN packets
715 * have the tag byte-swapped.
716 * */
717 if (adapter->hw.mac.type >= e1000_i350)
718 set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
719
720 adapter->rx_ring[i] = ring;
721 }
722
723 igb_cache_ring_register(adapter);
724
725 return 0;
726
727err:
728 igb_free_queues(adapter);
729
730 return -ENOMEM;
731}
732
733/** 655/**
734 * igb_write_ivar - configure ivar for given MSI-X vector 656 * igb_write_ivar - configure ivar for given MSI-X vector
735 * @hw: pointer to the HW structure 657 * @hw: pointer to the HW structure
@@ -960,6 +882,35 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
960} 882}
961 883
962/** 884/**
885 * igb_free_q_vector - Free memory allocated for specific interrupt vector
886 * @adapter: board private structure to initialize
887 * @v_idx: Index of vector to be freed
888 *
 889 * This function frees the memory allocated to the q_vector.  It also
 890 * deletes any references to the NAPI struct prior to freeing the
 891 * q_vector.
892 **/
893static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
894{
895 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
896
897 if (q_vector->tx.ring)
898 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
899
900 if (q_vector->rx.ring)
 901 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
902
903 adapter->q_vector[v_idx] = NULL;
904 netif_napi_del(&q_vector->napi);
905
906 /*
 907 * igb_get_stats64() might access the rings on this vector,
908 * we must wait a grace period before freeing it.
909 */
910 kfree_rcu(q_vector, rcu);
911}
912
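The kfree_rcu() above only pays off if readers walk the rings under rcu_read_lock(). A minimal sketch of such a reader, loosely modeled on a 64-bit stats path (the function name is hypothetical, and the u64_stats seqcount the real driver uses around the counters is omitted for brevity):

	/* Hypothetical RCU reader: sums Rx byte counts without locking.
	 * Inside rcu_read_lock(), a q_vector freed with kfree_rcu() above
	 * cannot disappear underneath us mid-walk.
	 */
	static u64 example_sum_rx_bytes(struct igb_adapter *adapter)
	{
		u64 bytes = 0;
		int i;

		rcu_read_lock();
		for (i = 0; i < adapter->num_rx_queues; i++) {
			struct igb_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);

			if (ring)
				bytes += ring->rx_stats.bytes;
		}
		rcu_read_unlock();

		return bytes;
	}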
913/**
963 * igb_free_q_vectors - Free memory allocated for interrupt vectors 914 * igb_free_q_vectors - Free memory allocated for interrupt vectors
964 * @adapter: board private structure to initialize 915 * @adapter: board private structure to initialize
965 * 916 *
@@ -969,17 +920,14 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
969 **/ 920 **/
970static void igb_free_q_vectors(struct igb_adapter *adapter) 921static void igb_free_q_vectors(struct igb_adapter *adapter)
971{ 922{
972 int v_idx; 923 int v_idx = adapter->num_q_vectors;
973 924
974 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { 925 adapter->num_tx_queues = 0;
975 struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; 926 adapter->num_rx_queues = 0;
976 adapter->q_vector[v_idx] = NULL;
977 if (!q_vector)
978 continue;
979 netif_napi_del(&q_vector->napi);
980 kfree(q_vector);
981 }
982 adapter->num_q_vectors = 0; 927 adapter->num_q_vectors = 0;
928
929 while (v_idx--)
930 igb_free_q_vector(adapter, v_idx);
983} 931}
984 932
985/** 933/**
@@ -990,7 +938,6 @@ static void igb_free_q_vectors(struct igb_adapter *adapter)
990 */ 938 */
991static void igb_clear_interrupt_scheme(struct igb_adapter *adapter) 939static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
992{ 940{
993 igb_free_queues(adapter);
994 igb_free_q_vectors(adapter); 941 igb_free_q_vectors(adapter);
995 igb_reset_interrupt_capability(adapter); 942 igb_reset_interrupt_capability(adapter);
996} 943}
@@ -1001,7 +948,7 @@ static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
1001 * Attempt to configure interrupts using the best available 948 * Attempt to configure interrupts using the best available
1002 * capabilities of the hardware and kernel. 949 * capabilities of the hardware and kernel.
1003 **/ 950 **/
1004static int igb_set_interrupt_capability(struct igb_adapter *adapter) 951static void igb_set_interrupt_capability(struct igb_adapter *adapter)
1005{ 952{
1006 int err; 953 int err;
1007 int numvecs, i; 954 int numvecs, i;
@@ -1038,7 +985,7 @@ static int igb_set_interrupt_capability(struct igb_adapter *adapter)
1038 adapter->msix_entries, 985 adapter->msix_entries,
1039 numvecs); 986 numvecs);
1040 if (err == 0) 987 if (err == 0)
1041 goto out; 988 return;
1042 989
1043 igb_reset_interrupt_capability(adapter); 990 igb_reset_interrupt_capability(adapter);
1044 991
@@ -1068,105 +1015,183 @@ msi_only:
1068 adapter->num_q_vectors = 1; 1015 adapter->num_q_vectors = 1;
1069 if (!pci_enable_msi(adapter->pdev)) 1016 if (!pci_enable_msi(adapter->pdev))
1070 adapter->flags |= IGB_FLAG_HAS_MSI; 1017 adapter->flags |= IGB_FLAG_HAS_MSI;
1071out: 1018}
1072 /* Notify the stack of the (possibly) reduced queue counts. */ 1019
1073 rtnl_lock(); 1020static void igb_add_ring(struct igb_ring *ring,
1074 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); 1021 struct igb_ring_container *head)
1075 err = netif_set_real_num_rx_queues(adapter->netdev, 1022{
1076 adapter->num_rx_queues); 1023 head->ring = ring;
1077 rtnl_unlock(); 1024 head->count++;
1078 return err;
1079} 1025}
1080 1026
1081/** 1027/**
1082 * igb_alloc_q_vectors - Allocate memory for interrupt vectors 1028 * igb_alloc_q_vector - Allocate memory for a single interrupt vector
1083 * @adapter: board private structure to initialize 1029 * @adapter: board private structure to initialize
1030 * @v_count: q_vectors allocated on adapter, used for ring interleaving
1031 * @v_idx: index of vector in adapter struct
1032 * @txr_count: total number of Tx rings to allocate
1033 * @txr_idx: index of first Tx ring to allocate
1034 * @rxr_count: total number of Rx rings to allocate
1035 * @rxr_idx: index of first Rx ring to allocate
1084 * 1036 *
1085 * We allocate one q_vector per queue interrupt. If allocation fails we 1037 * We allocate one q_vector. If allocation fails we return -ENOMEM.
1086 * return -ENOMEM.
1087 **/ 1038 **/
1088static int igb_alloc_q_vectors(struct igb_adapter *adapter) 1039static int igb_alloc_q_vector(struct igb_adapter *adapter,
1040 int v_count, int v_idx,
1041 int txr_count, int txr_idx,
1042 int rxr_count, int rxr_idx)
1089{ 1043{
1090 struct igb_q_vector *q_vector; 1044 struct igb_q_vector *q_vector;
1091 struct e1000_hw *hw = &adapter->hw; 1045 struct igb_ring *ring;
1092 int v_idx; 1046 int ring_count, size;
1093 1047
1094 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { 1048 /* igb only supports 1 Tx and/or 1 Rx queue per vector */
1095 q_vector = kzalloc(sizeof(struct igb_q_vector), 1049 if (txr_count > 1 || rxr_count > 1)
1096 GFP_KERNEL); 1050 return -ENOMEM;
1097 if (!q_vector) 1051
1098 goto err_out; 1052 ring_count = txr_count + rxr_count;
1099 q_vector->adapter = adapter; 1053 size = sizeof(struct igb_q_vector) +
1100 q_vector->itr_register = hw->hw_addr + E1000_EITR(0); 1054 (sizeof(struct igb_ring) * ring_count);
1101 q_vector->itr_val = IGB_START_ITR; 1055
1102 netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64); 1056 /* allocate q_vector and rings */
1103 adapter->q_vector[v_idx] = q_vector; 1057 q_vector = kzalloc(size, GFP_KERNEL);
1058 if (!q_vector)
1059 return -ENOMEM;
1060
1061 /* initialize NAPI */
1062 netif_napi_add(adapter->netdev, &q_vector->napi,
1063 igb_poll, 64);
1064
1065 /* tie q_vector and adapter together */
1066 adapter->q_vector[v_idx] = q_vector;
1067 q_vector->adapter = adapter;
1068
1069 /* initialize work limits */
1070 q_vector->tx.work_limit = adapter->tx_work_limit;
1071
1072 /* initialize ITR configuration */
1073 q_vector->itr_register = adapter->hw.hw_addr + E1000_EITR(0);
1074 q_vector->itr_val = IGB_START_ITR;
1075
1076 /* initialize pointer to rings */
1077 ring = q_vector->ring;
1078
1079 if (txr_count) {
1080 /* assign generic ring traits */
1081 ring->dev = &adapter->pdev->dev;
1082 ring->netdev = adapter->netdev;
1083
1084 /* configure backlink on ring */
1085 ring->q_vector = q_vector;
1086
1087 /* update q_vector Tx values */
1088 igb_add_ring(ring, &q_vector->tx);
1089
1090 /* For 82575, context index must be unique per ring. */
1091 if (adapter->hw.mac.type == e1000_82575)
1092 set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
1093
1094 /* apply Tx specific ring traits */
1095 ring->count = adapter->tx_ring_count;
1096 ring->queue_index = txr_idx;
1097
1098 /* assign ring to adapter */
1099 adapter->tx_ring[txr_idx] = ring;
1100
1101 /* push pointer to next ring */
1102 ring++;
1104 } 1103 }
1105 1104
1106 return 0; 1105 if (rxr_count) {
1106 /* assign generic ring traits */
1107 ring->dev = &adapter->pdev->dev;
1108 ring->netdev = adapter->netdev;
1107 1109
1108err_out: 1110 /* configure backlink on ring */
1109 igb_free_q_vectors(adapter); 1111 ring->q_vector = q_vector;
1110 return -ENOMEM;
1111}
1112 1112
1113static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter, 1113 /* update q_vector Rx values */
1114 int ring_idx, int v_idx) 1114 igb_add_ring(ring, &q_vector->rx);
1115{
1116 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
1117 1115
1118 q_vector->rx.ring = adapter->rx_ring[ring_idx]; 1116 /* set flag indicating ring supports SCTP checksum offload */
1119 q_vector->rx.ring->q_vector = q_vector; 1117 if (adapter->hw.mac.type >= e1000_82576)
1120 q_vector->rx.count++; 1118 set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
1121 q_vector->itr_val = adapter->rx_itr_setting;
1122 if (q_vector->itr_val && q_vector->itr_val <= 3)
1123 q_vector->itr_val = IGB_START_ITR;
1124}
1125 1119
1126static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter, 1120 /*
1127 int ring_idx, int v_idx) 1121 * On i350, i210, and i211, loopback VLAN packets
1128{ 1122 * have the tag byte-swapped.
1129 struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; 1123 */
1124 if (adapter->hw.mac.type >= e1000_i350)
1125 set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
1130 1126
1131 q_vector->tx.ring = adapter->tx_ring[ring_idx]; 1127 /* apply Rx specific ring traits */
1132 q_vector->tx.ring->q_vector = q_vector; 1128 ring->count = adapter->rx_ring_count;
1133 q_vector->tx.count++; 1129 ring->queue_index = rxr_idx;
1134 q_vector->itr_val = adapter->tx_itr_setting; 1130
1135 q_vector->tx.work_limit = adapter->tx_work_limit; 1131 /* assign ring to adapter */
1136 if (q_vector->itr_val && q_vector->itr_val <= 3) 1132 adapter->rx_ring[rxr_idx] = ring;
1137 q_vector->itr_val = IGB_START_ITR; 1133 }
1134
1135 return 0;
1138} 1136}
1139 1137
1138
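The allocation above works because the rings live in the same kzalloc() block as their q_vector, laid out directly behind the header, so freeing the vector frees its rings in one shot. A stripped-down sketch of the pattern with illustrative types (not the driver's actual structures):

	/* One allocation holds the vector header plus a run-time number of
	 * rings; the zero-length array must be the last member.
	 */
	struct example_vector {
		int itr_val;
		/* ... other per-vector state ... */
		struct example_ring {
			int count;
			int queue_index;
		} ring[0];		/* rings start right after the header */
	};

	static struct example_vector *example_alloc(int ring_count)
	{
		size_t size = sizeof(struct example_vector) +
			      ring_count * sizeof(struct example_ring);

		return kzalloc(size, GFP_KERNEL);
	}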
1140/** 1139/**
1141 * igb_map_ring_to_vector - maps allocated queues to vectors 1140 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
1141 * @adapter: board private structure to initialize
1142 * 1142 *
1143 * This function maps the recently allocated queues to vectors. 1143 * We allocate one q_vector per queue interrupt. If allocation fails we
1144 * return -ENOMEM.
1144 **/ 1145 **/
1145static int igb_map_ring_to_vector(struct igb_adapter *adapter) 1146static int igb_alloc_q_vectors(struct igb_adapter *adapter)
1146{ 1147{
1147 int i; 1148 int q_vectors = adapter->num_q_vectors;
1148 int v_idx = 0; 1149 int rxr_remaining = adapter->num_rx_queues;
1150 int txr_remaining = adapter->num_tx_queues;
1151 int rxr_idx = 0, txr_idx = 0, v_idx = 0;
1152 int err;
1149 1153
1150 if ((adapter->num_q_vectors < adapter->num_rx_queues) || 1154 if (q_vectors >= (rxr_remaining + txr_remaining)) {
1151 (adapter->num_q_vectors < adapter->num_tx_queues)) 1155 for (; rxr_remaining; v_idx++) {
1152 return -ENOMEM; 1156 err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
1157 0, 0, 1, rxr_idx);
1153 1158
1154 if (adapter->num_q_vectors >= 1159 if (err)
1155 (adapter->num_rx_queues + adapter->num_tx_queues)) { 1160 goto err_out;
1156 for (i = 0; i < adapter->num_rx_queues; i++) 1161
1157 igb_map_rx_ring_to_vector(adapter, i, v_idx++); 1162 /* update counts and index */
1158 for (i = 0; i < adapter->num_tx_queues; i++) 1163 rxr_remaining--;
1159 igb_map_tx_ring_to_vector(adapter, i, v_idx++); 1164 rxr_idx++;
1160 } else {
1161 for (i = 0; i < adapter->num_rx_queues; i++) {
1162 if (i < adapter->num_tx_queues)
1163 igb_map_tx_ring_to_vector(adapter, i, v_idx);
1164 igb_map_rx_ring_to_vector(adapter, i, v_idx++);
1165 } 1165 }
1166 for (; i < adapter->num_tx_queues; i++)
1167 igb_map_tx_ring_to_vector(adapter, i, v_idx++);
1168 } 1166 }
1167
1168 for (; v_idx < q_vectors; v_idx++) {
1169 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
1170 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
1171 err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
1172 tqpv, txr_idx, rqpv, rxr_idx);
1173
1174 if (err)
1175 goto err_out;
1176
1177 /* update counts and index */
1178 rxr_remaining -= rqpv;
1179 txr_remaining -= tqpv;
1180 rxr_idx++;
1181 txr_idx++;
1182 }
1183
1169 return 0; 1184 return 0;
1185
1186err_out:
1187 adapter->num_tx_queues = 0;
1188 adapter->num_rx_queues = 0;
1189 adapter->num_q_vectors = 0;
1190
1191 while (v_idx--)
1192 igb_free_q_vector(adapter, v_idx);
1193
1194 return -ENOMEM;
1170} 1195}
1171 1196
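When queues outnumber vectors, the DIV_ROUND_UP() loop above spreads the remainder as evenly as possible, recomputing each share from what is left: 8 Rx queues over 3 vectors come out as 3/3/2. The same arithmetic in isolation (a standalone sketch, not driver code):

	static void example_distribute(int queues, int vectors)
	{
		int v_idx;

		for (v_idx = 0; v_idx < vectors; v_idx++) {
			int share = DIV_ROUND_UP(queues, vectors - v_idx);

			pr_info("vector %d gets %d queue(s)\n", v_idx, share);
			queues -= share;
		}
	}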
1172/** 1197/**
@@ -1179,9 +1204,7 @@ static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
1179 struct pci_dev *pdev = adapter->pdev; 1204 struct pci_dev *pdev = adapter->pdev;
1180 int err; 1205 int err;
1181 1206
1182 err = igb_set_interrupt_capability(adapter); 1207 igb_set_interrupt_capability(adapter);
1183 if (err)
1184 return err;
1185 1208
1186 err = igb_alloc_q_vectors(adapter); 1209 err = igb_alloc_q_vectors(adapter);
1187 if (err) { 1210 if (err) {
@@ -1189,24 +1212,10 @@ static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
1189 goto err_alloc_q_vectors; 1212 goto err_alloc_q_vectors;
1190 } 1213 }
1191 1214
1192 err = igb_alloc_queues(adapter); 1215 igb_cache_ring_register(adapter);
1193 if (err) {
1194 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
1195 goto err_alloc_queues;
1196 }
1197
1198 err = igb_map_ring_to_vector(adapter);
1199 if (err) {
1200 dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
1201 goto err_map_queues;
1202 }
1203
1204 1216
1205 return 0; 1217 return 0;
1206err_map_queues: 1218
1207 igb_free_queues(adapter);
1208err_alloc_queues:
1209 igb_free_q_vectors(adapter);
1210err_alloc_q_vectors: 1219err_alloc_q_vectors:
1211 igb_reset_interrupt_capability(adapter); 1220 igb_reset_interrupt_capability(adapter);
1212 return err; 1221 return err;
@@ -1229,11 +1238,11 @@ static int igb_request_irq(struct igb_adapter *adapter)
1229 if (!err) 1238 if (!err)
1230 goto request_done; 1239 goto request_done;
1231 /* fall back to MSI */ 1240 /* fall back to MSI */
1241 igb_free_all_tx_resources(adapter);
1242 igb_free_all_rx_resources(adapter);
1232 igb_clear_interrupt_scheme(adapter); 1243 igb_clear_interrupt_scheme(adapter);
1233 if (!pci_enable_msi(pdev)) 1244 if (!pci_enable_msi(pdev))
1234 adapter->flags |= IGB_FLAG_HAS_MSI; 1245 adapter->flags |= IGB_FLAG_HAS_MSI;
1235 igb_free_all_tx_resources(adapter);
1236 igb_free_all_rx_resources(adapter);
1237 adapter->num_tx_queues = 1; 1246 adapter->num_tx_queues = 1;
1238 adapter->num_rx_queues = 1; 1247 adapter->num_rx_queues = 1;
1239 adapter->num_q_vectors = 1; 1248 adapter->num_q_vectors = 1;
@@ -1243,13 +1252,6 @@ static int igb_request_irq(struct igb_adapter *adapter)
1243 "Unable to allocate memory for vectors\n"); 1252 "Unable to allocate memory for vectors\n");
1244 goto request_done; 1253 goto request_done;
1245 } 1254 }
1246 err = igb_alloc_queues(adapter);
1247 if (err) {
1248 dev_err(&pdev->dev,
1249 "Unable to allocate memory for queues\n");
1250 igb_free_q_vectors(adapter);
1251 goto request_done;
1252 }
1253 igb_setup_all_tx_resources(adapter); 1255 igb_setup_all_tx_resources(adapter);
1254 igb_setup_all_rx_resources(adapter); 1256 igb_setup_all_rx_resources(adapter);
1255 } 1257 }
@@ -1706,10 +1708,8 @@ void igb_reset(struct igb_adapter *adapter)
1706 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ 1708 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
1707 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE); 1709 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
1708 1710
1709#ifdef CONFIG_IGB_PTP
1710 /* Re-enable PTP, where applicable. */ 1711 /* Re-enable PTP, where applicable. */
1711 igb_ptp_reset(adapter); 1712 igb_ptp_reset(adapter);
1712#endif /* CONFIG_IGB_PTP */
1713 1713
1714 igb_get_phy_info(hw); 1714 igb_get_phy_info(hw);
1715} 1715}
@@ -1783,58 +1783,34 @@ static const struct net_device_ops igb_netdev_ops = {
1783void igb_set_fw_version(struct igb_adapter *adapter) 1783void igb_set_fw_version(struct igb_adapter *adapter)
1784{ 1784{
1785 struct e1000_hw *hw = &adapter->hw; 1785 struct e1000_hw *hw = &adapter->hw;
1786 u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset; 1786 struct e1000_fw_version fw;
1787 u16 major, build, patch, fw_version; 1787
1788 u32 etrack_id; 1788 igb_get_fw_version(hw, &fw);
1789 1789
1790 hw->nvm.ops.read(hw, 5, 1, &fw_version); 1790 switch (hw->mac.type) {
1791 if (adapter->hw.mac.type != e1000_i211) { 1791 case e1000_i211:
1792 hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verh);
1793 hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verl);
1794 etrack_id = (eeprom_verh << IGB_ETRACK_SHIFT) | eeprom_verl;
1795
1796 /* combo image version needs to be found */
1797 hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
1798 if ((comb_offset != 0x0) &&
1799 (comb_offset != IGB_NVM_VER_INVALID)) {
1800 hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
1801 + 1), 1, &comb_verh);
1802 hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
1803 1, &comb_verl);
1804
1805 /* Only display Option Rom if it exists and is valid */
1806 if ((comb_verh && comb_verl) &&
1807 ((comb_verh != IGB_NVM_VER_INVALID) &&
1808 (comb_verl != IGB_NVM_VER_INVALID))) {
1809 major = comb_verl >> IGB_COMB_VER_SHFT;
1810 build = (comb_verl << IGB_COMB_VER_SHFT) |
1811 (comb_verh >> IGB_COMB_VER_SHFT);
1812 patch = comb_verh & IGB_COMB_VER_MASK;
1813 snprintf(adapter->fw_version,
1814 sizeof(adapter->fw_version),
1815 "%d.%d%d, 0x%08x, %d.%d.%d",
1816 (fw_version & IGB_MAJOR_MASK) >>
1817 IGB_MAJOR_SHIFT,
1818 (fw_version & IGB_MINOR_MASK) >>
1819 IGB_MINOR_SHIFT,
1820 (fw_version & IGB_BUILD_MASK),
1821 etrack_id, major, build, patch);
1822 goto out;
1823 }
1824 }
1825 snprintf(adapter->fw_version, sizeof(adapter->fw_version),
1826 "%d.%d%d, 0x%08x",
1827 (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT,
1828 (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT,
1829 (fw_version & IGB_BUILD_MASK), etrack_id);
1830 } else {
1831 snprintf(adapter->fw_version, sizeof(adapter->fw_version), 1792 snprintf(adapter->fw_version, sizeof(adapter->fw_version),
1832 "%d.%d%d", 1793 "%2d.%2d-%d",
1833 (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT, 1794 fw.invm_major, fw.invm_minor, fw.invm_img_type);
1834 (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT, 1795 break;
1835 (fw_version & IGB_BUILD_MASK)); 1796
1797 default:
 1798 /* if option rom is valid, display its version too */
1799 if (fw.or_valid) {
1800 snprintf(adapter->fw_version,
1801 sizeof(adapter->fw_version),
1802 "%d.%d, 0x%08x, %d.%d.%d",
1803 fw.eep_major, fw.eep_minor, fw.etrack_id,
1804 fw.or_major, fw.or_build, fw.or_patch);
1805 /* no option rom */
1806 } else {
1807 snprintf(adapter->fw_version,
1808 sizeof(adapter->fw_version),
1809 "%d.%d, 0x%08x",
1810 fw.eep_major, fw.eep_minor, fw.etrack_id);
1811 }
1812 break;
1836 } 1813 }
1837out:
1838 return; 1814 return;
1839} 1815}
1840 1816
@@ -2141,10 +2117,8 @@ static int __devinit igb_probe(struct pci_dev *pdev,
2141 2117
2142#endif 2118#endif
2143 2119
2144#ifdef CONFIG_IGB_PTP
2145 /* do hw tstamp init after resetting */ 2120 /* do hw tstamp init after resetting */
2146 igb_ptp_init(adapter); 2121 igb_ptp_init(adapter);
2147#endif /* CONFIG_IGB_PTP */
2148 2122
2149 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); 2123 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
2150 /* print bus type/speed/width info */ 2124 /* print bus type/speed/width info */
@@ -2219,9 +2193,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
2219 struct e1000_hw *hw = &adapter->hw; 2193 struct e1000_hw *hw = &adapter->hw;
2220 2194
2221 pm_runtime_get_noresume(&pdev->dev); 2195 pm_runtime_get_noresume(&pdev->dev);
2222#ifdef CONFIG_IGB_PTP
2223 igb_ptp_stop(adapter); 2196 igb_ptp_stop(adapter);
2224#endif /* CONFIG_IGB_PTP */
2225 2197
2226 /* 2198 /*
2227 * The watchdog timer may be rescheduled, so explicitly 2199 * The watchdog timer may be rescheduled, so explicitly
@@ -2531,6 +2503,17 @@ static int __igb_open(struct net_device *netdev, bool resuming)
2531 if (err) 2503 if (err)
2532 goto err_req_irq; 2504 goto err_req_irq;
2533 2505
2506 /* Notify the stack of the actual queue counts. */
2507 err = netif_set_real_num_tx_queues(adapter->netdev,
2508 adapter->num_tx_queues);
2509 if (err)
2510 goto err_set_queues;
2511
2512 err = netif_set_real_num_rx_queues(adapter->netdev,
2513 adapter->num_rx_queues);
2514 if (err)
2515 goto err_set_queues;
2516
2534 /* From here on the code is the same as igb_up() */ 2517 /* From here on the code is the same as igb_up() */
2535 clear_bit(__IGB_DOWN, &adapter->state); 2518 clear_bit(__IGB_DOWN, &adapter->state);
2536 2519
@@ -2560,6 +2543,8 @@ static int __igb_open(struct net_device *netdev, bool resuming)
2560 2543
2561 return 0; 2544 return 0;
2562 2545
2546err_set_queues:
2547 igb_free_irq(adapter);
2563err_req_irq: 2548err_req_irq:
2564 igb_release_hw_control(adapter); 2549 igb_release_hw_control(adapter);
2565 igb_power_down_link(adapter); 2550 igb_power_down_link(adapter);
@@ -2637,10 +2622,8 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
2637 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); 2622 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
2638 tx_ring->size = ALIGN(tx_ring->size, 4096); 2623 tx_ring->size = ALIGN(tx_ring->size, 4096);
2639 2624
2640 tx_ring->desc = dma_alloc_coherent(dev, 2625 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
2641 tx_ring->size, 2626 &tx_ring->dma, GFP_KERNEL);
2642 &tx_ring->dma,
2643 GFP_KERNEL);
2644 if (!tx_ring->desc) 2627 if (!tx_ring->desc)
2645 goto err; 2628 goto err;
2646 2629
@@ -2777,18 +2760,16 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
2777 if (!rx_ring->rx_buffer_info) 2760 if (!rx_ring->rx_buffer_info)
2778 goto err; 2761 goto err;
2779 2762
2780
2781 /* Round up to nearest 4K */ 2763 /* Round up to nearest 4K */
2782 rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc); 2764 rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
2783 rx_ring->size = ALIGN(rx_ring->size, 4096); 2765 rx_ring->size = ALIGN(rx_ring->size, 4096);
2784 2766
2785 rx_ring->desc = dma_alloc_coherent(dev, 2767 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
2786 rx_ring->size, 2768 &rx_ring->dma, GFP_KERNEL);
2787 &rx_ring->dma,
2788 GFP_KERNEL);
2789 if (!rx_ring->desc) 2769 if (!rx_ring->desc)
2790 goto err; 2770 goto err;
2791 2771
2772 rx_ring->next_to_alloc = 0;
2792 rx_ring->next_to_clean = 0; 2773 rx_ring->next_to_clean = 0;
2793 rx_ring->next_to_use = 0; 2774 rx_ring->next_to_use = 0;
2794 2775
@@ -3106,16 +3087,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
3106 3087
3107 /* set descriptor configuration */ 3088 /* set descriptor configuration */
3108 srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; 3089 srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
3109#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384 3090 srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
3110 srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT; 3091 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
3111#else
3112 srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
3113#endif
3114 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
3115#ifdef CONFIG_IGB_PTP
3116 if (hw->mac.type >= e1000_82580) 3092 if (hw->mac.type >= e1000_82580)
3117 srrctl |= E1000_SRRCTL_TIMESTAMP; 3093 srrctl |= E1000_SRRCTL_TIMESTAMP;
3118#endif /* CONFIG_IGB_PTP */
3119 /* Only set Drop Enable if we are supporting multiple queues */ 3094 /* Only set Drop Enable if we are supporting multiple queues */
3120 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1) 3095 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
3121 srrctl |= E1000_SRRCTL_DROP_EN; 3096 srrctl |= E1000_SRRCTL_DROP_EN;
@@ -3305,36 +3280,27 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
3305 unsigned long size; 3280 unsigned long size;
3306 u16 i; 3281 u16 i;
3307 3282
3283 if (rx_ring->skb)
3284 dev_kfree_skb(rx_ring->skb);
3285 rx_ring->skb = NULL;
3286
3308 if (!rx_ring->rx_buffer_info) 3287 if (!rx_ring->rx_buffer_info)
3309 return; 3288 return;
3310 3289
3311 /* Free all the Rx ring sk_buffs */ 3290 /* Free all the Rx ring sk_buffs */
3312 for (i = 0; i < rx_ring->count; i++) { 3291 for (i = 0; i < rx_ring->count; i++) {
3313 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; 3292 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
3314 if (buffer_info->dma) {
3315 dma_unmap_single(rx_ring->dev,
3316 buffer_info->dma,
3317 IGB_RX_HDR_LEN,
3318 DMA_FROM_DEVICE);
3319 buffer_info->dma = 0;
3320 }
3321 3293
3322 if (buffer_info->skb) { 3294 if (!buffer_info->page)
3323 dev_kfree_skb(buffer_info->skb); 3295 continue;
3324 buffer_info->skb = NULL; 3296
3325 } 3297 dma_unmap_page(rx_ring->dev,
3326 if (buffer_info->page_dma) { 3298 buffer_info->dma,
3327 dma_unmap_page(rx_ring->dev, 3299 PAGE_SIZE,
3328 buffer_info->page_dma, 3300 DMA_FROM_DEVICE);
3329 PAGE_SIZE / 2, 3301 __free_page(buffer_info->page);
3330 DMA_FROM_DEVICE); 3302
3331 buffer_info->page_dma = 0; 3303 buffer_info->page = NULL;
3332 }
3333 if (buffer_info->page) {
3334 put_page(buffer_info->page);
3335 buffer_info->page = NULL;
3336 buffer_info->page_offset = 0;
3337 }
3338 } 3304 }
3339 3305
3340 size = sizeof(struct igb_rx_buffer) * rx_ring->count; 3306 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
@@ -3343,6 +3309,7 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
3343 /* Zero out the descriptor ring */ 3309 /* Zero out the descriptor ring */
3344 memset(rx_ring->desc, 0, rx_ring->size); 3310 memset(rx_ring->desc, 0, rx_ring->size);
3345 3311
3312 rx_ring->next_to_alloc = 0;
3346 rx_ring->next_to_clean = 0; 3313 rx_ring->next_to_clean = 0;
3347 rx_ring->next_to_use = 0; 3314 rx_ring->next_to_use = 0;
3348} 3315}
@@ -4159,11 +4126,9 @@ static __le32 igb_tx_cmd_type(u32 tx_flags)
4159 if (tx_flags & IGB_TX_FLAGS_VLAN) 4126 if (tx_flags & IGB_TX_FLAGS_VLAN)
4160 cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE); 4127 cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE);
4161 4128
4162#ifdef CONFIG_IGB_PTP
4163 /* set timestamp bit if present */ 4129 /* set timestamp bit if present */
4164 if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP)) 4130 if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP))
4165 cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP); 4131 cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP);
4166#endif /* CONFIG_IGB_PTP */
4167 4132
4168 /* set segmentation bits for TSO */ 4133 /* set segmentation bits for TSO */
4169 if (tx_flags & IGB_TX_FLAGS_TSO) 4134 if (tx_flags & IGB_TX_FLAGS_TSO)
@@ -4372,9 +4337,7 @@ static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
4372netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, 4337netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
4373 struct igb_ring *tx_ring) 4338 struct igb_ring *tx_ring)
4374{ 4339{
4375#ifdef CONFIG_IGB_PTP
4376 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); 4340 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
4377#endif /* CONFIG_IGB_PTP */
4378 struct igb_tx_buffer *first; 4341 struct igb_tx_buffer *first;
4379 int tso; 4342 int tso;
4380 u32 tx_flags = 0; 4343 u32 tx_flags = 0;
@@ -4397,7 +4360,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
4397 first->bytecount = skb->len; 4360 first->bytecount = skb->len;
4398 first->gso_segs = 1; 4361 first->gso_segs = 1;
4399 4362
4400#ifdef CONFIG_IGB_PTP
4401 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 4363 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4402 !(adapter->ptp_tx_skb))) { 4364 !(adapter->ptp_tx_skb))) {
4403 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 4365 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
@@ -4407,7 +4369,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
4407 if (adapter->hw.mac.type == e1000_82576) 4369 if (adapter->hw.mac.type == e1000_82576)
4408 schedule_work(&adapter->ptp_tx_work); 4370 schedule_work(&adapter->ptp_tx_work);
4409 } 4371 }
4410#endif /* CONFIG_IGB_PTP */
4411 4372
4412 if (vlan_tx_tag_present(skb)) { 4373 if (vlan_tx_tag_present(skb)) {
4413 tx_flags |= IGB_TX_FLAGS_VLAN; 4374 tx_flags |= IGB_TX_FLAGS_VLAN;
@@ -4467,10 +4428,11 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
4467 * The minimum packet size with TCTL.PSP set is 17 so pad the skb 4428 * The minimum packet size with TCTL.PSP set is 17 so pad the skb
4468 * in order to meet this minimum size requirement. 4429 * in order to meet this minimum size requirement.
4469 */ 4430 */
4470 if (skb->len < 17) { 4431 if (unlikely(skb->len < 17)) {
4471 if (skb_padto(skb, 17)) 4432 if (skb_pad(skb, 17 - skb->len))
4472 return NETDEV_TX_OK; 4433 return NETDEV_TX_OK;
4473 skb->len = 17; 4434 skb->len = 17;
4435 skb_set_tail_pointer(skb, 17);
4474 } 4436 }
4475 4437
4476 return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb)); 4438 return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
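Neither skb_padto() nor skb_pad() updates skb->len or the tail pointer; they only zero the tailroom. The old code adjusted skb->len but left the tail pointer stale, which is what the added skb_set_tail_pointer() fixes. A hedged sketch of the full pattern for an arbitrary minimum (MIN_TX_LEN is a stand-in, not a driver constant):

	#define MIN_TX_LEN 17

	static int example_pad_short_frame(struct sk_buff *skb)
	{
		if (skb->len >= MIN_TX_LEN)
			return 0;

		/* zeroes the tailroom; frees the skb and returns an
		 * error on failure
		 */
		if (skb_pad(skb, MIN_TX_LEN - skb->len))
			return -ENOMEM;

		skb->len = MIN_TX_LEN;
		skb_set_tail_pointer(skb, MIN_TX_LEN);

		return 0;
	}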
@@ -4800,7 +4762,6 @@ static irqreturn_t igb_msix_other(int irq, void *data)
4800 mod_timer(&adapter->watchdog_timer, jiffies + 1); 4762 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4801 } 4763 }
4802 4764
4803#ifdef CONFIG_IGB_PTP
4804 if (icr & E1000_ICR_TS) { 4765 if (icr & E1000_ICR_TS) {
4805 u32 tsicr = rd32(E1000_TSICR); 4766 u32 tsicr = rd32(E1000_TSICR);
4806 4767
@@ -4811,7 +4772,6 @@ static irqreturn_t igb_msix_other(int irq, void *data)
4811 schedule_work(&adapter->ptp_tx_work); 4772 schedule_work(&adapter->ptp_tx_work);
4812 } 4773 }
4813 } 4774 }
4814#endif /* CONFIG_IGB_PTP */
4815 4775
4816 wr32(E1000_EIMS, adapter->eims_other); 4776 wr32(E1000_EIMS, adapter->eims_other);
4817 4777
@@ -4851,45 +4811,63 @@ static irqreturn_t igb_msix_ring(int irq, void *data)
4851} 4811}
4852 4812
4853#ifdef CONFIG_IGB_DCA 4813#ifdef CONFIG_IGB_DCA
4814static void igb_update_tx_dca(struct igb_adapter *adapter,
4815 struct igb_ring *tx_ring,
4816 int cpu)
4817{
4818 struct e1000_hw *hw = &adapter->hw;
4819 u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
4820
4821 if (hw->mac.type != e1000_82575)
4822 txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;
4823
4824 /*
4825 * We can enable relaxed ordering for reads, but not writes when
4826 * DCA is enabled. This is due to a known issue in some chipsets
4827 * which will cause the DCA tag to be cleared.
4828 */
4829 txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN |
4830 E1000_DCA_TXCTRL_DATA_RRO_EN |
4831 E1000_DCA_TXCTRL_DESC_DCA_EN;
4832
4833 wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
4834}
4835
4836static void igb_update_rx_dca(struct igb_adapter *adapter,
4837 struct igb_ring *rx_ring,
4838 int cpu)
4839{
4840 struct e1000_hw *hw = &adapter->hw;
4841 u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);
4842
4843 if (hw->mac.type != e1000_82575)
4844 rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;
4845
4846 /*
4847 * We can enable relaxed ordering for reads, but not writes when
4848 * DCA is enabled. This is due to a known issue in some chipsets
4849 * which will cause the DCA tag to be cleared.
4850 */
4851 rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN |
4852 E1000_DCA_RXCTRL_DESC_DCA_EN;
4853
4854 wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
4855}
4856
4854static void igb_update_dca(struct igb_q_vector *q_vector) 4857static void igb_update_dca(struct igb_q_vector *q_vector)
4855{ 4858{
4856 struct igb_adapter *adapter = q_vector->adapter; 4859 struct igb_adapter *adapter = q_vector->adapter;
4857 struct e1000_hw *hw = &adapter->hw;
4858 int cpu = get_cpu(); 4860 int cpu = get_cpu();
4859 4861
4860 if (q_vector->cpu == cpu) 4862 if (q_vector->cpu == cpu)
4861 goto out_no_update; 4863 goto out_no_update;
4862 4864
4863 if (q_vector->tx.ring) { 4865 if (q_vector->tx.ring)
4864 int q = q_vector->tx.ring->reg_idx; 4866 igb_update_tx_dca(adapter, q_vector->tx.ring, cpu);
4865 u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q)); 4867
4866 if (hw->mac.type == e1000_82575) { 4868 if (q_vector->rx.ring)
4867 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK; 4869 igb_update_rx_dca(adapter, q_vector->rx.ring, cpu);
4868 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); 4870
4869 } else {
4870 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
4871 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4872 E1000_DCA_TXCTRL_CPUID_SHIFT;
4873 }
4874 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
4875 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
4876 }
4877 if (q_vector->rx.ring) {
4878 int q = q_vector->rx.ring->reg_idx;
4879 u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
4880 if (hw->mac.type == e1000_82575) {
4881 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
4882 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4883 } else {
4884 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
4885 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4886 E1000_DCA_RXCTRL_CPUID_SHIFT;
4887 }
4888 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
4889 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
4890 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
4891 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
4892 }
4893 q_vector->cpu = cpu; 4871 q_vector->cpu = cpu;
4894out_no_update: 4872out_no_update:
4895 put_cpu(); 4873 put_cpu();
@@ -5545,7 +5523,6 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
5545 mod_timer(&adapter->watchdog_timer, jiffies + 1); 5523 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5546 } 5524 }
5547 5525
5548#ifdef CONFIG_IGB_PTP
5549 if (icr & E1000_ICR_TS) { 5526 if (icr & E1000_ICR_TS) {
5550 u32 tsicr = rd32(E1000_TSICR); 5527 u32 tsicr = rd32(E1000_TSICR);
5551 5528
@@ -5556,7 +5533,6 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
5556 schedule_work(&adapter->ptp_tx_work); 5533 schedule_work(&adapter->ptp_tx_work);
5557 } 5534 }
5558 } 5535 }
5559#endif /* CONFIG_IGB_PTP */
5560 5536
5561 napi_schedule(&q_vector->napi); 5537 napi_schedule(&q_vector->napi);
5562 5538
@@ -5599,7 +5575,6 @@ static irqreturn_t igb_intr(int irq, void *data)
5599 mod_timer(&adapter->watchdog_timer, jiffies + 1); 5575 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5600 } 5576 }
5601 5577
5602#ifdef CONFIG_IGB_PTP
5603 if (icr & E1000_ICR_TS) { 5578 if (icr & E1000_ICR_TS) {
5604 u32 tsicr = rd32(E1000_TSICR); 5579 u32 tsicr = rd32(E1000_TSICR);
5605 5580
@@ -5610,7 +5585,6 @@ static irqreturn_t igb_intr(int irq, void *data)
5610 schedule_work(&adapter->ptp_tx_work); 5585 schedule_work(&adapter->ptp_tx_work);
5611 } 5586 }
5612 } 5587 }
5613#endif /* CONFIG_IGB_PTP */
5614 5588
5615 napi_schedule(&q_vector->napi); 5589 napi_schedule(&q_vector->napi);
5616 5590
@@ -5840,6 +5814,181 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
5840 return !!budget; 5814 return !!budget;
5841} 5815}
5842 5816
5817/**
5818 * igb_reuse_rx_page - page flip buffer and store it back on the ring
5819 * @rx_ring: rx descriptor ring to store buffers on
5820 * @old_buff: donor buffer to have page reused
5821 *
5822 * Synchronizes page for reuse by the adapter
5823 **/
5824static void igb_reuse_rx_page(struct igb_ring *rx_ring,
5825 struct igb_rx_buffer *old_buff)
5826{
5827 struct igb_rx_buffer *new_buff;
5828 u16 nta = rx_ring->next_to_alloc;
5829
5830 new_buff = &rx_ring->rx_buffer_info[nta];
5831
5832 /* update, and store next to alloc */
5833 nta++;
5834 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
5835
5836 /* transfer page from old buffer to new buffer */
5837 memcpy(new_buff, old_buff, sizeof(struct igb_rx_buffer));
5838
5839 /* sync the buffer for use by the device */
5840 dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
5841 old_buff->page_offset,
5842 IGB_RX_BUFSZ,
5843 DMA_FROM_DEVICE);
5844}
5845
5846/**
5847 * igb_add_rx_frag - Add contents of Rx buffer to sk_buff
5848 * @rx_ring: rx descriptor ring to transact packets on
5849 * @rx_buffer: buffer containing page to add
5850 * @rx_desc: descriptor containing length of buffer written by hardware
5851 * @skb: sk_buff to place the data into
5852 *
5853 * This function will add the data contained in rx_buffer->page to the skb.
 5854 * This is done with a direct copy if the data in the buffer is less
 5855 * than the skb header size; otherwise the page is attached to the skb
 5856 * as a frag.
5857 *
5858 * The function will then update the page offset if necessary and return
5859 * true if the buffer can be reused by the adapter.
5860 **/
5861static bool igb_add_rx_frag(struct igb_ring *rx_ring,
5862 struct igb_rx_buffer *rx_buffer,
5863 union e1000_adv_rx_desc *rx_desc,
5864 struct sk_buff *skb)
5865{
5866 struct page *page = rx_buffer->page;
5867 unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
5868
5869 if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
5870 unsigned char *va = page_address(page) + rx_buffer->page_offset;
5871
5872 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
5873 igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
5874 va += IGB_TS_HDR_LEN;
5875 size -= IGB_TS_HDR_LEN;
5876 }
5877
5878 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
5879
5880 /* we can reuse buffer as-is, just make sure it is local */
5881 if (likely(page_to_nid(page) == numa_node_id()))
5882 return true;
5883
5884 /* this page cannot be reused so discard it */
5885 put_page(page);
5886 return false;
5887 }
5888
5889 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
5890 rx_buffer->page_offset, size, IGB_RX_BUFSZ);
5891
5892 /* avoid re-using remote pages */
5893 if (unlikely(page_to_nid(page) != numa_node_id()))
5894 return false;
5895
5896#if (PAGE_SIZE < 8192)
5897 /* if we are only owner of page we can reuse it */
5898 if (unlikely(page_count(page) != 1))
5899 return false;
5900
5901 /* flip page offset to other buffer */
5902 rx_buffer->page_offset ^= IGB_RX_BUFSZ;
5903
5904 /*
5905 * since we are the only owner of the page and we need to
5906 * increment it, just set the value to 2 in order to avoid
5907 * an unnecessary locked operation
5908 */
5909 atomic_set(&page->_count, 2);
5910#else
5911 /* move offset up to the next cache line */
5912 rx_buffer->page_offset += SKB_DATA_ALIGN(size);
5913
5914 if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
5915 return false;
5916
5917 /* bump ref count on page before it is given to the stack */
5918 get_page(page);
5919#endif
5920
5921 return true;
5922}
5923
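On 4K-page systems the XOR above bounces page_offset between the two half-page buffers, so the CPU and the device always work on opposite halves, and a page with no other owner can be handed straight back to hardware. The flip in isolation (an illustrative sketch; bufsz corresponds to IGB_RX_BUFSZ, assumed here to be half of PAGE_SIZE as in this patch):

	static bool example_try_flip(struct page *page, unsigned int *offset,
				     unsigned int bufsz)
	{
		/* someone else still holds a reference: cannot recycle */
		if (page_count(page) != 1)
			return false;

		*offset ^= bufsz;	/* switch to the other half-page */

		return true;
	}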
5924static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
5925 union e1000_adv_rx_desc *rx_desc,
5926 struct sk_buff *skb)
5927{
5928 struct igb_rx_buffer *rx_buffer;
5929 struct page *page;
5930
5931 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
5932
5933 /*
5934 * This memory barrier is needed to keep us from reading
5935 * any other fields out of the rx_desc until we know the
5936 * RXD_STAT_DD bit is set
5937 */
5938 rmb();
5939
5940 page = rx_buffer->page;
5941 prefetchw(page);
5942
5943 if (likely(!skb)) {
5944 void *page_addr = page_address(page) +
5945 rx_buffer->page_offset;
5946
5947 /* prefetch first cache line of first page */
5948 prefetch(page_addr);
5949#if L1_CACHE_BYTES < 128
5950 prefetch(page_addr + L1_CACHE_BYTES);
5951#endif
5952
5953 /* allocate a skb to store the frags */
5954 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
5955 IGB_RX_HDR_LEN);
5956 if (unlikely(!skb)) {
5957 rx_ring->rx_stats.alloc_failed++;
5958 return NULL;
5959 }
5960
5961 /*
5962 * we will be copying header into skb->data in
5963 * pskb_may_pull so it is in our interest to prefetch
5964 * it now to avoid a possible cache miss
5965 */
5966 prefetchw(skb->data);
5967 }
5968
5969 /* we are reusing so sync this buffer for CPU use */
5970 dma_sync_single_range_for_cpu(rx_ring->dev,
5971 rx_buffer->dma,
5972 rx_buffer->page_offset,
5973 IGB_RX_BUFSZ,
5974 DMA_FROM_DEVICE);
5975
5976 /* pull page into skb */
5977 if (igb_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
5978 /* hand second half of page back to the ring */
5979 igb_reuse_rx_page(rx_ring, rx_buffer);
5980 } else {
5981 /* we are not reusing the buffer so unmap it */
5982 dma_unmap_page(rx_ring->dev, rx_buffer->dma,
5983 PAGE_SIZE, DMA_FROM_DEVICE);
5984 }
5985
5986 /* clear contents of rx_buffer */
5987 rx_buffer->page = NULL;
5988
5989 return skb;
5990}
5991
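Note the DMA sync pairing between igb_fetch_rx_buffer() and igb_reuse_rx_page(): a recycled buffer is synced to the CPU before its contents are read, and back to the device before hardware may write it again. The rule in isolation (sketch only; the function name is made up):

	static void example_sync_pair(struct device *dev, dma_addr_t dma,
				      unsigned int off, unsigned int len)
	{
		/* CPU is about to read the received data */
		dma_sync_single_range_for_cpu(dev, dma, off, len,
					      DMA_FROM_DEVICE);

		/* ... inspect or copy packet contents here ... */

		/* hand the (possibly flipped) half back to the device */
		dma_sync_single_range_for_device(dev, dma, off, len,
						 DMA_FROM_DEVICE);
	}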
5843static inline void igb_rx_checksum(struct igb_ring *ring, 5992static inline void igb_rx_checksum(struct igb_ring *ring,
5844 union e1000_adv_rx_desc *rx_desc, 5993 union e1000_adv_rx_desc *rx_desc,
5845 struct sk_buff *skb) 5994 struct sk_buff *skb)
@@ -5889,224 +6038,386 @@ static inline void igb_rx_hash(struct igb_ring *ring,
5889 skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); 6038 skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
5890} 6039}
5891 6040
5892static void igb_rx_vlan(struct igb_ring *ring, 6041/**
5893 union e1000_adv_rx_desc *rx_desc, 6042 * igb_is_non_eop - process handling of non-EOP buffers
5894 struct sk_buff *skb) 6043 * @rx_ring: Rx ring being processed
6044 * @rx_desc: Rx descriptor for current buffer
 6045 *
 6046 * This function updates next to clean for the current buffer.  If the
 6047 * buffer is an EOP buffer this function exits returning false,
 6048 * otherwise it advances the ring to the next buffer in the frame and
 6049 * returns true, indicating that this is in fact a non-EOP buffer whose
 6050 * fragments are still being assembled.
6051 **/
6052static bool igb_is_non_eop(struct igb_ring *rx_ring,
6053 union e1000_adv_rx_desc *rx_desc)
5895{ 6054{
5896 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) { 6055 u32 ntc = rx_ring->next_to_clean + 1;
5897 u16 vid;
5898 if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
5899 test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags))
5900 vid = be16_to_cpu(rx_desc->wb.upper.vlan);
5901 else
5902 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
5903 6056
5904 __vlan_hwaccel_put_tag(skb, vid); 6057 /* fetch, update, and store next to clean */
5905 } 6058 ntc = (ntc < rx_ring->count) ? ntc : 0;
6059 rx_ring->next_to_clean = ntc;
6060
6061 prefetch(IGB_RX_DESC(rx_ring, ntc));
6062
6063 if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
6064 return false;
6065
6066 return true;
5906} 6067}
5907 6068
5908static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc) 6069/**
5909{ 6070 * igb_get_headlen - determine size of header for LRO/GRO
5910 /* HW will not DMA in data larger than the given buffer, even if it 6071 * @data: pointer to the start of the headers
5911 * parses the (NFS, of course) header to be larger. In that case, it 6072 * @max_len: total length of section to find headers in
5912 * fills the header buffer and spills the rest into the page. 6073 *
6074 * This function is meant to determine the length of headers that will
6075 * be recognized by hardware for LRO, and GRO offloads. The main
6076 * motivation of doing this is to only perform one pull for IPv4 TCP
6077 * packets so that we can do basic things like calculating the gso_size
6078 * based on the average data per packet.
6079 **/
6080static unsigned int igb_get_headlen(unsigned char *data,
6081 unsigned int max_len)
6082{
6083 union {
6084 unsigned char *network;
6085 /* l2 headers */
6086 struct ethhdr *eth;
6087 struct vlan_hdr *vlan;
6088 /* l3 headers */
6089 struct iphdr *ipv4;
6090 struct ipv6hdr *ipv6;
6091 } hdr;
6092 __be16 protocol;
6093 u8 nexthdr = 0; /* default to not TCP */
6094 u8 hlen;
6095
6096 /* this should never happen, but better safe than sorry */
6097 if (max_len < ETH_HLEN)
6098 return max_len;
6099
6100 /* initialize network frame pointer */
6101 hdr.network = data;
6102
6103 /* set first protocol and move network header forward */
6104 protocol = hdr.eth->h_proto;
6105 hdr.network += ETH_HLEN;
6106
6107 /* handle any vlan tag if present */
6108 if (protocol == __constant_htons(ETH_P_8021Q)) {
6109 if ((hdr.network - data) > (max_len - VLAN_HLEN))
6110 return max_len;
6111
6112 protocol = hdr.vlan->h_vlan_encapsulated_proto;
6113 hdr.network += VLAN_HLEN;
6114 }
6115
6116 /* handle L3 protocols */
6117 if (protocol == __constant_htons(ETH_P_IP)) {
6118 if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
6119 return max_len;
6120
6121 /* access ihl as a u8 to avoid unaligned access on ia64 */
6122 hlen = (hdr.network[0] & 0x0F) << 2;
6123
6124 /* verify hlen meets minimum size requirements */
6125 if (hlen < sizeof(struct iphdr))
6126 return hdr.network - data;
6127
6128 /* record next protocol */
6129 nexthdr = hdr.ipv4->protocol;
6130 hdr.network += hlen;
6131 } else if (protocol == __constant_htons(ETH_P_IPV6)) {
6132 if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
6133 return max_len;
6134
6135 /* record next protocol */
6136 nexthdr = hdr.ipv6->nexthdr;
6137 hdr.network += sizeof(struct ipv6hdr);
6138 } else {
6139 return hdr.network - data;
6140 }
6141
6142 /* finally sort out TCP */
6143 if (nexthdr == IPPROTO_TCP) {
6144 if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
6145 return max_len;
6146
6147 /* access doff as a u8 to avoid unaligned access on ia64 */
6148 hlen = (hdr.network[12] & 0xF0) >> 2;
6149
6150 /* verify hlen meets minimum size requirements */
6151 if (hlen < sizeof(struct tcphdr))
6152 return hdr.network - data;
6153
6154 hdr.network += hlen;
6155 } else if (nexthdr == IPPROTO_UDP) {
6156 if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
6157 return max_len;
6158
6159 hdr.network += sizeof(struct udphdr);
6160 }
6161
6162 /*
6163 * If everything has gone correctly hdr.network should be the
6164 * data section of the packet and will be the end of the header.
6165 * If not then it probably represents the end of the last recognized
6166 * header.
5913 */ 6167 */
5914 u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) & 6168 if ((hdr.network - data) < max_len)
5915 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT; 6169 return hdr.network - data;
5916 if (hlen > IGB_RX_HDR_LEN) 6170 else
5917 hlen = IGB_RX_HDR_LEN; 6171 return max_len;
5918 return hlen;
5919} 6172}
5920 6173
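For a plain IPv4/TCP frame with no options the walk above yields ETH_HLEN (14) + 20 + 20 = 54 bytes, an IPv4/UDP frame yields 42, and a VLAN tag adds 4 to either. A small sketch spelling out those expected values (purely illustrative, not a test harness):

	static void example_expected_headlen(void)
	{
		unsigned int tcp4 = ETH_HLEN + sizeof(struct iphdr) +
				    sizeof(struct tcphdr);   /* 14+20+20 = 54 */
		unsigned int udp4 = ETH_HLEN + sizeof(struct iphdr) +
				    sizeof(struct udphdr);   /* 14+20+8  = 42 */
		unsigned int vlan_tcp4 = VLAN_HLEN + tcp4;   /* 4+54 = 58 */

		pr_info("tcp4=%u udp4=%u vlan_tcp4=%u\n",
			tcp4, udp4, vlan_tcp4);
	}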
5921static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget) 6174/**
6175 * igb_pull_tail - igb specific version of skb_pull_tail
6176 * @rx_ring: rx descriptor ring packet is being transacted on
6177 * @rx_desc: pointer to the EOP Rx descriptor
6178 * @skb: pointer to current skb being adjusted
6179 *
6180 * This function is an igb specific version of __pskb_pull_tail. The
6181 * main difference between this version and the original function is that
6182 * this function can make several assumptions about the state of things
6183 * that allow for significant optimizations versus the standard function.
6184 * As a result we can do things like drop a frag and maintain an accurate
6185 * truesize for the skb.
6186 */
6187static void igb_pull_tail(struct igb_ring *rx_ring,
6188 union e1000_adv_rx_desc *rx_desc,
6189 struct sk_buff *skb)
5922{ 6190{
5923 struct igb_ring *rx_ring = q_vector->rx.ring; 6191 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
5924 union e1000_adv_rx_desc *rx_desc; 6192 unsigned char *va;
5925 const int current_node = numa_node_id(); 6193 unsigned int pull_len;
5926 unsigned int total_bytes = 0, total_packets = 0;
5927 u16 cleaned_count = igb_desc_unused(rx_ring);
5928 u16 i = rx_ring->next_to_clean;
5929 6194
5930 rx_desc = IGB_RX_DESC(rx_ring, i); 6195 /*
6196 * it is valid to use page_address instead of kmap since we are
 6197 * working with pages allocated out of the lowmem pool per
6198 * alloc_page(GFP_ATOMIC)
6199 */
6200 va = skb_frag_address(frag);
5931 6201
5932 while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) { 6202 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
5933 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; 6203 /* retrieve timestamp from buffer */
5934 struct sk_buff *skb = buffer_info->skb; 6204 igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
5935 union e1000_adv_rx_desc *next_rxd;
5936 6205
5937 buffer_info->skb = NULL; 6206 /* update pointers to remove timestamp header */
5938 prefetch(skb->data); 6207 skb_frag_size_sub(frag, IGB_TS_HDR_LEN);
6208 frag->page_offset += IGB_TS_HDR_LEN;
6209 skb->data_len -= IGB_TS_HDR_LEN;
6210 skb->len -= IGB_TS_HDR_LEN;
5939 6211
5940 i++; 6212 /* move va to start of packet data */
5941 if (i == rx_ring->count) 6213 va += IGB_TS_HDR_LEN;
5942 i = 0; 6214 }
6215
6216 /*
6217 * we need the header to contain the greater of either ETH_HLEN or
6218 * 60 bytes if the skb->len is less than 60 for skb_pad.
6219 */
6220 pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN);
5943 6221
5944 next_rxd = IGB_RX_DESC(rx_ring, i); 6222 /* align pull length to size of long to optimize memcpy performance */
5945 prefetch(next_rxd); 6223 skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
5946 6224
5947 /* 6225 /* update all of the pointers */
5948 * This memory barrier is needed to keep us from reading 6226 skb_frag_size_sub(frag, pull_len);
5949 * any other fields out of the rx_desc until we know the 6227 frag->page_offset += pull_len;
5950 * RXD_STAT_DD bit is set 6228 skb->data_len -= pull_len;
5951 */ 6229 skb->tail += pull_len;
5952 rmb(); 6230}
5953 6231
5954 if (!skb_is_nonlinear(skb)) { 6232/**
5955 __skb_put(skb, igb_get_hlen(rx_desc)); 6233 * igb_cleanup_headers - Correct corrupted or empty headers
5956 dma_unmap_single(rx_ring->dev, buffer_info->dma, 6234 * @rx_ring: rx descriptor ring packet is being transacted on
5957 IGB_RX_HDR_LEN, 6235 * @rx_desc: pointer to the EOP Rx descriptor
5958 DMA_FROM_DEVICE); 6236 * @skb: pointer to current skb being fixed
5959 buffer_info->dma = 0; 6237 *
6238 * Address the case where we are pulling data in on pages only
6239 * and as such no data is present in the skb header.
6240 *
6241 * In addition if skb is not at least 60 bytes we need to pad it so that
6242 * it is large enough to qualify as a valid Ethernet frame.
6243 *
6244 * Returns true if an error was encountered and skb was freed.
6245 **/
6246static bool igb_cleanup_headers(struct igb_ring *rx_ring,
6247 union e1000_adv_rx_desc *rx_desc,
6248 struct sk_buff *skb)
6249{
6250
6251 if (unlikely((igb_test_staterr(rx_desc,
6252 E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
6253 struct net_device *netdev = rx_ring->netdev;
6254 if (!(netdev->features & NETIF_F_RXALL)) {
6255 dev_kfree_skb_any(skb);
6256 return true;
5960 } 6257 }
6258 }
5961 6259
5962 if (rx_desc->wb.upper.length) { 6260 /* place header in linear portion of buffer */
5963 u16 length = le16_to_cpu(rx_desc->wb.upper.length); 6261 if (skb_is_nonlinear(skb))
6262 igb_pull_tail(rx_ring, rx_desc, skb);
5964 6263
5965 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, 6264 /* if skb_pad returns an error the skb was freed */
5966 buffer_info->page, 6265 if (unlikely(skb->len < 60)) {
5967 buffer_info->page_offset, 6266 int pad_len = 60 - skb->len;
5968 length);
5969 6267
5970 skb->len += length; 6268 if (skb_pad(skb, pad_len))
5971 skb->data_len += length; 6269 return true;
5972 skb->truesize += PAGE_SIZE / 2; 6270 __skb_put(skb, pad_len);
6271 }
5973 6272
5974 if ((page_count(buffer_info->page) != 1) || 6273 return false;
5975 (page_to_nid(buffer_info->page) != current_node)) 6274}
5976 buffer_info->page = NULL;
5977 else
5978 get_page(buffer_info->page);
5979 6275
5980 dma_unmap_page(rx_ring->dev, buffer_info->page_dma, 6276/**
5981 PAGE_SIZE / 2, DMA_FROM_DEVICE); 6277 * igb_process_skb_fields - Populate skb header fields from Rx descriptor
5982 buffer_info->page_dma = 0; 6278 * @rx_ring: rx descriptor ring packet is being transacted on
5983 } 6279 * @rx_desc: pointer to the EOP Rx descriptor
6280 * @skb: pointer to current skb being populated
6281 *
6282 * This function checks the ring, descriptor, and packet information in
6283 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
6284 * other fields within the skb.
6285 **/
6286static void igb_process_skb_fields(struct igb_ring *rx_ring,
6287 union e1000_adv_rx_desc *rx_desc,
6288 struct sk_buff *skb)
6289{
6290 struct net_device *dev = rx_ring->netdev;
5984 6291
5985 if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)) { 6292 igb_rx_hash(rx_ring, rx_desc, skb);
5986 struct igb_rx_buffer *next_buffer;
5987 next_buffer = &rx_ring->rx_buffer_info[i];
5988 buffer_info->skb = next_buffer->skb;
5989 buffer_info->dma = next_buffer->dma;
5990 next_buffer->skb = skb;
5991 next_buffer->dma = 0;
5992 goto next_desc;
5993 }
5994 6293
5995 if (unlikely((igb_test_staterr(rx_desc, 6294 igb_rx_checksum(rx_ring, rx_desc, skb);
5996 E1000_RXDEXT_ERR_FRAME_ERR_MASK))
5997 && !(rx_ring->netdev->features & NETIF_F_RXALL))) {
5998 dev_kfree_skb_any(skb);
5999 goto next_desc;
6000 }
6001 6295
6002#ifdef CONFIG_IGB_PTP 6296 igb_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
6003 igb_ptp_rx_hwtstamp(q_vector, rx_desc, skb);
6004#endif /* CONFIG_IGB_PTP */
6005 igb_rx_hash(rx_ring, rx_desc, skb);
6006 igb_rx_checksum(rx_ring, rx_desc, skb);
6007 igb_rx_vlan(rx_ring, rx_desc, skb);
6008 6297
6009 total_bytes += skb->len; 6298 if ((dev->features & NETIF_F_HW_VLAN_RX) &&
6010 total_packets++; 6299 igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
6300 u16 vid;
6301 if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
6302 test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
6303 vid = be16_to_cpu(rx_desc->wb.upper.vlan);
6304 else
6305 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
6011 6306
6012 skb->protocol = eth_type_trans(skb, rx_ring->netdev); 6307 __vlan_hwaccel_put_tag(skb, vid);
6308 }
6013 6309
6014 napi_gro_receive(&q_vector->napi, skb); 6310 skb_record_rx_queue(skb, rx_ring->queue_index);
6015 6311
6016 budget--; 6312 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
6017next_desc: 6313}
6018 if (!budget) 6314
6019 break; 6315static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
6316{
6317 struct igb_ring *rx_ring = q_vector->rx.ring;
6318 struct sk_buff *skb = rx_ring->skb;
6319 unsigned int total_bytes = 0, total_packets = 0;
6320 u16 cleaned_count = igb_desc_unused(rx_ring);
6321
6322 do {
6323 union e1000_adv_rx_desc *rx_desc;
6020 6324
6021 cleaned_count++;
6022 /* return some buffers to hardware, one at a time is too slow */ 6325 /* return some buffers to hardware, one at a time is too slow */
6023 if (cleaned_count >= IGB_RX_BUFFER_WRITE) { 6326 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
6024 igb_alloc_rx_buffers(rx_ring, cleaned_count); 6327 igb_alloc_rx_buffers(rx_ring, cleaned_count);
6025 cleaned_count = 0; 6328 cleaned_count = 0;
6026 } 6329 }
6027 6330
6028 /* use prefetched values */ 6331 rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
6029 rx_desc = next_rxd;
6030 }
6031 6332
6032 rx_ring->next_to_clean = i; 6333 if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
6033 u64_stats_update_begin(&rx_ring->rx_syncp); 6334 break;
6034 rx_ring->rx_stats.packets += total_packets;
6035 rx_ring->rx_stats.bytes += total_bytes;
6036 u64_stats_update_end(&rx_ring->rx_syncp);
6037 q_vector->rx.total_packets += total_packets;
6038 q_vector->rx.total_bytes += total_bytes;
6039 6335
6040 if (cleaned_count) 6336 /* retrieve a buffer from the ring */
6041 igb_alloc_rx_buffers(rx_ring, cleaned_count); 6337 skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
6042 6338
6043 return !!budget; 6339 /* exit if we failed to retrieve a buffer */
6044} 6340 if (!skb)
6341 break;
6045 6342
6046static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring, 6343 cleaned_count++;
6047 struct igb_rx_buffer *bi)
6048{
6049 struct sk_buff *skb = bi->skb;
6050 dma_addr_t dma = bi->dma;
6051 6344
6052 if (dma) 6345 /* fetch next buffer in frame if non-eop */
6053 return true; 6346 if (igb_is_non_eop(rx_ring, rx_desc))
6347 continue;
6054 6348
6055 if (likely(!skb)) { 6349 /* verify the packet layout is correct */
6056 skb = netdev_alloc_skb_ip_align(rx_ring->netdev, 6350 if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
6057 IGB_RX_HDR_LEN); 6351 skb = NULL;
6058 bi->skb = skb; 6352 continue;
6059 if (!skb) {
6060 rx_ring->rx_stats.alloc_failed++;
6061 return false;
6062 } 6353 }
6063 6354
6064 /* initialize skb for ring */ 6355 /* probably a little skewed due to removing CRC */
6065 skb_record_rx_queue(skb, rx_ring->queue_index); 6356 total_bytes += skb->len;
6066 }
6067 6357
6068 dma = dma_map_single(rx_ring->dev, skb->data, 6358 /* populate checksum, timestamp, VLAN, and protocol */
6069 IGB_RX_HDR_LEN, DMA_FROM_DEVICE); 6359 igb_process_skb_fields(rx_ring, rx_desc, skb);
6070 6360
6071 if (dma_mapping_error(rx_ring->dev, dma)) { 6361 napi_gro_receive(&q_vector->napi, skb);
6072 rx_ring->rx_stats.alloc_failed++;
6073 return false;
6074 }
6075 6362
6076 bi->dma = dma; 6363 /* reset skb pointer */
6077 return true; 6364 skb = NULL;
6365
6366 /* update budget accounting */
6367 total_packets++;
6368 } while (likely(total_packets < budget));
6369
6370 /* place incomplete frames back on ring for completion */
6371 rx_ring->skb = skb;
6372
6373 u64_stats_update_begin(&rx_ring->rx_syncp);
6374 rx_ring->rx_stats.packets += total_packets;
6375 rx_ring->rx_stats.bytes += total_bytes;
6376 u64_stats_update_end(&rx_ring->rx_syncp);
6377 q_vector->rx.total_packets += total_packets;
6378 q_vector->rx.total_bytes += total_bytes;
6379
6380 if (cleaned_count)
6381 igb_alloc_rx_buffers(rx_ring, cleaned_count);
6382
6383 return (total_packets < budget);
6078} 6384}
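
The rewritten igb_clean_rx_irq() counts completed frames upward against the budget rather than decrementing it, and its boolean result tells the caller whether the ring ran dry. A minimal sketch of that NAPI contract, assuming a hypothetical driver poll handler (my_poll/my_clean_rx are illustrative names, not igb API):

#include <linux/netdevice.h>

/* Returns true when the ring was fully cleaned before the budget ran out. */
static bool my_clean_rx(struct napi_struct *napi, int budget);

static int my_poll(struct napi_struct *napi, int budget)
{
        /* Budget is spent, not returned: report all of it as consumed so
         * the core keeps us in polling mode while work remains. */
        if (!my_clean_rx(napi, budget))
                return budget;

        /* Ring is clean: exit polling and let interrupts be re-enabled. */
        napi_complete(napi);
        return 0;
}

Returning (total_packets < budget) from the cleanup routine is what makes this caller-side logic work.
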
6079 6385
6080static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, 6386static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
6081 struct igb_rx_buffer *bi) 6387 struct igb_rx_buffer *bi)
6082{ 6388{
6083 struct page *page = bi->page; 6389 struct page *page = bi->page;
6084 dma_addr_t page_dma = bi->page_dma; 6390 dma_addr_t dma;
6085 unsigned int page_offset = bi->page_offset ^ (PAGE_SIZE / 2);
6086 6391
6087 if (page_dma) 6392 /* since we are recycling buffers we should seldom need to alloc */
6393 if (likely(page))
6088 return true; 6394 return true;
6089 6395
6090 if (!page) { 6396 /* alloc new page for storage */
6091 page = __skb_alloc_page(GFP_ATOMIC, bi->skb); 6397 page = __skb_alloc_page(GFP_ATOMIC | __GFP_COLD, NULL);
6092 bi->page = page; 6398 if (unlikely(!page)) {
6093 if (unlikely(!page)) { 6399 rx_ring->rx_stats.alloc_failed++;
6094 rx_ring->rx_stats.alloc_failed++; 6400 return false;
6095 return false;
6096 }
6097 } 6401 }
6098 6402
6099 page_dma = dma_map_page(rx_ring->dev, page, 6403 /* map page for use */
6100 page_offset, PAGE_SIZE / 2, 6404 dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
6101 DMA_FROM_DEVICE); 6405
6406 /*
6407 * if mapping failed free memory back to system since
6408 * there isn't much point in holding memory we can't use
6409 */
6410 if (dma_mapping_error(rx_ring->dev, dma)) {
6411 __free_page(page);
6102 6412
6103 if (dma_mapping_error(rx_ring->dev, page_dma)) {
6104 rx_ring->rx_stats.alloc_failed++; 6413 rx_ring->rx_stats.alloc_failed++;
6105 return false; 6414 return false;
6106 } 6415 }
6107 6416
6108 bi->page_dma = page_dma; 6417 bi->dma = dma;
6109 bi->page_offset = page_offset; 6418 bi->page = page;
6419 bi->page_offset = 0;
6420
6110 return true; 6421 return true;
6111} 6422}
6112 6423
@@ -6120,22 +6431,23 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
6120 struct igb_rx_buffer *bi; 6431 struct igb_rx_buffer *bi;
6121 u16 i = rx_ring->next_to_use; 6432 u16 i = rx_ring->next_to_use;
6122 6433
6434 /* nothing to do */
6435 if (!cleaned_count)
6436 return;
6437
6123 rx_desc = IGB_RX_DESC(rx_ring, i); 6438 rx_desc = IGB_RX_DESC(rx_ring, i);
6124 bi = &rx_ring->rx_buffer_info[i]; 6439 bi = &rx_ring->rx_buffer_info[i];
6125 i -= rx_ring->count; 6440 i -= rx_ring->count;
6126 6441
6127 while (cleaned_count--) { 6442 do {
6128 if (!igb_alloc_mapped_skb(rx_ring, bi))
6129 break;
6130
6131 /* Refresh the desc even if buffer_addrs didn't change
6132 * because each write-back erases this info. */
6133 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
6134
6135 if (!igb_alloc_mapped_page(rx_ring, bi)) 6443 if (!igb_alloc_mapped_page(rx_ring, bi))
6136 break; 6444 break;
6137 6445
6138 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); 6446 /*
6447 * Refresh the desc even if buffer_addrs didn't change
6448 * because each write-back erases this info.
6449 */
6450 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
6139 6451
6140 rx_desc++; 6452 rx_desc++;
6141 bi++; 6453 bi++;
@@ -6148,17 +6460,25 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
6148 6460
6149 /* clear the hdr_addr for the next_to_use descriptor */ 6461 /* clear the hdr_addr for the next_to_use descriptor */
6150 rx_desc->read.hdr_addr = 0; 6462 rx_desc->read.hdr_addr = 0;
6151 } 6463
6464 cleaned_count--;
6465 } while (cleaned_count);
6152 6466
6153 i += rx_ring->count; 6467 i += rx_ring->count;
6154 6468
6155 if (rx_ring->next_to_use != i) { 6469 if (rx_ring->next_to_use != i) {
6470 /* record the next descriptor to use */
6156 rx_ring->next_to_use = i; 6471 rx_ring->next_to_use = i;
6157 6472
6158 /* Force memory writes to complete before letting h/w 6473 /* update next to alloc since we have filled the ring */
6474 rx_ring->next_to_alloc = i;
6475
6476 /*
6477 * Force memory writes to complete before letting h/w
6159 * know there are new descriptors to fetch. (Only 6478 * know there are new descriptors to fetch. (Only
6160 * applicable for weak-ordered memory model archs, 6479 * applicable for weak-ordered memory model archs,
6161 * such as IA-64). */ 6480 * such as IA-64).
6481 */
6162 wmb(); 6482 wmb();
6163 writel(i, rx_ring->tail); 6483 writel(i, rx_ring->tail);
6164 } 6484 }
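
The comment reflow above preserves the reason for the barrier: descriptor writes must be visible to the device before the tail doorbell rings. Condensed into a sketch (struct my_ring and its fields are illustrative):

#include <linux/io.h>

struct my_ring {
        u16 next_to_use;
        void __iomem *tail;     /* mapped tail register */
};

static void publish_rx_descs(struct my_ring *ring, u16 i)
{
        ring->next_to_use = i;

        /* Make descriptor memory writes globally visible before telling
         * the NIC to fetch them; only weakly ordered archs need this. */
        wmb();
        writel(i, ring->tail);
}
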
@@ -6207,10 +6527,8 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6207 case SIOCGMIIREG: 6527 case SIOCGMIIREG:
6208 case SIOCSMIIREG: 6528 case SIOCSMIIREG:
6209 return igb_mii_ioctl(netdev, ifr, cmd); 6529 return igb_mii_ioctl(netdev, ifr, cmd);
6210#ifdef CONFIG_IGB_PTP
6211 case SIOCSHWTSTAMP: 6530 case SIOCSHWTSTAMP:
6212 return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd); 6531 return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd);
6213#endif /* CONFIG_IGB_PTP */
6214 default: 6532 default:
6215 return -EOPNOTSUPP; 6533 return -EOPNOTSUPP;
6216 } 6534 }
@@ -6492,7 +6810,9 @@ static int igb_resume(struct device *dev)
6492 wr32(E1000_WUS, ~0); 6810 wr32(E1000_WUS, ~0);
6493 6811
6494 if (netdev->flags & IFF_UP) { 6812 if (netdev->flags & IFF_UP) {
6813 rtnl_lock();
6495 err = __igb_open(netdev, true); 6814 err = __igb_open(netdev, true);
6815 rtnl_unlock();
6496 if (err) 6816 if (err)
6497 return err; 6817 return err;
6498 } 6818 }
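
Taking the RTNL mutex around __igb_open() in the resume path matches what every other open caller (the ndo_open path via the core) already guarantees. The shape of the fix, with a hypothetical reopen helper standing in for the driver function:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static int my_open(struct net_device *netdev);  /* stand-in for __igb_open */

static int my_resume_reopen(struct net_device *netdev)
{
        int err = 0;

        if (netdev->flags & IFF_UP) {
                /* open/close paths assume RTNL is held */
                rtnl_lock();
                err = my_open(netdev);
                rtnl_unlock();
        }
        return err;
}
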
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index ee21445157a3..aa10f69f9f16 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -441,18 +441,46 @@ void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
441 adapter->ptp_tx_skb = NULL; 441 adapter->ptp_tx_skb = NULL;
442} 442}
443 443
444void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector, 444/**
445 union e1000_adv_rx_desc *rx_desc, 445 * igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp
446 * @q_vector: Pointer to interrupt specific structure
447 * @va: Pointer to address containing Rx buffer
448 * @skb: Buffer containing timestamp and packet
449 *
450 * This function is meant to retrieve a timestamp from the first buffer of an
 451 * incoming frame. The value is stored in little-endian format starting at
 452 * byte 8.
453 */
454void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
455 unsigned char *va,
456 struct sk_buff *skb)
457{
458 __le64 *regval = (__le64 *)va;
459
460 /*
461 * The timestamp is recorded in little endian format.
462 * DWORD: 0 1 2 3
463 * Field: Reserved Reserved SYSTIML SYSTIMH
464 */
465 igb_ptp_systim_to_hwtstamp(q_vector->adapter, skb_hwtstamps(skb),
466 le64_to_cpu(regval[1]));
467}
468
469/**
470 * igb_ptp_rx_rgtstamp - retrieve Rx timestamp stored in register
471 * @q_vector: Pointer to interrupt specific structure
472 * @skb: Buffer containing timestamp and packet
473 *
474 * This function is meant to retrieve a timestamp from the internal registers
475 * of the adapter and store it in the skb.
476 */
477void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
446 struct sk_buff *skb) 478 struct sk_buff *skb)
447{ 479{
448 struct igb_adapter *adapter = q_vector->adapter; 480 struct igb_adapter *adapter = q_vector->adapter;
449 struct e1000_hw *hw = &adapter->hw; 481 struct e1000_hw *hw = &adapter->hw;
450 u64 regval; 482 u64 regval;
451 483
452 if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP |
453 E1000_RXDADV_STAT_TS))
454 return;
455
456 /* 484 /*
457 * If this bit is set, then the RX registers contain the time stamp. No 485 * If this bit is set, then the RX registers contain the time stamp. No
458 * other packet will be time stamped until we read these registers, so 486 * other packet will be time stamped until we read these registers, so
@@ -464,18 +492,11 @@ void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
464 * If nothing went wrong, then it should have a shared tx_flags that we 492 * If nothing went wrong, then it should have a shared tx_flags that we
465 * can turn into a skb_shared_hwtstamps. 493 * can turn into a skb_shared_hwtstamps.
466 */ 494 */
467 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { 495 if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
468 u32 *stamp = (u32 *)skb->data; 496 return;
469 regval = le32_to_cpu(*(stamp + 2));
470 regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
471 skb_pull(skb, IGB_TS_HDR_LEN);
472 } else {
473 if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
474 return;
475 497
476 regval = rd32(E1000_RXSTMPL); 498 regval = rd32(E1000_RXSTMPL);
477 regval |= (u64)rd32(E1000_RXSTMPH) << 32; 499 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
478 }
479 500
480 igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); 501 igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
481} 502}
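
The split mirrors the two places a stamp can live. For igb_ptp_rx_pktstamp(), the DWORD layout in the comment reduces to one little-endian 64-bit load at byte offset 8; a sketch, assuming va points at the start of the in-buffer timestamp header:

#include <linux/types.h>
#include <asm/byteorder.h>

static u64 inline_rx_systim(const unsigned char *va)
{
        const __le64 *ts = (const __le64 *)va;

        /* DWORDs 0-1 are reserved; DWORDs 2-3 carry SYSTIML:SYSTIMH as a
         * single little-endian 64-bit value starting at byte 8. */
        return le64_to_cpu(ts[1]);
}

For igb_ptp_rx_rgtstamp(), the VALID check must precede the register reads because, as the comment notes, the RXSTMPL/RXSTMPH latch blocks further stamping until it is drained.
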
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 0ac11f527a84..4051ec404613 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -184,6 +184,13 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
184 buffer_info->page_offset, 184 buffer_info->page_offset,
185 PAGE_SIZE / 2, 185 PAGE_SIZE / 2,
186 DMA_FROM_DEVICE); 186 DMA_FROM_DEVICE);
187 if (dma_mapping_error(&pdev->dev,
188 buffer_info->page_dma)) {
189 __free_page(buffer_info->page);
190 buffer_info->page = NULL;
191 dev_err(&pdev->dev, "RX DMA map failed\n");
192 break;
193 }
187 } 194 }
188 195
189 if (!buffer_info->skb) { 196 if (!buffer_info->skb) {
@@ -197,6 +204,12 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
197 buffer_info->dma = dma_map_single(&pdev->dev, skb->data, 204 buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
198 bufsz, 205 bufsz,
199 DMA_FROM_DEVICE); 206 DMA_FROM_DEVICE);
207 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
208 dev_kfree_skb(buffer_info->skb);
209 buffer_info->skb = NULL;
210 dev_err(&pdev->dev, "RX DMA map failed\n");
211 goto no_buffers;
212 }
200 } 213 }
201 /* Refresh the desc even if buffer_addrs didn't change because 214 /* Refresh the desc even if buffer_addrs didn't change because
202 * each write-back erases this info. */ 215 * each write-back erases this info. */
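
Both added checks enforce the same invariant: a buffer whose DMA mapping failed must be released and its pointer cleared, otherwise a later pass would hand the device a stale or zero address. Distilled into a sketch (helper name and parameters are illustrative):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

/* Map an RX skb for the device; on failure, free it so the ring never
 * records an unusable address. Returns 0 or a negative errno. */
static int map_rx_skb(struct device *dev, struct sk_buff **pskb,
                      unsigned int len, dma_addr_t *dma)
{
        *dma = dma_map_single(dev, (*pskb)->data, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, *dma)) {
                dev_kfree_skb(*pskb);
                *pskb = NULL;
                return -ENOMEM;
        }
        return 0;
}
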
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 89f40e51fc13..f3a632bf8d96 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -34,11 +34,10 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
34 34
35ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o ixgbe_debugfs.o\ 35ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o ixgbe_debugfs.o\
36 ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ 36 ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
37 ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o 37 ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o ixgbe_ptp.o
38 38
39ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \ 39ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
40 ixgbe_dcb_82599.o ixgbe_dcb_nl.o 40 ixgbe_dcb_82599.o ixgbe_dcb_nl.o
41 41
42ixgbe-$(CONFIG_IXGBE_PTP) += ixgbe_ptp.o
43ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o 42ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o
44ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o 43ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 30efc9f0f47a..7ff4c4fdcb0d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -36,11 +36,9 @@
36#include <linux/aer.h> 36#include <linux/aer.h>
37#include <linux/if_vlan.h> 37#include <linux/if_vlan.h>
38 38
39#ifdef CONFIG_IXGBE_PTP
40#include <linux/clocksource.h> 39#include <linux/clocksource.h>
41#include <linux/net_tstamp.h> 40#include <linux/net_tstamp.h>
42#include <linux/ptp_clock_kernel.h> 41#include <linux/ptp_clock_kernel.h>
43#endif /* CONFIG_IXGBE_PTP */
44 42
45#include "ixgbe_type.h" 43#include "ixgbe_type.h"
46#include "ixgbe_common.h" 44#include "ixgbe_common.h"
@@ -135,6 +133,7 @@ struct vf_data_storage {
135 u16 tx_rate; 133 u16 tx_rate;
136 u16 vlan_count; 134 u16 vlan_count;
137 u8 spoofchk_enabled; 135 u8 spoofchk_enabled;
136 unsigned int vf_api;
138}; 137};
139 138
140struct vf_macvlans { 139struct vf_macvlans {
@@ -482,7 +481,7 @@ struct ixgbe_adapter {
482#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7) 481#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7)
483#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8) 482#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8)
484#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9) 483#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9)
485#define IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED (u32)(1 << 10) 484#define IXGBE_FLAG2_PTP_ENABLED (u32)(1 << 10)
486#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 11) 485#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 11)
487 486
488 /* Tx fast path data */ 487 /* Tx fast path data */
@@ -571,7 +570,6 @@ struct ixgbe_adapter {
571 u32 interrupt_event; 570 u32 interrupt_event;
572 u32 led_reg; 571 u32 led_reg;
573 572
574#ifdef CONFIG_IXGBE_PTP
575 struct ptp_clock *ptp_clock; 573 struct ptp_clock *ptp_clock;
576 struct ptp_clock_info ptp_caps; 574 struct ptp_clock_info ptp_caps;
577 unsigned long last_overflow_check; 575 unsigned long last_overflow_check;
@@ -580,8 +578,6 @@ struct ixgbe_adapter {
580 struct timecounter tc; 578 struct timecounter tc;
581 int rx_hwtstamp_filter; 579 int rx_hwtstamp_filter;
582 u32 base_incval; 580 u32 base_incval;
583 u32 cycle_speed;
584#endif /* CONFIG_IXGBE_PTP */
585 581
586 /* SR-IOV */ 582 /* SR-IOV */
587 DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS); 583 DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
@@ -600,6 +596,8 @@ struct ixgbe_adapter {
600#ifdef CONFIG_DEBUG_FS 596#ifdef CONFIG_DEBUG_FS
601 struct dentry *ixgbe_dbg_adapter; 597 struct dentry *ixgbe_dbg_adapter;
602#endif /*CONFIG_DEBUG_FS*/ 598#endif /*CONFIG_DEBUG_FS*/
599
600 u8 default_up;
603}; 601};
604 602
605struct ixgbe_fdir_filter { 603struct ixgbe_fdir_filter {
@@ -691,6 +689,7 @@ extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
691 u16 soft_id); 689 u16 soft_id);
692extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, 690extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
693 union ixgbe_atr_input *mask); 691 union ixgbe_atr_input *mask);
692extern bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
694extern void ixgbe_set_rx_mode(struct net_device *netdev); 693extern void ixgbe_set_rx_mode(struct net_device *netdev);
695#ifdef CONFIG_IXGBE_DCB 694#ifdef CONFIG_IXGBE_DCB
696extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter); 695extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
@@ -739,7 +738,6 @@ static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
739 return netdev_get_tx_queue(ring->netdev, ring->queue_index); 738 return netdev_get_tx_queue(ring->netdev, ring->queue_index);
740} 739}
741 740
742#ifdef CONFIG_IXGBE_PTP
743extern void ixgbe_ptp_init(struct ixgbe_adapter *adapter); 741extern void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
744extern void ixgbe_ptp_stop(struct ixgbe_adapter *adapter); 742extern void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
745extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter); 743extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
@@ -751,7 +749,7 @@ extern void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
751extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, 749extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
752 struct ifreq *ifr, int cmd); 750 struct ifreq *ifr, int cmd);
753extern void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter); 751extern void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
752extern void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
754extern void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr); 753extern void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
755#endif /* CONFIG_IXGBE_PTP */
756 754
757#endif /* _IXGBE_H_ */ 755#endif /* _IXGBE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 1077cb2b38db..e75f5a4a2a6d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -62,7 +62,6 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
62 bool autoneg, 62 bool autoneg,
63 bool autoneg_wait_to_complete); 63 bool autoneg_wait_to_complete);
64static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); 64static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
65static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
66 65
67static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) 66static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
68{ 67{
@@ -99,9 +98,8 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
99static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) 98static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
100{ 99{
101 s32 ret_val = 0; 100 s32 ret_val = 0;
102 u32 reg_anlp1 = 0;
103 u32 i = 0;
104 u16 list_offset, data_offset, data_value; 101 u16 list_offset, data_offset, data_value;
102 bool got_lock = false;
105 103
106 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) { 104 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
107 ixgbe_init_mac_link_ops_82599(hw); 105 ixgbe_init_mac_link_ops_82599(hw);
@@ -137,28 +135,36 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
137 usleep_range(hw->eeprom.semaphore_delay * 1000, 135 usleep_range(hw->eeprom.semaphore_delay * 1000,
138 hw->eeprom.semaphore_delay * 2000); 136 hw->eeprom.semaphore_delay * 2000);
139 137
 140 /* Now restart DSP by setting Restart_AN and clearing LMS */ 138 /* Need SW/FW semaphore around AUTOC writes if LESM is on,
 141 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw, 139 * likewise reset_pipeline requires the lock as it also writes
142 IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) | 140 * AUTOC.
143 IXGBE_AUTOC_AN_RESTART)); 141 */
144 142 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
145 /* Wait for AN to leave state 0 */ 143 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
146 for (i = 0; i < 10; i++) { 144 IXGBE_GSSR_MAC_CSR_SM);
147 usleep_range(4000, 8000); 145 if (ret_val)
148 reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1); 146 goto setup_sfp_out;
149 if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK) 147
150 break; 148 got_lock = true;
149 }
150
151 /* Restart DSP and set SFI mode */
152 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
153 IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL));
154
155 ret_val = ixgbe_reset_pipeline_82599(hw);
156
157 if (got_lock) {
158 hw->mac.ops.release_swfw_sync(hw,
159 IXGBE_GSSR_MAC_CSR_SM);
160 got_lock = false;
151 } 161 }
152 if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) { 162
153 hw_dbg(hw, "sfp module setup not complete\n"); 163 if (ret_val) {
164 hw_dbg(hw, " sfp module setup not complete\n");
154 ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; 165 ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
155 goto setup_sfp_out; 166 goto setup_sfp_out;
156 } 167 }
157
158 /* Restart DSP by setting Restart_AN and return to SFI mode */
159 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
160 IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
161 IXGBE_AUTOC_AN_RESTART));
162 } 168 }
163 169
164setup_sfp_out: 170setup_sfp_out:
@@ -394,14 +400,26 @@ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
394 u32 links_reg; 400 u32 links_reg;
395 u32 i; 401 u32 i;
396 s32 status = 0; 402 s32 status = 0;
403 bool got_lock = false;
404
405 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
406 status = hw->mac.ops.acquire_swfw_sync(hw,
407 IXGBE_GSSR_MAC_CSR_SM);
408 if (status)
409 goto out;
410
411 got_lock = true;
412 }
397 413
398 /* Restart link */ 414 /* Restart link */
399 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 415 ixgbe_reset_pipeline_82599(hw);
400 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 416
401 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); 417 if (got_lock)
418 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
402 419
403 /* Only poll for autoneg to complete if specified to do so */ 420 /* Only poll for autoneg to complete if specified to do so */
404 if (autoneg_wait_to_complete) { 421 if (autoneg_wait_to_complete) {
422 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
405 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == 423 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
406 IXGBE_AUTOC_LMS_KX4_KX_KR || 424 IXGBE_AUTOC_LMS_KX4_KX_KR ||
407 (autoc_reg & IXGBE_AUTOC_LMS_MASK) == 425 (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
@@ -425,6 +443,7 @@ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
425 /* Add delay to filter out noises during initial link setup */ 443 /* Add delay to filter out noises during initial link setup */
426 msleep(50); 444 msleep(50);
427 445
446out:
428 return status; 447 return status;
429} 448}
430 449
@@ -779,6 +798,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
779 u32 links_reg; 798 u32 links_reg;
780 u32 i; 799 u32 i;
781 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; 800 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
801 bool got_lock = false;
782 802
783 /* Check to see if speed passed in is supported. */ 803 /* Check to see if speed passed in is supported. */
784 status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities, 804 status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities,
@@ -836,9 +856,26 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
836 } 856 }
837 857
838 if (autoc != start_autoc) { 858 if (autoc != start_autoc) {
859 /* Need SW/FW semaphore around AUTOC writes if LESM is on,
860 * likewise reset_pipeline requires us to hold this lock as
861 * it also writes to AUTOC.
862 */
863 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
864 status = hw->mac.ops.acquire_swfw_sync(hw,
865 IXGBE_GSSR_MAC_CSR_SM);
866 if (status != 0)
867 goto out;
868
869 got_lock = true;
870 }
871
839 /* Restart link */ 872 /* Restart link */
840 autoc |= IXGBE_AUTOC_AN_RESTART;
841 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); 873 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
874 ixgbe_reset_pipeline_82599(hw);
875
876 if (got_lock)
877 hw->mac.ops.release_swfw_sync(hw,
878 IXGBE_GSSR_MAC_CSR_SM);
842 879
843 /* Only poll for autoneg to complete if specified to do so */ 880 /* Only poll for autoneg to complete if specified to do so */
844 if (autoneg_wait_to_complete) { 881 if (autoneg_wait_to_complete) {
@@ -994,9 +1031,28 @@ mac_reset_top:
994 hw->mac.orig_autoc2 = autoc2; 1031 hw->mac.orig_autoc2 = autoc2;
995 hw->mac.orig_link_settings_stored = true; 1032 hw->mac.orig_link_settings_stored = true;
996 } else { 1033 } else {
997 if (autoc != hw->mac.orig_autoc) 1034 if (autoc != hw->mac.orig_autoc) {
998 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc | 1035 /* Need SW/FW semaphore around AUTOC writes if LESM is
999 IXGBE_AUTOC_AN_RESTART)); 1036 * on, likewise reset_pipeline requires us to hold
1037 * this lock as it also writes to AUTOC.
1038 */
1039 bool got_lock = false;
1040 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
1041 status = hw->mac.ops.acquire_swfw_sync(hw,
1042 IXGBE_GSSR_MAC_CSR_SM);
1043 if (status)
1044 goto reset_hw_out;
1045
1046 got_lock = true;
1047 }
1048
1049 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
1050 ixgbe_reset_pipeline_82599(hw);
1051
1052 if (got_lock)
1053 hw->mac.ops.release_swfw_sync(hw,
1054 IXGBE_GSSR_MAC_CSR_SM);
1055 }
1000 1056
1001 if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) != 1057 if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
1002 (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) { 1058 (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
@@ -1983,7 +2039,7 @@ fw_version_out:
1983 * Returns true if the LESM FW module is present and enabled. Otherwise 2039 * Returns true if the LESM FW module is present and enabled. Otherwise
1984 * returns false. Smart Speed must be disabled if LESM FW module is enabled. 2040 * returns false. Smart Speed must be disabled if LESM FW module is enabled.
1985 **/ 2041 **/
1986static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) 2042bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
1987{ 2043{
1988 bool lesm_enabled = false; 2044 bool lesm_enabled = false;
1989 u16 fw_offset, fw_lesm_param_offset, fw_lesm_state; 2045 u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
@@ -2080,6 +2136,50 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
2080 return ret_val; 2136 return ret_val;
2081} 2137}
2082 2138
2139/**
2140 * ixgbe_reset_pipeline_82599 - perform pipeline reset
2141 *
2142 * @hw: pointer to hardware structure
2143 *
2144 * Reset pipeline by asserting Restart_AN together with LMS change to ensure
2145 * full pipeline reset. Note - We must hold the SW/FW semaphore before writing
2146 * to AUTOC, so this function assumes the semaphore is held.
2147 **/
2148s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
2149{
2150 s32 i, autoc_reg, ret_val;
2151 s32 anlp1_reg = 0;
2152
2153 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2154 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2155
2156 /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
2157 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg ^ IXGBE_AUTOC_LMS_1G_AN);
2158
2159 /* Wait for AN to leave state 0 */
2160 for (i = 0; i < 10; i++) {
2161 usleep_range(4000, 8000);
2162 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2163 if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
2164 break;
2165 }
2166
2167 if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
2168 hw_dbg(hw, "auto negotiation not completed\n");
2169 ret_val = IXGBE_ERR_RESET_FAILED;
2170 goto reset_pipeline_out;
2171 }
2172
2173 ret_val = 0;
2174
2175reset_pipeline_out:
2176 /* Write AUTOC register with original LMS field and Restart_AN */
2177 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2178 IXGBE_WRITE_FLUSH(hw);
2179
2180 return ret_val;
2181}
2182
2083static struct ixgbe_mac_operations mac_ops_82599 = { 2183static struct ixgbe_mac_operations mac_ops_82599 = {
2084 .init_hw = &ixgbe_init_hw_generic, 2184 .init_hw = &ixgbe_init_hw_generic,
2085 .reset_hw = &ixgbe_reset_hw_82599, 2185 .reset_hw = &ixgbe_reset_hw_82599,
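
Every AUTOC writer touched by this series now follows one pattern: take the MAC CSR SW/FW semaphore when LESM firmware is active, write AUTOC, run ixgbe_reset_pipeline_82599() (which also writes AUTOC), then release. Pulled out as a sketch of a hypothetical helper the series does not actually add:

static s32 autoc_write_locked(struct ixgbe_hw *hw, u32 autoc)
{
        bool got_lock = false;
        s32 ret_val = 0;

        if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
                ret_val = hw->mac.ops.acquire_swfw_sync(hw,
                                                IXGBE_GSSR_MAC_CSR_SM);
                if (ret_val)
                        return ret_val;
                got_lock = true;
        }

        IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
        ret_val = ixgbe_reset_pipeline_82599(hw);

        if (got_lock)
                hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

        return ret_val;
}
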
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index dbf37e4a45fd..8f285edb5094 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -90,6 +90,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
90 s32 ret_val = 0; 90 s32 ret_val = 0;
91 u32 reg = 0, reg_bp = 0; 91 u32 reg = 0, reg_bp = 0;
92 u16 reg_cu = 0; 92 u16 reg_cu = 0;
93 bool got_lock = false;
93 94
94 /* 95 /*
95 * Validate the requested mode. Strict IEEE mode does not allow 96 * Validate the requested mode. Strict IEEE mode does not allow
@@ -210,8 +211,29 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
210 * 211 *
211 */ 212 */
212 if (hw->phy.media_type == ixgbe_media_type_backplane) { 213 if (hw->phy.media_type == ixgbe_media_type_backplane) {
213 reg_bp |= IXGBE_AUTOC_AN_RESTART; 214 /* Need the SW/FW semaphore around AUTOC writes if 82599 and
215 * LESM is on, likewise reset_pipeline requries the lock as
216 * it also writes AUTOC.
217 */
218 if ((hw->mac.type == ixgbe_mac_82599EB) &&
219 ixgbe_verify_lesm_fw_enabled_82599(hw)) {
220 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
221 IXGBE_GSSR_MAC_CSR_SM);
222 if (ret_val)
223 goto out;
224
225 got_lock = true;
226 }
227
214 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp); 228 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
229
230 if (hw->mac.type == ixgbe_mac_82599EB)
231 ixgbe_reset_pipeline_82599(hw);
232
233 if (got_lock)
234 hw->mac.ops.release_swfw_sync(hw,
235 IXGBE_GSSR_MAC_CSR_SM);
236
215 } else if ((hw->phy.media_type == ixgbe_media_type_copper) && 237 } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
216 (ixgbe_device_supports_autoneg_fc(hw) == 0)) { 238 (ixgbe_device_supports_autoneg_fc(hw) == 0)) {
217 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, 239 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
@@ -1778,8 +1800,7 @@ s32 ixgbe_validate_mac_addr(u8 *mac_addr)
1778 else if (IXGBE_IS_BROADCAST(mac_addr)) 1800 else if (IXGBE_IS_BROADCAST(mac_addr))
1779 status = IXGBE_ERR_INVALID_MAC_ADDR; 1801 status = IXGBE_ERR_INVALID_MAC_ADDR;
1780 /* Reject the zero address */ 1802 /* Reject the zero address */
1781 else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && 1803 else if (is_zero_ether_addr(mac_addr))
1782 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0)
1783 status = IXGBE_ERR_INVALID_MAC_ADDR; 1804 status = IXGBE_ERR_INVALID_MAC_ADDR;
1784 1805
1785 return status; 1806 return status;
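
The open-coded six-byte comparison collapses into the etherdevice.h helper. Note that broadcast is itself a multicast address (low bit of the first octet set), so the pair of checks here is exactly what is_valid_ether_addr() already encodes; a usage sketch:

#include <linux/etherdevice.h>

static bool mac_addr_usable(const u8 *addr)
{
        /* rejects multicast (which includes broadcast) and all-zero */
        return !is_multicast_ether_addr(addr) && !is_zero_ether_addr(addr);
}
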
@@ -2617,6 +2638,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
2617 bool link_up = false; 2638 bool link_up = false;
2618 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2639 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2619 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 2640 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2641 s32 ret_val = 0;
2620 2642
2621 /* 2643 /*
2622 * Link must be up to auto-blink the LEDs; 2644 * Link must be up to auto-blink the LEDs;
@@ -2625,10 +2647,28 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
2625 hw->mac.ops.check_link(hw, &speed, &link_up, false); 2647 hw->mac.ops.check_link(hw, &speed, &link_up, false);
2626 2648
2627 if (!link_up) { 2649 if (!link_up) {
2650 /* Need the SW/FW semaphore around AUTOC writes if 82599 and
2651 * LESM is on.
2652 */
2653 bool got_lock = false;
2654
2655 if ((hw->mac.type == ixgbe_mac_82599EB) &&
2656 ixgbe_verify_lesm_fw_enabled_82599(hw)) {
2657 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
2658 IXGBE_GSSR_MAC_CSR_SM);
2659 if (ret_val)
2660 goto out;
2661
2662 got_lock = true;
2663 }
2628 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 2664 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2629 autoc_reg |= IXGBE_AUTOC_FLU; 2665 autoc_reg |= IXGBE_AUTOC_FLU;
2630 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); 2666 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2631 IXGBE_WRITE_FLUSH(hw); 2667 IXGBE_WRITE_FLUSH(hw);
2668
2669 if (got_lock)
2670 hw->mac.ops.release_swfw_sync(hw,
2671 IXGBE_GSSR_MAC_CSR_SM);
2632 usleep_range(10000, 20000); 2672 usleep_range(10000, 20000);
2633 } 2673 }
2634 2674
@@ -2637,7 +2677,8 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
2637 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 2677 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
2638 IXGBE_WRITE_FLUSH(hw); 2678 IXGBE_WRITE_FLUSH(hw);
2639 2679
2640 return 0; 2680out:
2681 return ret_val;
2641} 2682}
2642 2683
2643/** 2684/**
@@ -2649,18 +2690,40 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
2649{ 2690{
2650 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2691 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2651 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 2692 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2693 s32 ret_val = 0;
2694 bool got_lock = false;
2695
2696 /* Need the SW/FW semaphore around AUTOC writes if 82599 and
2697 * LESM is on.
2698 */
2699 if ((hw->mac.type == ixgbe_mac_82599EB) &&
2700 ixgbe_verify_lesm_fw_enabled_82599(hw)) {
2701 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
2702 IXGBE_GSSR_MAC_CSR_SM);
2703 if (ret_val)
2704 goto out;
2705
2706 got_lock = true;
2707 }
2652 2708
2653 autoc_reg &= ~IXGBE_AUTOC_FLU; 2709 autoc_reg &= ~IXGBE_AUTOC_FLU;
2654 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 2710 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2655 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); 2711 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2656 2712
2713 if (hw->mac.type == ixgbe_mac_82599EB)
2714 ixgbe_reset_pipeline_82599(hw);
2715
2716 if (got_lock)
2717 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
2718
2657 led_reg &= ~IXGBE_LED_MODE_MASK(index); 2719 led_reg &= ~IXGBE_LED_MODE_MASK(index);
2658 led_reg &= ~IXGBE_LED_BLINK(index); 2720 led_reg &= ~IXGBE_LED_BLINK(index);
2659 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); 2721 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
2660 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 2722 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
2661 IXGBE_WRITE_FLUSH(hw); 2723 IXGBE_WRITE_FLUSH(hw);
2662 2724
2663 return 0; 2725out:
2726 return ret_val;
2664} 2727}
2665 2728
2666/** 2729/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index d813d1188c36..587db4728072 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -107,6 +107,7 @@ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
107 107
108void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, 108void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb,
109 u32 headroom, int strategy); 109 u32 headroom, int strategy);
110s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
110 111
111#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8 112#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
112#define IXGBE_EMC_INTERNAL_DATA 0x00 113#define IXGBE_EMC_INTERNAL_DATA 0x00
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 116f0e901bee..a545728e100c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -887,24 +887,23 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
887 struct ethtool_ringparam *ring) 887 struct ethtool_ringparam *ring)
888{ 888{
889 struct ixgbe_adapter *adapter = netdev_priv(netdev); 889 struct ixgbe_adapter *adapter = netdev_priv(netdev);
890 struct ixgbe_ring *temp_tx_ring, *temp_rx_ring; 890 struct ixgbe_ring *temp_ring;
891 int i, err = 0; 891 int i, err = 0;
892 u32 new_rx_count, new_tx_count; 892 u32 new_rx_count, new_tx_count;
893 bool need_update = false;
894 893
895 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 894 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
896 return -EINVAL; 895 return -EINVAL;
897 896
898 new_rx_count = max_t(u32, ring->rx_pending, IXGBE_MIN_RXD); 897 new_tx_count = clamp_t(u32, ring->tx_pending,
899 new_rx_count = min_t(u32, new_rx_count, IXGBE_MAX_RXD); 898 IXGBE_MIN_TXD, IXGBE_MAX_TXD);
900 new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
901
902 new_tx_count = max_t(u32, ring->tx_pending, IXGBE_MIN_TXD);
903 new_tx_count = min_t(u32, new_tx_count, IXGBE_MAX_TXD);
904 new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); 899 new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
905 900
906 if ((new_tx_count == adapter->tx_ring[0]->count) && 901 new_rx_count = clamp_t(u32, ring->rx_pending,
907 (new_rx_count == adapter->rx_ring[0]->count)) { 902 IXGBE_MIN_RXD, IXGBE_MAX_RXD);
903 new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
904
905 if ((new_tx_count == adapter->tx_ring_count) &&
906 (new_rx_count == adapter->rx_ring_count)) {
908 /* nothing to do */ 907 /* nothing to do */
909 return 0; 908 return 0;
910 } 909 }
@@ -922,81 +921,80 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
922 goto clear_reset; 921 goto clear_reset;
923 } 922 }
924 923
925 temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring)); 924 /* allocate temporary buffer to store rings in */
926 if (!temp_tx_ring) { 925 i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues);
926 temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));
927
928 if (!temp_ring) {
927 err = -ENOMEM; 929 err = -ENOMEM;
928 goto clear_reset; 930 goto clear_reset;
929 } 931 }
930 932
933 ixgbe_down(adapter);
934
935 /*
936 * Setup new Tx resources and free the old Tx resources in that order.
937 * We can then assign the new resources to the rings via a memcpy.
938 * The advantage to this approach is that we are guaranteed to still
939 * have resources even in the case of an allocation failure.
940 */
931 if (new_tx_count != adapter->tx_ring_count) { 941 if (new_tx_count != adapter->tx_ring_count) {
932 for (i = 0; i < adapter->num_tx_queues; i++) { 942 for (i = 0; i < adapter->num_tx_queues; i++) {
933 memcpy(&temp_tx_ring[i], adapter->tx_ring[i], 943 memcpy(&temp_ring[i], adapter->tx_ring[i],
934 sizeof(struct ixgbe_ring)); 944 sizeof(struct ixgbe_ring));
935 temp_tx_ring[i].count = new_tx_count; 945
936 err = ixgbe_setup_tx_resources(&temp_tx_ring[i]); 946 temp_ring[i].count = new_tx_count;
947 err = ixgbe_setup_tx_resources(&temp_ring[i]);
937 if (err) { 948 if (err) {
938 while (i) { 949 while (i) {
939 i--; 950 i--;
940 ixgbe_free_tx_resources(&temp_tx_ring[i]); 951 ixgbe_free_tx_resources(&temp_ring[i]);
941 } 952 }
942 goto clear_reset; 953 goto err_setup;
943 } 954 }
944 } 955 }
945 need_update = true;
946 }
947 956
948 temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring)); 957 for (i = 0; i < adapter->num_tx_queues; i++) {
949 if (!temp_rx_ring) { 958 ixgbe_free_tx_resources(adapter->tx_ring[i]);
950 err = -ENOMEM; 959
951 goto err_setup; 960 memcpy(adapter->tx_ring[i], &temp_ring[i],
961 sizeof(struct ixgbe_ring));
962 }
963
964 adapter->tx_ring_count = new_tx_count;
952 } 965 }
953 966
967 /* Repeat the process for the Rx rings if needed */
954 if (new_rx_count != adapter->rx_ring_count) { 968 if (new_rx_count != adapter->rx_ring_count) {
955 for (i = 0; i < adapter->num_rx_queues; i++) { 969 for (i = 0; i < adapter->num_rx_queues; i++) {
956 memcpy(&temp_rx_ring[i], adapter->rx_ring[i], 970 memcpy(&temp_ring[i], adapter->rx_ring[i],
957 sizeof(struct ixgbe_ring)); 971 sizeof(struct ixgbe_ring));
958 temp_rx_ring[i].count = new_rx_count; 972
959 err = ixgbe_setup_rx_resources(&temp_rx_ring[i]); 973 temp_ring[i].count = new_rx_count;
974 err = ixgbe_setup_rx_resources(&temp_ring[i]);
960 if (err) { 975 if (err) {
961 while (i) { 976 while (i) {
962 i--; 977 i--;
963 ixgbe_free_rx_resources(&temp_rx_ring[i]); 978 ixgbe_free_rx_resources(&temp_ring[i]);
964 } 979 }
965 goto err_setup; 980 goto err_setup;
966 } 981 }
982
967 } 983 }
968 need_update = true;
969 }
970 984
971 /* if rings need to be updated, here's the place to do it in one shot */ 985 for (i = 0; i < adapter->num_rx_queues; i++) {
972 if (need_update) { 986 ixgbe_free_rx_resources(adapter->rx_ring[i]);
973 ixgbe_down(adapter);
974 987
975 /* tx */ 988 memcpy(adapter->rx_ring[i], &temp_ring[i],
976 if (new_tx_count != adapter->tx_ring_count) { 989 sizeof(struct ixgbe_ring));
977 for (i = 0; i < adapter->num_tx_queues; i++) {
978 ixgbe_free_tx_resources(adapter->tx_ring[i]);
979 memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
980 sizeof(struct ixgbe_ring));
981 }
982 adapter->tx_ring_count = new_tx_count;
983 } 990 }
984 991
985 /* rx */ 992 adapter->rx_ring_count = new_rx_count;
986 if (new_rx_count != adapter->rx_ring_count) {
987 for (i = 0; i < adapter->num_rx_queues; i++) {
988 ixgbe_free_rx_resources(adapter->rx_ring[i]);
989 memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
990 sizeof(struct ixgbe_ring));
991 }
992 adapter->rx_ring_count = new_rx_count;
993 }
994 ixgbe_up(adapter);
995 } 993 }
996 994
997 vfree(temp_rx_ring);
998err_setup: 995err_setup:
999 vfree(temp_tx_ring); 996 ixgbe_up(adapter);
997 vfree(temp_ring);
1000clear_reset: 998clear_reset:
1001 clear_bit(__IXGBE_RESETTING, &adapter->state); 999 clear_bit(__IXGBE_RESETTING, &adapter->state);
1002 return err; 1000 return err;
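
Two ideas drive the rewrite: ring sizes are computed as clamp-then-align, and resources are swapped through one temporary array sized for the larger queue count, with new resources allocated before the old ones are freed so a failed allocation never leaves the adapter without rings. The sizing rule as a sketch (limit values are illustrative, not the driver's):

#include <linux/kernel.h>       /* clamp_t(), ALIGN() */

static u32 ring_size(u32 requested, u32 min, u32 max, u32 multiple)
{
        /* bound to hardware limits, then round up to the descriptor
         * multiple the DMA engine requires */
        return ALIGN(clamp_t(u32, requested, min, max), multiple);
}

/* ring_size(100, 64, 4096, 8) == 104; ring_size(9000, 64, 4096, 8) == 4096 */
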
@@ -2669,7 +2667,6 @@ static int ixgbe_get_ts_info(struct net_device *dev,
2669 struct ixgbe_adapter *adapter = netdev_priv(dev); 2667 struct ixgbe_adapter *adapter = netdev_priv(dev);
2670 2668
2671 switch (adapter->hw.mac.type) { 2669 switch (adapter->hw.mac.type) {
2672#ifdef CONFIG_IXGBE_PTP
2673 case ixgbe_mac_X540: 2670 case ixgbe_mac_X540:
2674 case ixgbe_mac_82599EB: 2671 case ixgbe_mac_82599EB:
2675 info->so_timestamping = 2672 info->so_timestamping =
@@ -2695,7 +2692,6 @@ static int ixgbe_get_ts_info(struct net_device *dev,
2695 (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | 2692 (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
2696 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); 2693 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
2697 break; 2694 break;
2698#endif /* CONFIG_IXGBE_PTP */
2699 default: 2695 default:
2700 return ethtool_op_get_ts_info(dev, info); 2696 return ethtool_op_get_ts_info(dev, info);
2701 break; 2697 break;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index ae73ef14fdf3..252850d9a3e0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -800,6 +800,10 @@ int ixgbe_fcoe_enable(struct net_device *netdev)
800 return -EINVAL; 800 return -EINVAL;
801 801
802 e_info(drv, "Enabling FCoE offload features.\n"); 802 e_info(drv, "Enabling FCoE offload features.\n");
803
804 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
805 e_warn(probe, "Enabling FCoE on PF will disable legacy VFs\n");
806
803 if (netif_running(netdev)) 807 if (netif_running(netdev))
804 netdev->netdev_ops->ndo_stop(netdev); 808 netdev->netdev_ops->ndo_stop(netdev);
805 809
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 17ecbcedd548..8c74f739011d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -802,10 +802,13 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
802 /* setup affinity mask and node */ 802 /* setup affinity mask and node */
803 if (cpu != -1) 803 if (cpu != -1)
804 cpumask_set_cpu(cpu, &q_vector->affinity_mask); 804 cpumask_set_cpu(cpu, &q_vector->affinity_mask);
805 else
806 cpumask_copy(&q_vector->affinity_mask, cpu_online_mask);
807 q_vector->numa_node = node; 805 q_vector->numa_node = node;
808 806
807#ifdef CONFIG_IXGBE_DCA
808 /* initialize CPU for DCA */
809 q_vector->cpu = -1;
810
811#endif
809 /* initialize NAPI */ 812 /* initialize NAPI */
810 netif_napi_add(adapter->netdev, &q_vector->napi, 813 netif_napi_add(adapter->netdev, &q_vector->napi,
811 ixgbe_poll, 64); 814 ixgbe_poll, 64);
@@ -821,6 +824,21 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
821 /* initialize pointer to rings */ 824 /* initialize pointer to rings */
822 ring = q_vector->ring; 825 ring = q_vector->ring;
823 826
 827 /* initialize ITR */
828 if (txr_count && !rxr_count) {
829 /* tx only vector */
830 if (adapter->tx_itr_setting == 1)
831 q_vector->itr = IXGBE_10K_ITR;
832 else
833 q_vector->itr = adapter->tx_itr_setting;
834 } else {
835 /* rx or rx/tx vector */
836 if (adapter->rx_itr_setting == 1)
837 q_vector->itr = IXGBE_20K_ITR;
838 else
839 q_vector->itr = adapter->rx_itr_setting;
840 }
841
824 while (txr_count) { 842 while (txr_count) {
825 /* assign generic ring traits */ 843 /* assign generic ring traits */
826 ring->dev = &adapter->pdev->dev; 844 ring->dev = &adapter->pdev->dev;
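
Seeding q_vector->itr at allocation time is what lets the duplicated blocks disappear from ixgbe_configure_msix() and the MSI/legacy path further down: every vector carries a valid EITR value before any interrupt plumbing runs. The rule it encodes, as a sketch (the *_ITR constants are the driver's register encodings for roughly 10K/20K interrupts per second):

static u16 initial_itr(u16 itr_setting, bool tx_only)
{
        /* a stored setting of 1 means "adaptive", so seed a default;
         * anything else is taken as a literal EITR value */
        if (itr_setting == 1)
                return tx_only ? IXGBE_10K_ITR : IXGBE_20K_ITR;
        return itr_setting;
}
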
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index fa3d552e1f4a..690535a0322f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -44,6 +44,7 @@
44#include <linux/ethtool.h> 44#include <linux/ethtool.h>
45#include <linux/if.h> 45#include <linux/if.h>
46#include <linux/if_vlan.h> 46#include <linux/if_vlan.h>
47#include <linux/if_bridge.h>
47#include <linux/prefetch.h> 48#include <linux/prefetch.h>
48#include <scsi/fc/fc_fcoe.h> 49#include <scsi/fc/fc_fcoe.h>
49 50
@@ -355,13 +356,37 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
355 356
356 /* Transmit Descriptor Formats 357 /* Transmit Descriptor Formats
357 * 358 *
358 * Advanced Transmit Descriptor 359 * 82598 Advanced Transmit Descriptor
359 * +--------------------------------------------------------------+ 360 * +--------------------------------------------------------------+
360 * 0 | Buffer Address [63:0] | 361 * 0 | Buffer Address [63:0] |
361 * +--------------------------------------------------------------+ 362 * +--------------------------------------------------------------+
362 * 8 | PAYLEN | PORTS | IDX | STA | DCMD |DTYP | RSV | DTALEN | 363 * 8 | PAYLEN | POPTS | IDX | STA | DCMD |DTYP | RSV | DTALEN |
363 * +--------------------------------------------------------------+ 364 * +--------------------------------------------------------------+
364 * 63 46 45 40 39 36 35 32 31 24 23 20 19 0 365 * 63 46 45 40 39 36 35 32 31 24 23 20 19 0
366 *
367 * 82598 Advanced Transmit Descriptor (Write-Back Format)
368 * +--------------------------------------------------------------+
369 * 0 | RSV [63:0] |
370 * +--------------------------------------------------------------+
371 * 8 | RSV | STA | NXTSEQ |
372 * +--------------------------------------------------------------+
373 * 63 36 35 32 31 0
374 *
375 * 82599+ Advanced Transmit Descriptor
376 * +--------------------------------------------------------------+
377 * 0 | Buffer Address [63:0] |
378 * +--------------------------------------------------------------+
379 * 8 |PAYLEN |POPTS|CC|IDX |STA |DCMD |DTYP |MAC |RSV |DTALEN |
380 * +--------------------------------------------------------------+
381 * 63 46 45 40 39 38 36 35 32 31 24 23 20 19 18 17 16 15 0
382 *
383 * 82599+ Advanced Transmit Descriptor (Write-Back Format)
384 * +--------------------------------------------------------------+
385 * 0 | RSV [63:0] |
386 * +--------------------------------------------------------------+
387 * 8 | RSV | STA | RSV |
388 * +--------------------------------------------------------------+
389 * 63 36 35 32 31 0
365 */ 390 */
366 391
367 for (n = 0; n < adapter->num_tx_queues; n++) { 392 for (n = 0; n < adapter->num_tx_queues; n++) {
@@ -422,7 +447,9 @@ rx_ring_summary:
422 447
423 dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); 448 dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
424 449
425 /* Advanced Receive Descriptor (Read) Format 450 /* Receive Descriptor Formats
451 *
452 * 82598 Advanced Receive Descriptor (Read) Format
426 * 63 1 0 453 * 63 1 0
427 * +-----------------------------------------------------+ 454 * +-----------------------------------------------------+
428 * 0 | Packet Buffer Address [63:1] |A0/NSE| 455 * 0 | Packet Buffer Address [63:1] |A0/NSE|
@@ -431,17 +458,40 @@ rx_ring_summary:
431 * +-----------------------------------------------------+ 458 * +-----------------------------------------------------+
432 * 459 *
433 * 460 *
434 * Advanced Receive Descriptor (Write-Back) Format 461 * 82598 Advanced Receive Descriptor (Write-Back) Format
435 * 462 *
436 * 63 48 47 32 31 30 21 20 16 15 4 3 0 463 * 63 48 47 32 31 30 21 20 16 15 4 3 0
437 * +------------------------------------------------------+ 464 * +------------------------------------------------------+
438 * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS | 465 * 0 | RSS Hash / |SPH| HDR_LEN | RSV |Packet| RSS |
439 * | Checksum Ident | | | | Type | Type | 466 * | Packet | IP | | | | Type | Type |
467 * | Checksum | Ident | | | | | |
440 * +------------------------------------------------------+ 468 * +------------------------------------------------------+
441 * 8 | VLAN Tag | Length | Extended Error | Extended Status | 469 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
442 * +------------------------------------------------------+ 470 * +------------------------------------------------------+
443 * 63 48 47 32 31 20 19 0 471 * 63 48 47 32 31 20 19 0
472 *
473 * 82599+ Advanced Receive Descriptor (Read) Format
474 * 63 1 0
475 * +-----------------------------------------------------+
476 * 0 | Packet Buffer Address [63:1] |A0/NSE|
477 * +----------------------------------------------+------+
478 * 8 | Header Buffer Address [63:1] | DD |
479 * +-----------------------------------------------------+
480 *
481 *
482 * 82599+ Advanced Receive Descriptor (Write-Back) Format
483 *
484 * 63 48 47 32 31 30 21 20 17 16 4 3 0
485 * +------------------------------------------------------+
486 * 0 |RSS / Frag Checksum|SPH| HDR_LEN |RSC- |Packet| RSS |
487 * |/ RTT / PCoE_PARAM | | | CNT | Type | Type |
488 * |/ Flow Dir Flt ID | | | | | |
489 * +------------------------------------------------------+
490 * 8 | VLAN Tag | Length |Extended Error| Xtnd Status/NEXTP |
491 * +------------------------------------------------------+
492 * 63 48 47 32 31 20 19 0
444 */ 493 */
494
445 for (n = 0; n < adapter->num_rx_queues; n++) { 495 for (n = 0; n < adapter->num_rx_queues; n++) {
446 rx_ring = adapter->rx_ring[n]; 496 rx_ring = adapter->rx_ring[n];
447 pr_info("------------------------------------\n"); 497 pr_info("------------------------------------\n");
@@ -791,10 +841,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
791 total_bytes += tx_buffer->bytecount; 841 total_bytes += tx_buffer->bytecount;
792 total_packets += tx_buffer->gso_segs; 842 total_packets += tx_buffer->gso_segs;
793 843
794#ifdef CONFIG_IXGBE_PTP
795 if (unlikely(tx_buffer->tx_flags & IXGBE_TX_FLAGS_TSTAMP)) 844 if (unlikely(tx_buffer->tx_flags & IXGBE_TX_FLAGS_TSTAMP))
796 ixgbe_ptp_tx_hwtstamp(q_vector, tx_buffer->skb); 845 ixgbe_ptp_tx_hwtstamp(q_vector, tx_buffer->skb);
797#endif
798 846
799 /* free the skb */ 847 /* free the skb */
800 dev_kfree_skb_any(tx_buffer->skb); 848 dev_kfree_skb_any(tx_buffer->skb);
@@ -1244,6 +1292,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
1244 struct vlan_hdr *vlan; 1292 struct vlan_hdr *vlan;
1245 /* l3 headers */ 1293 /* l3 headers */
1246 struct iphdr *ipv4; 1294 struct iphdr *ipv4;
1295 struct ipv6hdr *ipv6;
1247 } hdr; 1296 } hdr;
1248 __be16 protocol; 1297 __be16 protocol;
1249 u8 nexthdr = 0; /* default to not TCP */ 1298 u8 nexthdr = 0; /* default to not TCP */
@@ -1284,6 +1333,13 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
1284 /* record next protocol */ 1333 /* record next protocol */
1285 nexthdr = hdr.ipv4->protocol; 1334 nexthdr = hdr.ipv4->protocol;
1286 hdr.network += hlen; 1335 hdr.network += hlen;
1336 } else if (protocol == __constant_htons(ETH_P_IPV6)) {
1337 if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
1338 return max_len;
1339
1340 /* record next protocol */
1341 nexthdr = hdr.ipv6->nexthdr;
1342 hdr.network += sizeof(struct ipv6hdr);
1287#ifdef IXGBE_FCOE 1343#ifdef IXGBE_FCOE
1288 } else if (protocol == __constant_htons(ETH_P_FCOE)) { 1344 } else if (protocol == __constant_htons(ETH_P_FCOE)) {
1289 if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN)) 1345 if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN))
@@ -1294,7 +1350,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
1294 return hdr.network - data; 1350 return hdr.network - data;
1295 } 1351 }
1296 1352
1297 /* finally sort out TCP */ 1353 /* finally sort out TCP/UDP */
1298 if (nexthdr == IPPROTO_TCP) { 1354 if (nexthdr == IPPROTO_TCP) {
1299 if ((hdr.network - data) > (max_len - sizeof(struct tcphdr))) 1355 if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
1300 return max_len; 1356 return max_len;
@@ -1307,6 +1363,11 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
1307 return hdr.network - data; 1363 return hdr.network - data;
1308 1364
1309 hdr.network += hlen; 1365 hdr.network += hlen;
1366 } else if (nexthdr == IPPROTO_UDP) {
1367 if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
1368 return max_len;
1369
1370 hdr.network += sizeof(struct udphdr);
1310 } 1371 }
1311 1372
1312 /* 1373 /*
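
With IPv6 and UDP handled, the parser covers the common v4/v6 x TCP/UDP matrix when sizing how much header to pull into the linear area. Its core walk, stripped of the per-step max_len bounds checks that the real function performs (illustration only):

#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>

static unsigned int headlen_sketch(unsigned char *network, __be16 proto)
{
        unsigned char *p = network;
        u8 nexthdr = 0;

        if (proto == htons(ETH_P_IP)) {
                struct iphdr *iph = (struct iphdr *)p;

                nexthdr = iph->protocol;
                p += iph->ihl * 4;
        } else if (proto == htons(ETH_P_IPV6)) {
                nexthdr = ((struct ipv6hdr *)p)->nexthdr;
                p += sizeof(struct ipv6hdr);    /* extension headers skipped */
        }

        if (nexthdr == IPPROTO_TCP)
                p += ((struct tcphdr *)p)->doff * 4;
        else if (nexthdr == IPPROTO_UDP)
                p += sizeof(struct udphdr);

        return p - network;
}
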
@@ -1369,9 +1430,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
1369 1430
1370 ixgbe_rx_checksum(rx_ring, rx_desc, skb); 1431 ixgbe_rx_checksum(rx_ring, rx_desc, skb);
1371 1432
1372#ifdef CONFIG_IXGBE_PTP
1373 ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb); 1433 ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
1374#endif
1375 1434
1376 if ((dev->features & NETIF_F_HW_VLAN_RX) && 1435 if ((dev->features & NETIF_F_HW_VLAN_RX) &&
1377 ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { 1436 ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
@@ -1781,7 +1840,7 @@ dma_sync:
1781 **/ 1840 **/
1782static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, 1841static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1783 struct ixgbe_ring *rx_ring, 1842 struct ixgbe_ring *rx_ring,
1784 int budget) 1843 const int budget)
1785{ 1844{
1786 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 1845 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1787#ifdef IXGBE_FCOE 1846#ifdef IXGBE_FCOE
@@ -1832,7 +1891,6 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1832 1891
1833 /* probably a little skewed due to removing CRC */ 1892 /* probably a little skewed due to removing CRC */
1834 total_rx_bytes += skb->len; 1893 total_rx_bytes += skb->len;
1835 total_rx_packets++;
1836 1894
1837 /* populate checksum, timestamp, VLAN, and protocol */ 1895 /* populate checksum, timestamp, VLAN, and protocol */
1838 ixgbe_process_skb_fields(rx_ring, rx_desc, skb); 1896 ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
@@ -1865,8 +1923,8 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1865 ixgbe_rx_skb(q_vector, skb); 1923 ixgbe_rx_skb(q_vector, skb);
1866 1924
1867 /* update budget accounting */ 1925 /* update budget accounting */
1868 budget--; 1926 total_rx_packets++;
1869 } while (likely(budget)); 1927 } while (likely(total_rx_packets < budget));
1870 1928
1871 u64_stats_update_begin(&rx_ring->syncp); 1929 u64_stats_update_begin(&rx_ring->syncp);
1872 rx_ring->stats.packets += total_rx_packets; 1930 rx_ring->stats.packets += total_rx_packets;
@@ -1878,7 +1936,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1878 if (cleaned_count) 1936 if (cleaned_count)
1879 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); 1937 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
1880 1938
1881 return !!budget; 1939 return (total_rx_packets < budget);
1882} 1940}
1883 1941
1884/** 1942/**
@@ -1914,20 +1972,6 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 		ixgbe_for_each_ring(ring, q_vector->tx)
 			ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
 
-		if (q_vector->tx.ring && !q_vector->rx.ring) {
-			/* tx only vector */
-			if (adapter->tx_itr_setting == 1)
-				q_vector->itr = IXGBE_10K_ITR;
-			else
-				q_vector->itr = adapter->tx_itr_setting;
-		} else {
-			/* rx or rx/tx vector */
-			if (adapter->rx_itr_setting == 1)
-				q_vector->itr = IXGBE_20K_ITR;
-			else
-				q_vector->itr = adapter->rx_itr_setting;
-		}
-
 		ixgbe_write_eitr(q_vector);
 	}
 
@@ -2324,10 +2368,8 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
 		break;
 	}
 
-#ifdef CONFIG_IXGBE_PTP
 	if (adapter->hw.mac.type == ixgbe_mac_X540)
 		mask |= IXGBE_EIMS_TIMESYNC;
-#endif
 
 	if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
 	    !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
@@ -2393,10 +2435,8 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
 
 	ixgbe_check_fan_failure(adapter, eicr);
 
-#ifdef CONFIG_IXGBE_PTP
 	if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
 		ixgbe_ptp_check_pps_event(adapter, eicr);
-#endif
 
 	/* re-enable the original interrupt state, no lsc, no queues */
 	if (!test_bit(__IXGBE_DOWN, &adapter->state))
@@ -2588,10 +2628,8 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
 	}
 
 	ixgbe_check_fan_failure(adapter, eicr);
-#ifdef CONFIG_IXGBE_PTP
 	if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
 		ixgbe_ptp_check_pps_event(adapter, eicr);
-#endif
 
 	/* would disable interrupts here but EIAM disabled it */
 	napi_schedule(&q_vector->napi);
@@ -2699,12 +2737,6 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
 
-	/* rx/tx vector */
-	if (adapter->rx_itr_setting == 1)
-		q_vector->itr = IXGBE_20K_ITR;
-	else
-		q_vector->itr = adapter->rx_itr_setting;
-
 	ixgbe_write_eitr(q_vector);
 
 	ixgbe_set_ivar(adapter, 0, 0, 0);
@@ -3211,7 +3243,6 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift);
 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
-	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
 
 	/* Map PF MAC address in RAR Entry 0 to first pool following VFs */
 	hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
@@ -3234,8 +3265,6 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
 
 	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
 
-	/* enable Tx loopback for VF/PF communication */
-	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
 
 	/* Enable MAC Anti-Spoofing */
 	hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
@@ -3263,6 +3292,11 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
 		max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
 
 #endif /* IXGBE_FCOE */
+
+	/* adjust max frame to be at least the size of a standard frame */
+	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
+		max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
+
 	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
 	if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
@@ -3271,9 +3305,6 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
 	}
 
-	/* MHADD will allow an extra 4 bytes past for vlan tagged frames */
-	max_frame += VLAN_HLEN;
-
 	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
 	/* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
 	hlreg0 |= IXGBE_HLREG0_JUMBOEN;
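[Note] The clamp added above keeps MHADD.MFS from being programmed below a standard Ethernet frame: ETH_FRAME_LEN is 1514 (a 1500-byte MTU plus the 14-byte header) and ETH_FCS_LEN is 4, so the floor is 1518 bytes; per the removed comment, the hardware already tolerates the 4 extra bytes of a VLAN tag, which is why the VLAN_HLEN adjustment is dropped. The arithmetic as a standalone sketch (plain constants stand in for the kernel macros):

    #include <stdio.h>

    int main(void)
    {
        const int eth_frame_len = 1514; /* mirrors ETH_FRAME_LEN */
        const int eth_fcs_len = 4;      /* mirrors ETH_FCS_LEN */
        int max_frame = 1200;           /* e.g. an undersized starting value */

        /* never program the hardware below a standard frame */
        if (max_frame < eth_frame_len + eth_fcs_len)
            max_frame = eth_frame_len + eth_fcs_len;

        printf("programmed MFS floor: %d\n", max_frame); /* 1518 */
        return 0;
    }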
@@ -4072,11 +4103,8 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
 	else
 		ixgbe_configure_msi_and_legacy(adapter);
 
-	/* enable the optics for both mult-speed fiber and 82599 SFP+ fiber */
-	if (hw->mac.ops.enable_tx_laser &&
-	    ((hw->phy.multispeed_fiber) ||
-	     ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
-	      (hw->mac.type == ixgbe_mac_82599EB))))
+	/* enable the optics for 82599 SFP+ fiber */
+	if (hw->mac.ops.enable_tx_laser)
 		hw->mac.ops.enable_tx_laser(hw);
 
 	clear_bit(__IXGBE_DOWN, &adapter->state);
@@ -4192,6 +4220,9 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
 	/* update SAN MAC vmdq pool selection */
 	if (hw->mac.san_mac_rar_index)
 		hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
+
+	if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
+		ixgbe_ptp_reset(adapter);
 }
 
 /**
@@ -4393,11 +4424,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 	if (!pci_channel_offline(adapter->pdev))
 		ixgbe_reset(adapter);
 
-	/* power down the optics for multispeed fiber and 82599 SFP+ fiber */
-	if (hw->mac.ops.disable_tx_laser &&
-	    ((hw->phy.multispeed_fiber) ||
-	     ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
-	      (hw->mac.type == ixgbe_mac_82599EB))))
+	/* power down the optics for 82599 SFP+ fiber */
+	if (hw->mac.ops.disable_tx_laser)
 		hw->mac.ops.disable_tx_laser(hw);
 
 	ixgbe_clean_all_tx_rings(adapter);
@@ -4828,14 +4856,14 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
 		return -EINVAL;
 
 	/*
-	 * For 82599EB we cannot allow PF to change MTU greater than 1500
-	 * in SR-IOV mode as it may cause buffer overruns in guest VFs that
-	 * don't allocate and chain buffers correctly.
+	 * For 82599EB we cannot allow legacy VFs to enable their receive
+	 * paths when MTU greater than 1500 is configured. So display a
+	 * warning that legacy VFs will be disabled.
 	 */
 	if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
 	    (adapter->hw.mac.type == ixgbe_mac_82599EB) &&
 	    (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
-		return -EINVAL;
+		e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
 
 	e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
 
@@ -4901,6 +4929,8 @@ static int ixgbe_open(struct net_device *netdev)
 	if (err)
 		goto err_set_queues;
 
+	ixgbe_ptp_init(adapter);
+
 	ixgbe_up_complete(adapter);
 
 	return 0;
@@ -4932,6 +4962,8 @@ static int ixgbe_close(struct net_device *netdev)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
+	ixgbe_ptp_stop(adapter);
+
 	ixgbe_down(adapter);
 	ixgbe_free_irq(adapter);
 
@@ -5022,14 +5054,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
 	if (wufc) {
 		ixgbe_set_rx_mode(netdev);
 
-		/*
-		 * enable the optics for both mult-speed fiber and
-		 * 82599 SFP+ fiber as we can WoL.
-		 */
-		if (hw->mac.ops.enable_tx_laser &&
-		    (hw->phy.multispeed_fiber ||
-		     (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber &&
-		      hw->mac.type == ixgbe_mac_82599EB)))
+		/* enable the optics for 82599 SFP+ fiber as we can WoL */
+		if (hw->mac.ops.enable_tx_laser)
 			hw->mac.ops.enable_tx_laser(hw);
 
 		/* turn on all-multi mode if wake on multicast is enabled */
@@ -5442,6 +5468,23 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
 	adapter->link_speed = link_speed;
 }
 
+static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
+{
+#ifdef CONFIG_IXGBE_DCB
+	struct net_device *netdev = adapter->netdev;
+	struct dcb_app app = {
+		.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
+		.protocol = 0,
+	};
+	u8 up = 0;
+
+	if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
+		up = dcb_ieee_getapp_mask(netdev, &app);
+
+	adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0;
+#endif
+}
+
 /**
  * ixgbe_watchdog_link_is_up - update netif_carrier status and
  *                             print link up message
@@ -5482,9 +5525,8 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
 		break;
 	}
 
-#ifdef CONFIG_IXGBE_PTP
-	ixgbe_ptp_start_cyclecounter(adapter);
-#endif
+	if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
+		ixgbe_ptp_start_cyclecounter(adapter);
 
 	e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
 	       (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
@@ -5501,6 +5543,9 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
 	netif_carrier_on(netdev);
 	ixgbe_check_vf_rate_limit(adapter);
 
+	/* update the default user priority for VFs */
+	ixgbe_update_default_up(adapter);
+
 	/* ping all the active vfs to let them know link has changed */
 	ixgbe_ping_all_vfs(adapter);
 }
@@ -5526,9 +5571,8 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
 	if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
 		adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
 
-#ifdef CONFIG_IXGBE_PTP
-	ixgbe_ptp_start_cyclecounter(adapter);
-#endif
+	if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
+		ixgbe_ptp_start_cyclecounter(adapter);
 
 	e_info(drv, "NIC Link is Down\n");
 	netif_carrier_off(netdev);
@@ -5833,9 +5877,7 @@ static void ixgbe_service_task(struct work_struct *work)
 	ixgbe_watchdog_subtask(adapter);
 	ixgbe_fdir_reinit_subtask(adapter);
 	ixgbe_check_hang_subtask(adapter);
-#ifdef CONFIG_IXGBE_PTP
 	ixgbe_ptp_overflow_check(adapter);
-#endif
 
 	ixgbe_service_event_complete(adapter);
 }
@@ -5988,10 +6030,8 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
 	if (tx_flags & IXGBE_TX_FLAGS_HW_VLAN)
 		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
 
-#ifdef CONFIG_IXGBE_PTP
 	if (tx_flags & IXGBE_TX_FLAGS_TSTAMP)
 		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_MAC_TSTAMP);
-#endif
 
 	/* set segmentation enable bits for TSO/FSO */
 #ifdef IXGBE_FCOE
@@ -6393,12 +6433,10 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 
 	skb_tx_timestamp(skb);
 
-#ifdef CONFIG_IXGBE_PTP
 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 		tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
 	}
-#endif
 
 #ifdef CONFIG_PCI_IOV
 	/*
@@ -6485,6 +6523,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
 		if (skb_pad(skb, 17 - skb->len))
 			return NETDEV_TX_OK;
 		skb->len = 17;
+		skb_set_tail_pointer(skb, 17);
 	}
 
 	tx_ring = adapter->tx_ring[skb->queue_mapping];
@@ -6547,10 +6586,8 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
 	switch (cmd) {
-#ifdef CONFIG_IXGBE_PTP
 	case SIOCSHWTSTAMP:
 		return ixgbe_ptp_hwtstamp_ioctl(adapter, req, cmd);
-#endif
 	default:
 		return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
 	}
@@ -6916,7 +6953,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 		return -EINVAL;
 	}
 
-	if (is_unicast_ether_addr(addr)) {
+	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
 		u32 rar_uc_entries = IXGBE_MAX_PF_MACVLANS;
 
 		if (netdev_uc_count(dev) < rar_uc_entries)
@@ -6974,6 +7011,59 @@ static int ixgbe_ndo_fdb_dump(struct sk_buff *skb,
 	return idx;
 }
 
+static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
+				    struct nlmsghdr *nlh)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	struct nlattr *attr, *br_spec;
+	int rem;
+
+	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+		return -EOPNOTSUPP;
+
+	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+
+	nla_for_each_nested(attr, br_spec, rem) {
+		__u16 mode;
+		u32 reg = 0;
+
+		if (nla_type(attr) != IFLA_BRIDGE_MODE)
+			continue;
+
+		mode = nla_get_u16(attr);
+		if (mode == BRIDGE_MODE_VEPA)
+			reg = 0;
+		else if (mode == BRIDGE_MODE_VEB)
+			reg = IXGBE_PFDTXGSWC_VT_LBEN;
+		else
+			return -EINVAL;
+
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, reg);
+
+		e_info(drv, "enabling bridge mode: %s\n",
+		       mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+	}
+
+	return 0;
+}
+
+static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+				    struct net_device *dev)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	u16 mode;
+
+	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+		return 0;
+
+	if (IXGBE_READ_REG(&adapter->hw, IXGBE_PFDTXGSWC) & 1)
+		mode = BRIDGE_MODE_VEB;
+	else
+		mode = BRIDGE_MODE_VEPA;
+
+	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode);
+}
+
 static const struct net_device_ops ixgbe_netdev_ops = {
 	.ndo_open		= ixgbe_open,
 	.ndo_stop		= ixgbe_close,
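[Note] The new ndo_bridge_setlink/getlink pair above reduces to a single register bit: PFDTXGSWC.LBEN set for VEB, where the MAC loops VF-to-VF traffic back internally, and cleared for VEPA, where frames hairpin through the adjacent switch. A standalone sketch of that mapping (the DEMO_ constants mirror, but are not, the kernel's BRIDGE_MODE_* values and the loopback bit):

    #include <stdio.h>

    #define DEMO_MODE_VEB           0
    #define DEMO_MODE_VEPA          1
    #define DEMO_PFDTXGSWC_VT_LBEN  0x1

    /* VEB keeps VF-to-VF traffic inside the MAC; VEPA hands it to the
     * external switch, so the internal loopback bit is cleared */
    static int demo_mode_to_reg(int mode, unsigned int *reg)
    {
        switch (mode) {
        case DEMO_MODE_VEPA:
            *reg = 0;
            return 0;
        case DEMO_MODE_VEB:
            *reg = DEMO_PFDTXGSWC_VT_LBEN;
            return 0;
        default:
            return -1;  /* unknown mode, rejected like -EINVAL */
        }
    }

    int main(void)
    {
        unsigned int reg;

        if (!demo_mode_to_reg(DEMO_MODE_VEB, &reg))
            printf("VEB  -> PFDTXGSWC = 0x%x\n", reg);  /* 0x1 */
        if (!demo_mode_to_reg(DEMO_MODE_VEPA, &reg))
            printf("VEPA -> PFDTXGSWC = 0x%x\n", reg);  /* 0x0 */
        return 0;
    }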
@@ -7013,6 +7103,8 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 	.ndo_fdb_add		= ixgbe_ndo_fdb_add,
 	.ndo_fdb_del		= ixgbe_ndo_fdb_del,
 	.ndo_fdb_dump		= ixgbe_ndo_fdb_dump,
+	.ndo_bridge_setlink	= ixgbe_ndo_bridge_setlink,
+	.ndo_bridge_getlink	= ixgbe_ndo_bridge_getlink,
 };
 
 /**
@@ -7042,6 +7134,7 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
 			break;
 		case IXGBE_SUBDEV_ID_82599_SFP:
 		case IXGBE_SUBDEV_ID_82599_RNDC:
+		case IXGBE_SUBDEV_ID_82599_ECNA_DP:
 			is_wol_supported = 1;
 			break;
 		}
@@ -7364,10 +7457,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 
 	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
 
-#ifdef CONFIG_IXGBE_PTP
-	ixgbe_ptp_init(adapter);
-#endif /* CONFIG_IXGBE_PTP*/
-
 	/* save off EEPROM version number */
 	hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh);
 	hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);
@@ -7420,11 +7509,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	if (err)
 		goto err_register;
 
-	/* power down the optics for multispeed fiber and 82599 SFP+ fiber */
-	if (hw->mac.ops.disable_tx_laser &&
-	    ((hw->phy.multispeed_fiber) ||
-	     ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
-	      (hw->mac.type == ixgbe_mac_82599EB))))
+	/* power down the optics for 82599 SFP+ fiber */
+	if (hw->mac.ops.disable_tx_laser)
 		hw->mac.ops.disable_tx_laser(hw);
 
 	/* carrier off reporting is important to ethtool even BEFORE open */
@@ -7505,9 +7591,6 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
 	set_bit(__IXGBE_DOWN, &adapter->state);
 	cancel_work_sync(&adapter->service_task);
 
-#ifdef CONFIG_IXGBE_PTP
-	ixgbe_ptp_stop(adapter);
-#endif
 
 #ifdef CONFIG_IXGBE_DCA
 	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index 310bdd961075..42dd65e6ac97 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -62,12 +62,39 @@
 /* bits 23:16 are used for extra info for certain messages */
 #define IXGBE_VT_MSGINFO_MASK	(0xFF << IXGBE_VT_MSGINFO_SHIFT)
 
+/* definitions to support mailbox API version negotiation */
+
+/*
+ * Each element denotes a version of the API; existing numbers may not
+ * change; any additions must go at the end
+ */
+enum ixgbe_pfvf_api_rev {
+	ixgbe_mbox_api_10,	/* API version 1.0, linux/freebsd VF driver */
+	ixgbe_mbox_api_20,	/* API version 2.0, solaris Phase1 VF driver */
+	ixgbe_mbox_api_11,	/* API version 1.1, linux/freebsd VF driver */
+	/* This value should always be last */
+	ixgbe_mbox_api_unknown,	/* indicates that API version is not known */
+};
+
+/* mailbox API, legacy requests */
 #define IXGBE_VF_RESET		0x01 /* VF requests reset */
 #define IXGBE_VF_SET_MAC_ADDR	0x02 /* VF requests PF to set MAC addr */
 #define IXGBE_VF_SET_MULTICAST	0x03 /* VF requests PF to set MC addr */
 #define IXGBE_VF_SET_VLAN	0x04 /* VF requests PF to set VLAN */
-#define IXGBE_VF_SET_LPE	0x05 /* VF requests PF to set VMOLR.LPE */
-#define IXGBE_VF_SET_MACVLAN	0x06 /* VF requests PF for unicast filter */
+
+/* mailbox API, version 1.0 VF requests */
+#define IXGBE_VF_SET_LPE	0x05 /* VF requests PF to set VMOLR.LPE */
+#define IXGBE_VF_SET_MACVLAN	0x06 /* VF requests PF for unicast filter */
+#define IXGBE_VF_API_NEGOTIATE	0x08 /* negotiate API version */
+
+/* mailbox API, version 1.1 VF requests */
+#define IXGBE_VF_GET_QUEUES	0x09 /* get queue configuration */
+
+/* GET_QUEUES return data indices within the mailbox */
+#define IXGBE_VF_TX_QUEUES	1 /* number of Tx queues supported */
+#define IXGBE_VF_RX_QUEUES	2 /* number of Rx queues supported */
+#define IXGBE_VF_TRANS_VLAN	3 /* Indication of port vlan */
+#define IXGBE_VF_DEF_QUEUE	4 /* Default queue offset */
 
 /* length of permanent address message returned from PF */
 #define IXGBE_VF_PERMADDR_MSG_LEN 4
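[Note] The header only shows the PF side of the negotiation; a rough, self-contained sketch of how the exchange could proceed follows, with a stub standing in for the real mailbox read/write ops and for the PF. Only the message layout (IXGBE_VF_API_NEGOTIATE in word 0, the requested revision in word 1, ACK/NACK signalled back in word 0) comes from this header and the sriov.c changes below; everything else is assumed:

    #include <stdio.h>

    typedef unsigned int u32;

    #define IXGBE_VF_API_NEGOTIATE  0x08
    #define VT_MSGTYPE_ACK          0x80000000u /* stand-in for IXGBE_VT_MSGTYPE_ACK */
    #define VT_MSGTYPE_NACK         0x40000000u /* stand-in for IXGBE_VT_MSGTYPE_NACK */

    enum demo_api_rev { demo_api_10, demo_api_20, demo_api_11, demo_api_unknown };

    /* stub "PF": ACKs 1.0 and 1.1 requests, NACKs everything else, like
     * ixgbe_negotiate_vf_api below */
    static void demo_mbx_roundtrip(u32 *msg)
    {
        if (msg[0] == IXGBE_VF_API_NEGOTIATE &&
            (msg[1] == demo_api_10 || msg[1] == demo_api_11))
            msg[0] |= VT_MSGTYPE_ACK;
        else
            msg[0] |= VT_MSGTYPE_NACK;
    }

    /* hypothetical VF side: request a revision, fall back to 1.0 on NACK */
    static int demo_negotiate(enum demo_api_rev wanted)
    {
        u32 msg[2] = { IXGBE_VF_API_NEGOTIATE, wanted };

        demo_mbx_roundtrip(msg);
        return (msg[0] & VT_MSGTYPE_ACK) ? (int)wanted : (int)demo_api_10;
    }

    int main(void)
    {
        printf("negotiated: %d\n", demo_negotiate(demo_api_11)); /* 2 (1.1) */
        printf("negotiated: %d\n", demo_negotiate(demo_api_20)); /* 0 (fallback) */
        return 0;
    }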
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index d9291316ee9f..01d99af0b9ba 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -387,6 +387,15 @@ void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr)
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct ptp_clock_event event;
 
+	event.type = PTP_CLOCK_PPS;
+
+	/* this check is necessary in case the interrupt was enabled via some
+	 * alternative means (ex. debug_fs). Better to check here than
+	 * everywhere that calls this function.
+	 */
+	if (!adapter->ptp_clock)
+		return;
+
 	switch (hw->mac.type) {
 	case ixgbe_mac_X540:
 		ptp_clock_event(adapter->ptp_clock, &event);
@@ -411,7 +420,7 @@ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter)
 	unsigned long elapsed_jiffies = adapter->last_overflow_check - jiffies;
 	struct timespec ts;
 
-	if ((adapter->flags2 & IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED) &&
+	if ((adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) &&
 	    (elapsed_jiffies >= IXGBE_OVERFLOW_PERIOD)) {
 		ixgbe_ptp_gettime(&adapter->ptp_caps, &ts);
 		adapter->last_overflow_check = jiffies;
@@ -554,12 +563,14 @@ void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
 	adapter = q_vector->adapter;
 	hw = &adapter->hw;
 
+	if (likely(!ixgbe_ptp_match(skb, adapter->rx_hwtstamp_filter)))
+		return;
+
 	tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
 
 	/* Check if we have a valid timestamp and make sure the skb should
 	 * have been timestamped */
-	if (likely(!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID) ||
-		   !ixgbe_ptp_match(skb, adapter->rx_hwtstamp_filter)))
+	if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID))
 		return;
 
 	/*
@@ -759,58 +770,20 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
  * ixgbe_ptp_start_cyclecounter - create the cycle counter from hw
  * @adapter: pointer to the adapter structure
  *
- * this function initializes the timecounter and cyclecounter
- * structures for use in generated a ns counter from the arbitrary
- * fixed point cycles registers in the hardware.
- *
- * A change in link speed impacts the frequency of the DMA clock on
- * the device, which is used to generate the cycle counter
- * registers. Therefor this function is called whenever the link speed
- * changes.
- *
- * This function also turns on the SDP pin for clock out feature (X540
- * only), because this is where the shift is first calculated.
+ * This function should be called to set the proper values for the TIMINCA
+ * register and tell the cyclecounter structure what the tick rate of SYSTIME
+ * is. It does not directly modify SYSTIME registers or the timecounter
+ * structure. It should be called whenever a new TIMINCA value is necessary,
+ * such as during initialization or when the link speed changes.
  */
 void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 incval = 0;
-	u32 timinca = 0;
 	u32 shift = 0;
-	u32 cycle_speed;
 	unsigned long flags;
 
-	/**
-	 * Determine what speed we need to set the cyclecounter
-	 * for. It should be different for 100Mb, 1Gb, and 10Gb. Treat
-	 * unknown speeds as 10Gb. (Hence why we can't just copy the
-	 * link_speed.
-	 */
-	switch (adapter->link_speed) {
-	case IXGBE_LINK_SPEED_100_FULL:
-	case IXGBE_LINK_SPEED_1GB_FULL:
-	case IXGBE_LINK_SPEED_10GB_FULL:
-		cycle_speed = adapter->link_speed;
-		break;
-	default:
-		/* cycle speed should be 10Gb when there is no link */
-		cycle_speed = IXGBE_LINK_SPEED_10GB_FULL;
-		break;
-	}
-
-	/*
-	 * grab the current TIMINCA value from the register so that it can be
-	 * double checked. If the register value has been cleared, it must be
-	 * reset to the correct value for generating a cyclecounter. If
-	 * TIMINCA is zero, the SYSTIME registers do not increment at all.
-	 */
-	timinca = IXGBE_READ_REG(hw, IXGBE_TIMINCA);
-
-	/* Bail if the cycle speed didn't change and TIMINCA is non-zero */
-	if (adapter->cycle_speed == cycle_speed && timinca)
-		return;
-
 	/**
 	 * Scale the NIC cycle counter by a large factor so that
 	 * relatively small corrections to the frequency can be added
 	 * or subtracted. The drawbacks of a large factor include
@@ -819,8 +792,12 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
 	 * to nanoseconds using only a multiplier and a right-shift,
 	 * and (c) the value must fit within the timinca register space
 	 * => math based on internal DMA clock rate and available bits
+	 *
+	 * Note that when there is no link, internal DMA clock is same as when
+	 * link speed is 10Gb. Set the registers correctly even when link is
+	 * down to preserve the clock setting
 	 */
-	switch (cycle_speed) {
+	switch (adapter->link_speed) {
 	case IXGBE_LINK_SPEED_100_FULL:
 		incval = IXGBE_INCVAL_100;
 		shift = IXGBE_INCVAL_SHIFT_100;
@@ -830,6 +807,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
 		shift = IXGBE_INCVAL_SHIFT_1GB;
 		break;
 	case IXGBE_LINK_SPEED_10GB_FULL:
+	default:
 		incval = IXGBE_INCVAL_10GB;
 		shift = IXGBE_INCVAL_SHIFT_10GB;
 		break;
@@ -857,18 +835,11 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
 		return;
 	}
 
-	/* reset the system time registers */
-	IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x00000000);
-	IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000);
-	IXGBE_WRITE_FLUSH(hw);
-
-	/* store the new cycle speed */
-	adapter->cycle_speed = cycle_speed;
-
+	/* update the base incval used to calculate frequency adjustment */
 	ACCESS_ONCE(adapter->base_incval) = incval;
 	smp_mb();
 
-	/* grab the ptp lock */
+	/* need lock to prevent incorrect read while modifying cyclecounter */
 	spin_lock_irqsave(&adapter->tmreg_lock, flags);
 
 	memset(&adapter->cc, 0, sizeof(adapter->cc));
@@ -877,6 +848,31 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
 	adapter->cc.shift = shift;
 	adapter->cc.mult = 1;
 
+	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+}
+
+/**
+ * ixgbe_ptp_reset
+ * @adapter: the ixgbe private board structure
+ *
+ * When the MAC resets, all timesync features are reset. This function should be
+ * called to re-enable the PTP clock structure. It will re-init the timecounter
+ * structure based on the kernel time as well as setup the cycle counter data.
+ */
+void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	unsigned long flags;
+
+	/* set SYSTIME registers to 0 just in case */
+	IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x00000000);
+	IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000);
+	IXGBE_WRITE_FLUSH(hw);
+
+	ixgbe_ptp_start_cyclecounter(adapter);
+
+	spin_lock_irqsave(&adapter->tmreg_lock, flags);
+
 	/* reset the ns time counter */
 	timecounter_init(&adapter->tc, &adapter->cc,
 			 ktime_to_ns(ktime_get_real()));
@@ -904,7 +900,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
 
 	switch (adapter->hw.mac.type) {
 	case ixgbe_mac_X540:
-		snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
+		snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
 		adapter->ptp_caps.owner = THIS_MODULE;
 		adapter->ptp_caps.max_adj = 250000000;
 		adapter->ptp_caps.n_alarm = 0;
@@ -918,7 +914,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
 		adapter->ptp_caps.enable = ixgbe_ptp_enable;
 		break;
 	case ixgbe_mac_82599EB:
-		snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
+		snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
 		adapter->ptp_caps.owner = THIS_MODULE;
 		adapter->ptp_caps.max_adj = 250000000;
 		adapter->ptp_caps.n_alarm = 0;
@@ -942,11 +938,6 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
 
 	spin_lock_init(&adapter->tmreg_lock);
 
-	ixgbe_ptp_start_cyclecounter(adapter);
-
-	/* (Re)start the overflow check */
-	adapter->flags2 |= IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED;
-
 	adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
 						&adapter->pdev->dev);
 	if (IS_ERR(adapter->ptp_clock)) {
@@ -955,6 +946,11 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
 	} else
 		e_dev_info("registered PHC device on %s\n", netdev->name);
 
+	ixgbe_ptp_reset(adapter);
+
+	/* set the flag that PTP has been enabled */
+	adapter->flags2 |= IXGBE_FLAG2_PTP_ENABLED;
+
 	return;
 }
 
@@ -967,7 +963,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
 void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
 {
 	/* stop the overflow check task */
-	adapter->flags2 &= ~(IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED |
+	adapter->flags2 &= ~(IXGBE_FLAG2_PTP_ENABLED |
 			     IXGBE_FLAG2_PTP_PPS_ENABLED);
 
 	ixgbe_ptp_setup_sdp(adapter);
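[Note] After this rewrite ixgbe_ptp_start_cyclecounter only chooses incval and shift; TIMINCA makes SYSTIME advance by incval per DMA clock tick, and the generic timecounter converts accumulated SYSTIME to nanoseconds as (cycles * mult) >> shift, which with cc.mult == 1 (set above) is just a right shift. A standalone illustration with made-up numbers (not the real IXGBE_INCVAL_* constants):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t incval = 0x00A00000;   /* illustrative TIMINCA increment */
        unsigned int shift = 22;        /* illustrative right-shift */
        uint64_t ticks = 1000000;       /* raw DMA clock ticks elapsed */

        /* SYSTIME accumulates incval per tick; the timecounter then does
         * (cycles * mult) >> shift with mult == 1 */
        uint64_t cycles = ticks * incval;
        uint64_t ns = cycles >> shift;

        printf("%llu ticks -> %llu ns\n",
               (unsigned long long)ticks, (unsigned long long)ns);
        return 0;
    }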
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index dce48bf64d96..4993642d1ce1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -117,6 +117,9 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
 		}
 	}
 
+	/* Initialize default switching mode VEB */
+	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+
 	/* If call to enable VFs succeeded then allocate memory
 	 * for per VF control structures.
 	 */
@@ -150,16 +153,6 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
 		adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
 				     IXGBE_FLAG2_RSC_ENABLED);
 
-#ifdef IXGBE_FCOE
-		/*
-		 * When SR-IOV is enabled 82599 cannot support jumbo frames
-		 * so we must disable FCoE because we cannot support FCoE MTU.
-		 */
-		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
-			adapter->flags &= ~(IXGBE_FLAG_FCOE_ENABLED |
-					    IXGBE_FLAG_FCOE_CAPABLE);
-#endif
-
 		/* enable spoof checking for all VFs */
 		for (i = 0; i < adapter->num_vfs; i++)
 			adapter->vfinfo[i].spoofchk_enabled = true;
@@ -265,8 +258,11 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
 }
 
 static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
-				   int entries, u16 *hash_list, u32 vf)
+				   u32 *msgbuf, u32 vf)
 {
+	int entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
+		      >> IXGBE_VT_MSGINFO_SHIFT;
+	u16 *hash_list = (u16 *)&msgbuf[1];
 	struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
 	struct ixgbe_hw *hw = &adapter->hw;
 	int i;
@@ -353,31 +349,89 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
 	return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
 }
 
-static void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf)
+static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
-	int new_mtu = msgbuf[1];
+	int max_frame = msgbuf[1];
 	u32 max_frs;
-	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 
-	/* Only X540 supports jumbo frames in IOV mode */
-	if (adapter->hw.mac.type != ixgbe_mac_X540)
-		return;
+	/*
+	 * For 82599EB we have to keep all PFs and VFs operating with
+	 * the same max_frame value in order to avoid sending an oversize
+	 * frame to a VF. In order to guarantee this is handled correctly
+	 * for all cases we have several special exceptions to take into
+	 * account before we can enable the VF for receive
+	 */
+	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+		struct net_device *dev = adapter->netdev;
+		int pf_max_frame = dev->mtu + ETH_HLEN;
+		u32 reg_offset, vf_shift, vfre;
+		s32 err = 0;
+
+#ifdef CONFIG_FCOE
+		if (dev->features & NETIF_F_FCOE_MTU)
+			pf_max_frame = max_t(int, pf_max_frame,
+					     IXGBE_FCOE_JUMBO_FRAME_SIZE);
+
+#endif /* CONFIG_FCOE */
+		switch (adapter->vfinfo[vf].vf_api) {
+		case ixgbe_mbox_api_11:
+			/*
+			 * Version 1.1 supports jumbo frames on VFs if PF has
+			 * jumbo frames enabled which means legacy VFs are
+			 * disabled
+			 */
+			if (pf_max_frame > ETH_FRAME_LEN)
+				break;
+		default:
+			/*
+			 * If the PF or VF are running w/ jumbo frames enabled
+			 * we need to shut down the VF Rx path as we cannot
+			 * support jumbo frames on legacy VFs
+			 */
+			if ((pf_max_frame > ETH_FRAME_LEN) ||
+			    (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
+				err = -EINVAL;
+			break;
+		}
+
+		/* determine VF receive enable location */
+		vf_shift = vf % 32;
+		reg_offset = vf / 32;
+
+		/* enable or disable receive depending on error */
+		vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
+		if (err)
+			vfre &= ~(1 << vf_shift);
+		else
+			vfre |= 1 << vf_shift;
+		IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre);
+
+		if (err) {
+			e_err(drv, "VF max_frame %d out of range\n", max_frame);
+			return err;
+		}
+	}
 
 	/* MTU < 68 is an error and causes problems on some kernels */
-	if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) {
-		e_err(drv, "VF mtu %d out of range\n", new_mtu);
-		return;
+	if (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
+		e_err(drv, "VF max_frame %d out of range\n", max_frame);
+		return -EINVAL;
 	}
 
-	max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
-		   IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
-	if (max_frs < new_mtu) {
-		max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT;
+	/* pull current max frame size from hardware */
+	max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
+	max_frs &= IXGBE_MHADD_MFS_MASK;
+	max_frs >>= IXGBE_MHADD_MFS_SHIFT;
+
+	if (max_frs < max_frame) {
+		max_frs = max_frame << IXGBE_MHADD_MFS_SHIFT;
 		IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
 	}
 
-	e_info(hw, "VF requests change max MTU to %d\n", new_mtu);
+	e_info(hw, "VF requests change max MTU to %d\n", max_frame);
+
+	return 0;
 }
 
 static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
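[Note] The 82599 receive-enable decision in ixgbe_set_vf_lpe above boils down to a small predicate: a VF that negotiated mailbox API 1.1 may follow the PF into jumbo mode, while a legacy VF keeps its Rx path only if both PF and VF stay at standard frame sizes. Distilled into a self-contained sketch (demo_ names are invented; the constants mirror ETH_FRAME_LEN and ETH_FCS_LEN):

    #include <stdio.h>

    #define DEMO_ETH_FRAME_LEN 1514 /* mirrors ETH_FRAME_LEN */
    #define DEMO_ETH_FCS_LEN   4    /* mirrors ETH_FCS_LEN */

    /* api11 marks a VF that negotiated mailbox API 1.1 */
    static int demo_vf_rx_ok(int api11, int pf_max_frame, int vf_max_frame)
    {
        /* 1.1 VFs may follow the PF into jumbo mode */
        if (api11 && pf_max_frame > DEMO_ETH_FRAME_LEN)
            return 1;

        /* legacy VFs: both sides must stay at standard frame size */
        return pf_max_frame <= DEMO_ETH_FRAME_LEN &&
               vf_max_frame <= DEMO_ETH_FRAME_LEN + DEMO_ETH_FCS_LEN;
    }

    int main(void)
    {
        printf("legacy VF, jumbo PF:  %d\n", demo_vf_rx_ok(0, 9000, 1518)); /* 0 */
        printf("api 1.1 VF, jumbo PF: %d\n", demo_vf_rx_ok(1, 9000, 9018)); /* 1 */
        printf("legacy VF, std PF:    %d\n", demo_vf_rx_ok(0, 1514, 1518)); /* 1 */
        return 0;
    }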
@@ -392,35 +446,47 @@ static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
 	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
 }
 
-static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf)
+static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter,
+			    u16 vid, u16 qos, u32 vf)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
+	u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | IXGBE_VMVIR_VLANA_DEFAULT;
 
-	if (vid)
-		IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf),
-				(vid | IXGBE_VMVIR_VLANA_DEFAULT));
-	else
-		IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
+	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), vmvir);
 }
 
+static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
+}
 static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
+	struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
 	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
+	u8 num_tcs = netdev_get_num_tc(adapter->netdev);
+
+	/* add PF assigned VLAN or VLAN 0 */
+	ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf);
 
 	/* reset offloads to defaults */
-	if (adapter->vfinfo[vf].pf_vlan) {
-		ixgbe_set_vf_vlan(adapter, true,
-				  adapter->vfinfo[vf].pf_vlan, vf);
-		ixgbe_set_vmvir(adapter,
-				(adapter->vfinfo[vf].pf_vlan |
-				 (adapter->vfinfo[vf].pf_qos <<
-				  VLAN_PRIO_SHIFT)), vf);
-		ixgbe_set_vmolr(hw, vf, false);
+	ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan);
+
+	/* set outgoing tags for VFs */
+	if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) {
+		ixgbe_clear_vmvir(adapter, vf);
 	} else {
-		ixgbe_set_vf_vlan(adapter, true, 0, vf);
-		ixgbe_set_vmvir(adapter, 0, vf);
-		ixgbe_set_vmolr(hw, vf, true);
+		if (vfinfo->pf_qos || !num_tcs)
+			ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
+					vfinfo->pf_qos, vf);
+		else
+			ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
+					adapter->default_up, vf);
+
+		if (vfinfo->spoofchk_enabled)
+			hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
 	}
 
 	/* reset multicast table array for vf */
@@ -430,6 +496,9 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
 	ixgbe_set_rx_mode(adapter->netdev);
 
 	hw->mac.ops.clear_rar(hw, rar_entry);
+
+	/* reset VF api back to unknown */
+	adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
 }
 
 static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
@@ -521,30 +590,221 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
 	return 0;
 }
 
-static inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
+static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
-	u32 reg;
+	unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
+	u32 reg, msgbuf[4];
 	u32 reg_offset, vf_shift;
+	u8 *addr = (u8 *)(&msgbuf[1]);
+
+	e_info(probe, "VF Reset msg received from vf %d\n", vf);
+
+	/* reset the filters for the device */
+	ixgbe_vf_reset_event(adapter, vf);
+
+	/* set vf mac address */
+	ixgbe_set_vf_mac(adapter, vf, vf_mac);
 
 	vf_shift = vf % 32;
 	reg_offset = vf / 32;
 
-	/* enable transmit and receive for vf */
+	/* enable transmit for vf */
 	reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
-	reg |= (reg | (1 << vf_shift));
+	reg |= 1 << vf_shift;
 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
 
+	/* enable receive for vf */
 	reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
-	reg |= (reg | (1 << vf_shift));
+	reg |= 1 << vf_shift;
+	/*
+	 * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
+	 * For more info take a look at ixgbe_set_vf_lpe
+	 */
+	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+		struct net_device *dev = adapter->netdev;
+		int pf_max_frame = dev->mtu + ETH_HLEN;
+
+#ifdef CONFIG_FCOE
+		if (dev->features & NETIF_F_FCOE_MTU)
+			pf_max_frame = max_t(int, pf_max_frame,
+					     IXGBE_FCOE_JUMBO_FRAME_SIZE);
+
+#endif /* CONFIG_FCOE */
+		if (pf_max_frame > ETH_FRAME_LEN)
+			reg &= ~(1 << vf_shift);
+	}
 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
 
+	/* enable VF mailbox for further messages */
+	adapter->vfinfo[vf].clear_to_send = true;
+
 	/* Enable counting of spoofed packets in the SSVPC register */
 	reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
 	reg |= (1 << vf_shift);
 	IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
 
-	ixgbe_vf_reset_event(adapter, vf);
+	/* reply to reset with ack and vf mac address */
+	msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
+	memcpy(addr, vf_mac, ETH_ALEN);
+
+	/*
+	 * Piggyback the multicast filter type so VF can compute the
+	 * correct vectors
+	 */
+	msgbuf[3] = hw->mac.mc_filter_type;
+	ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
+
+	return 0;
+}
+
+static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
+				 u32 *msgbuf, u32 vf)
+{
+	u8 *new_mac = ((u8 *)(&msgbuf[1]));
+
+	if (!is_valid_ether_addr(new_mac)) {
+		e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
+		return -1;
+	}
+
+	if (adapter->vfinfo[vf].pf_set_mac &&
+	    memcmp(adapter->vfinfo[vf].vf_mac_addresses, new_mac,
+		   ETH_ALEN)) {
+		e_warn(drv,
+		       "VF %d attempted to override administratively set MAC address\n"
+		       "Reload the VF driver to resume operations\n",
+		       vf);
+		return -1;
+	}
+
+	return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0;
+}
+
+static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
+				 u32 *msgbuf, u32 vf)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	int add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
+	int vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
+	int err;
+	u8 tcs = netdev_get_num_tc(adapter->netdev);
+
+	if (adapter->vfinfo[vf].pf_vlan || tcs) {
+		e_warn(drv,
+		       "VF %d attempted to override administratively set VLAN configuration\n"
+		       "Reload the VF driver to resume operations\n",
+		       vf);
+		return -1;
+	}
+
+	if (add)
+		adapter->vfinfo[vf].vlan_count++;
+	else if (adapter->vfinfo[vf].vlan_count)
+		adapter->vfinfo[vf].vlan_count--;
+
+	err = ixgbe_set_vf_vlan(adapter, add, vid, vf);
+	if (!err && adapter->vfinfo[vf].spoofchk_enabled)
+		hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
+
+	return err;
+}
+
+static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
+				    u32 *msgbuf, u32 vf)
+{
+	u8 *new_mac = ((u8 *)(&msgbuf[1]));
+	int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
+		    IXGBE_VT_MSGINFO_SHIFT;
+	int err;
+
+	if (adapter->vfinfo[vf].pf_set_mac && index > 0) {
+		e_warn(drv,
+		       "VF %d requested MACVLAN filter but is administratively denied\n",
+		       vf);
+		return -1;
+	}
+
+	/* A non-zero index indicates the VF is setting a filter */
+	if (index) {
+		if (!is_valid_ether_addr(new_mac)) {
+			e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
+			return -1;
+		}
+
+		/*
+		 * If the VF is allowed to set MAC filters then turn off
+		 * anti-spoofing to avoid false positives.
+		 */
+		if (adapter->vfinfo[vf].spoofchk_enabled)
+			ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false);
+	}
+
+	err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac);
+	if (err == -ENOSPC)
+		e_warn(drv,
+		       "VF %d has requested a MACVLAN filter but there is no space for it\n",
+		       vf);
+
+	return err < 0;
+}
+
+static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter,
+				  u32 *msgbuf, u32 vf)
+{
+	int api = msgbuf[1];
+
+	switch (api) {
+	case ixgbe_mbox_api_10:
+	case ixgbe_mbox_api_11:
+		adapter->vfinfo[vf].vf_api = api;
+		return 0;
+	default:
+		break;
+	}
+
+	e_info(drv, "VF %d requested invalid api version %u\n", vf, api);
+
+	return -1;
+}
+
+static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter,
+			       u32 *msgbuf, u32 vf)
+{
+	struct net_device *dev = adapter->netdev;
+	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
+	unsigned int default_tc = 0;
+	u8 num_tcs = netdev_get_num_tc(dev);
+
+	/* verify the PF is supporting the correct APIs */
+	switch (adapter->vfinfo[vf].vf_api) {
+	case ixgbe_mbox_api_20:
+	case ixgbe_mbox_api_11:
+		break;
+	default:
+		return -1;
+	}
+
+	/* only allow 1 Tx queue for bandwidth limiting */
+	msgbuf[IXGBE_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
+	msgbuf[IXGBE_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
+
+	/* if TCs > 1 determine which TC belongs to default user priority */
+	if (num_tcs > 1)
+		default_tc = netdev_get_prio_tc_map(dev, adapter->default_up);
+
+	/* notify VF of need for VLAN tag stripping, and correct queue */
+	if (num_tcs)
+		msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs;
+	else if (adapter->vfinfo[vf].pf_vlan || adapter->vfinfo[vf].pf_qos)
+		msgbuf[IXGBE_VF_TRANS_VLAN] = 1;
+	else
+		msgbuf[IXGBE_VF_TRANS_VLAN] = 0;
+
+	/* notify VF of default queue */
+	msgbuf[IXGBE_VF_DEF_QUEUE] = default_tc;
+
+	return 0;
 }
 
 static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
@@ -553,10 +813,6 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
 	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
 	struct ixgbe_hw *hw = &adapter->hw;
 	s32 retval;
-	int entries;
-	u16 *hash_list;
-	int add, vid, index;
-	u8 *new_mac;
 
 	retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
 
@@ -572,39 +828,13 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
 	/* flush the ack before we write any messages back */
 	IXGBE_WRITE_FLUSH(hw);
 
+	if (msgbuf[0] == IXGBE_VF_RESET)
+		return ixgbe_vf_reset_msg(adapter, vf);
+
 	/*
 	 * until the vf completes a virtual function reset it should not be
 	 * allowed to start any configuration.
 	 */
-
-	if (msgbuf[0] == IXGBE_VF_RESET) {
-		unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
-		new_mac = (u8 *)(&msgbuf[1]);
-		e_info(probe, "VF Reset msg received from vf %d\n", vf);
-		adapter->vfinfo[vf].clear_to_send = false;
-		ixgbe_vf_reset_msg(adapter, vf);
-		adapter->vfinfo[vf].clear_to_send = true;
-
-		if (is_valid_ether_addr(new_mac) &&
-		    !adapter->vfinfo[vf].pf_set_mac)
-			ixgbe_set_vf_mac(adapter, vf, vf_mac);
-		else
-			ixgbe_set_vf_mac(adapter,
-				 vf, adapter->vfinfo[vf].vf_mac_addresses);
-
-		/* reply to reset with ack and vf mac address */
-		msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
-		memcpy(new_mac, vf_mac, ETH_ALEN);
-		/*
-		 * Piggyback the multicast filter type so VF can compute the
-		 * correct vectors
-		 */
-		msgbuf[3] = hw->mac.mc_filter_type;
-		ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
-
-		return retval;
-	}
-
 	if (!adapter->vfinfo[vf].clear_to_send) {
 		msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
 		ixgbe_write_mbx(hw, msgbuf, 1, vf);
@@ -613,70 +843,25 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
 
 	switch ((msgbuf[0] & 0xFFFF)) {
 	case IXGBE_VF_SET_MAC_ADDR:
-		new_mac = ((u8 *)(&msgbuf[1]));
-		if (is_valid_ether_addr(new_mac) &&
-		    !adapter->vfinfo[vf].pf_set_mac) {
-			ixgbe_set_vf_mac(adapter, vf, new_mac);
-		} else if (memcmp(adapter->vfinfo[vf].vf_mac_addresses,
-			   new_mac, ETH_ALEN)) {
-			e_warn(drv, "VF %d attempted to override "
-			       "administratively set MAC address\nReload "
-			       "the VF driver to resume operations\n", vf);
-			retval = -1;
-		}
+		retval = ixgbe_set_vf_mac_addr(adapter, msgbuf, vf);
 		break;
 	case IXGBE_VF_SET_MULTICAST:
-		entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
-			  >> IXGBE_VT_MSGINFO_SHIFT;
-		hash_list = (u16 *)&msgbuf[1];
-		retval = ixgbe_set_vf_multicasts(adapter, entries,
-						 hash_list, vf);
-		break;
-	case IXGBE_VF_SET_LPE:
-		ixgbe_set_vf_lpe(adapter, msgbuf);
+		retval = ixgbe_set_vf_multicasts(adapter, msgbuf, vf);
 		break;
 	case IXGBE_VF_SET_VLAN:
-		add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
-		      >> IXGBE_VT_MSGINFO_SHIFT;
-		vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
-		if (adapter->vfinfo[vf].pf_vlan) {
-			e_warn(drv, "VF %d attempted to override "
-			       "administratively set VLAN configuration\n"
-			       "Reload the VF driver to resume operations\n",
-			       vf);
-			retval = -1;
-		} else {
-			if (add)
-				adapter->vfinfo[vf].vlan_count++;
-			else if (adapter->vfinfo[vf].vlan_count)
-				adapter->vfinfo[vf].vlan_count--;
-			retval = ixgbe_set_vf_vlan(adapter, add, vid, vf);
-			if (!retval && adapter->vfinfo[vf].spoofchk_enabled)
-				hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
-		}
+		retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf);
+		break;
+	case IXGBE_VF_SET_LPE:
+		retval = ixgbe_set_vf_lpe(adapter, msgbuf, vf);
 		break;
 	case IXGBE_VF_SET_MACVLAN:
-		index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
-			IXGBE_VT_MSGINFO_SHIFT;
-		if (adapter->vfinfo[vf].pf_set_mac && index > 0) {
-			e_warn(drv, "VF %d requested MACVLAN filter but is "
-			       "administratively denied\n", vf);
-			retval = -1;
-			break;
-		}
-		/*
-		 * If the VF is allowed to set MAC filters then turn off
-		 * anti-spoofing to avoid false positives. An index
-		 * greater than 0 will indicate the VF is setting a
-		 * macvlan MAC filter.
-		 */
-		if (index > 0 && adapter->vfinfo[vf].spoofchk_enabled)
-			ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false);
-		retval = ixgbe_set_vf_macvlan(adapter, vf, index,
-					      (unsigned char *)(&msgbuf[1]));
-		if (retval == -ENOSPC)
-			e_warn(drv, "VF %d has requested a MACVLAN filter "
-			       "but there is no space for it\n", vf);
+		retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf);
+		break;
+	case IXGBE_VF_API_NEGOTIATE:
+		retval = ixgbe_negotiate_vf_api(adapter, msgbuf, vf);
+		break;
+	case IXGBE_VF_GET_QUEUES:
+		retval = ixgbe_get_vf_queues(adapter, msgbuf, vf);
 		break;
 	default:
 		e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
@@ -692,7 +877,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
 
 	msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
 
-	ixgbe_write_mbx(hw, msgbuf, 1, vf);
+	ixgbe_write_mbx(hw, msgbuf, mbx_size, vf);
 
 	return retval;
 }
@@ -783,7 +968,7 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
 		err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
 		if (err)
 			goto out;
-		ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
+		ixgbe_set_vmvir(adapter, vlan, qos, vf);
 		ixgbe_set_vmolr(hw, vf, false);
 		if (adapter->vfinfo[vf].spoofchk_enabled)
 			hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
@@ -803,7 +988,7 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
 	} else {
 		err = ixgbe_set_vf_vlan(adapter, false,
 					adapter->vfinfo[vf].pf_vlan, vf);
-		ixgbe_set_vmvir(adapter, vlan, vf);
+		ixgbe_clear_vmvir(adapter, vf);
 		ixgbe_set_vmolr(hw, vf, true);
 		hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
 		if (adapter->vfinfo[vf].vlan_count)
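[Note] The vf_shift/reg_offset math used throughout this file spreads one enable bit per VF across consecutive 32-bit VFRE/VFTE registers: VF n lives at bit n % 32 of register n / 32. Shown standalone:

    #include <stdio.h>

    int main(void)
    {
        unsigned int vf = 37;
        unsigned int vf_shift = vf % 32;   /* bit within the register */
        unsigned int reg_offset = vf / 32; /* which VFRE/VFTE register */
        unsigned int vfre = 0;

        vfre |= 1u << vf_shift;            /* enable receive for VF 37 */
        printf("VFRE[%u] bit %u -> 0x%08x\n", reg_offset, vf_shift, vfre);

        vfre &= ~(1u << vf_shift);         /* and disable it again */
        printf("VFRE[%u] after clear -> 0x%08x\n", reg_offset, vfre);
        return 0;
    }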
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 0722f3368092..21915e20399a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -56,6 +56,7 @@
56#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9 56#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
57#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72 57#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72
58#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0 58#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
59#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470
59#define IXGBE_DEV_ID_82599_SFP_EM 0x1507 60#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
60#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D 61#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
61#define IXGBE_DEV_ID_82599EN_SFP 0x1557 62#define IXGBE_DEV_ID_82599EN_SFP 0x1557
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index da17ccf5c09d..3147795bd135 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -33,8 +33,11 @@
33#define IXGBE_DEV_ID_X540_VF 0x1515 33#define IXGBE_DEV_ID_X540_VF 0x1515
34 34
35#define IXGBE_VF_IRQ_CLEAR_MASK 7 35#define IXGBE_VF_IRQ_CLEAR_MASK 7
36#define IXGBE_VF_MAX_TX_QUEUES 1 36#define IXGBE_VF_MAX_TX_QUEUES 8
37#define IXGBE_VF_MAX_RX_QUEUES 1 37#define IXGBE_VF_MAX_RX_QUEUES 8
38
39/* DCB define */
40#define IXGBE_VF_MAX_TRAFFIC_CLASS 8
38 41
39/* Link speed */ 42/* Link speed */
40typedef u32 ixgbe_link_speed; 43typedef u32 ixgbe_link_speed;
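
Raising both per-VF queue limits from 1 to 8 lines up with the new IXGBE_VF_MAX_TRAFFIC_CLASS: under DCB the VF needs one Rx queue per traffic class, which is exactly what the ixgbevf queue-setup code added later in this patch computes:

    /* Mirrors the num_tcs handling in ixgbevf_setup_queues()/_reset_queues(). */
    static unsigned int vf_rx_queues_needed(unsigned int num_tcs)
    {
        return (num_tcs > 1) ? num_tcs : 1;
    }
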
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 4a9c9c285685..2323ccd211c0 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -89,8 +89,8 @@ struct ixgbevf_ring {
89/* How many Rx Buffers do we bundle into one write to the hardware ? */ 89/* How many Rx Buffers do we bundle into one write to the hardware ? */
90#define IXGBEVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */ 90#define IXGBEVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
91 91
92#define MAX_RX_QUEUES 1 92#define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES
93#define MAX_TX_QUEUES 1 93#define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES
94 94
95#define IXGBEVF_DEFAULT_TXD 1024 95#define IXGBEVF_DEFAULT_TXD 1024
96#define IXGBEVF_DEFAULT_RXD 512 96#define IXGBEVF_DEFAULT_RXD 512
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index de1ad506665d..f3d3947ae962 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -58,7 +58,7 @@ const char ixgbevf_driver_name[] = "ixgbevf";
58static const char ixgbevf_driver_string[] = 58static const char ixgbevf_driver_string[] =
59 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; 59 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
60 60
61#define DRV_VERSION "2.6.0-k" 61#define DRV_VERSION "2.7.12-k"
62const char ixgbevf_driver_version[] = DRV_VERSION; 62const char ixgbevf_driver_version[] = DRV_VERSION;
63static char ixgbevf_copyright[] = 63static char ixgbevf_copyright[] =
64 "Copyright (c) 2009 - 2012 Intel Corporation."; 64 "Copyright (c) 2009 - 2012 Intel Corporation.";
@@ -99,6 +99,7 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
99 99
100/* forward decls */ 100/* forward decls */
101static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector); 101static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
102static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
102 103
103static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw, 104static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
104 struct ixgbevf_ring *rx_ring, 105 struct ixgbevf_ring *rx_ring,
@@ -358,6 +359,12 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
358 bi->dma = dma_map_single(&pdev->dev, skb->data, 359 bi->dma = dma_map_single(&pdev->dev, skb->data,
359 rx_ring->rx_buf_len, 360 rx_ring->rx_buf_len,
360 DMA_FROM_DEVICE); 361 DMA_FROM_DEVICE);
362 if (dma_mapping_error(&pdev->dev, bi->dma)) {
363 dev_kfree_skb(skb);
364 bi->skb = NULL;
365 dev_err(&pdev->dev, "RX DMA map failed\n");
366 break;
367 }
361 } 368 }
362 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); 369 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
363 370
@@ -471,6 +478,16 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
471 } 478 }
472 skb->protocol = eth_type_trans(skb, rx_ring->netdev); 479 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
473 480
481 /* Workaround hardware that can't do proper VEPA multicast
482 * source pruning.
483 */
484 if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
485 !(compare_ether_addr(adapter->netdev->dev_addr,
486 eth_hdr(skb)->h_source))) {
487 dev_kfree_skb_irq(skb);
488 goto next_desc;
489 }
490
474 ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc); 491 ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc);
475 492
476next_desc: 493next_desc:
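
compare_ether_addr() returns 0 on a match, so the new test drops broadcast/multicast frames whose source MAC equals our own address: our own transmissions reflected back by hardware that cannot do VEPA source pruning. A standalone equivalent, using memcmp in place of the kernel helper:

    #include <stdbool.h>
    #include <string.h>

    #define ETH_ALEN 6

    static bool is_own_reflection(const unsigned char *dev_addr,
                                  const unsigned char *src_addr,
                                  bool bcast_or_mcast)
    {
        return bcast_or_mcast && memcmp(dev_addr, src_addr, ETH_ALEN) == 0;
    }
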
@@ -1131,12 +1148,12 @@ static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1131 if (!hw->mac.ops.set_vfta) 1148 if (!hw->mac.ops.set_vfta)
1132 return -EOPNOTSUPP; 1149 return -EOPNOTSUPP;
1133 1150
1134 spin_lock(&adapter->mbx_lock); 1151 spin_lock_bh(&adapter->mbx_lock);
1135 1152
1136 /* add VID to filter table */ 1153 /* add VID to filter table */
1137 err = hw->mac.ops.set_vfta(hw, vid, 0, true); 1154 err = hw->mac.ops.set_vfta(hw, vid, 0, true);
1138 1155
1139 spin_unlock(&adapter->mbx_lock); 1156 spin_unlock_bh(&adapter->mbx_lock);
1140 1157
1141 /* translate error return types so error makes sense */ 1158 /* translate error return types so error makes sense */
1142 if (err == IXGBE_ERR_MBX) 1159 if (err == IXGBE_ERR_MBX)
@@ -1156,13 +1173,13 @@ static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1156 struct ixgbe_hw *hw = &adapter->hw; 1173 struct ixgbe_hw *hw = &adapter->hw;
1157 int err = -EOPNOTSUPP; 1174 int err = -EOPNOTSUPP;
1158 1175
1159 spin_lock(&adapter->mbx_lock); 1176 spin_lock_bh(&adapter->mbx_lock);
1160 1177
1161 /* remove VID from filter table */ 1178 /* remove VID from filter table */
1162 if (hw->mac.ops.set_vfta) 1179 if (hw->mac.ops.set_vfta)
1163 err = hw->mac.ops.set_vfta(hw, vid, 0, false); 1180 err = hw->mac.ops.set_vfta(hw, vid, 0, false);
1164 1181
1165 spin_unlock(&adapter->mbx_lock); 1182 spin_unlock_bh(&adapter->mbx_lock);
1166 1183
1167 clear_bit(vid, adapter->active_vlans); 1184 clear_bit(vid, adapter->active_vlans);
1168 1185
@@ -1218,7 +1235,7 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
1218 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1235 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1219 struct ixgbe_hw *hw = &adapter->hw; 1236 struct ixgbe_hw *hw = &adapter->hw;
1220 1237
1221 spin_lock(&adapter->mbx_lock); 1238 spin_lock_bh(&adapter->mbx_lock);
1222 1239
1223 /* reprogram multicast list */ 1240 /* reprogram multicast list */
1224 if (hw->mac.ops.update_mc_addr_list) 1241 if (hw->mac.ops.update_mc_addr_list)
@@ -1226,7 +1243,7 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
1226 1243
1227 ixgbevf_write_uc_addr_list(netdev); 1244 ixgbevf_write_uc_addr_list(netdev);
1228 1245
1229 spin_unlock(&adapter->mbx_lock); 1246 spin_unlock_bh(&adapter->mbx_lock);
1230} 1247}
1231 1248
1232static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter) 1249static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
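
The spin_lock() -> spin_lock_bh() conversion repeated throughout this file follows the usual rule: once a lock can also be taken from timer or softirq context, process-context users must take it with bottom halves disabled, or a softirq firing on the same CPU while the lock is held can spin on it forever. The pattern, in brief:

    spin_lock_bh(&adapter->mbx_lock);   /* masks local softirqs while held */
    /* ... mailbox transaction with the PF ... */
    spin_unlock_bh(&adapter->mbx_lock);
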
@@ -1335,11 +1352,12 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
1335static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) 1352static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
1336{ 1353{
1337 struct ixgbe_hw *hw = &adapter->hw; 1354 struct ixgbe_hw *hw = &adapter->hw;
1338 int api[] = { ixgbe_mbox_api_10, 1355 int api[] = { ixgbe_mbox_api_11,
1356 ixgbe_mbox_api_10,
1339 ixgbe_mbox_api_unknown }; 1357 ixgbe_mbox_api_unknown };
1340 int err = 0, idx = 0; 1358 int err = 0, idx = 0;
1341 1359
1342 spin_lock(&adapter->mbx_lock); 1360 spin_lock_bh(&adapter->mbx_lock);
1343 1361
1344 while (api[idx] != ixgbe_mbox_api_unknown) { 1362 while (api[idx] != ixgbe_mbox_api_unknown) {
1345 err = ixgbevf_negotiate_api_version(hw, api[idx]); 1363 err = ixgbevf_negotiate_api_version(hw, api[idx]);
@@ -1348,7 +1366,7 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
1348 idx++; 1366 idx++;
1349 } 1367 }
1350 1368
1351 spin_unlock(&adapter->mbx_lock); 1369 spin_unlock_bh(&adapter->mbx_lock);
1352} 1370}
1353 1371
1354static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) 1372static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
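
The preference list is ordered newest-first, so the VF settles on the highest API revision the PF ACKs and quietly falls back to 1.0 otherwise. A standalone model of the loop (the PF's behaviour is faked here for illustration):

    #include <stdio.h>

    enum api { API_11, API_10, API_UNKNOWN };

    /* Stand-in for the mailbox round-trip; 0 means the PF ACKed. */
    static int try_api(enum api a)
    {
        return (a == API_10) ? 0 : -1;  /* pretend the PF only speaks 1.0 */
    }

    int main(void)
    {
        enum api prefs[] = { API_11, API_10, API_UNKNOWN };

        for (int i = 0; prefs[i] != API_UNKNOWN; i++) {
            if (try_api(prefs[i]) == 0) {
                printf("negotiated preference index %d\n", i);
                return 0;
            }
        }
        printf("no common API\n");
        return 1;
    }
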
@@ -1389,7 +1407,7 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1389 1407
1390 ixgbevf_configure_msix(adapter); 1408 ixgbevf_configure_msix(adapter);
1391 1409
1392 spin_lock(&adapter->mbx_lock); 1410 spin_lock_bh(&adapter->mbx_lock);
1393 1411
1394 if (hw->mac.ops.set_rar) { 1412 if (hw->mac.ops.set_rar) {
1395 if (is_valid_ether_addr(hw->mac.addr)) 1413 if (is_valid_ether_addr(hw->mac.addr))
@@ -1398,7 +1416,7 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1398 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0); 1416 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
1399 } 1417 }
1400 1418
1401 spin_unlock(&adapter->mbx_lock); 1419 spin_unlock_bh(&adapter->mbx_lock);
1402 1420
1403 clear_bit(__IXGBEVF_DOWN, &adapter->state); 1421 clear_bit(__IXGBEVF_DOWN, &adapter->state);
1404 ixgbevf_napi_enable_all(adapter); 1422 ixgbevf_napi_enable_all(adapter);
@@ -1413,12 +1431,87 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1413 mod_timer(&adapter->watchdog_timer, jiffies); 1431 mod_timer(&adapter->watchdog_timer, jiffies);
1414} 1432}
1415 1433
1434static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter)
1435{
1436 struct ixgbe_hw *hw = &adapter->hw;
1437 struct ixgbevf_ring *rx_ring;
1438 unsigned int def_q = 0;
1439 unsigned int num_tcs = 0;
1440 unsigned int num_rx_queues = 1;
1441 int err, i;
1442
1443 spin_lock_bh(&adapter->mbx_lock);
1444
1445 /* fetch queue configuration from the PF */
1446 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
1447
1448 spin_unlock_bh(&adapter->mbx_lock);
1449
1450 if (err)
1451 return err;
1452
1453 if (num_tcs > 1) {
1454 /* update default Tx ring register index */
1455 adapter->tx_ring[0].reg_idx = def_q;
1456
1457 /* we need as many queues as traffic classes */
1458 num_rx_queues = num_tcs;
1459 }
1460
1461 /* nothing to do if we have the correct number of queues */
1462 if (adapter->num_rx_queues == num_rx_queues)
1463 return 0;
1464
1465 /* allocate new rings */
1466 rx_ring = kcalloc(num_rx_queues,
1467 sizeof(struct ixgbevf_ring), GFP_KERNEL);
1468 if (!rx_ring)
1469 return -ENOMEM;
1470
1471 /* setup ring fields */
1472 for (i = 0; i < num_rx_queues; i++) {
1473 rx_ring[i].count = adapter->rx_ring_count;
1474 rx_ring[i].queue_index = i;
1475 rx_ring[i].reg_idx = i;
1476 rx_ring[i].dev = &adapter->pdev->dev;
1477 rx_ring[i].netdev = adapter->netdev;
1478
1479 /* allocate resources on the ring */
1480 err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
1481 if (err) {
1482 while (i) {
1483 i--;
1484 ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
1485 }
1486 kfree(rx_ring);
1487 return err;
1488 }
1489 }
1490
1491 /* free the existing rings and queues */
1492 ixgbevf_free_all_rx_resources(adapter);
1493 adapter->num_rx_queues = 0;
1494 kfree(adapter->rx_ring);
1495
1496 /* move new rings into position on the adapter struct */
1497 adapter->rx_ring = rx_ring;
1498 adapter->num_rx_queues = num_rx_queues;
1499
1500 /* reset ring to vector mapping */
1501 ixgbevf_reset_q_vectors(adapter);
1502 ixgbevf_map_rings_to_vectors(adapter);
1503
1504 return 0;
1505}
1506
1416void ixgbevf_up(struct ixgbevf_adapter *adapter) 1507void ixgbevf_up(struct ixgbevf_adapter *adapter)
1417{ 1508{
1418 struct ixgbe_hw *hw = &adapter->hw; 1509 struct ixgbe_hw *hw = &adapter->hw;
1419 1510
1420 ixgbevf_negotiate_api(adapter); 1511 ixgbevf_negotiate_api(adapter);
1421 1512
1513 ixgbevf_reset_queues(adapter);
1514
1422 ixgbevf_configure(adapter); 1515 ixgbevf_configure(adapter);
1423 1516
1424 ixgbevf_up_complete(adapter); 1517 ixgbevf_up_complete(adapter);
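
Worth noting in ixgbevf_reset_queues() above: when ixgbevf_setup_rx_resources() fails mid-loop, only the rings already set up are freed, in reverse order, before the new array is dropped. The same shape as a standalone pattern (the helpers are hypothetical):

    /* Hypothetical per-slot setup/teardown, just to show the unwind shape. */
    static int setup_one(int *slot) { *slot = 1; return 0; }
    static void free_one(int *slot) { *slot = 0; }

    static int setup_all(int *slots, int n)
    {
        int i, err;

        for (i = 0; i < n; i++) {
            err = setup_one(&slots[i]);
            if (err) {
                while (i)                /* free only what succeeded, in reverse */
                    free_one(&slots[--i]);
                return err;
            }
        }
        return 0;
    }
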
@@ -1611,14 +1704,14 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
1611 struct ixgbe_hw *hw = &adapter->hw; 1704 struct ixgbe_hw *hw = &adapter->hw;
1612 struct net_device *netdev = adapter->netdev; 1705 struct net_device *netdev = adapter->netdev;
1613 1706
1614 spin_lock(&adapter->mbx_lock); 1707 spin_lock_bh(&adapter->mbx_lock);
1615 1708
1616 if (hw->mac.ops.reset_hw(hw)) 1709 if (hw->mac.ops.reset_hw(hw))
1617 hw_dbg(hw, "PF still resetting\n"); 1710 hw_dbg(hw, "PF still resetting\n");
1618 else 1711 else
1619 hw->mac.ops.init_hw(hw); 1712 hw->mac.ops.init_hw(hw);
1620 1713
1621 spin_unlock(&adapter->mbx_lock); 1714 spin_unlock_bh(&adapter->mbx_lock);
1622 1715
1623 if (is_valid_ether_addr(adapter->hw.mac.addr)) { 1716 if (is_valid_ether_addr(adapter->hw.mac.addr)) {
1624 memcpy(netdev->dev_addr, adapter->hw.mac.addr, 1717 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
@@ -1717,6 +1810,7 @@ static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
1717 for (i = 0; i < adapter->num_tx_queues; i++) { 1810 for (i = 0; i < adapter->num_tx_queues; i++) {
1718 adapter->tx_ring[i].count = adapter->tx_ring_count; 1811 adapter->tx_ring[i].count = adapter->tx_ring_count;
1719 adapter->tx_ring[i].queue_index = i; 1812 adapter->tx_ring[i].queue_index = i;
1813 /* reg_idx may be remapped later by DCB config */
1720 adapter->tx_ring[i].reg_idx = i; 1814 adapter->tx_ring[i].reg_idx = i;
1721 adapter->tx_ring[i].dev = &adapter->pdev->dev; 1815 adapter->tx_ring[i].dev = &adapter->pdev->dev;
1722 adapter->tx_ring[i].netdev = adapter->netdev; 1816 adapter->tx_ring[i].netdev = adapter->netdev;
@@ -1834,18 +1928,13 @@ err_out:
1834 **/ 1928 **/
1835static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter) 1929static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
1836{ 1930{
1837 int q_idx, num_q_vectors; 1931 int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1838 int napi_vectors;
1839
1840 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1841 napi_vectors = adapter->num_rx_queues;
1842 1932
1843 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 1933 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1844 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx]; 1934 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
1845 1935
1846 adapter->q_vector[q_idx] = NULL; 1936 adapter->q_vector[q_idx] = NULL;
1847 if (q_idx < napi_vectors) 1937 netif_napi_del(&q_vector->napi);
1848 netif_napi_del(&q_vector->napi);
1849 kfree(q_vector); 1938 kfree(q_vector);
1850 } 1939 }
1851} 1940}
@@ -1950,8 +2039,11 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
1950 hw->subsystem_device_id = pdev->subsystem_device; 2039 hw->subsystem_device_id = pdev->subsystem_device;
1951 2040
1952 hw->mbx.ops.init_params(hw); 2041 hw->mbx.ops.init_params(hw);
1953 hw->mac.max_tx_queues = MAX_TX_QUEUES; 2042
1954 hw->mac.max_rx_queues = MAX_RX_QUEUES; 2043 /* assume legacy case in which PF would only give VF 2 queues */
2044 hw->mac.max_tx_queues = 2;
2045 hw->mac.max_rx_queues = 2;
2046
1955 err = hw->mac.ops.reset_hw(hw); 2047 err = hw->mac.ops.reset_hw(hw);
1956 if (err) { 2048 if (err) {
1957 dev_info(&pdev->dev, 2049 dev_info(&pdev->dev,
@@ -2113,12 +2205,12 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
2113 if (hw->mac.ops.check_link) { 2205 if (hw->mac.ops.check_link) {
2114 s32 need_reset; 2206 s32 need_reset;
2115 2207
2116 spin_lock(&adapter->mbx_lock); 2208 spin_lock_bh(&adapter->mbx_lock);
2117 2209
2118 need_reset = hw->mac.ops.check_link(hw, &link_speed, 2210 need_reset = hw->mac.ops.check_link(hw, &link_speed,
2119 &link_up, false); 2211 &link_up, false);
2120 2212
2121 spin_unlock(&adapter->mbx_lock); 2213 spin_unlock_bh(&adapter->mbx_lock);
2122 2214
2123 if (need_reset) { 2215 if (need_reset) {
2124 adapter->link_up = link_up; 2216 adapter->link_up = link_up;
@@ -2377,6 +2469,63 @@ static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
2377 &adapter->rx_ring[i]); 2469 &adapter->rx_ring[i]);
2378} 2470}
2379 2471
2472static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter)
2473{
2474 struct ixgbe_hw *hw = &adapter->hw;
2475 struct ixgbevf_ring *rx_ring;
2476 unsigned int def_q = 0;
2477 unsigned int num_tcs = 0;
2478 unsigned int num_rx_queues = 1;
2479 int err, i;
2480
2481 spin_lock_bh(&adapter->mbx_lock);
2482
2483 /* fetch queue configuration from the PF */
2484 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2485
2486 spin_unlock_bh(&adapter->mbx_lock);
2487
2488 if (err)
2489 return err;
2490
2491 if (num_tcs > 1) {
2492 /* update default Tx ring register index */
2493 adapter->tx_ring[0].reg_idx = def_q;
2494
2495 /* we need as many queues as traffic classes */
2496 num_rx_queues = num_tcs;
2497 }
2498
2499 /* nothing to do if we have the correct number of queues */
2500 if (adapter->num_rx_queues == num_rx_queues)
2501 return 0;
2502
2503 /* allocate new rings */
2504 rx_ring = kcalloc(num_rx_queues,
2505 sizeof(struct ixgbevf_ring), GFP_KERNEL);
2506 if (!rx_ring)
2507 return -ENOMEM;
2508
2509 /* setup ring fields */
2510 for (i = 0; i < num_rx_queues; i++) {
2511 rx_ring[i].count = adapter->rx_ring_count;
2512 rx_ring[i].queue_index = i;
2513 rx_ring[i].reg_idx = i;
2514 rx_ring[i].dev = &adapter->pdev->dev;
2515 rx_ring[i].netdev = adapter->netdev;
2516 }
2517
2518 /* free the existing ring and queues */
2519 adapter->num_rx_queues = 0;
2520 kfree(adapter->rx_ring);
2521
2522 /* move new rings into position on the adapter struct */
2523 adapter->rx_ring = rx_ring;
2524 adapter->num_rx_queues = num_rx_queues;
2525
2526 return 0;
2527}
2528
2380/** 2529/**
2381 * ixgbevf_open - Called when a network interface is made active 2530 * ixgbevf_open - Called when a network interface is made active
2382 * @netdev: network interface device structure 2531 * @netdev: network interface device structure
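
ixgbevf_setup_queues() is the open()-path twin of ixgbevf_reset_queues() shown earlier: it resizes the Rx ring array the same way but allocates no ring resources and skips the vector remap, because ixgbevf_open() performs those steps itself immediately afterwards:

    /* open() ordering as wired up below: */
    ixgbevf_negotiate_api(adapter);           /* pick mailbox API */
    ixgbevf_setup_queues(adapter);            /* size rings, set reg_idx */
    ixgbevf_setup_all_tx_resources(adapter);  /* then allocate descriptors */
    ixgbevf_setup_all_rx_resources(adapter);
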
@@ -2413,6 +2562,11 @@ static int ixgbevf_open(struct net_device *netdev)
2413 2562
2414 ixgbevf_negotiate_api(adapter); 2563 ixgbevf_negotiate_api(adapter);
2415 2564
2565 /* setup queue reg_idx and Rx queue count */
2566 err = ixgbevf_setup_queues(adapter);
2567 if (err)
2568 goto err_setup_queues;
2569
2416 /* allocate transmit descriptors */ 2570 /* allocate transmit descriptors */
2417 err = ixgbevf_setup_all_tx_resources(adapter); 2571 err = ixgbevf_setup_all_tx_resources(adapter);
2418 if (err) 2572 if (err)
@@ -2451,6 +2605,7 @@ err_setup_rx:
2451 ixgbevf_free_all_rx_resources(adapter); 2605 ixgbevf_free_all_rx_resources(adapter);
2452err_setup_tx: 2606err_setup_tx:
2453 ixgbevf_free_all_tx_resources(adapter); 2607 ixgbevf_free_all_tx_resources(adapter);
2608err_setup_queues:
2454 ixgbevf_reset(adapter); 2609 ixgbevf_reset(adapter);
2455 2610
2456err_setup_reset: 2611err_setup_reset:
@@ -2678,10 +2833,10 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
2678 tx_buffer_info->dma = 2833 tx_buffer_info->dma =
2679 skb_frag_dma_map(tx_ring->dev, frag, 2834 skb_frag_dma_map(tx_ring->dev, frag,
2680 offset, size, DMA_TO_DEVICE); 2835 offset, size, DMA_TO_DEVICE);
2681 tx_buffer_info->mapped_as_page = true;
2682 if (dma_mapping_error(tx_ring->dev, 2836 if (dma_mapping_error(tx_ring->dev,
2683 tx_buffer_info->dma)) 2837 tx_buffer_info->dma))
2684 goto dma_error; 2838 goto dma_error;
2839 tx_buffer_info->mapped_as_page = true;
2685 tx_buffer_info->next_to_watch = i; 2840 tx_buffer_info->next_to_watch = i;
2686 2841
2687 len -= size; 2842 len -= size;
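
The reorder above is small but deliberate: mapped_as_page may be set only after dma_mapping_error() has cleared the mapping, otherwise the dma_error unwind path would try to unmap, as a page, a mapping that was never established. The general rule: update bookkeeping only once the resource it describes actually exists.
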
@@ -2823,6 +2978,11 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2823#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD 2978#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
2824 unsigned short f; 2979 unsigned short f;
2825#endif 2980#endif
2981 u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
2982 if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
2983 dev_kfree_skb(skb);
2984 return NETDEV_TX_OK;
2985 }
2826 2986
2827 tx_ring = &adapter->tx_ring[r_idx]; 2987 tx_ring = &adapter->tx_ring[r_idx];
2828 2988
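
is_link_local_ether_addr() matches the IEEE 802.1 reserved range 01:80:c2:00:00:00 through :0f (STP, LLDP, pause frames and the like), which a VF arguably has no business transmitting. A standalone equivalent of the check, with memcmp in place of the kernel helper's masked compare:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    static bool is_link_local_ether(const uint8_t addr[6])
    {
        static const uint8_t base[5] = { 0x01, 0x80, 0xc2, 0x00, 0x00 };

        return memcmp(addr, base, 5) == 0 && (addr[5] & 0xf0) == 0;
    }
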
@@ -2902,12 +3062,12 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
2902 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 3062 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2903 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); 3063 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
2904 3064
2905 spin_lock(&adapter->mbx_lock); 3065 spin_lock_bh(&adapter->mbx_lock);
2906 3066
2907 if (hw->mac.ops.set_rar) 3067 if (hw->mac.ops.set_rar)
2908 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); 3068 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
2909 3069
2910 spin_unlock(&adapter->mbx_lock); 3070 spin_unlock_bh(&adapter->mbx_lock);
2911 3071
2912 return 0; 3072 return 0;
2913} 3073}
@@ -2925,8 +3085,15 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
2925 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 3085 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2926 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE; 3086 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
2927 3087
2928 if (adapter->hw.mac.type == ixgbe_mac_X540_vf) 3088 switch (adapter->hw.api_version) {
3089 case ixgbe_mbox_api_11:
2929 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; 3090 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3091 break;
3092 default:
3093 if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
3094 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3095 break;
3096 }
2930 3097
2931 /* MTU < 68 is an error and causes problems on some kernels */ 3098 /* MTU < 68 is an error and causes problems on some kernels */
2932 if ((new_mtu < 68) || (max_frame > max_possible_frame)) 3099 if ((new_mtu < 68) || (max_frame > max_possible_frame))
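
Under API 1.1 the jumbo limit no longer depends on detecting an X540 VF, presumably because the PF-side IXGBE_VF_SET_LPE handling seen earlier can validate the larger receive size for any MAC. A standalone model of the bound check (the frame-size constants are assumptions matching common ixgbe values):

    #include <stdbool.h>

    #define ETH_HLEN        14
    #define ETH_FCS_LEN     4
    #define MAX_VLAN_FRAME  1522  /* assumed MAXIMUM_ETHERNET_VLAN_SIZE */
    #define MAX_JUMBO_FRAME 9728  /* assumed IXGBE_MAX_JUMBO_FRAME_SIZE */

    static bool mtu_ok(int new_mtu, bool jumbo_capable)
    {
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
        int limit = jumbo_capable ? MAX_JUMBO_FRAME : MAX_VLAN_FRAME;

        return new_mtu >= 68 && max_frame <= limit;
    }
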
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index 946ce86f337f..0bc30058ff82 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -85,6 +85,7 @@
85enum ixgbe_pfvf_api_rev { 85enum ixgbe_pfvf_api_rev {
86 ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */ 86 ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */
87 ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */ 87 ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */
88 ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */
88 /* This value should always be last */ 89 /* This value should always be last */
89 ixgbe_mbox_api_unknown, /* indicates that API version is not known */ 90 ixgbe_mbox_api_unknown, /* indicates that API version is not known */
90}; 91};
@@ -100,6 +101,15 @@ enum ixgbe_pfvf_api_rev {
100#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */ 101#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
101#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */ 102#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */
102 103
104/* mailbox API, version 1.1 VF requests */
105#define IXGBE_VF_GET_QUEUE 0x09 /* get queue configuration */
106
107/* GET_QUEUES return data indices within the mailbox */
108#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */
109#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */
110#define IXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */
111#define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */
112
103/* length of permanent address message returned from PF */ 113/* length of permanent address message returned from PF */
104#define IXGBE_VF_PERMADDR_MSG_LEN 4 114#define IXGBE_VF_PERMADDR_MSG_LEN 4
105/* word in permanent address message with the current multicast type */ 115/* word in permanent address message with the current multicast type */
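
The four indices fix the reply layout word-for-word; ixgbevf_get_queues() below consumes them in exactly this order. A hedged sketch of the parse, reusing the values defined above:

    #include <stdint.h>

    #define IXGBE_VF_TX_QUEUES  1
    #define IXGBE_VF_RX_QUEUES  2
    #define IXGBE_VF_TRANS_VLAN 3
    #define IXGBE_VF_DEF_QUEUE  4

    struct queue_info { uint32_t tx, rx, trans_vlan, def_q; };

    static void parse_get_queues(const uint32_t msg[5], struct queue_info *qi)
    {
        qi->tx         = msg[IXGBE_VF_TX_QUEUES];
        qi->rx         = msg[IXGBE_VF_RX_QUEUES];
        qi->trans_vlan = msg[IXGBE_VF_TRANS_VLAN];  /* port VLAN indication */
        qi->def_q      = msg[IXGBE_VF_DEF_QUEUE];   /* default queue offset */
    }
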
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 0c7447e6fcc8..0c94557b53df 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -331,6 +331,9 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
331 netdev_for_each_mc_addr(ha, netdev) { 331 netdev_for_each_mc_addr(ha, netdev) {
332 if (i == cnt) 332 if (i == cnt)
333 break; 333 break;
334 if (is_link_local_ether_addr(ha->addr))
335 continue;
336
334 vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr); 337 vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
335 } 338 }
336 339
@@ -513,6 +516,64 @@ int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
513 return err; 516 return err;
514} 517}
515 518
519int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
520 unsigned int *default_tc)
521{
522 int err;
523 u32 msg[5];
524
525 /* do nothing if API doesn't support ixgbevf_get_queues */
526 switch (hw->api_version) {
527 case ixgbe_mbox_api_11:
528 break;
529 default:
530 return 0;
531 }
532
533 /* Fetch queue configuration from the PF */
534 msg[0] = IXGBE_VF_GET_QUEUE;
535 msg[1] = msg[2] = msg[3] = msg[4] = 0;
536 err = hw->mbx.ops.write_posted(hw, msg, 5);
537
538 if (!err)
539 err = hw->mbx.ops.read_posted(hw, msg, 5);
540
541 if (!err) {
542 msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
543
544 /*
545 * if we didn't get an ACK there must have been
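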
546 * some sort of mailbox error so we should treat it
547 * as such
548 */
549 if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_ACK))
550 return IXGBE_ERR_MBX;
551
552 /* record and validate values from message */
553 hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
554 if (hw->mac.max_tx_queues == 0 ||
555 hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
556 hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
557
558 hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
559 if (hw->mac.max_rx_queues == 0 ||
560 hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
561 hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
562
563 *num_tcs = msg[IXGBE_VF_TRANS_VLAN];
564 /* in case of unknown state assume we cannot tag frames */
565 if (*num_tcs > hw->mac.max_rx_queues)
566 *num_tcs = 1;
567
568 *default_tc = msg[IXGBE_VF_DEF_QUEUE];
569 /* default to queue 0 on out-of-bounds queue number */
570 if (*default_tc >= hw->mac.max_tx_queues)
571 *default_tc = 0;
572 }
573
574 return err;
575}
576
516static const struct ixgbe_mac_operations ixgbevf_mac_ops = { 577static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
517 .init_hw = ixgbevf_init_hw_vf, 578 .init_hw = ixgbevf_init_hw_vf,
518 .reset_hw = ixgbevf_reset_hw_vf, 579 .reset_hw = ixgbevf_reset_hw_vf,
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index 47f11a584d8c..7b1f502d1716 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -174,5 +174,7 @@ struct ixgbevf_info {
174 174
175void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size); 175void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
176int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api); 176int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
177int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
178 unsigned int *default_tc);
177#endif /* __IXGBE_VF_H__ */ 179#endif /* __IXGBE_VF_H__ */
178 180
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 59489722e898..10d678d3dd01 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1131,7 +1131,7 @@ static int pxa168_eth_open(struct net_device *dev)
1131 err = request_irq(dev->irq, pxa168_eth_int_handler, 1131 err = request_irq(dev->irq, pxa168_eth_int_handler,
1132 IRQF_DISABLED, dev->name, dev); 1132 IRQF_DISABLED, dev->name, dev);
1133 if (err) { 1133 if (err) {
1134 dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n"); 1134 dev_err(&dev->dev, "can't assign irq\n");
1135 return -EAGAIN; 1135 return -EAGAIN;
1136 } 1136 }
1137 pep->rx_resource_err = 0; 1137 pep->rx_resource_err = 0;
@@ -1201,9 +1201,8 @@ static int pxa168_eth_change_mtu(struct net_device *dev, int mtu)
1201 */ 1201 */
1202 pxa168_eth_stop(dev); 1202 pxa168_eth_stop(dev);
1203 if (pxa168_eth_open(dev)) { 1203 if (pxa168_eth_open(dev)) {
1204 dev_printk(KERN_ERR, &dev->dev, 1204 dev_err(&dev->dev,
1205 "fatal error on re-opening device after " 1205 "fatal error on re-opening device after MTU change\n");
1206 "MTU change\n");
1207 } 1206 }
1208 1207
1209 return 0; 1208 return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index edd9cb8d3e1d..2b23ca21b320 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -870,7 +870,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
870 /* If we haven't received a specific coalescing setting 870 /* If we haven't received a specific coalescing setting
871 * (module param), we set the moderation parameters as follows: 871 * (module param), we set the moderation parameters as follows:
872 * - moder_cnt is set to the number of mtu sized packets to 872 * - moder_cnt is set to the number of mtu sized packets to
873 * satisfy our coelsing target. 873 * satisfy our coalescing target.
874 * - moder_time is set to a fixed value. 874 * - moder_time is set to a fixed value.
875 */ 875 */
876 priv->rx_frames = MLX4_EN_RX_COAL_TARGET; 876 priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 9d27e42264e2..8a5e70d68894 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -126,7 +126,7 @@ enum {
126#define MLX4_EN_RX_COAL_TIME 0x10 126#define MLX4_EN_RX_COAL_TIME 0x10
127 127
128#define MLX4_EN_TX_COAL_PKTS 16 128#define MLX4_EN_TX_COAL_PKTS 16
129#define MLX4_EN_TX_COAL_TIME 0x80 129#define MLX4_EN_TX_COAL_TIME 0x10
130 130
131#define MLX4_EN_RX_RATE_LOW 400000 131#define MLX4_EN_RX_RATE_LOW 400000
132#define MLX4_EN_RX_COAL_TIME_LOW 0 132#define MLX4_EN_RX_COAL_TIME_LOW 0
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index e558edd1cb6c..e4ba868e232c 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -7251,18 +7251,7 @@ static struct pci_driver pci_device_driver = {
7251 .remove = pcidev_exit 7251 .remove = pcidev_exit
7252}; 7252};
7253 7253
7254static int __init ksz884x_init_module(void) 7254module_pci_driver(pci_device_driver);
7255{
7256 return pci_register_driver(&pci_device_driver);
7257}
7258
7259static void __exit ksz884x_cleanup_module(void)
7260{
7261 pci_unregister_driver(&pci_device_driver);
7262}
7263
7264module_init(ksz884x_init_module);
7265module_exit(ksz884x_cleanup_module);
7266 7255
7267MODULE_DESCRIPTION("KSZ8841/2 PCI network driver"); 7256MODULE_DESCRIPTION("KSZ8841/2 PCI network driver");
7268MODULE_AUTHOR("Tristram Ha <Tristram.Ha@micrel.com>"); 7257MODULE_AUTHOR("Tristram Ha <Tristram.Ha@micrel.com>");
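
module_pci_driver() generates precisely the init/exit boilerplate deleted above; the same conversion is applied to qla3xxx further down. Simplified, the macro amounts to:

    /* Simplified; the real definition lives in <linux/pci.h>. */
    #define module_pci_driver(__pci_driver) \
        module_driver(__pci_driver, pci_register_driver, pci_unregister_driver)
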
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index de50547c187d..c98decb19ce8 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -8239,7 +8239,8 @@ static int __init s2io_starter(void)
8239 8239
8240/** 8240/**
8241 * s2io_closer - Cleanup routine for the driver 8241 * s2io_closer - Cleanup routine for the driver
8242 * Description: This function is the cleanup routine for the driver. It unregist * ers the driver. 8242 * Description: This function is the cleanup routine for the driver. It
8243 * unregisters the driver.
8243 */ 8244 */
8244 8245
8245static __exit void s2io_closer(void) 8246static __exit void s2io_closer(void)
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
index 5296cc8d3cba..00bc4fc968c7 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
@@ -20,19 +20,3 @@ config PCH_GBE
20 purpose use. 20 purpose use.
21 ML7223/ML7831 is companion chip for Intel Atom E6xx series. 21 ML7223/ML7831 is companion chip for Intel Atom E6xx series.
22 ML7223/ML7831 is completely compatible for Intel EG20T PCH. 22 ML7223/ML7831 is completely compatible for Intel EG20T PCH.
23
24if PCH_GBE
25
26config PCH_PTP
27 bool "PCH PTP clock support"
28 default n
29 depends on EXPERIMENTAL
30 select PPS
31 select PTP_1588_CLOCK
32 select PTP_1588_CLOCK_PCH
33 ---help---
34 Say Y here if you want to use Precision Time Protocol (PTP) in the
35 driver. PTP is a method to precisely synchronize distributed clocks
36 over Ethernet networks.
37
38endif # PCH_GBE
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index b07311eaa693..7fb7e178c74e 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -649,7 +649,6 @@ extern void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
649extern void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter, 649extern void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
650 struct pch_gbe_rx_ring *rx_ring); 650 struct pch_gbe_rx_ring *rx_ring);
651extern void pch_gbe_update_stats(struct pch_gbe_adapter *adapter); 651extern void pch_gbe_update_stats(struct pch_gbe_adapter *adapter);
652#ifdef CONFIG_PCH_PTP
653extern u32 pch_ch_control_read(struct pci_dev *pdev); 652extern u32 pch_ch_control_read(struct pci_dev *pdev);
654extern void pch_ch_control_write(struct pci_dev *pdev, u32 val); 653extern void pch_ch_control_write(struct pci_dev *pdev, u32 val);
655extern u32 pch_ch_event_read(struct pci_dev *pdev); 654extern u32 pch_ch_event_read(struct pci_dev *pdev);
@@ -659,7 +658,6 @@ extern u32 pch_src_uuid_hi_read(struct pci_dev *pdev);
659extern u64 pch_rx_snap_read(struct pci_dev *pdev); 658extern u64 pch_rx_snap_read(struct pci_dev *pdev);
660extern u64 pch_tx_snap_read(struct pci_dev *pdev); 659extern u64 pch_tx_snap_read(struct pci_dev *pdev);
661extern int pch_set_station_address(u8 *addr, struct pci_dev *pdev); 660extern int pch_set_station_address(u8 *addr, struct pci_dev *pdev);
662#endif
663 661
664/* pch_gbe_param.c */ 662/* pch_gbe_param.c */
665extern void pch_gbe_check_options(struct pch_gbe_adapter *adapter); 663extern void pch_gbe_check_options(struct pch_gbe_adapter *adapter);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 4c4fe5b1a29a..39ab4d09faaa 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -21,10 +21,8 @@
21#include "pch_gbe.h" 21#include "pch_gbe.h"
22#include "pch_gbe_api.h" 22#include "pch_gbe_api.h"
23#include <linux/module.h> 23#include <linux/module.h>
24#ifdef CONFIG_PCH_PTP
25#include <linux/net_tstamp.h> 24#include <linux/net_tstamp.h>
26#include <linux/ptp_classify.h> 25#include <linux/ptp_classify.h>
27#endif
28 26
29#define DRV_VERSION "1.01" 27#define DRV_VERSION "1.01"
30const char pch_driver_version[] = DRV_VERSION; 28const char pch_driver_version[] = DRV_VERSION;
@@ -98,7 +96,6 @@ const char pch_driver_version[] = DRV_VERSION;
98 96
99#define PCH_GBE_INT_DISABLE_ALL 0 97#define PCH_GBE_INT_DISABLE_ALL 0
100 98
101#ifdef CONFIG_PCH_PTP
102/* Macros for ieee1588 */ 99/* Macros for ieee1588 */
103/* 0x40 Time Synchronization Channel Control Register Bits */ 100/* 0x40 Time Synchronization Channel Control Register Bits */
104#define MASTER_MODE (1<<0) 101#define MASTER_MODE (1<<0)
@@ -113,7 +110,6 @@ const char pch_driver_version[] = DRV_VERSION;
113 110
114#define PTP_L4_MULTICAST_SA "01:00:5e:00:01:81" 111#define PTP_L4_MULTICAST_SA "01:00:5e:00:01:81"
115#define PTP_L2_MULTICAST_SA "01:1b:19:00:00:00" 112#define PTP_L2_MULTICAST_SA "01:1b:19:00:00:00"
116#endif
117 113
118static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT; 114static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
119 115
@@ -122,7 +118,6 @@ static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
122 int data); 118 int data);
123static void pch_gbe_set_multi(struct net_device *netdev); 119static void pch_gbe_set_multi(struct net_device *netdev);
124 120
125#ifdef CONFIG_PCH_PTP
126static struct sock_filter ptp_filter[] = { 121static struct sock_filter ptp_filter[] = {
127 PTP_FILTER 122 PTP_FILTER
128}; 123};
@@ -291,7 +286,6 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
291 286
292 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; 287 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
293} 288}
294#endif
295 289
296inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw) 290inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
297{ 291{
@@ -1244,9 +1238,7 @@ static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
1244 (int)sizeof(struct pch_gbe_tx_desc) * ring_num, 1238 (int)sizeof(struct pch_gbe_tx_desc) * ring_num,
1245 &hw->reg->TX_DSC_SW_P); 1239 &hw->reg->TX_DSC_SW_P);
1246 1240
1247#ifdef CONFIG_PCH_PTP
1248 pch_tx_timestamp(adapter, skb); 1241 pch_tx_timestamp(adapter, skb);
1249#endif
1250 1242
1251 dev_kfree_skb_any(skb); 1243 dev_kfree_skb_any(skb);
1252} 1244}
@@ -1730,9 +1722,7 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1730 /* Write meta data of skb */ 1722 /* Write meta data of skb */
1731 skb_put(skb, length); 1723 skb_put(skb, length);
1732 1724
1733#ifdef CONFIG_PCH_PTP
1734 pch_rx_timestamp(adapter, skb); 1725 pch_rx_timestamp(adapter, skb);
1735#endif
1736 1726
1737 skb->protocol = eth_type_trans(skb, netdev); 1727 skb->protocol = eth_type_trans(skb, netdev);
1738 if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK) 1728 if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
@@ -2334,10 +2324,8 @@ static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2334 2324
2335 pr_debug("cmd : 0x%04x\n", cmd); 2325 pr_debug("cmd : 0x%04x\n", cmd);
2336 2326
2337#ifdef CONFIG_PCH_PTP
2338 if (cmd == SIOCSHWTSTAMP) 2327 if (cmd == SIOCSHWTSTAMP)
2339 return hwtstamp_ioctl(netdev, ifr, cmd); 2328 return hwtstamp_ioctl(netdev, ifr, cmd);
2340#endif
2341 2329
2342 return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL); 2330 return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
2343} 2331}
@@ -2623,14 +2611,12 @@ static int pch_gbe_probe(struct pci_dev *pdev,
2623 goto err_free_netdev; 2611 goto err_free_netdev;
2624 } 2612 }
2625 2613
2626#ifdef CONFIG_PCH_PTP
2627 adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number, 2614 adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number,
2628 PCI_DEVFN(12, 4)); 2615 PCI_DEVFN(12, 4));
2629 if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) { 2616 if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
2630 pr_err("Bad ptp filter\n"); 2617 pr_err("Bad ptp filter\n");
2631 return -EINVAL; 2618 return -EINVAL;
2632 } 2619 }
2633#endif
2634 2620
2635 netdev->netdev_ops = &pch_gbe_netdev_ops; 2621 netdev->netdev_ops = &pch_gbe_netdev_ops;
2636 netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD; 2622 netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index 10468e7932dd..4ca2c196c98a 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -218,7 +218,7 @@ skip:
218 check_sfp_module = netif_running(dev) && 218 check_sfp_module = netif_running(dev) &&
219 adapter->has_link_events; 219 adapter->has_link_events;
220 } else { 220 } else {
221 ecmd->supported |= (SUPPORTED_TP |SUPPORTED_Autoneg); 221 ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
222 ecmd->advertising |= 222 ecmd->advertising |=
223 (ADVERTISED_TP | ADVERTISED_Autoneg); 223 (ADVERTISED_TP | ADVERTISED_Autoneg);
224 ecmd->port = PORT_TP; 224 ecmd->port = PORT_TP;
@@ -381,7 +381,7 @@ static u32 netxen_nic_test_link(struct net_device *dev)
381 381
382static int 382static int
383netxen_nic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, 383netxen_nic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
384 u8 * bytes) 384 u8 *bytes)
385{ 385{
386 struct netxen_adapter *adapter = netdev_priv(dev); 386 struct netxen_adapter *adapter = netdev_priv(dev);
387 int offset; 387 int offset;
@@ -488,6 +488,8 @@ netxen_nic_get_pauseparam(struct net_device *dev,
488 __u32 val; 488 __u32 val;
489 int port = adapter->physical_port; 489 int port = adapter->physical_port;
490 490
491 pause->autoneg = 0;
492
491 if (adapter->ahw.port_type == NETXEN_NIC_GBE) { 493 if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
492 if ((port < 0) || (port >= NETXEN_NIU_MAX_GBE_PORTS)) 494 if ((port < 0) || (port >= NETXEN_NIU_MAX_GBE_PORTS))
493 return; 495 return;
@@ -496,19 +498,19 @@ netxen_nic_get_pauseparam(struct net_device *dev,
496 pause->rx_pause = netxen_gb_get_rx_flowctl(val); 498 pause->rx_pause = netxen_gb_get_rx_flowctl(val);
497 val = NXRD32(adapter, NETXEN_NIU_GB_PAUSE_CTL); 499 val = NXRD32(adapter, NETXEN_NIU_GB_PAUSE_CTL);
498 switch (port) { 500 switch (port) {
499 case 0: 501 case 0:
500 pause->tx_pause = !(netxen_gb_get_gb0_mask(val)); 502 pause->tx_pause = !(netxen_gb_get_gb0_mask(val));
501 break; 503 break;
502 case 1: 504 case 1:
503 pause->tx_pause = !(netxen_gb_get_gb1_mask(val)); 505 pause->tx_pause = !(netxen_gb_get_gb1_mask(val));
504 break; 506 break;
505 case 2: 507 case 2:
506 pause->tx_pause = !(netxen_gb_get_gb2_mask(val)); 508 pause->tx_pause = !(netxen_gb_get_gb2_mask(val));
507 break; 509 break;
508 case 3: 510 case 3:
509 default: 511 default:
510 pause->tx_pause = !(netxen_gb_get_gb3_mask(val)); 512 pause->tx_pause = !(netxen_gb_get_gb3_mask(val));
511 break; 513 break;
512 } 514 }
513 } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { 515 } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
514 if ((port < 0) || (port >= NETXEN_NIU_MAX_XG_PORTS)) 516 if ((port < 0) || (port >= NETXEN_NIU_MAX_XG_PORTS))
@@ -532,6 +534,11 @@ netxen_nic_set_pauseparam(struct net_device *dev,
532 struct netxen_adapter *adapter = netdev_priv(dev); 534 struct netxen_adapter *adapter = netdev_priv(dev);
533 __u32 val; 535 __u32 val;
534 int port = adapter->physical_port; 536 int port = adapter->physical_port;
537
538 /* not supported */
539 if (pause->autoneg)
540 return -EINVAL;
541
535 /* read mode */ 542 /* read mode */
536 if (adapter->ahw.port_type == NETXEN_NIC_GBE) { 543 if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
537 if ((port < 0) || (port >= NETXEN_NIU_MAX_GBE_PORTS)) 544 if ((port < 0) || (port >= NETXEN_NIU_MAX_GBE_PORTS))
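
Reporting pause->autoneg as 0 in get_pauseparam and rejecting it in set_pauseparam keeps the driver honest with ethtool: this hardware applies pause settings directly rather than negotiating them, so requesting pause autonegotiation now fails with -EINVAL instead of being silently accepted and ignored.
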
@@ -549,31 +556,31 @@ netxen_nic_set_pauseparam(struct net_device *dev,
549 /* set autoneg */ 556 /* set autoneg */
550 val = NXRD32(adapter, NETXEN_NIU_GB_PAUSE_CTL); 557 val = NXRD32(adapter, NETXEN_NIU_GB_PAUSE_CTL);
551 switch (port) { 558 switch (port) {
552 case 0: 559 case 0:
553 if (pause->tx_pause) 560 if (pause->tx_pause)
554 netxen_gb_unset_gb0_mask(val); 561 netxen_gb_unset_gb0_mask(val);
555 else 562 else
556 netxen_gb_set_gb0_mask(val); 563 netxen_gb_set_gb0_mask(val);
557 break; 564 break;
558 case 1: 565 case 1:
559 if (pause->tx_pause) 566 if (pause->tx_pause)
560 netxen_gb_unset_gb1_mask(val); 567 netxen_gb_unset_gb1_mask(val);
561 else 568 else
562 netxen_gb_set_gb1_mask(val); 569 netxen_gb_set_gb1_mask(val);
563 break; 570 break;
564 case 2: 571 case 2:
565 if (pause->tx_pause) 572 if (pause->tx_pause)
566 netxen_gb_unset_gb2_mask(val); 573 netxen_gb_unset_gb2_mask(val);
567 else 574 else
568 netxen_gb_set_gb2_mask(val); 575 netxen_gb_set_gb2_mask(val);
569 break; 576 break;
570 case 3: 577 case 3:
571 default: 578 default:
572 if (pause->tx_pause) 579 if (pause->tx_pause)
573 netxen_gb_unset_gb3_mask(val); 580 netxen_gb_unset_gb3_mask(val);
574 else 581 else
575 netxen_gb_set_gb3_mask(val); 582 netxen_gb_set_gb3_mask(val);
576 break; 583 break;
577 } 584 }
578 NXWR32(adapter, NETXEN_NIU_GB_PAUSE_CTL, val); 585 NXWR32(adapter, NETXEN_NIU_GB_PAUSE_CTL, val);
579 } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { 586 } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
@@ -636,7 +643,7 @@ static int netxen_get_sset_count(struct net_device *dev, int sset)
636 643
637static void 644static void
638netxen_nic_diag_test(struct net_device *dev, struct ethtool_test *eth_test, 645netxen_nic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
639 u64 * data) 646 u64 *data)
640{ 647{
641 memset(data, 0, sizeof(uint64_t) * NETXEN_NIC_TEST_LEN); 648 memset(data, 0, sizeof(uint64_t) * NETXEN_NIC_TEST_LEN);
642 if ((data[0] = netxen_nic_reg_test(dev))) 649 if ((data[0] = netxen_nic_reg_test(dev)))
@@ -647,7 +654,7 @@ netxen_nic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
647} 654}
648 655
649static void 656static void
650netxen_nic_get_strings(struct net_device *dev, u32 stringset, u8 * data) 657netxen_nic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
651{ 658{
652 int index; 659 int index;
653 660
@@ -668,7 +675,7 @@ netxen_nic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
668 675
669static void 676static void
670netxen_nic_get_ethtool_stats(struct net_device *dev, 677netxen_nic_get_ethtool_stats(struct net_device *dev,
671 struct ethtool_stats *stats, u64 * data) 678 struct ethtool_stats *stats, u64 *data)
672{ 679{
673 struct netxen_adapter *adapter = netdev_priv(dev); 680 struct netxen_adapter *adapter = netdev_priv(dev);
674 int index; 681 int index;
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 6407d0d77e81..12d1f2470d5c 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -1920,7 +1920,6 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
1920{ 1920{
1921 struct ql_tx_buf_cb *tx_cb; 1921 struct ql_tx_buf_cb *tx_cb;
1922 int i; 1922 int i;
1923 int retval = 0;
1924 1923
1925 if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) { 1924 if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
1926 netdev_warn(qdev->ndev, 1925 netdev_warn(qdev->ndev,
@@ -1935,7 +1934,6 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
1935 "Frame too short to be legal, frame not sent\n"); 1934 "Frame too short to be legal, frame not sent\n");
1936 1935
1937 qdev->ndev->stats.tx_errors++; 1936 qdev->ndev->stats.tx_errors++;
1938 retval = -EIO;
1939 goto frame_not_sent; 1937 goto frame_not_sent;
1940 } 1938 }
1941 1939
@@ -1944,7 +1942,6 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
1944 mac_rsp->transaction_id); 1942 mac_rsp->transaction_id);
1945 1943
1946 qdev->ndev->stats.tx_errors++; 1944 qdev->ndev->stats.tx_errors++;
1947 retval = -EIO;
1948 goto invalid_seg_count; 1945 goto invalid_seg_count;
1949 } 1946 }
1950 1947
@@ -3958,15 +3955,4 @@ static struct pci_driver ql3xxx_driver = {
3958 .remove = __devexit_p(ql3xxx_remove), 3955 .remove = __devexit_p(ql3xxx_remove),
3959}; 3956};
3960 3957
3961static int __init ql3xxx_init_module(void) 3958module_pci_driver(ql3xxx_driver);
3962{
3963 return pci_register_driver(&ql3xxx_driver);
3964}
3965
3966static void __exit ql3xxx_exit(void)
3967{
3968 pci_unregister_driver(&ql3xxx_driver);
3969}
3970
3971module_init(ql3xxx_init_module);
3972module_exit(ql3xxx_exit);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
index 58185b604b72..10093f0c4c0f 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -86,7 +86,7 @@ exit:
86} 86}
87 87
88/* Read out the SERDES registers */ 88/* Read out the SERDES registers */
89static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 * data) 89static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
90{ 90{
91 int status; 91 int status;
92 92
@@ -364,7 +364,7 @@ exit:
364/* Read the 400 xgmac control/statistics registers 364/* Read the 400 xgmac control/statistics registers
365 * skipping unused locations. 365 * skipping unused locations.
366 */ 366 */
367static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 * buf, 367static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 *buf,
368 unsigned int other_function) 368 unsigned int other_function)
369{ 369{
370 int status = 0; 370 int status = 0;
@@ -405,7 +405,7 @@ static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 * buf,
405 return status; 405 return status;
406} 406}
407 407
408static int ql_get_ets_regs(struct ql_adapter *qdev, u32 * buf) 408static int ql_get_ets_regs(struct ql_adapter *qdev, u32 *buf)
409{ 409{
410 int status = 0; 410 int status = 0;
411 int i; 411 int i;
@@ -423,7 +423,7 @@ static int ql_get_ets_regs(struct ql_adapter *qdev, u32 * buf)
423 return status; 423 return status;
424} 424}
425 425
426static void ql_get_intr_states(struct ql_adapter *qdev, u32 * buf) 426static void ql_get_intr_states(struct ql_adapter *qdev, u32 *buf)
427{ 427{
428 int i; 428 int i;
429 429
@@ -434,7 +434,7 @@ static void ql_get_intr_states(struct ql_adapter *qdev, u32 * buf)
434 } 434 }
435} 435}
436 436
437static int ql_get_cam_entries(struct ql_adapter *qdev, u32 * buf) 437static int ql_get_cam_entries(struct ql_adapter *qdev, u32 *buf)
438{ 438{
439 int i, status; 439 int i, status;
440 u32 value[3]; 440 u32 value[3];
@@ -471,7 +471,7 @@ err:
471 return status; 471 return status;
472} 472}
473 473
474static int ql_get_routing_entries(struct ql_adapter *qdev, u32 * buf) 474static int ql_get_routing_entries(struct ql_adapter *qdev, u32 *buf)
475{ 475{
476 int status; 476 int status;
477 u32 value, i; 477 u32 value, i;
@@ -496,7 +496,7 @@ err:
496} 496}
497 497
498/* Read the MPI Processor shadow registers */ 498/* Read the MPI Processor shadow registers */
499static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 * buf) 499static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 *buf)
500{ 500{
501 u32 i; 501 u32 i;
502 int status; 502 int status;
@@ -515,7 +515,7 @@ end:
515} 515}
516 516
517/* Read the MPI Processor core registers */ 517/* Read the MPI Processor core registers */
518static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 * buf, 518static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 *buf,
519 u32 offset, u32 count) 519 u32 offset, u32 count)
520{ 520{
521 int i, status = 0; 521 int i, status = 0;
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
index e02f04d7f3ad..9f2d416de750 100644
--- a/drivers/net/ethernet/realtek/atp.c
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -175,8 +175,7 @@ struct net_local {
175 unsigned int tx_unit_busy:1; 175 unsigned int tx_unit_busy:1;
176 unsigned char re_tx, /* Number of packet retransmissions. */ 176 unsigned char re_tx, /* Number of packet retransmissions. */
177 addr_mode, /* Current Rx filter e.g. promiscuous, etc. */ 177 addr_mode, /* Current Rx filter e.g. promiscuous, etc. */
178 pac_cnt_in_tx_buf, 178 pac_cnt_in_tx_buf;
179 chip_type;
180}; 179};
181 180
182/* This code, written by wwc@super.org, resets the adapter every 181/* This code, written by wwc@super.org, resets the adapter every
@@ -339,7 +338,6 @@ static int __init atp_probe1(long ioaddr)
339 write_reg_high(ioaddr, CMR1, CMR1h_RESET | CMR1h_MUX); 338 write_reg_high(ioaddr, CMR1, CMR1h_RESET | CMR1h_MUX);
340 339
341 lp = netdev_priv(dev); 340 lp = netdev_priv(dev);
342 lp->chip_type = RTL8002;
343 lp->addr_mode = CMR2h_Normal; 341 lp->addr_mode = CMR2h_Normal;
344 spin_lock_init(&lp->lock); 342 spin_lock_init(&lp->lock);
345 343
@@ -852,7 +850,7 @@ net_close(struct net_device *dev)
852 * Set or clear the multicast filter for this adapter. 850 * Set or clear the multicast filter for this adapter.
853 */ 851 */
854 852
855static void set_rx_mode_8002(struct net_device *dev) 853static void set_rx_mode(struct net_device *dev)
856{ 854{
857 struct net_local *lp = netdev_priv(dev); 855 struct net_local *lp = netdev_priv(dev);
858 long ioaddr = dev->base_addr; 856 long ioaddr = dev->base_addr;
@@ -864,58 +862,6 @@ static void set_rx_mode_8002(struct net_device *dev)
864 write_reg_high(ioaddr, CMR2, lp->addr_mode); 862 write_reg_high(ioaddr, CMR2, lp->addr_mode);
865} 863}
866 864
867static void set_rx_mode_8012(struct net_device *dev)
868{
869 struct net_local *lp = netdev_priv(dev);
870 long ioaddr = dev->base_addr;
871 unsigned char new_mode, mc_filter[8]; /* Multicast hash filter */
872 int i;
873
874 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
875 new_mode = CMR2h_PROMISC;
876 } else if ((netdev_mc_count(dev) > 1000) ||
877 (dev->flags & IFF_ALLMULTI)) {
878 /* Too many to filter perfectly -- accept all multicasts. */
879 memset(mc_filter, 0xff, sizeof(mc_filter));
880 new_mode = CMR2h_Normal;
881 } else {
882 struct netdev_hw_addr *ha;
883
884 memset(mc_filter, 0, sizeof(mc_filter));
885 netdev_for_each_mc_addr(ha, dev) {
886 int filterbit = ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;
887 mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
888 }
889 new_mode = CMR2h_Normal;
890 }
891 lp->addr_mode = new_mode;
892 write_reg(ioaddr, CMR2, CMR2_IRQOUT | 0x04); /* Switch to page 1. */
893 for (i = 0; i < 8; i++)
894 write_reg_byte(ioaddr, i, mc_filter[i]);
895 if (net_debug > 2 || 1) {
896 lp->addr_mode = 1;
897 printk(KERN_DEBUG "%s: Mode %d, setting multicast filter to",
898 dev->name, lp->addr_mode);
899 for (i = 0; i < 8; i++)
900 printk(" %2.2x", mc_filter[i]);
901 printk(".\n");
902 }
903
904 write_reg_high(ioaddr, CMR2, lp->addr_mode);
905 write_reg(ioaddr, CMR2, CMR2_IRQOUT); /* Switch back to page 0 */
906}
907
908static void set_rx_mode(struct net_device *dev)
909{
910 struct net_local *lp = netdev_priv(dev);
911
912 if (lp->chip_type == RTL8002)
913 return set_rx_mode_8002(dev);
914 else
915 return set_rx_mode_8012(dev);
916}
917
918
919static int __init atp_init_module(void) { 865static int __init atp_init_module(void) {
920 if (debug) /* Emit version even if no cards detected. */ 866 if (debug) /* Emit version even if no cards detected. */
921 printk(KERN_INFO "%s", version); 867 printk(KERN_INFO "%s", version);
diff --git a/drivers/net/ethernet/realtek/atp.h b/drivers/net/ethernet/realtek/atp.h
index 0edc642c2c2f..040b13739947 100644
--- a/drivers/net/ethernet/realtek/atp.h
+++ b/drivers/net/ethernet/realtek/atp.h
@@ -16,8 +16,6 @@ struct rx_header {
16#define PAR_STATUS 1 16#define PAR_STATUS 1
17#define PAR_CONTROL 2 17#define PAR_CONTROL 2
18 18
19enum chip_type { RTL8002, RTL8012 };
20
21#define Ctrl_LNibRead 0x08 /* LP_PSELECP */ 19#define Ctrl_LNibRead 0x08 /* LP_PSELECP */
22#define Ctrl_HNibRead 0 20#define Ctrl_HNibRead 0
23#define Ctrl_LNibWrite 0x08 /* LP_PSELECP */ 21#define Ctrl_LNibWrite 0x08 /* LP_PSELECP */
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 927aa33d4349..50a55fb10368 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -78,7 +78,6 @@ static const int multicast_filter_limit = 32;
78 78
79#define MAX_READ_REQUEST_SHIFT 12 79#define MAX_READ_REQUEST_SHIFT 12
80#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */ 80#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
81#define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
82#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ 81#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
83 82
84#define R8169_REGS_SIZE 256 83#define R8169_REGS_SIZE 256
@@ -456,6 +455,7 @@ enum rtl8168_registers {
456#define PWM_EN (1 << 22) 455#define PWM_EN (1 << 22)
457#define RXDV_GATED_EN (1 << 19) 456#define RXDV_GATED_EN (1 << 19)
458#define EARLY_TALLY_EN (1 << 16) 457#define EARLY_TALLY_EN (1 << 16)
458#define FORCE_CLK (1 << 15) /* force clock request */
459}; 459};
460 460
461enum rtl_register_content { 461enum rtl_register_content {
@@ -519,6 +519,7 @@ enum rtl_register_content {
519 PMEnable = (1 << 0), /* Power Management Enable */ 519 PMEnable = (1 << 0), /* Power Management Enable */
520 520
521 /* Config2 register p. 25 */ 521 /* Config2 register p. 25 */
522 ClkReqEn = (1 << 7), /* Clock Request Enable */
522 MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */ 523 MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */
523 PCI_Clock_66MHz = 0x01, 524 PCI_Clock_66MHz = 0x01,
524 PCI_Clock_33MHz = 0x00, 525 PCI_Clock_33MHz = 0x00,
@@ -539,6 +540,7 @@ enum rtl_register_content {
539 Spi_en = (1 << 3), 540 Spi_en = (1 << 3),
540 LanWake = (1 << 1), /* LanWake enable/disable */ 541 LanWake = (1 << 1), /* LanWake enable/disable */
541 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */ 542 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
543 ASPM_en = (1 << 0), /* ASPM enable */
542 544
543 /* TBICSR p.28 */ 545 /* TBICSR p.28 */
544 TBIReset = 0x80000000, 546 TBIReset = 0x80000000,
@@ -687,6 +689,7 @@ enum features {
687 RTL_FEATURE_WOL = (1 << 0), 689 RTL_FEATURE_WOL = (1 << 0),
688 RTL_FEATURE_MSI = (1 << 1), 690 RTL_FEATURE_MSI = (1 << 1),
689 RTL_FEATURE_GMII = (1 << 2), 691 RTL_FEATURE_GMII = (1 << 2),
692 RTL_FEATURE_FW_LOADED = (1 << 3),
690}; 693};
691 694
692struct rtl8169_counters { 695struct rtl8169_counters {
@@ -2394,8 +2397,10 @@ static void rtl_apply_firmware(struct rtl8169_private *tp)
2394 struct rtl_fw *rtl_fw = tp->rtl_fw; 2397 struct rtl_fw *rtl_fw = tp->rtl_fw;
2395 2398
2396 /* TODO: release firmware once rtl_phy_write_fw signals failures. */ 2399 /* TODO: release firmware once rtl_phy_write_fw signals failures. */
2397 if (!IS_ERR_OR_NULL(rtl_fw)) 2400 if (!IS_ERR_OR_NULL(rtl_fw)) {
2398 rtl_phy_write_fw(tp, rtl_fw); 2401 rtl_phy_write_fw(tp, rtl_fw);
2402 tp->features |= RTL_FEATURE_FW_LOADED;
2403 }
2399} 2404}
2400 2405
2401static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val) 2406static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
@@ -2406,6 +2411,31 @@ static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
2406 rtl_apply_firmware(tp); 2411 rtl_apply_firmware(tp);
2407} 2412}
2408 2413
2414static void r810x_aldps_disable(struct rtl8169_private *tp)
2415{
2416 rtl_writephy(tp, 0x1f, 0x0000);
2417 rtl_writephy(tp, 0x18, 0x0310);
2418 msleep(100);
2419}
2420
2421static void r810x_aldps_enable(struct rtl8169_private *tp)
2422{
2423 if (!(tp->features & RTL_FEATURE_FW_LOADED))
2424 return;
2425
2426 rtl_writephy(tp, 0x1f, 0x0000);
2427 rtl_writephy(tp, 0x18, 0x8310);
2428}
2429
2430static void r8168_aldps_enable_1(struct rtl8169_private *tp)
2431{
2432 if (!(tp->features & RTL_FEATURE_FW_LOADED))
2433 return;
2434
2435 rtl_writephy(tp, 0x1f, 0x0000);
2436 rtl_w1w0_phy(tp, 0x15, 0x1000, 0x0000);
2437}
2438
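The helpers above encode an ordering constraint rather than new behaviour:
ALDPS has to be off while PHY ram code is written, and it is only safe to
turn it back on when rtl_apply_firmware() actually loaded something, which
is what the new RTL_FEATURE_FW_LOADED test guards. A standalone sketch of
that control flow, with stub types standing in for rtl8169_private
(illustrative only, not driver code):

#include <stdio.h>

#define RTL_FEATURE_FW_LOADED (1 << 3)

struct tp { unsigned int features; int fw_available; };

static void aldps_disable(struct tp *tp) { (void)tp; puts("ALDPS off"); }

static void apply_firmware(struct tp *tp)
{
	if (tp->fw_available) {
		puts("write PHY firmware");
		tp->features |= RTL_FEATURE_FW_LOADED;
	}
}

static void aldps_enable(struct tp *tp)
{
	if (!(tp->features & RTL_FEATURE_FW_LOADED))
		return;	/* never re-enable over missing ram code */
	puts("ALDPS on");
}

int main(void)
{
	struct tp tp = { .features = 0, .fw_available = 0 };

	aldps_disable(&tp);
	apply_firmware(&tp);	/* firmware missing: FW_LOADED stays clear */
	aldps_enable(&tp);	/* so this is a no-op */
	return 0;
}
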
2409static void rtl8169s_hw_phy_config(struct rtl8169_private *tp) 2439static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
2410{ 2440{
2411 static const struct phy_reg phy_reg_init[] = { 2441 static const struct phy_reg phy_reg_init[] = {
@@ -3178,6 +3208,8 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
3178 rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001); 3208 rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
3179 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400); 3209 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
3180 rtl_writephy(tp, 0x1f, 0x0000); 3210 rtl_writephy(tp, 0x1f, 0x0000);
3211
3212 r8168_aldps_enable_1(tp);
3181} 3213}
3182 3214
3183static void rtl8168f_hw_phy_config(struct rtl8169_private *tp) 3215static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
@@ -3250,6 +3282,8 @@ static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
3250 rtl_writephy(tp, 0x05, 0x8b85); 3282 rtl_writephy(tp, 0x05, 0x8b85);
3251 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000); 3283 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
3252 rtl_writephy(tp, 0x1f, 0x0000); 3284 rtl_writephy(tp, 0x1f, 0x0000);
3285
3286 r8168_aldps_enable_1(tp);
3253} 3287}
3254 3288
3255static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp) 3289static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
@@ -3257,6 +3291,8 @@ static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
3257 rtl_apply_firmware(tp); 3291 rtl_apply_firmware(tp);
3258 3292
3259 rtl8168f_hw_phy_config(tp); 3293 rtl8168f_hw_phy_config(tp);
3294
3295 r8168_aldps_enable_1(tp);
3260} 3296}
3261 3297
3262static void rtl8411_hw_phy_config(struct rtl8169_private *tp) 3298static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
@@ -3354,6 +3390,8 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
3354 rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001); 3390 rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
3355 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400); 3391 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
3356 rtl_writephy(tp, 0x1f, 0x0000); 3392 rtl_writephy(tp, 0x1f, 0x0000);
3393
3394 r8168_aldps_enable_1(tp);
3357} 3395}
3358 3396
3359static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp) 3397static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
@@ -3439,21 +3477,19 @@ static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
3439 }; 3477 };
3440 3478
3441 /* Disable ALDPS before ram code */ 3479 /* Disable ALDPS before ram code */
3442 rtl_writephy(tp, 0x1f, 0x0000); 3480 r810x_aldps_disable(tp);
3443 rtl_writephy(tp, 0x18, 0x0310);
3444 msleep(100);
3445 3481
3446 rtl_apply_firmware(tp); 3482 rtl_apply_firmware(tp);
3447 3483
3448 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); 3484 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3485
3486 r810x_aldps_enable(tp);
3449} 3487}
3450 3488
3451static void rtl8402_hw_phy_config(struct rtl8169_private *tp) 3489static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
3452{ 3490{
3453 /* Disable ALDPS before setting firmware */ 3491 /* Disable ALDPS before setting firmware */
3454 rtl_writephy(tp, 0x1f, 0x0000); 3492 r810x_aldps_disable(tp);
3455 rtl_writephy(tp, 0x18, 0x0310);
3456 msleep(20);
3457 3493
3458 rtl_apply_firmware(tp); 3494 rtl_apply_firmware(tp);
3459 3495
@@ -3463,6 +3499,8 @@ static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
3463 rtl_writephy(tp, 0x10, 0x401f); 3499 rtl_writephy(tp, 0x10, 0x401f);
3464 rtl_writephy(tp, 0x19, 0x7030); 3500 rtl_writephy(tp, 0x19, 0x7030);
3465 rtl_writephy(tp, 0x1f, 0x0000); 3501 rtl_writephy(tp, 0x1f, 0x0000);
3502
3503 r810x_aldps_enable(tp);
3466} 3504}
3467 3505
3468static void rtl8106e_hw_phy_config(struct rtl8169_private *tp) 3506static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
@@ -3475,9 +3513,7 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
3475 }; 3513 };
3476 3514
3477 /* Disable ALDPS before ram code */ 3515 /* Disable ALDPS before ram code */
3478 rtl_writephy(tp, 0x1f, 0x0000); 3516 r810x_aldps_disable(tp);
3479 rtl_writephy(tp, 0x18, 0x0310);
3480 msleep(100);
3481 3517
3482 rtl_apply_firmware(tp); 3518 rtl_apply_firmware(tp);
3483 3519
@@ -3485,6 +3521,8 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
3485 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); 3521 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3486 3522
3487 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); 3523 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
3524
3525 r810x_aldps_enable(tp);
3488} 3526}
3489 3527
3490static void rtl_hw_phy_config(struct net_device *dev) 3528static void rtl_hw_phy_config(struct net_device *dev)
@@ -5015,8 +5053,6 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
5015 5053
5016 RTL_W8(MaxTxPacketSize, EarlySize); 5054 RTL_W8(MaxTxPacketSize, EarlySize);
5017 5055
5018 rtl_disable_clock_request(pdev);
5019
5020 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); 5056 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5021 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); 5057 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
5022 5058
@@ -5025,7 +5061,8 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
5025 5061
5026 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); 5062 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5027 RTL_W32(MISC, RTL_R32(MISC) | PWM_EN); 5063 RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
5028 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); 5064 RTL_W8(Config5, (RTL_R8(Config5) & ~Spi_en) | ASPM_en);
5065 RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
5029} 5066}
5030 5067
5031static void rtl_hw_start_8168f(struct rtl8169_private *tp) 5068static void rtl_hw_start_8168f(struct rtl8169_private *tp)
@@ -5050,13 +5087,12 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
5050 5087
5051 RTL_W8(MaxTxPacketSize, EarlySize); 5088 RTL_W8(MaxTxPacketSize, EarlySize);
5052 5089
5053 rtl_disable_clock_request(pdev);
5054
5055 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); 5090 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5056 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); 5091 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
5057 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); 5092 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5058 RTL_W32(MISC, RTL_R32(MISC) | PWM_EN); 5093 RTL_W32(MISC, RTL_R32(MISC) | PWM_EN | FORCE_CLK);
5059 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); 5094 RTL_W8(Config5, (RTL_R8(Config5) & ~Spi_en) | ASPM_en);
5095 RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
5060} 5096}
5061 5097
5062static void rtl_hw_start_8168f_1(struct rtl8169_private *tp) 5098static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
@@ -5113,8 +5149,10 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
5113 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); 5149 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5114 5150
5115 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); 5151 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5116 RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN); 5152 RTL_W32(MISC, (RTL_R32(MISC) | FORCE_CLK) & ~RXDV_GATED_EN);
5117 RTL_W8(MaxTxPacketSize, EarlySize); 5153 RTL_W8(MaxTxPacketSize, EarlySize);
5154 RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
5155 RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
5118 5156
5119 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); 5157 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5120 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); 5158 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
@@ -5330,6 +5368,9 @@ static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
5330 5368
5331 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET); 5369 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
5332 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); 5370 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5371 RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
5372 RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
5373 RTL_W32(MISC, RTL_R32(MISC) | FORCE_CLK);
5333 5374
5334 rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1)); 5375 rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
5335} 5376}
@@ -5355,6 +5396,9 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
5355 5396
5356 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); 5397 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5357 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); 5398 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
5399 RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
5400 RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
5401 RTL_W32(MISC, RTL_R32(MISC) | FORCE_CLK);
5358 5402
5359 rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402)); 5403 rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
5360 5404
@@ -5376,7 +5420,10 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
5376 /* Force LAN exit from ASPM if Rx/Tx are not idle */ 5420 /* Force LAN exit from ASPM if Rx/Tx are not idle */
5377 RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800); 5421 RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
5378 5422
5379 RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN); 5423 RTL_W32(MISC,
5424 (RTL_R32(MISC) | DISABLE_LAN_EN | FORCE_CLK) & ~EARLY_TALLY_EN);
5425 RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
5426 RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
5380 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET); 5427 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
5381 RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN); 5428 RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
5382} 5429}
@@ -6992,15 +7039,4 @@ static struct pci_driver rtl8169_pci_driver = {
6992 .driver.pm = RTL8169_PM_OPS, 7039 .driver.pm = RTL8169_PM_OPS,
6993}; 7040};
6994 7041
6995static int __init rtl8169_init_module(void) 7042module_pci_driver(rtl8169_pci_driver);
6996{
6997 return pci_register_driver(&rtl8169_pci_driver);
6998}
6999
7000static void __exit rtl8169_cleanup_module(void)
7001{
7002 pci_unregister_driver(&rtl8169_pci_driver);
7003}
7004
7005module_init(rtl8169_init_module);
7006module_exit(rtl8169_cleanup_module);
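
The hunk above is pure boilerplate removal: module_pci_driver() expands to
module_init()/module_exit() stubs that call pci_register_driver() and
pci_unregister_driver() on the named driver. A minimal sketch of the
resulting shape for a hypothetical driver (the example_* names are
invented; the era's __devinit/__devexit annotations are omitted):

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id example_ids[] = {
	{ PCI_DEVICE(0x10ec, 0x8168) },	/* example vendor/device pair */
	{ }
};
MODULE_DEVICE_TABLE(pci, example_ids);

static int example_probe(struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	return 0;	/* claim the device */
}

static void example_remove(struct pci_dev *pdev)
{
}

static struct pci_driver example_driver = {
	.name		= "example",
	.id_table	= example_ids,
	.probe		= example_probe,
	.remove		= example_remove,
};

/* Replaces the explicit init/exit pair deleted above. */
module_pci_driver(example_driver);
MODULE_LICENSE("GPL");
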
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index c8bfea0524dd..3d705862bd7d 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2286,7 +2286,7 @@ static int sh_mdio_init(struct net_device *ndev, int id,
2286 for (i = 0; i < PHY_MAX_ADDR; i++) 2286 for (i = 0; i < PHY_MAX_ADDR; i++)
2287 mdp->mii_bus->irq[i] = PHY_POLL; 2287 mdp->mii_bus->irq[i] = PHY_POLL;
2288 2288
2289 /* regist mdio bus */ 2289 /* register mdio bus */
2290 ret = mdiobus_register(mdp->mii_bus); 2290 ret = mdiobus_register(mdp->mii_bus);
2291 if (ret) 2291 if (ret)
2292 goto out_free_irq; 2292 goto out_free_irq;
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 25906c1d1b15..3ab2c4289a47 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -5,6 +5,7 @@ config SFC
5 select CRC32 5 select CRC32
6 select I2C 6 select I2C
7 select I2C_ALGOBIT 7 select I2C_ALGOBIT
8 select PTP_1588_CLOCK
8 ---help--- 9 ---help---
9 This driver supports 10-gigabit Ethernet cards based on 10 This driver supports 10-gigabit Ethernet cards based on
10 the Solarflare SFC4000 and SFC9000-family controllers. 11 the Solarflare SFC4000 and SFC9000-family controllers.
@@ -34,10 +35,3 @@ config SFC_SRIOV
34 This enables support for the SFC9000 I/O Virtualization 35 This enables support for the SFC9000 I/O Virtualization
35 features, allowing accelerated network performance in 36 features, allowing accelerated network performance in
36 virtualized environments. 37 virtualized environments.
37config SFC_PTP
38 bool "Solarflare SFC9000-family PTP support"
39 depends on SFC && PTP_1588_CLOCK && !(SFC=y && PTP_1588_CLOCK=m)
40 default y
41 ---help---
42 This enables support for the Precision Time Protocol (PTP)
43 on SFC9000-family NICs
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index e11f2ecf69d9..945bf06e69ef 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -2,9 +2,8 @@ sfc-y += efx.o nic.o falcon.o siena.o tx.o rx.o filter.o \
2 falcon_xmac.o mcdi_mac.o \ 2 falcon_xmac.o mcdi_mac.o \
3 selftest.o ethtool.o qt202x_phy.o mdio_10g.o \ 3 selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
4 tenxpress.o txc43128_phy.o falcon_boards.o \ 4 tenxpress.o txc43128_phy.o falcon_boards.o \
5 mcdi.o mcdi_phy.o mcdi_mon.o 5 mcdi.o mcdi_phy.o mcdi_mon.o ptp.o
6sfc-$(CONFIG_SFC_MTD) += mtd.o 6sfc-$(CONFIG_SFC_MTD) += mtd.o
7sfc-$(CONFIG_SFC_SRIOV) += siena_sriov.o 7sfc-$(CONFIG_SFC_SRIOV) += siena_sriov.o
8sfc-$(CONFIG_SFC_PTP) += ptp.o
9 8
10obj-$(CONFIG_SFC) += sfc.o 9obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 576a31091165..2487f582ab04 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -868,9 +868,7 @@ struct efx_nic {
868 struct work_struct peer_work; 868 struct work_struct peer_work;
869#endif 869#endif
870 870
871#ifdef CONFIG_SFC_PTP
872 struct efx_ptp_data *ptp_data; 871 struct efx_ptp_data *ptp_data;
873#endif
874 872
875 /* The following fields may be written more often */ 873 /* The following fields may be written more often */
876 874
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 438cef11f727..7a9647a3c565 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -252,7 +252,6 @@ extern int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
252 bool spoofchk); 252 bool spoofchk);
253 253
254struct ethtool_ts_info; 254struct ethtool_ts_info;
255#ifdef CONFIG_SFC_PTP
256extern void efx_ptp_probe(struct efx_nic *efx); 255extern void efx_ptp_probe(struct efx_nic *efx);
257extern int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd); 256extern int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd);
258extern int efx_ptp_get_ts_info(struct net_device *net_dev, 257extern int efx_ptp_get_ts_info(struct net_device *net_dev,
@@ -260,31 +259,6 @@ extern int efx_ptp_get_ts_info(struct net_device *net_dev,
260extern bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); 259extern bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
261extern int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); 260extern int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
262extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev); 261extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
263#else
264static inline void efx_ptp_probe(struct efx_nic *efx) {}
265static inline int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd)
266{
267 return -EOPNOTSUPP;
268}
269static inline int efx_ptp_get_ts_info(struct net_device *net_dev,
270 struct ethtool_ts_info *ts_info)
271{
272 ts_info->so_timestamping = (SOF_TIMESTAMPING_SOFTWARE |
273 SOF_TIMESTAMPING_RX_SOFTWARE);
274 ts_info->phc_index = -1;
275
276 return 0;
277}
278static inline bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
279{
280 return false;
281}
282static inline int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
283{
284 return NETDEV_TX_OK;
285}
286static inline void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev) {}
287#endif
288 262
289extern const struct efx_nic_type falcon_a1_nic_type; 263extern const struct efx_nic_type falcon_a1_nic_type;
290extern const struct efx_nic_type falcon_b0_nic_type; 264extern const struct efx_nic_type falcon_b0_nic_type;
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 2c41894d5472..48fcb5e3bd3d 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -60,6 +60,14 @@ config TI_CPSW
60 To compile this driver as a module, choose M here: the module 60 To compile this driver as a module, choose M here: the module
61 will be called cpsw. 61 will be called cpsw.
62 62
63config TI_CPTS
64 boolean "TI Common Platform Time Sync (CPTS) Support"
65 select PTP_1588_CLOCK
66 ---help---
67 This driver supports the Common Platform Time Sync unit of
68 the CPSW Ethernet Switch. The unit can time stamp PTP UDP/IPv4
69 and Layer 2 packets, and the driver offers a PTP Hardware Clock.
70
63config TLAN 71config TLAN
64 tristate "TI ThunderLAN support" 72 tristate "TI ThunderLAN support"
65 depends on (PCI || EISA) 73 depends on (PCI || EISA)
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index 91bd8bba78ff..c65148e8aa1d 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -8,4 +8,4 @@ obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o
8obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o 8obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
9obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o 9obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
10obj-$(CONFIG_TI_CPSW) += ti_cpsw.o 10obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
11ti_cpsw-y := cpsw_ale.o cpsw.o 11ti_cpsw-y := cpsw_ale.o cpsw.o cpts.o
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index df55e2403746..7654a62ab75e 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -24,6 +24,7 @@
24#include <linux/if_ether.h> 24#include <linux/if_ether.h>
25#include <linux/etherdevice.h> 25#include <linux/etherdevice.h>
26#include <linux/netdevice.h> 26#include <linux/netdevice.h>
27#include <linux/net_tstamp.h>
27#include <linux/phy.h> 28#include <linux/phy.h>
28#include <linux/workqueue.h> 29#include <linux/workqueue.h>
29#include <linux/delay.h> 30#include <linux/delay.h>
@@ -35,6 +36,7 @@
35#include <linux/platform_data/cpsw.h> 36#include <linux/platform_data/cpsw.h>
36 37
37#include "cpsw_ale.h" 38#include "cpsw_ale.h"
39#include "cpts.h"
38#include "davinci_cpdma.h" 40#include "davinci_cpdma.h"
39 41
40#define CPSW_DEBUG (NETIF_MSG_HW | NETIF_MSG_WOL | \ 42#define CPSW_DEBUG (NETIF_MSG_HW | NETIF_MSG_WOL | \
@@ -70,10 +72,14 @@ do { \
70 dev_notice(priv->dev, format, ## __VA_ARGS__); \ 72 dev_notice(priv->dev, format, ## __VA_ARGS__); \
71} while (0) 73} while (0)
72 74
75#define ALE_ALL_PORTS 0x7
76
73#define CPSW_MAJOR_VERSION(reg) (reg >> 8 & 0x7) 77#define CPSW_MAJOR_VERSION(reg) (reg >> 8 & 0x7)
74#define CPSW_MINOR_VERSION(reg) (reg & 0xff) 78#define CPSW_MINOR_VERSION(reg) (reg & 0xff)
75#define CPSW_RTL_VERSION(reg) ((reg >> 11) & 0x1f) 79#define CPSW_RTL_VERSION(reg) ((reg >> 11) & 0x1f)
76 80
81#define CPSW_VERSION_1 0x19010a
82#define CPSW_VERSION_2 0x19010c
77#define CPDMA_RXTHRESH 0x0c0 83#define CPDMA_RXTHRESH 0x0c0
78#define CPDMA_RXFREE 0x0e0 84#define CPDMA_RXFREE 0x0e0
79#define CPDMA_TXHDP 0x00 85#define CPDMA_TXHDP 0x00
@@ -129,7 +135,7 @@ static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
129module_param(rx_packet_max, int, 0); 135module_param(rx_packet_max, int, 0);
130MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)"); 136MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");
131 137
132struct cpsw_ss_regs { 138struct cpsw_wr_regs {
133 u32 id_ver; 139 u32 id_ver;
134 u32 soft_reset; 140 u32 soft_reset;
135 u32 control; 141 u32 control;
@@ -140,26 +146,98 @@ struct cpsw_ss_regs {
140 u32 misc_en; 146 u32 misc_en;
141}; 147};
142 148
143struct cpsw_regs { 149struct cpsw_ss_regs {
144 u32 id_ver; 150 u32 id_ver;
145 u32 control; 151 u32 control;
146 u32 soft_reset; 152 u32 soft_reset;
147 u32 stat_port_en; 153 u32 stat_port_en;
148 u32 ptype; 154 u32 ptype;
155 u32 soft_idle;
156 u32 thru_rate;
157 u32 gap_thresh;
158 u32 tx_start_wds;
159 u32 flow_control;
160 u32 vlan_ltype;
161 u32 ts_ltype;
162 u32 dlr_ltype;
149}; 163};
150 164
151struct cpsw_slave_regs { 165/* CPSW_PORT_V1 */
152 u32 max_blks; 166#define CPSW1_MAX_BLKS 0x00 /* Maximum FIFO Blocks */
153 u32 blk_cnt; 167#define CPSW1_BLK_CNT 0x04 /* FIFO Block Usage Count (Read Only) */
154 u32 flow_thresh; 168#define CPSW1_TX_IN_CTL 0x08 /* Transmit FIFO Control */
155 u32 port_vlan; 169#define CPSW1_PORT_VLAN 0x0c /* VLAN Register */
156 u32 tx_pri_map; 170#define CPSW1_TX_PRI_MAP 0x10 /* Tx Header Priority to Switch Pri Mapping */
157 u32 ts_ctl; 171#define CPSW1_TS_CTL 0x14 /* Time Sync Control */
158 u32 ts_seq_ltype; 172#define CPSW1_TS_SEQ_LTYPE 0x18 /* Time Sync Sequence ID Offset and Msg Type */
159 u32 ts_vlan; 173#define CPSW1_TS_VLAN 0x1c /* Time Sync VLAN1 and VLAN2 */
160 u32 sa_lo; 174
161 u32 sa_hi; 175/* CPSW_PORT_V2 */
162}; 176#define CPSW2_CONTROL 0x00 /* Control Register */
177#define CPSW2_MAX_BLKS 0x08 /* Maximum FIFO Blocks */
178#define CPSW2_BLK_CNT 0x0c /* FIFO Block Usage Count (Read Only) */
179#define CPSW2_TX_IN_CTL 0x10 /* Transmit FIFO Control */
180#define CPSW2_PORT_VLAN 0x14 /* VLAN Register */
181#define CPSW2_TX_PRI_MAP 0x18 /* Tx Header Priority to Switch Pri Mapping */
182#define CPSW2_TS_SEQ_MTYPE 0x1c /* Time Sync Sequence ID Offset and Msg Type */
183
184/* CPSW_PORT_V1 and V2 */
185#define SA_LO 0x20 /* CPGMAC_SL Source Address Low */
186#define SA_HI 0x24 /* CPGMAC_SL Source Address High */
187#define SEND_PERCENT 0x28 /* Transmit Queue Send Percentages */
188
189/* CPSW_PORT_V2 only */
190#define RX_DSCP_PRI_MAP0 0x30 /* Rx DSCP Priority to Rx Packet Mapping */
191#define RX_DSCP_PRI_MAP1 0x34 /* Rx DSCP Priority to Rx Packet Mapping */
192#define RX_DSCP_PRI_MAP2 0x38 /* Rx DSCP Priority to Rx Packet Mapping */
193#define RX_DSCP_PRI_MAP3 0x3c /* Rx DSCP Priority to Rx Packet Mapping */
194#define RX_DSCP_PRI_MAP4 0x40 /* Rx DSCP Priority to Rx Packet Mapping */
195#define RX_DSCP_PRI_MAP5 0x44 /* Rx DSCP Priority to Rx Packet Mapping */
196#define RX_DSCP_PRI_MAP6 0x48 /* Rx DSCP Priority to Rx Packet Mapping */
197#define RX_DSCP_PRI_MAP7 0x4c /* Rx DSCP Priority to Rx Packet Mapping */
198
199/* Bit definitions for the CPSW2_CONTROL register */
200#define PASS_PRI_TAGGED (1<<24) /* Pass Priority Tagged */
201#define VLAN_LTYPE2_EN (1<<21) /* VLAN LTYPE 2 enable */
202#define VLAN_LTYPE1_EN (1<<20) /* VLAN LTYPE 1 enable */
203#define DSCP_PRI_EN (1<<16) /* DSCP Priority Enable */
204#define TS_320 (1<<14) /* Time Sync Dest Port 320 enable */
205#define TS_319 (1<<13) /* Time Sync Dest Port 319 enable */
206#define TS_132 (1<<12) /* Time Sync Dest IP Addr 132 enable */
207#define TS_131 (1<<11) /* Time Sync Dest IP Addr 131 enable */
208#define TS_130 (1<<10) /* Time Sync Dest IP Addr 130 enable */
209#define TS_129 (1<<9) /* Time Sync Dest IP Addr 129 enable */
210#define TS_BIT8 (1<<8) /* ts_ttl_nonzero? */
211#define TS_ANNEX_D_EN (1<<4) /* Time Sync Annex D enable */
212#define TS_LTYPE2_EN (1<<3) /* Time Sync LTYPE 2 enable */
213#define TS_LTYPE1_EN (1<<2) /* Time Sync LTYPE 1 enable */
214#define TS_TX_EN (1<<1) /* Time Sync Transmit Enable */
215#define TS_RX_EN (1<<0) /* Time Sync Receive Enable */
216
217#define CTRL_TS_BITS \
218 (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 | TS_BIT8 | \
219 TS_ANNEX_D_EN | TS_LTYPE1_EN)
220
221#define CTRL_ALL_TS_MASK (CTRL_TS_BITS | TS_TX_EN | TS_RX_EN)
222#define CTRL_TX_TS_BITS (CTRL_TS_BITS | TS_TX_EN)
223#define CTRL_RX_TS_BITS (CTRL_TS_BITS | TS_RX_EN)
224
225/* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */
226#define TS_SEQ_ID_OFFSET_SHIFT (16) /* Time Sync Sequence ID Offset */
227#define TS_SEQ_ID_OFFSET_MASK (0x3f)
228#define TS_MSG_TYPE_EN_SHIFT (0) /* Time Sync Message Type Enable */
229#define TS_MSG_TYPE_EN_MASK (0xffff)
230
231/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
232#define EVENT_MSG_BITS ((1<<0) | (1<<1) | (1<<2) | (1<<3))
233
234/* Bit definitions for the CPSW1_TS_CTL register */
235#define CPSW_V1_TS_RX_EN BIT(0)
236#define CPSW_V1_TS_TX_EN BIT(4)
237#define CPSW_V1_MSG_TYPE_OFS 16
238
239/* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */
240#define CPSW_V1_SEQ_ID_OFS_SHIFT 16
163 241
164struct cpsw_host_regs { 242struct cpsw_host_regs {
165 u32 max_blks; 243 u32 max_blks;
@@ -185,7 +263,7 @@ struct cpsw_sliver_regs {
185}; 263};
186 264
187struct cpsw_slave { 265struct cpsw_slave {
188 struct cpsw_slave_regs __iomem *regs; 266 void __iomem *regs;
189 struct cpsw_sliver_regs __iomem *sliver; 267 struct cpsw_sliver_regs __iomem *sliver;
190 int slave_num; 268 int slave_num;
191 u32 mac_control; 269 u32 mac_control;
@@ -193,19 +271,30 @@ struct cpsw_slave {
193 struct phy_device *phy; 271 struct phy_device *phy;
194}; 272};
195 273
274static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
275{
276 return __raw_readl(slave->regs + offset);
277}
278
279static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset)
280{
281 __raw_writel(val, slave->regs + offset);
282}
283
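The accessors above replace the old fixed struct cpsw_slave_regs layout
because the same logical register sits at a different offset on V1 and V2
ports (compare CPSW1_PORT_VLAN at 0x0c with CPSW2_PORT_VLAN at 0x14). A
hypothetical helper showing the intended dispatch pattern, mirroring the
tx_pri_map switch added later in this patch:

/* Illustrative only; example_set_port_vlan() is not part of the patch. */
static void example_set_port_vlan(struct cpsw_priv *priv,
				  struct cpsw_slave *slave, u32 vlan)
{
	switch (priv->version) {
	case CPSW_VERSION_1:
		slave_write(slave, vlan, CPSW1_PORT_VLAN);	/* 0x0c */
		break;
	case CPSW_VERSION_2:
		slave_write(slave, vlan, CPSW2_PORT_VLAN);	/* 0x14 */
		break;
	}
}
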
196struct cpsw_priv { 284struct cpsw_priv {
197 spinlock_t lock; 285 spinlock_t lock;
198 struct platform_device *pdev; 286 struct platform_device *pdev;
199 struct net_device *ndev; 287 struct net_device *ndev;
200 struct resource *cpsw_res; 288 struct resource *cpsw_res;
201 struct resource *cpsw_ss_res; 289 struct resource *cpsw_wr_res;
202 struct napi_struct napi; 290 struct napi_struct napi;
203 struct device *dev; 291 struct device *dev;
204 struct cpsw_platform_data data; 292 struct cpsw_platform_data data;
205 struct cpsw_regs __iomem *regs; 293 struct cpsw_ss_regs __iomem *regs;
206 struct cpsw_ss_regs __iomem *ss_regs; 294 struct cpsw_wr_regs __iomem *wr_regs;
207 struct cpsw_host_regs __iomem *host_port_regs; 295 struct cpsw_host_regs __iomem *host_port_regs;
208 u32 msg_enable; 296 u32 msg_enable;
297 u32 version;
209 struct net_device_stats stats; 298 struct net_device_stats stats;
210 int rx_packet_max; 299 int rx_packet_max;
211 int host_port; 300 int host_port;
@@ -218,6 +307,7 @@ struct cpsw_priv {
218 /* snapshot of IRQ numbers */ 307 /* snapshot of IRQ numbers */
219 u32 irqs_table[4]; 308 u32 irqs_table[4];
220 u32 num_irqs; 309 u32 num_irqs;
310 struct cpts cpts;
221}; 311};
222 312
223#define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi) 313#define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi)
@@ -228,10 +318,34 @@ struct cpsw_priv {
228 (func)((priv)->slaves + idx, ##arg); \ 318 (func)((priv)->slaves + idx, ##arg); \
229 } while (0) 319 } while (0)
230 320
321static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
322{
323 struct cpsw_priv *priv = netdev_priv(ndev);
324
325 if (ndev->flags & IFF_PROMISC) {
326 /* Enable promiscuous mode */
327 dev_err(priv->dev, "Ignoring Promiscuous mode\n");
328 return;
329 }
330
331 /* Clear all mcast from ALE */
332 cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port);
333
334 if (!netdev_mc_empty(ndev)) {
335 struct netdev_hw_addr *ha;
336
337 /* program multicast address list into ALE register */
338 netdev_for_each_mc_addr(ha, ndev) {
339 cpsw_ale_add_mcast(priv->ale, (u8 *)ha->addr,
340 ALE_ALL_PORTS << priv->host_port, 0, 0);
341 }
342 }
343}
344
231static void cpsw_intr_enable(struct cpsw_priv *priv) 345static void cpsw_intr_enable(struct cpsw_priv *priv)
232{ 346{
233 __raw_writel(0xFF, &priv->ss_regs->tx_en); 347 __raw_writel(0xFF, &priv->wr_regs->tx_en);
234 __raw_writel(0xFF, &priv->ss_regs->rx_en); 348 __raw_writel(0xFF, &priv->wr_regs->rx_en);
235 349
236 cpdma_ctlr_int_ctrl(priv->dma, true); 350 cpdma_ctlr_int_ctrl(priv->dma, true);
237 return; 351 return;
@@ -239,8 +353,8 @@ static void cpsw_intr_enable(struct cpsw_priv *priv)
239 353
240static void cpsw_intr_disable(struct cpsw_priv *priv) 354static void cpsw_intr_disable(struct cpsw_priv *priv)
241{ 355{
242 __raw_writel(0, &priv->ss_regs->tx_en); 356 __raw_writel(0, &priv->wr_regs->tx_en);
243 __raw_writel(0, &priv->ss_regs->rx_en); 357 __raw_writel(0, &priv->wr_regs->rx_en);
244 358
245 cpdma_ctlr_int_ctrl(priv->dma, false); 359 cpdma_ctlr_int_ctrl(priv->dma, false);
246 return; 360 return;
@@ -254,6 +368,7 @@ void cpsw_tx_handler(void *token, int len, int status)
254 368
255 if (unlikely(netif_queue_stopped(ndev))) 369 if (unlikely(netif_queue_stopped(ndev)))
256 netif_start_queue(ndev); 370 netif_start_queue(ndev);
371 cpts_tx_timestamp(&priv->cpts, skb);
257 priv->stats.tx_packets++; 372 priv->stats.tx_packets++;
258 priv->stats.tx_bytes += len; 373 priv->stats.tx_bytes += len;
259 dev_kfree_skb_any(skb); 374 dev_kfree_skb_any(skb);
@@ -274,6 +389,7 @@ void cpsw_rx_handler(void *token, int len, int status)
274 } 389 }
275 if (likely(status >= 0)) { 390 if (likely(status >= 0)) {
276 skb_put(skb, len); 391 skb_put(skb, len);
392 cpts_rx_timestamp(&priv->cpts, skb);
277 skb->protocol = eth_type_trans(skb, ndev); 393 skb->protocol = eth_type_trans(skb, ndev);
278 netif_receive_skb(skb); 394 netif_receive_skb(skb);
279 priv->stats.rx_bytes += len; 395 priv->stats.rx_bytes += len;
@@ -359,8 +475,8 @@ static inline void soft_reset(const char *module, void __iomem *reg)
359static void cpsw_set_slave_mac(struct cpsw_slave *slave, 475static void cpsw_set_slave_mac(struct cpsw_slave *slave,
360 struct cpsw_priv *priv) 476 struct cpsw_priv *priv)
361{ 477{
362 __raw_writel(mac_hi(priv->mac_addr), &slave->regs->sa_hi); 478 slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
363 __raw_writel(mac_lo(priv->mac_addr), &slave->regs->sa_lo); 479 slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
364} 480}
365 481
366static void _cpsw_adjust_link(struct cpsw_slave *slave, 482static void _cpsw_adjust_link(struct cpsw_slave *slave,
@@ -446,7 +562,15 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
446 562
447 /* setup priority mapping */ 563 /* setup priority mapping */
448 __raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map); 564 __raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);
449 __raw_writel(TX_PRIORITY_MAPPING, &slave->regs->tx_pri_map); 565
566 switch (priv->version) {
567 case CPSW_VERSION_1:
568 slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
569 break;
570 case CPSW_VERSION_2:
571 slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
572 break;
573 }
450 574
451 /* setup max packet size, and mac address */ 575 /* setup max packet size, and mac address */
452 __raw_writel(priv->rx_packet_max, &slave->sliver->rx_maxlen); 576 __raw_writel(priv->rx_packet_max, &slave->sliver->rx_maxlen);
@@ -506,6 +630,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
506 pm_runtime_get_sync(&priv->pdev->dev); 630 pm_runtime_get_sync(&priv->pdev->dev);
507 631
508 reg = __raw_readl(&priv->regs->id_ver); 632 reg = __raw_readl(&priv->regs->id_ver);
633 priv->version = reg;
509 634
510 dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n", 635 dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
511 CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg), 636 CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
@@ -592,6 +717,11 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
592 return NETDEV_TX_OK; 717 return NETDEV_TX_OK;
593 } 718 }
594 719
720 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && priv->cpts.tx_enable)
721 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
722
723 skb_tx_timestamp(skb);
724
595 ret = cpdma_chan_submit(priv->txch, skb, skb->data, 725 ret = cpdma_chan_submit(priv->txch, skb, skb->data,
596 skb->len, GFP_KERNEL); 726 skb->len, GFP_KERNEL);
597 if (unlikely(ret != 0)) { 727 if (unlikely(ret != 0)) {
@@ -629,6 +759,130 @@ static void cpsw_ndo_change_rx_flags(struct net_device *ndev, int flags)
629 dev_err(&ndev->dev, "multicast traffic cannot be filtered!\n"); 759 dev_err(&ndev->dev, "multicast traffic cannot be filtered!\n");
630} 760}
631 761
762#ifdef CONFIG_TI_CPTS
763
764static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
765{
766 struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave];
767 u32 ts_en, seq_id;
768
769 if (!priv->cpts.tx_enable && !priv->cpts.rx_enable) {
770 slave_write(slave, 0, CPSW1_TS_CTL);
771 return;
772 }
773
774 seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
775 ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
776
777 if (priv->cpts.tx_enable)
778 ts_en |= CPSW_V1_TS_TX_EN;
779
780 if (priv->cpts.rx_enable)
781 ts_en |= CPSW_V1_TS_RX_EN;
782
783 slave_write(slave, ts_en, CPSW1_TS_CTL);
784 slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
785}
786
787static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
788{
789 struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave];
790 u32 ctrl, mtype;
791
792 ctrl = slave_read(slave, CPSW2_CONTROL);
793 ctrl &= ~CTRL_ALL_TS_MASK;
794
795 if (priv->cpts.tx_enable)
796 ctrl |= CTRL_TX_TS_BITS;
797
798 if (priv->cpts.rx_enable)
799 ctrl |= CTRL_RX_TS_BITS;
800
801 mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
802
803 slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
804 slave_write(slave, ctrl, CPSW2_CONTROL);
805 __raw_writel(ETH_P_1588, &priv->regs->ts_ltype);
806}
807
808static int cpsw_hwtstamp_ioctl(struct cpsw_priv *priv, struct ifreq *ifr)
809{
810 struct cpts *cpts = &priv->cpts;
811 struct hwtstamp_config cfg;
812
813 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
814 return -EFAULT;
815
816 /* reserved for future extensions */
817 if (cfg.flags)
818 return -EINVAL;
819
820 switch (cfg.tx_type) {
821 case HWTSTAMP_TX_OFF:
822 cpts->tx_enable = 0;
823 break;
824 case HWTSTAMP_TX_ON:
825 cpts->tx_enable = 1;
826 break;
827 default:
828 return -ERANGE;
829 }
830
831 switch (cfg.rx_filter) {
832 case HWTSTAMP_FILTER_NONE:
833 cpts->rx_enable = 0;
834 break;
835 case HWTSTAMP_FILTER_ALL:
836 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
837 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
838 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
839 return -ERANGE;
840 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
841 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
842 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
843 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
844 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
845 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
846 case HWTSTAMP_FILTER_PTP_V2_EVENT:
847 case HWTSTAMP_FILTER_PTP_V2_SYNC:
848 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
849 cpts->rx_enable = 1;
850 cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
851 break;
852 default:
853 return -ERANGE;
854 }
855
856 switch (priv->version) {
857 case CPSW_VERSION_1:
858 cpsw_hwtstamp_v1(priv);
859 break;
860 case CPSW_VERSION_2:
861 cpsw_hwtstamp_v2(priv);
862 break;
863 default:
864 return -ENOTSUPP;
865 }
866
867 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
868}
869
870#endif /*CONFIG_TI_CPTS*/
871
872static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
873{
874 struct cpsw_priv *priv = netdev_priv(dev);
875
876 if (!netif_running(dev))
877 return -EINVAL;
878
879#ifdef CONFIG_TI_CPTS
880 if (cmd == SIOCSHWTSTAMP)
881 return cpsw_hwtstamp_ioctl(priv, req);
882#endif
883 return -ENOTSUPP;
884}
885
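The SIOCSHWTSTAMP handler above is driven from userspace through the
standard hwtstamp_config ioctl. A minimal sketch of a caller requesting TX
timestamps plus the one RX filter the driver grants (the interface name
"eth0" is an assumption):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr))
		perror("SIOCSHWTSTAMP");
	else	/* driver copies the granted config back */
		printf("granted rx_filter %d\n", cfg.rx_filter);
	close(fd);
	return 0;
}

Since the handler coerces every accepted V2 filter to
HWTSTAMP_FILTER_PTP_V2_EVENT, the value copied back reports that even when
a narrower filter was requested; ethtool's -T option surfaces the same
capabilities via cpsw_get_ts_info() below.
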
632static void cpsw_ndo_tx_timeout(struct net_device *ndev) 886static void cpsw_ndo_tx_timeout(struct net_device *ndev)
633{ 887{
634 struct cpsw_priv *priv = netdev_priv(ndev); 888 struct cpsw_priv *priv = netdev_priv(ndev);
@@ -669,10 +923,12 @@ static const struct net_device_ops cpsw_netdev_ops = {
669 .ndo_stop = cpsw_ndo_stop, 923 .ndo_stop = cpsw_ndo_stop,
670 .ndo_start_xmit = cpsw_ndo_start_xmit, 924 .ndo_start_xmit = cpsw_ndo_start_xmit,
671 .ndo_change_rx_flags = cpsw_ndo_change_rx_flags, 925 .ndo_change_rx_flags = cpsw_ndo_change_rx_flags,
926 .ndo_do_ioctl = cpsw_ndo_ioctl,
672 .ndo_validate_addr = eth_validate_addr, 927 .ndo_validate_addr = eth_validate_addr,
673 .ndo_change_mtu = eth_change_mtu, 928 .ndo_change_mtu = eth_change_mtu,
674 .ndo_tx_timeout = cpsw_ndo_tx_timeout, 929 .ndo_tx_timeout = cpsw_ndo_tx_timeout,
675 .ndo_get_stats = cpsw_ndo_get_stats, 930 .ndo_get_stats = cpsw_ndo_get_stats,
931 .ndo_set_rx_mode = cpsw_ndo_set_rx_mode,
676#ifdef CONFIG_NET_POLL_CONTROLLER 932#ifdef CONFIG_NET_POLL_CONTROLLER
677 .ndo_poll_controller = cpsw_ndo_poll_controller, 933 .ndo_poll_controller = cpsw_ndo_poll_controller,
678#endif 934#endif
@@ -699,11 +955,44 @@ static void cpsw_set_msglevel(struct net_device *ndev, u32 value)
699 priv->msg_enable = value; 955 priv->msg_enable = value;
700} 956}
701 957
958static int cpsw_get_ts_info(struct net_device *ndev,
959 struct ethtool_ts_info *info)
960{
961#ifdef CONFIG_TI_CPTS
962 struct cpsw_priv *priv = netdev_priv(ndev);
963
964 info->so_timestamping =
965 SOF_TIMESTAMPING_TX_HARDWARE |
966 SOF_TIMESTAMPING_TX_SOFTWARE |
967 SOF_TIMESTAMPING_RX_HARDWARE |
968 SOF_TIMESTAMPING_RX_SOFTWARE |
969 SOF_TIMESTAMPING_SOFTWARE |
970 SOF_TIMESTAMPING_RAW_HARDWARE;
971 info->phc_index = priv->cpts.phc_index;
972 info->tx_types =
973 (1 << HWTSTAMP_TX_OFF) |
974 (1 << HWTSTAMP_TX_ON);
975 info->rx_filters =
976 (1 << HWTSTAMP_FILTER_NONE) |
977 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
978#else
979 info->so_timestamping =
980 SOF_TIMESTAMPING_TX_SOFTWARE |
981 SOF_TIMESTAMPING_RX_SOFTWARE |
982 SOF_TIMESTAMPING_SOFTWARE;
983 info->phc_index = -1;
984 info->tx_types = 0;
985 info->rx_filters = 0;
986#endif
987 return 0;
988}
989
702static const struct ethtool_ops cpsw_ethtool_ops = { 990static const struct ethtool_ops cpsw_ethtool_ops = {
703 .get_drvinfo = cpsw_get_drvinfo, 991 .get_drvinfo = cpsw_get_drvinfo,
704 .get_msglevel = cpsw_get_msglevel, 992 .get_msglevel = cpsw_get_msglevel,
705 .set_msglevel = cpsw_set_msglevel, 993 .set_msglevel = cpsw_set_msglevel,
706 .get_link = ethtool_op_get_link, 994 .get_link = ethtool_op_get_link,
995 .get_ts_info = cpsw_get_ts_info,
707}; 996};
708 997
709static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv) 998static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
@@ -734,6 +1023,27 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
734 } 1023 }
735 data->slaves = prop; 1024 data->slaves = prop;
736 1025
1026 if (of_property_read_u32(node, "cpts_active_slave", &prop)) {
1027 pr_err("Missing cpts_active_slave property in the DT.\n");
1028 ret = -EINVAL;
1029 goto error_ret;
1030 }
1031 data->cpts_active_slave = prop;
1032
1033 if (of_property_read_u32(node, "cpts_clock_mult", &prop)) {
1034 pr_err("Missing cpts_clock_mult property in the DT.\n");
1035 ret = -EINVAL;
1036 goto error_ret;
1037 }
1038 data->cpts_clock_mult = prop;
1039
1040 if (of_property_read_u32(node, "cpts_clock_shift", &prop)) {
1041 pr_err("Missing cpts_clock_shift property in the DT.\n");
1042 ret = -EINVAL;
1043 goto error_ret;
1044 }
1045 data->cpts_clock_shift = prop;
1046
737 data->slave_data = kzalloc(sizeof(struct cpsw_slave_data) * 1047 data->slave_data = kzalloc(sizeof(struct cpsw_slave_data) *
738 data->slaves, GFP_KERNEL); 1048 data->slaves, GFP_KERNEL);
739 if (!data->slave_data) { 1049 if (!data->slave_data) {
@@ -799,6 +1109,13 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
799 } 1109 }
800 data->hw_stats_reg_ofs = prop; 1110 data->hw_stats_reg_ofs = prop;
801 1111
1112 if (of_property_read_u32(node, "cpts_reg_ofs", &prop)) {
1113 pr_err("Missing cpts_reg_ofs property in the DT.\n");
1114 ret = -EINVAL;
1115 goto error_ret;
1116 }
1117 data->cpts_reg_ofs = prop;
1118
802 if (of_property_read_u32(node, "bd_ram_ofs", &prop)) { 1119 if (of_property_read_u32(node, "bd_ram_ofs", &prop)) {
803 pr_err("Missing bd_ram_ofs property in the DT.\n"); 1120 pr_err("Missing bd_ram_ofs property in the DT.\n");
804 ret = -EINVAL; 1121 ret = -EINVAL;
@@ -935,14 +1252,12 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
935 ret = -ENOENT; 1252 ret = -ENOENT;
936 goto clean_clk_ret; 1253 goto clean_clk_ret;
937 } 1254 }
938
939 if (!request_mem_region(priv->cpsw_res->start, 1255 if (!request_mem_region(priv->cpsw_res->start,
940 resource_size(priv->cpsw_res), ndev->name)) { 1256 resource_size(priv->cpsw_res), ndev->name)) {
941 dev_err(priv->dev, "failed request i/o region\n"); 1257 dev_err(priv->dev, "failed request i/o region\n");
942 ret = -ENXIO; 1258 ret = -ENXIO;
943 goto clean_clk_ret; 1259 goto clean_clk_ret;
944 } 1260 }
945
946 regs = ioremap(priv->cpsw_res->start, resource_size(priv->cpsw_res)); 1261 regs = ioremap(priv->cpsw_res->start, resource_size(priv->cpsw_res));
947 if (!regs) { 1262 if (!regs) {
948 dev_err(priv->dev, "unable to map i/o region\n"); 1263 dev_err(priv->dev, "unable to map i/o region\n");
@@ -951,28 +1266,27 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
951 priv->regs = regs; 1266 priv->regs = regs;
952 priv->host_port = data->host_port_num; 1267 priv->host_port = data->host_port_num;
953 priv->host_port_regs = regs + data->host_port_reg_ofs; 1268 priv->host_port_regs = regs + data->host_port_reg_ofs;
1269 priv->cpts.reg = regs + data->cpts_reg_ofs;
954 1270
955 priv->cpsw_ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1271 priv->cpsw_wr_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
956 if (!priv->cpsw_ss_res) { 1272 if (!priv->cpsw_wr_res) {
957 dev_err(priv->dev, "error getting i/o resource\n"); 1273 dev_err(priv->dev, "error getting i/o resource\n");
958 ret = -ENOENT; 1274 ret = -ENOENT;
959 goto clean_clk_ret; 1275 goto clean_iomap_ret;
960 } 1276 }
961 1277 if (!request_mem_region(priv->cpsw_wr_res->start,
962 if (!request_mem_region(priv->cpsw_ss_res->start, 1278 resource_size(priv->cpsw_wr_res), ndev->name)) {
963 resource_size(priv->cpsw_ss_res), ndev->name)) {
964 dev_err(priv->dev, "failed request i/o region\n"); 1279 dev_err(priv->dev, "failed request i/o region\n");
965 ret = -ENXIO; 1280 ret = -ENXIO;
966 goto clean_clk_ret; 1281 goto clean_iomap_ret;
967 } 1282 }
968 1283 regs = ioremap(priv->cpsw_wr_res->start,
969 regs = ioremap(priv->cpsw_ss_res->start, 1284 resource_size(priv->cpsw_wr_res));
970 resource_size(priv->cpsw_ss_res));
971 if (!regs) { 1285 if (!regs) {
972 dev_err(priv->dev, "unable to map i/o region\n"); 1286 dev_err(priv->dev, "unable to map i/o region\n");
973 goto clean_cpsw_ss_iores_ret; 1287 goto clean_cpsw_wr_iores_ret;
974 } 1288 }
975 priv->ss_regs = regs; 1289 priv->wr_regs = regs;
976 1290
977 for_each_slave(priv, cpsw_slave_init, priv); 1291 for_each_slave(priv, cpsw_slave_init, priv);
978 1292
@@ -1008,7 +1322,7 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
1008 if (!priv->dma) { 1322 if (!priv->dma) {
1009 dev_err(priv->dev, "error initializing dma\n"); 1323 dev_err(priv->dev, "error initializing dma\n");
1010 ret = -ENOMEM; 1324 ret = -ENOMEM;
1011 goto clean_iomap_ret; 1325 goto clean_wr_iomap_ret;
1012 } 1326 }
1013 1327
1014 priv->txch = cpdma_chan_create(priv->dma, tx_chan_num(0), 1328 priv->txch = cpdma_chan_create(priv->dma, tx_chan_num(0),
@@ -1072,6 +1386,10 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
1072 goto clean_irq_ret; 1386 goto clean_irq_ret;
1073 } 1387 }
1074 1388
1389 if (cpts_register(&pdev->dev, &priv->cpts,
1390 data->cpts_clock_mult, data->cpts_clock_shift))
1391 dev_err(priv->dev, "error registering cpts device\n");
1392
1075 cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n", 1393 cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n",
1076 priv->cpsw_res->start, ndev->irq); 1394 priv->cpsw_res->start, ndev->irq);
1077 1395
@@ -1085,11 +1403,13 @@ clean_dma_ret:
1085 cpdma_chan_destroy(priv->txch); 1403 cpdma_chan_destroy(priv->txch);
1086 cpdma_chan_destroy(priv->rxch); 1404 cpdma_chan_destroy(priv->rxch);
1087 cpdma_ctlr_destroy(priv->dma); 1405 cpdma_ctlr_destroy(priv->dma);
1406clean_wr_iomap_ret:
1407 iounmap(priv->wr_regs);
1408clean_cpsw_wr_iores_ret:
1409 release_mem_region(priv->cpsw_wr_res->start,
1410 resource_size(priv->cpsw_wr_res));
1088clean_iomap_ret: 1411clean_iomap_ret:
1089 iounmap(priv->regs); 1412 iounmap(priv->regs);
1090clean_cpsw_ss_iores_ret:
1091 release_mem_region(priv->cpsw_ss_res->start,
1092 resource_size(priv->cpsw_ss_res));
1093clean_cpsw_iores_ret: 1413clean_cpsw_iores_ret:
1094 release_mem_region(priv->cpsw_res->start, 1414 release_mem_region(priv->cpsw_res->start,
1095 resource_size(priv->cpsw_res)); 1415 resource_size(priv->cpsw_res));
@@ -1111,6 +1431,7 @@ static int __devexit cpsw_remove(struct platform_device *pdev)
1111 pr_info("removing device"); 1431 pr_info("removing device");
1112 platform_set_drvdata(pdev, NULL); 1432 platform_set_drvdata(pdev, NULL);
1113 1433
1434 cpts_unregister(&priv->cpts);
1114 free_irq(ndev->irq, priv); 1435 free_irq(ndev->irq, priv);
1115 cpsw_ale_destroy(priv->ale); 1436 cpsw_ale_destroy(priv->ale);
1116 cpdma_chan_destroy(priv->txch); 1437 cpdma_chan_destroy(priv->txch);
@@ -1119,8 +1440,9 @@ static int __devexit cpsw_remove(struct platform_device *pdev)
1119 iounmap(priv->regs); 1440 iounmap(priv->regs);
1120 release_mem_region(priv->cpsw_res->start, 1441 release_mem_region(priv->cpsw_res->start,
1121 resource_size(priv->cpsw_res)); 1442 resource_size(priv->cpsw_res));
1122 release_mem_region(priv->cpsw_ss_res->start, 1443 iounmap(priv->wr_regs);
1123 resource_size(priv->cpsw_ss_res)); 1444 release_mem_region(priv->cpsw_wr_res->start,
1445 resource_size(priv->cpsw_wr_res));
1124 pm_runtime_disable(&pdev->dev); 1446 pm_runtime_disable(&pdev->dev);
1125 clk_put(priv->clk); 1447 clk_put(priv->clk);
1126 kfree(priv->slaves); 1448 kfree(priv->slaves);
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index ca0d48a7e508..0e9ccc2cf91f 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -20,6 +20,7 @@
20#include <linux/io.h> 20#include <linux/io.h>
21#include <linux/stat.h> 21#include <linux/stat.h>
22#include <linux/sysfs.h> 22#include <linux/sysfs.h>
23#include <linux/etherdevice.h>
23 24
24#include "cpsw_ale.h" 25#include "cpsw_ale.h"
25 26
@@ -211,10 +212,34 @@ static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry,
211 mask &= ~port_mask; 212 mask &= ~port_mask;
212 213
213 /* free if only remaining port is host port */ 214 /* free if only remaining port is host port */
214 if (mask == BIT(ale->params.ale_ports)) 215 if (mask)
215 cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
216 else
217 cpsw_ale_set_port_mask(ale_entry, mask); 216 cpsw_ale_set_port_mask(ale_entry, mask);
217 else
218 cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
219}
220
221int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
222{
223 u32 ale_entry[ALE_ENTRY_WORDS];
224 int ret, idx;
225
226 for (idx = 0; idx < ale->params.ale_entries; idx++) {
227 cpsw_ale_read(ale, idx, ale_entry);
228 ret = cpsw_ale_get_entry_type(ale_entry);
229 if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR)
230 continue;
231
232 if (cpsw_ale_get_mcast(ale_entry)) {
233 u8 addr[6];
234
235 cpsw_ale_get_addr(ale_entry, addr);
236 if (!is_broadcast_ether_addr(addr))
237 cpsw_ale_flush_mcast(ale, ale_entry, port_mask);
238 }
239
240 cpsw_ale_write(ale, idx, ale_entry);
241 }
242 return 0;
218} 243}
219 244
220static void cpsw_ale_flush_ucast(struct cpsw_ale *ale, u32 *ale_entry, 245static void cpsw_ale_flush_ucast(struct cpsw_ale *ale, u32 *ale_entry,
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index a95b37beb02d..2bd09cbce522 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -80,6 +80,7 @@ void cpsw_ale_stop(struct cpsw_ale *ale);
80 80
81int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout); 81int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout);
82int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask); 82int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask);
83int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask);
83int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags); 84int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags);
84int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port); 85int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port);
85int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, 86int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
new file mode 100644
index 000000000000..337766738eca
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -0,0 +1,427 @@
1/*
2 * TI Common Platform Time Sync
3 *
4 * Copyright (C) 2012 Richard Cochran <richardcochran@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20#include <linux/err.h>
21#include <linux/if.h>
22#include <linux/hrtimer.h>
23#include <linux/module.h>
24#include <linux/net_tstamp.h>
25#include <linux/ptp_classify.h>
26#include <linux/time.h>
27#include <linux/uaccess.h>
28#include <linux/workqueue.h>
29
30#include <plat/clock.h>
31
32#include "cpts.h"
33
34#ifdef CONFIG_TI_CPTS
35
36static struct sock_filter ptp_filter[] = {
37 PTP_FILTER
38};
39
40#define cpts_read32(c, r) __raw_readl(&c->reg->r)
41#define cpts_write32(c, v, r) __raw_writel(v, &c->reg->r)
42
43static int event_expired(struct cpts_event *event)
44{
45 return time_after(jiffies, event->tmo);
46}
47
48static int event_type(struct cpts_event *event)
49{
50 return (event->high >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
51}
52
53static int cpts_fifo_pop(struct cpts *cpts, u32 *high, u32 *low)
54{
55 u32 r = cpts_read32(cpts, intstat_raw);
56
57 if (r & TS_PEND_RAW) {
58 *high = cpts_read32(cpts, event_high);
59 *low = cpts_read32(cpts, event_low);
60 cpts_write32(cpts, EVENT_POP, event_pop);
61 return 0;
62 }
63 return -1;
64}
65
66/*
67 * Returns zero if matching event type was found.
68 */
69static int cpts_fifo_read(struct cpts *cpts, int match)
70{
71 int i, type = -1;
72 u32 hi, lo;
73 struct cpts_event *event;
74
75 for (i = 0; i < CPTS_FIFO_DEPTH; i++) {
76 if (cpts_fifo_pop(cpts, &hi, &lo))
77 break;
78 if (list_empty(&cpts->pool)) {
79 pr_err("cpts: event pool is empty\n");
80 return -1;
81 }
82 event = list_first_entry(&cpts->pool, struct cpts_event, list);
83 event->tmo = jiffies + 2;
84 event->high = hi;
85 event->low = lo;
86 type = event_type(event);
87 switch (type) {
88 case CPTS_EV_PUSH:
89 case CPTS_EV_RX:
90 case CPTS_EV_TX:
91 list_del_init(&event->list);
92 list_add_tail(&event->list, &cpts->events);
93 break;
94 case CPTS_EV_ROLL:
95 case CPTS_EV_HALF:
96 case CPTS_EV_HW:
97 break;
98 default:
99 pr_err("cpts: unknown event type\n");
100 break;
101 }
102 if (type == match)
103 break;
104 }
105 return type == match ? 0 : -1;
106}
107
108static cycle_t cpts_systim_read(const struct cyclecounter *cc)
109{
110 u64 val = 0;
111 struct cpts_event *event;
112 struct list_head *this, *next;
113 struct cpts *cpts = container_of(cc, struct cpts, cc);
114
115 cpts_write32(cpts, TS_PUSH, ts_push);
116 if (cpts_fifo_read(cpts, CPTS_EV_PUSH))
117 pr_err("cpts: unable to obtain a time stamp\n");
118
119 list_for_each_safe(this, next, &cpts->events) {
120 event = list_entry(this, struct cpts_event, list);
121 if (event_type(event) == CPTS_EV_PUSH) {
122 list_del_init(&event->list);
123 list_add(&event->list, &cpts->pool);
124 val = event->low;
125 break;
126 }
127 }
128
129 return val;
130}
131
132/* PTP clock operations */
133
134static int cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
135{
136 u64 adj;
137 u32 diff, mult;
138 int neg_adj = 0;
139 unsigned long flags;
140 struct cpts *cpts = container_of(ptp, struct cpts, info);
141
142 if (ppb < 0) {
143 neg_adj = 1;
144 ppb = -ppb;
145 }
146 mult = cpts->cc_mult;
147 adj = mult;
148 adj *= ppb;
149 diff = div_u64(adj, 1000000000ULL);
150
151 spin_lock_irqsave(&cpts->lock, flags);
152
153 timecounter_read(&cpts->tc);
154
155 cpts->cc.mult = neg_adj ? mult - diff : mult + diff;
156
157 spin_unlock_irqrestore(&cpts->lock, flags);
158
159 return 0;
160}
161
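The adjfreq callback above scales the saved nominal multiplier by ppb parts
per billion, diff = mult * |ppb| / 10^9, and adds or subtracts that from
cc.mult. A worked example with an assumed nominal multiplier:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mult = 0x80000000u;	/* assumed nominal cc.mult */
	int32_t ppb = 125000;		/* speed the clock up by 125 ppm */
	uint64_t adj = (uint64_t)mult * ppb;
	uint32_t diff = adj / 1000000000ull;

	/* 0x80000000 * 125000 / 1e9 = 268435, new mult 0x80041893 */
	printf("diff=%u new mult=%#x\n", diff, mult + diff);
	return 0;
}
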
162static int cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
163{
164 s64 now;
165 unsigned long flags;
166 struct cpts *cpts = container_of(ptp, struct cpts, info);
167
168 spin_lock_irqsave(&cpts->lock, flags);
169 now = timecounter_read(&cpts->tc);
170 now += delta;
171 timecounter_init(&cpts->tc, &cpts->cc, now);
172 spin_unlock_irqrestore(&cpts->lock, flags);
173
174 return 0;
175}
176
177static int cpts_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
178{
179 u64 ns;
180 u32 remainder;
181 unsigned long flags;
182 struct cpts *cpts = container_of(ptp, struct cpts, info);
183
184 spin_lock_irqsave(&cpts->lock, flags);
185 ns = timecounter_read(&cpts->tc);
186 spin_unlock_irqrestore(&cpts->lock, flags);
187
188 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
189 ts->tv_nsec = remainder;
190
191 return 0;
192}
193
194static int cpts_ptp_settime(struct ptp_clock_info *ptp,
195 const struct timespec *ts)
196{
197 u64 ns;
198 unsigned long flags;
199 struct cpts *cpts = container_of(ptp, struct cpts, info);
200
201 ns = ts->tv_sec * 1000000000ULL;
202 ns += ts->tv_nsec;
203
204 spin_lock_irqsave(&cpts->lock, flags);
205 timecounter_init(&cpts->tc, &cpts->cc, ns);
206 spin_unlock_irqrestore(&cpts->lock, flags);
207
208 return 0;
209}
210
211static int cpts_ptp_enable(struct ptp_clock_info *ptp,
212 struct ptp_clock_request *rq, int on)
213{
214 return -EOPNOTSUPP;
215}
216
217static struct ptp_clock_info cpts_info = {
218 .owner = THIS_MODULE,
219 .name = "CPTS timer",
220 .max_adj = 1000000,
221 .n_ext_ts = 0,
222 .pps = 0,
223 .adjfreq = cpts_ptp_adjfreq,
224 .adjtime = cpts_ptp_adjtime,
225 .gettime = cpts_ptp_gettime,
226 .settime = cpts_ptp_settime,
227 .enable = cpts_ptp_enable,
228};
229
230static void cpts_overflow_check(struct work_struct *work)
231{
232 struct timespec ts;
233 struct cpts *cpts = container_of(work, struct cpts, overflow_work.work);
234
235 cpts_write32(cpts, CPTS_EN, control);
236 cpts_write32(cpts, TS_PEND_EN, int_enable);
237 cpts_ptp_gettime(&cpts->info, &ts);
238 pr_debug("cpts overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec);
239 schedule_delayed_work(&cpts->overflow_work, CPTS_OVERFLOW_PERIOD);
240}
241
242#define CPTS_REF_CLOCK_NAME "cpsw_cpts_rft_clk"
243
244static void cpts_clk_init(struct cpts *cpts)
245{
246 cpts->refclk = clk_get(NULL, CPTS_REF_CLOCK_NAME);
247 if (IS_ERR(cpts->refclk)) {
248 pr_err("Failed to clk_get %s\n", CPTS_REF_CLOCK_NAME);
249 cpts->refclk = NULL;
250 return;
251 }
252 clk_prepare_enable(cpts->refclk);
253 cpts->freq = clk_get_rate(cpts->refclk);
254}
255
256static void cpts_clk_release(struct cpts *cpts)
257{
258 clk_disable_unprepare(cpts->refclk);
259 clk_put(cpts->refclk);
260}
261
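/*
 * cpts_match() locates the PTP header for each recognized packet class
 * and compares its message type and sequence ID with the fields
 * latched by the hardware. Worked offset (illustrative): for
 * PTP_CLASS_V2_IPV4 with no IP options, the header starts at
 * ETH_HLEN + IPV4_HLEN + UDP_HLEN = 14 + 20 + 8 = 42 bytes. For PTPv2
 * the message type is the low nibble of the first header byte; PTPv1
 * keeps it in the control field at OFF_PTP_CONTROL instead.
 */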
262static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
263 u16 ts_seqid, u8 ts_msgtype)
264{
265 u16 *seqid;
266 unsigned int offset;
267 u8 *msgtype, *data = skb->data;
268
269 switch (ptp_class) {
270 case PTP_CLASS_V1_IPV4:
271 case PTP_CLASS_V2_IPV4:
272 offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
273 break;
274 case PTP_CLASS_V1_IPV6:
275 case PTP_CLASS_V2_IPV6:
276 offset = OFF_PTP6;
277 break;
278 case PTP_CLASS_V2_L2:
279 offset = ETH_HLEN;
280 break;
281 case PTP_CLASS_V2_VLAN:
282 offset = ETH_HLEN + VLAN_HLEN;
283 break;
284 default:
285 return 0;
286 }
287
288 if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
289 return 0;
290
291 if (unlikely(ptp_class & PTP_CLASS_V1))
292 msgtype = data + offset + OFF_PTP_CONTROL;
293 else
294 msgtype = data + offset;
295
296 seqid = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
297
298 return (ts_msgtype == (*msgtype & 0xf) && ts_seqid == ntohs(*seqid));
299}
300
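/*
 * cpts_find_ts() classifies the skb with the BPF-based ptp_filter,
 * drains any pending hardware events, retires entries whose timeout
 * has expired, and converts the first event matching the packet's
 * event type, message type, and sequence ID into nanoseconds via the
 * timecounter. Both expired and matched events return to the free
 * pool.
 */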
301static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
302{
303 u64 ns = 0;
304 struct cpts_event *event;
305 struct list_head *this, *next;
306 unsigned int class = sk_run_filter(skb, ptp_filter);
307 unsigned long flags;
308 u16 seqid;
309 u8 mtype;
310
311 if (class == PTP_CLASS_NONE)
312 return 0;
313
314 spin_lock_irqsave(&cpts->lock, flags);
315 cpts_fifo_read(cpts, CPTS_EV_PUSH);
316 list_for_each_safe(this, next, &cpts->events) {
317 event = list_entry(this, struct cpts_event, list);
318 if (event_expired(event)) {
319 list_del_init(&event->list);
320 list_add(&event->list, &cpts->pool);
321 continue;
322 }
323 mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK;
324 seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK;
325 if (ev_type == event_type(event) &&
326 cpts_match(skb, class, seqid, mtype)) {
327 ns = timecounter_cyc2time(&cpts->tc, event->low);
328 list_del_init(&event->list);
329 list_add(&event->list, &cpts->pool);
330 break;
331 }
332 }
333 spin_unlock_irqrestore(&cpts->lock, flags);
334
335 return ns;
336}
337
338void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
339{
340 u64 ns;
341 struct skb_shared_hwtstamps *ssh;
342
343 if (!cpts->rx_enable)
344 return;
345 ns = cpts_find_ts(cpts, skb, CPTS_EV_RX);
346 if (!ns)
347 return;
348 ssh = skb_hwtstamps(skb);
349 memset(ssh, 0, sizeof(*ssh));
350 ssh->hwtstamp = ns_to_ktime(ns);
351}
352
353void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
354{
355 u64 ns;
356 struct skb_shared_hwtstamps ssh;
357
358 if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
359 return;
360 ns = cpts_find_ts(cpts, skb, CPTS_EV_TX);
361 if (!ns)
362 return;
363 memset(&ssh, 0, sizeof(ssh));
364 ssh.hwtstamp = ns_to_ktime(ns);
365 skb_tstamp_tx(skb, &ssh);
366}
367
368#endif /*CONFIG_TI_CPTS*/
369
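/*
 * A rough sketch of how a caller might derive the mult/shift pair for
 * cpts_register() (illustrative values, not part of this patch): the
 * timecounter converts cycles to nanoseconds as
 * ns = (cycles * mult) >> shift, so a 250 MHz reference clock
 * (4 ns per cycle) works with shift = 29 and mult = 4 << 29:
 *
 *	u32 shift = 29;
 *	u32 mult = clocksource_hz2mult(250000000, shift);	// 0x80000000
 *
 *	err = cpts_register(&pdev->dev, cpts, mult, shift);
 */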
370int cpts_register(struct device *dev, struct cpts *cpts,
371 u32 mult, u32 shift)
372{
373#ifdef CONFIG_TI_CPTS
374 int err, i;
375 unsigned long flags;
376
377 if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
378 pr_err("cpts: bad ptp filter\n");
379 return -EINVAL;
380 }
381 cpts->info = cpts_info;
382 cpts->clock = ptp_clock_register(&cpts->info, dev);
383 if (IS_ERR(cpts->clock)) {
384 err = PTR_ERR(cpts->clock);
385 cpts->clock = NULL;
386 return err;
387 }
388 spin_lock_init(&cpts->lock);
389
390 cpts->cc.read = cpts_systim_read;
391 cpts->cc.mask = CLOCKSOURCE_MASK(32);
392 cpts->cc_mult = mult;
393 cpts->cc.mult = mult;
394 cpts->cc.shift = shift;
395
396 INIT_LIST_HEAD(&cpts->events);
397 INIT_LIST_HEAD(&cpts->pool);
398 for (i = 0; i < CPTS_MAX_EVENTS; i++)
399 list_add(&cpts->pool_data[i].list, &cpts->pool);
400
401 cpts_clk_init(cpts);
402 cpts_write32(cpts, CPTS_EN, control);
403 cpts_write32(cpts, TS_PEND_EN, int_enable);
404
405 spin_lock_irqsave(&cpts->lock, flags);
406 timecounter_init(&cpts->tc, &cpts->cc, ktime_to_ns(ktime_get_real()));
407 spin_unlock_irqrestore(&cpts->lock, flags);
408
409 INIT_DELAYED_WORK(&cpts->overflow_work, cpts_overflow_check);
410 schedule_delayed_work(&cpts->overflow_work, CPTS_OVERFLOW_PERIOD);
411
412 cpts->phc_index = ptp_clock_index(cpts->clock);
413#endif
414 return 0;
415}
416
417void cpts_unregister(struct cpts *cpts)
418{
419#ifdef CONFIG_TI_CPTS
420 if (cpts->clock) {
421 ptp_clock_unregister(cpts->clock);
422 cancel_delayed_work_sync(&cpts->overflow_work);
423 }
424 if (cpts->refclk)
425 cpts_clk_release(cpts);
426#endif
427}
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
new file mode 100644
index 000000000000..e1bba3a496b2
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -0,0 +1,146 @@
1/*
2 * TI Common Platform Time Sync
3 *
4 * Copyright (C) 2012 Richard Cochran <richardcochran@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20#ifndef _TI_CPTS_H_
21#define _TI_CPTS_H_
22
23#include <linux/clk.h>
24#include <linux/clkdev.h>
25#include <linux/clocksource.h>
26#include <linux/device.h>
27#include <linux/list.h>
28#include <linux/ptp_clock_kernel.h>
29#include <linux/skbuff.h>
30
31struct cpsw_cpts {
32 u32 idver; /* Identification and version */
33 u32 control; /* Time sync control */
34 u32 res1;
35 u32 ts_push; /* Time stamp event push */
36 u32 ts_load_val; /* Time stamp load value */
37 u32 ts_load_en; /* Time stamp load enable */
38 u32 res2[2];
39 u32 intstat_raw; /* Time sync interrupt status raw */
40 u32 intstat_masked; /* Time sync interrupt status masked */
41 u32 int_enable; /* Time sync interrupt enable */
42 u32 res3;
43 u32 event_pop; /* Event interrupt pop */
44 u32 event_low; /* 32 Bit Event Time Stamp */
45 u32 event_high; /* Event Type Fields */
46};
47
48/* Bit definitions for the IDVER register */
49#define TX_IDENT_SHIFT (16) /* TX Identification Value */
50#define TX_IDENT_MASK (0xffff)
51#define RTL_VER_SHIFT (11) /* RTL Version Value */
52#define RTL_VER_MASK (0x1f)
53#define MAJOR_VER_SHIFT (8) /* Major Version Value */
54#define MAJOR_VER_MASK (0x7)
55#define MINOR_VER_SHIFT (0) /* Minor Version Value */
56#define MINOR_VER_MASK (0xff)
57
58/* Bit definitions for the CONTROL register */
59#define HW4_TS_PUSH_EN (1<<11) /* Hardware push 4 enable */
60#define HW3_TS_PUSH_EN (1<<10) /* Hardware push 3 enable */
61#define HW2_TS_PUSH_EN (1<<9) /* Hardware push 2 enable */
62#define HW1_TS_PUSH_EN (1<<8) /* Hardware push 1 enable */
63#define INT_TEST (1<<1) /* Interrupt Test */
64#define CPTS_EN (1<<0) /* Time Sync Enable */
65
66/*
67 * Definitions for the single-bit registers:
68 * TS_PUSH TS_LOAD_EN INTSTAT_RAW INTSTAT_MASKED INT_ENABLE EVENT_POP
69 */
70#define TS_PUSH (1<<0) /* Time stamp event push */
71#define TS_LOAD_EN (1<<0) /* Time Stamp Load */
72#define TS_PEND_RAW (1<<0) /* int read (before enable) */
73#define TS_PEND (1<<0) /* masked interrupt read (after enable) */
74#define TS_PEND_EN (1<<0) /* masked interrupt enable */
75#define EVENT_POP (1<<0) /* writing discards one event */
76
77/* Bit definitions for the EVENT_HIGH register */
78#define PORT_NUMBER_SHIFT (24) /* Indicates Ethernet port or HW pin */
79#define PORT_NUMBER_MASK (0x1f)
80#define EVENT_TYPE_SHIFT (20) /* Time sync event type */
81#define EVENT_TYPE_MASK (0xf)
82#define MESSAGE_TYPE_SHIFT (16) /* PTP message type */
83#define MESSAGE_TYPE_MASK (0xf)
84#define SEQUENCE_ID_SHIFT (0) /* PTP message sequence ID */
85#define SEQUENCE_ID_MASK (0xffff)
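/*
 * Example decode (illustrative): event_high = 0x01400007 unpacks with
 * the shift/mask pairs above to port 1, event type 4 (CPTS_EV_RX),
 * message type 0 (a PTPv2 Sync), and sequence ID 7.
 */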
86
87enum {
88 CPTS_EV_PUSH, /* Time Stamp Push Event */
89 CPTS_EV_ROLL, /* Time Stamp Rollover Event */
90 CPTS_EV_HALF, /* Time Stamp Half Rollover Event */
91 CPTS_EV_HW, /* Hardware Time Stamp Push Event */
92 CPTS_EV_RX, /* Ethernet Receive Event */
93 CPTS_EV_TX, /* Ethernet Transmit Event */
94};
95
96/* This covers any input clock up to about 500 MHz. */
97#define CPTS_OVERFLOW_PERIOD (HZ * 8)
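/*
 * Arithmetic behind that claim: a 32-bit counter at 500 MHz wraps
 * every 2^32 / 5e8 ~= 8.6 seconds, so reading it at least every
 * 8 seconds keeps each timecounter delta below one full wrap.
 */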
98
99#define CPTS_FIFO_DEPTH 16
100#define CPTS_MAX_EVENTS 32
101
102struct cpts_event {
103 struct list_head list;
104 unsigned long tmo;
105 u32 high;
106 u32 low;
107};
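/*
 * Events circulate between the fixed-size free pool and the active
 * events list in struct cpts below; recycling the pool_data entries
 * avoids any memory allocation while the FIFO is drained under the
 * spinlock.
 */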
108
109struct cpts {
110 struct cpsw_cpts __iomem *reg;
111 int tx_enable;
112 int rx_enable;
113#ifdef CONFIG_TI_CPTS
114 struct ptp_clock_info info;
115 struct ptp_clock *clock;
116 spinlock_t lock; /* protects time registers */
117 u32 cc_mult; /* for the nominal frequency */
118 struct cyclecounter cc;
119 struct timecounter tc;
120 struct delayed_work overflow_work;
121 int phc_index;
122 struct clk *refclk;
123 unsigned long freq;
124 struct list_head events;
125 struct list_head pool;
126 struct cpts_event pool_data[CPTS_MAX_EVENTS];
127#endif
128};
129
130#ifdef CONFIG_TI_CPTS
131extern void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
132extern void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb);
133#else
134static inline void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
135{
136}
137static inline void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
138{
139}
140#endif
141
142extern int cpts_register(struct device *dev, struct cpts *cpts,
143 u32 mult, u32 shift);
144extern void cpts_unregister(struct cpts *cpts);
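/*
 * A rough integration sketch (assumed, not spelled out in this patch):
 * the MAC driver calls cpts_register() at probe time and
 * cpts_unregister() on removal, passes each received skb to
 * cpts_rx_timestamp() while rx_enable is set, and calls
 * cpts_tx_timestamp() from its tx-completion path for skbs flagged
 * SKBTX_IN_PROGRESS.
 */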
145
146#endif