aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/igb
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/igb')
-rw-r--r--drivers/net/igb/Makefile4
-rw-r--r--drivers/net/igb/e1000_82575.c141
-rw-r--r--drivers/net/igb/e1000_82575.h64
-rw-r--r--drivers/net/igb/e1000_defines.h134
-rw-r--r--drivers/net/igb/e1000_hw.h202
-rw-r--r--drivers/net/igb/e1000_mac.c66
-rw-r--r--drivers/net/igb/e1000_mac.h9
-rw-r--r--drivers/net/igb/e1000_mbx.c447
-rw-r--r--drivers/net/igb/e1000_mbx.h77
-rw-r--r--drivers/net/igb/e1000_nvm.c44
-rw-r--r--drivers/net/igb/e1000_phy.c352
-rw-r--r--drivers/net/igb/e1000_phy.h3
-rw-r--r--drivers/net/igb/e1000_regs.h103
-rw-r--r--drivers/net/igb/igb.h88
-rw-r--r--drivers/net/igb/igb_ethtool.c206
-rw-r--r--drivers/net/igb/igb_main.c1764
16 files changed, 2506 insertions, 1198 deletions
diff --git a/drivers/net/igb/Makefile b/drivers/net/igb/Makefile
index 1927b3fd6f05..8372cb9a8c1a 100644
--- a/drivers/net/igb/Makefile
+++ b/drivers/net/igb/Makefile
@@ -1,7 +1,7 @@
1################################################################################ 1################################################################################
2# 2#
3# Intel 82575 PCI-Express Ethernet Linux driver 3# Intel 82575 PCI-Express Ethernet Linux driver
4# Copyright(c) 1999 - 2007 Intel Corporation. 4# Copyright(c) 1999 - 2009 Intel Corporation.
5# 5#
6# This program is free software; you can redistribute it and/or modify it 6# This program is free software; you can redistribute it and/or modify it
7# under the terms and conditions of the GNU General Public License, 7# under the terms and conditions of the GNU General Public License,
@@ -33,5 +33,5 @@
33obj-$(CONFIG_IGB) += igb.o 33obj-$(CONFIG_IGB) += igb.o
34 34
35igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \ 35igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \
36 e1000_mac.o e1000_nvm.o e1000_phy.o 36 e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o
37 37
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index 13ca73f96ec6..efd9be214885 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 - 2008 Intel Corporation. 4 Copyright(c) 2007-2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -62,17 +62,12 @@ static bool igb_sgmii_active_82575(struct e1000_hw *);
62static s32 igb_reset_init_script_82575(struct e1000_hw *); 62static s32 igb_reset_init_script_82575(struct e1000_hw *);
63static s32 igb_read_mac_addr_82575(struct e1000_hw *); 63static s32 igb_read_mac_addr_82575(struct e1000_hw *);
64 64
65
66struct e1000_dev_spec_82575 {
67 bool sgmii_active;
68};
69
70static s32 igb_get_invariants_82575(struct e1000_hw *hw) 65static s32 igb_get_invariants_82575(struct e1000_hw *hw)
71{ 66{
72 struct e1000_phy_info *phy = &hw->phy; 67 struct e1000_phy_info *phy = &hw->phy;
73 struct e1000_nvm_info *nvm = &hw->nvm; 68 struct e1000_nvm_info *nvm = &hw->nvm;
74 struct e1000_mac_info *mac = &hw->mac; 69 struct e1000_mac_info *mac = &hw->mac;
75 struct e1000_dev_spec_82575 *dev_spec; 70 struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575;
76 u32 eecd; 71 u32 eecd;
77 s32 ret_val; 72 s32 ret_val;
78 u16 size; 73 u16 size;
@@ -85,8 +80,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
85 mac->type = e1000_82575; 80 mac->type = e1000_82575;
86 break; 81 break;
87 case E1000_DEV_ID_82576: 82 case E1000_DEV_ID_82576:
83 case E1000_DEV_ID_82576_NS:
88 case E1000_DEV_ID_82576_FIBER: 84 case E1000_DEV_ID_82576_FIBER:
89 case E1000_DEV_ID_82576_SERDES: 85 case E1000_DEV_ID_82576_SERDES:
86 case E1000_DEV_ID_82576_QUAD_COPPER:
90 mac->type = e1000_82576; 87 mac->type = e1000_82576;
91 break; 88 break;
92 default: 89 default:
@@ -94,17 +91,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
94 break; 91 break;
95 } 92 }
96 93
97 /* MAC initialization */
98 hw->dev_spec_size = sizeof(struct e1000_dev_spec_82575);
99
100 /* Device-specific structure allocation */
101 hw->dev_spec = kzalloc(hw->dev_spec_size, GFP_KERNEL);
102
103 if (!hw->dev_spec)
104 return -ENOMEM;
105
106 dev_spec = (struct e1000_dev_spec_82575 *)hw->dev_spec;
107
108 /* Set media type */ 94 /* Set media type */
109 /* 95 /*
110 * The 82575 uses bits 22:23 for link mode. The mode can be changed 96 * The 82575 uses bits 22:23 for link mode. The mode can be changed
@@ -195,13 +181,13 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
195 181
196 /* PHY function pointers */ 182 /* PHY function pointers */
197 if (igb_sgmii_active_82575(hw)) { 183 if (igb_sgmii_active_82575(hw)) {
198 phy->ops.reset_phy = igb_phy_hw_reset_sgmii_82575; 184 phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
199 phy->ops.read_phy_reg = igb_read_phy_reg_sgmii_82575; 185 phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
200 phy->ops.write_phy_reg = igb_write_phy_reg_sgmii_82575; 186 phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
201 } else { 187 } else {
202 phy->ops.reset_phy = igb_phy_hw_reset; 188 phy->ops.reset = igb_phy_hw_reset;
203 phy->ops.read_phy_reg = igb_read_phy_reg_igp; 189 phy->ops.read_reg = igb_read_phy_reg_igp;
204 phy->ops.write_phy_reg = igb_write_phy_reg_igp; 190 phy->ops.write_reg = igb_write_phy_reg_igp;
205 } 191 }
206 192
207 /* Set phy->phy_addr and phy->id. */ 193 /* Set phy->phy_addr and phy->id. */
@@ -229,6 +215,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
229 return -E1000_ERR_PHY; 215 return -E1000_ERR_PHY;
230 } 216 }
231 217
218 /* if 82576 then initialize mailbox parameters */
219 if (mac->type == e1000_82576)
220 igb_init_mbx_params_pf(hw);
221
232 return 0; 222 return 0;
233} 223}
234 224
@@ -451,7 +441,7 @@ static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
451 * SFP documentation requires the following to configure the SPF module 441 * SFP documentation requires the following to configure the SPF module
452 * to work on SGMII. No further documentation is given. 442 * to work on SGMII. No further documentation is given.
453 */ 443 */
454 ret_val = hw->phy.ops.write_phy_reg(hw, 0x1B, 0x8084); 444 ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
455 if (ret_val) 445 if (ret_val)
456 goto out; 446 goto out;
457 447
@@ -480,28 +470,28 @@ static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
480 s32 ret_val; 470 s32 ret_val;
481 u16 data; 471 u16 data;
482 472
483 ret_val = phy->ops.read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); 473 ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
484 if (ret_val) 474 if (ret_val)
485 goto out; 475 goto out;
486 476
487 if (active) { 477 if (active) {
488 data |= IGP02E1000_PM_D0_LPLU; 478 data |= IGP02E1000_PM_D0_LPLU;
489 ret_val = phy->ops.write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, 479 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
490 data); 480 data);
491 if (ret_val) 481 if (ret_val)
492 goto out; 482 goto out;
493 483
494 /* When LPLU is enabled, we should disable SmartSpeed */ 484 /* When LPLU is enabled, we should disable SmartSpeed */
495 ret_val = phy->ops.read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, 485 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
496 &data); 486 &data);
497 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 487 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
498 ret_val = phy->ops.write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, 488 ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
499 data); 489 data);
500 if (ret_val) 490 if (ret_val)
501 goto out; 491 goto out;
502 } else { 492 } else {
503 data &= ~IGP02E1000_PM_D0_LPLU; 493 data &= ~IGP02E1000_PM_D0_LPLU;
504 ret_val = phy->ops.write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, 494 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
505 data); 495 data);
506 /* 496 /*
507 * LPLU and SmartSpeed are mutually exclusive. LPLU is used 497 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
@@ -510,24 +500,24 @@ static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
510 * SmartSpeed, so performance is maintained. 500 * SmartSpeed, so performance is maintained.
511 */ 501 */
512 if (phy->smart_speed == e1000_smart_speed_on) { 502 if (phy->smart_speed == e1000_smart_speed_on) {
513 ret_val = phy->ops.read_phy_reg(hw, 503 ret_val = phy->ops.read_reg(hw,
514 IGP01E1000_PHY_PORT_CONFIG, &data); 504 IGP01E1000_PHY_PORT_CONFIG, &data);
515 if (ret_val) 505 if (ret_val)
516 goto out; 506 goto out;
517 507
518 data |= IGP01E1000_PSCFR_SMART_SPEED; 508 data |= IGP01E1000_PSCFR_SMART_SPEED;
519 ret_val = phy->ops.write_phy_reg(hw, 509 ret_val = phy->ops.write_reg(hw,
520 IGP01E1000_PHY_PORT_CONFIG, data); 510 IGP01E1000_PHY_PORT_CONFIG, data);
521 if (ret_val) 511 if (ret_val)
522 goto out; 512 goto out;
523 } else if (phy->smart_speed == e1000_smart_speed_off) { 513 } else if (phy->smart_speed == e1000_smart_speed_off) {
524 ret_val = phy->ops.read_phy_reg(hw, 514 ret_val = phy->ops.read_reg(hw,
525 IGP01E1000_PHY_PORT_CONFIG, &data); 515 IGP01E1000_PHY_PORT_CONFIG, &data);
526 if (ret_val) 516 if (ret_val)
527 goto out; 517 goto out;
528 518
529 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 519 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
530 ret_val = phy->ops.write_phy_reg(hw, 520 ret_val = phy->ops.write_reg(hw,
531 IGP01E1000_PHY_PORT_CONFIG, data); 521 IGP01E1000_PHY_PORT_CONFIG, data);
532 if (ret_val) 522 if (ret_val)
533 goto out; 523 goto out;
@@ -803,7 +793,7 @@ static void igb_init_rx_addrs_82575(struct e1000_hw *hw, u16 rar_count)
803} 793}
804 794
805/** 795/**
806 * igb_update_mc_addr_list_82575 - Update Multicast addresses 796 * igb_update_mc_addr_list - Update Multicast addresses
807 * @hw: pointer to the HW structure 797 * @hw: pointer to the HW structure
808 * @mc_addr_list: array of multicast addresses to program 798 * @mc_addr_list: array of multicast addresses to program
809 * @mc_addr_count: number of multicast addresses to program 799 * @mc_addr_count: number of multicast addresses to program
@@ -815,9 +805,9 @@ static void igb_init_rx_addrs_82575(struct e1000_hw *hw, u16 rar_count)
815 * The parameter rar_count will usually be hw->mac.rar_entry_count 805 * The parameter rar_count will usually be hw->mac.rar_entry_count
816 * unless there are workarounds that change this. 806 * unless there are workarounds that change this.
817 **/ 807 **/
818void igb_update_mc_addr_list_82575(struct e1000_hw *hw, 808void igb_update_mc_addr_list(struct e1000_hw *hw,
819 u8 *mc_addr_list, u32 mc_addr_count, 809 u8 *mc_addr_list, u32 mc_addr_count,
820 u32 rar_used_count, u32 rar_count) 810 u32 rar_used_count, u32 rar_count)
821{ 811{
822 u32 hash_value; 812 u32 hash_value;
823 u32 i; 813 u32 i;
@@ -1051,7 +1041,7 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
1051 * depending on user settings. 1041 * depending on user settings.
1052 */ 1042 */
1053 hw_dbg("Forcing Speed and Duplex\n"); 1043 hw_dbg("Forcing Speed and Duplex\n");
1054 ret_val = igb_phy_force_speed_duplex(hw); 1044 ret_val = hw->phy.ops.force_speed_duplex(hw);
1055 if (ret_val) { 1045 if (ret_val) {
1056 hw_dbg("Error Forcing Speed and Duplex\n"); 1046 hw_dbg("Error Forcing Speed and Duplex\n");
1057 goto out; 1047 goto out;
@@ -1110,6 +1100,13 @@ static s32 igb_setup_fiber_serdes_link_82575(struct e1000_hw *hw)
1110 E1000_CTRL_SWDPIN1; 1100 E1000_CTRL_SWDPIN1;
1111 wr32(E1000_CTRL, reg); 1101 wr32(E1000_CTRL, reg);
1112 1102
1103 /* Power on phy for 82576 fiber adapters */
1104 if (hw->mac.type == e1000_82576) {
1105 reg = rd32(E1000_CTRL_EXT);
1106 reg &= ~E1000_CTRL_EXT_SDP7_DATA;
1107 wr32(E1000_CTRL_EXT, reg);
1108 }
1109
1113 /* Set switch control to serdes energy detect */ 1110 /* Set switch control to serdes energy detect */
1114 reg = rd32(E1000_CONNSW); 1111 reg = rd32(E1000_CONNSW);
1115 reg |= E1000_CONNSW_ENRGSRC; 1112 reg |= E1000_CONNSW_ENRGSRC;
@@ -1227,20 +1224,12 @@ out:
1227 **/ 1224 **/
1228static bool igb_sgmii_active_82575(struct e1000_hw *hw) 1225static bool igb_sgmii_active_82575(struct e1000_hw *hw)
1229{ 1226{
1230 struct e1000_dev_spec_82575 *dev_spec; 1227 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
1231 bool ret_val;
1232
1233 if (hw->mac.type != e1000_82575) {
1234 ret_val = false;
1235 goto out;
1236 }
1237
1238 dev_spec = (struct e1000_dev_spec_82575 *)hw->dev_spec;
1239 1228
1240 ret_val = dev_spec->sgmii_active; 1229 if (hw->mac.type != e1000_82575 && hw->mac.type != e1000_82576)
1230 return false;
1241 1231
1242out: 1232 return dev_spec->sgmii_active;
1243 return ret_val;
1244} 1233}
1245 1234
1246/** 1235/**
@@ -1430,6 +1419,44 @@ void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
1430 rd32(E1000_MPC); 1419 rd32(E1000_MPC);
1431} 1420}
1432 1421
1422/**
1423 * igb_vmdq_set_loopback_pf - enable or disable vmdq loopback
1424 * @hw: pointer to the hardware struct
1425 * @enable: state to enter, either enabled or disabled
1426 *
1427 * enables/disables L2 switch loopback functionality.
1428 **/
1429void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
1430{
1431 u32 dtxswc = rd32(E1000_DTXSWC);
1432
1433 if (enable)
1434 dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1435 else
1436 dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1437
1438 wr32(E1000_DTXSWC, dtxswc);
1439}
1440
1441/**
1442 * igb_vmdq_set_replication_pf - enable or disable vmdq replication
1443 * @hw: pointer to the hardware struct
1444 * @enable: state to enter, either enabled or disabled
1445 *
1446 * enables/disables replication of packets across multiple pools.
1447 **/
1448void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
1449{
1450 u32 vt_ctl = rd32(E1000_VT_CTL);
1451
1452 if (enable)
1453 vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
1454 else
1455 vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;
1456
1457 wr32(E1000_VT_CTL, vt_ctl);
1458}
1459
1433static struct e1000_mac_operations e1000_mac_ops_82575 = { 1460static struct e1000_mac_operations e1000_mac_ops_82575 = {
1434 .reset_hw = igb_reset_hw_82575, 1461 .reset_hw = igb_reset_hw_82575,
1435 .init_hw = igb_init_hw_82575, 1462 .init_hw = igb_init_hw_82575,
@@ -1440,16 +1467,16 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = {
1440}; 1467};
1441 1468
1442static struct e1000_phy_operations e1000_phy_ops_82575 = { 1469static struct e1000_phy_operations e1000_phy_ops_82575 = {
1443 .acquire_phy = igb_acquire_phy_82575, 1470 .acquire = igb_acquire_phy_82575,
1444 .get_cfg_done = igb_get_cfg_done_82575, 1471 .get_cfg_done = igb_get_cfg_done_82575,
1445 .release_phy = igb_release_phy_82575, 1472 .release = igb_release_phy_82575,
1446}; 1473};
1447 1474
1448static struct e1000_nvm_operations e1000_nvm_ops_82575 = { 1475static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
1449 .acquire_nvm = igb_acquire_nvm_82575, 1476 .acquire = igb_acquire_nvm_82575,
1450 .read_nvm = igb_read_nvm_eerd, 1477 .read = igb_read_nvm_eerd,
1451 .release_nvm = igb_release_nvm_82575, 1478 .release = igb_release_nvm_82575,
1452 .write_nvm = igb_write_nvm_spi, 1479 .write = igb_write_nvm_spi,
1453}; 1480};
1454 1481
1455const struct e1000_info e1000_82575_info = { 1482const struct e1000_info e1000_82575_info = {
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
index c1928b5efe1f..eaf977050368 100644
--- a/drivers/net/igb/e1000_82575.h
+++ b/drivers/net/igb/e1000_82575.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 - 2008 Intel Corporation. 4 Copyright(c) 2007-2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -28,7 +28,7 @@
28#ifndef _E1000_82575_H_ 28#ifndef _E1000_82575_H_
29#define _E1000_82575_H_ 29#define _E1000_82575_H_
30 30
31void igb_update_mc_addr_list_82575(struct e1000_hw*, u8*, u32, u32, u32); 31void igb_update_mc_addr_list(struct e1000_hw*, u8*, u32, u32, u32);
32extern void igb_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw); 32extern void igb_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw);
33extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw); 33extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
34 34
@@ -40,8 +40,11 @@ extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
40#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ 40#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */
41#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 41#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
42#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 42#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
43#define E1000_SRRCTL_DROP_EN 0x80000000
43 44
44#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002 45#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002
46#define E1000_MRQC_ENABLE_VMDQ 0x00000003
47#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005
45#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 48#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
46#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 49#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
47#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 50#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000
@@ -58,9 +61,6 @@ extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
58 E1000_EICR_RX_QUEUE2 | \ 61 E1000_EICR_RX_QUEUE2 | \
59 E1000_EICR_RX_QUEUE3) 62 E1000_EICR_RX_QUEUE3)
60 63
61#define E1000_EIMS_RX_QUEUE E1000_EICR_RX_QUEUE
62#define E1000_EIMS_TX_QUEUE E1000_EICR_TX_QUEUE
63
64/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ 64/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
65 65
66/* Receive Descriptor - Advanced */ 66/* Receive Descriptor - Advanced */
@@ -95,12 +95,6 @@ union e1000_adv_rx_desc {
95#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 95#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0
96#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 96#define E1000_RXDADV_HDRBUFLEN_SHIFT 5
97 97
98/* RSS Hash results */
99
100/* RSS Packet Types as indicated in the receive descriptor */
101#define E1000_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPV4 hdr present */
102#define E1000_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */
103
104/* Transmit Descriptor - Advanced */ 98/* Transmit Descriptor - Advanced */
105union e1000_adv_tx_desc { 99union e1000_adv_tx_desc {
106 struct { 100 struct {
@@ -116,6 +110,7 @@ union e1000_adv_tx_desc {
116}; 110};
117 111
118/* Adv Transmit Descriptor Config Masks */ 112/* Adv Transmit Descriptor Config Masks */
113#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */
119#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ 114#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
120#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ 115#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
121#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ 116#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
@@ -149,11 +144,8 @@ struct e1000_adv_tx_context_desc {
149#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */ 144#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */
150 145
151/* Direct Cache Access (DCA) definitions */ 146/* Direct Cache Access (DCA) definitions */
152#define E1000_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */ 147#define E1000_DCA_CTRL_DCA_MODE_DISABLE 0x01 /* DCA Disable */
153#define E1000_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */ 148#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
154
155#define E1000_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */
156#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
157 149
158#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ 150#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
159#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ 151#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
@@ -170,4 +162,44 @@ struct e1000_adv_tx_context_desc {
170#define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */ 162#define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */
171#define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */ 163#define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */
172 164
165#define MAX_NUM_VFS 8
166
167#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */
168
169/* Easy defines for setting default pool, would normally be left a zero */
170#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7
171#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT)
172
173/* Other useful VMD_CTL register defines */
174#define E1000_VT_CTL_IGNORE_MAC (1 << 28)
175#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29)
176#define E1000_VT_CTL_VM_REPL_EN (1 << 30)
177
178/* Per VM Offload register setup */
179#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */
180#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */
181#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */
182#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */
183#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */
184#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */
185#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */
186#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */
187#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
188#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */
189
190#define E1000_VLVF_ARRAY_SIZE 32
191#define E1000_VLVF_VLANID_MASK 0x00000FFF
192#define E1000_VLVF_POOLSEL_SHIFT 12
193#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT)
194#define E1000_VLVF_LVLAN 0x00100000
195#define E1000_VLVF_VLANID_ENABLE 0x80000000
196
197#define E1000_IOVCTL 0x05BBC
198#define E1000_IOVCTL_REUSE_VFQ 0x00000001
199
200#define ALL_QUEUES 0xFFFF
201
202void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool);
203void igb_vmdq_set_replication_pf(struct e1000_hw *, bool);
204
173#endif 205#endif
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index 40d03426c122..ad2d319d0f8b 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 - 2008 Intel Corporation. 4 Copyright(c) 2007-2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -42,33 +42,11 @@
42#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ 42#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
43#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ 43#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
44#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ 44#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
45#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
46#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
47#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
48#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
49#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
50#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
51#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
52#define E1000_WUFC_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */
53
54/* Wake Up Status */
55
56/* Wake Up Packet Length */
57
58/* Four Flexible Filters are supported */
59#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4
60
61/* Each Flexible Filter is at most 128 (0x80) bytes in length */
62#define E1000_FLEXIBLE_FILTER_SIZE_MAX 128
63
64 45
65/* Extended Device Control */ 46/* Extended Device Control */
66#define E1000_CTRL_EXT_GPI1_EN 0x00000002 /* Maps SDP5 to GPI1 */
67#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Defineable Pin 4 */
68#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Defineable Pin 5 */
69#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */ 47#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */
70#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */ 48/* Physical Func Reset Done Indication */
71#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ 49#define E1000_CTRL_EXT_PFRSTD 0x00004000
72#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 50#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
73#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 51#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
74#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 52#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
@@ -103,13 +81,7 @@
103#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ 81#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
104#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ 82#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
105#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ 83#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
106#define E1000_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ 84#define E1000_RXD_STAT_TS 0x10000 /* Pkt was time stamped */
107#define E1000_RXD_ERR_CE 0x01 /* CRC Error */
108#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
109#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */
110#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */
111#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
112#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
113 85
114#define E1000_RXDEXT_STATERR_CE 0x01000000 86#define E1000_RXDEXT_STATERR_CE 0x01000000
115#define E1000_RXDEXT_STATERR_SE 0x02000000 87#define E1000_RXDEXT_STATERR_SE 0x02000000
@@ -119,14 +91,6 @@
119#define E1000_RXDEXT_STATERR_IPE 0x40000000 91#define E1000_RXDEXT_STATERR_IPE 0x40000000
120#define E1000_RXDEXT_STATERR_RXE 0x80000000 92#define E1000_RXDEXT_STATERR_RXE 0x80000000
121 93
122/* mask to determine if packets should be dropped due to frame errors */
123#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
124 E1000_RXD_ERR_CE | \
125 E1000_RXD_ERR_SE | \
126 E1000_RXD_ERR_SEQ | \
127 E1000_RXD_ERR_CXE | \
128 E1000_RXD_ERR_RXE)
129
130/* Same mask, but for extended and packet split descriptors */ 94/* Same mask, but for extended and packet split descriptors */
131#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ 95#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
132 E1000_RXDEXT_STATERR_CE | \ 96 E1000_RXDEXT_STATERR_CE | \
@@ -145,16 +109,11 @@
145/* Management Control */ 109/* Management Control */
146#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ 110#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
147#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ 111#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
148#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */
149/* Enable Neighbor Discovery Filtering */ 112/* Enable Neighbor Discovery Filtering */
150#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ 113#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
151#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ 114#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
152/* Enable MAC address filtering */ 115/* Enable MAC address filtering */
153#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 116#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000
154/* Enable MNG packets to host memory */
155#define E1000_MANC_EN_MNG2HOST 0x00200000
156/* Enable IP address filtering */
157
158 117
159/* Receive Control */ 118/* Receive Control */
160#define E1000_RCTL_EN 0x00000002 /* enable */ 119#define E1000_RCTL_EN 0x00000002 /* enable */
@@ -162,14 +121,11 @@
162#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ 121#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */
163#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ 122#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */
164#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ 123#define E1000_RCTL_LPE 0x00000020 /* long packet enable */
165#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */
166#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ 124#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
167#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ 125#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
168#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ 126#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */
169#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ 127#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
170#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ 128#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
171#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */
172#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */
173#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ 129#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */
174#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ 130#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */
175#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ 131#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
@@ -226,11 +182,7 @@
226/* enable link status from external LINK_0 and LINK_1 pins */ 182/* enable link status from external LINK_0 and LINK_1 pins */
227#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ 183#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
228#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ 184#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
229#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */
230#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */
231#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ 185#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
232#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */
233#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */
234#define E1000_CTRL_RST 0x04000000 /* Global reset */ 186#define E1000_CTRL_RST 0x04000000 /* Global reset */
235#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ 187#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
236#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ 188#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
@@ -308,9 +260,7 @@
308#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX 260#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX
309 261
310/* LED Control */ 262/* LED Control */
311#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F
312#define E1000_LEDCTL_LED0_MODE_SHIFT 0 263#define E1000_LEDCTL_LED0_MODE_SHIFT 0
313#define E1000_LEDCTL_LED0_IVRT 0x00000040
314#define E1000_LEDCTL_LED0_BLINK 0x00000080 264#define E1000_LEDCTL_LED0_BLINK 0x00000080
315 265
316#define E1000_LEDCTL_MODE_LED_ON 0xE 266#define E1000_LEDCTL_MODE_LED_ON 0xE
@@ -357,12 +307,7 @@
357 307
358#define MAX_JUMBO_FRAME_SIZE 0x3F00 308#define MAX_JUMBO_FRAME_SIZE 0x3F00
359 309
360/* Extended Configuration Control and Size */
361#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040
362
363/* PBA constants */ 310/* PBA constants */
364#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */
365#define E1000_PBA_24K 0x0018
366#define E1000_PBA_34K 0x0022 311#define E1000_PBA_34K 0x0022
367#define E1000_PBA_64K 0x0040 /* 64KB */ 312#define E1000_PBA_64K 0x0040 /* 64KB */
368 313
@@ -378,41 +323,15 @@
378 323
379/* Interrupt Cause Read */ 324/* Interrupt Cause Read */
380#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ 325#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
381#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */
382#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ 326#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
383#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ 327#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */
384#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */ 328#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */
385#define E1000_ICR_RXO 0x00000040 /* rx overrun */
386#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ 329#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */
387#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */ 330#define E1000_ICR_VMMB 0x00000100 /* VM MB event */
388#define E1000_ICR_RXCFG 0x00000400 /* Rx /c/ ordered set */
389#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */
390#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */
391#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */
392#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */
393#define E1000_ICR_TXD_LOW 0x00008000
394#define E1000_ICR_SRPD 0x00010000
395#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */
396#define E1000_ICR_MNG 0x00040000 /* Manageability event */
397#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */
398/* If this bit asserted, the driver should claim the interrupt */ 331/* If this bit asserted, the driver should claim the interrupt */
399#define E1000_ICR_INT_ASSERTED 0x80000000 332#define E1000_ICR_INT_ASSERTED 0x80000000
400/* queue 0 Rx descriptor FIFO parity error */
401#define E1000_ICR_RXD_FIFO_PAR0 0x00100000
402/* queue 0 Tx descriptor FIFO parity error */
403#define E1000_ICR_TXD_FIFO_PAR0 0x00200000
404/* host arb read buffer parity error */
405#define E1000_ICR_HOST_ARB_PAR 0x00400000
406#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */
407/* queue 1 Rx descriptor FIFO parity error */
408#define E1000_ICR_RXD_FIFO_PAR1 0x01000000
409/* queue 1 Tx descriptor FIFO parity error */
410#define E1000_ICR_TXD_FIFO_PAR1 0x02000000
411/* FW changed the status of DISSW bit in the FWSM */
412#define E1000_ICR_DSW 0x00000020
413/* LAN connected device generates an interrupt */ 333/* LAN connected device generates an interrupt */
414#define E1000_ICR_PHYINT 0x00001000 334#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */
415#define E1000_ICR_EPRST 0x00100000 /* ME handware reset occurs */
416 335
417/* Extended Interrupt Cause Read */ 336/* Extended Interrupt Cause Read */
418#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */ 337#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */
@@ -423,7 +342,6 @@
423#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */ 342#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */
424#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */ 343#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */
425#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */ 344#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */
426#define E1000_EICR_TCP_TIMER 0x40000000 /* TCP Timer */
427#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ 345#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
428/* TCP Timer */ 346/* TCP Timer */
429 347
@@ -441,17 +359,19 @@
441 E1000_IMS_TXDW | \ 359 E1000_IMS_TXDW | \
442 E1000_IMS_RXDMT0 | \ 360 E1000_IMS_RXDMT0 | \
443 E1000_IMS_RXSEQ | \ 361 E1000_IMS_RXSEQ | \
444 E1000_IMS_LSC) 362 E1000_IMS_LSC | \
363 E1000_IMS_DOUTSYNC)
445 364
446/* Interrupt Mask Set */ 365/* Interrupt Mask Set */
447#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ 366#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
448#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ 367#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
368#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */
449#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ 369#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
450#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ 370#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
451#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ 371#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
372#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */
452 373
453/* Extended Interrupt Mask Set */ 374/* Extended Interrupt Mask Set */
454#define E1000_EIMS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */
455#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ 375#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */
456 376
457/* Interrupt Cause Set */ 377/* Interrupt Cause Set */
@@ -481,6 +401,10 @@
481 * manageability enabled, allowing us room for 15 multicast addresses. 401 * manageability enabled, allowing us room for 15 multicast addresses.
482 */ 402 */
483#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ 403#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */
404#define E1000_RAL_MAC_ADDR_LEN 4
405#define E1000_RAH_MAC_ADDR_LEN 2
406#define E1000_RAH_POOL_MASK 0x03FC0000
407#define E1000_RAH_POOL_1 0x00040000
484 408
485/* Error Codes */ 409/* Error Codes */
486#define E1000_ERR_NVM 1 410#define E1000_ERR_NVM 1
@@ -490,10 +414,10 @@
490#define E1000_ERR_MAC_INIT 5 414#define E1000_ERR_MAC_INIT 5
491#define E1000_ERR_RESET 9 415#define E1000_ERR_RESET 9
492#define E1000_ERR_MASTER_REQUESTS_PENDING 10 416#define E1000_ERR_MASTER_REQUESTS_PENDING 10
493#define E1000_ERR_HOST_INTERFACE_COMMAND 11
494#define E1000_BLK_PHY_RESET 12 417#define E1000_BLK_PHY_RESET 12
495#define E1000_ERR_SWFW_SYNC 13 418#define E1000_ERR_SWFW_SYNC 13
496#define E1000_NOT_IMPLEMENTED 14 419#define E1000_NOT_IMPLEMENTED 14
420#define E1000_ERR_MBX 15
497 421
498/* Loop limit on how long we wait for auto-negotiation to complete */ 422/* Loop limit on how long we wait for auto-negotiation to complete */
499#define COPPER_LINK_UP_LIMIT 10 423#define COPPER_LINK_UP_LIMIT 10
@@ -510,30 +434,9 @@
510/* Flow Control */ 434/* Flow Control */
511#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ 435#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
512 436
513/* Transmit Configuration Word */
514#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */
515
516/* Receive Configuration Word */
517
518/* PCI Express Control */
519#define E1000_GCR_RXD_NO_SNOOP 0x00000001
520#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002
521#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004
522#define E1000_GCR_TXD_NO_SNOOP 0x00000008
523#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010
524#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020
525
526#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \
527 E1000_GCR_RXDSCW_NO_SNOOP | \
528 E1000_GCR_RXDSCR_NO_SNOOP | \
529 E1000_GCR_TXD_NO_SNOOP | \
530 E1000_GCR_TXDSCW_NO_SNOOP | \
531 E1000_GCR_TXDSCR_NO_SNOOP)
532
533/* PHY Control Register */ 437/* PHY Control Register */
534#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ 438#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
535#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ 439#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
536#define MII_CR_POWER_DOWN 0x0800 /* Power down */
537#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ 440#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
538#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ 441#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
539#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ 442#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
@@ -609,6 +512,7 @@
609#define NVM_ID_LED_SETTINGS 0x0004 512#define NVM_ID_LED_SETTINGS 0x0004
610/* For SERDES output amplitude adjustment. */ 513/* For SERDES output amplitude adjustment. */
611#define NVM_INIT_CONTROL2_REG 0x000F 514#define NVM_INIT_CONTROL2_REG 0x000F
515#define NVM_INIT_CONTROL3_PORT_B 0x0014
612#define NVM_INIT_CONTROL3_PORT_A 0x0024 516#define NVM_INIT_CONTROL3_PORT_A 0x0024
613#define NVM_ALT_MAC_ADDR_PTR 0x0037 517#define NVM_ALT_MAC_ADDR_PTR 0x0037
614#define NVM_CHECKSUM_REG 0x003F 518#define NVM_CHECKSUM_REG 0x003F
@@ -663,10 +567,8 @@
663#define IGP_LED3_MODE 0x07000000 567#define IGP_LED3_MODE 0x07000000
664 568
665/* PCI/PCI-X/PCI-EX Config space */ 569/* PCI/PCI-X/PCI-EX Config space */
666#define PCI_HEADER_TYPE_REGISTER 0x0E
667#define PCIE_LINK_STATUS 0x12 570#define PCIE_LINK_STATUS 0x12
668 571
669#define PCI_HEADER_TYPE_MULTIFUNC 0x80
670#define PCIE_LINK_WIDTH_MASK 0x3F0 572#define PCIE_LINK_WIDTH_MASK 0x3F0
671#define PCIE_LINK_WIDTH_SHIFT 4 573#define PCIE_LINK_WIDTH_SHIFT 4
672 574
@@ -763,4 +665,8 @@
763#define E1000_GEN_CTL_ADDRESS_SHIFT 8 665#define E1000_GEN_CTL_ADDRESS_SHIFT 8
764#define E1000_GEN_POLL_TIMEOUT 640 666#define E1000_GEN_POLL_TIMEOUT 640
765 667
668#define E1000_VFTA_ENTRY_SHIFT 5
669#define E1000_VFTA_ENTRY_MASK 0x7F
670#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
671
766#endif 672#endif
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index 99504a600a80..68aac20c31ca 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation. 4 Copyright(c) 2007-2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -32,7 +32,6 @@
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/io.h> 33#include <linux/io.h>
34 34
35#include "e1000_mac.h"
36#include "e1000_regs.h" 35#include "e1000_regs.h"
37#include "e1000_defines.h" 36#include "e1000_defines.h"
38 37
@@ -41,6 +40,8 @@ struct e1000_hw;
41#define E1000_DEV_ID_82576 0x10C9 40#define E1000_DEV_ID_82576 0x10C9
42#define E1000_DEV_ID_82576_FIBER 0x10E6 41#define E1000_DEV_ID_82576_FIBER 0x10E6
43#define E1000_DEV_ID_82576_SERDES 0x10E7 42#define E1000_DEV_ID_82576_SERDES 0x10E7
43#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8
44#define E1000_DEV_ID_82576_NS 0x150A
44#define E1000_DEV_ID_82575EB_COPPER 0x10A7 45#define E1000_DEV_ID_82575EB_COPPER 0x10A7
45#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9 46#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9
46#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6 47#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6
@@ -144,144 +145,6 @@ enum e1000_fc_type {
144 e1000_fc_default = 0xFF 145 e1000_fc_default = 0xFF
145}; 146};
146 147
147
148/* Receive Descriptor */
149struct e1000_rx_desc {
150 __le64 buffer_addr; /* Address of the descriptor's data buffer */
151 __le16 length; /* Length of data DMAed into data buffer */
152 __le16 csum; /* Packet checksum */
153 u8 status; /* Descriptor status */
154 u8 errors; /* Descriptor Errors */
155 __le16 special;
156};
157
158/* Receive Descriptor - Extended */
159union e1000_rx_desc_extended {
160 struct {
161 __le64 buffer_addr;
162 __le64 reserved;
163 } read;
164 struct {
165 struct {
166 __le32 mrq; /* Multiple Rx Queues */
167 union {
168 __le32 rss; /* RSS Hash */
169 struct {
170 __le16 ip_id; /* IP id */
171 __le16 csum; /* Packet Checksum */
172 } csum_ip;
173 } hi_dword;
174 } lower;
175 struct {
176 __le32 status_error; /* ext status/error */
177 __le16 length;
178 __le16 vlan; /* VLAN tag */
179 } upper;
180 } wb; /* writeback */
181};
182
183#define MAX_PS_BUFFERS 4
184/* Receive Descriptor - Packet Split */
185union e1000_rx_desc_packet_split {
186 struct {
187 /* one buffer for protocol header(s), three data buffers */
188 __le64 buffer_addr[MAX_PS_BUFFERS];
189 } read;
190 struct {
191 struct {
192 __le32 mrq; /* Multiple Rx Queues */
193 union {
194 __le32 rss; /* RSS Hash */
195 struct {
196 __le16 ip_id; /* IP id */
197 __le16 csum; /* Packet Checksum */
198 } csum_ip;
199 } hi_dword;
200 } lower;
201 struct {
202 __le32 status_error; /* ext status/error */
203 __le16 length0; /* length of buffer 0 */
204 __le16 vlan; /* VLAN tag */
205 } middle;
206 struct {
207 __le16 header_status;
208 __le16 length[3]; /* length of buffers 1-3 */
209 } upper;
210 __le64 reserved;
211 } wb; /* writeback */
212};
213
214/* Transmit Descriptor */
215struct e1000_tx_desc {
216 __le64 buffer_addr; /* Address of the descriptor's data buffer */
217 union {
218 __le32 data;
219 struct {
220 __le16 length; /* Data buffer length */
221 u8 cso; /* Checksum offset */
222 u8 cmd; /* Descriptor control */
223 } flags;
224 } lower;
225 union {
226 __le32 data;
227 struct {
228 u8 status; /* Descriptor status */
229 u8 css; /* Checksum start */
230 __le16 special;
231 } fields;
232 } upper;
233};
234
235/* Offload Context Descriptor */
236struct e1000_context_desc {
237 union {
238 __le32 ip_config;
239 struct {
240 u8 ipcss; /* IP checksum start */
241 u8 ipcso; /* IP checksum offset */
242 __le16 ipcse; /* IP checksum end */
243 } ip_fields;
244 } lower_setup;
245 union {
246 __le32 tcp_config;
247 struct {
248 u8 tucss; /* TCP checksum start */
249 u8 tucso; /* TCP checksum offset */
250 __le16 tucse; /* TCP checksum end */
251 } tcp_fields;
252 } upper_setup;
253 __le32 cmd_and_length;
254 union {
255 __le32 data;
256 struct {
257 u8 status; /* Descriptor status */
258 u8 hdr_len; /* Header length */
259 __le16 mss; /* Maximum segment size */
260 } fields;
261 } tcp_seg_setup;
262};
263
264/* Offload data descriptor */
265struct e1000_data_desc {
266 __le64 buffer_addr; /* Address of the descriptor's buffer address */
267 union {
268 __le32 data;
269 struct {
270 __le16 length; /* Data buffer length */
271 u8 typ_len_ext;
272 u8 cmd;
273 } flags;
274 } lower;
275 union {
276 __le32 data;
277 struct {
278 u8 status; /* Descriptor status */
279 u8 popts; /* Packet Options */
280 __le16 special;
281 } fields;
282 } upper;
283};
284
285/* Statistics counters collected by the MAC */ 148/* Statistics counters collected by the MAC */
286struct e1000_hw_stats { 149struct e1000_hw_stats {
287 u64 crcerrs; 150 u64 crcerrs;
@@ -359,6 +222,7 @@ struct e1000_hw_stats {
359 u64 lenerrs; 222 u64 lenerrs;
360 u64 scvpc; 223 u64 scvpc;
361 u64 hrmpc; 224 u64 hrmpc;
225 u64 doosync;
362}; 226};
363 227
364struct e1000_phy_stats { 228struct e1000_phy_stats {
@@ -409,6 +273,7 @@ struct e1000_host_mng_command_info {
409#include "e1000_mac.h" 273#include "e1000_mac.h"
410#include "e1000_phy.h" 274#include "e1000_phy.h"
411#include "e1000_nvm.h" 275#include "e1000_nvm.h"
276#include "e1000_mbx.h"
412 277
413struct e1000_mac_operations { 278struct e1000_mac_operations {
414 s32 (*check_for_link)(struct e1000_hw *); 279 s32 (*check_for_link)(struct e1000_hw *);
@@ -422,25 +287,25 @@ struct e1000_mac_operations {
422}; 287};
423 288
424struct e1000_phy_operations { 289struct e1000_phy_operations {
425 s32 (*acquire_phy)(struct e1000_hw *); 290 s32 (*acquire)(struct e1000_hw *);
426 s32 (*check_reset_block)(struct e1000_hw *); 291 s32 (*check_reset_block)(struct e1000_hw *);
427 s32 (*force_speed_duplex)(struct e1000_hw *); 292 s32 (*force_speed_duplex)(struct e1000_hw *);
428 s32 (*get_cfg_done)(struct e1000_hw *hw); 293 s32 (*get_cfg_done)(struct e1000_hw *hw);
429 s32 (*get_cable_length)(struct e1000_hw *); 294 s32 (*get_cable_length)(struct e1000_hw *);
430 s32 (*get_phy_info)(struct e1000_hw *); 295 s32 (*get_phy_info)(struct e1000_hw *);
431 s32 (*read_phy_reg)(struct e1000_hw *, u32, u16 *); 296 s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
432 void (*release_phy)(struct e1000_hw *); 297 void (*release)(struct e1000_hw *);
433 s32 (*reset_phy)(struct e1000_hw *); 298 s32 (*reset)(struct e1000_hw *);
434 s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); 299 s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
435 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); 300 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
436 s32 (*write_phy_reg)(struct e1000_hw *, u32, u16); 301 s32 (*write_reg)(struct e1000_hw *, u32, u16);
437}; 302};
438 303
439struct e1000_nvm_operations { 304struct e1000_nvm_operations {
440 s32 (*acquire_nvm)(struct e1000_hw *); 305 s32 (*acquire)(struct e1000_hw *);
441 s32 (*read_nvm)(struct e1000_hw *, u16, u16, u16 *); 306 s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
442 void (*release_nvm)(struct e1000_hw *); 307 void (*release)(struct e1000_hw *);
443 s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *); 308 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
444}; 309};
445 310
446struct e1000_info { 311struct e1000_info {
@@ -483,7 +348,6 @@ struct e1000_mac_info {
483 bool asf_firmware_present; 348 bool asf_firmware_present;
484 bool autoneg; 349 bool autoneg;
485 bool autoneg_failed; 350 bool autoneg_failed;
486 bool disable_av;
487 bool disable_hw_init_bits; 351 bool disable_hw_init_bits;
488 bool get_link_status; 352 bool get_link_status;
489 bool ifs_params_forced; 353 bool ifs_params_forced;
@@ -565,9 +429,40 @@ struct e1000_fc_info {
565 enum e1000_fc_type original_type; 429 enum e1000_fc_type original_type;
566}; 430};
567 431
432struct e1000_mbx_operations {
433 s32 (*init_params)(struct e1000_hw *hw);
434 s32 (*read)(struct e1000_hw *, u32 *, u16, u16);
435 s32 (*write)(struct e1000_hw *, u32 *, u16, u16);
436 s32 (*read_posted)(struct e1000_hw *, u32 *, u16, u16);
437 s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16);
438 s32 (*check_for_msg)(struct e1000_hw *, u16);
439 s32 (*check_for_ack)(struct e1000_hw *, u16);
440 s32 (*check_for_rst)(struct e1000_hw *, u16);
441};
442
443struct e1000_mbx_stats {
444 u32 msgs_tx;
445 u32 msgs_rx;
446
447 u32 acks;
448 u32 reqs;
449 u32 rsts;
450};
451
452struct e1000_mbx_info {
453 struct e1000_mbx_operations ops;
454 struct e1000_mbx_stats stats;
455 u32 timeout;
456 u32 usec_delay;
457 u16 size;
458};
459
460struct e1000_dev_spec_82575 {
461 bool sgmii_active;
462};
463
568struct e1000_hw { 464struct e1000_hw {
569 void *back; 465 void *back;
570 void *dev_spec;
571 466
572 u8 __iomem *hw_addr; 467 u8 __iomem *hw_addr;
573 u8 __iomem *flash_address; 468 u8 __iomem *flash_address;
@@ -578,9 +473,12 @@ struct e1000_hw {
578 struct e1000_phy_info phy; 473 struct e1000_phy_info phy;
579 struct e1000_nvm_info nvm; 474 struct e1000_nvm_info nvm;
580 struct e1000_bus_info bus; 475 struct e1000_bus_info bus;
476 struct e1000_mbx_info mbx;
581 struct e1000_host_mng_dhcp_cookie mng_cookie; 477 struct e1000_host_mng_dhcp_cookie mng_cookie;
582 478
583 u32 dev_spec_size; 479 union {
480 struct e1000_dev_spec_82575 _82575;
481 } dev_spec;
584 482
585 u16 device_id; 483 u16 device_id;
586 u16 subsystem_vendor_id; 484 u16 subsystem_vendor_id;
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index 97f0049a5d6b..f4c315b5a900 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation. 4 Copyright(c) 2007-2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -37,19 +37,6 @@
37static s32 igb_set_default_fc(struct e1000_hw *hw); 37static s32 igb_set_default_fc(struct e1000_hw *hw);
38static s32 igb_set_fc_watermarks(struct e1000_hw *hw); 38static s32 igb_set_fc_watermarks(struct e1000_hw *hw);
39 39
40/**
41 * igb_remove_device - Free device specific structure
42 * @hw: pointer to the HW structure
43 *
44 * If a device specific structure was allocated, this function will
45 * free it.
46 **/
47void igb_remove_device(struct e1000_hw *hw)
48{
49 /* Freeing the dev_spec member of e1000_hw structure */
50 kfree(hw->dev_spec);
51}
52
53static s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) 40static s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
54{ 41{
55 struct igb_adapter *adapter = hw->back; 42 struct igb_adapter *adapter = hw->back;
@@ -131,6 +118,37 @@ void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
131} 118}
132 119
133/** 120/**
121 * igb_vfta_set - enable or disable vlan in VLAN filter table
122 * @hw: pointer to the HW structure
123 * @vid: VLAN id to add or remove
124 * @add: if true add filter, if false remove
125 *
126 * Sets or clears a bit in the VLAN filter table array based on VLAN id
127 * and if we are adding or removing the filter
128 **/
129s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add)
130{
131 u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK;
132 u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
133 u32 vfta = array_rd32(E1000_VFTA, index);
134 s32 ret_val = 0;
135
136 /* bit was set/cleared before we started */
137 if ((!!(vfta & mask)) == add) {
138 ret_val = -E1000_ERR_CONFIG;
139 } else {
140 if (add)
141 vfta |= mask;
142 else
143 vfta &= ~mask;
144 }
145
146 igb_write_vfta(hw, index, vfta);
147
148 return ret_val;
149}
150
151/**
134 * igb_check_alt_mac_addr - Check for alternate MAC addr 152 * igb_check_alt_mac_addr - Check for alternate MAC addr
135 * @hw: pointer to the HW structure 153 * @hw: pointer to the HW structure
136 * 154 *
@@ -148,7 +166,7 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
148 u16 offset, nvm_alt_mac_addr_offset, nvm_data; 166 u16 offset, nvm_alt_mac_addr_offset, nvm_data;
149 u8 alt_mac_addr[ETH_ALEN]; 167 u8 alt_mac_addr[ETH_ALEN];
150 168
151 ret_val = hw->nvm.ops.read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, 169 ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
152 &nvm_alt_mac_addr_offset); 170 &nvm_alt_mac_addr_offset);
153 if (ret_val) { 171 if (ret_val) {
154 hw_dbg("NVM Read Error\n"); 172 hw_dbg("NVM Read Error\n");
@@ -165,7 +183,7 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
165 183
166 for (i = 0; i < ETH_ALEN; i += 2) { 184 for (i = 0; i < ETH_ALEN; i += 2) {
167 offset = nvm_alt_mac_addr_offset + (i >> 1); 185 offset = nvm_alt_mac_addr_offset + (i >> 1);
168 ret_val = hw->nvm.ops.read_nvm(hw, offset, 1, &nvm_data); 186 ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
169 if (ret_val) { 187 if (ret_val) {
170 hw_dbg("NVM Read Error\n"); 188 hw_dbg("NVM Read Error\n");
171 goto out; 189 goto out;
@@ -213,7 +231,8 @@ void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
213 231
214 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); 232 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
215 233
216 if (!hw->mac.disable_av) 234 /* If MAC address zero, no need to set the AV bit */
235 if (rar_low || rar_high)
217 rar_high |= E1000_RAH_AV; 236 rar_high |= E1000_RAH_AV;
218 237
219 wr32(E1000_RAL(index), rar_low); 238 wr32(E1000_RAL(index), rar_low);
@@ -588,8 +607,7 @@ static s32 igb_set_default_fc(struct e1000_hw *hw)
588 * control setting, then the variable hw->fc will 607 * control setting, then the variable hw->fc will
589 * be initialized based on a value in the EEPROM. 608 * be initialized based on a value in the EEPROM.
590 */ 609 */
591 ret_val = hw->nvm.ops.read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, 610 ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
592 &nvm_data);
593 611
594 if (ret_val) { 612 if (ret_val) {
595 hw_dbg("NVM Read Error\n"); 613 hw_dbg("NVM Read Error\n");
@@ -720,11 +738,11 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
720 * has completed. We read this twice because this reg has 738 * has completed. We read this twice because this reg has
721 * some "sticky" (latched) bits. 739 * some "sticky" (latched) bits.
722 */ 740 */
723 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_STATUS, 741 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
724 &mii_status_reg); 742 &mii_status_reg);
725 if (ret_val) 743 if (ret_val)
726 goto out; 744 goto out;
727 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_STATUS, 745 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
728 &mii_status_reg); 746 &mii_status_reg);
729 if (ret_val) 747 if (ret_val)
730 goto out; 748 goto out;
@@ -742,11 +760,11 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
742 * Page Ability Register (Address 5) to determine how 760 * Page Ability Register (Address 5) to determine how
743 * flow control was negotiated. 761 * flow control was negotiated.
744 */ 762 */
745 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_AUTONEG_ADV, 763 ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
746 &mii_nway_adv_reg); 764 &mii_nway_adv_reg);
747 if (ret_val) 765 if (ret_val)
748 goto out; 766 goto out;
749 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_LP_ABILITY, 767 ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
750 &mii_nway_lp_ability_reg); 768 &mii_nway_lp_ability_reg);
751 if (ret_val) 769 if (ret_val)
752 goto out; 770 goto out;
@@ -1041,7 +1059,7 @@ static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
1041{ 1059{
1042 s32 ret_val; 1060 s32 ret_val;
1043 1061
1044 ret_val = hw->nvm.ops.read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); 1062 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
1045 if (ret_val) { 1063 if (ret_val) {
1046 hw_dbg("NVM Read Error\n"); 1064 hw_dbg("NVM Read Error\n");
1047 goto out; 1065 goto out;
diff --git a/drivers/net/igb/e1000_mac.h b/drivers/net/igb/e1000_mac.h
index cbee6af7d912..a34de5269637 100644
--- a/drivers/net/igb/e1000_mac.h
+++ b/drivers/net/igb/e1000_mac.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation. 4 Copyright(c) 2007-2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -58,12 +58,12 @@ s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
58 58
59void igb_clear_hw_cntrs_base(struct e1000_hw *hw); 59void igb_clear_hw_cntrs_base(struct e1000_hw *hw);
60void igb_clear_vfta(struct e1000_hw *hw); 60void igb_clear_vfta(struct e1000_hw *hw);
61s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add);
61void igb_config_collision_dist(struct e1000_hw *hw); 62void igb_config_collision_dist(struct e1000_hw *hw);
62void igb_mta_set(struct e1000_hw *hw, u32 hash_value); 63void igb_mta_set(struct e1000_hw *hw, u32 hash_value);
63void igb_put_hw_semaphore(struct e1000_hw *hw); 64void igb_put_hw_semaphore(struct e1000_hw *hw);
64void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); 65void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
65s32 igb_check_alt_mac_addr(struct e1000_hw *hw); 66s32 igb_check_alt_mac_addr(struct e1000_hw *hw);
66void igb_remove_device(struct e1000_hw *hw);
67void igb_reset_adaptive(struct e1000_hw *hw); 67void igb_reset_adaptive(struct e1000_hw *hw);
68void igb_update_adaptive(struct e1000_hw *hw); 68void igb_update_adaptive(struct e1000_hw *hw);
69void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value); 69void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
@@ -83,13 +83,8 @@ enum e1000_mng_mode {
83#define E1000_FWSM_MODE_MASK 0xE 83#define E1000_FWSM_MODE_MASK 0xE
84#define E1000_FWSM_MODE_SHIFT 1 84#define E1000_FWSM_MODE_SHIFT 1
85 85
86#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10
87#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 86#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
88 87
89#define E1000_HICR_EN 0x01 /* Enable bit - RO */
90/* Driver sets this bit when done to put command in RAM */
91#define E1000_HICR_C 0x02
92
93extern void e1000_init_function_pointers_82575(struct e1000_hw *hw); 88extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
94extern u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr); 89extern u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
95 90
diff --git a/drivers/net/igb/e1000_mbx.c b/drivers/net/igb/e1000_mbx.c
new file mode 100644
index 000000000000..fe71c7ddaa05
--- /dev/null
+++ b/drivers/net/igb/e1000_mbx.c
@@ -0,0 +1,447 @@
1/*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#include "e1000_mbx.h"
29
30/**
31 * igb_read_mbx - Reads a message from the mailbox
32 * @hw: pointer to the HW structure
33 * @msg: The message buffer
34 * @size: Length of buffer
35 * @mbx_id: id of mailbox to read
36 *
37 * returns SUCCESS if it successfuly read message from buffer
38 **/
39s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
40{
41 struct e1000_mbx_info *mbx = &hw->mbx;
42 s32 ret_val = -E1000_ERR_MBX;
43
44 /* limit read to size of mailbox */
45 if (size > mbx->size)
46 size = mbx->size;
47
48 if (mbx->ops.read)
49 ret_val = mbx->ops.read(hw, msg, size, mbx_id);
50
51 return ret_val;
52}
53
54/**
55 * igb_write_mbx - Write a message to the mailbox
56 * @hw: pointer to the HW structure
57 * @msg: The message buffer
58 * @size: Length of buffer
59 * @mbx_id: id of mailbox to write
60 *
61 * returns SUCCESS if it successfully copied message into the buffer
62 **/
63s32 igb_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
64{
65 struct e1000_mbx_info *mbx = &hw->mbx;
66 s32 ret_val = 0;
67
68 if (size > mbx->size)
69 ret_val = -E1000_ERR_MBX;
70
71 else if (mbx->ops.write)
72 ret_val = mbx->ops.write(hw, msg, size, mbx_id);
73
74 return ret_val;
75}
76
77/**
78 * igb_check_for_msg - checks to see if someone sent us mail
79 * @hw: pointer to the HW structure
80 * @mbx_id: id of mailbox to check
81 *
82 * returns SUCCESS if the Status bit was found or else ERR_MBX
83 **/
84s32 igb_check_for_msg(struct e1000_hw *hw, u16 mbx_id)
85{
86 struct e1000_mbx_info *mbx = &hw->mbx;
87 s32 ret_val = -E1000_ERR_MBX;
88
89 if (mbx->ops.check_for_msg)
90 ret_val = mbx->ops.check_for_msg(hw, mbx_id);
91
92 return ret_val;
93}
94
95/**
96 * igb_check_for_ack - checks to see if someone sent us ACK
97 * @hw: pointer to the HW structure
98 * @mbx_id: id of mailbox to check
99 *
100 * returns SUCCESS if the Status bit was found or else ERR_MBX
101 **/
102s32 igb_check_for_ack(struct e1000_hw *hw, u16 mbx_id)
103{
104 struct e1000_mbx_info *mbx = &hw->mbx;
105 s32 ret_val = -E1000_ERR_MBX;
106
107 if (mbx->ops.check_for_ack)
108 ret_val = mbx->ops.check_for_ack(hw, mbx_id);
109
110 return ret_val;
111}
112
113/**
114 * igb_check_for_rst - checks to see if other side has reset
115 * @hw: pointer to the HW structure
116 * @mbx_id: id of mailbox to check
117 *
118 * returns SUCCESS if the Status bit was found or else ERR_MBX
119 **/
120s32 igb_check_for_rst(struct e1000_hw *hw, u16 mbx_id)
121{
122 struct e1000_mbx_info *mbx = &hw->mbx;
123 s32 ret_val = -E1000_ERR_MBX;
124
125 if (mbx->ops.check_for_rst)
126 ret_val = mbx->ops.check_for_rst(hw, mbx_id);
127
128 return ret_val;
129}
130
131/**
132 * igb_poll_for_msg - Wait for message notification
133 * @hw: pointer to the HW structure
134 * @mbx_id: id of mailbox to write
135 *
136 * returns SUCCESS if it successfully received a message notification
137 **/
138static s32 igb_poll_for_msg(struct e1000_hw *hw, u16 mbx_id)
139{
140 struct e1000_mbx_info *mbx = &hw->mbx;
141 int countdown = mbx->timeout;
142
143 if (!mbx->ops.check_for_msg)
144 goto out;
145
146 while (mbx->ops.check_for_msg(hw, mbx_id)) {
147 if (!countdown)
148 break;
149 countdown--;
150 udelay(mbx->usec_delay);
151 }
152out:
153 return countdown ? 0 : -E1000_ERR_MBX;
154}
155
156/**
157 * igb_poll_for_ack - Wait for message acknowledgement
158 * @hw: pointer to the HW structure
159 * @mbx_id: id of mailbox to write
160 *
161 * returns SUCCESS if it successfully received a message acknowledgement
162 **/
163static s32 igb_poll_for_ack(struct e1000_hw *hw, u16 mbx_id)
164{
165 struct e1000_mbx_info *mbx = &hw->mbx;
166 int countdown = mbx->timeout;
167
168 if (!mbx->ops.check_for_ack)
169 goto out;
170
171 while (mbx->ops.check_for_ack(hw, mbx_id)) {
172 if (!countdown)
173 break;
174 countdown--;
175 udelay(mbx->usec_delay);
176 }
177out:
178 return countdown ? 0 : -E1000_ERR_MBX;
179}
180
181/**
182 * igb_read_posted_mbx - Wait for message notification and receive message
183 * @hw: pointer to the HW structure
184 * @msg: The message buffer
185 * @size: Length of buffer
186 * @mbx_id: id of mailbox to write
187 *
188 * returns SUCCESS if it successfully received a message notification and
189 * copied it into the receive buffer.
190 **/
191s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
192{
193 struct e1000_mbx_info *mbx = &hw->mbx;
194 s32 ret_val = -E1000_ERR_MBX;
195
196 if (!mbx->ops.read)
197 goto out;
198
199 ret_val = igb_poll_for_msg(hw, mbx_id);
200
201 if (!ret_val)
202 ret_val = mbx->ops.read(hw, msg, size, mbx_id);
203out:
204 return ret_val;
205}
206
207/**
208 * igb_write_posted_mbx - Write a message to the mailbox, wait for ack
209 * @hw: pointer to the HW structure
210 * @msg: The message buffer
211 * @size: Length of buffer
212 * @mbx_id: id of mailbox to write
213 *
214 * returns SUCCESS if it successfully copied message into the buffer and
215 * received an ack to that message within delay * timeout period
216 **/
217s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
218{
219 struct e1000_mbx_info *mbx = &hw->mbx;
220 s32 ret_val = 0;
221
222 if (!mbx->ops.write)
223 goto out;
224
225 /* send msg*/
226 ret_val = mbx->ops.write(hw, msg, size, mbx_id);
227
228 /* if msg sent wait until we receive an ack */
229 if (!ret_val)
230 ret_val = igb_poll_for_ack(hw, mbx_id);
231out:
232 return ret_val;
233}
234
235/**
236 * e1000_init_mbx_ops_generic - Initialize NVM function pointers
237 * @hw: pointer to the HW structure
238 *
239 * Setups up the function pointers to no-op functions
240 **/
241void e1000_init_mbx_ops_generic(struct e1000_hw *hw)
242{
243 struct e1000_mbx_info *mbx = &hw->mbx;
244 mbx->ops.read_posted = igb_read_posted_mbx;
245 mbx->ops.write_posted = igb_write_posted_mbx;
246}
247
248static s32 igb_check_for_bit_pf(struct e1000_hw *hw, u32 mask)
249{
250 u32 mbvficr = rd32(E1000_MBVFICR);
251 s32 ret_val = -E1000_ERR_MBX;
252
253 if (mbvficr & mask) {
254 ret_val = 0;
255 wr32(E1000_MBVFICR, mask);
256 }
257
258 return ret_val;
259}
260
261/**
262 * igb_check_for_msg_pf - checks to see if the VF has sent mail
263 * @hw: pointer to the HW structure
264 * @vf_number: the VF index
265 *
266 * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
267 **/
268static s32 igb_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number)
269{
270 s32 ret_val = -E1000_ERR_MBX;
271
272 if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) {
273 ret_val = 0;
274 hw->mbx.stats.reqs++;
275 }
276
277 return ret_val;
278}
279
280/**
281 * igb_check_for_ack_pf - checks to see if the VF has ACKed
282 * @hw: pointer to the HW structure
283 * @vf_number: the VF index
284 *
285 * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
286 **/
287static s32 igb_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number)
288{
289 s32 ret_val = -E1000_ERR_MBX;
290
291 if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) {
292 ret_val = 0;
293 hw->mbx.stats.acks++;
294 }
295
296 return ret_val;
297}
298
299/**
300 * igb_check_for_rst_pf - checks to see if the VF has reset
301 * @hw: pointer to the HW structure
302 * @vf_number: the VF index
303 *
304 * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
305 **/
306static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
307{
308 u32 vflre = rd32(E1000_VFLRE);
309 s32 ret_val = -E1000_ERR_MBX;
310
311 if (vflre & (1 << vf_number)) {
312 ret_val = 0;
313 wr32(E1000_VFLRE, (1 << vf_number));
314 hw->mbx.stats.rsts++;
315 }
316
317 return ret_val;
318}
319
320/**
321 * igb_write_mbx_pf - Places a message in the mailbox
322 * @hw: pointer to the HW structure
323 * @msg: The message buffer
324 * @size: Length of buffer
325 * @vf_number: the VF index
326 *
327 * returns SUCCESS if it successfully copied message into the buffer
328 **/
329static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
330 u16 vf_number)
331{
332 u32 p2v_mailbox;
333 s32 ret_val = 0;
334 u16 i;
335
336 /* Take ownership of the buffer */
337 wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);
338
339 /* Make sure we have ownership now... */
340 p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number));
341 if (!(p2v_mailbox & E1000_P2VMAILBOX_PFU)) {
342 /* failed to grab ownership */
343 ret_val = -E1000_ERR_MBX;
344 goto out_no_write;
345 }
346
347 /*
348 * flush any ack or msg which may already be in the queue
349 * as they are likely the result of an error
350 */
351 igb_check_for_ack_pf(hw, vf_number);
352 igb_check_for_msg_pf(hw, vf_number);
353
354 /* copy the caller specified message to the mailbox memory buffer */
355 for (i = 0; i < size; i++)
356 array_wr32(E1000_VMBMEM(vf_number), i, msg[i]);
357
358 /* Interrupt VF to tell it a message has been sent and release buffer*/
359 wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS);
360
361 /* update stats */
362 hw->mbx.stats.msgs_tx++;
363
364out_no_write:
365 return ret_val;
366
367}
368
369/**
370 * igb_read_mbx_pf - Read a message from the mailbox
371 * @hw: pointer to the HW structure
372 * @msg: The message buffer
373 * @size: Length of buffer
374 * @vf_number: the VF index
375 *
376 * This function copies a message from the mailbox buffer to the caller's
377 * memory buffer. The presumption is that the caller knows that there was
378 * a message due to a VF request so no polling for message is needed.
379 **/
380static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
381 u16 vf_number)
382{
383 u32 p2v_mailbox;
384 s32 ret_val = 0;
385 u16 i;
386
387 /* Take ownership of the buffer */
388 wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);
389
390 /* Make sure we have ownership now... */
391 p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number));
392 if (!(p2v_mailbox & E1000_P2VMAILBOX_PFU)) {
393 /* failed to grab ownership */
394 ret_val = -E1000_ERR_MBX;
395 goto out_no_read;
396 }
397
398 /* copy the message to the mailbox memory buffer */
399 for (i = 0; i < size; i++)
400 msg[i] = array_rd32(E1000_VMBMEM(vf_number), i);
401
402 /* Acknowledge the message and release buffer */
403 wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK);
404
405 /* update stats */
406 hw->mbx.stats.msgs_rx++;
407
408 ret_val = 0;
409
410out_no_read:
411 return ret_val;
412}
413
414/**
415 * e1000_init_mbx_params_pf - set initial values for pf mailbox
416 * @hw: pointer to the HW structure
417 *
418 * Initializes the hw->mbx struct to correct values for pf mailbox
419 */
420s32 igb_init_mbx_params_pf(struct e1000_hw *hw)
421{
422 struct e1000_mbx_info *mbx = &hw->mbx;
423
424 if (hw->mac.type == e1000_82576) {
425 mbx->timeout = 0;
426 mbx->usec_delay = 0;
427
428 mbx->size = E1000_VFMAILBOX_SIZE;
429
430 mbx->ops.read = igb_read_mbx_pf;
431 mbx->ops.write = igb_write_mbx_pf;
432 mbx->ops.read_posted = igb_read_posted_mbx;
433 mbx->ops.write_posted = igb_write_posted_mbx;
434 mbx->ops.check_for_msg = igb_check_for_msg_pf;
435 mbx->ops.check_for_ack = igb_check_for_ack_pf;
436 mbx->ops.check_for_rst = igb_check_for_rst_pf;
437
438 mbx->stats.msgs_tx = 0;
439 mbx->stats.msgs_rx = 0;
440 mbx->stats.reqs = 0;
441 mbx->stats.acks = 0;
442 mbx->stats.rsts = 0;
443 }
444
445 return 0;
446}
447
diff --git a/drivers/net/igb/e1000_mbx.h b/drivers/net/igb/e1000_mbx.h
new file mode 100644
index 000000000000..6ec9890a8f7a
--- /dev/null
+++ b/drivers/net/igb/e1000_mbx.h
@@ -0,0 +1,77 @@
1/*******************************************************************************
2
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2009 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
#ifndef _E1000_MBX_H_
#define _E1000_MBX_H_

#include "e1000_hw.h"

/* P2VMAILBOX (PF-to-VF mailbox control) register bit definitions */
#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */
#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
#define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */

/* MBVFICR (mailbox VF interrupt cause) register bit definitions */
#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */
#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */
#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */

#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */

/* If it's a E1000_VF_* msg then it originates in the VF and is sent to the
 * PF.  The reverse is true if it is E1000_PF_*.
 * Message ACK's are the value or'd with 0xF0000000
 */
#define E1000_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
                                         * this are the ACK */
#define E1000_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
                                          * this are the NACK */
#define E1000_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
                                         * clear to send requests */
#define E1000_VT_MSGINFO_SHIFT 16
/* bits 23:16 are used for extra info for certain messages */
#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT)

/* VF-originated request message ids */
#define E1000_VF_RESET 0x01 /* VF requests reset */
#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
#define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
#define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
#define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */

#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */

/* Generic mailbox API exported by e1000_mbx.c */
s32 igb_read_mbx(struct e1000_hw *, u32 *, u16, u16);
s32 igb_write_mbx(struct e1000_hw *, u32 *, u16, u16);
s32 igb_read_posted_mbx(struct e1000_hw *, u32 *, u16, u16);
s32 igb_write_posted_mbx(struct e1000_hw *, u32 *, u16, u16);
s32 igb_check_for_msg(struct e1000_hw *, u16);
s32 igb_check_for_ack(struct e1000_hw *, u16);
s32 igb_check_for_rst(struct e1000_hw *, u16);
s32 igb_init_mbx_params_pf(struct e1000_hw *);

#endif /* _E1000_MBX_H_ */
diff --git a/drivers/net/igb/e1000_nvm.c b/drivers/net/igb/e1000_nvm.c
index a84e4e429fa7..a88bfe2f1e8f 100644
--- a/drivers/net/igb/e1000_nvm.c
+++ b/drivers/net/igb/e1000_nvm.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation. 4 Copyright(c) 2007-2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -419,7 +419,7 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
419 goto out; 419 goto out;
420 } 420 }
421 421
422 ret_val = hw->nvm.ops.acquire_nvm(hw); 422 ret_val = hw->nvm.ops.acquire(hw);
423 if (ret_val) 423 if (ret_val)
424 goto out; 424 goto out;
425 425
@@ -468,7 +468,7 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
468 468
469 msleep(10); 469 msleep(10);
470release: 470release:
471 hw->nvm.ops.release_nvm(hw); 471 hw->nvm.ops.release(hw);
472 472
473out: 473out:
474 return ret_val; 474 return ret_val;
@@ -487,14 +487,14 @@ s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num)
487 s32 ret_val; 487 s32 ret_val;
488 u16 nvm_data; 488 u16 nvm_data;
489 489
490 ret_val = hw->nvm.ops.read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); 490 ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
491 if (ret_val) { 491 if (ret_val) {
492 hw_dbg("NVM Read Error\n"); 492 hw_dbg("NVM Read Error\n");
493 goto out; 493 goto out;
494 } 494 }
495 *part_num = (u32)(nvm_data << 16); 495 *part_num = (u32)(nvm_data << 16);
496 496
497 ret_val = hw->nvm.ops.read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data); 497 ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
498 if (ret_val) { 498 if (ret_val) {
499 hw_dbg("NVM Read Error\n"); 499 hw_dbg("NVM Read Error\n");
500 goto out; 500 goto out;
@@ -515,29 +515,23 @@ out:
515 **/ 515 **/
516s32 igb_read_mac_addr(struct e1000_hw *hw) 516s32 igb_read_mac_addr(struct e1000_hw *hw)
517{ 517{
518 s32 ret_val = 0; 518 u32 rar_high;
519 u16 offset, nvm_data, i; 519 u32 rar_low;
520 u16 i;
520 521
521 for (i = 0; i < ETH_ALEN; i += 2) { 522 rar_high = rd32(E1000_RAH(0));
522 offset = i >> 1; 523 rar_low = rd32(E1000_RAL(0));
523 ret_val = hw->nvm.ops.read_nvm(hw, offset, 1, &nvm_data); 524
524 if (ret_val) { 525 for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
525 hw_dbg("NVM Read Error\n"); 526 hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
526 goto out;
527 }
528 hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF);
529 hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8);
530 }
531 527
532 /* Flip last bit of mac address if we're on second port */ 528 for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
533 if (hw->bus.func == E1000_FUNC_1) 529 hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
534 hw->mac.perm_addr[5] ^= 1;
535 530
536 for (i = 0; i < ETH_ALEN; i++) 531 for (i = 0; i < ETH_ALEN; i++)
537 hw->mac.addr[i] = hw->mac.perm_addr[i]; 532 hw->mac.addr[i] = hw->mac.perm_addr[i];
538 533
539out: 534 return 0;
540 return ret_val;
541} 535}
542 536
543/** 537/**
@@ -554,7 +548,7 @@ s32 igb_validate_nvm_checksum(struct e1000_hw *hw)
554 u16 i, nvm_data; 548 u16 i, nvm_data;
555 549
556 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { 550 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
557 ret_val = hw->nvm.ops.read_nvm(hw, i, 1, &nvm_data); 551 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
558 if (ret_val) { 552 if (ret_val) {
559 hw_dbg("NVM Read Error\n"); 553 hw_dbg("NVM Read Error\n");
560 goto out; 554 goto out;
@@ -587,7 +581,7 @@ s32 igb_update_nvm_checksum(struct e1000_hw *hw)
587 u16 i, nvm_data; 581 u16 i, nvm_data;
588 582
589 for (i = 0; i < NVM_CHECKSUM_REG; i++) { 583 for (i = 0; i < NVM_CHECKSUM_REG; i++) {
590 ret_val = hw->nvm.ops.read_nvm(hw, i, 1, &nvm_data); 584 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
591 if (ret_val) { 585 if (ret_val) {
592 hw_dbg("NVM Read Error while updating checksum.\n"); 586 hw_dbg("NVM Read Error while updating checksum.\n");
593 goto out; 587 goto out;
@@ -595,7 +589,7 @@ s32 igb_update_nvm_checksum(struct e1000_hw *hw)
595 checksum += nvm_data; 589 checksum += nvm_data;
596 } 590 }
597 checksum = (u16) NVM_SUM - checksum; 591 checksum = (u16) NVM_SUM - checksum;
598 ret_val = hw->nvm.ops.write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum); 592 ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
599 if (ret_val) 593 if (ret_val)
600 hw_dbg("NVM Write Error while updating checksum.\n"); 594 hw_dbg("NVM Write Error while updating checksum.\n");
601 595
diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c
index 17fddb91c9f5..de2d48624683 100644
--- a/drivers/net/igb/e1000_phy.c
+++ b/drivers/net/igb/e1000_phy.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation. 4 Copyright(c) 2007-2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -31,10 +31,6 @@
31#include "e1000_mac.h" 31#include "e1000_mac.h"
32#include "e1000_phy.h" 32#include "e1000_phy.h"
33 33
34static s32 igb_get_phy_cfg_done(struct e1000_hw *hw);
35static void igb_release_phy(struct e1000_hw *hw);
36static s32 igb_acquire_phy(struct e1000_hw *hw);
37static s32 igb_phy_reset_dsp(struct e1000_hw *hw);
38static s32 igb_phy_setup_autoneg(struct e1000_hw *hw); 34static s32 igb_phy_setup_autoneg(struct e1000_hw *hw);
39static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw, 35static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
40 u16 *phy_ctrl); 36 u16 *phy_ctrl);
@@ -43,9 +39,6 @@ static s32 igb_wait_autoneg(struct e1000_hw *hw);
43/* Cable length tables */ 39/* Cable length tables */
44static const u16 e1000_m88_cable_length_table[] = 40static const u16 e1000_m88_cable_length_table[] =
45 { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; 41 { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
46#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
47 (sizeof(e1000_m88_cable_length_table) / \
48 sizeof(e1000_m88_cable_length_table[0]))
49 42
50static const u16 e1000_igp_2_cable_length_table[] = 43static const u16 e1000_igp_2_cable_length_table[] =
51 { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 44 { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
@@ -91,13 +84,13 @@ s32 igb_get_phy_id(struct e1000_hw *hw)
91 s32 ret_val = 0; 84 s32 ret_val = 0;
92 u16 phy_id; 85 u16 phy_id;
93 86
94 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_ID1, &phy_id); 87 ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
95 if (ret_val) 88 if (ret_val)
96 goto out; 89 goto out;
97 90
98 phy->id = (u32)(phy_id << 16); 91 phy->id = (u32)(phy_id << 16);
99 udelay(20); 92 udelay(20);
100 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_ID2, &phy_id); 93 ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id);
101 if (ret_val) 94 if (ret_val)
102 goto out; 95 goto out;
103 96
@@ -118,11 +111,11 @@ static s32 igb_phy_reset_dsp(struct e1000_hw *hw)
118{ 111{
119 s32 ret_val; 112 s32 ret_val;
120 113
121 ret_val = hw->phy.ops.write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1); 114 ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1);
122 if (ret_val) 115 if (ret_val)
123 goto out; 116 goto out;
124 117
125 ret_val = hw->phy.ops.write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0); 118 ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0);
126 119
127out: 120out:
128 return ret_val; 121 return ret_val;
@@ -257,9 +250,12 @@ out:
257 **/ 250 **/
258s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) 251s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
259{ 252{
260 s32 ret_val; 253 s32 ret_val = 0;
254
255 if (!(hw->phy.ops.acquire))
256 goto out;
261 257
262 ret_val = igb_acquire_phy(hw); 258 ret_val = hw->phy.ops.acquire(hw);
263 if (ret_val) 259 if (ret_val)
264 goto out; 260 goto out;
265 261
@@ -268,16 +264,15 @@ s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
268 IGP01E1000_PHY_PAGE_SELECT, 264 IGP01E1000_PHY_PAGE_SELECT,
269 (u16)offset); 265 (u16)offset);
270 if (ret_val) { 266 if (ret_val) {
271 igb_release_phy(hw); 267 hw->phy.ops.release(hw);
272 goto out; 268 goto out;
273 } 269 }
274 } 270 }
275 271
276 ret_val = igb_read_phy_reg_mdic(hw, 272 ret_val = igb_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
277 MAX_PHY_REG_ADDRESS & offset, 273 data);
278 data);
279 274
280 igb_release_phy(hw); 275 hw->phy.ops.release(hw);
281 276
282out: 277out:
283 return ret_val; 278 return ret_val;
@@ -294,9 +289,12 @@ out:
294 **/ 289 **/
295s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) 290s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
296{ 291{
297 s32 ret_val; 292 s32 ret_val = 0;
298 293
299 ret_val = igb_acquire_phy(hw); 294 if (!(hw->phy.ops.acquire))
295 goto out;
296
297 ret_val = hw->phy.ops.acquire(hw);
300 if (ret_val) 298 if (ret_val)
301 goto out; 299 goto out;
302 300
@@ -305,16 +303,15 @@ s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
305 IGP01E1000_PHY_PAGE_SELECT, 303 IGP01E1000_PHY_PAGE_SELECT,
306 (u16)offset); 304 (u16)offset);
307 if (ret_val) { 305 if (ret_val) {
308 igb_release_phy(hw); 306 hw->phy.ops.release(hw);
309 goto out; 307 goto out;
310 } 308 }
311 } 309 }
312 310
313 ret_val = igb_write_phy_reg_mdic(hw, 311 ret_val = igb_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
314 MAX_PHY_REG_ADDRESS & offset,
315 data); 312 data);
316 313
317 igb_release_phy(hw); 314 hw->phy.ops.release(hw);
318 315
319out: 316out:
320 return ret_val; 317 return ret_val;
@@ -339,8 +336,7 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
339 } 336 }
340 337
341 /* Enable CRS on TX. This must be set for half-duplex operation. */ 338 /* Enable CRS on TX. This must be set for half-duplex operation. */
342 ret_val = hw->phy.ops.read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 339 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
343 &phy_data);
344 if (ret_val) 340 if (ret_val)
345 goto out; 341 goto out;
346 342
@@ -383,8 +379,7 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
383 if (phy->disable_polarity_correction == 1) 379 if (phy->disable_polarity_correction == 1)
384 phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; 380 phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
385 381
386 ret_val = hw->phy.ops.write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 382 ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
387 phy_data);
388 if (ret_val) 383 if (ret_val)
389 goto out; 384 goto out;
390 385
@@ -393,8 +388,7 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
393 * Force TX_CLK in the Extended PHY Specific Control Register 388 * Force TX_CLK in the Extended PHY Specific Control Register
394 * to 25MHz clock. 389 * to 25MHz clock.
395 */ 390 */
396 ret_val = hw->phy.ops.read_phy_reg(hw, 391 ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
397 M88E1000_EXT_PHY_SPEC_CTRL,
398 &phy_data); 392 &phy_data);
399 if (ret_val) 393 if (ret_val)
400 goto out; 394 goto out;
@@ -413,8 +407,7 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
413 phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | 407 phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
414 M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); 408 M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
415 } 409 }
416 ret_val = hw->phy.ops.write_phy_reg(hw, 410 ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
417 M88E1000_EXT_PHY_SPEC_CTRL,
418 phy_data); 411 phy_data);
419 if (ret_val) 412 if (ret_val)
420 goto out; 413 goto out;
@@ -449,7 +442,7 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
449 goto out; 442 goto out;
450 } 443 }
451 444
452 ret_val = hw->phy.ops.reset_phy(hw); 445 ret_val = phy->ops.reset(hw);
453 if (ret_val) { 446 if (ret_val) {
454 hw_dbg("Error resetting the PHY.\n"); 447 hw_dbg("Error resetting the PHY.\n");
455 goto out; 448 goto out;
@@ -464,8 +457,8 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
464 */ 457 */
465 if (phy->type == e1000_phy_igp) { 458 if (phy->type == e1000_phy_igp) {
466 /* disable lplu d3 during driver init */ 459 /* disable lplu d3 during driver init */
467 if (hw->phy.ops.set_d3_lplu_state) 460 if (phy->ops.set_d3_lplu_state)
468 ret_val = hw->phy.ops.set_d3_lplu_state(hw, false); 461 ret_val = phy->ops.set_d3_lplu_state(hw, false);
469 if (ret_val) { 462 if (ret_val) {
470 hw_dbg("Error Disabling LPLU D3\n"); 463 hw_dbg("Error Disabling LPLU D3\n");
471 goto out; 464 goto out;
@@ -473,13 +466,13 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
473 } 466 }
474 467
475 /* disable lplu d0 during driver init */ 468 /* disable lplu d0 during driver init */
476 ret_val = hw->phy.ops.set_d0_lplu_state(hw, false); 469 ret_val = phy->ops.set_d0_lplu_state(hw, false);
477 if (ret_val) { 470 if (ret_val) {
478 hw_dbg("Error Disabling LPLU D0\n"); 471 hw_dbg("Error Disabling LPLU D0\n");
479 goto out; 472 goto out;
480 } 473 }
481 /* Configure mdi-mdix settings */ 474 /* Configure mdi-mdix settings */
482 ret_val = hw->phy.ops.read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data); 475 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data);
483 if (ret_val) 476 if (ret_val)
484 goto out; 477 goto out;
485 478
@@ -497,7 +490,7 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
497 data |= IGP01E1000_PSCR_AUTO_MDIX; 490 data |= IGP01E1000_PSCR_AUTO_MDIX;
498 break; 491 break;
499 } 492 }
500 ret_val = hw->phy.ops.write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, data); 493 ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, data);
501 if (ret_val) 494 if (ret_val)
502 goto out; 495 goto out;
503 496
@@ -510,33 +503,31 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
510 */ 503 */
511 if (phy->autoneg_advertised == ADVERTISE_1000_FULL) { 504 if (phy->autoneg_advertised == ADVERTISE_1000_FULL) {
512 /* Disable SmartSpeed */ 505 /* Disable SmartSpeed */
513 ret_val = hw->phy.ops.read_phy_reg(hw, 506 ret_val = phy->ops.read_reg(hw,
514 IGP01E1000_PHY_PORT_CONFIG, 507 IGP01E1000_PHY_PORT_CONFIG,
515 &data); 508 &data);
516 if (ret_val) 509 if (ret_val)
517 goto out; 510 goto out;
518 511
519 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 512 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
520 ret_val = hw->phy.ops.write_phy_reg(hw, 513 ret_val = phy->ops.write_reg(hw,
521 IGP01E1000_PHY_PORT_CONFIG, 514 IGP01E1000_PHY_PORT_CONFIG,
522 data); 515 data);
523 if (ret_val) 516 if (ret_val)
524 goto out; 517 goto out;
525 518
526 /* Set auto Master/Slave resolution process */ 519 /* Set auto Master/Slave resolution process */
527 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_1000T_CTRL, 520 ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data);
528 &data);
529 if (ret_val) 521 if (ret_val)
530 goto out; 522 goto out;
531 523
532 data &= ~CR_1000T_MS_ENABLE; 524 data &= ~CR_1000T_MS_ENABLE;
533 ret_val = hw->phy.ops.write_phy_reg(hw, PHY_1000T_CTRL, 525 ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data);
534 data);
535 if (ret_val) 526 if (ret_val)
536 goto out; 527 goto out;
537 } 528 }
538 529
539 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_1000T_CTRL, &data); 530 ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data);
540 if (ret_val) 531 if (ret_val)
541 goto out; 532 goto out;
542 533
@@ -560,7 +551,7 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
560 default: 551 default:
561 break; 552 break;
562 } 553 }
563 ret_val = hw->phy.ops.write_phy_reg(hw, PHY_1000T_CTRL, data); 554 ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data);
564 if (ret_val) 555 if (ret_val)
565 goto out; 556 goto out;
566 } 557 }
@@ -609,12 +600,12 @@ s32 igb_copper_link_autoneg(struct e1000_hw *hw)
609 * Restart auto-negotiation by setting the Auto Neg Enable bit and 600 * Restart auto-negotiation by setting the Auto Neg Enable bit and
610 * the Auto Neg Restart bit in the PHY control register. 601 * the Auto Neg Restart bit in the PHY control register.
611 */ 602 */
612 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_CONTROL, &phy_ctrl); 603 ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
613 if (ret_val) 604 if (ret_val)
614 goto out; 605 goto out;
615 606
616 phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); 607 phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
617 ret_val = hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, phy_ctrl); 608 ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
618 if (ret_val) 609 if (ret_val)
619 goto out; 610 goto out;
620 611
@@ -656,15 +647,13 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw)
656 phy->autoneg_advertised &= phy->autoneg_mask; 647 phy->autoneg_advertised &= phy->autoneg_mask;
657 648
658 /* Read the MII Auto-Neg Advertisement Register (Address 4). */ 649 /* Read the MII Auto-Neg Advertisement Register (Address 4). */
659 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_AUTONEG_ADV, 650 ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
660 &mii_autoneg_adv_reg);
661 if (ret_val) 651 if (ret_val)
662 goto out; 652 goto out;
663 653
664 if (phy->autoneg_mask & ADVERTISE_1000_FULL) { 654 if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
665 /* Read the MII 1000Base-T Control Register (Address 9). */ 655 /* Read the MII 1000Base-T Control Register (Address 9). */
666 ret_val = hw->phy.ops.read_phy_reg(hw, 656 ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL,
667 PHY_1000T_CTRL,
668 &mii_1000t_ctrl_reg); 657 &mii_1000t_ctrl_reg);
669 if (ret_val) 658 if (ret_val)
670 goto out; 659 goto out;
@@ -785,17 +774,16 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw)
785 goto out; 774 goto out;
786 } 775 }
787 776
788 ret_val = hw->phy.ops.write_phy_reg(hw, PHY_AUTONEG_ADV, 777 ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
789 mii_autoneg_adv_reg);
790 if (ret_val) 778 if (ret_val)
791 goto out; 779 goto out;
792 780
793 hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); 781 hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
794 782
795 if (phy->autoneg_mask & ADVERTISE_1000_FULL) { 783 if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
796 ret_val = hw->phy.ops.write_phy_reg(hw, 784 ret_val = phy->ops.write_reg(hw,
797 PHY_1000T_CTRL, 785 PHY_1000T_CTRL,
798 mii_1000t_ctrl_reg); 786 mii_1000t_ctrl_reg);
799 if (ret_val) 787 if (ret_val)
800 goto out; 788 goto out;
801 } 789 }
@@ -819,13 +807,13 @@ s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw)
819 u16 phy_data; 807 u16 phy_data;
820 bool link; 808 bool link;
821 809
822 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_CONTROL, &phy_data); 810 ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
823 if (ret_val) 811 if (ret_val)
824 goto out; 812 goto out;
825 813
826 igb_phy_force_speed_duplex_setup(hw, &phy_data); 814 igb_phy_force_speed_duplex_setup(hw, &phy_data);
827 815
828 ret_val = hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, phy_data); 816 ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
829 if (ret_val) 817 if (ret_val)
830 goto out; 818 goto out;
831 819
@@ -833,16 +821,14 @@ s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw)
833 * Clear Auto-Crossover to force MDI manually. IGP requires MDI 821 * Clear Auto-Crossover to force MDI manually. IGP requires MDI
834 * forced whenever speed and duplex are forced. 822 * forced whenever speed and duplex are forced.
835 */ 823 */
836 ret_val = hw->phy.ops.read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, 824 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
837 &phy_data);
838 if (ret_val) 825 if (ret_val)
839 goto out; 826 goto out;
840 827
841 phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; 828 phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
842 phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; 829 phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
843 830
844 ret_val = hw->phy.ops.write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, 831 ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
845 phy_data);
846 if (ret_val) 832 if (ret_val)
847 goto out; 833 goto out;
848 834
@@ -897,20 +883,18 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
897 * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI 883 * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
898 * forced whenever speed and duplex are forced. 884 * forced whenever speed and duplex are forced.
899 */ 885 */
900 ret_val = hw->phy.ops.read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 886 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
901 &phy_data);
902 if (ret_val) 887 if (ret_val)
903 goto out; 888 goto out;
904 889
905 phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; 890 phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
906 ret_val = hw->phy.ops.write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 891 ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
907 phy_data);
908 if (ret_val) 892 if (ret_val)
909 goto out; 893 goto out;
910 894
911 hw_dbg("M88E1000 PSCR: %X\n", phy_data); 895 hw_dbg("M88E1000 PSCR: %X\n", phy_data);
912 896
913 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_CONTROL, &phy_data); 897 ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
914 if (ret_val) 898 if (ret_val)
915 goto out; 899 goto out;
916 900
@@ -919,7 +903,7 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
919 /* Reset the phy to commit changes. */ 903 /* Reset the phy to commit changes. */
920 phy_data |= MII_CR_RESET; 904 phy_data |= MII_CR_RESET;
921 905
922 ret_val = hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, phy_data); 906 ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
923 if (ret_val) 907 if (ret_val)
924 goto out; 908 goto out;
925 909
@@ -940,7 +924,7 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
940 * We didn't get link. 924 * We didn't get link.
941 * Reset the DSP and cross our fingers. 925 * Reset the DSP and cross our fingers.
942 */ 926 */
943 ret_val = hw->phy.ops.write_phy_reg(hw, 927 ret_val = phy->ops.write_reg(hw,
944 M88E1000_PHY_PAGE_SELECT, 928 M88E1000_PHY_PAGE_SELECT,
945 0x001d); 929 0x001d);
946 if (ret_val) 930 if (ret_val)
@@ -957,8 +941,7 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
957 goto out; 941 goto out;
958 } 942 }
959 943
960 ret_val = hw->phy.ops.read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, 944 ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
961 &phy_data);
962 if (ret_val) 945 if (ret_val)
963 goto out; 946 goto out;
964 947
@@ -968,8 +951,7 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
968 * the reset value of 2.5MHz. 951 * the reset value of 2.5MHz.
969 */ 952 */
970 phy_data |= M88E1000_EPSCR_TX_CLK_25; 953 phy_data |= M88E1000_EPSCR_TX_CLK_25;
971 ret_val = hw->phy.ops.write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, 954 ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
972 phy_data);
973 if (ret_val) 955 if (ret_val)
974 goto out; 956 goto out;
975 957
@@ -977,14 +959,12 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
977 * In addition, we must re-enable CRS on Tx for both half and full 959 * In addition, we must re-enable CRS on Tx for both half and full
978 * duplex. 960 * duplex.
979 */ 961 */
980 ret_val = hw->phy.ops.read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 962 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
981 &phy_data);
982 if (ret_val) 963 if (ret_val)
983 goto out; 964 goto out;
984 965
985 phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; 966 phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
986 ret_val = hw->phy.ops.write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 967 ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
987 phy_data);
988 968
989out: 969out:
990 return ret_val; 970 return ret_val;
@@ -1071,15 +1051,13 @@ s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active)
1071 s32 ret_val; 1051 s32 ret_val;
1072 u16 data; 1052 u16 data;
1073 1053
1074 ret_val = hw->phy.ops.read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, 1054 ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
1075 &data);
1076 if (ret_val) 1055 if (ret_val)
1077 goto out; 1056 goto out;
1078 1057
1079 if (!active) { 1058 if (!active) {
1080 data &= ~IGP02E1000_PM_D3_LPLU; 1059 data &= ~IGP02E1000_PM_D3_LPLU;
1081 ret_val = hw->phy.ops.write_phy_reg(hw, 1060 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
1082 IGP02E1000_PHY_POWER_MGMT,
1083 data); 1061 data);
1084 if (ret_val) 1062 if (ret_val)
1085 goto out; 1063 goto out;
@@ -1090,27 +1068,27 @@ s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active)
1090 * SmartSpeed, so performance is maintained. 1068 * SmartSpeed, so performance is maintained.
1091 */ 1069 */
1092 if (phy->smart_speed == e1000_smart_speed_on) { 1070 if (phy->smart_speed == e1000_smart_speed_on) {
1093 ret_val = hw->phy.ops.read_phy_reg(hw, 1071 ret_val = phy->ops.read_reg(hw,
1094 IGP01E1000_PHY_PORT_CONFIG, 1072 IGP01E1000_PHY_PORT_CONFIG,
1095 &data); 1073 &data);
1096 if (ret_val) 1074 if (ret_val)
1097 goto out; 1075 goto out;
1098 1076
1099 data |= IGP01E1000_PSCFR_SMART_SPEED; 1077 data |= IGP01E1000_PSCFR_SMART_SPEED;
1100 ret_val = hw->phy.ops.write_phy_reg(hw, 1078 ret_val = phy->ops.write_reg(hw,
1101 IGP01E1000_PHY_PORT_CONFIG, 1079 IGP01E1000_PHY_PORT_CONFIG,
1102 data); 1080 data);
1103 if (ret_val) 1081 if (ret_val)
1104 goto out; 1082 goto out;
1105 } else if (phy->smart_speed == e1000_smart_speed_off) { 1083 } else if (phy->smart_speed == e1000_smart_speed_off) {
1106 ret_val = hw->phy.ops.read_phy_reg(hw, 1084 ret_val = phy->ops.read_reg(hw,
1107 IGP01E1000_PHY_PORT_CONFIG, 1085 IGP01E1000_PHY_PORT_CONFIG,
1108 &data); 1086 &data);
1109 if (ret_val) 1087 if (ret_val)
1110 goto out; 1088 goto out;
1111 1089
1112 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 1090 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
1113 ret_val = hw->phy.ops.write_phy_reg(hw, 1091 ret_val = phy->ops.write_reg(hw,
1114 IGP01E1000_PHY_PORT_CONFIG, 1092 IGP01E1000_PHY_PORT_CONFIG,
1115 data); 1093 data);
1116 if (ret_val) 1094 if (ret_val)
@@ -1120,22 +1098,19 @@ s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active)
1120 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || 1098 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
1121 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { 1099 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
1122 data |= IGP02E1000_PM_D3_LPLU; 1100 data |= IGP02E1000_PM_D3_LPLU;
1123 ret_val = hw->phy.ops.write_phy_reg(hw, 1101 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
1124 IGP02E1000_PHY_POWER_MGMT,
1125 data); 1102 data);
1126 if (ret_val) 1103 if (ret_val)
1127 goto out; 1104 goto out;
1128 1105
1129 /* When LPLU is enabled, we should disable SmartSpeed */ 1106 /* When LPLU is enabled, we should disable SmartSpeed */
1130 ret_val = hw->phy.ops.read_phy_reg(hw, 1107 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
1131 IGP01E1000_PHY_PORT_CONFIG,
1132 &data); 1108 &data);
1133 if (ret_val) 1109 if (ret_val)
1134 goto out; 1110 goto out;
1135 1111
1136 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 1112 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
1137 ret_val = hw->phy.ops.write_phy_reg(hw, 1113 ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
1138 IGP01E1000_PHY_PORT_CONFIG,
1139 data); 1114 data);
1140 } 1115 }
1141 1116
@@ -1176,7 +1151,7 @@ s32 igb_check_downshift(struct e1000_hw *hw)
1176 goto out; 1151 goto out;
1177 } 1152 }
1178 1153
1179 ret_val = hw->phy.ops.read_phy_reg(hw, offset, &phy_data); 1154 ret_val = phy->ops.read_reg(hw, offset, &phy_data);
1180 1155
1181 if (!ret_val) 1156 if (!ret_val)
1182 phy->speed_downgraded = (phy_data & mask) ? true : false; 1157 phy->speed_downgraded = (phy_data & mask) ? true : false;
@@ -1199,7 +1174,7 @@ static s32 igb_check_polarity_m88(struct e1000_hw *hw)
1199 s32 ret_val; 1174 s32 ret_val;
1200 u16 data; 1175 u16 data;
1201 1176
1202 ret_val = hw->phy.ops.read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &data); 1177 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data);
1203 1178
1204 if (!ret_val) 1179 if (!ret_val)
1205 phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY) 1180 phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY)
@@ -1228,8 +1203,7 @@ static s32 igb_check_polarity_igp(struct e1000_hw *hw)
1228 * Polarity is determined based on the speed of 1203 * Polarity is determined based on the speed of
1229 * our connection. 1204 * our connection.
1230 */ 1205 */
1231 ret_val = hw->phy.ops.read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, 1206 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
1232 &data);
1233 if (ret_val) 1207 if (ret_val)
1234 goto out; 1208 goto out;
1235 1209
@@ -1246,7 +1220,7 @@ static s32 igb_check_polarity_igp(struct e1000_hw *hw)
1246 mask = IGP01E1000_PSSR_POLARITY_REVERSED; 1220 mask = IGP01E1000_PSSR_POLARITY_REVERSED;
1247 } 1221 }
1248 1222
1249 ret_val = hw->phy.ops.read_phy_reg(hw, offset, &data); 1223 ret_val = phy->ops.read_reg(hw, offset, &data);
1250 1224
1251 if (!ret_val) 1225 if (!ret_val)
1252 phy->cable_polarity = (data & mask) 1226 phy->cable_polarity = (data & mask)
@@ -1271,10 +1245,10 @@ static s32 igb_wait_autoneg(struct e1000_hw *hw)
1271 1245
1272 /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ 1246 /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
1273 for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { 1247 for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
1274 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_STATUS, &phy_status); 1248 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
1275 if (ret_val) 1249 if (ret_val)
1276 break; 1250 break;
1277 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_STATUS, &phy_status); 1251 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
1278 if (ret_val) 1252 if (ret_val)
1279 break; 1253 break;
1280 if (phy_status & MII_SR_AUTONEG_COMPLETE) 1254 if (phy_status & MII_SR_AUTONEG_COMPLETE)
@@ -1310,10 +1284,10 @@ s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
1310 * twice due to the link bit being sticky. No harm doing 1284 * twice due to the link bit being sticky. No harm doing
1311 * it across the board. 1285 * it across the board.
1312 */ 1286 */
1313 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_STATUS, &phy_status); 1287 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
1314 if (ret_val) 1288 if (ret_val)
1315 break; 1289 break;
1316 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_STATUS, &phy_status); 1290 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
1317 if (ret_val) 1291 if (ret_val)
1318 break; 1292 break;
1319 if (phy_status & MII_SR_LINK_STATUS) 1293 if (phy_status & MII_SR_LINK_STATUS)
@@ -1350,8 +1324,7 @@ s32 igb_get_cable_length_m88(struct e1000_hw *hw)
1350 s32 ret_val; 1324 s32 ret_val;
1351 u16 phy_data, index; 1325 u16 phy_data, index;
1352 1326
1353 ret_val = hw->phy.ops.read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, 1327 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
1354 &phy_data);
1355 if (ret_val) 1328 if (ret_val)
1356 goto out; 1329 goto out;
1357 1330
@@ -1372,8 +1345,8 @@ out:
1372 * 1345 *
1373 * The automatic gain control (agc) normalizes the amplitude of the 1346 * The automatic gain control (agc) normalizes the amplitude of the
1374 * received signal, adjusting for the attenuation produced by the 1347 * received signal, adjusting for the attenuation produced by the
1375 * cable. By reading the AGC registers, which reperesent the 1348 * cable. By reading the AGC registers, which represent the
1376 * cobination of course and fine gain value, the value can be put 1349 * combination of coarse and fine gain value, the value can be put
1377 * into a lookup table to obtain the approximate cable length 1350 * into a lookup table to obtain the approximate cable length
1378 * for each channel. 1351 * for each channel.
1379 **/ 1352 **/
@@ -1392,14 +1365,13 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw)
1392 1365
1393 /* Read the AGC registers for all channels */ 1366 /* Read the AGC registers for all channels */
1394 for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { 1367 for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
1395 ret_val = hw->phy.ops.read_phy_reg(hw, agc_reg_array[i], 1368 ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data);
1396 &phy_data);
1397 if (ret_val) 1369 if (ret_val)
1398 goto out; 1370 goto out;
1399 1371
1400 /* 1372 /*
1401 * Getting bits 15:9, which represent the combination of 1373 * Getting bits 15:9, which represent the combination of
1402 * course and fine gain values. The result is a number 1374 * coarse and fine gain values. The result is a number
1403 * that can be put into the lookup table to obtain the 1375 * that can be put into the lookup table to obtain the
1404 * approximate cable length. 1376 * approximate cable length.
1405 */ 1377 */
@@ -1456,7 +1428,7 @@ s32 igb_get_phy_info_m88(struct e1000_hw *hw)
1456 u16 phy_data; 1428 u16 phy_data;
1457 bool link; 1429 bool link;
1458 1430
1459 if (hw->phy.media_type != e1000_media_type_copper) { 1431 if (phy->media_type != e1000_media_type_copper) {
1460 hw_dbg("Phy info is only valid for copper media\n"); 1432 hw_dbg("Phy info is only valid for copper media\n");
1461 ret_val = -E1000_ERR_CONFIG; 1433 ret_val = -E1000_ERR_CONFIG;
1462 goto out; 1434 goto out;
@@ -1472,33 +1444,29 @@ s32 igb_get_phy_info_m88(struct e1000_hw *hw)
1472 goto out; 1444 goto out;
1473 } 1445 }
1474 1446
1475 ret_val = hw->phy.ops.read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 1447 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
1476 &phy_data);
1477 if (ret_val) 1448 if (ret_val)
1478 goto out; 1449 goto out;
1479 1450
1480 phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL) 1451 phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL)
1481 ? true 1452 ? true : false;
1482 : false;
1483 1453
1484 ret_val = igb_check_polarity_m88(hw); 1454 ret_val = igb_check_polarity_m88(hw);
1485 if (ret_val) 1455 if (ret_val)
1486 goto out; 1456 goto out;
1487 1457
1488 ret_val = hw->phy.ops.read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, 1458 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
1489 &phy_data);
1490 if (ret_val) 1459 if (ret_val)
1491 goto out; 1460 goto out;
1492 1461
1493 phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? true : false; 1462 phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? true : false;
1494 1463
1495 if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { 1464 if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
1496 ret_val = hw->phy.ops.get_cable_length(hw); 1465 ret_val = phy->ops.get_cable_length(hw);
1497 if (ret_val) 1466 if (ret_val)
1498 goto out; 1467 goto out;
1499 1468
1500 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_1000T_STATUS, 1469 ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data);
1501 &phy_data);
1502 if (ret_val) 1470 if (ret_val)
1503 goto out; 1471 goto out;
1504 1472
@@ -1552,8 +1520,7 @@ s32 igb_get_phy_info_igp(struct e1000_hw *hw)
1552 if (ret_val) 1520 if (ret_val)
1553 goto out; 1521 goto out;
1554 1522
1555 ret_val = hw->phy.ops.read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, 1523 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
1556 &data);
1557 if (ret_val) 1524 if (ret_val)
1558 goto out; 1525 goto out;
1559 1526
@@ -1561,12 +1528,11 @@ s32 igb_get_phy_info_igp(struct e1000_hw *hw)
1561 1528
1562 if ((data & IGP01E1000_PSSR_SPEED_MASK) == 1529 if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
1563 IGP01E1000_PSSR_SPEED_1000MBPS) { 1530 IGP01E1000_PSSR_SPEED_1000MBPS) {
1564 ret_val = hw->phy.ops.get_cable_length(hw); 1531 ret_val = phy->ops.get_cable_length(hw);
1565 if (ret_val) 1532 if (ret_val)
1566 goto out; 1533 goto out;
1567 1534
1568 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_1000T_STATUS, 1535 ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
1569 &data);
1570 if (ret_val) 1536 if (ret_val)
1571 goto out; 1537 goto out;
1572 1538
@@ -1599,12 +1565,12 @@ s32 igb_phy_sw_reset(struct e1000_hw *hw)
1599 s32 ret_val; 1565 s32 ret_val;
1600 u16 phy_ctrl; 1566 u16 phy_ctrl;
1601 1567
1602 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_CONTROL, &phy_ctrl); 1568 ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
1603 if (ret_val) 1569 if (ret_val)
1604 goto out; 1570 goto out;
1605 1571
1606 phy_ctrl |= MII_CR_RESET; 1572 phy_ctrl |= MII_CR_RESET;
1607 ret_val = hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, phy_ctrl); 1573 ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
1608 if (ret_val) 1574 if (ret_val)
1609 goto out; 1575 goto out;
1610 1576
@@ -1635,7 +1601,7 @@ s32 igb_phy_hw_reset(struct e1000_hw *hw)
1635 goto out; 1601 goto out;
1636 } 1602 }
1637 1603
1638 ret_val = igb_acquire_phy(hw); 1604 ret_val = phy->ops.acquire(hw);
1639 if (ret_val) 1605 if (ret_val)
1640 goto out; 1606 goto out;
1641 1607
@@ -1650,74 +1616,14 @@ s32 igb_phy_hw_reset(struct e1000_hw *hw)
1650 1616
1651 udelay(150); 1617 udelay(150);
1652 1618
1653 igb_release_phy(hw); 1619 phy->ops.release(hw);
1654 1620
1655 ret_val = igb_get_phy_cfg_done(hw); 1621 ret_val = phy->ops.get_cfg_done(hw);
1656 1622
1657out: 1623out:
1658 return ret_val; 1624 return ret_val;
1659} 1625}
1660 1626
1661/* Internal function pointers */
1662
1663/**
1664 * igb_get_phy_cfg_done - Generic PHY configuration done
1665 * @hw: pointer to the HW structure
1666 *
1667 * Return success if silicon family did not implement a family specific
1668 * get_cfg_done function.
1669 **/
1670static s32 igb_get_phy_cfg_done(struct e1000_hw *hw)
1671{
1672 if (hw->phy.ops.get_cfg_done)
1673 return hw->phy.ops.get_cfg_done(hw);
1674
1675 return 0;
1676}
1677
1678/**
1679 * igb_release_phy - Generic release PHY
1680 * @hw: pointer to the HW structure
1681 *
1682 * Return if silicon family does not require a semaphore when accessing the
1683 * PHY.
1684 **/
1685static void igb_release_phy(struct e1000_hw *hw)
1686{
1687 if (hw->phy.ops.release_phy)
1688 hw->phy.ops.release_phy(hw);
1689}
1690
1691/**
1692 * igb_acquire_phy - Generic acquire PHY
1693 * @hw: pointer to the HW structure
1694 *
1695 * Return success if silicon family does not require a semaphore when
1696 * accessing the PHY.
1697 **/
1698static s32 igb_acquire_phy(struct e1000_hw *hw)
1699{
1700 if (hw->phy.ops.acquire_phy)
1701 return hw->phy.ops.acquire_phy(hw);
1702
1703 return 0;
1704}
1705
1706/**
1707 * igb_phy_force_speed_duplex - Generic force PHY speed/duplex
1708 * @hw: pointer to the HW structure
1709 *
1710 * When the silicon family has not implemented a forced speed/duplex
1711 * function for the PHY, simply return 0.
1712 **/
1713s32 igb_phy_force_speed_duplex(struct e1000_hw *hw)
1714{
1715 if (hw->phy.ops.force_speed_duplex)
1716 return hw->phy.ops.force_speed_duplex(hw);
1717
1718 return 0;
1719}
1720
1721/** 1627/**
1722 * igb_phy_init_script_igp3 - Inits the IGP3 PHY 1628 * igb_phy_init_script_igp3 - Inits the IGP3 PHY
1723 * @hw: pointer to the HW structure 1629 * @hw: pointer to the HW structure
@@ -1730,75 +1636,75 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
1730 1636
1731 /* PHY init IGP 3 */ 1637 /* PHY init IGP 3 */
1732 /* Enable rise/fall, 10-mode work in class-A */ 1638 /* Enable rise/fall, 10-mode work in class-A */
1733 hw->phy.ops.write_phy_reg(hw, 0x2F5B, 0x9018); 1639 hw->phy.ops.write_reg(hw, 0x2F5B, 0x9018);
1734 /* Remove all caps from Replica path filter */ 1640 /* Remove all caps from Replica path filter */
1735 hw->phy.ops.write_phy_reg(hw, 0x2F52, 0x0000); 1641 hw->phy.ops.write_reg(hw, 0x2F52, 0x0000);
1736 /* Bias trimming for ADC, AFE and Driver (Default) */ 1642 /* Bias trimming for ADC, AFE and Driver (Default) */
1737 hw->phy.ops.write_phy_reg(hw, 0x2FB1, 0x8B24); 1643 hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24);
1738 /* Increase Hybrid poly bias */ 1644 /* Increase Hybrid poly bias */
1739 hw->phy.ops.write_phy_reg(hw, 0x2FB2, 0xF8F0); 1645 hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0);
1740 /* Add 4% to TX amplitude in Giga mode */ 1646 /* Add 4% to TX amplitude in Giga mode */
1741 hw->phy.ops.write_phy_reg(hw, 0x2010, 0x10B0); 1647 hw->phy.ops.write_reg(hw, 0x2010, 0x10B0);
1742 /* Disable trimming (TTT) */ 1648 /* Disable trimming (TTT) */
1743 hw->phy.ops.write_phy_reg(hw, 0x2011, 0x0000); 1649 hw->phy.ops.write_reg(hw, 0x2011, 0x0000);
1744 /* Poly DC correction to 94.6% + 2% for all channels */ 1650 /* Poly DC correction to 94.6% + 2% for all channels */
1745 hw->phy.ops.write_phy_reg(hw, 0x20DD, 0x249A); 1651 hw->phy.ops.write_reg(hw, 0x20DD, 0x249A);
1746 /* ABS DC correction to 95.9% */ 1652 /* ABS DC correction to 95.9% */
1747 hw->phy.ops.write_phy_reg(hw, 0x20DE, 0x00D3); 1653 hw->phy.ops.write_reg(hw, 0x20DE, 0x00D3);
1748 /* BG temp curve trim */ 1654 /* BG temp curve trim */
1749 hw->phy.ops.write_phy_reg(hw, 0x28B4, 0x04CE); 1655 hw->phy.ops.write_reg(hw, 0x28B4, 0x04CE);
1750 /* Increasing ADC OPAMP stage 1 currents to max */ 1656 /* Increasing ADC OPAMP stage 1 currents to max */
1751 hw->phy.ops.write_phy_reg(hw, 0x2F70, 0x29E4); 1657 hw->phy.ops.write_reg(hw, 0x2F70, 0x29E4);
1752 /* Force 1000 ( required for enabling PHY regs configuration) */ 1658 /* Force 1000 ( required for enabling PHY regs configuration) */
1753 hw->phy.ops.write_phy_reg(hw, 0x0000, 0x0140); 1659 hw->phy.ops.write_reg(hw, 0x0000, 0x0140);
1754 /* Set upd_freq to 6 */ 1660 /* Set upd_freq to 6 */
1755 hw->phy.ops.write_phy_reg(hw, 0x1F30, 0x1606); 1661 hw->phy.ops.write_reg(hw, 0x1F30, 0x1606);
1756 /* Disable NPDFE */ 1662 /* Disable NPDFE */
1757 hw->phy.ops.write_phy_reg(hw, 0x1F31, 0xB814); 1663 hw->phy.ops.write_reg(hw, 0x1F31, 0xB814);
1758 /* Disable adaptive fixed FFE (Default) */ 1664 /* Disable adaptive fixed FFE (Default) */
1759 hw->phy.ops.write_phy_reg(hw, 0x1F35, 0x002A); 1665 hw->phy.ops.write_reg(hw, 0x1F35, 0x002A);
1760 /* Enable FFE hysteresis */ 1666 /* Enable FFE hysteresis */
1761 hw->phy.ops.write_phy_reg(hw, 0x1F3E, 0x0067); 1667 hw->phy.ops.write_reg(hw, 0x1F3E, 0x0067);
1762 /* Fixed FFE for short cable lengths */ 1668 /* Fixed FFE for short cable lengths */
1763 hw->phy.ops.write_phy_reg(hw, 0x1F54, 0x0065); 1669 hw->phy.ops.write_reg(hw, 0x1F54, 0x0065);
1764 /* Fixed FFE for medium cable lengths */ 1670 /* Fixed FFE for medium cable lengths */
1765 hw->phy.ops.write_phy_reg(hw, 0x1F55, 0x002A); 1671 hw->phy.ops.write_reg(hw, 0x1F55, 0x002A);
1766 /* Fixed FFE for long cable lengths */ 1672 /* Fixed FFE for long cable lengths */
1767 hw->phy.ops.write_phy_reg(hw, 0x1F56, 0x002A); 1673 hw->phy.ops.write_reg(hw, 0x1F56, 0x002A);
1768 /* Enable Adaptive Clip Threshold */ 1674 /* Enable Adaptive Clip Threshold */
1769 hw->phy.ops.write_phy_reg(hw, 0x1F72, 0x3FB0); 1675 hw->phy.ops.write_reg(hw, 0x1F72, 0x3FB0);
1770 /* AHT reset limit to 1 */ 1676 /* AHT reset limit to 1 */
1771 hw->phy.ops.write_phy_reg(hw, 0x1F76, 0xC0FF); 1677 hw->phy.ops.write_reg(hw, 0x1F76, 0xC0FF);
1772 /* Set AHT master delay to 127 msec */ 1678 /* Set AHT master delay to 127 msec */
1773 hw->phy.ops.write_phy_reg(hw, 0x1F77, 0x1DEC); 1679 hw->phy.ops.write_reg(hw, 0x1F77, 0x1DEC);
1774 /* Set scan bits for AHT */ 1680 /* Set scan bits for AHT */
1775 hw->phy.ops.write_phy_reg(hw, 0x1F78, 0xF9EF); 1681 hw->phy.ops.write_reg(hw, 0x1F78, 0xF9EF);
1776 /* Set AHT Preset bits */ 1682 /* Set AHT Preset bits */
1777 hw->phy.ops.write_phy_reg(hw, 0x1F79, 0x0210); 1683 hw->phy.ops.write_reg(hw, 0x1F79, 0x0210);
1778 /* Change integ_factor of channel A to 3 */ 1684 /* Change integ_factor of channel A to 3 */
1779 hw->phy.ops.write_phy_reg(hw, 0x1895, 0x0003); 1685 hw->phy.ops.write_reg(hw, 0x1895, 0x0003);
1780 /* Change prop_factor of channels BCD to 8 */ 1686 /* Change prop_factor of channels BCD to 8 */
1781 hw->phy.ops.write_phy_reg(hw, 0x1796, 0x0008); 1687 hw->phy.ops.write_reg(hw, 0x1796, 0x0008);
1782 /* Change cg_icount + enable integbp for channels BCD */ 1688 /* Change cg_icount + enable integbp for channels BCD */
1783 hw->phy.ops.write_phy_reg(hw, 0x1798, 0xD008); 1689 hw->phy.ops.write_reg(hw, 0x1798, 0xD008);
1784 /* 1690 /*
1785 * Change cg_icount + enable integbp + change prop_factor_master 1691 * Change cg_icount + enable integbp + change prop_factor_master
1786 * to 8 for channel A 1692 * to 8 for channel A
1787 */ 1693 */
1788 hw->phy.ops.write_phy_reg(hw, 0x1898, 0xD918); 1694 hw->phy.ops.write_reg(hw, 0x1898, 0xD918);
1789 /* Disable AHT in Slave mode on channel A */ 1695 /* Disable AHT in Slave mode on channel A */
1790 hw->phy.ops.write_phy_reg(hw, 0x187A, 0x0800); 1696 hw->phy.ops.write_reg(hw, 0x187A, 0x0800);
1791 /* 1697 /*
1792 * Enable LPLU and disable AN to 1000 in non-D0a states, 1698 * Enable LPLU and disable AN to 1000 in non-D0a states,
1793 * Enable SPD+B2B 1699 * Enable SPD+B2B
1794 */ 1700 */
1795 hw->phy.ops.write_phy_reg(hw, 0x0019, 0x008D); 1701 hw->phy.ops.write_reg(hw, 0x0019, 0x008D);
1796 /* Enable restart AN on an1000_dis change */ 1702 /* Enable restart AN on an1000_dis change */
1797 hw->phy.ops.write_phy_reg(hw, 0x001B, 0x2080); 1703 hw->phy.ops.write_reg(hw, 0x001B, 0x2080);
1798 /* Enable wh_fifo read clock in 10/100 modes */ 1704 /* Enable wh_fifo read clock in 10/100 modes */
1799 hw->phy.ops.write_phy_reg(hw, 0x0014, 0x0045); 1705 hw->phy.ops.write_reg(hw, 0x0014, 0x0045);
1800 /* Restart AN, Speed selection is 1000 */ 1706 /* Restart AN, Speed selection is 1000 */
1801 hw->phy.ops.write_phy_reg(hw, 0x0000, 0x1340); 1707 hw->phy.ops.write_reg(hw, 0x0000, 0x1340);
1802 1708
1803 return 0; 1709 return 0;
1804} 1710}
diff --git a/drivers/net/igb/e1000_phy.h b/drivers/net/igb/e1000_phy.h
index 8f8fe0a780d1..3228a862031f 100644
--- a/drivers/net/igb/e1000_phy.h
+++ b/drivers/net/igb/e1000_phy.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation. 4 Copyright(c) 2007-2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -44,7 +44,6 @@ enum e1000_smart_speed {
44s32 igb_check_downshift(struct e1000_hw *hw); 44s32 igb_check_downshift(struct e1000_hw *hw);
45s32 igb_check_reset_block(struct e1000_hw *hw); 45s32 igb_check_reset_block(struct e1000_hw *hw);
46s32 igb_copper_link_autoneg(struct e1000_hw *hw); 46s32 igb_copper_link_autoneg(struct e1000_hw *hw);
47s32 igb_phy_force_speed_duplex(struct e1000_hw *hw);
48s32 igb_copper_link_setup_igp(struct e1000_hw *hw); 47s32 igb_copper_link_setup_igp(struct e1000_hw *hw);
49s32 igb_copper_link_setup_m88(struct e1000_hw *hw); 48s32 igb_copper_link_setup_m88(struct e1000_hw *hw);
50s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw); 49s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw);
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h
index bdf5d839c4bf..0bd7728fe469 100644
--- a/drivers/net/igb/e1000_regs.h
+++ b/drivers/net/igb/e1000_regs.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation. 4 Copyright(c) 2007-2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -73,8 +73,75 @@
73#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */ 73#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */
74#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ 74#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
75#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ 75#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
76#define E1000_RDFPCQ(_n) (0x02430 + (0x4 * (_n)))
77#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ 76#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */
77
78/* IEEE 1588 TIMESYNCH */
79#define E1000_TSYNCTXCTL 0x0B614
80#define E1000_TSYNCTXCTL_VALID (1<<0)
81#define E1000_TSYNCTXCTL_ENABLED (1<<4)
82#define E1000_TSYNCRXCTL 0x0B620
83#define E1000_TSYNCRXCTL_VALID (1<<0)
84#define E1000_TSYNCRXCTL_ENABLED (1<<4)
85enum {
86 E1000_TSYNCRXCTL_TYPE_L2_V2 = 0,
87 E1000_TSYNCRXCTL_TYPE_L4_V1 = (1<<1),
88 E1000_TSYNCRXCTL_TYPE_L2_L4_V2 = (1<<2),
89 E1000_TSYNCRXCTL_TYPE_ALL = (1<<3),
90 E1000_TSYNCRXCTL_TYPE_EVENT_V2 = (1<<3) | (1<<1),
91};
92#define E1000_TSYNCRXCFG 0x05F50
93enum {
94 E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE = 0<<0,
95 E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE = 1<<0,
96 E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE = 2<<0,
97 E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE = 3<<0,
98 E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE = 4<<0,
99
100 E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE = 0<<8,
101 E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE = 1<<8,
102 E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE = 2<<8,
103 E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE = 3<<8,
104 E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE = 8<<8,
105 E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE = 9<<8,
106 E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE = 0xA<<8,
107 E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE = 0xB<<8,
108 E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE = 0xC<<8,
109 E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE = 0xD<<8,
110};
111#define E1000_SYSTIML 0x0B600
112#define E1000_SYSTIMH 0x0B604
113#define E1000_TIMINCA 0x0B608
114
115#define E1000_RXMTRL 0x0B634
116#define E1000_RXSTMPL 0x0B624
117#define E1000_RXSTMPH 0x0B628
118#define E1000_RXSATRL 0x0B62C
119#define E1000_RXSATRH 0x0B630
120
121#define E1000_TXSTMPL 0x0B618
122#define E1000_TXSTMPH 0x0B61C
123
124#define E1000_ETQF0 0x05CB0
125#define E1000_ETQF1 0x05CB4
126#define E1000_ETQF2 0x05CB8
127#define E1000_ETQF3 0x05CBC
128#define E1000_ETQF4 0x05CC0
129#define E1000_ETQF5 0x05CC4
130#define E1000_ETQF6 0x05CC8
131#define E1000_ETQF7 0x05CCC
132
133/* Filtering Registers */
134#define E1000_SAQF(_n) (0x5980 + 4 * (_n))
135#define E1000_DAQF(_n) (0x59A0 + 4 * (_n))
136#define E1000_SPQF(_n) (0x59C0 + 4 * (_n))
137#define E1000_FTQF(_n) (0x59E0 + 4 * (_n))
138#define E1000_SAQF0 E1000_SAQF(0)
139#define E1000_DAQF0 E1000_DAQF(0)
140#define E1000_SPQF0 E1000_SPQF(0)
141#define E1000_FTQF0 E1000_FTQF(0)
142#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */
143#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */
144
78/* Split and Replication RX Control - RW */ 145/* Split and Replication RX Control - RW */
79/* 146/*
80 * Convenience macros 147 * Convenience macros
@@ -110,7 +177,6 @@
110 : (0x0E018 + ((_n) * 0x40))) 177 : (0x0E018 + ((_n) * 0x40)))
111#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) \ 178#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) \
112 : (0x0E028 + ((_n) * 0x40))) 179 : (0x0E028 + ((_n) * 0x40)))
113#define E1000_TARC(_n) (0x03840 + (_n << 8))
114#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8)) 180#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8))
115#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8)) 181#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8))
116#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) \ 182#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) \
@@ -226,16 +292,14 @@
226#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ 292#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
227 (0x054E4 + ((_i - 16) * 8))) 293 (0x054E4 + ((_i - 16) * 8)))
228#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ 294#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
229#define E1000_VMD_CTL 0x0581C /* VMDq Control - RW */ 295#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */
230#define E1000_WUC 0x05800 /* Wakeup Control - RW */ 296#define E1000_WUC 0x05800 /* Wakeup Control - RW */
231#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ 297#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */
232#define E1000_WUS 0x05810 /* Wakeup Status - RO */ 298#define E1000_WUS 0x05810 /* Wakeup Status - RO */
233#define E1000_MANC 0x05820 /* Management Control - RW */ 299#define E1000_MANC 0x05820 /* Management Control - RW */
234#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ 300#define E1000_IPAV 0x05838 /* IP Address Valid - RW */
235#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ 301#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */
236#define E1000_HOST_IF 0x08800 /* Host Interface */
237 302
238#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */
239#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */ 303#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */
240#define E1000_CCMCTL 0x05B48 /* CCM Control Register */ 304#define E1000_CCMCTL 0x05B48 /* CCM Control Register */
241#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */ 305#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */
@@ -243,9 +307,7 @@
243#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ 307#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
244#define E1000_SWSM 0x05B50 /* SW Semaphore */ 308#define E1000_SWSM 0x05B50 /* SW Semaphore */
245#define E1000_FWSM 0x05B54 /* FW Semaphore */ 309#define E1000_FWSM 0x05B54 /* FW Semaphore */
246#define E1000_DCA_ID 0x05B70 /* DCA Requester ID Information - RO */
247#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */ 310#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */
248#define E1000_HICR 0x08F00 /* Host Inteface Control */
249 311
250/* RSS registers */ 312/* RSS registers */
251#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ 313#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */
@@ -254,18 +316,27 @@
254#define E1000_IMIRVP 0x05AC0 /* Immediate Interrupt RX VLAN Priority - RW */ 316#define E1000_IMIRVP 0x05AC0 /* Immediate Interrupt RX VLAN Priority - RW */
255/* MSI-X Allocation Register (_i) - RW */ 317/* MSI-X Allocation Register (_i) - RW */
256#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4)) 318#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4))
257/* MSI-X Table entry addr low reg 0 - RW */
258#define E1000_MSIXTADD(_i) (0x0C000 + ((_i) * 0x10))
259/* MSI-X Table entry addr upper reg 0 - RW */
260#define E1000_MSIXTUADD(_i) (0x0C004 + ((_i) * 0x10))
261/* MSI-X Table entry message reg 0 - RW */
262#define E1000_MSIXTMSG(_i) (0x0C008 + ((_i) * 0x10))
263/* MSI-X Table entry vector ctrl reg 0 - RW */
264#define E1000_MSIXVCTRL(_i) (0x0C00C + ((_i) * 0x10))
265/* Redirection Table - RW Array */ 319/* Redirection Table - RW Array */
266#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) 320#define E1000_RETA(_i) (0x05C00 + ((_i) * 4))
267#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */ 321#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */
268 322
323/* VT Registers */
324#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */
325#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */
326#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */
327#define E1000_VFRE 0x00C8C /* VF Receive Enables */
328#define E1000_VFTE 0x00C90 /* VF Transmit Enables */
329#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */
330#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */
331#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */
332#define E1000_IOVTCL 0x05BBC /* IOV Control Register */
333/* These act per VF so an array friendly macro is used */
334#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n)))
335#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n)))
336#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n)))
337#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine
338 * Filter - RW */
339
269#define wr32(reg, value) (writel(value, hw->hw_addr + reg)) 340#define wr32(reg, value) (writel(value, hw->hw_addr + reg))
270#define rd32(reg) (readl(hw->hw_addr + reg)) 341#define rd32(reg) (readl(hw->hw_addr + reg))
271#define wrfl() ((void)rd32(E1000_STATUS)) 342#define wrfl() ((void)rd32(E1000_STATUS))
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index aebef8e48e76..e18ac1bf45ff 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation. 4 Copyright(c) 2007-2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -34,25 +34,15 @@
34#include "e1000_mac.h" 34#include "e1000_mac.h"
35#include "e1000_82575.h" 35#include "e1000_82575.h"
36 36
37struct igb_adapter; 37#include <linux/clocksource.h>
38 38#include <linux/timecompare.h>
39#ifdef CONFIG_IGB_LRO 39#include <linux/net_tstamp.h>
40#include <linux/inet_lro.h>
41#define MAX_LRO_AGGR 32
42#define MAX_LRO_DESCRIPTORS 8
43#endif
44 40
45/* Interrupt defines */ 41struct igb_adapter;
46#define IGB_MIN_DYN_ITR 3000
47#define IGB_MAX_DYN_ITR 96000
48 42
49/* ((1000000000ns / (6000ints/s * 1024ns)) << 2 = 648 */ 43/* ((1000000000ns / (6000ints/s * 1024ns)) << 2 = 648 */
50#define IGB_START_ITR 648 44#define IGB_START_ITR 648
51 45
52#define IGB_DYN_ITR_PACKET_THRESHOLD 2
53#define IGB_DYN_ITR_LENGTH_LOW 200
54#define IGB_DYN_ITR_LENGTH_HIGH 1000
55
56/* TX/RX descriptor defines */ 46/* TX/RX descriptor defines */
57#define IGB_DEFAULT_TXD 256 47#define IGB_DEFAULT_TXD 256
58#define IGB_MIN_TXD 80 48#define IGB_MIN_TXD 80
@@ -67,8 +57,21 @@ struct igb_adapter;
67#define IGB_MIN_ITR_USECS 10 57#define IGB_MIN_ITR_USECS 10
68 58
69/* Transmit and receive queues */ 59/* Transmit and receive queues */
70#define IGB_MAX_RX_QUEUES 4 60#define IGB_MAX_RX_QUEUES (adapter->vfs_allocated_count ? \
71#define IGB_MAX_TX_QUEUES 4 61 (adapter->vfs_allocated_count > 6 ? 1 : 2) : 4)
62#define IGB_MAX_TX_QUEUES IGB_MAX_RX_QUEUES
63#define IGB_ABS_MAX_TX_QUEUES 4
64
65#define IGB_MAX_VF_MC_ENTRIES 30
66#define IGB_MAX_VF_FUNCTIONS 8
67#define IGB_MAX_VFTA_ENTRIES 128
68
69struct vf_data_storage {
70 unsigned char vf_mac_addresses[ETH_ALEN];
71 u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
72 u16 num_vf_mc_hashes;
73 bool clear_to_send;
74};
72 75
73/* RX descriptor control thresholds. 76/* RX descriptor control thresholds.
74 * PTHRESH - MAC will consider prefetch if it has fewer than this number of 77 * PTHRESH - MAC will consider prefetch if it has fewer than this number of
@@ -94,12 +97,9 @@ struct igb_adapter;
94#define IGB_RXBUFFER_512 512 97#define IGB_RXBUFFER_512 512
95#define IGB_RXBUFFER_1024 1024 98#define IGB_RXBUFFER_1024 1024
96#define IGB_RXBUFFER_2048 2048 99#define IGB_RXBUFFER_2048 2048
97#define IGB_RXBUFFER_4096 4096
98#define IGB_RXBUFFER_8192 8192
99#define IGB_RXBUFFER_16384 16384 100#define IGB_RXBUFFER_16384 16384
100 101
101/* Packet Buffer allocations */ 102#define MAX_STD_JUMBO_FRAME_SIZE 9234
102
103 103
104/* How many Tx Descriptors do we need to call netif_wake_queue ? */ 104/* How many Tx Descriptors do we need to call netif_wake_queue ? */
105#define IGB_TX_QUEUE_WAKE 16 105#define IGB_TX_QUEUE_WAKE 16
@@ -176,10 +176,6 @@ struct igb_ring {
176 struct napi_struct napi; 176 struct napi_struct napi;
177 int set_itr; 177 int set_itr;
178 struct igb_ring *buddy; 178 struct igb_ring *buddy;
179#ifdef CONFIG_IGB_LRO
180 struct net_lro_mgr lro_mgr;
181 bool lro_used;
182#endif
183 }; 179 };
184 }; 180 };
185 181
@@ -196,9 +192,6 @@ struct igb_ring {
196 (&(((union e1000_adv_tx_desc *)((R).desc))[i])) 192 (&(((union e1000_adv_tx_desc *)((R).desc))[i]))
197#define E1000_TX_CTXTDESC_ADV(R, i) \ 193#define E1000_TX_CTXTDESC_ADV(R, i) \
198 (&(((struct e1000_adv_tx_context_desc *)((R).desc))[i])) 194 (&(((struct e1000_adv_tx_context_desc *)((R).desc))[i]))
199#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
200#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc)
201#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc)
202 195
203/* board specific private data structure */ 196/* board specific private data structure */
204 197
@@ -248,7 +241,6 @@ struct igb_adapter {
248 241
249 u64 hw_csum_err; 242 u64 hw_csum_err;
250 u64 hw_csum_good; 243 u64 hw_csum_good;
251 u64 rx_hdr_split;
252 u32 alloc_rx_buff_failed; 244 u32 alloc_rx_buff_failed;
253 bool rx_csum; 245 bool rx_csum;
254 u32 gorc; 246 u32 gorc;
@@ -262,6 +254,10 @@ struct igb_adapter {
262 struct napi_struct napi; 254 struct napi_struct napi;
263 struct pci_dev *pdev; 255 struct pci_dev *pdev;
264 struct net_device_stats net_stats; 256 struct net_device_stats net_stats;
257 struct cyclecounter cycles;
258 struct timecounter clock;
259 struct timecompare compare;
260 struct hwtstamp_config hwtstamp_config;
265 261
266 /* structs defined in e1000_hw.h */ 262 /* structs defined in e1000_hw.h */
267 struct e1000_hw hw; 263 struct e1000_hw hw;
@@ -283,27 +279,17 @@ struct igb_adapter {
283 unsigned int flags; 279 unsigned int flags;
284 u32 eeprom_wol; 280 u32 eeprom_wol;
285 281
286 /* for ioport free */ 282 struct igb_ring *multi_tx_table[IGB_ABS_MAX_TX_QUEUES];
287 int bars;
288 int need_ioport;
289
290 struct igb_ring *multi_tx_table[IGB_MAX_TX_QUEUES];
291#ifdef CONFIG_IGB_LRO
292 unsigned int lro_max_aggr;
293 unsigned int lro_aggregated;
294 unsigned int lro_flushed;
295 unsigned int lro_no_desc;
296#endif
297 unsigned int tx_ring_count; 283 unsigned int tx_ring_count;
298 unsigned int rx_ring_count; 284 unsigned int rx_ring_count;
285 unsigned int vfs_allocated_count;
286 struct vf_data_storage *vf_data;
299}; 287};
300 288
301#define IGB_FLAG_HAS_MSI (1 << 0) 289#define IGB_FLAG_HAS_MSI (1 << 0)
302#define IGB_FLAG_MSI_ENABLE (1 << 1) 290#define IGB_FLAG_DCA_ENABLED (1 << 1)
303#define IGB_FLAG_DCA_ENABLED (1 << 2) 291#define IGB_FLAG_QUAD_PORT_A (1 << 2)
304#define IGB_FLAG_IN_NETPOLL (1 << 3) 292#define IGB_FLAG_NEED_CTX_IDX (1 << 3)
305#define IGB_FLAG_QUAD_PORT_A (1 << 4)
306#define IGB_FLAG_NEED_CTX_IDX (1 << 5)
307 293
308enum e1000_state_t { 294enum e1000_state_t {
309 __IGB_TESTING, 295 __IGB_TESTING,
@@ -333,24 +319,24 @@ extern void igb_set_ethtool_ops(struct net_device *);
333 319
334static inline s32 igb_reset_phy(struct e1000_hw *hw) 320static inline s32 igb_reset_phy(struct e1000_hw *hw)
335{ 321{
336 if (hw->phy.ops.reset_phy) 322 if (hw->phy.ops.reset)
337 return hw->phy.ops.reset_phy(hw); 323 return hw->phy.ops.reset(hw);
338 324
339 return 0; 325 return 0;
340} 326}
341 327
342static inline s32 igb_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data) 328static inline s32 igb_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
343{ 329{
344 if (hw->phy.ops.read_phy_reg) 330 if (hw->phy.ops.read_reg)
345 return hw->phy.ops.read_phy_reg(hw, offset, data); 331 return hw->phy.ops.read_reg(hw, offset, data);
346 332
347 return 0; 333 return 0;
348} 334}
349 335
350static inline s32 igb_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data) 336static inline s32 igb_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
351{ 337{
352 if (hw->phy.ops.write_phy_reg) 338 if (hw->phy.ops.write_reg)
353 return hw->phy.ops.write_phy_reg(hw, offset, data); 339 return hw->phy.ops.write_reg(hw, offset, data);
354 340
355 return 0; 341 return 0;
356} 342}
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index 3c831f1472ad..fb09c8ad9f0d 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation. 4 Copyright(c) 2007-2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -88,16 +88,11 @@ static const struct igb_stats igb_gstrings_stats[] = {
88 { "rx_long_byte_count", IGB_STAT(stats.gorc) }, 88 { "rx_long_byte_count", IGB_STAT(stats.gorc) },
89 { "rx_csum_offload_good", IGB_STAT(hw_csum_good) }, 89 { "rx_csum_offload_good", IGB_STAT(hw_csum_good) },
90 { "rx_csum_offload_errors", IGB_STAT(hw_csum_err) }, 90 { "rx_csum_offload_errors", IGB_STAT(hw_csum_err) },
91 { "rx_header_split", IGB_STAT(rx_hdr_split) }, 91 { "tx_dma_out_of_sync", IGB_STAT(stats.doosync) },
92 { "alloc_rx_buff_failed", IGB_STAT(alloc_rx_buff_failed) }, 92 { "alloc_rx_buff_failed", IGB_STAT(alloc_rx_buff_failed) },
93 { "tx_smbus", IGB_STAT(stats.mgptc) }, 93 { "tx_smbus", IGB_STAT(stats.mgptc) },
94 { "rx_smbus", IGB_STAT(stats.mgprc) }, 94 { "rx_smbus", IGB_STAT(stats.mgprc) },
95 { "dropped_smbus", IGB_STAT(stats.mgpdc) }, 95 { "dropped_smbus", IGB_STAT(stats.mgpdc) },
96#ifdef CONFIG_IGB_LRO
97 { "lro_aggregated", IGB_STAT(lro_aggregated) },
98 { "lro_flushed", IGB_STAT(lro_flushed) },
99 { "lro_no_desc", IGB_STAT(lro_no_desc) },
100#endif
101}; 96};
102 97
103#define IGB_QUEUE_STATS_LEN \ 98#define IGB_QUEUE_STATS_LEN \
@@ -293,15 +288,15 @@ static int igb_set_rx_csum(struct net_device *netdev, u32 data)
293 288
294static u32 igb_get_tx_csum(struct net_device *netdev) 289static u32 igb_get_tx_csum(struct net_device *netdev)
295{ 290{
296 return (netdev->features & NETIF_F_HW_CSUM) != 0; 291 return (netdev->features & NETIF_F_IP_CSUM) != 0;
297} 292}
298 293
299static int igb_set_tx_csum(struct net_device *netdev, u32 data) 294static int igb_set_tx_csum(struct net_device *netdev, u32 data)
300{ 295{
301 if (data) 296 if (data)
302 netdev->features |= NETIF_F_HW_CSUM; 297 netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
303 else 298 else
304 netdev->features &= ~NETIF_F_HW_CSUM; 299 netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
305 300
306 return 0; 301 return 0;
307} 302}
@@ -310,15 +305,13 @@ static int igb_set_tso(struct net_device *netdev, u32 data)
310{ 305{
311 struct igb_adapter *adapter = netdev_priv(netdev); 306 struct igb_adapter *adapter = netdev_priv(netdev);
312 307
313 if (data) 308 if (data) {
314 netdev->features |= NETIF_F_TSO; 309 netdev->features |= NETIF_F_TSO;
315 else
316 netdev->features &= ~NETIF_F_TSO;
317
318 if (data)
319 netdev->features |= NETIF_F_TSO6; 310 netdev->features |= NETIF_F_TSO6;
320 else 311 } else {
312 netdev->features &= ~NETIF_F_TSO;
321 netdev->features &= ~NETIF_F_TSO6; 313 netdev->features &= ~NETIF_F_TSO6;
314 }
322 315
323 dev_info(&adapter->pdev->dev, "TSO is %s\n", 316 dev_info(&adapter->pdev->dev, "TSO is %s\n",
324 data ? "Enabled" : "Disabled"); 317 data ? "Enabled" : "Disabled");
@@ -405,7 +398,7 @@ static void igb_get_regs(struct net_device *netdev,
405 regs_buff[34] = rd32(E1000_RLPML); 398 regs_buff[34] = rd32(E1000_RLPML);
406 regs_buff[35] = rd32(E1000_RFCTL); 399 regs_buff[35] = rd32(E1000_RFCTL);
407 regs_buff[36] = rd32(E1000_MRQC); 400 regs_buff[36] = rd32(E1000_MRQC);
408 regs_buff[37] = rd32(E1000_VMD_CTL); 401 regs_buff[37] = rd32(E1000_VT_CTL);
409 402
410 /* Transmit */ 403 /* Transmit */
411 regs_buff[38] = rd32(E1000_TCTL); 404 regs_buff[38] = rd32(E1000_TCTL);
@@ -598,12 +591,12 @@ static int igb_get_eeprom(struct net_device *netdev,
598 return -ENOMEM; 591 return -ENOMEM;
599 592
600 if (hw->nvm.type == e1000_nvm_eeprom_spi) 593 if (hw->nvm.type == e1000_nvm_eeprom_spi)
601 ret_val = hw->nvm.ops.read_nvm(hw, first_word, 594 ret_val = hw->nvm.ops.read(hw, first_word,
602 last_word - first_word + 1, 595 last_word - first_word + 1,
603 eeprom_buff); 596 eeprom_buff);
604 else { 597 else {
605 for (i = 0; i < last_word - first_word + 1; i++) { 598 for (i = 0; i < last_word - first_word + 1; i++) {
606 ret_val = hw->nvm.ops.read_nvm(hw, first_word + i, 1, 599 ret_val = hw->nvm.ops.read(hw, first_word + i, 1,
607 &eeprom_buff[i]); 600 &eeprom_buff[i]);
608 if (ret_val) 601 if (ret_val)
609 break; 602 break;
@@ -650,14 +643,14 @@ static int igb_set_eeprom(struct net_device *netdev,
650 if (eeprom->offset & 1) { 643 if (eeprom->offset & 1) {
651 /* need read/modify/write of first changed EEPROM word */ 644 /* need read/modify/write of first changed EEPROM word */
652 /* only the second byte of the word is being modified */ 645 /* only the second byte of the word is being modified */
653 ret_val = hw->nvm.ops.read_nvm(hw, first_word, 1, 646 ret_val = hw->nvm.ops.read(hw, first_word, 1,
654 &eeprom_buff[0]); 647 &eeprom_buff[0]);
655 ptr++; 648 ptr++;
656 } 649 }
657 if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { 650 if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
658 /* need read/modify/write of last changed EEPROM word */ 651 /* need read/modify/write of last changed EEPROM word */
659 /* only the first byte of the word is being modified */ 652 /* only the first byte of the word is being modified */
660 ret_val = hw->nvm.ops.read_nvm(hw, last_word, 1, 653 ret_val = hw->nvm.ops.read(hw, last_word, 1,
661 &eeprom_buff[last_word - first_word]); 654 &eeprom_buff[last_word - first_word]);
662 } 655 }
663 656
@@ -670,7 +663,7 @@ static int igb_set_eeprom(struct net_device *netdev,
670 for (i = 0; i < last_word - first_word + 1; i++) 663 for (i = 0; i < last_word - first_word + 1; i++)
671 eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]); 664 eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
672 665
673 ret_val = hw->nvm.ops.write_nvm(hw, first_word, 666 ret_val = hw->nvm.ops.write(hw, first_word,
674 last_word - first_word + 1, eeprom_buff); 667 last_word - first_word + 1, eeprom_buff);
675 668
676 /* Update the checksum over the first part of the EEPROM if needed 669 /* Update the checksum over the first part of the EEPROM if needed
@@ -694,7 +687,7 @@ static void igb_get_drvinfo(struct net_device *netdev,
694 687
695 /* EEPROM image version # is reported as firmware version # for 688 /* EEPROM image version # is reported as firmware version # for
696 * 82575 controllers */ 689 * 82575 controllers */
697 adapter->hw.nvm.ops.read_nvm(&adapter->hw, 5, 1, &eeprom_data); 690 adapter->hw.nvm.ops.read(&adapter->hw, 5, 1, &eeprom_data);
698 sprintf(firmware_version, "%d.%d-%d", 691 sprintf(firmware_version, "%d.%d-%d",
699 (eeprom_data & 0xF000) >> 12, 692 (eeprom_data & 0xF000) >> 12,
700 (eeprom_data & 0x0FF0) >> 4, 693 (eeprom_data & 0x0FF0) >> 4,
@@ -863,23 +856,26 @@ static struct igb_reg_test reg_test_82576[] = {
863 { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 856 { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
864 { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 857 { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
865 { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, 858 { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
866 { E1000_RDBAL(4), 0x40, 8, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 859 { E1000_RDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
867 { E1000_RDBAH(4), 0x40, 8, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 860 { E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
868 { E1000_RDLEN(4), 0x40, 8, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, 861 { E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
869 /* Enable all four RX queues before testing. */ 862 /* Enable all RX queues before testing. */
870 { E1000_RXDCTL(0), 0x100, 1, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE }, 863 { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
864 { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
871 /* RDH is read-only for 82576, only test RDT. */ 865 /* RDH is read-only for 82576, only test RDT. */
872 { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 866 { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
867 { E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
873 { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 }, 868 { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
869 { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, 0 },
874 { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 }, 870 { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
875 { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, 871 { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
876 { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF }, 872 { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
877 { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 873 { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
878 { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 874 { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
879 { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, 875 { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
880 { E1000_TDBAL(4), 0x40, 8, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, 876 { E1000_TDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
881 { E1000_TDBAH(4), 0x40, 8, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 877 { E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
882 { E1000_TDLEN(4), 0x40, 8, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF }, 878 { E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
883 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 }, 879 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
884 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB }, 880 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
885 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF }, 881 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
@@ -926,12 +922,13 @@ static struct igb_reg_test reg_test_82575[] = {
926static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data, 922static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
927 int reg, u32 mask, u32 write) 923 int reg, u32 mask, u32 write)
928{ 924{
925 struct e1000_hw *hw = &adapter->hw;
929 u32 pat, val; 926 u32 pat, val;
930 u32 _test[] = 927 u32 _test[] =
931 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; 928 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
932 for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { 929 for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
933 writel((_test[pat] & write), (adapter->hw.hw_addr + reg)); 930 wr32(reg, (_test[pat] & write));
934 val = readl(adapter->hw.hw_addr + reg); 931 val = rd32(reg);
935 if (val != (_test[pat] & write & mask)) { 932 if (val != (_test[pat] & write & mask)) {
936 dev_err(&adapter->pdev->dev, "pattern test reg %04X " 933 dev_err(&adapter->pdev->dev, "pattern test reg %04X "
937 "failed: got 0x%08X expected 0x%08X\n", 934 "failed: got 0x%08X expected 0x%08X\n",
@@ -946,9 +943,10 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
946static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data, 943static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
947 int reg, u32 mask, u32 write) 944 int reg, u32 mask, u32 write)
948{ 945{
946 struct e1000_hw *hw = &adapter->hw;
949 u32 val; 947 u32 val;
950 writel((write & mask), (adapter->hw.hw_addr + reg)); 948 wr32(reg, write & mask);
951 val = readl(adapter->hw.hw_addr + reg); 949 val = rd32(reg);
952 if ((write & mask) != (val & mask)) { 950 if ((write & mask) != (val & mask)) {
953 dev_err(&adapter->pdev->dev, "set/check reg %04X test failed:" 951 dev_err(&adapter->pdev->dev, "set/check reg %04X test failed:"
954 " got 0x%08X expected 0x%08X\n", reg, 952 " got 0x%08X expected 0x%08X\n", reg,
@@ -1014,12 +1012,14 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
1014 for (i = 0; i < test->array_len; i++) { 1012 for (i = 0; i < test->array_len; i++) {
1015 switch (test->test_type) { 1013 switch (test->test_type) {
1016 case PATTERN_TEST: 1014 case PATTERN_TEST:
1017 REG_PATTERN_TEST(test->reg + (i * test->reg_offset), 1015 REG_PATTERN_TEST(test->reg +
1016 (i * test->reg_offset),
1018 test->mask, 1017 test->mask,
1019 test->write); 1018 test->write);
1020 break; 1019 break;
1021 case SET_READ_TEST: 1020 case SET_READ_TEST:
1022 REG_SET_AND_CHECK(test->reg + (i * test->reg_offset), 1021 REG_SET_AND_CHECK(test->reg +
1022 (i * test->reg_offset),
1023 test->mask, 1023 test->mask,
1024 test->write); 1024 test->write);
1025 break; 1025 break;
@@ -1061,7 +1061,7 @@ static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
1061 *data = 0; 1061 *data = 0;
1062 /* Read and add up the contents of the EEPROM */ 1062 /* Read and add up the contents of the EEPROM */
1063 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { 1063 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
1064 if ((adapter->hw.nvm.ops.read_nvm(&adapter->hw, i, 1, &temp)) 1064 if ((adapter->hw.nvm.ops.read(&adapter->hw, i, 1, &temp))
1065 < 0) { 1065 < 0) {
1066 *data = 1; 1066 *data = 1;
1067 break; 1067 break;
@@ -1091,16 +1091,17 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1091{ 1091{
1092 struct e1000_hw *hw = &adapter->hw; 1092 struct e1000_hw *hw = &adapter->hw;
1093 struct net_device *netdev = adapter->netdev; 1093 struct net_device *netdev = adapter->netdev;
1094 u32 mask, i = 0, shared_int = true; 1094 u32 mask, ics_mask, i = 0, shared_int = true;
1095 u32 irq = adapter->pdev->irq; 1095 u32 irq = adapter->pdev->irq;
1096 1096
1097 *data = 0; 1097 *data = 0;
1098 1098
1099 /* Hook up test interrupt handler just for this test */ 1099 /* Hook up test interrupt handler just for this test */
1100 if (adapter->msix_entries) { 1100 if (adapter->msix_entries)
1101 /* NOTE: we don't test MSI-X interrupts here, yet */ 1101 /* NOTE: we don't test MSI-X interrupts here, yet */
1102 return 0; 1102 return 0;
1103 } else if (adapter->flags & IGB_FLAG_HAS_MSI) { 1103
1104 if (adapter->flags & IGB_FLAG_HAS_MSI) {
1104 shared_int = false; 1105 shared_int = false;
1105 if (request_irq(irq, &igb_test_intr, 0, netdev->name, netdev)) { 1106 if (request_irq(irq, &igb_test_intr, 0, netdev->name, netdev)) {
1106 *data = 1; 1107 *data = 1;
@@ -1116,16 +1117,31 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1116 } 1117 }
1117 dev_info(&adapter->pdev->dev, "testing %s interrupt\n", 1118 dev_info(&adapter->pdev->dev, "testing %s interrupt\n",
1118 (shared_int ? "shared" : "unshared")); 1119 (shared_int ? "shared" : "unshared"));
1119
1120 /* Disable all the interrupts */ 1120 /* Disable all the interrupts */
1121 wr32(E1000_IMC, 0xFFFFFFFF); 1121 wr32(E1000_IMC, 0xFFFFFFFF);
1122 msleep(10); 1122 msleep(10);
1123 1123
1124 /* Define all writable bits for ICS */
1125 switch(hw->mac.type) {
1126 case e1000_82575:
1127 ics_mask = 0x37F47EDD;
1128 break;
1129 case e1000_82576:
1130 ics_mask = 0x77D4FBFD;
1131 break;
1132 default:
1133 ics_mask = 0x7FFFFFFF;
1134 break;
1135 }
1136
1124 /* Test each interrupt */ 1137 /* Test each interrupt */
1125 for (; i < 10; i++) { 1138 for (; i < 31; i++) {
1126 /* Interrupt to test */ 1139 /* Interrupt to test */
1127 mask = 1 << i; 1140 mask = 1 << i;
1128 1141
1142 if (!(mask & ics_mask))
1143 continue;
1144
1129 if (!shared_int) { 1145 if (!shared_int) {
1130 /* Disable the interrupt to be reported in 1146 /* Disable the interrupt to be reported in
1131 * the cause register and then force the same 1147 * the cause register and then force the same
@@ -1134,8 +1150,12 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1134 * test failed. 1150 * test failed.
1135 */ 1151 */
1136 adapter->test_icr = 0; 1152 adapter->test_icr = 0;
1137 wr32(E1000_IMC, ~mask & 0x00007FFF); 1153
1138 wr32(E1000_ICS, ~mask & 0x00007FFF); 1154 /* Flush any pending interrupts */
1155 wr32(E1000_ICR, ~0);
1156
1157 wr32(E1000_IMC, mask);
1158 wr32(E1000_ICS, mask);
1139 msleep(10); 1159 msleep(10);
1140 1160
1141 if (adapter->test_icr & mask) { 1161 if (adapter->test_icr & mask) {
@@ -1151,6 +1171,10 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1151 * test failed. 1171 * test failed.
1152 */ 1172 */
1153 adapter->test_icr = 0; 1173 adapter->test_icr = 0;
1174
1175 /* Flush any pending interrupts */
1176 wr32(E1000_ICR, ~0);
1177
1154 wr32(E1000_IMS, mask); 1178 wr32(E1000_IMS, mask);
1155 wr32(E1000_ICS, mask); 1179 wr32(E1000_ICS, mask);
1156 msleep(10); 1180 msleep(10);
@@ -1168,11 +1192,15 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1168 * test failed. 1192 * test failed.
1169 */ 1193 */
1170 adapter->test_icr = 0; 1194 adapter->test_icr = 0;
1171 wr32(E1000_IMC, ~mask & 0x00007FFF); 1195
1172 wr32(E1000_ICS, ~mask & 0x00007FFF); 1196 /* Flush any pending interrupts */
1197 wr32(E1000_ICR, ~0);
1198
1199 wr32(E1000_IMC, ~mask);
1200 wr32(E1000_ICS, ~mask);
1173 msleep(10); 1201 msleep(10);
1174 1202
1175 if (adapter->test_icr) { 1203 if (adapter->test_icr & mask) {
1176 *data = 5; 1204 *data = 5;
1177 break; 1205 break;
1178 } 1206 }
@@ -1180,7 +1208,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1180 } 1208 }
1181 1209
1182 /* Disable all the interrupts */ 1210 /* Disable all the interrupts */
1183 wr32(E1000_IMC, 0xFFFFFFFF); 1211 wr32(E1000_IMC, ~0);
1184 msleep(10); 1212 msleep(10);
1185 1213
1186 /* Unhook test interrupt handler */ 1214 /* Unhook test interrupt handler */
@@ -1244,6 +1272,7 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
1244 struct igb_ring *tx_ring = &adapter->test_tx_ring; 1272 struct igb_ring *tx_ring = &adapter->test_tx_ring;
1245 struct igb_ring *rx_ring = &adapter->test_rx_ring; 1273 struct igb_ring *rx_ring = &adapter->test_rx_ring;
1246 struct pci_dev *pdev = adapter->pdev; 1274 struct pci_dev *pdev = adapter->pdev;
1275 struct igb_buffer *buffer_info;
1247 u32 rctl; 1276 u32 rctl;
1248 int i, ret_val; 1277 int i, ret_val;
1249 1278
@@ -1260,7 +1289,7 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
1260 goto err_nomem; 1289 goto err_nomem;
1261 } 1290 }
1262 1291
1263 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); 1292 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
1264 tx_ring->size = ALIGN(tx_ring->size, 4096); 1293 tx_ring->size = ALIGN(tx_ring->size, 4096);
1265 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size, 1294 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
1266 &tx_ring->dma); 1295 &tx_ring->dma);
@@ -1274,7 +1303,7 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
1274 ((u64) tx_ring->dma & 0x00000000FFFFFFFF)); 1303 ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
1275 wr32(E1000_TDBAH(0), ((u64) tx_ring->dma >> 32)); 1304 wr32(E1000_TDBAH(0), ((u64) tx_ring->dma >> 32));
1276 wr32(E1000_TDLEN(0), 1305 wr32(E1000_TDLEN(0),
1277 tx_ring->count * sizeof(struct e1000_tx_desc)); 1306 tx_ring->count * sizeof(union e1000_adv_tx_desc));
1278 wr32(E1000_TDH(0), 0); 1307 wr32(E1000_TDH(0), 0);
1279 wr32(E1000_TDT(0), 0); 1308 wr32(E1000_TDT(0), 0);
1280 wr32(E1000_TCTL, 1309 wr32(E1000_TCTL,
@@ -1283,27 +1312,31 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
1283 E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT); 1312 E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
1284 1313
1285 for (i = 0; i < tx_ring->count; i++) { 1314 for (i = 0; i < tx_ring->count; i++) {
1286 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); 1315 union e1000_adv_tx_desc *tx_desc;
1287 struct sk_buff *skb; 1316 struct sk_buff *skb;
1288 unsigned int size = 1024; 1317 unsigned int size = 1024;
1289 1318
1319 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
1290 skb = alloc_skb(size, GFP_KERNEL); 1320 skb = alloc_skb(size, GFP_KERNEL);
1291 if (!skb) { 1321 if (!skb) {
1292 ret_val = 3; 1322 ret_val = 3;
1293 goto err_nomem; 1323 goto err_nomem;
1294 } 1324 }
1295 skb_put(skb, size); 1325 skb_put(skb, size);
1296 tx_ring->buffer_info[i].skb = skb; 1326 buffer_info = &tx_ring->buffer_info[i];
1297 tx_ring->buffer_info[i].length = skb->len; 1327 buffer_info->skb = skb;
1298 tx_ring->buffer_info[i].dma = 1328 buffer_info->length = skb->len;
1299 pci_map_single(pdev, skb->data, skb->len, 1329 buffer_info->dma = pci_map_single(pdev, skb->data, skb->len,
1300 PCI_DMA_TODEVICE); 1330 PCI_DMA_TODEVICE);
1301 tx_desc->buffer_addr = cpu_to_le64(tx_ring->buffer_info[i].dma); 1331 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
1302 tx_desc->lower.data = cpu_to_le32(skb->len); 1332 tx_desc->read.olinfo_status = cpu_to_le32(skb->len) <<
1303 tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | 1333 E1000_ADVTXD_PAYLEN_SHIFT;
1304 E1000_TXD_CMD_IFCS | 1334 tx_desc->read.cmd_type_len = cpu_to_le32(skb->len);
1305 E1000_TXD_CMD_RS); 1335 tx_desc->read.cmd_type_len |= cpu_to_le32(E1000_TXD_CMD_EOP |
1306 tx_desc->upper.data = 0; 1336 E1000_TXD_CMD_IFCS |
1337 E1000_TXD_CMD_RS |
1338 E1000_ADVTXD_DTYP_DATA |
1339 E1000_ADVTXD_DCMD_DEXT);
1307 } 1340 }
1308 1341
1309 /* Setup Rx descriptor ring and Rx buffers */ 1342 /* Setup Rx descriptor ring and Rx buffers */
@@ -1319,7 +1352,7 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
1319 goto err_nomem; 1352 goto err_nomem;
1320 } 1353 }
1321 1354
1322 rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc); 1355 rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
1323 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, 1356 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
1324 &rx_ring->dma); 1357 &rx_ring->dma);
1325 if (!rx_ring->desc) { 1358 if (!rx_ring->desc) {
@@ -1338,16 +1371,17 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
1338 wr32(E1000_RDH(0), 0); 1371 wr32(E1000_RDH(0), 0);
1339 wr32(E1000_RDT(0), 0); 1372 wr32(E1000_RDT(0), 0);
1340 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); 1373 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
1341 rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | 1374 rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
1342 E1000_RCTL_RDMTS_HALF |
1343 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 1375 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
1344 wr32(E1000_RCTL, rctl); 1376 wr32(E1000_RCTL, rctl);
1345 wr32(E1000_SRRCTL(0), 0); 1377 wr32(E1000_SRRCTL(0), E1000_SRRCTL_DESCTYPE_ADV_ONEBUF);
1346 1378
1347 for (i = 0; i < rx_ring->count; i++) { 1379 for (i = 0; i < rx_ring->count; i++) {
1348 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i); 1380 union e1000_adv_rx_desc *rx_desc;
1349 struct sk_buff *skb; 1381 struct sk_buff *skb;
1350 1382
1383 buffer_info = &rx_ring->buffer_info[i];
1384 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
1351 skb = alloc_skb(IGB_RXBUFFER_2048 + NET_IP_ALIGN, 1385 skb = alloc_skb(IGB_RXBUFFER_2048 + NET_IP_ALIGN,
1352 GFP_KERNEL); 1386 GFP_KERNEL);
1353 if (!skb) { 1387 if (!skb) {
@@ -1355,11 +1389,11 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
1355 goto err_nomem; 1389 goto err_nomem;
1356 } 1390 }
1357 skb_reserve(skb, NET_IP_ALIGN); 1391 skb_reserve(skb, NET_IP_ALIGN);
1358 rx_ring->buffer_info[i].skb = skb; 1392 buffer_info->skb = skb;
1359 rx_ring->buffer_info[i].dma = 1393 buffer_info->dma = pci_map_single(pdev, skb->data,
1360 pci_map_single(pdev, skb->data, IGB_RXBUFFER_2048, 1394 IGB_RXBUFFER_2048,
1361 PCI_DMA_FROMDEVICE); 1395 PCI_DMA_FROMDEVICE);
1362 rx_desc->buffer_addr = cpu_to_le64(rx_ring->buffer_info[i].dma); 1396 rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
1363 memset(skb->data, 0x00, skb->len); 1397 memset(skb->data, 0x00, skb->len);
1364 } 1398 }
1365 1399
@@ -1458,7 +1492,7 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
1458 E1000_CTRL_TFCE | 1492 E1000_CTRL_TFCE |
1459 E1000_CTRL_LRST); 1493 E1000_CTRL_LRST);
1460 reg |= E1000_CTRL_SLU | 1494 reg |= E1000_CTRL_SLU |
1461 E1000_CTRL_FD; 1495 E1000_CTRL_FD;
1462 wr32(E1000_CTRL, reg); 1496 wr32(E1000_CTRL, reg);
1463 1497
1464 /* Unset switch control to serdes energy detect */ 1498 /* Unset switch control to serdes energy detect */
@@ -1745,6 +1779,15 @@ static int igb_wol_exclusion(struct igb_adapter *adapter,
1745 /* return success for non excluded adapter ports */ 1779 /* return success for non excluded adapter ports */
1746 retval = 0; 1780 retval = 0;
1747 break; 1781 break;
1782 case E1000_DEV_ID_82576_QUAD_COPPER:
1783 /* quad port adapters only support WoL on port A */
1784 if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) {
1785 wol->supported = 0;
1786 break;
1787 }
1788 /* return success for non excluded adapter ports */
1789 retval = 0;
1790 break;
1748 default: 1791 default:
1749 /* dual port cards only support WoL on port A from now on 1792 /* dual port cards only support WoL on port A from now on
1750 * unless it was enabled in the eeprom for port B 1793 * unless it was enabled in the eeprom for port B
@@ -1827,9 +1870,6 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1827 return 0; 1870 return 0;
1828} 1871}
1829 1872
1830/* toggle LED 4 times per second = 2 "blinks" per second */
1831#define IGB_ID_INTERVAL (HZ/4)
1832
1833/* bit defines for adapter->led_status */ 1873/* bit defines for adapter->led_status */
1834#define IGB_LED_ON 0 1874#define IGB_LED_ON 0
1835 1875
@@ -1921,18 +1961,6 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
1921 int stat_count = sizeof(struct igb_queue_stats) / sizeof(u64); 1961 int stat_count = sizeof(struct igb_queue_stats) / sizeof(u64);
1922 int j; 1962 int j;
1923 int i; 1963 int i;
1924#ifdef CONFIG_IGB_LRO
1925 int aggregated = 0, flushed = 0, no_desc = 0;
1926
1927 for (i = 0; i < adapter->num_rx_queues; i++) {
1928 aggregated += adapter->rx_ring[i].lro_mgr.stats.aggregated;
1929 flushed += adapter->rx_ring[i].lro_mgr.stats.flushed;
1930 no_desc += adapter->rx_ring[i].lro_mgr.stats.no_desc;
1931 }
1932 adapter->lro_aggregated = aggregated;
1933 adapter->lro_flushed = flushed;
1934 adapter->lro_no_desc = no_desc;
1935#endif
1936 1964
1937 igb_update_stats(adapter); 1965 igb_update_stats(adapter);
1938 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { 1966 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 9dd13ad12ce4..7c4481b994ab 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation. 4 Copyright(c) 2007-2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -34,6 +34,7 @@
34#include <linux/ipv6.h> 34#include <linux/ipv6.h>
35#include <net/checksum.h> 35#include <net/checksum.h>
36#include <net/ip6_checksum.h> 36#include <net/ip6_checksum.h>
37#include <linux/net_tstamp.h>
37#include <linux/mii.h> 38#include <linux/mii.h>
38#include <linux/ethtool.h> 39#include <linux/ethtool.h>
39#include <linux/if_vlan.h> 40#include <linux/if_vlan.h>
@@ -48,12 +49,12 @@
48#endif 49#endif
49#include "igb.h" 50#include "igb.h"
50 51
51#define DRV_VERSION "1.2.45-k2" 52#define DRV_VERSION "1.3.16-k2"
52char igb_driver_name[] = "igb"; 53char igb_driver_name[] = "igb";
53char igb_driver_version[] = DRV_VERSION; 54char igb_driver_version[] = DRV_VERSION;
54static const char igb_driver_string[] = 55static const char igb_driver_string[] =
55 "Intel(R) Gigabit Ethernet Network Driver"; 56 "Intel(R) Gigabit Ethernet Network Driver";
56static const char igb_copyright[] = "Copyright (c) 2008 Intel Corporation."; 57static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";
57 58
58static const struct e1000_info *igb_info_tbl[] = { 59static const struct e1000_info *igb_info_tbl[] = {
59 [board_82575] = &e1000_82575_info, 60 [board_82575] = &e1000_82575_info,
@@ -61,8 +62,10 @@ static const struct e1000_info *igb_info_tbl[] = {
61 62
62static struct pci_device_id igb_pci_tbl[] = { 63static struct pci_device_id igb_pci_tbl[] = {
63 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 }, 64 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
65 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
64 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 }, 66 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
65 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 }, 67 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
68 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
66 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 }, 69 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
67 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 }, 70 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
68 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 }, 71 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
@@ -105,7 +108,6 @@ static irqreturn_t igb_intr_msi(int irq, void *);
105static irqreturn_t igb_msix_other(int irq, void *); 108static irqreturn_t igb_msix_other(int irq, void *);
106static irqreturn_t igb_msix_rx(int irq, void *); 109static irqreturn_t igb_msix_rx(int irq, void *);
107static irqreturn_t igb_msix_tx(int irq, void *); 110static irqreturn_t igb_msix_tx(int irq, void *);
108static int igb_clean_rx_ring_msix(struct napi_struct *, int);
109#ifdef CONFIG_IGB_DCA 111#ifdef CONFIG_IGB_DCA
110static void igb_update_rx_dca(struct igb_ring *); 112static void igb_update_rx_dca(struct igb_ring *);
111static void igb_update_tx_dca(struct igb_ring *); 113static void igb_update_tx_dca(struct igb_ring *);
@@ -115,9 +117,6 @@ static bool igb_clean_tx_irq(struct igb_ring *);
115static int igb_poll(struct napi_struct *, int); 117static int igb_poll(struct napi_struct *, int);
116static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int); 118static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int);
117static void igb_alloc_rx_buffers_adv(struct igb_ring *, int); 119static void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
118#ifdef CONFIG_IGB_LRO
119static int igb_get_skb_hdr(struct sk_buff *skb, void **, void **, u64 *, void *);
120#endif
121static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); 120static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
122static void igb_tx_timeout(struct net_device *); 121static void igb_tx_timeout(struct net_device *);
123static void igb_reset_task(struct work_struct *); 122static void igb_reset_task(struct work_struct *);
@@ -125,6 +124,16 @@ static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
125static void igb_vlan_rx_add_vid(struct net_device *, u16); 124static void igb_vlan_rx_add_vid(struct net_device *, u16);
126static void igb_vlan_rx_kill_vid(struct net_device *, u16); 125static void igb_vlan_rx_kill_vid(struct net_device *, u16);
127static void igb_restore_vlan(struct igb_adapter *); 126static void igb_restore_vlan(struct igb_adapter *);
127static void igb_ping_all_vfs(struct igb_adapter *);
128static void igb_msg_task(struct igb_adapter *);
129static int igb_rcv_msg_from_vf(struct igb_adapter *, u32);
130static inline void igb_set_rah_pool(struct e1000_hw *, int , int);
131static void igb_set_mc_list_pools(struct igb_adapter *, int, u16);
132static void igb_vmm_control(struct igb_adapter *);
133static inline void igb_set_vmolr(struct e1000_hw *, int);
134static inline int igb_set_vf_rlpml(struct igb_adapter *, int, int);
135static int igb_set_vf_mac(struct igb_adapter *adapter, int, unsigned char *);
136static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
128 137
129static int igb_suspend(struct pci_dev *, pm_message_t); 138static int igb_suspend(struct pci_dev *, pm_message_t);
130#ifdef CONFIG_PM 139#ifdef CONFIG_PM
@@ -139,12 +148,18 @@ static struct notifier_block dca_notifier = {
139 .priority = 0 148 .priority = 0
140}; 149};
141#endif 150#endif
142
143#ifdef CONFIG_NET_POLL_CONTROLLER 151#ifdef CONFIG_NET_POLL_CONTROLLER
144/* for netdump / net console */ 152/* for netdump / net console */
145static void igb_netpoll(struct net_device *); 153static void igb_netpoll(struct net_device *);
146#endif 154#endif
147 155
156#ifdef CONFIG_PCI_IOV
157static ssize_t igb_set_num_vfs(struct device *, struct device_attribute *,
158 const char *, size_t);
159static ssize_t igb_show_num_vfs(struct device *, struct device_attribute *,
160 char *);
161DEVICE_ATTR(num_vfs, S_IRUGO | S_IWUSR, igb_show_num_vfs, igb_set_num_vfs);
162#endif
148static pci_ers_result_t igb_io_error_detected(struct pci_dev *, 163static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
149 pci_channel_state_t); 164 pci_channel_state_t);
150static pci_ers_result_t igb_io_slot_reset(struct pci_dev *); 165static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
@@ -178,6 +193,54 @@ MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
178MODULE_LICENSE("GPL"); 193MODULE_LICENSE("GPL");
179MODULE_VERSION(DRV_VERSION); 194MODULE_VERSION(DRV_VERSION);
180 195
196/**
197 * Scale the NIC clock cycle by a large factor so that
198 * relatively small clock corrections can be added or
199 * substracted at each clock tick. The drawbacks of a
200 * large factor are a) that the clock register overflows
201 * more quickly (not such a big deal) and b) that the
202 * increment per tick has to fit into 24 bits.
203 *
204 * Note that
205 * TIMINCA = IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS *
206 * IGB_TSYNC_SCALE
207 * TIMINCA += TIMINCA * adjustment [ppm] / 1e9
208 *
209 * The base scale factor is intentionally a power of two
210 * so that the division in %struct timecounter can be done with
211 * a shift.
212 */
213#define IGB_TSYNC_SHIFT (19)
214#define IGB_TSYNC_SCALE (1<<IGB_TSYNC_SHIFT)
215
216/**
217 * The duration of one clock cycle of the NIC.
218 *
219 * @todo This hard-coded value is part of the specification and might change
220 * in future hardware revisions. Add revision check.
221 */
222#define IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS 16
223
224#if (IGB_TSYNC_SCALE * IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS) >= (1<<24)
225# error IGB_TSYNC_SCALE and/or IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS are too large to fit into TIMINCA
226#endif
227
228/**
229 * igb_read_clock - read raw cycle counter (to be used by time counter)
230 */
231static cycle_t igb_read_clock(const struct cyclecounter *tc)
232{
233 struct igb_adapter *adapter =
234 container_of(tc, struct igb_adapter, cycles);
235 struct e1000_hw *hw = &adapter->hw;
236 u64 stamp;
237
238 stamp = rd32(E1000_SYSTIML);
239 stamp |= (u64)rd32(E1000_SYSTIMH) << 32ULL;
240
241 return stamp;
242}
243
181#ifdef DEBUG 244#ifdef DEBUG
182/** 245/**
183 * igb_get_hw_dev_name - return device name string 246 * igb_get_hw_dev_name - return device name string
@@ -188,6 +251,30 @@ char *igb_get_hw_dev_name(struct e1000_hw *hw)
188 struct igb_adapter *adapter = hw->back; 251 struct igb_adapter *adapter = hw->back;
189 return adapter->netdev->name; 252 return adapter->netdev->name;
190} 253}
254
255/**
256 * igb_get_time_str - format current NIC and system time as string
257 */
258static char *igb_get_time_str(struct igb_adapter *adapter,
259 char buffer[160])
260{
261 cycle_t hw = adapter->cycles.read(&adapter->cycles);
262 struct timespec nic = ns_to_timespec(timecounter_read(&adapter->clock));
263 struct timespec sys;
264 struct timespec delta;
265 getnstimeofday(&sys);
266
267 delta = timespec_sub(nic, sys);
268
269 sprintf(buffer,
270 "HW %llu, NIC %ld.%09lus, SYS %ld.%09lus, NIC-SYS %lds + %09luns",
271 hw,
272 (long)nic.tv_sec, nic.tv_nsec,
273 (long)sys.tv_sec, sys.tv_nsec,
274 (long)delta.tv_sec, delta.tv_nsec);
275
276 return buffer;
277}
191#endif 278#endif
192 279
193/** 280/**
@@ -243,6 +330,7 @@ module_exit(igb_exit_module);
243static void igb_cache_ring_register(struct igb_adapter *adapter) 330static void igb_cache_ring_register(struct igb_adapter *adapter)
244{ 331{
245 int i; 332 int i;
333 unsigned int rbase_offset = adapter->vfs_allocated_count;
246 334
247 switch (adapter->hw.mac.type) { 335 switch (adapter->hw.mac.type) {
248 case e1000_82576: 336 case e1000_82576:
@@ -252,9 +340,11 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
252 * and continue consuming queues in the same sequence 340 * and continue consuming queues in the same sequence
253 */ 341 */
254 for (i = 0; i < adapter->num_rx_queues; i++) 342 for (i = 0; i < adapter->num_rx_queues; i++)
255 adapter->rx_ring[i].reg_idx = Q_IDX_82576(i); 343 adapter->rx_ring[i].reg_idx = rbase_offset +
344 Q_IDX_82576(i);
256 for (i = 0; i < adapter->num_tx_queues; i++) 345 for (i = 0; i < adapter->num_tx_queues; i++)
257 adapter->tx_ring[i].reg_idx = Q_IDX_82576(i); 346 adapter->tx_ring[i].reg_idx = rbase_offset +
347 Q_IDX_82576(i);
258 break; 348 break;
259 case e1000_82575: 349 case e1000_82575:
260 default: 350 default:
@@ -354,7 +444,7 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
354 a vector number along with a "valid" bit. Sadly, the layout 444 a vector number along with a "valid" bit. Sadly, the layout
355 of the table is somewhat counterintuitive. */ 445 of the table is somewhat counterintuitive. */
356 if (rx_queue > IGB_N0_QUEUE) { 446 if (rx_queue > IGB_N0_QUEUE) {
357 index = (rx_queue >> 1); 447 index = (rx_queue >> 1) + adapter->vfs_allocated_count;
358 ivar = array_rd32(E1000_IVAR0, index); 448 ivar = array_rd32(E1000_IVAR0, index);
359 if (rx_queue & 0x1) { 449 if (rx_queue & 0x1) {
360 /* vector goes into third byte of register */ 450 /* vector goes into third byte of register */
@@ -369,7 +459,7 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
369 array_wr32(E1000_IVAR0, index, ivar); 459 array_wr32(E1000_IVAR0, index, ivar);
370 } 460 }
371 if (tx_queue > IGB_N0_QUEUE) { 461 if (tx_queue > IGB_N0_QUEUE) {
372 index = (tx_queue >> 1); 462 index = (tx_queue >> 1) + adapter->vfs_allocated_count;
373 ivar = array_rd32(E1000_IVAR0, index); 463 ivar = array_rd32(E1000_IVAR0, index);
374 if (tx_queue & 0x1) { 464 if (tx_queue & 0x1) {
375 /* vector goes into high byte of register */ 465 /* vector goes into high byte of register */
@@ -407,7 +497,7 @@ static void igb_configure_msix(struct igb_adapter *adapter)
407 /* Turn on MSI-X capability first, or our settings 497 /* Turn on MSI-X capability first, or our settings
408 * won't stick. And it will take days to debug. */ 498 * won't stick. And it will take days to debug. */
409 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE | 499 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
410 E1000_GPIE_PBA | E1000_GPIE_EIAME | 500 E1000_GPIE_PBA | E1000_GPIE_EIAME |
411 E1000_GPIE_NSICR); 501 E1000_GPIE_NSICR);
412 502
413 for (i = 0; i < adapter->num_tx_queues; i++) { 503 for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -506,9 +596,6 @@ static int igb_request_msix(struct igb_adapter *adapter)
506 goto out; 596 goto out;
507 ring->itr_register = E1000_EITR(0) + (vector << 2); 597 ring->itr_register = E1000_EITR(0) + (vector << 2);
508 ring->itr_val = adapter->itr; 598 ring->itr_val = adapter->itr;
509 /* overwrite the poll routine for MSIX, we've already done
510 * netif_napi_add */
511 ring->napi.poll = &igb_clean_rx_ring_msix;
512 vector++; 599 vector++;
513 } 600 }
514 601
@@ -546,6 +633,11 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter)
546 int err; 633 int err;
547 int numvecs, i; 634 int numvecs, i;
548 635
636 /* Number of supported queues. */
637 /* Having more queues than CPUs doesn't make sense. */
638 adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
639 adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus());
640
549 numvecs = adapter->num_tx_queues + adapter->num_rx_queues + 1; 641 numvecs = adapter->num_tx_queues + adapter->num_rx_queues + 1;
550 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), 642 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
551 GFP_KERNEL); 643 GFP_KERNEL);
@@ -687,7 +779,10 @@ static void igb_irq_enable(struct igb_adapter *adapter)
687 wr32(E1000_EIAC, adapter->eims_enable_mask); 779 wr32(E1000_EIAC, adapter->eims_enable_mask);
688 wr32(E1000_EIAM, adapter->eims_enable_mask); 780 wr32(E1000_EIAM, adapter->eims_enable_mask);
689 wr32(E1000_EIMS, adapter->eims_enable_mask); 781 wr32(E1000_EIMS, adapter->eims_enable_mask);
690 wr32(E1000_IMS, E1000_IMS_LSC); 782 if (adapter->vfs_allocated_count)
783 wr32(E1000_MBVFIMR, 0xFF);
784 wr32(E1000_IMS, (E1000_IMS_LSC | E1000_IMS_VMMB |
785 E1000_IMS_DOUTSYNC));
691 } else { 786 } else {
692 wr32(E1000_IMS, IMS_ENABLE_MASK); 787 wr32(E1000_IMS, IMS_ENABLE_MASK);
693 wr32(E1000_IAM, IMS_ENABLE_MASK); 788 wr32(E1000_IAM, IMS_ENABLE_MASK);
@@ -811,6 +906,10 @@ int igb_up(struct igb_adapter *adapter)
811 if (adapter->msix_entries) 906 if (adapter->msix_entries)
812 igb_configure_msix(adapter); 907 igb_configure_msix(adapter);
813 908
909 igb_vmm_control(adapter);
910 igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
911 igb_set_vmolr(hw, adapter->vfs_allocated_count);
912
814 /* Clear any pending interrupts. */ 913 /* Clear any pending interrupts. */
815 rd32(E1000_ICR); 914 rd32(E1000_ICR);
816 igb_irq_enable(adapter); 915 igb_irq_enable(adapter);
@@ -856,6 +955,10 @@ void igb_down(struct igb_adapter *adapter)
856 955
857 netdev->tx_queue_len = adapter->tx_queue_len; 956 netdev->tx_queue_len = adapter->tx_queue_len;
858 netif_carrier_off(netdev); 957 netif_carrier_off(netdev);
958
959 /* record the stats before reset*/
960 igb_update_stats(adapter);
961
859 adapter->link_speed = 0; 962 adapter->link_speed = 0;
860 adapter->link_duplex = 0; 963 adapter->link_duplex = 0;
861 964
@@ -886,11 +989,14 @@ void igb_reset(struct igb_adapter *adapter)
886 /* Repartition Pba for greater than 9k mtu 989 /* Repartition Pba for greater than 9k mtu
887 * To take effect CTRL.RST is required. 990 * To take effect CTRL.RST is required.
888 */ 991 */
889 if (mac->type != e1000_82576) { 992 switch (mac->type) {
890 pba = E1000_PBA_34K; 993 case e1000_82576:
891 }
892 else {
893 pba = E1000_PBA_64K; 994 pba = E1000_PBA_64K;
995 break;
996 case e1000_82575:
997 default:
998 pba = E1000_PBA_34K;
999 break;
894 } 1000 }
895 1001
896 if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) && 1002 if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
@@ -912,7 +1018,7 @@ void igb_reset(struct igb_adapter *adapter)
912 /* the tx fifo also stores 16 bytes of information about the tx 1018 /* the tx fifo also stores 16 bytes of information about the tx
913 * but don't include ethernet FCS because hardware appends it */ 1019 * but don't include ethernet FCS because hardware appends it */
914 min_tx_space = (adapter->max_frame_size + 1020 min_tx_space = (adapter->max_frame_size +
915 sizeof(struct e1000_tx_desc) - 1021 sizeof(union e1000_adv_tx_desc) -
916 ETH_FCS_LEN) * 2; 1022 ETH_FCS_LEN) * 2;
917 min_tx_space = ALIGN(min_tx_space, 1024); 1023 min_tx_space = ALIGN(min_tx_space, 1024);
918 min_tx_space >>= 10; 1024 min_tx_space >>= 10;
@@ -956,6 +1062,20 @@ void igb_reset(struct igb_adapter *adapter)
956 fc->send_xon = 1; 1062 fc->send_xon = 1;
957 fc->type = fc->original_type; 1063 fc->type = fc->original_type;
958 1064
1065 /* disable receive for all VFs and wait one second */
1066 if (adapter->vfs_allocated_count) {
1067 int i;
1068 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
1069 adapter->vf_data[i].clear_to_send = false;
1070
1071 /* ping all the active vfs to let them know we are going down */
1072 igb_ping_all_vfs(adapter);
1073
1074 /* disable transmits and receives */
1075 wr32(E1000_VFRE, 0);
1076 wr32(E1000_VFTE, 0);
1077 }
1078
959 /* Allow time for pending master requests to run */ 1079 /* Allow time for pending master requests to run */
960 adapter->hw.mac.ops.reset_hw(&adapter->hw); 1080 adapter->hw.mac.ops.reset_hw(&adapter->hw);
961 wr32(E1000_WUC, 0); 1081 wr32(E1000_WUC, 0);
@@ -972,21 +1092,6 @@ void igb_reset(struct igb_adapter *adapter)
972 igb_get_phy_info(&adapter->hw); 1092 igb_get_phy_info(&adapter->hw);
973} 1093}
974 1094
975/**
976 * igb_is_need_ioport - determine if an adapter needs ioport resources or not
977 * @pdev: PCI device information struct
978 *
979 * Returns true if an adapter needs ioport resources
980 **/
981static int igb_is_need_ioport(struct pci_dev *pdev)
982{
983 switch (pdev->device) {
984 /* Currently there are no adapters that need ioport resources */
985 default:
986 return false;
987 }
988}
989
990static const struct net_device_ops igb_netdev_ops = { 1095static const struct net_device_ops igb_netdev_ops = {
991 .ndo_open = igb_open, 1096 .ndo_open = igb_open,
992 .ndo_stop = igb_close, 1097 .ndo_stop = igb_close,
@@ -1025,21 +1130,12 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1025 struct e1000_hw *hw; 1130 struct e1000_hw *hw;
1026 const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; 1131 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
1027 unsigned long mmio_start, mmio_len; 1132 unsigned long mmio_start, mmio_len;
1028 int i, err, pci_using_dac; 1133 int err, pci_using_dac;
1029 u16 eeprom_data = 0; 1134 u16 eeprom_data = 0;
1030 u16 eeprom_apme_mask = IGB_EEPROM_APME; 1135 u16 eeprom_apme_mask = IGB_EEPROM_APME;
1031 u32 part_num; 1136 u32 part_num;
1032 int bars, need_ioport;
1033 1137
1034 /* do not allocate ioport bars when not needed */ 1138 err = pci_enable_device_mem(pdev);
1035 need_ioport = igb_is_need_ioport(pdev);
1036 if (need_ioport) {
1037 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
1038 err = pci_enable_device(pdev);
1039 } else {
1040 bars = pci_select_bars(pdev, IORESOURCE_MEM);
1041 err = pci_enable_device_mem(pdev);
1042 }
1043 if (err) 1139 if (err)
1044 return err; 1140 return err;
1045 1141
@@ -1061,7 +1157,9 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1061 } 1157 }
1062 } 1158 }
1063 1159
1064 err = pci_request_selected_regions(pdev, bars, igb_driver_name); 1160 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
1161 IORESOURCE_MEM),
1162 igb_driver_name);
1065 if (err) 1163 if (err)
1066 goto err_pci_reg; 1164 goto err_pci_reg;
1067 1165
@@ -1076,7 +1174,8 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1076 pci_save_state(pdev); 1174 pci_save_state(pdev);
1077 1175
1078 err = -ENOMEM; 1176 err = -ENOMEM;
1079 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter), IGB_MAX_TX_QUEUES); 1177 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
1178 IGB_ABS_MAX_TX_QUEUES);
1080 if (!netdev) 1179 if (!netdev)
1081 goto err_alloc_etherdev; 1180 goto err_alloc_etherdev;
1082 1181
@@ -1089,15 +1188,13 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1089 hw = &adapter->hw; 1188 hw = &adapter->hw;
1090 hw->back = adapter; 1189 hw->back = adapter;
1091 adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE; 1190 adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;
1092 adapter->bars = bars;
1093 adapter->need_ioport = need_ioport;
1094 1191
1095 mmio_start = pci_resource_start(pdev, 0); 1192 mmio_start = pci_resource_start(pdev, 0);
1096 mmio_len = pci_resource_len(pdev, 0); 1193 mmio_len = pci_resource_len(pdev, 0);
1097 1194
1098 err = -EIO; 1195 err = -EIO;
1099 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); 1196 hw->hw_addr = ioremap(mmio_start, mmio_len);
1100 if (!adapter->hw.hw_addr) 1197 if (!hw->hw_addr)
1101 goto err_ioremap; 1198 goto err_ioremap;
1102 1199
1103 netdev->netdev_ops = &igb_netdev_ops; 1200 netdev->netdev_ops = &igb_netdev_ops;
@@ -1125,8 +1222,9 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1125 /* Initialize skew-specific constants */ 1222 /* Initialize skew-specific constants */
1126 err = ei->get_invariants(hw); 1223 err = ei->get_invariants(hw);
1127 if (err) 1224 if (err)
1128 goto err_hw_init; 1225 goto err_sw_init;
1129 1226
1227 /* setup the private structure */
1130 err = igb_sw_init(adapter); 1228 err = igb_sw_init(adapter);
1131 if (err) 1229 if (err)
1132 goto err_sw_init; 1230 goto err_sw_init;
@@ -1158,27 +1256,25 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1158 "PHY reset is blocked due to SOL/IDER session.\n"); 1256 "PHY reset is blocked due to SOL/IDER session.\n");
1159 1257
1160 netdev->features = NETIF_F_SG | 1258 netdev->features = NETIF_F_SG |
1161 NETIF_F_HW_CSUM | 1259 NETIF_F_IP_CSUM |
1162 NETIF_F_HW_VLAN_TX | 1260 NETIF_F_HW_VLAN_TX |
1163 NETIF_F_HW_VLAN_RX | 1261 NETIF_F_HW_VLAN_RX |
1164 NETIF_F_HW_VLAN_FILTER; 1262 NETIF_F_HW_VLAN_FILTER;
1165 1263
1264 netdev->features |= NETIF_F_IPV6_CSUM;
1166 netdev->features |= NETIF_F_TSO; 1265 netdev->features |= NETIF_F_TSO;
1167 netdev->features |= NETIF_F_TSO6; 1266 netdev->features |= NETIF_F_TSO6;
1168 1267
1169#ifdef CONFIG_IGB_LRO 1268 netdev->features |= NETIF_F_GRO;
1170 netdev->features |= NETIF_F_LRO;
1171#endif
1172 1269
1173 netdev->vlan_features |= NETIF_F_TSO; 1270 netdev->vlan_features |= NETIF_F_TSO;
1174 netdev->vlan_features |= NETIF_F_TSO6; 1271 netdev->vlan_features |= NETIF_F_TSO6;
1175 netdev->vlan_features |= NETIF_F_HW_CSUM; 1272 netdev->vlan_features |= NETIF_F_IP_CSUM;
1176 netdev->vlan_features |= NETIF_F_SG; 1273 netdev->vlan_features |= NETIF_F_SG;
1177 1274
1178 if (pci_using_dac) 1275 if (pci_using_dac)
1179 netdev->features |= NETIF_F_HIGHDMA; 1276 netdev->features |= NETIF_F_HIGHDMA;
1180 1277
1181 netdev->features |= NETIF_F_LLTX;
1182 adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw); 1278 adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw);
1183 1279
1184 /* before reading the NVM, reset the controller to put the device in a 1280 /* before reading the NVM, reset the controller to put the device in a
@@ -1216,14 +1312,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1216 INIT_WORK(&adapter->reset_task, igb_reset_task); 1312 INIT_WORK(&adapter->reset_task, igb_reset_task);
1217 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task); 1313 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
1218 1314
1219 /* Initialize link & ring properties that are user-changeable */ 1315 /* Initialize link properties that are user-changeable */
1220 adapter->tx_ring->count = 256;
1221 for (i = 0; i < adapter->num_tx_queues; i++)
1222 adapter->tx_ring[i].count = adapter->tx_ring->count;
1223 adapter->rx_ring->count = 256;
1224 for (i = 0; i < adapter->num_rx_queues; i++)
1225 adapter->rx_ring[i].count = adapter->rx_ring->count;
1226
1227 adapter->fc_autoneg = true; 1316 adapter->fc_autoneg = true;
1228 hw->mac.autoneg = true; 1317 hw->mac.autoneg = true;
1229 hw->phy.autoneg_advertised = 0x2f; 1318 hw->phy.autoneg_advertised = 0x2f;
@@ -1231,7 +1320,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1231 hw->fc.original_type = e1000_fc_default; 1320 hw->fc.original_type = e1000_fc_default;
1232 hw->fc.type = e1000_fc_default; 1321 hw->fc.type = e1000_fc_default;
1233 1322
1234 adapter->itr_setting = 3; 1323 adapter->itr_setting = IGB_DEFAULT_ITR;
1235 adapter->itr = IGB_START_ITR; 1324 adapter->itr = IGB_START_ITR;
1236 1325
1237 igb_validate_mdi_setting(hw); 1326 igb_validate_mdi_setting(hw);
@@ -1242,10 +1331,10 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1242 * enable the ACPI Magic Packet filter 1331 * enable the ACPI Magic Packet filter
1243 */ 1332 */
1244 1333
1245 if (hw->bus.func == 0 || 1334 if (hw->bus.func == 0)
1246 hw->device_id == E1000_DEV_ID_82575EB_COPPER) 1335 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1247 hw->nvm.ops.read_nvm(hw, NVM_INIT_CONTROL3_PORT_A, 1, 1336 else if (hw->bus.func == 1)
1248 &eeprom_data); 1337 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1249 1338
1250 if (eeprom_data & eeprom_apme_mask) 1339 if (eeprom_data & eeprom_apme_mask)
1251 adapter->eeprom_wol |= E1000_WUFC_MAG; 1340 adapter->eeprom_wol |= E1000_WUFC_MAG;
@@ -1265,6 +1354,16 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1265 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) 1354 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
1266 adapter->eeprom_wol = 0; 1355 adapter->eeprom_wol = 0;
1267 break; 1356 break;
1357 case E1000_DEV_ID_82576_QUAD_COPPER:
1358 /* if quad port adapter, disable WoL on all but port A */
1359 if (global_quad_port_a != 0)
1360 adapter->eeprom_wol = 0;
1361 else
1362 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
1363 /* Reset for multiple quad port adapters */
1364 if (++global_quad_port_a == 4)
1365 global_quad_port_a = 0;
1366 break;
1268 } 1367 }
1269 1368
1270 /* initialize the wol settings based on the eeprom settings */ 1369 /* initialize the wol settings based on the eeprom settings */
@@ -1287,17 +1386,82 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1287 if (err) 1386 if (err)
1288 goto err_register; 1387 goto err_register;
1289 1388
1389#ifdef CONFIG_PCI_IOV
1390 /* since iov functionality isn't critical to base device function we
1391 * can accept failure. If it fails we don't allow iov to be enabled */
1392 if (hw->mac.type == e1000_82576) {
1393 err = pci_enable_sriov(pdev, 0);
1394 if (!err)
1395 err = device_create_file(&netdev->dev,
1396 &dev_attr_num_vfs);
1397 if (err)
1398 dev_err(&pdev->dev, "Failed to initialize IOV\n");
1399 }
1400
1401#endif
1290#ifdef CONFIG_IGB_DCA 1402#ifdef CONFIG_IGB_DCA
1291 if (dca_add_requester(&pdev->dev) == 0) { 1403 if (dca_add_requester(&pdev->dev) == 0) {
1292 adapter->flags |= IGB_FLAG_DCA_ENABLED; 1404 adapter->flags |= IGB_FLAG_DCA_ENABLED;
1293 dev_info(&pdev->dev, "DCA enabled\n"); 1405 dev_info(&pdev->dev, "DCA enabled\n");
1294 /* Always use CB2 mode, difference is masked 1406 /* Always use CB2 mode, difference is masked
1295 * in the CB driver. */ 1407 * in the CB driver. */
1296 wr32(E1000_DCA_CTRL, 2); 1408 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
1297 igb_setup_dca(adapter); 1409 igb_setup_dca(adapter);
1298 } 1410 }
1299#endif 1411#endif
1300 1412
1413 /*
1414 * Initialize hardware timer: we keep it running just in case
1415 * that some program needs it later on.
1416 */
1417 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
1418 adapter->cycles.read = igb_read_clock;
1419 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
1420 adapter->cycles.mult = 1;
1421 adapter->cycles.shift = IGB_TSYNC_SHIFT;
1422 wr32(E1000_TIMINCA,
1423 (1<<24) |
1424 IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS * IGB_TSYNC_SCALE);
1425#if 0
1426 /*
1427 * Avoid rollover while we initialize by resetting the time counter.
1428 */
1429 wr32(E1000_SYSTIML, 0x00000000);
1430 wr32(E1000_SYSTIMH, 0x00000000);
1431#else
1432 /*
1433 * Set registers so that rollover occurs soon to test this.
1434 */
1435 wr32(E1000_SYSTIML, 0x00000000);
1436 wr32(E1000_SYSTIMH, 0xFF800000);
1437#endif
1438 wrfl();
1439 timecounter_init(&adapter->clock,
1440 &adapter->cycles,
1441 ktime_to_ns(ktime_get_real()));
1442
1443 /*
1444 * Synchronize our NIC clock against system wall clock. NIC
1445 * time stamp reading requires ~3us per sample, each sample
1446 * was pretty stable even under load => only require 10
1447 * samples for each offset comparison.
1448 */
1449 memset(&adapter->compare, 0, sizeof(adapter->compare));
1450 adapter->compare.source = &adapter->clock;
1451 adapter->compare.target = ktime_get_real;
1452 adapter->compare.num_samples = 10;
1453 timecompare_update(&adapter->compare, 0);
1454
1455#ifdef DEBUG
1456 {
1457 char buffer[160];
1458 printk(KERN_DEBUG
1459 "igb: %s: hw %p initialized timer\n",
1460 igb_get_time_str(adapter, buffer),
1461 &adapter->hw);
1462 }
1463#endif
1464
1301 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); 1465 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
1302 /* print bus type/speed/width info */ 1466 /* print bus type/speed/width info */
1303 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", 1467 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
@@ -1330,15 +1494,14 @@ err_eeprom:
1330 if (hw->flash_address) 1494 if (hw->flash_address)
1331 iounmap(hw->flash_address); 1495 iounmap(hw->flash_address);
1332 1496
1333 igb_remove_device(hw);
1334 igb_free_queues(adapter); 1497 igb_free_queues(adapter);
1335err_sw_init: 1498err_sw_init:
1336err_hw_init:
1337 iounmap(hw->hw_addr); 1499 iounmap(hw->hw_addr);
1338err_ioremap: 1500err_ioremap:
1339 free_netdev(netdev); 1501 free_netdev(netdev);
1340err_alloc_etherdev: 1502err_alloc_etherdev:
1341 pci_release_selected_regions(pdev, bars); 1503 pci_release_selected_regions(pdev, pci_select_bars(pdev,
1504 IORESOURCE_MEM));
1342err_pci_reg: 1505err_pci_reg:
1343err_dma: 1506err_dma:
1344 pci_disable_device(pdev); 1507 pci_disable_device(pdev);
@@ -1358,9 +1521,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1358{ 1521{
1359 struct net_device *netdev = pci_get_drvdata(pdev); 1522 struct net_device *netdev = pci_get_drvdata(pdev);
1360 struct igb_adapter *adapter = netdev_priv(netdev); 1523 struct igb_adapter *adapter = netdev_priv(netdev);
1361#ifdef CONFIG_IGB_DCA
1362 struct e1000_hw *hw = &adapter->hw; 1524 struct e1000_hw *hw = &adapter->hw;
1363#endif
1364 int err; 1525 int err;
1365 1526
1366 /* flush_scheduled work may reschedule our watchdog task, so 1527 /* flush_scheduled work may reschedule our watchdog task, so
@@ -1376,7 +1537,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1376 dev_info(&pdev->dev, "DCA disabled\n"); 1537 dev_info(&pdev->dev, "DCA disabled\n");
1377 dca_remove_requester(&pdev->dev); 1538 dca_remove_requester(&pdev->dev);
1378 adapter->flags &= ~IGB_FLAG_DCA_ENABLED; 1539 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
1379 wr32(E1000_DCA_CTRL, 1); 1540 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
1380 } 1541 }
1381#endif 1542#endif
1382 1543
@@ -1389,15 +1550,29 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1389 if (!igb_check_reset_block(&adapter->hw)) 1550 if (!igb_check_reset_block(&adapter->hw))
1390 igb_reset_phy(&adapter->hw); 1551 igb_reset_phy(&adapter->hw);
1391 1552
1392 igb_remove_device(&adapter->hw);
1393 igb_reset_interrupt_capability(adapter); 1553 igb_reset_interrupt_capability(adapter);
1394 1554
1395 igb_free_queues(adapter); 1555 igb_free_queues(adapter);
1396 1556
1397 iounmap(adapter->hw.hw_addr); 1557#ifdef CONFIG_PCI_IOV
1398 if (adapter->hw.flash_address) 1558 /* reclaim resources allocated to VFs */
1399 iounmap(adapter->hw.flash_address); 1559 if (adapter->vf_data) {
1400 pci_release_selected_regions(pdev, adapter->bars); 1560 /* disable iov and allow time for transactions to clear */
1561 pci_disable_sriov(pdev);
1562 msleep(500);
1563
1564 kfree(adapter->vf_data);
1565 adapter->vf_data = NULL;
1566 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
1567 msleep(100);
1568 dev_info(&pdev->dev, "IOV Disabled\n");
1569 }
1570#endif
1571 iounmap(hw->hw_addr);
1572 if (hw->flash_address)
1573 iounmap(hw->flash_address);
1574 pci_release_selected_regions(pdev, pci_select_bars(pdev,
1575 IORESOURCE_MEM));
1401 1576
1402 free_netdev(netdev); 1577 free_netdev(netdev);
1403 1578
@@ -1432,11 +1607,6 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
1432 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 1607 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1433 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 1608 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
1434 1609
1435 /* Number of supported queues. */
1436 /* Having more queues than CPUs doesn't make sense. */
1437 adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
1438 adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus());
1439
1440 /* This call may decrease the number of queues depending on 1610 /* This call may decrease the number of queues depending on
1441 * interrupt mode. */ 1611 * interrupt mode. */
1442 igb_set_interrupt_capability(adapter); 1612 igb_set_interrupt_capability(adapter);
@@ -1499,6 +1669,10 @@ static int igb_open(struct net_device *netdev)
1499 * clean_rx handler before we do so. */ 1669 * clean_rx handler before we do so. */
1500 igb_configure(adapter); 1670 igb_configure(adapter);
1501 1671
1672 igb_vmm_control(adapter);
1673 igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
1674 igb_set_vmolr(hw, adapter->vfs_allocated_count);
1675
1502 err = igb_request_irq(adapter); 1676 err = igb_request_irq(adapter);
1503 if (err) 1677 if (err)
1504 goto err_req_irq; 1678 goto err_req_irq;
@@ -1574,7 +1748,6 @@ static int igb_close(struct net_device *netdev)
1574 * 1748 *
1575 * Return 0 on success, negative on failure 1749 * Return 0 on success, negative on failure
1576 **/ 1750 **/
1577
1578int igb_setup_tx_resources(struct igb_adapter *adapter, 1751int igb_setup_tx_resources(struct igb_adapter *adapter,
1579 struct igb_ring *tx_ring) 1752 struct igb_ring *tx_ring)
1580{ 1753{
@@ -1588,7 +1761,7 @@ int igb_setup_tx_resources(struct igb_adapter *adapter,
1588 memset(tx_ring->buffer_info, 0, size); 1761 memset(tx_ring->buffer_info, 0, size);
1589 1762
1590 /* round up to nearest 4K */ 1763 /* round up to nearest 4K */
1591 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); 1764 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
1592 tx_ring->size = ALIGN(tx_ring->size, 4096); 1765 tx_ring->size = ALIGN(tx_ring->size, 4096);
1593 1766
1594 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size, 1767 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
@@ -1635,7 +1808,7 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
1635 for (i = 0; i < IGB_MAX_TX_QUEUES; i++) { 1808 for (i = 0; i < IGB_MAX_TX_QUEUES; i++) {
1636 r_idx = i % adapter->num_tx_queues; 1809 r_idx = i % adapter->num_tx_queues;
1637 adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx]; 1810 adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
1638 } 1811 }
1639 return err; 1812 return err;
1640} 1813}
1641 1814
@@ -1654,13 +1827,13 @@ static void igb_configure_tx(struct igb_adapter *adapter)
1654 int i, j; 1827 int i, j;
1655 1828
1656 for (i = 0; i < adapter->num_tx_queues; i++) { 1829 for (i = 0; i < adapter->num_tx_queues; i++) {
1657 struct igb_ring *ring = &(adapter->tx_ring[i]); 1830 struct igb_ring *ring = &adapter->tx_ring[i];
1658 j = ring->reg_idx; 1831 j = ring->reg_idx;
1659 wr32(E1000_TDLEN(j), 1832 wr32(E1000_TDLEN(j),
1660 ring->count * sizeof(struct e1000_tx_desc)); 1833 ring->count * sizeof(union e1000_adv_tx_desc));
1661 tdba = ring->dma; 1834 tdba = ring->dma;
1662 wr32(E1000_TDBAL(j), 1835 wr32(E1000_TDBAL(j),
1663 tdba & 0x00000000ffffffffULL); 1836 tdba & 0x00000000ffffffffULL);
1664 wr32(E1000_TDBAH(j), tdba >> 32); 1837 wr32(E1000_TDBAH(j), tdba >> 32);
1665 1838
1666 ring->head = E1000_TDH(j); 1839 ring->head = E1000_TDH(j);
@@ -1680,12 +1853,11 @@ static void igb_configure_tx(struct igb_adapter *adapter)
1680 wr32(E1000_DCA_TXCTRL(j), txctrl); 1853 wr32(E1000_DCA_TXCTRL(j), txctrl);
1681 } 1854 }
1682 1855
1683 1856 /* disable queue 0 to prevent tail bump w/o re-configuration */
1684 1857 if (adapter->vfs_allocated_count)
1685 /* Use the default values for the Tx Inter Packet Gap (IPG) timer */ 1858 wr32(E1000_TXDCTL(0), 0);
1686 1859
1687 /* Program the Transmit Control Register */ 1860 /* Program the Transmit Control Register */
1688
1689 tctl = rd32(E1000_TCTL); 1861 tctl = rd32(E1000_TCTL);
1690 tctl &= ~E1000_TCTL_CT; 1862 tctl &= ~E1000_TCTL_CT;
1691 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | 1863 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
@@ -1709,21 +1881,12 @@ static void igb_configure_tx(struct igb_adapter *adapter)
1709 * 1881 *
1710 * Returns 0 on success, negative on failure 1882 * Returns 0 on success, negative on failure
1711 **/ 1883 **/
1712
1713int igb_setup_rx_resources(struct igb_adapter *adapter, 1884int igb_setup_rx_resources(struct igb_adapter *adapter,
1714 struct igb_ring *rx_ring) 1885 struct igb_ring *rx_ring)
1715{ 1886{
1716 struct pci_dev *pdev = adapter->pdev; 1887 struct pci_dev *pdev = adapter->pdev;
1717 int size, desc_len; 1888 int size, desc_len;
1718 1889
1719#ifdef CONFIG_IGB_LRO
1720 size = sizeof(struct net_lro_desc) * MAX_LRO_DESCRIPTORS;
1721 rx_ring->lro_mgr.lro_arr = vmalloc(size);
1722 if (!rx_ring->lro_mgr.lro_arr)
1723 goto err;
1724 memset(rx_ring->lro_mgr.lro_arr, 0, size);
1725#endif
1726
1727 size = sizeof(struct igb_buffer) * rx_ring->count; 1890 size = sizeof(struct igb_buffer) * rx_ring->count;
1728 rx_ring->buffer_info = vmalloc(size); 1891 rx_ring->buffer_info = vmalloc(size);
1729 if (!rx_ring->buffer_info) 1892 if (!rx_ring->buffer_info)
@@ -1750,10 +1913,6 @@ int igb_setup_rx_resources(struct igb_adapter *adapter,
1750 return 0; 1913 return 0;
1751 1914
1752err: 1915err:
1753#ifdef CONFIG_IGB_LRO
1754 vfree(rx_ring->lro_mgr.lro_arr);
1755 rx_ring->lro_mgr.lro_arr = NULL;
1756#endif
1757 vfree(rx_ring->buffer_info); 1916 vfree(rx_ring->buffer_info);
1758 dev_err(&adapter->pdev->dev, "Unable to allocate memory for " 1917 dev_err(&adapter->pdev->dev, "Unable to allocate memory for "
1759 "the receive descriptor ring\n"); 1918 "the receive descriptor ring\n");
@@ -1802,13 +1961,13 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
1802 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); 1961 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
1803 1962
1804 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF | 1963 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
1805 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 1964 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
1806 1965
1807 /* 1966 /*
1808 * enable stripping of CRC. It's unlikely this will break BMC 1967 * enable stripping of CRC. It's unlikely this will break BMC
1809 * redirection as it did with e1000. Newer features require 1968 * redirection as it did with e1000. Newer features require
1810 * that the HW strips the CRC. 1969 * that the HW strips the CRC.
1811 */ 1970 */
1812 rctl |= E1000_RCTL_SECRC; 1971 rctl |= E1000_RCTL_SECRC;
1813 1972
1814 /* 1973 /*
@@ -1852,6 +2011,30 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
1852 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; 2011 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
1853 } 2012 }
1854 2013
2014 /* Attention!!! For SR-IOV PF driver operations you must enable
2015 * queue drop for all VF and PF queues to prevent head of line blocking
2016 * if an un-trusted VF does not provide descriptors to hardware.
2017 */
2018 if (adapter->vfs_allocated_count) {
2019 u32 vmolr;
2020
2021 j = adapter->rx_ring[0].reg_idx;
2022
2023 /* set all queue drop enable bits */
2024 wr32(E1000_QDE, ALL_QUEUES);
2025 srrctl |= E1000_SRRCTL_DROP_EN;
2026
2027 /* disable queue 0 to prevent tail write w/o re-config */
2028 wr32(E1000_RXDCTL(0), 0);
2029
2030 vmolr = rd32(E1000_VMOLR(j));
2031 if (rctl & E1000_RCTL_LPE)
2032 vmolr |= E1000_VMOLR_LPE;
2033 if (adapter->num_rx_queues > 0)
2034 vmolr |= E1000_VMOLR_RSSE;
2035 wr32(E1000_VMOLR(j), vmolr);
2036 }
2037
1855 for (i = 0; i < adapter->num_rx_queues; i++) { 2038 for (i = 0; i < adapter->num_rx_queues; i++) {
1856 j = adapter->rx_ring[i].reg_idx; 2039 j = adapter->rx_ring[i].reg_idx;
1857 wr32(E1000_SRRCTL(j), srrctl); 2040 wr32(E1000_SRRCTL(j), srrctl);
@@ -1861,6 +2044,54 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
1861} 2044}
1862 2045
1863/** 2046/**
2047 * igb_rlpml_set - set maximum receive packet size
2048 * @adapter: board private structure
2049 *
2050 * Configure maximum receivable packet size.
2051 **/
2052static void igb_rlpml_set(struct igb_adapter *adapter)
2053{
2054 u32 max_frame_size = adapter->max_frame_size;
2055 struct e1000_hw *hw = &adapter->hw;
2056 u16 pf_id = adapter->vfs_allocated_count;
2057
2058 if (adapter->vlgrp)
2059 max_frame_size += VLAN_TAG_SIZE;
2060
2061 /* if vfs are enabled we set RLPML to the largest possible request
2062 * size and set the VMOLR RLPML to the size we need */
2063 if (pf_id) {
2064 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
2065 max_frame_size = MAX_STD_JUMBO_FRAME_SIZE + VLAN_TAG_SIZE;
2066 }
2067
2068 wr32(E1000_RLPML, max_frame_size);
2069}
2070
2071/**
2072 * igb_configure_vt_default_pool - Configure VT default pool
2073 * @adapter: board private structure
2074 *
2075 * Configure the default pool
2076 **/
2077static void igb_configure_vt_default_pool(struct igb_adapter *adapter)
2078{
2079 struct e1000_hw *hw = &adapter->hw;
2080 u16 pf_id = adapter->vfs_allocated_count;
2081 u32 vtctl;
2082
2083 /* not in sr-iov mode - do nothing */
2084 if (!pf_id)
2085 return;
2086
2087 vtctl = rd32(E1000_VT_CTL);
2088 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2089 E1000_VT_CTL_DISABLE_DEF_POOL);
2090 vtctl |= pf_id << E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2091 wr32(E1000_VT_CTL, vtctl);
2092}
2093
2094/**
1864 * igb_configure_rx - Configure receive Unit after Reset 2095 * igb_configure_rx - Configure receive Unit after Reset
1865 * @adapter: board private structure 2096 * @adapter: board private structure
1866 * 2097 *
@@ -1872,7 +2103,7 @@ static void igb_configure_rx(struct igb_adapter *adapter)
1872 struct e1000_hw *hw = &adapter->hw; 2103 struct e1000_hw *hw = &adapter->hw;
1873 u32 rctl, rxcsum; 2104 u32 rctl, rxcsum;
1874 u32 rxdctl; 2105 u32 rxdctl;
1875 int i, j; 2106 int i;
1876 2107
1877 /* disable receives while setting up the descriptors */ 2108 /* disable receives while setting up the descriptors */
1878 rctl = rd32(E1000_RCTL); 2109 rctl = rd32(E1000_RCTL);
@@ -1886,14 +2117,14 @@ static void igb_configure_rx(struct igb_adapter *adapter)
1886 /* Setup the HW Rx Head and Tail Descriptor Pointers and 2117 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1887 * the Base and Length of the Rx Descriptor Ring */ 2118 * the Base and Length of the Rx Descriptor Ring */
1888 for (i = 0; i < adapter->num_rx_queues; i++) { 2119 for (i = 0; i < adapter->num_rx_queues; i++) {
1889 struct igb_ring *ring = &(adapter->rx_ring[i]); 2120 struct igb_ring *ring = &adapter->rx_ring[i];
1890 j = ring->reg_idx; 2121 int j = ring->reg_idx;
1891 rdba = ring->dma; 2122 rdba = ring->dma;
1892 wr32(E1000_RDBAL(j), 2123 wr32(E1000_RDBAL(j),
1893 rdba & 0x00000000ffffffffULL); 2124 rdba & 0x00000000ffffffffULL);
1894 wr32(E1000_RDBAH(j), rdba >> 32); 2125 wr32(E1000_RDBAH(j), rdba >> 32);
1895 wr32(E1000_RDLEN(j), 2126 wr32(E1000_RDLEN(j),
1896 ring->count * sizeof(union e1000_adv_rx_desc)); 2127 ring->count * sizeof(union e1000_adv_rx_desc));
1897 2128
1898 ring->head = E1000_RDH(j); 2129 ring->head = E1000_RDH(j);
1899 ring->tail = E1000_RDT(j); 2130 ring->tail = E1000_RDT(j);
@@ -1907,16 +2138,6 @@ static void igb_configure_rx(struct igb_adapter *adapter)
1907 rxdctl |= IGB_RX_HTHRESH << 8; 2138 rxdctl |= IGB_RX_HTHRESH << 8;
1908 rxdctl |= IGB_RX_WTHRESH << 16; 2139 rxdctl |= IGB_RX_WTHRESH << 16;
1909 wr32(E1000_RXDCTL(j), rxdctl); 2140 wr32(E1000_RXDCTL(j), rxdctl);
1910#ifdef CONFIG_IGB_LRO
1911 /* Intitial LRO Settings */
1912 ring->lro_mgr.max_aggr = MAX_LRO_AGGR;
1913 ring->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
1914 ring->lro_mgr.get_skb_header = igb_get_skb_hdr;
1915 ring->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
1916 ring->lro_mgr.dev = adapter->netdev;
1917 ring->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
1918 ring->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
1919#endif
1920 } 2141 }
1921 2142
1922 if (adapter->num_rx_queues > 1) { 2143 if (adapter->num_rx_queues > 1) {
@@ -1941,7 +2162,10 @@ static void igb_configure_rx(struct igb_adapter *adapter)
1941 writel(reta.dword, 2162 writel(reta.dword,
1942 hw->hw_addr + E1000_RETA(0) + (j & ~3)); 2163 hw->hw_addr + E1000_RETA(0) + (j & ~3));
1943 } 2164 }
1944 mrqc = E1000_MRQC_ENABLE_RSS_4Q; 2165 if (adapter->vfs_allocated_count)
2166 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2167 else
2168 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
1945 2169
1946 /* Fill out hash function seeds */ 2170 /* Fill out hash function seeds */
1947 for (j = 0; j < 10; j++) 2171 for (j = 0; j < 10; j++)
@@ -1966,27 +2190,23 @@ static void igb_configure_rx(struct igb_adapter *adapter)
1966 rxcsum |= E1000_RXCSUM_PCSD; 2190 rxcsum |= E1000_RXCSUM_PCSD;
1967 wr32(E1000_RXCSUM, rxcsum); 2191 wr32(E1000_RXCSUM, rxcsum);
1968 } else { 2192 } else {
2193 /* Enable multi-queue for sr-iov */
2194 if (adapter->vfs_allocated_count)
2195 wr32(E1000_MRQC, E1000_MRQC_ENABLE_VMDQ);
1969 /* Enable Receive Checksum Offload for TCP and UDP */ 2196 /* Enable Receive Checksum Offload for TCP and UDP */
1970 rxcsum = rd32(E1000_RXCSUM); 2197 rxcsum = rd32(E1000_RXCSUM);
1971 if (adapter->rx_csum) { 2198 if (adapter->rx_csum)
1972 rxcsum |= E1000_RXCSUM_TUOFL; 2199 rxcsum |= E1000_RXCSUM_TUOFL | E1000_RXCSUM_IPPCSE;
2200 else
2201 rxcsum &= ~(E1000_RXCSUM_TUOFL | E1000_RXCSUM_IPPCSE);
1973 2202
1974 /* Enable IPv4 payload checksum for UDP fragments
1975 * Must be used in conjunction with packet-split. */
1976 if (adapter->rx_ps_hdr_size)
1977 rxcsum |= E1000_RXCSUM_IPPCSE;
1978 } else {
1979 rxcsum &= ~E1000_RXCSUM_TUOFL;
1980 /* don't need to clear IPPCSE as it defaults to 0 */
1981 }
1982 wr32(E1000_RXCSUM, rxcsum); 2203 wr32(E1000_RXCSUM, rxcsum);
1983 } 2204 }
1984 2205
1985 if (adapter->vlgrp) 2206 /* Set the default pool for the PF's first queue */
1986 wr32(E1000_RLPML, 2207 igb_configure_vt_default_pool(adapter);
1987 adapter->max_frame_size + VLAN_TAG_SIZE); 2208
1988 else 2209 igb_rlpml_set(adapter);
1989 wr32(E1000_RLPML, adapter->max_frame_size);
1990 2210
1991 /* Enable Receives */ 2211 /* Enable Receives */
1992 wr32(E1000_RCTL, rctl); 2212 wr32(E1000_RCTL, rctl);
@@ -2041,6 +2261,7 @@ static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
2041 buffer_info->skb = NULL; 2261 buffer_info->skb = NULL;
2042 } 2262 }
2043 buffer_info->time_stamp = 0; 2263 buffer_info->time_stamp = 0;
2264 buffer_info->next_to_watch = 0;
2044 /* buffer_info must be completely set up in the transmit path */ 2265 /* buffer_info must be completely set up in the transmit path */
2045} 2266}
2046 2267
@@ -2105,11 +2326,6 @@ void igb_free_rx_resources(struct igb_ring *rx_ring)
2105 vfree(rx_ring->buffer_info); 2326 vfree(rx_ring->buffer_info);
2106 rx_ring->buffer_info = NULL; 2327 rx_ring->buffer_info = NULL;
2107 2328
2108#ifdef CONFIG_IGB_LRO
2109 vfree(rx_ring->lro_mgr.lro_arr);
2110 rx_ring->lro_mgr.lro_arr = NULL;
2111#endif
2112
2113 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); 2329 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
2114 2330
2115 rx_ring->desc = NULL; 2331 rx_ring->desc = NULL;
@@ -2209,15 +2425,18 @@ static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
2209static int igb_set_mac(struct net_device *netdev, void *p) 2425static int igb_set_mac(struct net_device *netdev, void *p)
2210{ 2426{
2211 struct igb_adapter *adapter = netdev_priv(netdev); 2427 struct igb_adapter *adapter = netdev_priv(netdev);
2428 struct e1000_hw *hw = &adapter->hw;
2212 struct sockaddr *addr = p; 2429 struct sockaddr *addr = p;
2213 2430
2214 if (!is_valid_ether_addr(addr->sa_data)) 2431 if (!is_valid_ether_addr(addr->sa_data))
2215 return -EADDRNOTAVAIL; 2432 return -EADDRNOTAVAIL;
2216 2433
2217 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 2434 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2218 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); 2435 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
2436
2437 hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
2219 2438
2220 adapter->hw.mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0); 2439 igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
2221 2440
2222 return 0; 2441 return 0;
2223} 2442}
@@ -2260,8 +2479,8 @@ static void igb_set_multi(struct net_device *netdev)
2260 2479
2261 if (!netdev->mc_count) { 2480 if (!netdev->mc_count) {
2262 /* nothing to program, so clear mc list */ 2481 /* nothing to program, so clear mc list */
2263 igb_update_mc_addr_list_82575(hw, NULL, 0, 1, 2482 igb_update_mc_addr_list(hw, NULL, 0, 1,
2264 mac->rar_entry_count); 2483 mac->rar_entry_count);
2265 return; 2484 return;
2266 } 2485 }
2267 2486
@@ -2278,8 +2497,13 @@ static void igb_set_multi(struct net_device *netdev)
2278 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN); 2497 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
2279 mc_ptr = mc_ptr->next; 2498 mc_ptr = mc_ptr->next;
2280 } 2499 }
2281 igb_update_mc_addr_list_82575(hw, mta_list, i, 1, 2500 igb_update_mc_addr_list(hw, mta_list, i,
2282 mac->rar_entry_count); 2501 adapter->vfs_allocated_count + 1,
2502 mac->rar_entry_count);
2503
2504 igb_set_mc_list_pools(adapter, i, mac->rar_entry_count);
2505 igb_restore_vf_multicasts(adapter);
2506
2283 kfree(mta_list); 2507 kfree(mta_list);
2284} 2508}
2285 2509
@@ -2292,6 +2516,46 @@ static void igb_update_phy_info(unsigned long data)
2292} 2516}
2293 2517
2294/** 2518/**
2519 * igb_has_link - check shared code for link and determine up/down
2520 * @adapter: pointer to driver private info
2521 **/
2522static bool igb_has_link(struct igb_adapter *adapter)
2523{
2524 struct e1000_hw *hw = &adapter->hw;
2525 bool link_active = false;
2526 s32 ret_val = 0;
2527
2528 /* get_link_status is set on LSC (link status) interrupt or
2529 * rx sequence error interrupt. get_link_status will stay
2530 * false until the e1000_check_for_link establishes link
2531 * for copper adapters ONLY
2532 */
2533 switch (hw->phy.media_type) {
2534 case e1000_media_type_copper:
2535 if (hw->mac.get_link_status) {
2536 ret_val = hw->mac.ops.check_for_link(hw);
2537 link_active = !hw->mac.get_link_status;
2538 } else {
2539 link_active = true;
2540 }
2541 break;
2542 case e1000_media_type_fiber:
2543 ret_val = hw->mac.ops.check_for_link(hw);
2544 link_active = !!(rd32(E1000_STATUS) & E1000_STATUS_LU);
2545 break;
2546 case e1000_media_type_internal_serdes:
2547 ret_val = hw->mac.ops.check_for_link(hw);
2548 link_active = hw->mac.serdes_has_link;
2549 break;
2550 default:
2551 case e1000_media_type_unknown:
2552 break;
2553 }
2554
2555 return link_active;
2556}
2557
2558/**
2295 * igb_watchdog - Timer Call-back 2559 * igb_watchdog - Timer Call-back
2296 * @data: pointer to adapter cast into an unsigned long 2560 * @data: pointer to adapter cast into an unsigned long
2297 **/ 2561 **/
@@ -2307,34 +2571,16 @@ static void igb_watchdog_task(struct work_struct *work)
2307 struct igb_adapter *adapter = container_of(work, 2571 struct igb_adapter *adapter = container_of(work,
2308 struct igb_adapter, watchdog_task); 2572 struct igb_adapter, watchdog_task);
2309 struct e1000_hw *hw = &adapter->hw; 2573 struct e1000_hw *hw = &adapter->hw;
2310
2311 struct net_device *netdev = adapter->netdev; 2574 struct net_device *netdev = adapter->netdev;
2312 struct igb_ring *tx_ring = adapter->tx_ring; 2575 struct igb_ring *tx_ring = adapter->tx_ring;
2313 struct e1000_mac_info *mac = &adapter->hw.mac;
2314 u32 link; 2576 u32 link;
2315 u32 eics = 0; 2577 u32 eics = 0;
2316 s32 ret_val;
2317 int i; 2578 int i;
2318 2579
2319 if ((netif_carrier_ok(netdev)) && 2580 link = igb_has_link(adapter);
2320 (rd32(E1000_STATUS) & E1000_STATUS_LU)) 2581 if ((netif_carrier_ok(netdev)) && link)
2321 goto link_up; 2582 goto link_up;
2322 2583
2323 ret_val = hw->mac.ops.check_for_link(&adapter->hw);
2324 if ((ret_val == E1000_ERR_PHY) &&
2325 (hw->phy.type == e1000_phy_igp_3) &&
2326 (rd32(E1000_CTRL) &
2327 E1000_PHY_CTRL_GBE_DISABLE))
2328 dev_info(&adapter->pdev->dev,
2329 "Gigabit has been disabled, downgrading speed\n");
2330
2331 if ((hw->phy.media_type == e1000_media_type_internal_serdes) &&
2332 !(rd32(E1000_TXCW) & E1000_TXCW_ANE))
2333 link = mac->serdes_has_link;
2334 else
2335 link = rd32(E1000_STATUS) &
2336 E1000_STATUS_LU;
2337
2338 if (link) { 2584 if (link) {
2339 if (!netif_carrier_ok(netdev)) { 2585 if (!netif_carrier_ok(netdev)) {
2340 u32 ctrl; 2586 u32 ctrl;
@@ -2373,6 +2619,9 @@ static void igb_watchdog_task(struct work_struct *work)
2373 netif_carrier_on(netdev); 2619 netif_carrier_on(netdev);
2374 netif_tx_wake_all_queues(netdev); 2620 netif_tx_wake_all_queues(netdev);
2375 2621
2622 igb_ping_all_vfs(adapter);
2623
2624 /* link state has changed, schedule phy info update */
2376 if (!test_bit(__IGB_DOWN, &adapter->state)) 2625 if (!test_bit(__IGB_DOWN, &adapter->state))
2377 mod_timer(&adapter->phy_info_timer, 2626 mod_timer(&adapter->phy_info_timer,
2378 round_jiffies(jiffies + 2 * HZ)); 2627 round_jiffies(jiffies + 2 * HZ));
@@ -2386,6 +2635,10 @@ static void igb_watchdog_task(struct work_struct *work)
2386 netdev->name); 2635 netdev->name);
2387 netif_carrier_off(netdev); 2636 netif_carrier_off(netdev);
2388 netif_tx_stop_all_queues(netdev); 2637 netif_tx_stop_all_queues(netdev);
2638
2639 igb_ping_all_vfs(adapter);
2640
2641 /* link state has changed, schedule phy info update */
2389 if (!test_bit(__IGB_DOWN, &adapter->state)) 2642 if (!test_bit(__IGB_DOWN, &adapter->state))
2390 mod_timer(&adapter->phy_info_timer, 2643 mod_timer(&adapter->phy_info_timer,
2391 round_jiffies(jiffies + 2 * HZ)); 2644 round_jiffies(jiffies + 2 * HZ));
@@ -2395,9 +2648,9 @@ static void igb_watchdog_task(struct work_struct *work)
2395link_up: 2648link_up:
2396 igb_update_stats(adapter); 2649 igb_update_stats(adapter);
2397 2650
2398 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; 2651 hw->mac.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2399 adapter->tpt_old = adapter->stats.tpt; 2652 adapter->tpt_old = adapter->stats.tpt;
2400 mac->collision_delta = adapter->stats.colc - adapter->colc_old; 2653 hw->mac.collision_delta = adapter->stats.colc - adapter->colc_old;
2401 adapter->colc_old = adapter->stats.colc; 2654 adapter->colc_old = adapter->stats.colc;
2402 2655
2403 adapter->gorc = adapter->stats.gorc - adapter->gorc_old; 2656 adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
@@ -2554,7 +2807,7 @@ static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
2554 if (bytes > 25000) { 2807 if (bytes > 25000) {
2555 if (packets > 35) 2808 if (packets > 35)
2556 retval = low_latency; 2809 retval = low_latency;
2557 } else if (bytes < 6000) { 2810 } else if (bytes < 1500) {
2558 retval = low_latency; 2811 retval = low_latency;
2559 } 2812 }
2560 break; 2813 break;
@@ -2586,15 +2839,13 @@ static void igb_set_itr(struct igb_adapter *adapter)
2586 adapter->tx_itr, 2839 adapter->tx_itr,
2587 adapter->tx_ring->total_packets, 2840 adapter->tx_ring->total_packets,
2588 adapter->tx_ring->total_bytes); 2841 adapter->tx_ring->total_bytes);
2589
2590 current_itr = max(adapter->rx_itr, adapter->tx_itr); 2842 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2591 } else { 2843 } else {
2592 current_itr = adapter->rx_itr; 2844 current_itr = adapter->rx_itr;
2593 } 2845 }
2594 2846
2595 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 2847 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2596 if (adapter->itr_setting == 3 && 2848 if (adapter->itr_setting == 3 && current_itr == lowest_latency)
2597 current_itr == lowest_latency)
2598 current_itr = low_latency; 2849 current_itr = low_latency;
2599 2850
2600 switch (current_itr) { 2851 switch (current_itr) {
@@ -2646,6 +2897,7 @@ set_itr_now:
2646#define IGB_TX_FLAGS_VLAN 0x00000002 2897#define IGB_TX_FLAGS_VLAN 0x00000002
2647#define IGB_TX_FLAGS_TSO 0x00000004 2898#define IGB_TX_FLAGS_TSO 0x00000004
2648#define IGB_TX_FLAGS_IPV4 0x00000008 2899#define IGB_TX_FLAGS_IPV4 0x00000008
2900#define IGB_TX_FLAGS_TSTAMP 0x00000010
2649#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000 2901#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
2650#define IGB_TX_FLAGS_VLAN_SHIFT 16 2902#define IGB_TX_FLAGS_VLAN_SHIFT 16
2651 2903
@@ -2711,7 +2963,7 @@ static inline int igb_tso_adv(struct igb_adapter *adapter,
2711 mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT); 2963 mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
2712 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT); 2964 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
2713 2965
2714 /* Context index must be unique per ring. */ 2966 /* For 82575, context index must be unique per ring. */
2715 if (adapter->flags & IGB_FLAG_NEED_CTX_IDX) 2967 if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
2716 mss_l4len_idx |= tx_ring->queue_index << 4; 2968 mss_l4len_idx |= tx_ring->queue_index << 4;
2717 2969
@@ -2757,12 +3009,12 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
2757 3009
2758 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3010 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2759 switch (skb->protocol) { 3011 switch (skb->protocol) {
2760 case __constant_htons(ETH_P_IP): 3012 case cpu_to_be16(ETH_P_IP):
2761 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; 3013 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
2762 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 3014 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2763 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; 3015 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
2764 break; 3016 break;
2765 case __constant_htons(ETH_P_IPV6): 3017 case cpu_to_be16(ETH_P_IPV6):
2766 /* XXX what about other V6 headers?? */ 3018 /* XXX what about other V6 headers?? */
2767 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 3019 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2768 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; 3020 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
@@ -2781,6 +3033,8 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
2781 if (adapter->flags & IGB_FLAG_NEED_CTX_IDX) 3033 if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
2782 context_desc->mss_l4len_idx = 3034 context_desc->mss_l4len_idx =
2783 cpu_to_le32(tx_ring->queue_index << 4); 3035 cpu_to_le32(tx_ring->queue_index << 4);
3036 else
3037 context_desc->mss_l4len_idx = 0;
2784 3038
2785 buffer_info->time_stamp = jiffies; 3039 buffer_info->time_stamp = jiffies;
2786 buffer_info->next_to_watch = i; 3040 buffer_info->next_to_watch = i;
@@ -2793,8 +3047,6 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
2793 3047
2794 return true; 3048 return true;
2795 } 3049 }
2796
2797
2798 return false; 3050 return false;
2799} 3051}
2800 3052
@@ -2871,6 +3123,9 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
2871 if (tx_flags & IGB_TX_FLAGS_VLAN) 3123 if (tx_flags & IGB_TX_FLAGS_VLAN)
2872 cmd_type_len |= E1000_ADVTXD_DCMD_VLE; 3124 cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
2873 3125
3126 if (tx_flags & IGB_TX_FLAGS_TSTAMP)
3127 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
3128
2874 if (tx_flags & IGB_TX_FLAGS_TSO) { 3129 if (tx_flags & IGB_TX_FLAGS_TSO) {
2875 cmd_type_len |= E1000_ADVTXD_DCMD_TSE; 3130 cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
2876 3131
@@ -2950,8 +3205,6 @@ static int igb_maybe_stop_tx(struct net_device *netdev,
2950 return __igb_maybe_stop_tx(netdev, tx_ring, size); 3205 return __igb_maybe_stop_tx(netdev, tx_ring, size);
2951} 3206}
2952 3207
2953#define TXD_USE_COUNT(S) (((S) >> (IGB_MAX_TXD_PWR)) + 1)
2954
2955static int igb_xmit_frame_ring_adv(struct sk_buff *skb, 3208static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
2956 struct net_device *netdev, 3209 struct net_device *netdev,
2957 struct igb_ring *tx_ring) 3210 struct igb_ring *tx_ring)
@@ -2959,11 +3212,9 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
2959 struct igb_adapter *adapter = netdev_priv(netdev); 3212 struct igb_adapter *adapter = netdev_priv(netdev);
2960 unsigned int first; 3213 unsigned int first;
2961 unsigned int tx_flags = 0; 3214 unsigned int tx_flags = 0;
2962 unsigned int len;
2963 u8 hdr_len = 0; 3215 u8 hdr_len = 0;
2964 int tso = 0; 3216 int tso = 0;
2965 3217 union skb_shared_tx *shtx;
2966 len = skb_headlen(skb);
2967 3218
2968 if (test_bit(__IGB_DOWN, &adapter->state)) { 3219 if (test_bit(__IGB_DOWN, &adapter->state)) {
2969 dev_kfree_skb_any(skb); 3220 dev_kfree_skb_any(skb);
@@ -2984,7 +3235,21 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
2984 /* this is a hard error */ 3235 /* this is a hard error */
2985 return NETDEV_TX_BUSY; 3236 return NETDEV_TX_BUSY;
2986 } 3237 }
2987 skb_orphan(skb); 3238
3239 /*
3240 * TODO: check that there currently is no other packet with
3241 * time stamping in the queue
3242 *
3243 * When doing time stamping, keep the connection to the socket
3244 * a while longer: it is still needed by skb_hwtstamp_tx(),
3245 * called either in igb_tx_hwtstamp() or by our caller when
3246 * doing software time stamping.
3247 */
3248 shtx = skb_tx(skb);
3249 if (unlikely(shtx->hardware)) {
3250 shtx->in_progress = 1;
3251 tx_flags |= IGB_TX_FLAGS_TSTAMP;
3252 }
2988 3253
2989 if (adapter->vlgrp && vlan_tx_tag_present(skb)) { 3254 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
2990 tx_flags |= IGB_TX_FLAGS_VLAN; 3255 tx_flags |= IGB_TX_FLAGS_VLAN;
@@ -2995,7 +3260,6 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
2995 tx_flags |= IGB_TX_FLAGS_IPV4; 3260 tx_flags |= IGB_TX_FLAGS_IPV4;
2996 3261
2997 first = tx_ring->next_to_use; 3262 first = tx_ring->next_to_use;
2998
2999 tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags, 3263 tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags,
3000 &hdr_len) : 0; 3264 &hdr_len) : 0;
3001 3265
@@ -3006,9 +3270,9 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
3006 3270
3007 if (tso) 3271 if (tso)
3008 tx_flags |= IGB_TX_FLAGS_TSO; 3272 tx_flags |= IGB_TX_FLAGS_TSO;
3009 else if (igb_tx_csum_adv(adapter, tx_ring, skb, tx_flags)) 3273 else if (igb_tx_csum_adv(adapter, tx_ring, skb, tx_flags) &&
3010 if (skb->ip_summed == CHECKSUM_PARTIAL) 3274 (skb->ip_summed == CHECKSUM_PARTIAL))
3011 tx_flags |= IGB_TX_FLAGS_CSUM; 3275 tx_flags |= IGB_TX_FLAGS_CSUM;
3012 3276
3013 igb_tx_queue_adv(adapter, tx_ring, tx_flags, 3277 igb_tx_queue_adv(adapter, tx_ring, tx_flags,
3014 igb_tx_map_adv(adapter, tx_ring, skb, first), 3278 igb_tx_map_adv(adapter, tx_ring, skb, first),
@@ -3028,7 +3292,7 @@ static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *netdev)
3028 struct igb_ring *tx_ring; 3292 struct igb_ring *tx_ring;
3029 3293
3030 int r_idx = 0; 3294 int r_idx = 0;
3031 r_idx = skb->queue_mapping & (IGB_MAX_TX_QUEUES - 1); 3295 r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
3032 tx_ring = adapter->multi_tx_table[r_idx]; 3296 tx_ring = adapter->multi_tx_table[r_idx];
3033 3297
3034 /* This goes back to the question of how to logically map a tx queue 3298 /* This goes back to the question of how to logically map a tx queue
@@ -3050,8 +3314,8 @@ static void igb_tx_timeout(struct net_device *netdev)
3050 /* Do the reset outside of interrupt context */ 3314 /* Do the reset outside of interrupt context */
3051 adapter->tx_timeout_count++; 3315 adapter->tx_timeout_count++;
3052 schedule_work(&adapter->reset_task); 3316 schedule_work(&adapter->reset_task);
3053 wr32(E1000_EICS, adapter->eims_enable_mask & 3317 wr32(E1000_EICS,
3054 ~(E1000_EIMS_TCP_TIMER | E1000_EIMS_OTHER)); 3318 (adapter->eims_enable_mask & ~adapter->eims_other));
3055} 3319}
3056 3320
3057static void igb_reset_task(struct work_struct *work) 3321static void igb_reset_task(struct work_struct *work)
@@ -3069,8 +3333,7 @@ static void igb_reset_task(struct work_struct *work)
3069 * Returns the address of the device statistics structure. 3333 * Returns the address of the device statistics structure.
3070 * The statistics are actually updated from the timer callback. 3334 * The statistics are actually updated from the timer callback.
3071 **/ 3335 **/
3072static struct net_device_stats * 3336static struct net_device_stats *igb_get_stats(struct net_device *netdev)
3073igb_get_stats(struct net_device *netdev)
3074{ 3337{
3075 struct igb_adapter *adapter = netdev_priv(netdev); 3338 struct igb_adapter *adapter = netdev_priv(netdev);
3076 3339
@@ -3096,7 +3359,6 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3096 return -EINVAL; 3359 return -EINVAL;
3097 } 3360 }
3098 3361
3099#define MAX_STD_JUMBO_FRAME_SIZE 9234
3100 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { 3362 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
3101 dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n"); 3363 dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
3102 return -EINVAL; 3364 return -EINVAL;
@@ -3104,6 +3366,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3104 3366
3105 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) 3367 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
3106 msleep(1); 3368 msleep(1);
3369
3107 /* igb_down has a dependency on max_frame_size */ 3370 /* igb_down has a dependency on max_frame_size */
3108 adapter->max_frame_size = max_frame; 3371 adapter->max_frame_size = max_frame;
3109 if (netif_running(netdev)) 3372 if (netif_running(netdev))
@@ -3129,6 +3392,12 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3129#else 3392#else
3130 adapter->rx_buffer_len = PAGE_SIZE / 2; 3393 adapter->rx_buffer_len = PAGE_SIZE / 2;
3131#endif 3394#endif
3395
3396 /* if sr-iov is enabled we need to force buffer size to 1K or larger */
3397 if (adapter->vfs_allocated_count &&
3398 (adapter->rx_buffer_len < IGB_RXBUFFER_1024))
3399 adapter->rx_buffer_len = IGB_RXBUFFER_1024;
3400
3132 /* adjust allocation if LPE protects us, and we aren't using SBP */ 3401 /* adjust allocation if LPE protects us, and we aren't using SBP */
3133 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || 3402 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
3134 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)) 3403 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))
@@ -3273,8 +3542,7 @@ void igb_update_stats(struct igb_adapter *adapter)
3273 /* Phy Stats */ 3542 /* Phy Stats */
3274 if (hw->phy.media_type == e1000_media_type_copper) { 3543 if (hw->phy.media_type == e1000_media_type_copper) {
3275 if ((adapter->link_speed == SPEED_1000) && 3544 if ((adapter->link_speed == SPEED_1000) &&
3276 (!igb_read_phy_reg(hw, PHY_1000T_STATUS, 3545 (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3277 &phy_tmp))) {
3278 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; 3546 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3279 adapter->phy_stats.idle_errors += phy_tmp; 3547 adapter->phy_stats.idle_errors += phy_tmp;
3280 } 3548 }
@@ -3286,7 +3554,6 @@ void igb_update_stats(struct igb_adapter *adapter)
3286 adapter->stats.mgpdc += rd32(E1000_MGTPDC); 3554 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
3287} 3555}
3288 3556
3289
3290static irqreturn_t igb_msix_other(int irq, void *data) 3557static irqreturn_t igb_msix_other(int irq, void *data)
3291{ 3558{
3292 struct net_device *netdev = data; 3559 struct net_device *netdev = data;
@@ -3295,15 +3562,24 @@ static irqreturn_t igb_msix_other(int irq, void *data)
3295 u32 icr = rd32(E1000_ICR); 3562 u32 icr = rd32(E1000_ICR);
3296 3563
3297 /* reading ICR causes bit 31 of EICR to be cleared */ 3564 /* reading ICR causes bit 31 of EICR to be cleared */
3298 if (!(icr & E1000_ICR_LSC)) 3565
3299 goto no_link_interrupt; 3566 if(icr & E1000_ICR_DOUTSYNC) {
3300 hw->mac.get_link_status = 1; 3567 /* HW is reporting DMA is out of sync */
3301 /* guard against interrupt when we're going down */ 3568 adapter->stats.doosync++;
3302 if (!test_bit(__IGB_DOWN, &adapter->state)) 3569 }
3303 mod_timer(&adapter->watchdog_timer, jiffies + 1); 3570
3304 3571 /* Check for a mailbox event */
3305no_link_interrupt: 3572 if (icr & E1000_ICR_VMMB)
3306 wr32(E1000_IMS, E1000_IMS_LSC); 3573 igb_msg_task(adapter);
3574
3575 if (icr & E1000_ICR_LSC) {
3576 hw->mac.get_link_status = 1;
3577 /* guard against interrupt when we're going down */
3578 if (!test_bit(__IGB_DOWN, &adapter->state))
3579 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3580 }
3581
3582 wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_VMMB);
3307 wr32(E1000_EIMS, adapter->eims_other); 3583 wr32(E1000_EIMS, adapter->eims_other);
3308 3584
3309 return IRQ_HANDLED; 3585 return IRQ_HANDLED;
@@ -3319,6 +3595,7 @@ static irqreturn_t igb_msix_tx(int irq, void *data)
3319 if (adapter->flags & IGB_FLAG_DCA_ENABLED) 3595 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3320 igb_update_tx_dca(tx_ring); 3596 igb_update_tx_dca(tx_ring);
3321#endif 3597#endif
3598
3322 tx_ring->total_bytes = 0; 3599 tx_ring->total_bytes = 0;
3323 tx_ring->total_packets = 0; 3600 tx_ring->total_packets = 0;
3324 3601
@@ -3339,13 +3616,11 @@ static void igb_write_itr(struct igb_ring *ring)
3339 if ((ring->adapter->itr_setting & 3) && ring->set_itr) { 3616 if ((ring->adapter->itr_setting & 3) && ring->set_itr) {
3340 switch (hw->mac.type) { 3617 switch (hw->mac.type) {
3341 case e1000_82576: 3618 case e1000_82576:
3342 wr32(ring->itr_register, 3619 wr32(ring->itr_register, ring->itr_val |
3343 ring->itr_val |
3344 0x80000000); 3620 0x80000000);
3345 break; 3621 break;
3346 default: 3622 default:
3347 wr32(ring->itr_register, 3623 wr32(ring->itr_register, ring->itr_val |
3348 ring->itr_val |
3349 (ring->itr_val << 16)); 3624 (ring->itr_val << 16));
3350 break; 3625 break;
3351 } 3626 }
@@ -3363,8 +3638,8 @@ static irqreturn_t igb_msix_rx(int irq, void *data)
3363 3638
3364 igb_write_itr(rx_ring); 3639 igb_write_itr(rx_ring);
3365 3640
3366 if (netif_rx_schedule_prep(&rx_ring->napi)) 3641 if (napi_schedule_prep(&rx_ring->napi))
3367 __netif_rx_schedule(&rx_ring->napi); 3642 __napi_schedule(&rx_ring->napi);
3368 3643
3369#ifdef CONFIG_IGB_DCA 3644#ifdef CONFIG_IGB_DCA
3370 if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED) 3645 if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
@@ -3386,11 +3661,11 @@ static void igb_update_rx_dca(struct igb_ring *rx_ring)
3386 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q)); 3661 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
3387 if (hw->mac.type == e1000_82576) { 3662 if (hw->mac.type == e1000_82576) {
3388 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576; 3663 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
3389 dca_rxctrl |= dca_get_tag(cpu) << 3664 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
3390 E1000_DCA_RXCTRL_CPUID_SHIFT; 3665 E1000_DCA_RXCTRL_CPUID_SHIFT;
3391 } else { 3666 } else {
3392 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK; 3667 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
3393 dca_rxctrl |= dca_get_tag(cpu); 3668 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
3394 } 3669 }
3395 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN; 3670 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
3396 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN; 3671 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
@@ -3413,11 +3688,11 @@ static void igb_update_tx_dca(struct igb_ring *tx_ring)
3413 dca_txctrl = rd32(E1000_DCA_TXCTRL(q)); 3688 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
3414 if (hw->mac.type == e1000_82576) { 3689 if (hw->mac.type == e1000_82576) {
3415 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576; 3690 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
3416 dca_txctrl |= dca_get_tag(cpu) << 3691 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
3417 E1000_DCA_TXCTRL_CPUID_SHIFT; 3692 E1000_DCA_TXCTRL_CPUID_SHIFT;
3418 } else { 3693 } else {
3419 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK; 3694 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
3420 dca_txctrl |= dca_get_tag(cpu); 3695 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
3421 } 3696 }
3422 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN; 3697 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
3423 wr32(E1000_DCA_TXCTRL(q), dca_txctrl); 3698 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
@@ -3457,7 +3732,7 @@ static int __igb_notify_dca(struct device *dev, void *data)
3457 break; 3732 break;
3458 /* Always use CB2 mode, difference is masked 3733 /* Always use CB2 mode, difference is masked
3459 * in the CB driver. */ 3734 * in the CB driver. */
3460 wr32(E1000_DCA_CTRL, 2); 3735 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
3461 if (dca_add_requester(dev) == 0) { 3736 if (dca_add_requester(dev) == 0) {
3462 adapter->flags |= IGB_FLAG_DCA_ENABLED; 3737 adapter->flags |= IGB_FLAG_DCA_ENABLED;
3463 dev_info(&adapter->pdev->dev, "DCA enabled\n"); 3738 dev_info(&adapter->pdev->dev, "DCA enabled\n");
@@ -3472,7 +3747,7 @@ static int __igb_notify_dca(struct device *dev, void *data)
3472 dca_remove_requester(dev); 3747 dca_remove_requester(dev);
3473 dev_info(&adapter->pdev->dev, "DCA disabled\n"); 3748 dev_info(&adapter->pdev->dev, "DCA disabled\n");
3474 adapter->flags &= ~IGB_FLAG_DCA_ENABLED; 3749 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
3475 wr32(E1000_DCA_CTRL, 1); 3750 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
3476 } 3751 }
3477 break; 3752 break;
3478 } 3753 }
@@ -3492,6 +3767,322 @@ static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
3492} 3767}
3493#endif /* CONFIG_IGB_DCA */ 3768#endif /* CONFIG_IGB_DCA */
3494 3769
3770static void igb_ping_all_vfs(struct igb_adapter *adapter)
3771{
3772 struct e1000_hw *hw = &adapter->hw;
3773 u32 ping;
3774 int i;
3775
3776 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
3777 ping = E1000_PF_CONTROL_MSG;
3778 if (adapter->vf_data[i].clear_to_send)
3779 ping |= E1000_VT_MSGTYPE_CTS;
3780 igb_write_mbx(hw, &ping, 1, i);
3781 }
3782}
3783
3784static int igb_set_vf_multicasts(struct igb_adapter *adapter,
3785 u32 *msgbuf, u32 vf)
3786{
3787 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
3788 u16 *hash_list = (u16 *)&msgbuf[1];
3789 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
3790 int i;
3791
3792 /* only up to 30 hash values supported */
3793 if (n > 30)
3794 n = 30;
3795
3796 /* salt away the number of multi cast addresses assigned
3797 * to this VF for later use to restore when the PF multi cast
3798 * list changes
3799 */
3800 vf_data->num_vf_mc_hashes = n;
3801
3802 /* VFs are limited to using the MTA hash table for their multicast
3803 * addresses */
3804 for (i = 0; i < n; i++)
3805 vf_data->vf_mc_hashes[i] = hash_list[i];;
3806
3807 /* Flush and reset the mta with the new values */
3808 igb_set_multi(adapter->netdev);
3809
3810 return 0;
3811}
3812
3813static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
3814{
3815 struct e1000_hw *hw = &adapter->hw;
3816 struct vf_data_storage *vf_data;
3817 int i, j;
3818
3819 for (i = 0; i < adapter->vfs_allocated_count; i++) {
3820 vf_data = &adapter->vf_data[i];
3821 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
3822 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
3823 }
3824}
3825
3826static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
3827{
3828 struct e1000_hw *hw = &adapter->hw;
3829 u32 pool_mask, reg, vid;
3830 int i;
3831
3832 pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
3833
3834 /* Find the vlan filter for this id */
3835 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
3836 reg = rd32(E1000_VLVF(i));
3837
3838 /* remove the vf from the pool */
3839 reg &= ~pool_mask;
3840
3841 /* if pool is empty then remove entry from vfta */
3842 if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
3843 (reg & E1000_VLVF_VLANID_ENABLE)) {
3844 reg = 0;
3845 vid = reg & E1000_VLVF_VLANID_MASK;
3846 igb_vfta_set(hw, vid, false);
3847 }
3848
3849 wr32(E1000_VLVF(i), reg);
3850 }
3851}
3852
3853static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
3854{
3855 struct e1000_hw *hw = &adapter->hw;
3856 u32 reg, i;
3857
3858 /* It is an error to call this function when VFs are not enabled */
3859 if (!adapter->vfs_allocated_count)
3860 return -1;
3861
3862 /* Find the vlan filter for this id */
3863 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
3864 reg = rd32(E1000_VLVF(i));
3865 if ((reg & E1000_VLVF_VLANID_ENABLE) &&
3866 vid == (reg & E1000_VLVF_VLANID_MASK))
3867 break;
3868 }
3869
3870 if (add) {
3871 if (i == E1000_VLVF_ARRAY_SIZE) {
3872 /* Did not find a matching VLAN ID entry that was
3873 * enabled. Search for a free filter entry, i.e.
3874 * one without the enable bit set
3875 */
3876 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
3877 reg = rd32(E1000_VLVF(i));
3878 if (!(reg & E1000_VLVF_VLANID_ENABLE))
3879 break;
3880 }
3881 }
3882 if (i < E1000_VLVF_ARRAY_SIZE) {
3883 /* Found an enabled/available entry */
3884 reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
3885
3886 /* if !enabled we need to set this up in vfta */
3887 if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
3888 /* add VID to filter table, if bit already set
3889 * PF must have added it outside of table */
3890 if (igb_vfta_set(hw, vid, true))
3891 reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT +
3892 adapter->vfs_allocated_count);
3893 reg |= E1000_VLVF_VLANID_ENABLE;
3894 }
3895 reg &= ~E1000_VLVF_VLANID_MASK;
3896 reg |= vid;
3897
3898 wr32(E1000_VLVF(i), reg);
3899 return 0;
3900 }
3901 } else {
3902 if (i < E1000_VLVF_ARRAY_SIZE) {
3903 /* remove vf from the pool */
3904 reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
3905 /* if pool is empty then remove entry from vfta */
3906 if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
3907 reg = 0;
3908 igb_vfta_set(hw, vid, false);
3909 }
3910 wr32(E1000_VLVF(i), reg);
3911 return 0;
3912 }
3913 }
3914 return -1;
3915}
3916
3917static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
3918{
3919 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
3920 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
3921
3922 return igb_vlvf_set(adapter, vid, add, vf);
3923}
3924
3925static inline void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
3926{
3927 struct e1000_hw *hw = &adapter->hw;
3928
3929 /* disable mailbox functionality for vf */
3930 adapter->vf_data[vf].clear_to_send = false;
3931
3932 /* reset offloads to defaults */
3933 igb_set_vmolr(hw, vf);
3934
3935 /* reset vlans for device */
3936 igb_clear_vf_vfta(adapter, vf);
3937
3938 /* reset multicast table array for vf */
3939 adapter->vf_data[vf].num_vf_mc_hashes = 0;
3940
3941 /* Flush and reset the mta with the new values */
3942 igb_set_multi(adapter->netdev);
3943}
3944
3945static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
3946{
3947 struct e1000_hw *hw = &adapter->hw;
3948 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
3949 u32 reg, msgbuf[3];
3950 u8 *addr = (u8 *)(&msgbuf[1]);
3951
3952 /* process all the same items cleared in a function level reset */
3953 igb_vf_reset_event(adapter, vf);
3954
3955 /* set vf mac address */
3956 igb_rar_set(hw, vf_mac, vf + 1);
3957 igb_set_rah_pool(hw, vf, vf + 1);
3958
3959 /* enable transmit and receive for vf */
3960 reg = rd32(E1000_VFTE);
3961 wr32(E1000_VFTE, reg | (1 << vf));
3962 reg = rd32(E1000_VFRE);
3963 wr32(E1000_VFRE, reg | (1 << vf));
3964
3965 /* enable mailbox functionality for vf */
3966 adapter->vf_data[vf].clear_to_send = true;
3967
3968 /* reply to reset with ack and vf mac address */
3969 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
3970 memcpy(addr, vf_mac, 6);
3971 igb_write_mbx(hw, msgbuf, 3, vf);
3972}
3973
3974static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
3975{
3976 unsigned char *addr = (char *)&msg[1];
3977 int err = -1;
3978
3979 if (is_valid_ether_addr(addr))
3980 err = igb_set_vf_mac(adapter, vf, addr);
3981
3982 return err;
3983
3984}
3985
3986static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
3987{
3988 struct e1000_hw *hw = &adapter->hw;
3989 u32 msg = E1000_VT_MSGTYPE_NACK;
3990
3991 /* if device isn't clear to send it shouldn't be reading either */
3992 if (!adapter->vf_data[vf].clear_to_send)
3993 igb_write_mbx(hw, &msg, 1, vf);
3994}
3995
3996
3997static void igb_msg_task(struct igb_adapter *adapter)
3998{
3999 struct e1000_hw *hw = &adapter->hw;
4000 u32 vf;
4001
4002 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
4003 /* process any reset requests */
4004 if (!igb_check_for_rst(hw, vf)) {
4005 adapter->vf_data[vf].clear_to_send = false;
4006 igb_vf_reset_event(adapter, vf);
4007 }
4008
4009 /* process any messages pending */
4010 if (!igb_check_for_msg(hw, vf))
4011 igb_rcv_msg_from_vf(adapter, vf);
4012
4013 /* process any acks */
4014 if (!igb_check_for_ack(hw, vf))
4015 igb_rcv_ack_from_vf(adapter, vf);
4016
4017 }
4018}
4019
4020static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
4021{
4022 u32 mbx_size = E1000_VFMAILBOX_SIZE;
4023 u32 msgbuf[mbx_size];
4024 struct e1000_hw *hw = &adapter->hw;
4025 s32 retval;
4026
4027 retval = igb_read_mbx(hw, msgbuf, mbx_size, vf);
4028
4029 if (retval)
4030 dev_err(&adapter->pdev->dev,
4031 "Error receiving message from VF\n");
4032
4033 /* this is a message we already processed, do nothing */
4034 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
4035 return retval;
4036
4037 /*
4038 * until the vf completes a reset it should not be
4039 * allowed to start any configuration.
4040 */
4041
4042 if (msgbuf[0] == E1000_VF_RESET) {
4043 igb_vf_reset_msg(adapter, vf);
4044
4045 return retval;
4046 }
4047
4048 if (!adapter->vf_data[vf].clear_to_send) {
4049 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
4050 igb_write_mbx(hw, msgbuf, 1, vf);
4051 return retval;
4052 }
4053
4054 switch ((msgbuf[0] & 0xFFFF)) {
4055 case E1000_VF_SET_MAC_ADDR:
4056 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
4057 break;
4058 case E1000_VF_SET_MULTICAST:
4059 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
4060 break;
4061 case E1000_VF_SET_LPE:
4062 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
4063 break;
4064 case E1000_VF_SET_VLAN:
4065 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
4066 break;
4067 default:
4068 dev_err(&adapter->pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
4069 retval = -1;
4070 break;
4071 }
4072
4073 /* notify the VF of the results of what it sent us */
4074 if (retval)
4075 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
4076 else
4077 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
4078
4079 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
4080
4081 igb_write_mbx(hw, msgbuf, 1, vf);
4082
4083 return retval;
4084}
4085
3495/** 4086/**
3496 * igb_intr_msi - Interrupt Handler 4087 * igb_intr_msi - Interrupt Handler
3497 * @irq: interrupt number 4088 * @irq: interrupt number
@@ -3507,19 +4098,24 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
3507 4098
3508 igb_write_itr(adapter->rx_ring); 4099 igb_write_itr(adapter->rx_ring);
3509 4100
4101 if(icr & E1000_ICR_DOUTSYNC) {
4102 /* HW is reporting DMA is out of sync */
4103 adapter->stats.doosync++;
4104 }
4105
3510 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 4106 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
3511 hw->mac.get_link_status = 1; 4107 hw->mac.get_link_status = 1;
3512 if (!test_bit(__IGB_DOWN, &adapter->state)) 4108 if (!test_bit(__IGB_DOWN, &adapter->state))
3513 mod_timer(&adapter->watchdog_timer, jiffies + 1); 4109 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3514 } 4110 }
3515 4111
3516 netif_rx_schedule(&adapter->rx_ring[0].napi); 4112 napi_schedule(&adapter->rx_ring[0].napi);
3517 4113
3518 return IRQ_HANDLED; 4114 return IRQ_HANDLED;
3519} 4115}
3520 4116
3521/** 4117/**
3522 * igb_intr - Interrupt Handler 4118 * igb_intr - Legacy Interrupt Handler
3523 * @irq: interrupt number 4119 * @irq: interrupt number
3524 * @data: pointer to a network interface device structure 4120 * @data: pointer to a network interface device structure
3525 **/ 4121 **/
@@ -3531,7 +4127,6 @@ static irqreturn_t igb_intr(int irq, void *data)
3531 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No 4127 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
3532 * need for the IMC write */ 4128 * need for the IMC write */
3533 u32 icr = rd32(E1000_ICR); 4129 u32 icr = rd32(E1000_ICR);
3534 u32 eicr = 0;
3535 if (!icr) 4130 if (!icr)
3536 return IRQ_NONE; /* Not our interrupt */ 4131 return IRQ_NONE; /* Not our interrupt */
3537 4132
@@ -3542,7 +4137,10 @@ static irqreturn_t igb_intr(int irq, void *data)
3542 if (!(icr & E1000_ICR_INT_ASSERTED)) 4137 if (!(icr & E1000_ICR_INT_ASSERTED))
3543 return IRQ_NONE; 4138 return IRQ_NONE;
3544 4139
3545 eicr = rd32(E1000_EICR); 4140 if(icr & E1000_ICR_DOUTSYNC) {
4141 /* HW is reporting DMA is out of sync */
4142 adapter->stats.doosync++;
4143 }
3546 4144
3547 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 4145 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
3548 hw->mac.get_link_status = 1; 4146 hw->mac.get_link_status = 1;
@@ -3551,11 +4149,31 @@ static irqreturn_t igb_intr(int irq, void *data)
3551 mod_timer(&adapter->watchdog_timer, jiffies + 1); 4149 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3552 } 4150 }
3553 4151
3554 netif_rx_schedule(&adapter->rx_ring[0].napi); 4152 napi_schedule(&adapter->rx_ring[0].napi);
3555 4153
3556 return IRQ_HANDLED; 4154 return IRQ_HANDLED;
3557} 4155}
3558 4156
4157static inline void igb_rx_irq_enable(struct igb_ring *rx_ring)
4158{
4159 struct igb_adapter *adapter = rx_ring->adapter;
4160 struct e1000_hw *hw = &adapter->hw;
4161
4162 if (adapter->itr_setting & 3) {
4163 if (adapter->num_rx_queues == 1)
4164 igb_set_itr(adapter);
4165 else
4166 igb_update_ring_itr(rx_ring);
4167 }
4168
4169 if (!test_bit(__IGB_DOWN, &adapter->state)) {
4170 if (adapter->msix_entries)
4171 wr32(E1000_EIMS, rx_ring->eims_value);
4172 else
4173 igb_irq_enable(adapter);
4174 }
4175}
4176
3559/** 4177/**
3560 * igb_poll - NAPI Rx polling callback 4178 * igb_poll - NAPI Rx polling callback
3561 * @napi: napi polling structure 4179 * @napi: napi polling structure
@@ -3564,70 +4182,64 @@ static irqreturn_t igb_intr(int irq, void *data)
3564static int igb_poll(struct napi_struct *napi, int budget) 4182static int igb_poll(struct napi_struct *napi, int budget)
3565{ 4183{
3566 struct igb_ring *rx_ring = container_of(napi, struct igb_ring, napi); 4184 struct igb_ring *rx_ring = container_of(napi, struct igb_ring, napi);
3567 struct igb_adapter *adapter = rx_ring->adapter; 4185 int work_done = 0;
3568 struct net_device *netdev = adapter->netdev;
3569 int tx_clean_complete, work_done = 0;
3570 4186
3571 /* this poll routine only supports one tx and one rx queue */
3572#ifdef CONFIG_IGB_DCA 4187#ifdef CONFIG_IGB_DCA
3573 if (adapter->flags & IGB_FLAG_DCA_ENABLED) 4188 if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
3574 igb_update_tx_dca(&adapter->tx_ring[0]); 4189 igb_update_rx_dca(rx_ring);
3575#endif 4190#endif
3576 tx_clean_complete = igb_clean_tx_irq(&adapter->tx_ring[0]); 4191 igb_clean_rx_irq_adv(rx_ring, &work_done, budget);
3577 4192
4193 if (rx_ring->buddy) {
3578#ifdef CONFIG_IGB_DCA 4194#ifdef CONFIG_IGB_DCA
3579 if (adapter->flags & IGB_FLAG_DCA_ENABLED) 4195 if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
3580 igb_update_rx_dca(&adapter->rx_ring[0]); 4196 igb_update_tx_dca(rx_ring->buddy);
3581#endif 4197#endif
3582 igb_clean_rx_irq_adv(&adapter->rx_ring[0], &work_done, budget); 4198 if (!igb_clean_tx_irq(rx_ring->buddy))
4199 work_done = budget;
4200 }
3583 4201
3584 /* If no Tx and not enough Rx work done, exit the polling mode */ 4202 /* If not enough Rx work done, exit the polling mode */
3585 if ((tx_clean_complete && (work_done < budget)) || 4203 if (work_done < budget) {
3586 !netif_running(netdev)) { 4204 napi_complete(napi);
3587 if (adapter->itr_setting & 3) 4205 igb_rx_irq_enable(rx_ring);
3588 igb_set_itr(adapter);
3589 netif_rx_complete(napi);
3590 if (!test_bit(__IGB_DOWN, &adapter->state))
3591 igb_irq_enable(adapter);
3592 return 0;
3593 } 4206 }
3594 4207
3595 return 1; 4208 return work_done;
3596} 4209}
3597 4210
3598static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget) 4211/**
4212 * igb_hwtstamp - utility function which checks for TX time stamp
4213 * @adapter: board private structure
4214 * @skb: packet that was just sent
4215 *
4216 * If we were asked to do hardware stamping and such a time stamp is
4217 * available, then it must have been for this skb here because we only
4218 * allow only one such packet into the queue.
4219 */
4220static void igb_tx_hwtstamp(struct igb_adapter *adapter, struct sk_buff *skb)
3599{ 4221{
3600 struct igb_ring *rx_ring = container_of(napi, struct igb_ring, napi); 4222 union skb_shared_tx *shtx = skb_tx(skb);
3601 struct igb_adapter *adapter = rx_ring->adapter;
3602 struct e1000_hw *hw = &adapter->hw; 4223 struct e1000_hw *hw = &adapter->hw;
3603 struct net_device *netdev = adapter->netdev;
3604 int work_done = 0;
3605
3606#ifdef CONFIG_IGB_DCA
3607 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3608 igb_update_rx_dca(rx_ring);
3609#endif
3610 igb_clean_rx_irq_adv(rx_ring, &work_done, budget);
3611
3612
3613 /* If not enough Rx work done, exit the polling mode */
3614 if ((work_done == 0) || !netif_running(netdev)) {
3615 netif_rx_complete(napi);
3616 4224
3617 if (adapter->itr_setting & 3) { 4225 if (unlikely(shtx->hardware)) {
3618 if (adapter->num_rx_queues == 1) 4226 u32 valid = rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID;
3619 igb_set_itr(adapter); 4227 if (valid) {
3620 else 4228 u64 regval = rd32(E1000_TXSTMPL);
3621 igb_update_ring_itr(rx_ring); 4229 u64 ns;
4230 struct skb_shared_hwtstamps shhwtstamps;
4231
4232 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
4233 regval |= (u64)rd32(E1000_TXSTMPH) << 32;
4234 ns = timecounter_cyc2time(&adapter->clock,
4235 regval);
4236 timecompare_update(&adapter->compare, ns);
4237 shhwtstamps.hwtstamp = ns_to_ktime(ns);
4238 shhwtstamps.syststamp =
4239 timecompare_transform(&adapter->compare, ns);
4240 skb_tstamp_tx(skb, &shhwtstamps);
3622 } 4241 }
3623
3624 if (!test_bit(__IGB_DOWN, &adapter->state))
3625 wr32(E1000_EIMS, rx_ring->eims_value);
3626
3627 return 0;
3628 } 4242 }
3629
3630 return 1;
3631} 4243}
3632 4244
3633/** 4245/**
@@ -3668,6 +4280,8 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
3668 skb->len; 4280 skb->len;
3669 total_packets += segs; 4281 total_packets += segs;
3670 total_bytes += bytecount; 4282 total_bytes += bytecount;
4283
4284 igb_tx_hwtstamp(adapter, skb);
3671 } 4285 }
3672 4286
3673 igb_unmap_and_free_tx_resource(adapter, buffer_info); 4287 igb_unmap_and_free_tx_resource(adapter, buffer_info);
@@ -3677,7 +4291,6 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
3677 if (i == tx_ring->count) 4291 if (i == tx_ring->count)
3678 i = 0; 4292 i = 0;
3679 } 4293 }
3680
3681 eop = tx_ring->buffer_info[i].next_to_watch; 4294 eop = tx_ring->buffer_info[i].next_to_watch;
3682 eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop); 4295 eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
3683 } 4296 }
@@ -3742,44 +4355,11 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
3742 return (count < tx_ring->count); 4355 return (count < tx_ring->count);
3743} 4356}
3744 4357
3745#ifdef CONFIG_IGB_LRO
3746 /**
3747 * igb_get_skb_hdr - helper function for LRO header processing
3748 * @skb: pointer to sk_buff to be added to LRO packet
3749 * @iphdr: pointer to ip header structure
3750 * @tcph: pointer to tcp header structure
3751 * @hdr_flags: pointer to header flags
3752 * @priv: pointer to the receive descriptor for the current sk_buff
3753 **/
3754static int igb_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
3755 u64 *hdr_flags, void *priv)
3756{
3757 union e1000_adv_rx_desc *rx_desc = priv;
3758 u16 pkt_type = rx_desc->wb.lower.lo_dword.pkt_info &
3759 (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP);
3760
3761 /* Verify that this is a valid IPv4 TCP packet */
3762 if (pkt_type != (E1000_RXDADV_PKTTYPE_IPV4 |
3763 E1000_RXDADV_PKTTYPE_TCP))
3764 return -1;
3765
3766 /* Set network headers */
3767 skb_reset_network_header(skb);
3768 skb_set_transport_header(skb, ip_hdrlen(skb));
3769 *iphdr = ip_hdr(skb);
3770 *tcph = tcp_hdr(skb);
3771 *hdr_flags = LRO_IPV4 | LRO_TCP;
3772
3773 return 0;
3774
3775}
3776#endif /* CONFIG_IGB_LRO */
3777
3778/** 4358/**
3779 * igb_receive_skb - helper function to handle rx indications 4359 * igb_receive_skb - helper function to handle rx indications
3780 * @ring: pointer to receive ring receving this packet 4360 * @ring: pointer to receive ring receving this packet
3781 * @status: descriptor status field as written by hardware 4361 * @status: descriptor status field as written by hardware
3782 * @vlan: descriptor vlan field as written by hardware (no le/be conversion) 4362 * @rx_desc: receive descriptor containing vlan and type information.
3783 * @skb: pointer to sk_buff to be indicated to stack 4363 * @skb: pointer to sk_buff to be indicated to stack
3784 **/ 4364 **/
3785static void igb_receive_skb(struct igb_ring *ring, u8 status, 4365static void igb_receive_skb(struct igb_ring *ring, u8 status,
@@ -3789,31 +4369,23 @@ static void igb_receive_skb(struct igb_ring *ring, u8 status,
3789 struct igb_adapter * adapter = ring->adapter; 4369 struct igb_adapter * adapter = ring->adapter;
3790 bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP)); 4370 bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP));
3791 4371
3792#ifdef CONFIG_IGB_LRO 4372 skb_record_rx_queue(skb, ring->queue_index);
3793 if (adapter->netdev->features & NETIF_F_LRO && 4373 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
3794 skb->ip_summed == CHECKSUM_UNNECESSARY) {
3795 if (vlan_extracted) 4374 if (vlan_extracted)
3796 lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb, 4375 vlan_gro_receive(&ring->napi, adapter->vlgrp,
3797 adapter->vlgrp, 4376 le16_to_cpu(rx_desc->wb.upper.vlan),
3798 le16_to_cpu(rx_desc->wb.upper.vlan), 4377 skb);
3799 rx_desc);
3800 else 4378 else
3801 lro_receive_skb(&ring->lro_mgr,skb, rx_desc); 4379 napi_gro_receive(&ring->napi, skb);
3802 ring->lro_used = 1;
3803 } else { 4380 } else {
3804#endif
3805 if (vlan_extracted) 4381 if (vlan_extracted)
3806 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 4382 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
3807 le16_to_cpu(rx_desc->wb.upper.vlan)); 4383 le16_to_cpu(rx_desc->wb.upper.vlan));
3808 else 4384 else
3809
3810 netif_receive_skb(skb); 4385 netif_receive_skb(skb);
3811#ifdef CONFIG_IGB_LRO
3812 } 4386 }
3813#endif
3814} 4387}
3815 4388
3816
3817static inline void igb_rx_checksum_adv(struct igb_adapter *adapter, 4389static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
3818 u32 status_err, struct sk_buff *skb) 4390 u32 status_err, struct sk_buff *skb)
3819{ 4391{
@@ -3841,17 +4413,19 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
3841{ 4413{
3842 struct igb_adapter *adapter = rx_ring->adapter; 4414 struct igb_adapter *adapter = rx_ring->adapter;
3843 struct net_device *netdev = adapter->netdev; 4415 struct net_device *netdev = adapter->netdev;
4416 struct e1000_hw *hw = &adapter->hw;
3844 struct pci_dev *pdev = adapter->pdev; 4417 struct pci_dev *pdev = adapter->pdev;
3845 union e1000_adv_rx_desc *rx_desc , *next_rxd; 4418 union e1000_adv_rx_desc *rx_desc , *next_rxd;
3846 struct igb_buffer *buffer_info , *next_buffer; 4419 struct igb_buffer *buffer_info , *next_buffer;
3847 struct sk_buff *skb; 4420 struct sk_buff *skb;
3848 unsigned int i;
3849 u32 length, hlen, staterr;
3850 bool cleaned = false; 4421 bool cleaned = false;
3851 int cleaned_count = 0; 4422 int cleaned_count = 0;
3852 unsigned int total_bytes = 0, total_packets = 0; 4423 unsigned int total_bytes = 0, total_packets = 0;
4424 unsigned int i;
4425 u32 length, hlen, staterr;
3853 4426
3854 i = rx_ring->next_to_clean; 4427 i = rx_ring->next_to_clean;
4428 buffer_info = &rx_ring->buffer_info[i];
3855 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); 4429 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
3856 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 4430 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
3857 4431
@@ -3859,25 +4433,22 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
3859 if (*work_done >= budget) 4433 if (*work_done >= budget)
3860 break; 4434 break;
3861 (*work_done)++; 4435 (*work_done)++;
3862 buffer_info = &rx_ring->buffer_info[i];
3863 4436
3864 /* HW will not DMA in data larger than the given buffer, even 4437 skb = buffer_info->skb;
3865 * if it parses the (NFS, of course) header to be larger. In 4438 prefetch(skb->data - NET_IP_ALIGN);
3866 * that case, it fills the header buffer and spills the rest 4439 buffer_info->skb = NULL;
3867 * into the page. 4440
3868 */ 4441 i++;
3869 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) & 4442 if (i == rx_ring->count)
3870 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT; 4443 i = 0;
3871 if (hlen > adapter->rx_ps_hdr_size) 4444 next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
3872 hlen = adapter->rx_ps_hdr_size; 4445 prefetch(next_rxd);
4446 next_buffer = &rx_ring->buffer_info[i];
3873 4447
3874 length = le16_to_cpu(rx_desc->wb.upper.length); 4448 length = le16_to_cpu(rx_desc->wb.upper.length);
3875 cleaned = true; 4449 cleaned = true;
3876 cleaned_count++; 4450 cleaned_count++;
3877 4451
3878 skb = buffer_info->skb;
3879 prefetch(skb->data - NET_IP_ALIGN);
3880 buffer_info->skb = NULL;
3881 if (!adapter->rx_ps_hdr_size) { 4452 if (!adapter->rx_ps_hdr_size) {
3882 pci_unmap_single(pdev, buffer_info->dma, 4453 pci_unmap_single(pdev, buffer_info->dma,
3883 adapter->rx_buffer_len + 4454 adapter->rx_buffer_len +
@@ -3887,10 +4458,19 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
3887 goto send_up; 4458 goto send_up;
3888 } 4459 }
3889 4460
4461 /* HW will not DMA in data larger than the given buffer, even
4462 * if it parses the (NFS, of course) header to be larger. In
4463 * that case, it fills the header buffer and spills the rest
4464 * into the page.
4465 */
4466 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
4467 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
4468 if (hlen > adapter->rx_ps_hdr_size)
4469 hlen = adapter->rx_ps_hdr_size;
4470
3890 if (!skb_shinfo(skb)->nr_frags) { 4471 if (!skb_shinfo(skb)->nr_frags) {
3891 pci_unmap_single(pdev, buffer_info->dma, 4472 pci_unmap_single(pdev, buffer_info->dma,
3892 adapter->rx_ps_hdr_size + 4473 adapter->rx_ps_hdr_size + NET_IP_ALIGN,
3893 NET_IP_ALIGN,
3894 PCI_DMA_FROMDEVICE); 4474 PCI_DMA_FROMDEVICE);
3895 skb_put(skb, hlen); 4475 skb_put(skb, hlen);
3896 } 4476 }
@@ -3916,13 +4496,6 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
3916 4496
3917 skb->truesize += length; 4497 skb->truesize += length;
3918 } 4498 }
3919send_up:
3920 i++;
3921 if (i == rx_ring->count)
3922 i = 0;
3923 next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
3924 prefetch(next_rxd);
3925 next_buffer = &rx_ring->buffer_info[i];
3926 4499
3927 if (!(staterr & E1000_RXD_STAT_EOP)) { 4500 if (!(staterr & E1000_RXD_STAT_EOP)) {
3928 buffer_info->skb = next_buffer->skb; 4501 buffer_info->skb = next_buffer->skb;
@@ -3931,6 +4504,47 @@ send_up:
3931 next_buffer->dma = 0; 4504 next_buffer->dma = 0;
3932 goto next_desc; 4505 goto next_desc;
3933 } 4506 }
4507send_up:
4508 /*
4509 * If this bit is set, then the RX registers contain
4510 * the time stamp. No other packet will be time
4511 * stamped until we read these registers, so read the
4512 * registers to make them available again. Because
4513 * only one packet can be time stamped at a time, we
4514 * know that the register values must belong to this
4515 * one here and therefore we don't need to compare
4516 * any of the additional attributes stored for it.
4517 *
4518 * If nothing went wrong, then it should have a
4519 * skb_shared_tx that we can turn into a
4520 * skb_shared_hwtstamps.
4521 *
4522 * TODO: can time stamping be triggered (thus locking
4523 * the registers) without the packet reaching this point
4524 * here? In that case RX time stamping would get stuck.
4525 *
4526 * TODO: in "time stamp all packets" mode this bit is
4527 * not set. Need a global flag for this mode and then
4528 * always read the registers. Cannot be done without
4529 * a race condition.
4530 */
4531 if (unlikely(staterr & E1000_RXD_STAT_TS)) {
4532 u64 regval;
4533 u64 ns;
4534 struct skb_shared_hwtstamps *shhwtstamps =
4535 skb_hwtstamps(skb);
4536
4537 WARN(!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID),
4538 "igb: no RX time stamp available for time stamped packet");
4539 regval = rd32(E1000_RXSTMPL);
4540 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
4541 ns = timecounter_cyc2time(&adapter->clock, regval);
4542 timecompare_update(&adapter->compare, ns);
4543 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
4544 shhwtstamps->hwtstamp = ns_to_ktime(ns);
4545 shhwtstamps->syststamp =
4546 timecompare_transform(&adapter->compare, ns);
4547 }
3934 4548
3935 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { 4549 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
3936 dev_kfree_skb_irq(skb); 4550 dev_kfree_skb_irq(skb);
@@ -3958,20 +4572,12 @@ next_desc:
3958 /* use prefetched values */ 4572 /* use prefetched values */
3959 rx_desc = next_rxd; 4573 rx_desc = next_rxd;
3960 buffer_info = next_buffer; 4574 buffer_info = next_buffer;
3961
3962 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 4575 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
3963 } 4576 }
3964 4577
3965 rx_ring->next_to_clean = i; 4578 rx_ring->next_to_clean = i;
3966 cleaned_count = IGB_DESC_UNUSED(rx_ring); 4579 cleaned_count = IGB_DESC_UNUSED(rx_ring);
3967 4580
3968#ifdef CONFIG_IGB_LRO
3969 if (rx_ring->lro_used) {
3970 lro_flush_all(&rx_ring->lro_mgr);
3971 rx_ring->lro_used = 0;
3972 }
3973#endif
3974
3975 if (cleaned_count) 4581 if (cleaned_count)
3976 igb_alloc_rx_buffers_adv(rx_ring, cleaned_count); 4582 igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
3977 4583
@@ -3984,7 +4590,6 @@ next_desc:
3984 return cleaned; 4590 return cleaned;
3985} 4591}
3986 4592
3987
3988/** 4593/**
3989 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split 4594 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
3990 * @adapter: address of board private structure 4595 * @adapter: address of board private structure
@@ -3999,10 +4604,17 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
3999 struct igb_buffer *buffer_info; 4604 struct igb_buffer *buffer_info;
4000 struct sk_buff *skb; 4605 struct sk_buff *skb;
4001 unsigned int i; 4606 unsigned int i;
4607 int bufsz;
4002 4608
4003 i = rx_ring->next_to_use; 4609 i = rx_ring->next_to_use;
4004 buffer_info = &rx_ring->buffer_info[i]; 4610 buffer_info = &rx_ring->buffer_info[i];
4005 4611
4612 if (adapter->rx_ps_hdr_size)
4613 bufsz = adapter->rx_ps_hdr_size;
4614 else
4615 bufsz = adapter->rx_buffer_len;
4616 bufsz += NET_IP_ALIGN;
4617
4006 while (cleaned_count--) { 4618 while (cleaned_count--) {
4007 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); 4619 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
4008 4620
@@ -4018,23 +4630,14 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
4018 buffer_info->page_offset ^= PAGE_SIZE / 2; 4630 buffer_info->page_offset ^= PAGE_SIZE / 2;
4019 } 4631 }
4020 buffer_info->page_dma = 4632 buffer_info->page_dma =
4021 pci_map_page(pdev, 4633 pci_map_page(pdev, buffer_info->page,
4022 buffer_info->page,
4023 buffer_info->page_offset, 4634 buffer_info->page_offset,
4024 PAGE_SIZE / 2, 4635 PAGE_SIZE / 2,
4025 PCI_DMA_FROMDEVICE); 4636 PCI_DMA_FROMDEVICE);
4026 } 4637 }
4027 4638
4028 if (!buffer_info->skb) { 4639 if (!buffer_info->skb) {
4029 int bufsz;
4030
4031 if (adapter->rx_ps_hdr_size)
4032 bufsz = adapter->rx_ps_hdr_size;
4033 else
4034 bufsz = adapter->rx_buffer_len;
4035 bufsz += NET_IP_ALIGN;
4036 skb = netdev_alloc_skb(netdev, bufsz); 4640 skb = netdev_alloc_skb(netdev, bufsz);
4037
4038 if (!skb) { 4641 if (!skb) {
4039 adapter->alloc_rx_buff_failed++; 4642 adapter->alloc_rx_buff_failed++;
4040 goto no_buffers; 4643 goto no_buffers;
@@ -4050,7 +4653,6 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
4050 buffer_info->dma = pci_map_single(pdev, skb->data, 4653 buffer_info->dma = pci_map_single(pdev, skb->data,
4051 bufsz, 4654 bufsz,
4052 PCI_DMA_FROMDEVICE); 4655 PCI_DMA_FROMDEVICE);
4053
4054 } 4656 }
4055 /* Refresh the desc even if buffer_addrs didn't change because 4657 /* Refresh the desc even if buffer_addrs didn't change because
4056 * each write-back erases this info. */ 4658 * each write-back erases this info. */
@@ -4120,6 +4722,163 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4120} 4722}
4121 4723
4122/** 4724/**
4725 * igb_hwtstamp_ioctl - control hardware time stamping
4726 * @netdev:
4727 * @ifreq:
4728 * @cmd:
4729 *
4730 * Outgoing time stamping can be enabled and disabled. Play nice and
4731 * disable it when requested, although it shouldn't case any overhead
4732 * when no packet needs it. At most one packet in the queue may be
4733 * marked for time stamping, otherwise it would be impossible to tell
4734 * for sure to which packet the hardware time stamp belongs.
4735 *
4736 * Incoming time stamping has to be configured via the hardware
4737 * filters. Not all combinations are supported, in particular event
4738 * type has to be specified. Matching the kind of event packet is
4739 * not supported, with the exception of "all V2 events regardless of
4740 * level 2 or 4".
4741 *
4742 **/
4743static int igb_hwtstamp_ioctl(struct net_device *netdev,
4744 struct ifreq *ifr, int cmd)
4745{
4746 struct igb_adapter *adapter = netdev_priv(netdev);
4747 struct e1000_hw *hw = &adapter->hw;
4748 struct hwtstamp_config config;
4749 u32 tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
4750 u32 tsync_rx_ctl_bit = E1000_TSYNCRXCTL_ENABLED;
4751 u32 tsync_rx_ctl_type = 0;
4752 u32 tsync_rx_cfg = 0;
4753 int is_l4 = 0;
4754 int is_l2 = 0;
4755 short port = 319; /* PTP */
4756 u32 regval;
4757
4758 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
4759 return -EFAULT;
4760
4761 /* reserved for future extensions */
4762 if (config.flags)
4763 return -EINVAL;
4764
4765 switch (config.tx_type) {
4766 case HWTSTAMP_TX_OFF:
4767 tsync_tx_ctl_bit = 0;
4768 break;
4769 case HWTSTAMP_TX_ON:
4770 tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
4771 break;
4772 default:
4773 return -ERANGE;
4774 }
4775
4776 switch (config.rx_filter) {
4777 case HWTSTAMP_FILTER_NONE:
4778 tsync_rx_ctl_bit = 0;
4779 break;
4780 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
4781 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
4782 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
4783 case HWTSTAMP_FILTER_ALL:
4784 /*
4785 * register TSYNCRXCFG must be set, therefore it is not
4786 * possible to time stamp both Sync and Delay_Req messages
4787 * => fall back to time stamping all packets
4788 */
4789 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_ALL;
4790 config.rx_filter = HWTSTAMP_FILTER_ALL;
4791 break;
4792 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
4793 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1;
4794 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
4795 is_l4 = 1;
4796 break;
4797 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
4798 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1;
4799 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
4800 is_l4 = 1;
4801 break;
4802 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
4803 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
4804 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
4805 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
4806 is_l2 = 1;
4807 is_l4 = 1;
4808 config.rx_filter = HWTSTAMP_FILTER_SOME;
4809 break;
4810 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
4811 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
4812 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
4813 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
4814 is_l2 = 1;
4815 is_l4 = 1;
4816 config.rx_filter = HWTSTAMP_FILTER_SOME;
4817 break;
4818 case HWTSTAMP_FILTER_PTP_V2_EVENT:
4819 case HWTSTAMP_FILTER_PTP_V2_SYNC:
4820 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
4821 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_EVENT_V2;
4822 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
4823 is_l2 = 1;
4824 break;
4825 default:
4826 return -ERANGE;
4827 }
4828
4829 /* enable/disable TX */
4830 regval = rd32(E1000_TSYNCTXCTL);
4831 regval = (regval & ~E1000_TSYNCTXCTL_ENABLED) | tsync_tx_ctl_bit;
4832 wr32(E1000_TSYNCTXCTL, regval);
4833
4834 /* enable/disable RX, define which PTP packets are time stamped */
4835 regval = rd32(E1000_TSYNCRXCTL);
4836 regval = (regval & ~E1000_TSYNCRXCTL_ENABLED) | tsync_rx_ctl_bit;
4837 regval = (regval & ~0xE) | tsync_rx_ctl_type;
4838 wr32(E1000_TSYNCRXCTL, regval);
4839 wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
4840
4841 /*
4842 * Ethertype Filter Queue Filter[0][15:0] = 0x88F7
4843 * (Ethertype to filter on)
4844 * Ethertype Filter Queue Filter[0][26] = 0x1 (Enable filter)
4845 * Ethertype Filter Queue Filter[0][30] = 0x1 (Enable Timestamping)
4846 */
4847 wr32(E1000_ETQF0, is_l2 ? 0x440088f7 : 0);
4848
4849 /* L4 Queue Filter[0]: only filter by source and destination port */
4850 wr32(E1000_SPQF0, htons(port));
4851 wr32(E1000_IMIREXT(0), is_l4 ?
4852 ((1<<12) | (1<<19) /* bypass size and control flags */) : 0);
4853 wr32(E1000_IMIR(0), is_l4 ?
4854 (htons(port)
4855 | (0<<16) /* immediate interrupt disabled */
4856 | 0 /* (1<<17) bit cleared: do not bypass
4857 destination port check */)
4858 : 0);
4859 wr32(E1000_FTQF0, is_l4 ?
4860 (0x11 /* UDP */
4861 | (1<<15) /* VF not compared */
4862 | (1<<27) /* Enable Timestamping */
4863 | (7<<28) /* only source port filter enabled,
4864 source/target address and protocol
4865 masked */)
4866 : ((1<<15) | (15<<28) /* all mask bits set = filter not
4867 enabled */));
4868
4869 wrfl();
4870
4871 adapter->hwtstamp_config = config;
4872
4873 /* clear TX/RX time stamp registers, just to be sure */
4874 regval = rd32(E1000_TXSTMPH);
4875 regval = rd32(E1000_RXSTMPH);
4876
4877 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
4878 -EFAULT : 0;
4879}
4880
4881/**
4123 * igb_ioctl - 4882 * igb_ioctl -
4124 * @netdev: 4883 * @netdev:
4125 * @ifreq: 4884 * @ifreq:
@@ -4132,6 +4891,8 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4132 case SIOCGMIIREG: 4891 case SIOCGMIIREG:
4133 case SIOCSMIIREG: 4892 case SIOCSMIIREG:
4134 return igb_mii_ioctl(netdev, ifr, cmd); 4893 return igb_mii_ioctl(netdev, ifr, cmd);
4894 case SIOCSHWTSTAMP:
4895 return igb_hwtstamp_ioctl(netdev, ifr, cmd);
4135 default: 4896 default:
4136 return -EOPNOTSUPP; 4897 return -EOPNOTSUPP;
4137 } 4898 }
@@ -4158,8 +4919,6 @@ static void igb_vlan_rx_register(struct net_device *netdev,
4158 rctl &= ~E1000_RCTL_CFIEN; 4919 rctl &= ~E1000_RCTL_CFIEN;
4159 wr32(E1000_RCTL, rctl); 4920 wr32(E1000_RCTL, rctl);
4160 igb_update_mng_vlan(adapter); 4921 igb_update_mng_vlan(adapter);
4161 wr32(E1000_RLPML,
4162 adapter->max_frame_size + VLAN_TAG_SIZE);
4163 } else { 4922 } else {
4164 /* disable VLAN tag insert/strip */ 4923 /* disable VLAN tag insert/strip */
4165 ctrl = rd32(E1000_CTRL); 4924 ctrl = rd32(E1000_CTRL);
@@ -4170,10 +4929,10 @@ static void igb_vlan_rx_register(struct net_device *netdev,
4170 igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 4929 igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
4171 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE; 4930 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
4172 } 4931 }
4173 wr32(E1000_RLPML,
4174 adapter->max_frame_size);
4175 } 4932 }
4176 4933
4934 igb_rlpml_set(adapter);
4935
4177 if (!test_bit(__IGB_DOWN, &adapter->state)) 4936 if (!test_bit(__IGB_DOWN, &adapter->state))
4178 igb_irq_enable(adapter); 4937 igb_irq_enable(adapter);
4179} 4938}
@@ -4182,24 +4941,25 @@ static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
4182{ 4941{
4183 struct igb_adapter *adapter = netdev_priv(netdev); 4942 struct igb_adapter *adapter = netdev_priv(netdev);
4184 struct e1000_hw *hw = &adapter->hw; 4943 struct e1000_hw *hw = &adapter->hw;
4185 u32 vfta, index; 4944 int pf_id = adapter->vfs_allocated_count;
4186 4945
4187 if ((adapter->hw.mng_cookie.status & 4946 if ((hw->mng_cookie.status &
4188 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && 4947 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
4189 (vid == adapter->mng_vlan_id)) 4948 (vid == adapter->mng_vlan_id))
4190 return; 4949 return;
4191 /* add VID to filter table */ 4950
4192 index = (vid >> 5) & 0x7F; 4951 /* add vid to vlvf if sr-iov is enabled,
4193 vfta = array_rd32(E1000_VFTA, index); 4952 * if that fails add directly to filter table */
4194 vfta |= (1 << (vid & 0x1F)); 4953 if (igb_vlvf_set(adapter, vid, true, pf_id))
4195 igb_write_vfta(&adapter->hw, index, vfta); 4954 igb_vfta_set(hw, vid, true);
4955
4196} 4956}
4197 4957
4198static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 4958static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
4199{ 4959{
4200 struct igb_adapter *adapter = netdev_priv(netdev); 4960 struct igb_adapter *adapter = netdev_priv(netdev);
4201 struct e1000_hw *hw = &adapter->hw; 4961 struct e1000_hw *hw = &adapter->hw;
4202 u32 vfta, index; 4962 int pf_id = adapter->vfs_allocated_count;
4203 4963
4204 igb_irq_disable(adapter); 4964 igb_irq_disable(adapter);
4205 vlan_group_set_device(adapter->vlgrp, vid, NULL); 4965 vlan_group_set_device(adapter->vlgrp, vid, NULL);
@@ -4215,11 +4975,10 @@ static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
4215 return; 4975 return;
4216 } 4976 }
4217 4977
4218 /* remove VID from filter table */ 4978 /* remove vid from vlvf if sr-iov is enabled,
4219 index = (vid >> 5) & 0x7F; 4979 * if not in vlvf remove from vfta */
4220 vfta = array_rd32(E1000_VFTA, index); 4980 if (igb_vlvf_set(adapter, vid, false, pf_id))
4221 vfta &= ~(1 << (vid & 0x1F)); 4981 igb_vfta_set(hw, vid, false);
4222 igb_write_vfta(&adapter->hw, index, vfta);
4223} 4982}
4224 4983
4225static void igb_restore_vlan(struct igb_adapter *adapter) 4984static void igb_restore_vlan(struct igb_adapter *adapter)
@@ -4276,7 +5035,6 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
4276 return 0; 5035 return 0;
4277} 5036}
4278 5037
4279
4280static int igb_suspend(struct pci_dev *pdev, pm_message_t state) 5038static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
4281{ 5039{
4282 struct net_device *netdev = pci_get_drvdata(pdev); 5040 struct net_device *netdev = pci_get_drvdata(pdev);
@@ -4368,10 +5126,7 @@ static int igb_resume(struct pci_dev *pdev)
4368 pci_set_power_state(pdev, PCI_D0); 5126 pci_set_power_state(pdev, PCI_D0);
4369 pci_restore_state(pdev); 5127 pci_restore_state(pdev);
4370 5128
4371 if (adapter->need_ioport) 5129 err = pci_enable_device_mem(pdev);
4372 err = pci_enable_device(pdev);
4373 else
4374 err = pci_enable_device_mem(pdev);
4375 if (err) { 5130 if (err) {
4376 dev_err(&pdev->dev, 5131 dev_err(&pdev->dev,
4377 "igb: Cannot enable PCI device from suspend\n"); 5132 "igb: Cannot enable PCI device from suspend\n");
@@ -4392,6 +5147,11 @@ static int igb_resume(struct pci_dev *pdev)
4392 /* e1000_power_up_phy(adapter); */ 5147 /* e1000_power_up_phy(adapter); */
4393 5148
4394 igb_reset(adapter); 5149 igb_reset(adapter);
5150
5151 /* let the f/w know that the h/w is now under the control of the
5152 * driver. */
5153 igb_get_hw_control(adapter);
5154
4395 wr32(E1000_WUS, ~0); 5155 wr32(E1000_WUS, ~0);
4396 5156
4397 if (netif_running(netdev)) { 5157 if (netif_running(netdev)) {
@@ -4402,10 +5162,6 @@ static int igb_resume(struct pci_dev *pdev)
4402 5162
4403 netif_device_attach(netdev); 5163 netif_device_attach(netdev);
4404 5164
4405 /* let the f/w know that the h/w is now under the control of the
4406 * driver. */
4407 igb_get_hw_control(adapter);
4408
4409 return 0; 5165 return 0;
4410} 5166}
4411#endif 5167#endif
@@ -4424,22 +5180,27 @@ static void igb_shutdown(struct pci_dev *pdev)
4424static void igb_netpoll(struct net_device *netdev) 5180static void igb_netpoll(struct net_device *netdev)
4425{ 5181{
4426 struct igb_adapter *adapter = netdev_priv(netdev); 5182 struct igb_adapter *adapter = netdev_priv(netdev);
5183 struct e1000_hw *hw = &adapter->hw;
4427 int i; 5184 int i;
4428 int work_done = 0;
4429
4430 igb_irq_disable(adapter);
4431 adapter->flags |= IGB_FLAG_IN_NETPOLL;
4432 5185
4433 for (i = 0; i < adapter->num_tx_queues; i++) 5186 if (!adapter->msix_entries) {
4434 igb_clean_tx_irq(&adapter->tx_ring[i]); 5187 igb_irq_disable(adapter);
5188 napi_schedule(&adapter->rx_ring[0].napi);
5189 return;
5190 }
4435 5191
4436 for (i = 0; i < adapter->num_rx_queues; i++) 5192 for (i = 0; i < adapter->num_tx_queues; i++) {
4437 igb_clean_rx_irq_adv(&adapter->rx_ring[i], 5193 struct igb_ring *tx_ring = &adapter->tx_ring[i];
4438 &work_done, 5194 wr32(E1000_EIMC, tx_ring->eims_value);
4439 adapter->rx_ring[i].napi.weight); 5195 igb_clean_tx_irq(tx_ring);
5196 wr32(E1000_EIMS, tx_ring->eims_value);
5197 }
4440 5198
4441 adapter->flags &= ~IGB_FLAG_IN_NETPOLL; 5199 for (i = 0; i < adapter->num_rx_queues; i++) {
4442 igb_irq_enable(adapter); 5200 struct igb_ring *rx_ring = &adapter->rx_ring[i];
5201 wr32(E1000_EIMC, rx_ring->eims_value);
5202 napi_schedule(&rx_ring->napi);
5203 }
4443} 5204}
4444#endif /* CONFIG_NET_POLL_CONTROLLER */ 5205#endif /* CONFIG_NET_POLL_CONTROLLER */
4445 5206
@@ -4482,12 +5243,7 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
4482 pci_ers_result_t result; 5243 pci_ers_result_t result;
4483 int err; 5244 int err;
4484 5245
4485 if (adapter->need_ioport) 5246 if (pci_enable_device_mem(pdev)) {
4486 err = pci_enable_device(pdev);
4487 else
4488 err = pci_enable_device_mem(pdev);
4489
4490 if (err) {
4491 dev_err(&pdev->dev, 5247 dev_err(&pdev->dev,
4492 "Cannot re-enable PCI device after reset.\n"); 5248 "Cannot re-enable PCI device after reset.\n");
4493 result = PCI_ERS_RESULT_DISCONNECT; 5249 result = PCI_ERS_RESULT_DISCONNECT;
@@ -4540,4 +5296,172 @@ static void igb_io_resume(struct pci_dev *pdev)
4540 igb_get_hw_control(adapter); 5296 igb_get_hw_control(adapter);
4541} 5297}
4542 5298
5299static inline void igb_set_vmolr(struct e1000_hw *hw, int vfn)
5300{
5301 u32 reg_data;
5302
5303 reg_data = rd32(E1000_VMOLR(vfn));
5304 reg_data |= E1000_VMOLR_BAM | /* Accept broadcast */
5305 E1000_VMOLR_ROPE | /* Accept packets matched in UTA */
5306 E1000_VMOLR_ROMPE | /* Accept packets matched in MTA */
5307 E1000_VMOLR_AUPE | /* Accept untagged packets */
5308 E1000_VMOLR_STRVLAN; /* Strip vlan tags */
5309 wr32(E1000_VMOLR(vfn), reg_data);
5310}
5311
5312static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
5313 int vfn)
5314{
5315 struct e1000_hw *hw = &adapter->hw;
5316 u32 vmolr;
5317
5318 vmolr = rd32(E1000_VMOLR(vfn));
5319 vmolr &= ~E1000_VMOLR_RLPML_MASK;
5320 vmolr |= size | E1000_VMOLR_LPE;
5321 wr32(E1000_VMOLR(vfn), vmolr);
5322
5323 return 0;
5324}
5325
5326static inline void igb_set_rah_pool(struct e1000_hw *hw, int pool, int entry)
5327{
5328 u32 reg_data;
5329
5330 reg_data = rd32(E1000_RAH(entry));
5331 reg_data &= ~E1000_RAH_POOL_MASK;
5332 reg_data |= E1000_RAH_POOL_1 << pool;;
5333 wr32(E1000_RAH(entry), reg_data);
5334}
5335
5336static void igb_set_mc_list_pools(struct igb_adapter *adapter,
5337 int entry_count, u16 total_rar_filters)
5338{
5339 struct e1000_hw *hw = &adapter->hw;
5340 int i = adapter->vfs_allocated_count + 1;
5341
5342 if ((i + entry_count) < total_rar_filters)
5343 total_rar_filters = i + entry_count;
5344
5345 for (; i < total_rar_filters; i++)
5346 igb_set_rah_pool(hw, adapter->vfs_allocated_count, i);
5347}
5348
5349static int igb_set_vf_mac(struct igb_adapter *adapter,
5350 int vf, unsigned char *mac_addr)
5351{
5352 struct e1000_hw *hw = &adapter->hw;
5353 int rar_entry = vf + 1; /* VF MAC addresses start at entry 1 */
5354
5355 igb_rar_set(hw, mac_addr, rar_entry);
5356
5357 memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
5358
5359 igb_set_rah_pool(hw, vf, rar_entry);
5360
5361 return 0;
5362}
5363
5364static void igb_vmm_control(struct igb_adapter *adapter)
5365{
5366 struct e1000_hw *hw = &adapter->hw;
5367 u32 reg_data;
5368
5369 if (!adapter->vfs_allocated_count)
5370 return;
5371
5372 /* VF's need PF reset indication before they
5373 * can send/receive mail */
5374 reg_data = rd32(E1000_CTRL_EXT);
5375 reg_data |= E1000_CTRL_EXT_PFRSTD;
5376 wr32(E1000_CTRL_EXT, reg_data);
5377
5378 igb_vmdq_set_loopback_pf(hw, true);
5379 igb_vmdq_set_replication_pf(hw, true);
5380}
5381
5382#ifdef CONFIG_PCI_IOV
5383static ssize_t igb_show_num_vfs(struct device *dev,
5384 struct device_attribute *attr, char *buf)
5385{
5386 struct igb_adapter *adapter = netdev_priv(to_net_dev(dev));
5387
5388 return sprintf(buf, "%d\n", adapter->vfs_allocated_count);
5389}
5390
5391static ssize_t igb_set_num_vfs(struct device *dev,
5392 struct device_attribute *attr,
5393 const char *buf, size_t count)
5394{
5395 struct net_device *netdev = to_net_dev(dev);
5396 struct igb_adapter *adapter = netdev_priv(netdev);
5397 struct e1000_hw *hw = &adapter->hw;
5398 struct pci_dev *pdev = adapter->pdev;
5399 unsigned int num_vfs, i;
5400 unsigned char mac_addr[ETH_ALEN];
5401 int err;
5402
5403 sscanf(buf, "%u", &num_vfs);
5404
5405 if (num_vfs > 7)
5406 num_vfs = 7;
5407
5408 /* value unchanged do nothing */
5409 if (num_vfs == adapter->vfs_allocated_count)
5410 return count;
5411
5412 if (netdev->flags & IFF_UP)
5413 igb_close(netdev);
5414
5415 igb_reset_interrupt_capability(adapter);
5416 igb_free_queues(adapter);
5417 adapter->tx_ring = NULL;
5418 adapter->rx_ring = NULL;
5419 adapter->vfs_allocated_count = 0;
5420
5421 /* reclaim resources allocated to VFs since we are changing count */
5422 if (adapter->vf_data) {
5423 /* disable iov and allow time for transactions to clear */
5424 pci_disable_sriov(pdev);
5425 msleep(500);
5426
5427 kfree(adapter->vf_data);
5428 adapter->vf_data = NULL;
5429 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
5430 msleep(100);
5431 dev_info(&pdev->dev, "IOV Disabled\n");
5432 }
5433
5434 if (num_vfs) {
5435 adapter->vf_data = kcalloc(num_vfs,
5436 sizeof(struct vf_data_storage),
5437 GFP_KERNEL);
5438 if (!adapter->vf_data) {
5439 dev_err(&pdev->dev, "Could not allocate VF private "
5440 "data - IOV enable failed\n");
5441 } else {
5442 err = pci_enable_sriov(pdev, num_vfs);
5443 if (!err) {
5444 adapter->vfs_allocated_count = num_vfs;
5445 dev_info(&pdev->dev, "%d vfs allocated\n", num_vfs);
5446 for (i = 0; i < adapter->vfs_allocated_count; i++) {
5447 random_ether_addr(mac_addr);
5448 igb_set_vf_mac(adapter, i, mac_addr);
5449 }
5450 } else {
5451 kfree(adapter->vf_data);
5452 adapter->vf_data = NULL;
5453 }
5454 }
5455 }
5456
5457 igb_set_interrupt_capability(adapter);
5458 igb_alloc_queues(adapter);
5459 igb_reset(adapter);
5460
5461 if (netdev->flags & IFF_UP)
5462 igb_open(netdev);
5463
5464 return count;
5465}
5466#endif /* CONFIG_PCI_IOV */
4543/* igb_main.c */ 5467/* igb_main.c */