Diffstat (limited to 'drivers')
-rw-r--r--  drivers/isdn/gigaset/bas-gigaset.c | 2
-rw-r--r--  drivers/isdn/gigaset/usb-gigaset.c | 2
-rw-r--r--  drivers/isdn/mISDN/dsp_cmx.c | 109
-rw-r--r--  drivers/net/bonding/bond_alb.c | 2
-rw-r--r--  drivers/net/bonding/bond_netlink.c | 34
-rw-r--r--  drivers/net/bonding/bond_options.c | 2
-rw-r--r--  drivers/net/can/Kconfig | 2
-rw-r--r--  drivers/net/can/Makefile | 3
-rw-r--r--  drivers/net/can/c_can/Makefile | 2
-rw-r--r--  drivers/net/can/cc770/Makefile | 2
-rw-r--r--  drivers/net/can/dev.c | 3
-rw-r--r--  drivers/net/can/flexcan.c | 111
-rw-r--r--  drivers/net/can/m_can/Kconfig | 4
-rw-r--r--  drivers/net/can/m_can/Makefile | 5
-rw-r--r--  drivers/net/can/m_can/m_can.c | 1202
-rw-r--r--  drivers/net/can/mscan/Makefile | 2
-rw-r--r--  drivers/net/can/rcar_can.c | 66
-rw-r--r--  drivers/net/can/sja1000/Makefile | 2
-rw-r--r--  drivers/net/can/softing/Makefile | 2
-rw-r--r--  drivers/net/can/spi/Makefile | 2
-rw-r--r--  drivers/net/can/spi/mcp251x.c | 16
-rw-r--r--  drivers/net/can/usb/Makefile | 2
-rw-r--r--  drivers/net/dsa/Kconfig | 11
-rw-r--r--  drivers/net/dsa/Makefile | 1
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 626
-rw-r--r--  drivers/net/dsa/bcm_sf2.h | 140
-rw-r--r--  drivers/net/dsa/bcm_sf2_regs.h | 227
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-common.h | 11
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-dcb.c | 1
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c | 1
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-desc.c | 6
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 1
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 1
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-main.c | 1
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 1
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-ptp.c | 1
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe.h | 2
-rw-r--r--  drivers/net/ethernet/arc/Kconfig | 8
-rw-r--r--  drivers/net/ethernet/arc/Makefile | 3
-rw-r--r--  drivers/net/ethernet/arc/emac.h | 6
-rw-r--r--  drivers/net/ethernet/arc/emac_arc.c | 95
-rw-r--r--  drivers/net/ethernet/arc/emac_main.c | 127
-rw-r--r--  drivers/net/ethernet/arc/emac_mdio.c | 7
-rw-r--r--  drivers/net/ethernet/broadcom/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 18
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 40
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 128
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h | 14
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 42
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h | 222
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | 229
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 844
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 178
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 130
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 62
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 48
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | 10
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 9
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c | 6
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic.h | 1
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_ethtool.c | 39
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 50
-rw-r--r--  drivers/net/ethernet/dec/tulip/dmfe.c | 152
-rw-r--r--  drivers/net/ethernet/ec_bhf.c | 101
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 28
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 14
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_ethtool.c | 35
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_hw.h | 12
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 289
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 155
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 9
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c | 8
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 10
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 3
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 70
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 259
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_prototype.h | 6
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 90
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 50
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq.c | 9
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_common.c | 8
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_prototype.h | 6
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 8
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c | 31
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.h | 4
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 24
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 204
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 278
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 14
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 7
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.c | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 2
-rw-r--r--  drivers/net/ethernet/nvidia/forcedeth.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 218
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 156
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 10
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 472
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 62
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 18
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 2
-rw-r--r--  drivers/net/ethernet/sun/sungem.c | 34
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 22
-rw-r--r--  drivers/net/hyperv/hyperv_net.h | 4
-rw-r--r--  drivers/net/hyperv/netvsc.c | 22
-rw-r--r--  drivers/net/phy/Kconfig | 10
-rw-r--r--  drivers/net/phy/Makefile | 1
-rw-r--r--  drivers/net/phy/amd-xgbe-phy.c | 168
-rw-r--r--  drivers/net/phy/bcm7xxx.c | 116
-rw-r--r--  drivers/net/phy/broadcom.c | 122
-rw-r--r--  drivers/net/phy/dp83640.c | 29
-rw-r--r--  drivers/net/phy/fixed.c | 11
-rw-r--r--  drivers/net/phy/mdio-bcm-unimac.c | 213
-rw-r--r--  drivers/net/phy/mdio_bus.c | 8
-rw-r--r--  drivers/net/phy/phy.c | 12
-rw-r--r--  drivers/net/phy/phy_device.c | 4
-rw-r--r--  drivers/net/sungem_phy.c | 304
-rw-r--r--  drivers/net/team/team.c | 44
-rw-r--r--  drivers/net/usb/r8152.c | 151
-rw-r--r--  drivers/net/virtio_net.c | 4
-rw-r--r--  drivers/net/vxlan.c | 4
-rw-r--r--  drivers/net/wan/dlci.c | 6
-rw-r--r--  drivers/usb/gadget/function/f_ncm.c | 8
139 files changed, 6895 insertions, 2240 deletions
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index b7ae0a0dd5b6..aecec6d32463 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -2365,7 +2365,7 @@ static int gigaset_probe(struct usb_interface *interface,
 	endpoint = &hostif->endpoint[0].desc;
 	usb_fill_int_urb(ucs->urb_int_in, udev,
 			 usb_rcvintpipe(udev,
-					(endpoint->bEndpointAddress) & 0x0f),
+					usb_endpoint_num(endpoint)),
 			 ucs->int_in_buf, IP_MSGSIZE, read_int_callback, cs,
 			 endpoint->bInterval);
 	rc = usb_submit_urb(ucs->urb_int_in, GFP_KERNEL);
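Both gigaset hunks are behaviour-preserving cleanups: usb_endpoint_num() is the standard accessor for the endpoint number encoded in the low nibble of bEndpointAddress. For reference, a sketch of the helper as it appears in include/uapi/linux/usb/ch9.h (comment paraphrased, same logic):

static inline int usb_endpoint_num(const struct usb_endpoint_descriptor *epd)
{
	/* USB_ENDPOINT_NUMBER_MASK is 0x0f, so this is exactly the
	 * open-coded "& 0x0f" being replaced above. */
	return epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
}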
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
index d0a41cb0cf62..00d40773b07f 100644
--- a/drivers/isdn/gigaset/usb-gigaset.c
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -751,7 +751,7 @@ static int gigaset_probe(struct usb_interface *interface,
 	/* Fill the interrupt urb and send it to the core */
 	usb_fill_int_urb(ucs->read_urb, udev,
 			 usb_rcvintpipe(udev,
-					endpoint->bEndpointAddress & 0x0f),
+					usb_endpoint_num(endpoint)),
 			 ucs->rcvbuf, buffer_size,
 			 gigaset_read_int_callback,
 			 cs, endpoint->bInterval);
diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
index a4f05c54c32b..87f7dff20ff6 100644
--- a/drivers/isdn/mISDN/dsp_cmx.c
+++ b/drivers/isdn/mISDN/dsp_cmx.c
@@ -1454,66 +1454,63 @@ dsp_cmx_send_member(struct dsp *dsp, int len, s32 *c, int members)
 #ifdef CMX_CONF_DEBUG
 	if (0) {
 #else
 	if (members == 2) {
 #endif
 		/* "other" becomes other party */
 		other = (list_entry(conf->mlist.next,
 				    struct dsp_conf_member, list))->dsp;
 		if (other == member)
 			other = (list_entry(conf->mlist.prev,
 					    struct dsp_conf_member, list))->dsp;
 		o_q = other->rx_buff; /* received data */
 		o_rr = (other->rx_R + len) & CMX_BUFF_MASK;
 		/* end of rx-pointer */
 		o_r = (o_rr - rr + r) & CMX_BUFF_MASK;
 		/* start rx-pointer at current read position*/
 		/* -> if echo is NOT enabled */
 		if (!dsp->echo.software) {
 			/*
 			 * -> copy other member's rx-data,
 			 * if tx-data is available, mix
 			 */
 			while (o_r != o_rr && t != tt) {
 				*d++ = dsp_audio_mix_law[(p[t] << 8) | o_q[o_r]];
 				t = (t + 1) & CMX_BUFF_MASK;
 				o_r = (o_r + 1) & CMX_BUFF_MASK;
 			}
 			while (o_r != o_rr) {
 				*d++ = o_q[o_r];
 				o_r = (o_r + 1) & CMX_BUFF_MASK;
 			}
 		/* -> if echo is enabled */
 		} else {
 			/*
 			 * -> mix other member's rx-data with echo,
 			 * if tx-data is available, mix
 			 */
 			while (r != rr && t != tt) {
 				sample = dsp_audio_law_to_s32[p[t]] +
 					dsp_audio_law_to_s32[q[r]] +
 					dsp_audio_law_to_s32[o_q[o_r]];
 				if (sample < -32768)
 					sample = -32768;
 				else if (sample > 32767)
 					sample = 32767;
 				*d++ = dsp_audio_s16_to_law[sample & 0xffff];
 				/* tx-data + rx_data + echo */
 				t = (t + 1) & CMX_BUFF_MASK;
 				r = (r + 1) & CMX_BUFF_MASK;
 				o_r = (o_r + 1) & CMX_BUFF_MASK;
 			}
 			while (r != rr) {
 				*d++ = dsp_audio_mix_law[(q[r] << 8) | o_q[o_r]];
 				r = (r + 1) & CMX_BUFF_MASK;
 				o_r = (o_r + 1) & CMX_BUFF_MASK;
 			}
-		}
 		}
-		dsp->tx_R = t;
-		goto send_packet;
 	}
-#ifdef DSP_NEVER_DEFINED
+	dsp->tx_R = t;
+	goto send_packet;
 	}
-#endif
 	/* PROCESS DATA (three or more members) */
 	/* -> if echo is NOT enabled */
 	if (!dsp->echo.software) {
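The two-party branch above mixes on linear samples: each law-encoded byte is expanded through dsp_audio_law_to_s32[], the three sources are summed, and the sum is clipped to the signed 16-bit range before the dsp_audio_s16_to_law[] lookup converts it back. A condensed sketch of that saturation step (helper name and signature are illustrative, not part of the driver):

static inline s16 mix3_saturate(s32 tx, s32 rx, s32 echo)
{
	s32 sum = tx + rx + echo;	/* worst case fits easily in s32 */

	if (sum < -32768)		/* clip instead of wrapping */
		sum = -32768;
	else if (sum > 32767)
		sum = 32767;
	return sum;
}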
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 95dd1f58c260..73c21e233131 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1388,7 +1388,7 @@ static int bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond,
 	}
 
 	if (tx_slave && bond_slave_can_tx(tx_slave)) {
-		if (tx_slave != rcu_dereference(bond->curr_active_slave)) {
+		if (tx_slave != rcu_access_pointer(bond->curr_active_slave)) {
 			ether_addr_copy(eth_data->h_source,
 					tx_slave->dev->dev_addr);
 		}
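The bond_alb change is subtle but sound: curr_active_slave is only compared against tx_slave here, never dereferenced, and rcu_access_pointer() is the primitive for exactly that case (it returns the pointer value without the checking and ordering obligations of a full rcu_dereference()). A generic sketch of the pattern (gp and foo_is_current() are hypothetical names):

struct foo __rcu *gp;	/* hypothetical RCU-managed pointer */

static bool foo_is_current(struct foo *f)
{
	/* value comparison only - nothing is dereferenced, so
	 * rcu_access_pointer() is sufficient and cheaper */
	return f == rcu_access_pointer(gp);
}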
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index d163e112f04c..e1489d9df2a4 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -96,6 +96,10 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
 	[IFLA_BOND_AD_INFO]		= { .type = NLA_NESTED },
 };
 
+static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = {
+	[IFLA_BOND_SLAVE_QUEUE_ID]	= { .type = NLA_U16 },
+};
+
 static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
 {
 	if (tb[IFLA_ADDRESS]) {
@@ -107,6 +111,33 @@ static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
 	return 0;
 }
 
+static int bond_slave_changelink(struct net_device *bond_dev,
+				 struct net_device *slave_dev,
+				 struct nlattr *tb[], struct nlattr *data[])
+{
+	struct bonding *bond = netdev_priv(bond_dev);
+	struct bond_opt_value newval;
+	int err;
+
+	if (!data)
+		return 0;
+
+	if (data[IFLA_BOND_SLAVE_QUEUE_ID]) {
+		u16 queue_id = nla_get_u16(data[IFLA_BOND_SLAVE_QUEUE_ID]);
+		char queue_id_str[IFNAMSIZ + 7];
+
+		/* queue_id option setting expects slave_name:queue_id */
+		snprintf(queue_id_str, sizeof(queue_id_str), "%s:%u\n",
+			 slave_dev->name, queue_id);
+		bond_opt_initstr(&newval, queue_id_str);
+		err = __bond_opt_set(bond, BOND_OPT_QUEUE_ID, &newval);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
 static int bond_changelink(struct net_device *bond_dev,
 			   struct nlattr *tb[], struct nlattr *data[])
 {
@@ -562,6 +593,9 @@ struct rtnl_link_ops bond_link_ops __read_mostly = {
 	.get_num_tx_queues	= bond_get_num_tx_queues,
 	.get_num_rx_queues	= bond_get_num_tx_queues, /* Use the same number
 							     as for TX queues */
+	.slave_maxtype		= IFLA_BOND_SLAVE_MAX,
+	.slave_policy		= bond_slave_policy,
+	.slave_changelink	= bond_slave_changelink,
 	.get_slave_size		= bond_get_slave_size,
 	.fill_slave_info	= bond_fill_slave_info,
 };
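With slave_maxtype, slave_policy and slave_changelink wired up, a bonding slave's queue_id becomes settable over rtnetlink rather than only through the sysfs queue_id file; the kernel side simply re-uses the existing BOND_OPT_QUEUE_ID option by formatting the "slave_name:queue_id" string it already expects. With a new enough iproute2 the userspace side should look roughly like the following (command form assumed from iproute2's bond_slave support, not shown in this commit):

	ip link set dev eth1 type bond_slave queue_id 3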
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index dc73463c2c23..d8dc17faa6b4 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -625,6 +625,8 @@ int __bond_opt_set(struct bonding *bond,
 out:
 	if (ret)
 		bond_opt_error_interpret(bond, opt, ret, val);
+	else
+		call_netdevice_notifiers(NETDEV_CHANGEINFODATA, bond->dev);
 
 	return ret;
 }
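The else branch means every __bond_opt_set() that succeeds now raises NETDEV_CHANGEINFODATA, which rtnetlink converts into an RTM_NEWLINK notification; the practical effect should be that listeners such as `ip monitor link` see bonding option changes as they happen instead of having to re-poll.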
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 41688229c570..e78d6b32431d 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -143,6 +143,8 @@ source "drivers/net/can/sja1000/Kconfig"
 
 source "drivers/net/can/c_can/Kconfig"
 
+source "drivers/net/can/m_can/Kconfig"
+
 source "drivers/net/can/cc770/Kconfig"
 
 source "drivers/net/can/spi/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 1697f22353a9..fc9304143f44 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -17,6 +17,7 @@ obj-y += softing/
 obj-$(CONFIG_CAN_SJA1000)	+= sja1000/
 obj-$(CONFIG_CAN_MSCAN)		+= mscan/
 obj-$(CONFIG_CAN_C_CAN)		+= c_can/
+obj-$(CONFIG_CAN_M_CAN)		+= m_can/
 obj-$(CONFIG_CAN_CC770)		+= cc770/
 obj-$(CONFIG_CAN_AT91)		+= at91_can.o
 obj-$(CONFIG_CAN_TI_HECC)	+= ti_hecc.o
@@ -28,4 +29,4 @@ obj-$(CONFIG_CAN_GRCAN) += grcan.o
 obj-$(CONFIG_CAN_RCAR)		+= rcar_can.o
 obj-$(CONFIG_CAN_XILINXCAN)	+= xilinx_can.o
 
-ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
+subdir-ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
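Unlike ccflags-y, subdir-ccflags-y is inherited by every subdirectory kbuild descends into, so one assignment here covers sja1000/, c_can/, m_can/ and friends - which is why the per-subdirectory copies of the flag can be deleted in the two Makefile hunks below. The two scopes, per the kbuild documentation:

# ccflags-y        : applies only to objects built by this Makefile
# subdir-ccflags-y : applies here and in all descendant Makefiles
subdir-ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG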
diff --git a/drivers/net/can/c_can/Makefile b/drivers/net/can/c_can/Makefile
index ad1cc842170a..9fdc678b5b37 100644
--- a/drivers/net/can/c_can/Makefile
+++ b/drivers/net/can/c_can/Makefile
@@ -5,5 +5,3 @@
 obj-$(CONFIG_CAN_C_CAN) += c_can.o
 obj-$(CONFIG_CAN_C_CAN_PLATFORM) += c_can_platform.o
 obj-$(CONFIG_CAN_C_CAN_PCI) += c_can_pci.o
-
-ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/cc770/Makefile b/drivers/net/can/cc770/Makefile
index 9fb8321b33eb..8657f879ae19 100644
--- a/drivers/net/can/cc770/Makefile
+++ b/drivers/net/can/cc770/Makefile
@@ -5,5 +5,3 @@
 obj-$(CONFIG_CAN_CC770) += cc770.o
 obj-$(CONFIG_CAN_CC770_ISA) += cc770_isa.o
 obj-$(CONFIG_CAN_CC770_PLATFORM) += cc770_platform.o
-
-ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 9f91fcba43f8..02492d241e4c 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -103,11 +103,11 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
 			       const struct can_bittiming_const *btc)
 {
 	struct can_priv *priv = netdev_priv(dev);
-	long rate, best_rate = 0;
 	long best_error = 1000000000, error = 0;
 	int best_tseg = 0, best_brp = 0, brp = 0;
 	int tsegall, tseg = 0, tseg1 = 0, tseg2 = 0;
 	int spt_error = 1000, spt = 0, sampl_pt;
+	long rate;
 	u64 v64;
 
 	/* Use CIA recommended sample points */
@@ -152,7 +152,6 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
 		}
 		best_tseg = tseg / 2;
 		best_brp = brp;
-		best_rate = rate;
 		if (error == 0)
 			break;
 	}
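Both dev.c hunks remove best_rate, which was assigned inside the search loop but never read afterwards; dropping it looks like a plain -Wunused-but-set-variable cleanup and does not change which bittiming the loop selects.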
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 944aa5d3af6e..2700865efcad 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -92,6 +92,27 @@
 #define FLEXCAN_CTRL_ERR_ALL \
 	(FLEXCAN_CTRL_ERR_BUS | FLEXCAN_CTRL_ERR_STATE)
 
+/* FLEXCAN control register 2 (CTRL2) bits */
+#define FLEXCAN_CRL2_ECRWRE		BIT(29)
+#define FLEXCAN_CRL2_WRMFRZ		BIT(28)
+#define FLEXCAN_CRL2_RFFN(x)		(((x) & 0x0f) << 24)
+#define FLEXCAN_CRL2_TASD(x)		(((x) & 0x1f) << 19)
+#define FLEXCAN_CRL2_MRP		BIT(18)
+#define FLEXCAN_CRL2_RRS		BIT(17)
+#define FLEXCAN_CRL2_EACEN		BIT(16)
+
+/* FLEXCAN memory error control register (MECR) bits */
+#define FLEXCAN_MECR_ECRWRDIS		BIT(31)
+#define FLEXCAN_MECR_HANCEI_MSK		BIT(19)
+#define FLEXCAN_MECR_FANCEI_MSK		BIT(18)
+#define FLEXCAN_MECR_CEI_MSK		BIT(16)
+#define FLEXCAN_MECR_HAERRIE		BIT(15)
+#define FLEXCAN_MECR_FAERRIE		BIT(14)
+#define FLEXCAN_MECR_EXTERRIE		BIT(13)
+#define FLEXCAN_MECR_RERRDIS		BIT(9)
+#define FLEXCAN_MECR_ECCDIS		BIT(8)
+#define FLEXCAN_MECR_NCEFAFRZ		BIT(7)
+
 /* FLEXCAN error and status register (ESR) bits */
 #define FLEXCAN_ESR_TWRN_INT		BIT(17)
 #define FLEXCAN_ESR_RWRN_INT		BIT(16)
@@ -150,18 +171,20 @@
  * FLEXCAN hardware feature flags
  *
  * Below is some version info we got:
- *    SOC   Version   IP-Version  Glitch- [TR]WRN_INT
- *                                Filter? connected?
- *   MX25  FlexCAN2  03.00.00.00    no        no
- *   MX28  FlexCAN2  03.00.04.00   yes       yes
- *   MX35  FlexCAN2  03.00.00.00    no        no
- *   MX53  FlexCAN2  03.00.00.00   yes        no
- *   MX6s  FlexCAN3  10.00.12.00   yes       yes
+ *    SOC   Version   IP-Version  Glitch- [TR]WRN_INT  Memory err
+ *                                Filter? connected?   detection
+ *   MX25  FlexCAN2  03.00.00.00    no        no          no
+ *   MX28  FlexCAN2  03.00.04.00   yes       yes          no
+ *   MX35  FlexCAN2  03.00.00.00    no        no          no
+ *   MX53  FlexCAN2  03.00.00.00   yes        no          no
+ *   MX6s  FlexCAN3  10.00.12.00   yes       yes          no
+ *  VF610  FlexCAN3  ?              no       yes          yes
  *
  * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
  */
 #define FLEXCAN_HAS_V10_FEATURES	BIT(1) /* For core version >= 10 */
 #define FLEXCAN_HAS_BROKEN_ERR_STATE	BIT(2) /* [TR]WRN_INT not connected */
+#define FLEXCAN_HAS_MECR_FEATURES	BIT(3) /* Memory error detection */
 
 /* Structure of the message buffer */
 struct flexcan_mb {
@@ -192,8 +215,17 @@ struct flexcan_regs {
 	u32 crcr;		/* 0x44 */
 	u32 rxfgmask;		/* 0x48 */
 	u32 rxfir;		/* 0x4c */
-	u32 _reserved3[12];
-	struct flexcan_mb cantxfg[64];
+	u32 _reserved3[12];		/* 0x50 */
+	struct flexcan_mb cantxfg[64];	/* 0x80 */
+	u32 _reserved4[408];
+	u32 mecr;		/* 0xae0 */
+	u32 erriar;		/* 0xae4 */
+	u32 erridpr;		/* 0xae8 */
+	u32 errippr;		/* 0xaec */
+	u32 rerrar;		/* 0xaf0 */
+	u32 rerrdr;		/* 0xaf4 */
+	u32 rerrsynr;		/* 0xaf8 */
+	u32 errsr;		/* 0xafc */
 };
 
 struct flexcan_devtype_data {
@@ -223,6 +255,9 @@ static struct flexcan_devtype_data fsl_imx28_devtype_data;
 static struct flexcan_devtype_data fsl_imx6q_devtype_data = {
 	.features = FLEXCAN_HAS_V10_FEATURES,
 };
+static struct flexcan_devtype_data fsl_vf610_devtype_data = {
+	.features = FLEXCAN_HAS_V10_FEATURES | FLEXCAN_HAS_MECR_FEATURES,
+};
 
 static const struct can_bittiming_const flexcan_bittiming_const = {
 	.name = DRV_NAME,
@@ -378,8 +413,9 @@ static int flexcan_chip_softreset(struct flexcan_priv *priv)
 	return 0;
 }
 
-static int flexcan_get_berr_counter(const struct net_device *dev,
-				    struct can_berr_counter *bec)
+
+static int __flexcan_get_berr_counter(const struct net_device *dev,
+				      struct can_berr_counter *bec)
 {
 	const struct flexcan_priv *priv = netdev_priv(dev);
 	struct flexcan_regs __iomem *regs = priv->base;
@@ -391,6 +427,29 @@ static int flexcan_get_berr_counter(const struct net_device *dev,
 	return 0;
 }
 
+static int flexcan_get_berr_counter(const struct net_device *dev,
+				    struct can_berr_counter *bec)
+{
+	const struct flexcan_priv *priv = netdev_priv(dev);
+	int err;
+
+	err = clk_prepare_enable(priv->clk_ipg);
+	if (err)
+		return err;
+
+	err = clk_prepare_enable(priv->clk_per);
+	if (err)
+		goto out_disable_ipg;
+
+	err = __flexcan_get_berr_counter(dev, bec);
+
+	clk_disable_unprepare(priv->clk_per);
+ out_disable_ipg:
+	clk_disable_unprepare(priv->clk_ipg);
+
+	return err;
+}
+
 static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	const struct flexcan_priv *priv = netdev_priv(dev);
@@ -503,7 +562,7 @@ static void do_state(struct net_device *dev,
 	struct flexcan_priv *priv = netdev_priv(dev);
 	struct can_berr_counter bec;
 
-	flexcan_get_berr_counter(dev, &bec);
+	__flexcan_get_berr_counter(dev, &bec);
 
 	switch (priv->can.state) {
 	case CAN_STATE_ERROR_ACTIVE:
@@ -800,7 +859,7 @@ static int flexcan_chip_start(struct net_device *dev)
 	struct flexcan_priv *priv = netdev_priv(dev);
 	struct flexcan_regs __iomem *regs = priv->base;
 	int err;
-	u32 reg_mcr, reg_ctrl;
+	u32 reg_mcr, reg_ctrl, reg_crl2, reg_mecr;
 
 	/* enable module */
 	err = flexcan_chip_enable(priv);
@@ -879,6 +938,31 @@ static int flexcan_chip_start(struct net_device *dev)
 	if (priv->devtype_data->features & FLEXCAN_HAS_V10_FEATURES)
 		flexcan_write(0x0, &regs->rxfgmask);
 
+	/*
+	 * On Vybrid, disable memory error detection interrupts
+	 * and freeze mode.
+	 * This also works around errata e5295 which generates
+	 * false positive memory errors and put the device in
+	 * freeze mode.
+	 */
+	if (priv->devtype_data->features & FLEXCAN_HAS_MECR_FEATURES) {
+		/*
+		 * Follow the protocol as described in "Detection
+		 * and Correction of Memory Errors" to write to
+		 * MECR register
+		 */
+		reg_crl2 = flexcan_read(&regs->crl2);
+		reg_crl2 |= FLEXCAN_CRL2_ECRWRE;
+		flexcan_write(reg_crl2, &regs->crl2);
+
+		reg_mecr = flexcan_read(&regs->mecr);
+		reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS;
+		flexcan_write(reg_mecr, &regs->mecr);
+		reg_mecr &= ~(FLEXCAN_MECR_NCEFAFRZ | FLEXCAN_MECR_HANCEI_MSK |
+			      FLEXCAN_MECR_FANCEI_MSK);
+		flexcan_write(reg_mecr, &regs->mecr);
+	}
+
 	err = flexcan_transceiver_enable(priv);
 	if (err)
 		goto out_chip_disable;
@@ -1089,6 +1173,7 @@ static const struct of_device_id flexcan_of_match[] = {
 	{ .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
 	{ .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
 	{ .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
+	{ .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, },
 	{ /* sentinel */ },
 };
 MODULE_DEVICE_TABLE(of, flexcan_of_match);
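The new fsl,vf610-flexcan compatible opts Vybrid parts into the MECR handling added above. A plausible devicetree fragment for such a node (unit address, interrupt and clock specifiers are illustrative, not taken from this commit - only the compatible string and the ipg/per clock names are dictated by the driver):

	can0: flexcan@40020000 {
		compatible = "fsl,vf610-flexcan";
		reg = <0x40020000 0x4000>;
		interrupts = <0 58 IRQ_TYPE_LEVEL_HIGH>;
		clocks = <&clks VF610_CLK_FLEXCAN0>,
			 <&clks VF610_CLK_FLEXCAN0>;
		clock-names = "ipg", "per";
	};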
diff --git a/drivers/net/can/m_can/Kconfig b/drivers/net/can/m_can/Kconfig
new file mode 100644
index 000000000000..fca5482c09ac
--- /dev/null
+++ b/drivers/net/can/m_can/Kconfig
@@ -0,0 +1,4 @@
+config CAN_M_CAN
+	tristate "Bosch M_CAN devices"
+	---help---
+	  Say Y here if you want support for the Bosch M_CAN controller.
diff --git a/drivers/net/can/m_can/Makefile b/drivers/net/can/m_can/Makefile
new file mode 100644
index 000000000000..8bbd7f24f5be
--- /dev/null
+++ b/drivers/net/can/m_can/Makefile
@@ -0,0 +1,5 @@
+#
+#  Makefile for the Bosch M_CAN controller driver.
+#
+
+obj-$(CONFIG_CAN_M_CAN) += m_can.o
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
new file mode 100644
index 000000000000..10d571eaed85
--- /dev/null
+++ b/drivers/net/can/m_can/m_can.c
@@ -0,0 +1,1202 @@
+/*
+ * CAN bus driver for Bosch M_CAN controller
+ *
+ * Copyright (C) 2014 Freescale Semiconductor, Inc.
+ *	Dong Aisheng <b29396@freescale.com>
+ *
+ * Bosch M_CAN user manual can be obtained from:
+ * http://www.bosch-semiconductors.de/media/pdf_1/ipmodules_1/m_can/
+ * mcan_users_manual_v302.pdf
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <linux/can/dev.h>
+
+/* napi related */
+#define M_CAN_NAPI_WEIGHT	64
+
+/* message ram configuration data length */
+#define MRAM_CFG_LEN	8
+
+/* registers definition */
+enum m_can_reg {
+	M_CAN_CREL	= 0x0,
+	M_CAN_ENDN	= 0x4,
+	M_CAN_CUST	= 0x8,
+	M_CAN_FBTP	= 0xc,
+	M_CAN_TEST	= 0x10,
+	M_CAN_RWD	= 0x14,
+	M_CAN_CCCR	= 0x18,
+	M_CAN_BTP	= 0x1c,
+	M_CAN_TSCC	= 0x20,
+	M_CAN_TSCV	= 0x24,
+	M_CAN_TOCC	= 0x28,
+	M_CAN_TOCV	= 0x2c,
+	M_CAN_ECR	= 0x40,
+	M_CAN_PSR	= 0x44,
+	M_CAN_IR	= 0x50,
+	M_CAN_IE	= 0x54,
+	M_CAN_ILS	= 0x58,
+	M_CAN_ILE	= 0x5c,
+	M_CAN_GFC	= 0x80,
+	M_CAN_SIDFC	= 0x84,
+	M_CAN_XIDFC	= 0x88,
+	M_CAN_XIDAM	= 0x90,
+	M_CAN_HPMS	= 0x94,
+	M_CAN_NDAT1	= 0x98,
+	M_CAN_NDAT2	= 0x9c,
+	M_CAN_RXF0C	= 0xa0,
+	M_CAN_RXF0S	= 0xa4,
+	M_CAN_RXF0A	= 0xa8,
+	M_CAN_RXBC	= 0xac,
+	M_CAN_RXF1C	= 0xb0,
+	M_CAN_RXF1S	= 0xb4,
+	M_CAN_RXF1A	= 0xb8,
+	M_CAN_RXESC	= 0xbc,
+	M_CAN_TXBC	= 0xc0,
+	M_CAN_TXFQS	= 0xc4,
+	M_CAN_TXESC	= 0xc8,
+	M_CAN_TXBRP	= 0xcc,
+	M_CAN_TXBAR	= 0xd0,
+	M_CAN_TXBCR	= 0xd4,
+	M_CAN_TXBTO	= 0xd8,
+	M_CAN_TXBCF	= 0xdc,
+	M_CAN_TXBTIE	= 0xe0,
+	M_CAN_TXBCIE	= 0xe4,
+	M_CAN_TXEFC	= 0xf0,
+	M_CAN_TXEFS	= 0xf4,
+	M_CAN_TXEFA	= 0xf8,
+};
+
+/* m_can lec values */
+enum m_can_lec_type {
+	LEC_NO_ERROR = 0,
+	LEC_STUFF_ERROR,
+	LEC_FORM_ERROR,
+	LEC_ACK_ERROR,
+	LEC_BIT1_ERROR,
+	LEC_BIT0_ERROR,
+	LEC_CRC_ERROR,
+	LEC_UNUSED,
+};
+
+enum m_can_mram_cfg {
+	MRAM_SIDF = 0,
+	MRAM_XIDF,
+	MRAM_RXF0,
+	MRAM_RXF1,
+	MRAM_RXB,
+	MRAM_TXE,
+	MRAM_TXB,
+	MRAM_CFG_NUM,
+};
+
+/* Test Register (TEST) */
+#define TEST_LBCK	BIT(4)
+
+/* CC Control Register(CCCR) */
+#define CCCR_TEST	BIT(7)
+#define CCCR_MON	BIT(5)
+#define CCCR_CCE	BIT(1)
+#define CCCR_INIT	BIT(0)
+
+/* Bit Timing & Prescaler Register (BTP) */
+#define BTR_BRP_MASK	0x3ff
+#define BTR_BRP_SHIFT	16
+#define BTR_TSEG1_SHIFT	8
+#define BTR_TSEG1_MASK	(0x3f << BTR_TSEG1_SHIFT)
+#define BTR_TSEG2_SHIFT	4
+#define BTR_TSEG2_MASK	(0xf << BTR_TSEG2_SHIFT)
+#define BTR_SJW_SHIFT	0
+#define BTR_SJW_MASK	0xf
+
+/* Error Counter Register(ECR) */
+#define ECR_RP		BIT(15)
+#define ECR_REC_SHIFT	8
+#define ECR_REC_MASK	(0x7f << ECR_REC_SHIFT)
+#define ECR_TEC_SHIFT	0
+#define ECR_TEC_MASK	0xff
+
+/* Protocol Status Register(PSR) */
+#define PSR_BO		BIT(7)
+#define PSR_EW		BIT(6)
+#define PSR_EP		BIT(5)
+#define PSR_LEC_MASK	0x7
+
+/* Interrupt Register(IR) */
+#define IR_ALL_INT	0xffffffff
+#define IR_STE		BIT(31)
+#define IR_FOE		BIT(30)
+#define IR_ACKE		BIT(29)
+#define IR_BE		BIT(28)
+#define IR_CRCE		BIT(27)
+#define IR_WDI		BIT(26)
+#define IR_BO		BIT(25)
+#define IR_EW		BIT(24)
+#define IR_EP		BIT(23)
+#define IR_ELO		BIT(22)
+#define IR_BEU		BIT(21)
+#define IR_BEC		BIT(20)
+#define IR_DRX		BIT(19)
+#define IR_TOO		BIT(18)
+#define IR_MRAF		BIT(17)
+#define IR_TSW		BIT(16)
+#define IR_TEFL		BIT(15)
+#define IR_TEFF		BIT(14)
+#define IR_TEFW		BIT(13)
+#define IR_TEFN		BIT(12)
+#define IR_TFE		BIT(11)
+#define IR_TCF		BIT(10)
+#define IR_TC		BIT(9)
+#define IR_HPM		BIT(8)
+#define IR_RF1L		BIT(7)
+#define IR_RF1F		BIT(6)
+#define IR_RF1W		BIT(5)
+#define IR_RF1N		BIT(4)
+#define IR_RF0L		BIT(3)
+#define IR_RF0F		BIT(2)
+#define IR_RF0W		BIT(1)
+#define IR_RF0N		BIT(0)
+#define IR_ERR_STATE	(IR_BO | IR_EW | IR_EP)
+#define IR_ERR_LEC	(IR_STE	| IR_FOE | IR_ACKE | IR_BE | IR_CRCE)
+#define IR_ERR_BUS	(IR_ERR_LEC | IR_WDI | IR_ELO | IR_BEU | \
+			 IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \
+			 IR_RF1L | IR_RF0L)
+#define IR_ERR_ALL	(IR_ERR_STATE | IR_ERR_BUS)
+
+/* Interrupt Line Select (ILS) */
+#define ILS_ALL_INT0	0x0
+#define ILS_ALL_INT1	0xFFFFFFFF
+
+/* Interrupt Line Enable (ILE) */
+#define ILE_EINT0	BIT(0)
+#define ILE_EINT1	BIT(1)
+
+/* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */
+#define RXFC_FWM_OFF	24
+#define RXFC_FWM_MASK	0x7f
+#define RXFC_FWM_1	(1 << RXFC_FWM_OFF)
+#define RXFC_FS_OFF	16
+#define RXFC_FS_MASK	0x7f
+
+/* Rx FIFO 0/1 Status (RXF0S/RXF1S) */
+#define RXFS_RFL	BIT(25)
+#define RXFS_FF		BIT(24)
+#define RXFS_FPI_OFF	16
+#define RXFS_FPI_MASK	0x3f0000
+#define RXFS_FGI_OFF	8
+#define RXFS_FGI_MASK	0x3f00
+#define RXFS_FFL_MASK	0x7f
+
+/* Rx Buffer / FIFO Element Size Configuration (RXESC) */
+#define M_CAN_RXESC_8BYTES	0x0
+
+/* Tx Buffer Configuration(TXBC) */
+#define TXBC_NDTB_OFF	16
+#define TXBC_NDTB_MASK	0x3f
+
+/* Tx Buffer Element Size Configuration(TXESC) */
+#define TXESC_TBDS_8BYTES	0x0
+
+/* Tx Event FIFO Configuration (TXEFC) */
+#define TXEFC_EFS_OFF	16
+#define TXEFC_EFS_MASK	0x3f
+
+/* Message RAM Configuration (in bytes) */
+#define SIDF_ELEMENT_SIZE	4
+#define XIDF_ELEMENT_SIZE	8
+#define RXF0_ELEMENT_SIZE	16
+#define RXF1_ELEMENT_SIZE	16
+#define RXB_ELEMENT_SIZE	16
+#define TXE_ELEMENT_SIZE	8
+#define TXB_ELEMENT_SIZE	16
+
+/* Message RAM Elements */
+#define M_CAN_FIFO_ID		0x0
+#define M_CAN_FIFO_DLC		0x4
+#define M_CAN_FIFO_DATA(n)	(0x8 + ((n) << 2))
+
+/* Rx Buffer Element */
+#define RX_BUF_ESI	BIT(31)
+#define RX_BUF_XTD	BIT(30)
+#define RX_BUF_RTR	BIT(29)
+
+/* Tx Buffer Element */
+#define TX_BUF_XTD	BIT(30)
+#define TX_BUF_RTR	BIT(29)
+
+/* address offset and element number for each FIFO/Buffer in the Message RAM */
+struct mram_cfg {
+	u16 off;
+	u8  num;
+};
+
+/* m_can private data structure */
+struct m_can_priv {
+	struct can_priv can;	/* must be the first member */
+	struct napi_struct napi;
+	struct net_device *dev;
+	struct device *device;
+	struct clk *hclk;
+	struct clk *cclk;
+	void __iomem *base;
+	u32 irqstatus;
+
+	/* message ram configuration */
+	void __iomem *mram_base;
+	struct mram_cfg mcfg[MRAM_CFG_NUM];
+};
+
+static inline u32 m_can_read(const struct m_can_priv *priv, enum m_can_reg reg)
+{
+	return readl(priv->base + reg);
+}
+
+static inline void m_can_write(const struct m_can_priv *priv,
+			       enum m_can_reg reg, u32 val)
+{
+	writel(val, priv->base + reg);
+}
+
+static inline u32 m_can_fifo_read(const struct m_can_priv *priv,
+				  u32 fgi, unsigned int offset)
+{
+	return readl(priv->mram_base + priv->mcfg[MRAM_RXF0].off +
+		     fgi * RXF0_ELEMENT_SIZE + offset);
+}
+
+static inline void m_can_fifo_write(const struct m_can_priv *priv,
+				    u32 fpi, unsigned int offset, u32 val)
+{
+	return writel(val, priv->mram_base + priv->mcfg[MRAM_TXB].off +
+		      fpi * TXB_ELEMENT_SIZE + offset);
+}
+
+static inline void m_can_config_endisable(const struct m_can_priv *priv,
+					  bool enable)
+{
+	u32 cccr = m_can_read(priv, M_CAN_CCCR);
+	u32 timeout = 10;
+	u32 val = 0;
+
+	if (enable) {
+		/* enable m_can configuration */
+		m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT);
+		/* CCCR.CCE can only be set/reset while CCCR.INIT = '1' */
+		m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE);
+	} else {
+		m_can_write(priv, M_CAN_CCCR, cccr & ~(CCCR_INIT | CCCR_CCE));
+	}
+
+	/* there's a delay for module initialization */
+	if (enable)
+		val = CCCR_INIT | CCCR_CCE;
+
+	while ((m_can_read(priv, M_CAN_CCCR) & (CCCR_INIT | CCCR_CCE)) != val) {
+		if (timeout == 0) {
+			netdev_warn(priv->dev, "Failed to init module\n");
+			return;
+		}
+		timeout--;
+		udelay(1);
+	}
+}
+
+static inline void m_can_enable_all_interrupts(const struct m_can_priv *priv)
+{
+	m_can_write(priv, M_CAN_ILE, ILE_EINT0 | ILE_EINT1);
+}
+
+static inline void m_can_disable_all_interrupts(const struct m_can_priv *priv)
+{
+	m_can_write(priv, M_CAN_ILE, 0x0);
+}
+
+static void m_can_read_fifo(const struct net_device *dev, struct can_frame *cf,
+			    u32 rxfs)
+{
+	struct m_can_priv *priv = netdev_priv(dev);
+	u32 id, fgi;
+
+	/* calculate the fifo get index for where to read data */
+	fgi = (rxfs & RXFS_FGI_MASK) >> RXFS_FGI_OFF;
+	id = m_can_fifo_read(priv, fgi, M_CAN_FIFO_ID);
+	if (id & RX_BUF_XTD)
+		cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG;
+	else
+		cf->can_id = (id >> 18) & CAN_SFF_MASK;
+
+	if (id & RX_BUF_RTR) {
+		cf->can_id |= CAN_RTR_FLAG;
+	} else {
+		id = m_can_fifo_read(priv, fgi, M_CAN_FIFO_DLC);
+		cf->can_dlc = get_can_dlc((id >> 16) & 0x0F);
+		*(u32 *)(cf->data + 0) = m_can_fifo_read(priv, fgi,
+							 M_CAN_FIFO_DATA(0));
+		*(u32 *)(cf->data + 4) = m_can_fifo_read(priv, fgi,
+							 M_CAN_FIFO_DATA(1));
+	}
+
+	/* acknowledge rx fifo 0 */
+	m_can_write(priv, M_CAN_RXF0A, fgi);
+}
+
+static int m_can_do_rx_poll(struct net_device *dev, int quota)
+{
+	struct m_can_priv *priv = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+	struct sk_buff *skb;
+	struct can_frame *frame;
+	u32 pkts = 0;
+	u32 rxfs;
+
+	rxfs = m_can_read(priv, M_CAN_RXF0S);
+	if (!(rxfs & RXFS_FFL_MASK)) {
+		netdev_dbg(dev, "no messages in fifo0\n");
+		return 0;
+	}
+
+	while ((rxfs & RXFS_FFL_MASK) && (quota > 0)) {
+		if (rxfs & RXFS_RFL)
+			netdev_warn(dev, "Rx FIFO 0 Message Lost\n");
+
+		skb = alloc_can_skb(dev, &frame);
+		if (!skb) {
+			stats->rx_dropped++;
+			return pkts;
+		}
+
+		m_can_read_fifo(dev, frame, rxfs);
+
+		stats->rx_packets++;
+		stats->rx_bytes += frame->can_dlc;
+
+		netif_receive_skb(skb);
+
+		quota--;
+		pkts++;
+		rxfs = m_can_read(priv, M_CAN_RXF0S);
+	}
+
+	if (pkts)
+		can_led_event(dev, CAN_LED_EVENT_RX);
+
+	return pkts;
+}
+
+static int m_can_handle_lost_msg(struct net_device *dev)
+{
+	struct net_device_stats *stats = &dev->stats;
+	struct sk_buff *skb;
+	struct can_frame *frame;
+
+	netdev_err(dev, "msg lost in rxf0\n");
+
+	stats->rx_errors++;
+	stats->rx_over_errors++;
+
+	skb = alloc_can_err_skb(dev, &frame);
+	if (unlikely(!skb))
+		return 0;
+
+	frame->can_id |= CAN_ERR_CRTL;
+	frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+
+	netif_receive_skb(skb);
+
+	return 1;
+}
+
+static int m_can_handle_lec_err(struct net_device *dev,
+				enum m_can_lec_type lec_type)
+{
+	struct m_can_priv *priv = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+	struct can_frame *cf;
+	struct sk_buff *skb;
+
+	priv->can.can_stats.bus_error++;
+	stats->rx_errors++;
+
+	/* propagate the error condition to the CAN stack */
+	skb = alloc_can_err_skb(dev, &cf);
+	if (unlikely(!skb))
+		return 0;
+
+	/* check for 'last error code' which tells us the
+	 * type of the last error to occur on the CAN bus
+	 */
+	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+	cf->data[2] |= CAN_ERR_PROT_UNSPEC;
+
+	switch (lec_type) {
+	case LEC_STUFF_ERROR:
+		netdev_dbg(dev, "stuff error\n");
+		cf->data[2] |= CAN_ERR_PROT_STUFF;
+		break;
+	case LEC_FORM_ERROR:
+		netdev_dbg(dev, "form error\n");
+		cf->data[2] |= CAN_ERR_PROT_FORM;
+		break;
+	case LEC_ACK_ERROR:
+		netdev_dbg(dev, "ack error\n");
+		cf->data[3] |= (CAN_ERR_PROT_LOC_ACK |
+				CAN_ERR_PROT_LOC_ACK_DEL);
+		break;
+	case LEC_BIT1_ERROR:
+		netdev_dbg(dev, "bit1 error\n");
+		cf->data[2] |= CAN_ERR_PROT_BIT1;
+		break;
+	case LEC_BIT0_ERROR:
+		netdev_dbg(dev, "bit0 error\n");
+		cf->data[2] |= CAN_ERR_PROT_BIT0;
+		break;
+	case LEC_CRC_ERROR:
+		netdev_dbg(dev, "CRC error\n");
+		cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
+				CAN_ERR_PROT_LOC_CRC_DEL);
+		break;
+	default:
+		break;
+	}
+
+	stats->rx_packets++;
+	stats->rx_bytes += cf->can_dlc;
+	netif_receive_skb(skb);
+
+	return 1;
+}
+
+static int m_can_get_berr_counter(const struct net_device *dev,
+				  struct can_berr_counter *bec)
+{
+	struct m_can_priv *priv = netdev_priv(dev);
+	unsigned int ecr;
+	int err;
+
+	err = clk_prepare_enable(priv->hclk);
+	if (err)
+		return err;
+
+	err = clk_prepare_enable(priv->cclk);
+	if (err) {
+		clk_disable_unprepare(priv->hclk);
+		return err;
+	}
+
+	ecr = m_can_read(priv, M_CAN_ECR);
+	bec->rxerr = (ecr & ECR_REC_MASK) >> ECR_REC_SHIFT;
+	bec->txerr = ecr & ECR_TEC_MASK;
+
+	clk_disable_unprepare(priv->cclk);
+	clk_disable_unprepare(priv->hclk);
+
+	return 0;
+}
+
+static int m_can_handle_state_change(struct net_device *dev,
+				     enum can_state new_state)
+{
+	struct m_can_priv *priv = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+	struct can_frame *cf;
+	struct sk_buff *skb;
+	struct can_berr_counter bec;
+	unsigned int ecr;
+
+	switch (new_state) {
+	case CAN_STATE_ERROR_ACTIVE:
+		/* error warning state */
+		priv->can.can_stats.error_warning++;
+		priv->can.state = CAN_STATE_ERROR_WARNING;
+		break;
+	case CAN_STATE_ERROR_PASSIVE:
+		/* error passive state */
+		priv->can.can_stats.error_passive++;
+		priv->can.state = CAN_STATE_ERROR_PASSIVE;
+		break;
+	case CAN_STATE_BUS_OFF:
+		/* bus-off state */
+		priv->can.state = CAN_STATE_BUS_OFF;
+		m_can_disable_all_interrupts(priv);
+		can_bus_off(dev);
+		break;
+	default:
+		break;
+	}
+
+	/* propagate the error condition to the CAN stack */
+	skb = alloc_can_err_skb(dev, &cf);
+	if (unlikely(!skb))
+		return 0;
+
+	m_can_get_berr_counter(dev, &bec);
+
+	switch (new_state) {
+	case CAN_STATE_ERROR_ACTIVE:
+		/* error warning state */
+		cf->can_id |= CAN_ERR_CRTL;
+		cf->data[1] = (bec.txerr > bec.rxerr) ?
+			CAN_ERR_CRTL_TX_WARNING :
+			CAN_ERR_CRTL_RX_WARNING;
+		cf->data[6] = bec.txerr;
+		cf->data[7] = bec.rxerr;
+		break;
+	case CAN_STATE_ERROR_PASSIVE:
+		/* error passive state */
+		cf->can_id |= CAN_ERR_CRTL;
+		ecr = m_can_read(priv, M_CAN_ECR);
+		if (ecr & ECR_RP)
+			cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
+		if (bec.txerr > 127)
+			cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
+		cf->data[6] = bec.txerr;
+		cf->data[7] = bec.rxerr;
+		break;
+	case CAN_STATE_BUS_OFF:
+		/* bus-off state */
+		cf->can_id |= CAN_ERR_BUSOFF;
+		break;
+	default:
+		break;
+	}
+
+	stats->rx_packets++;
+	stats->rx_bytes += cf->can_dlc;
+	netif_receive_skb(skb);
+
+	return 1;
+}
+
+static int m_can_handle_state_errors(struct net_device *dev, u32 psr)
+{
+	struct m_can_priv *priv = netdev_priv(dev);
+	int work_done = 0;
+
+	if ((psr & PSR_EW) &&
+	    (priv->can.state != CAN_STATE_ERROR_WARNING)) {
+		netdev_dbg(dev, "entered error warning state\n");
+		work_done += m_can_handle_state_change(dev,
+						       CAN_STATE_ERROR_WARNING);
+	}
+
+	if ((psr & PSR_EP) &&
+	    (priv->can.state != CAN_STATE_ERROR_PASSIVE)) {
+		netdev_dbg(dev, "entered error passive state\n");
+		work_done += m_can_handle_state_change(dev,
+						       CAN_STATE_ERROR_PASSIVE);
+	}
+
+	if ((psr & PSR_BO) &&
+	    (priv->can.state != CAN_STATE_BUS_OFF)) {
+		netdev_dbg(dev, "entered bus-off state\n");
+		work_done += m_can_handle_state_change(dev,
+						       CAN_STATE_BUS_OFF);
+	}
+
+	return work_done;
+}
+
+static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus)
+{
+	if (irqstatus & IR_WDI)
+		netdev_err(dev, "Message RAM Watchdog event due to missing READY\n");
+	if (irqstatus & IR_ELO)
+		netdev_err(dev, "Error Logging Overflow\n");
+	if (irqstatus & IR_BEU)
+		netdev_err(dev, "Bit Error Uncorrected\n");
+	if (irqstatus & IR_BEC)
+		netdev_err(dev, "Bit Error Corrected\n");
+	if (irqstatus & IR_TOO)
+		netdev_err(dev, "Timeout reached\n");
+	if (irqstatus & IR_MRAF)
+		netdev_err(dev, "Message RAM access failure occurred\n");
+}
+
+static inline bool is_lec_err(u32 psr)
+{
+	psr &= LEC_UNUSED;
+
+	return psr && (psr != LEC_UNUSED);
+}
+
+static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus,
+				   u32 psr)
+{
+	struct m_can_priv *priv = netdev_priv(dev);
+	int work_done = 0;
+
+	if (irqstatus & IR_RF0L)
+		work_done += m_can_handle_lost_msg(dev);
+
+	/* handle lec errors on the bus */
+	if ((priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
+	    is_lec_err(psr))
+		work_done += m_can_handle_lec_err(dev, psr & LEC_UNUSED);
+
+	/* other unprocessed error interrupts */
+	m_can_handle_other_err(dev, irqstatus);
+
+	return work_done;
+}
+
+static int m_can_poll(struct napi_struct *napi, int quota)
+{
+	struct net_device *dev = napi->dev;
+	struct m_can_priv *priv = netdev_priv(dev);
+	int work_done = 0;
+	u32 irqstatus, psr;
+
+	irqstatus = priv->irqstatus | m_can_read(priv, M_CAN_IR);
+	if (!irqstatus)
+		goto end;
+
+	psr = m_can_read(priv, M_CAN_PSR);
+	if (irqstatus & IR_ERR_STATE)
+		work_done += m_can_handle_state_errors(dev, psr);
+
+	if (irqstatus & IR_ERR_BUS)
+		work_done += m_can_handle_bus_errors(dev, irqstatus, psr);
+
+	if (irqstatus & IR_RF0N)
+		work_done += m_can_do_rx_poll(dev, (quota - work_done));
+
+	if (work_done < quota) {
+		napi_complete(napi);
+		m_can_enable_all_interrupts(priv);
+	}
+
+end:
+	return work_done;
+}
+
+static irqreturn_t m_can_isr(int irq, void *dev_id)
+{
+	struct net_device *dev = (struct net_device *)dev_id;
+	struct m_can_priv *priv = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+	u32 ir;
+
+	ir = m_can_read(priv, M_CAN_IR);
+	if (!ir)
+		return IRQ_NONE;
+
+	/* ACK all irqs */
+	if (ir & IR_ALL_INT)
+		m_can_write(priv, M_CAN_IR, ir);
+
+	/* schedule NAPI in case of
+	 * - rx IRQ
+	 * - state change IRQ
+	 * - bus error IRQ and bus error reporting
+	 */
+	if ((ir & IR_RF0N) || (ir & IR_ERR_ALL)) {
+		priv->irqstatus = ir;
+		m_can_disable_all_interrupts(priv);
+		napi_schedule(&priv->napi);
+	}
+
+	/* transmission complete interrupt */
+	if (ir & IR_TC) {
+		stats->tx_bytes += can_get_echo_skb(dev, 0);
+		stats->tx_packets++;
+		can_led_event(dev, CAN_LED_EVENT_TX);
+		netif_wake_queue(dev);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static const struct can_bittiming_const m_can_bittiming_const = {
+	.name = KBUILD_MODNAME,
+	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
+	.tseg1_max = 64,
+	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
+	.tseg2_max = 16,
+	.sjw_max = 16,
+	.brp_min = 1,
+	.brp_max = 1024,
+	.brp_inc = 1,
+};
+
+static int m_can_set_bittiming(struct net_device *dev)
+{
+	struct m_can_priv *priv = netdev_priv(dev);
+	const struct can_bittiming *bt = &priv->can.bittiming;
+	u16 brp, sjw, tseg1, tseg2;
+	u32 reg_btp;
+
+	brp = bt->brp - 1;
+	sjw = bt->sjw - 1;
+	tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
+	tseg2 = bt->phase_seg2 - 1;
+	reg_btp = (brp << BTR_BRP_SHIFT) | (sjw << BTR_SJW_SHIFT) |
+		  (tseg1 << BTR_TSEG1_SHIFT) | (tseg2 << BTR_TSEG2_SHIFT);
+	m_can_write(priv, M_CAN_BTP, reg_btp);
+	netdev_dbg(dev, "setting BTP 0x%x\n", reg_btp);
+
+	return 0;
+}
+
+/* Configure M_CAN chip:
+ * - set rx buffer/fifo element size
+ * - configure rx fifo
+ * - accept non-matching frame into fifo 0
+ * - configure tx buffer
+ * - configure mode
+ * - setup bittiming
+ */
+static void m_can_chip_config(struct net_device *dev)
+{
+	struct m_can_priv *priv = netdev_priv(dev);
+	u32 cccr, test;
+
+	m_can_config_endisable(priv, true);
+
+	/* RX Buffer/FIFO Element Size 8 bytes data field */
+	m_can_write(priv, M_CAN_RXESC, M_CAN_RXESC_8BYTES);
+
+	/* Accept Non-matching Frames Into FIFO 0 */
+	m_can_write(priv, M_CAN_GFC, 0x0);
+
+	/* only support one Tx Buffer currently */
+	m_can_write(priv, M_CAN_TXBC, (1 << TXBC_NDTB_OFF) |
+		    priv->mcfg[MRAM_TXB].off);
+
+	/* only support 8 bytes firstly */
+	m_can_write(priv, M_CAN_TXESC, TXESC_TBDS_8BYTES);
+
+	m_can_write(priv, M_CAN_TXEFC, (1 << TXEFC_EFS_OFF) |
+		    priv->mcfg[MRAM_TXE].off);
+
+	/* rx fifo configuration, blocking mode, fifo size 1 */
+	m_can_write(priv, M_CAN_RXF0C,
+		    (priv->mcfg[MRAM_RXF0].num << RXFC_FS_OFF) |
+		    RXFC_FWM_1 | priv->mcfg[MRAM_RXF0].off);
+
+	m_can_write(priv, M_CAN_RXF1C,
+		    (priv->mcfg[MRAM_RXF1].num << RXFC_FS_OFF) |
+		    RXFC_FWM_1 | priv->mcfg[MRAM_RXF1].off);
+
+	cccr = m_can_read(priv, M_CAN_CCCR);
+	cccr &= ~(CCCR_TEST | CCCR_MON);
+	test = m_can_read(priv, M_CAN_TEST);
+	test &= ~TEST_LBCK;
+
+	if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+		cccr |= CCCR_MON;
+
+	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
+		cccr |= CCCR_TEST;
+		test |= TEST_LBCK;
+	}
+
+	m_can_write(priv, M_CAN_CCCR, cccr);
+	m_can_write(priv, M_CAN_TEST, test);
+
+	/* enable interrupts */
+	m_can_write(priv, M_CAN_IR, IR_ALL_INT);
+	if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
+		m_can_write(priv, M_CAN_IE, IR_ALL_INT & ~IR_ERR_LEC);
+	else
+		m_can_write(priv, M_CAN_IE, IR_ALL_INT);
+
+	/* route all interrupts to INT0 */
+	m_can_write(priv, M_CAN_ILS, ILS_ALL_INT0);
+
+	/* set bittiming params */
+	m_can_set_bittiming(dev);
+
+	m_can_config_endisable(priv, false);
+}
+
+static void m_can_start(struct net_device *dev)
+{
+	struct m_can_priv *priv = netdev_priv(dev);
+
+	/* basic m_can configuration */
+	m_can_chip_config(dev);
+
+	priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+	m_can_enable_all_interrupts(priv);
+}
+
+static int m_can_set_mode(struct net_device *dev, enum can_mode mode)
+{
+	switch (mode) {
+	case CAN_MODE_START:
+		m_can_start(dev);
+		netif_wake_queue(dev);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static void free_m_can_dev(struct net_device *dev)
+{
+	free_candev(dev);
+}
+
+static struct net_device *alloc_m_can_dev(void)
+{
+	struct net_device *dev;
+	struct m_can_priv *priv;
+
+	dev = alloc_candev(sizeof(*priv), 1);
+	if (!dev)
+		return NULL;
+
+	priv = netdev_priv(dev);
+	netif_napi_add(dev, &priv->napi, m_can_poll, M_CAN_NAPI_WEIGHT);
+
+	priv->dev = dev;
+	priv->can.bittiming_const = &m_can_bittiming_const;
+	priv->can.do_set_mode = m_can_set_mode;
+	priv->can.do_get_berr_counter = m_can_get_berr_counter;
+	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
+				       CAN_CTRLMODE_LISTENONLY |
+				       CAN_CTRLMODE_BERR_REPORTING;
+
+	return dev;
+}
+
+static int m_can_open(struct net_device *dev)
+{
+	struct m_can_priv *priv = netdev_priv(dev);
+	int err;
+
+	err = clk_prepare_enable(priv->hclk);
+	if (err)
+		return err;
+
+	err = clk_prepare_enable(priv->cclk);
+	if (err)
+		goto exit_disable_hclk;
+
+	/* open the can device */
+	err = open_candev(dev);
+	if (err) {
+		netdev_err(dev, "failed to open can device\n");
+		goto exit_disable_cclk;
+	}
+
+	/* register interrupt handler */
+	err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name,
+			  dev);
+	if (err < 0) {
+		netdev_err(dev, "failed to request interrupt\n");
+		goto exit_irq_fail;
+	}
+
+	/* start the m_can controller */
+	m_can_start(dev);
+
+	can_led_event(dev, CAN_LED_EVENT_OPEN);
+	napi_enable(&priv->napi);
+	netif_start_queue(dev);
+
+	return 0;
+
+exit_irq_fail:
+	close_candev(dev);
+exit_disable_cclk:
+	clk_disable_unprepare(priv->cclk);
+exit_disable_hclk:
+	clk_disable_unprepare(priv->hclk);
+	return err;
+}
+
+static void m_can_stop(struct net_device *dev)
+{
+	struct m_can_priv *priv = netdev_priv(dev);
+
+	/* disable all interrupts */
+	m_can_disable_all_interrupts(priv);
+
+	clk_disable_unprepare(priv->hclk);
+	clk_disable_unprepare(priv->cclk);
+
+	/* set the state as STOPPED */
+	priv->can.state = CAN_STATE_STOPPED;
+}
+
+static int m_can_close(struct net_device *dev)
+{
+	struct m_can_priv *priv = netdev_priv(dev);
+
+	netif_stop_queue(dev);
+	napi_disable(&priv->napi);
+	m_can_stop(dev);
+	free_irq(dev->irq, dev);
+	close_candev(dev);
+	can_led_event(dev, CAN_LED_EVENT_STOP);
+
+	return 0;
+}
+
+static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
+				    struct net_device *dev)
+{
+	struct m_can_priv *priv = netdev_priv(dev);
+	struct can_frame *cf = (struct can_frame *)skb->data;
+	u32 id;
+
+	if (can_dropped_invalid_skb(dev, skb))
+		return NETDEV_TX_OK;
+
+	netif_stop_queue(dev);
+
+	if (cf->can_id & CAN_EFF_FLAG) {
+		id = cf->can_id & CAN_EFF_MASK;
+		id |= TX_BUF_XTD;
+	} else {
+		id = ((cf->can_id & CAN_SFF_MASK) << 18);
+	}
+
+	if (cf->can_id & CAN_RTR_FLAG)
+		id |= TX_BUF_RTR;
+
+	/* message ram configuration */
+	m_can_fifo_write(priv, 0, M_CAN_FIFO_ID, id);
+	m_can_fifo_write(priv, 0, M_CAN_FIFO_DLC, cf->can_dlc << 16);
+	m_can_fifo_write(priv, 0, M_CAN_FIFO_DATA(0), *(u32 *)(cf->data + 0));
+	m_can_fifo_write(priv, 0, M_CAN_FIFO_DATA(1), *(u32 *)(cf->data + 4));
+	can_put_echo_skb(skb, dev, 0);
+
+	/* enable first TX buffer to start transfer */
+	m_can_write(priv, M_CAN_TXBTIE, 0x1);
+	m_can_write(priv, M_CAN_TXBAR, 0x1);
+
+	return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops m_can_netdev_ops = {
+	.ndo_open = m_can_open,
+	.ndo_stop = m_can_close,
+	.ndo_start_xmit = m_can_start_xmit,
+};
+
+static int register_m_can_dev(struct net_device *dev)
+{
+	dev->flags |= IFF_ECHO;	/* we support local echo */
+	dev->netdev_ops = &m_can_netdev_ops;
+
+	return register_candev(dev);
+}
+
1005static int m_can_of_parse_mram(struct platform_device *pdev,
1006 struct m_can_priv *priv)
1007{
1008 struct device_node *np = pdev->dev.of_node;
1009 struct resource *res;
1010 void __iomem *addr;
1011 u32 out_val[MRAM_CFG_LEN];
1012 int ret;
1013
1014 /* message ram could be shared */
1015 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram");
1016 if (!res)
1017 return -ENODEV;
1018
1019 addr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
1020 if (!addr)
1021 return -ENOMEM;
1022
1023 /* get message ram configuration */
1024 ret = of_property_read_u32_array(np, "bosch,mram-cfg",
1025 out_val, sizeof(out_val) / 4);
1026 if (ret) {
 1027		dev_err(&pdev->dev, "cannot get message ram configuration\n");
1028 return -ENODEV;
1029 }
1030
1031 priv->mram_base = addr;
1032 priv->mcfg[MRAM_SIDF].off = out_val[0];
1033 priv->mcfg[MRAM_SIDF].num = out_val[1];
1034 priv->mcfg[MRAM_XIDF].off = priv->mcfg[MRAM_SIDF].off +
1035 priv->mcfg[MRAM_SIDF].num * SIDF_ELEMENT_SIZE;
1036 priv->mcfg[MRAM_XIDF].num = out_val[2];
1037 priv->mcfg[MRAM_RXF0].off = priv->mcfg[MRAM_XIDF].off +
1038 priv->mcfg[MRAM_XIDF].num * XIDF_ELEMENT_SIZE;
1039 priv->mcfg[MRAM_RXF0].num = out_val[3] & RXFC_FS_MASK;
1040 priv->mcfg[MRAM_RXF1].off = priv->mcfg[MRAM_RXF0].off +
1041 priv->mcfg[MRAM_RXF0].num * RXF0_ELEMENT_SIZE;
1042 priv->mcfg[MRAM_RXF1].num = out_val[4] & RXFC_FS_MASK;
1043 priv->mcfg[MRAM_RXB].off = priv->mcfg[MRAM_RXF1].off +
1044 priv->mcfg[MRAM_RXF1].num * RXF1_ELEMENT_SIZE;
1045 priv->mcfg[MRAM_RXB].num = out_val[5];
1046 priv->mcfg[MRAM_TXE].off = priv->mcfg[MRAM_RXB].off +
1047 priv->mcfg[MRAM_RXB].num * RXB_ELEMENT_SIZE;
1048 priv->mcfg[MRAM_TXE].num = out_val[6];
1049 priv->mcfg[MRAM_TXB].off = priv->mcfg[MRAM_TXE].off +
1050 priv->mcfg[MRAM_TXE].num * TXE_ELEMENT_SIZE;
1051 priv->mcfg[MRAM_TXB].num = out_val[7] & TXBC_NDTB_MASK;
1052
1053 dev_dbg(&pdev->dev, "mram_base %p sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n",
1054 priv->mram_base,
1055 priv->mcfg[MRAM_SIDF].off, priv->mcfg[MRAM_SIDF].num,
1056 priv->mcfg[MRAM_XIDF].off, priv->mcfg[MRAM_XIDF].num,
1057 priv->mcfg[MRAM_RXF0].off, priv->mcfg[MRAM_RXF0].num,
1058 priv->mcfg[MRAM_RXF1].off, priv->mcfg[MRAM_RXF1].num,
1059 priv->mcfg[MRAM_RXB].off, priv->mcfg[MRAM_RXB].num,
1060 priv->mcfg[MRAM_TXE].off, priv->mcfg[MRAM_TXE].num,
1061 priv->mcfg[MRAM_TXB].off, priv->mcfg[MRAM_TXB].num);
1062
1063 return 0;
1064}
1065
1066static int m_can_plat_probe(struct platform_device *pdev)
1067{
1068 struct net_device *dev;
1069 struct m_can_priv *priv;
1070 struct resource *res;
1071 void __iomem *addr;
1072 struct clk *hclk, *cclk;
1073 int irq, ret;
1074
1075 hclk = devm_clk_get(&pdev->dev, "hclk");
1076 cclk = devm_clk_get(&pdev->dev, "cclk");
1077 if (IS_ERR(hclk) || IS_ERR(cclk)) {
 1078		dev_err(&pdev->dev, "no clock found\n");
1079 return -ENODEV;
1080 }
1081
1082 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "m_can");
1083 addr = devm_ioremap_resource(&pdev->dev, res);
1084 irq = platform_get_irq_byname(pdev, "int0");
1085 if (IS_ERR(addr) || irq < 0)
1086 return -EINVAL;
1087
1088 /* allocate the m_can device */
1089 dev = alloc_m_can_dev();
1090 if (!dev)
1091 return -ENOMEM;
1092
1093 priv = netdev_priv(dev);
1094 dev->irq = irq;
1095 priv->base = addr;
1096 priv->device = &pdev->dev;
1097 priv->hclk = hclk;
1098 priv->cclk = cclk;
1099 priv->can.clock.freq = clk_get_rate(cclk);
1100
1101 ret = m_can_of_parse_mram(pdev, priv);
1102 if (ret)
1103 goto failed_free_dev;
1104
1105 platform_set_drvdata(pdev, dev);
1106 SET_NETDEV_DEV(dev, &pdev->dev);
1107
1108 ret = register_m_can_dev(dev);
1109 if (ret) {
1110 dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
1111 KBUILD_MODNAME, ret);
1112 goto failed_free_dev;
1113 }
1114
1115 devm_can_led_init(dev);
1116
1117 dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
1118 KBUILD_MODNAME, priv->base, dev->irq);
1119
1120 return 0;
1121
1122failed_free_dev:
1123 free_m_can_dev(dev);
1124 return ret;
1125}
1126
1127static __maybe_unused int m_can_suspend(struct device *dev)
1128{
1129 struct net_device *ndev = dev_get_drvdata(dev);
1130 struct m_can_priv *priv = netdev_priv(ndev);
1131
1132 if (netif_running(ndev)) {
1133 netif_stop_queue(ndev);
1134 netif_device_detach(ndev);
1135 }
1136
1137 /* TODO: enter low power */
1138
1139 priv->can.state = CAN_STATE_SLEEPING;
1140
1141 return 0;
1142}
1143
1144static __maybe_unused int m_can_resume(struct device *dev)
1145{
1146 struct net_device *ndev = dev_get_drvdata(dev);
1147 struct m_can_priv *priv = netdev_priv(ndev);
1148
1149 /* TODO: exit low power */
1150
1151 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1152
1153 if (netif_running(ndev)) {
1154 netif_device_attach(ndev);
1155 netif_start_queue(ndev);
1156 }
1157
1158 return 0;
1159}
1160
1161static void unregister_m_can_dev(struct net_device *dev)
1162{
1163 unregister_candev(dev);
1164}
1165
1166static int m_can_plat_remove(struct platform_device *pdev)
1167{
1168 struct net_device *dev = platform_get_drvdata(pdev);
1169
1170 unregister_m_can_dev(dev);
1171 platform_set_drvdata(pdev, NULL);
1172
1173 free_m_can_dev(dev);
1174
1175 return 0;
1176}
1177
1178static const struct dev_pm_ops m_can_pmops = {
1179 SET_SYSTEM_SLEEP_PM_OPS(m_can_suspend, m_can_resume)
1180};
1181
1182static const struct of_device_id m_can_of_table[] = {
1183 { .compatible = "bosch,m_can", .data = NULL },
1184 { /* sentinel */ },
1185};
1186MODULE_DEVICE_TABLE(of, m_can_of_table);
1187
1188static struct platform_driver m_can_plat_driver = {
1189 .driver = {
1190 .name = KBUILD_MODNAME,
1191 .of_match_table = m_can_of_table,
1192 .pm = &m_can_pmops,
1193 },
1194 .probe = m_can_plat_probe,
1195 .remove = m_can_plat_remove,
1196};
1197
1198module_platform_driver(m_can_plat_driver);
1199
1200MODULE_AUTHOR("Dong Aisheng <b29396@freescale.com>");
1201MODULE_LICENSE("GPL v2");
1202MODULE_DESCRIPTION("CAN bus driver for Bosch M_CAN controller");
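
The "bosch,mram-cfg" property parsed by m_can_of_parse_mram() above packs a base offset plus seven element counts, and each Message RAM section simply starts where the previous one ends. The following stand-alone sketch mirrors that layout computation; the per-element sizes and cell values are illustrative assumptions, not taken from a real board file.

	#include <stdio.h>

	int main(void)
	{
		/* bosch,mram-cfg = <off sidf xidf rxf0 rxf1 rxb txe txb> */
		unsigned int cfg[8] = { 0x0, 128, 64, 64, 64, 64, 16, 16 };
		/* assumed per-element sizes in bytes (classic CAN frames) */
		unsigned int esz[7] = { 4, 8, 16, 16, 16, 8, 16 };
		const char *name[7] = { "sidf", "xidf", "rxf0", "rxf1",
					"rxb", "txe", "txb" };
		unsigned int off = cfg[0];
		int i;

		for (i = 0; i < 7; i++) {
			printf("%-4s off=0x%04x num=%u\n",
			       name[i], off, cfg[i + 1]);
			off += cfg[i + 1] * esz[i]; /* next section follows */
		}
		return 0;
	}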
diff --git a/drivers/net/can/mscan/Makefile b/drivers/net/can/mscan/Makefile
index c9fab17cd8b4..58903b45f5fb 100644
--- a/drivers/net/can/mscan/Makefile
+++ b/drivers/net/can/mscan/Makefile
@@ -1,5 +1,3 @@
1 1
2obj-$(CONFIG_CAN_MPC5XXX) += mscan-mpc5xxx.o 2obj-$(CONFIG_CAN_MPC5XXX) += mscan-mpc5xxx.o
3mscan-mpc5xxx-objs := mscan.o mpc5xxx_can.o 3mscan-mpc5xxx-objs := mscan.o mpc5xxx_can.o
4
5ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c
index 5268d216ecfa..1abe133d1594 100644
--- a/drivers/net/can/rcar_can.c
+++ b/drivers/net/can/rcar_can.c
@@ -20,6 +20,7 @@
20#include <linux/can/dev.h> 20#include <linux/can/dev.h>
21#include <linux/clk.h> 21#include <linux/clk.h>
22#include <linux/can/platform/rcar_can.h> 22#include <linux/can/platform/rcar_can.h>
23#include <linux/of.h>
23 24
24#define RCAR_CAN_DRV_NAME "rcar_can" 25#define RCAR_CAN_DRV_NAME "rcar_can"
25 26
@@ -87,6 +88,7 @@ struct rcar_can_priv {
87 struct napi_struct napi; 88 struct napi_struct napi;
88 struct rcar_can_regs __iomem *regs; 89 struct rcar_can_regs __iomem *regs;
89 struct clk *clk; 90 struct clk *clk;
91 struct clk *can_clk;
90 u8 tx_dlc[RCAR_CAN_FIFO_DEPTH]; 92 u8 tx_dlc[RCAR_CAN_FIFO_DEPTH];
91 u32 tx_head; 93 u32 tx_head;
92 u32 tx_tail; 94 u32 tx_tail;
@@ -505,14 +507,20 @@ static int rcar_can_open(struct net_device *ndev)
505 507
506 err = clk_prepare_enable(priv->clk); 508 err = clk_prepare_enable(priv->clk);
507 if (err) { 509 if (err) {
 508		netdev_err(ndev, "clk_prepare_enable() failed, error %d\n", 510		netdev_err(ndev, "failed to enable peripheral clock, error %d\n",
509 err); 511 err);
510 goto out; 512 goto out;
511 } 513 }
514 err = clk_prepare_enable(priv->can_clk);
515 if (err) {
516 netdev_err(ndev, "failed to enable CAN clock, error %d\n",
517 err);
518 goto out_clock;
519 }
512 err = open_candev(ndev); 520 err = open_candev(ndev);
513 if (err) { 521 if (err) {
514 netdev_err(ndev, "open_candev() failed, error %d\n", err); 522 netdev_err(ndev, "open_candev() failed, error %d\n", err);
515 goto out_clock; 523 goto out_can_clock;
516 } 524 }
517 napi_enable(&priv->napi); 525 napi_enable(&priv->napi);
518 err = request_irq(ndev->irq, rcar_can_interrupt, 0, ndev->name, ndev); 526 err = request_irq(ndev->irq, rcar_can_interrupt, 0, ndev->name, ndev);
@@ -527,6 +535,8 @@ static int rcar_can_open(struct net_device *ndev)
527out_close: 535out_close:
528 napi_disable(&priv->napi); 536 napi_disable(&priv->napi);
529 close_candev(ndev); 537 close_candev(ndev);
538out_can_clock:
539 clk_disable_unprepare(priv->can_clk);
530out_clock: 540out_clock:
531 clk_disable_unprepare(priv->clk); 541 clk_disable_unprepare(priv->clk);
532out: 542out:
@@ -565,6 +575,7 @@ static int rcar_can_close(struct net_device *ndev)
565 rcar_can_stop(ndev); 575 rcar_can_stop(ndev);
566 free_irq(ndev->irq, ndev); 576 free_irq(ndev->irq, ndev);
567 napi_disable(&priv->napi); 577 napi_disable(&priv->napi);
578 clk_disable_unprepare(priv->can_clk);
568 clk_disable_unprepare(priv->clk); 579 clk_disable_unprepare(priv->clk);
569 close_candev(ndev); 580 close_candev(ndev);
570 can_led_event(ndev, CAN_LED_EVENT_STOP); 581 can_led_event(ndev, CAN_LED_EVENT_STOP);
@@ -715,6 +726,12 @@ static int rcar_can_get_berr_counter(const struct net_device *dev,
715 return 0; 726 return 0;
716} 727}
717 728
729static const char * const clock_names[] = {
730 [CLKR_CLKP1] = "clkp1",
731 [CLKR_CLKP2] = "clkp2",
732 [CLKR_CLKEXT] = "can_clk",
733};
734
718static int rcar_can_probe(struct platform_device *pdev) 735static int rcar_can_probe(struct platform_device *pdev)
719{ 736{
720 struct rcar_can_platform_data *pdata; 737 struct rcar_can_platform_data *pdata;
@@ -722,13 +739,20 @@ static int rcar_can_probe(struct platform_device *pdev)
722 struct net_device *ndev; 739 struct net_device *ndev;
723 struct resource *mem; 740 struct resource *mem;
724 void __iomem *addr; 741 void __iomem *addr;
742 u32 clock_select = CLKR_CLKP1;
725 int err = -ENODEV; 743 int err = -ENODEV;
726 int irq; 744 int irq;
727 745
728 pdata = dev_get_platdata(&pdev->dev); 746 if (pdev->dev.of_node) {
729 if (!pdata) { 747 of_property_read_u32(pdev->dev.of_node,
730 dev_err(&pdev->dev, "No platform data provided!\n"); 748 "renesas,can-clock-select", &clock_select);
731 goto fail; 749 } else {
750 pdata = dev_get_platdata(&pdev->dev);
751 if (!pdata) {
752 dev_err(&pdev->dev, "No platform data provided!\n");
753 goto fail;
754 }
755 clock_select = pdata->clock_select;
732 } 756 }
733 757
734 irq = platform_get_irq(pdev, 0); 758 irq = platform_get_irq(pdev, 0);
@@ -753,10 +777,22 @@ static int rcar_can_probe(struct platform_device *pdev)
753 777
754 priv = netdev_priv(ndev); 778 priv = netdev_priv(ndev);
755 779
756 priv->clk = devm_clk_get(&pdev->dev, NULL); 780 priv->clk = devm_clk_get(&pdev->dev, "clkp1");
757 if (IS_ERR(priv->clk)) { 781 if (IS_ERR(priv->clk)) {
758 err = PTR_ERR(priv->clk); 782 err = PTR_ERR(priv->clk);
759 dev_err(&pdev->dev, "cannot get clock: %d\n", err); 783 dev_err(&pdev->dev, "cannot get peripheral clock: %d\n", err);
784 goto fail_clk;
785 }
786
787 if (clock_select >= ARRAY_SIZE(clock_names)) {
788 err = -EINVAL;
789 dev_err(&pdev->dev, "invalid CAN clock selected\n");
790 goto fail_clk;
791 }
792 priv->can_clk = devm_clk_get(&pdev->dev, clock_names[clock_select]);
793 if (IS_ERR(priv->can_clk)) {
794 err = PTR_ERR(priv->can_clk);
795 dev_err(&pdev->dev, "cannot get CAN clock: %d\n", err);
760 goto fail_clk; 796 goto fail_clk;
761 } 797 }
762 798
@@ -765,8 +801,8 @@ static int rcar_can_probe(struct platform_device *pdev)
765 ndev->flags |= IFF_ECHO; 801 ndev->flags |= IFF_ECHO;
766 priv->ndev = ndev; 802 priv->ndev = ndev;
767 priv->regs = addr; 803 priv->regs = addr;
768 priv->clock_select = pdata->clock_select; 804 priv->clock_select = clock_select;
769 priv->can.clock.freq = clk_get_rate(priv->clk); 805 priv->can.clock.freq = clk_get_rate(priv->can_clk);
770 priv->can.bittiming_const = &rcar_can_bittiming_const; 806 priv->can.bittiming_const = &rcar_can_bittiming_const;
771 priv->can.do_set_mode = rcar_can_do_set_mode; 807 priv->can.do_set_mode = rcar_can_do_set_mode;
772 priv->can.do_get_berr_counter = rcar_can_get_berr_counter; 808 priv->can.do_get_berr_counter = rcar_can_get_berr_counter;
@@ -858,10 +894,20 @@ static int __maybe_unused rcar_can_resume(struct device *dev)
858 894
859static SIMPLE_DEV_PM_OPS(rcar_can_pm_ops, rcar_can_suspend, rcar_can_resume); 895static SIMPLE_DEV_PM_OPS(rcar_can_pm_ops, rcar_can_suspend, rcar_can_resume);
860 896
897static const struct of_device_id rcar_can_of_table[] __maybe_unused = {
898 { .compatible = "renesas,can-r8a7778" },
899 { .compatible = "renesas,can-r8a7779" },
900 { .compatible = "renesas,can-r8a7790" },
901 { .compatible = "renesas,can-r8a7791" },
902 { }
903};
904MODULE_DEVICE_TABLE(of, rcar_can_of_table);
905
861static struct platform_driver rcar_can_driver = { 906static struct platform_driver rcar_can_driver = {
862 .driver = { 907 .driver = {
863 .name = RCAR_CAN_DRV_NAME, 908 .name = RCAR_CAN_DRV_NAME,
864 .owner = THIS_MODULE, 909 .owner = THIS_MODULE,
910 .of_match_table = of_match_ptr(rcar_can_of_table),
865 .pm = &rcar_can_pm_ops, 911 .pm = &rcar_can_pm_ops,
866 }, 912 },
867 .probe = rcar_can_probe, 913 .probe = rcar_can_probe,
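
The "renesas,can-clock-select" handling above reduces to a bounds-checked lookup from the CLKR_* index into the clock_names[] table before the clock is requested. A minimal user-space sketch of that mapping, assuming the CLKR_* constants are simply 0, 1 and 2:

	#include <stdio.h>

	enum { CLKR_CLKP1, CLKR_CLKP2, CLKR_CLKEXT };

	static const char *const clock_names[] = {
		[CLKR_CLKP1]  = "clkp1",
		[CLKR_CLKP2]  = "clkp2",
		[CLKR_CLKEXT] = "can_clk",
	};

	/* Reject out-of-range DT values before the clock lookup */
	static const char *rcar_clock_name(unsigned int clock_select)
	{
		if (clock_select >= sizeof(clock_names) / sizeof(clock_names[0]))
			return NULL;
		return clock_names[clock_select];
	}

	int main(void)
	{
		printf("%s\n", rcar_clock_name(CLKR_CLKEXT)); /* can_clk */
		return 0;
	}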
diff --git a/drivers/net/can/sja1000/Makefile b/drivers/net/can/sja1000/Makefile
index 531d5fcc97e5..be11ddd11b87 100644
--- a/drivers/net/can/sja1000/Makefile
+++ b/drivers/net/can/sja1000/Makefile
@@ -12,5 +12,3 @@ obj-$(CONFIG_CAN_PEAK_PCMCIA) += peak_pcmcia.o
12obj-$(CONFIG_CAN_PEAK_PCI) += peak_pci.o 12obj-$(CONFIG_CAN_PEAK_PCI) += peak_pci.o
13obj-$(CONFIG_CAN_PLX_PCI) += plx_pci.o 13obj-$(CONFIG_CAN_PLX_PCI) += plx_pci.o
14obj-$(CONFIG_CAN_TSCAN1) += tscan1.o 14obj-$(CONFIG_CAN_TSCAN1) += tscan1.o
15
16ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/softing/Makefile b/drivers/net/can/softing/Makefile
index c5e5016c742e..a23da492dad5 100644
--- a/drivers/net/can/softing/Makefile
+++ b/drivers/net/can/softing/Makefile
@@ -2,5 +2,3 @@
2softing-y := softing_main.o softing_fw.o 2softing-y := softing_main.o softing_fw.o
3obj-$(CONFIG_CAN_SOFTING) += softing.o 3obj-$(CONFIG_CAN_SOFTING) += softing.o
4obj-$(CONFIG_CAN_SOFTING_CS) += softing_cs.o 4obj-$(CONFIG_CAN_SOFTING_CS) += softing_cs.o
5
6ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/spi/Makefile b/drivers/net/can/spi/Makefile
index 90bcacffbc65..0e86040cdd8c 100644
--- a/drivers/net/can/spi/Makefile
+++ b/drivers/net/can/spi/Makefile
@@ -4,5 +4,3 @@
4 4
5 5
6obj-$(CONFIG_CAN_MCP251X) += mcp251x.o 6obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
7
8ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 5df239e68812..c66d699640a9 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -1107,10 +1107,10 @@ static int mcp251x_can_probe(struct spi_device *spi)
1107 * Minimum coherent DMA allocation is PAGE_SIZE, so allocate 1107 * Minimum coherent DMA allocation is PAGE_SIZE, so allocate
1108 * that much and share it between Tx and Rx DMA buffers. 1108 * that much and share it between Tx and Rx DMA buffers.
1109 */ 1109 */
1110 priv->spi_tx_buf = dma_alloc_coherent(&spi->dev, 1110 priv->spi_tx_buf = dmam_alloc_coherent(&spi->dev,
1111 PAGE_SIZE, 1111 PAGE_SIZE,
1112 &priv->spi_tx_dma, 1112 &priv->spi_tx_dma,
1113 GFP_DMA); 1113 GFP_DMA);
1114 1114
1115 if (priv->spi_tx_buf) { 1115 if (priv->spi_tx_buf) {
1116 priv->spi_rx_buf = (priv->spi_tx_buf + (PAGE_SIZE / 2)); 1116 priv->spi_rx_buf = (priv->spi_tx_buf + (PAGE_SIZE / 2));
@@ -1156,9 +1156,6 @@ static int mcp251x_can_probe(struct spi_device *spi)
1156 return 0; 1156 return 0;
1157 1157
1158error_probe: 1158error_probe:
1159 if (mcp251x_enable_dma)
1160 dma_free_coherent(&spi->dev, PAGE_SIZE,
1161 priv->spi_tx_buf, priv->spi_tx_dma);
1162 mcp251x_power_enable(priv->power, 0); 1159 mcp251x_power_enable(priv->power, 0);
1163 1160
1164out_clk: 1161out_clk:
@@ -1178,11 +1175,6 @@ static int mcp251x_can_remove(struct spi_device *spi)
1178 1175
1179 unregister_candev(net); 1176 unregister_candev(net);
1180 1177
1181 if (mcp251x_enable_dma) {
1182 dma_free_coherent(&spi->dev, PAGE_SIZE,
1183 priv->spi_tx_buf, priv->spi_tx_dma);
1184 }
1185
1186 mcp251x_power_enable(priv->power, 0); 1178 mcp251x_power_enable(priv->power, 0);
1187 1179
1188 if (!IS_ERR(priv->clk)) 1180 if (!IS_ERR(priv->clk))
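
The mcp251x change swaps dma_alloc_coherent() for its device-managed counterpart, which is why the explicit dma_free_coherent() calls disappear from both the probe error path and remove(). A hedged sketch of the resulting pattern, with a made-up probe function name:

	#include <linux/dma-mapping.h>
	#include <linux/spi/spi.h>

	static int example_probe(struct spi_device *spi)
	{
		dma_addr_t dma;
		void *buf;

		/* devres tracks this buffer and frees it on detach */
		buf = dmam_alloc_coherent(&spi->dev, PAGE_SIZE, &dma,
					  GFP_DMA);
		if (!buf)
			return -ENOMEM;

		/* later failures may simply return an error code;
		 * no dma_free_coherent() is needed anywhere
		 */
		return 0;
	}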
diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile
index 7b9a393b1ac8..a64cf983fb87 100644
--- a/drivers/net/can/usb/Makefile
+++ b/drivers/net/can/usb/Makefile
@@ -8,5 +8,3 @@ obj-$(CONFIG_CAN_GS_USB) += gs_usb.o
8obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o 8obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o
9obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/ 9obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/
10obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o 10obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o
11
12ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index b8fe808b7957..c6ee07c6a1b5 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -36,4 +36,15 @@ config NET_DSA_MV88E6123_61_65
36 This enables support for the Marvell 88E6123/6161/6165 36 This enables support for the Marvell 88E6123/6161/6165
37 ethernet switch chips. 37 ethernet switch chips.
38 38
39config NET_DSA_BCM_SF2
40 tristate "Broadcom Starfighter 2 Ethernet switch support"
41 select NET_DSA
42 select NET_DSA_TAG_BRCM
43 select FIXED_PHY if NET_DSA_BCM_SF2=y
44 select BCM7XXX_PHY
45 select MDIO_BCM_UNIMAC
46 ---help---
47 This enables support for the Broadcom Starfighter 2 Ethernet
48 switch chips.
49
39endmenu 50endmenu
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index f3bda05536cc..dd3cd3b8157f 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -7,3 +7,4 @@ endif
7ifdef CONFIG_NET_DSA_MV88E6131 7ifdef CONFIG_NET_DSA_MV88E6131
8mv88e6xxx_drv-y += mv88e6131.o 8mv88e6xxx_drv-y += mv88e6131.o
9endif 9endif
10obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm_sf2.o
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
new file mode 100644
index 000000000000..bb7cb8e283b1
--- /dev/null
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -0,0 +1,626 @@
1/*
2 * Broadcom Starfighter 2 DSA switch driver
3 *
4 * Copyright (C) 2014, Broadcom Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/list.h>
13#include <linux/module.h>
14#include <linux/netdevice.h>
15#include <linux/interrupt.h>
16#include <linux/platform_device.h>
17#include <linux/of.h>
18#include <linux/phy.h>
19#include <linux/phy_fixed.h>
20#include <linux/mii.h>
21#include <linux/of.h>
22#include <linux/of_irq.h>
23#include <linux/of_address.h>
24#include <net/dsa.h>
25
26#include "bcm_sf2.h"
27#include "bcm_sf2_regs.h"
28
29/* String, offset, and register size in bytes if different from 4 bytes */
30static const struct bcm_sf2_hw_stats bcm_sf2_mib[] = {
31 { "TxOctets", 0x000, 8 },
32 { "TxDropPkts", 0x020 },
33 { "TxQPKTQ0", 0x030 },
34 { "TxBroadcastPkts", 0x040 },
35 { "TxMulticastPkts", 0x050 },
 36	{ "TxUnicastPkts", 0x060 },
37 { "TxCollisions", 0x070 },
38 { "TxSingleCollision", 0x080 },
39 { "TxMultipleCollision", 0x090 },
40 { "TxDeferredCollision", 0x0a0 },
41 { "TxLateCollision", 0x0b0 },
42 { "TxExcessiveCollision", 0x0c0 },
43 { "TxFrameInDisc", 0x0d0 },
44 { "TxPausePkts", 0x0e0 },
45 { "TxQPKTQ1", 0x0f0 },
46 { "TxQPKTQ2", 0x100 },
47 { "TxQPKTQ3", 0x110 },
48 { "TxQPKTQ4", 0x120 },
49 { "TxQPKTQ5", 0x130 },
50 { "RxOctets", 0x140, 8 },
51 { "RxUndersizePkts", 0x160 },
52 { "RxPausePkts", 0x170 },
53 { "RxPkts64Octets", 0x180 },
54 { "RxPkts65to127Octets", 0x190 },
55 { "RxPkts128to255Octets", 0x1a0 },
56 { "RxPkts256to511Octets", 0x1b0 },
57 { "RxPkts512to1023Octets", 0x1c0 },
58 { "RxPkts1024toMaxPktsOctets", 0x1d0 },
59 { "RxOversizePkts", 0x1e0 },
60 { "RxJabbers", 0x1f0 },
61 { "RxAlignmentErrors", 0x200 },
62 { "RxFCSErrors", 0x210 },
63 { "RxGoodOctets", 0x220, 8 },
64 { "RxDropPkts", 0x240 },
65 { "RxUnicastPkts", 0x250 },
66 { "RxMulticastPkts", 0x260 },
67 { "RxBroadcastPkts", 0x270 },
68 { "RxSAChanges", 0x280 },
69 { "RxFragments", 0x290 },
70 { "RxJumboPkt", 0x2a0 },
71 { "RxSymblErr", 0x2b0 },
72 { "InRangeErrCount", 0x2c0 },
73 { "OutRangeErrCount", 0x2d0 },
74 { "EEELpiEvent", 0x2e0 },
75 { "EEELpiDuration", 0x2f0 },
76 { "RxDiscard", 0x300, 8 },
77 { "TxQPKTQ6", 0x320 },
78 { "TxQPKTQ7", 0x330 },
79 { "TxPkts64Octets", 0x340 },
80 { "TxPkts65to127Octets", 0x350 },
81 { "TxPkts128to255Octets", 0x360 },
 82	{ "TxPkts256to511Octets", 0x370 },
 83	{ "TxPkts512to1023Octets", 0x380 },
 84	{ "TxPkts1024toMaxPktOctets", 0x390 },
85};
86
87#define BCM_SF2_STATS_SIZE ARRAY_SIZE(bcm_sf2_mib)
88
89static void bcm_sf2_sw_get_strings(struct dsa_switch *ds,
90 int port, uint8_t *data)
91{
92 unsigned int i;
93
94 for (i = 0; i < BCM_SF2_STATS_SIZE; i++)
95 memcpy(data + i * ETH_GSTRING_LEN,
96 bcm_sf2_mib[i].string, ETH_GSTRING_LEN);
97}
98
99static void bcm_sf2_sw_get_ethtool_stats(struct dsa_switch *ds,
100 int port, uint64_t *data)
101{
102 struct bcm_sf2_priv *priv = ds_to_priv(ds);
103 const struct bcm_sf2_hw_stats *s;
104 unsigned int i;
105 u64 val = 0;
106 u32 offset;
107
108 mutex_lock(&priv->stats_mutex);
109
110 /* Now fetch the per-port counters */
111 for (i = 0; i < BCM_SF2_STATS_SIZE; i++) {
112 s = &bcm_sf2_mib[i];
113
114 /* Do a latched 64-bit read if needed */
115 offset = s->reg + CORE_P_MIB_OFFSET(port);
116 if (s->sizeof_stat == 8)
117 val = core_readq(priv, offset);
118 else
119 val = core_readl(priv, offset);
120
121 data[i] = (u64)val;
122 }
123
124 mutex_unlock(&priv->stats_mutex);
125}
126
127static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds)
128{
129 return BCM_SF2_STATS_SIZE;
130}
131
132static char *bcm_sf2_sw_probe(struct mii_bus *bus, int sw_addr)
133{
134 return "Broadcom Starfighter 2";
135}
136
137static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
138{
139 struct bcm_sf2_priv *priv = ds_to_priv(ds);
140 unsigned int i;
141 u32 reg, val;
142
143 /* Enable the port memories */
144 reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
145 reg &= ~P_TXQ_PSM_VDD(port);
146 core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
147
148 /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
149 reg = core_readl(priv, CORE_IMP_CTL);
150 reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
151 reg &= ~(RX_DIS | TX_DIS);
152 core_writel(priv, reg, CORE_IMP_CTL);
153
154 /* Enable forwarding */
155 core_writel(priv, SW_FWDG_EN, CORE_SWMODE);
156
157 /* Enable IMP port in dumb mode */
158 reg = core_readl(priv, CORE_SWITCH_CTRL);
159 reg |= MII_DUMB_FWDG_EN;
160 core_writel(priv, reg, CORE_SWITCH_CTRL);
161
162 /* Resolve which bit controls the Broadcom tag */
163 switch (port) {
164 case 8:
165 val = BRCM_HDR_EN_P8;
166 break;
167 case 7:
168 val = BRCM_HDR_EN_P7;
169 break;
170 case 5:
171 val = BRCM_HDR_EN_P5;
172 break;
173 default:
174 val = 0;
175 break;
176 }
177
178 /* Enable Broadcom tags for IMP port */
179 reg = core_readl(priv, CORE_BRCM_HDR_CTRL);
180 reg |= val;
181 core_writel(priv, reg, CORE_BRCM_HDR_CTRL);
182
 183	/* Enable reception of Broadcom tags for CPU TX (switch RX) so
 184	 * that we can tag outgoing frames
185 */
186 reg = core_readl(priv, CORE_BRCM_HDR_RX_DIS);
187 reg &= ~(1 << port);
188 core_writel(priv, reg, CORE_BRCM_HDR_RX_DIS);
189
190 /* Enable transmission of Broadcom tags from the switch (CPU RX) to
191 * allow delivering frames to the per-port net_devices
192 */
193 reg = core_readl(priv, CORE_BRCM_HDR_TX_DIS);
194 reg &= ~(1 << port);
195 core_writel(priv, reg, CORE_BRCM_HDR_TX_DIS);
196
197 /* Force link status for IMP port */
198 reg = core_readl(priv, CORE_STS_OVERRIDE_IMP);
199 reg |= (MII_SW_OR | LINK_STS);
200 core_writel(priv, reg, CORE_STS_OVERRIDE_IMP);
201
202 /* Enable the IMP Port to be in the same VLAN as the other ports
203 * on a per-port basis such that we only have Port i and IMP in
204 * the same VLAN.
205 */
206 for (i = 0; i < priv->hw_params.num_ports; i++) {
207 if (!((1 << i) & ds->phys_port_mask))
208 continue;
209
210 reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
211 reg |= (1 << port);
212 core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
213 }
214}
215
216static void bcm_sf2_port_setup(struct dsa_switch *ds, int port)
217{
218 struct bcm_sf2_priv *priv = ds_to_priv(ds);
219 u32 reg;
220
221 /* Clear the memory power down */
222 reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
223 reg &= ~P_TXQ_PSM_VDD(port);
224 core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
225
226 /* Clear the Rx and Tx disable bits and set to no spanning tree */
227 core_writel(priv, 0, CORE_G_PCTL_PORT(port));
228
229 /* Enable port 7 interrupts to get notified */
230 if (port == 7)
231 intrl2_1_mask_clear(priv, P_IRQ_MASK(P7_IRQ_OFF));
232
 233	/* Set this port, and only this one, to be in the default VLAN */
234 reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
235 reg &= ~PORT_VLAN_CTRL_MASK;
236 reg |= (1 << port);
237 core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(port));
238}
239
240static void bcm_sf2_port_disable(struct dsa_switch *ds, int port)
241{
242 struct bcm_sf2_priv *priv = ds_to_priv(ds);
243 u32 off, reg;
244
245 if (dsa_is_cpu_port(ds, port))
246 off = CORE_IMP_CTL;
247 else
248 off = CORE_G_PCTL_PORT(port);
249
250 reg = core_readl(priv, off);
251 reg |= RX_DIS | TX_DIS;
252 core_writel(priv, reg, off);
253
254 /* Power down the port memory */
255 reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
256 reg |= P_TXQ_PSM_VDD(port);
257 core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
258}
259
260static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
261{
262 struct bcm_sf2_priv *priv = dev_id;
263
264 priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
265 ~priv->irq0_mask;
266 intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
267
268 return IRQ_HANDLED;
269}
270
271static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
272{
273 struct bcm_sf2_priv *priv = dev_id;
274
275 priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
276 ~priv->irq1_mask;
277 intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
278
279 if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF))
280 priv->port_sts[7].link = 1;
281 if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF))
282 priv->port_sts[7].link = 0;
283
284 return IRQ_HANDLED;
285}
286
287static int bcm_sf2_sw_setup(struct dsa_switch *ds)
288{
289 const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
290 struct bcm_sf2_priv *priv = ds_to_priv(ds);
291 struct device_node *dn;
292 void __iomem **base;
293 unsigned int port;
294 unsigned int i;
295 u32 reg, rev;
296 int ret;
297
298 spin_lock_init(&priv->indir_lock);
299 mutex_init(&priv->stats_mutex);
300
301 /* All the interesting properties are at the parent device_node
302 * level
303 */
304 dn = ds->pd->of_node->parent;
305
306 priv->irq0 = irq_of_parse_and_map(dn, 0);
307 priv->irq1 = irq_of_parse_and_map(dn, 1);
308
309 base = &priv->core;
310 for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
311 *base = of_iomap(dn, i);
312 if (*base == NULL) {
313 pr_err("unable to find register: %s\n", reg_names[i]);
314 return -ENODEV;
315 }
316 base++;
317 }
318
319 /* Disable all interrupts and request them */
320 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
321 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
322 intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
323 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
324 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
325 intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
326
327 ret = request_irq(priv->irq0, bcm_sf2_switch_0_isr, 0,
328 "switch_0", priv);
329 if (ret < 0) {
330 pr_err("failed to request switch_0 IRQ\n");
331 goto out_unmap;
332 }
333
334 ret = request_irq(priv->irq1, bcm_sf2_switch_1_isr, 0,
335 "switch_1", priv);
336 if (ret < 0) {
337 pr_err("failed to request switch_1 IRQ\n");
338 goto out_free_irq0;
339 }
340
341 /* Reset the MIB counters */
342 reg = core_readl(priv, CORE_GMNCFGCFG);
343 reg |= RST_MIB_CNT;
344 core_writel(priv, reg, CORE_GMNCFGCFG);
345 reg &= ~RST_MIB_CNT;
346 core_writel(priv, reg, CORE_GMNCFGCFG);
347
348 /* Get the maximum number of ports for this switch */
349 priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
350 if (priv->hw_params.num_ports > DSA_MAX_PORTS)
351 priv->hw_params.num_ports = DSA_MAX_PORTS;
352
353 /* Assume a single GPHY setup if we can't read that property */
354 if (of_property_read_u32(dn, "brcm,num-gphy",
355 &priv->hw_params.num_gphy))
356 priv->hw_params.num_gphy = 1;
357
358 /* Enable all valid ports and disable those unused */
359 for (port = 0; port < priv->hw_params.num_ports; port++) {
360 /* IMP port receives special treatment */
361 if ((1 << port) & ds->phys_port_mask)
362 bcm_sf2_port_setup(ds, port);
363 else if (dsa_is_cpu_port(ds, port))
364 bcm_sf2_imp_setup(ds, port);
365 else
366 bcm_sf2_port_disable(ds, port);
367 }
368
369 /* Include the pseudo-PHY address and the broadcast PHY address to
370 * divert reads towards our workaround
371 */
372 ds->phys_mii_mask |= ((1 << 30) | (1 << 0));
373
374 rev = reg_readl(priv, REG_SWITCH_REVISION);
375 priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
376 SWITCH_TOP_REV_MASK;
377 priv->hw_params.core_rev = (rev & SF2_REV_MASK);
378
379 pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n",
380 priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
381 priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
382 priv->core, priv->irq0, priv->irq1);
383
384 return 0;
385
386out_free_irq0:
387 free_irq(priv->irq0, priv);
388out_unmap:
389 base = &priv->core;
390 for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
391 iounmap(*base);
392 base++;
393 }
394 return ret;
395}
396
397static int bcm_sf2_sw_set_addr(struct dsa_switch *ds, u8 *addr)
398{
399 return 0;
400}
401
402static int bcm_sf2_sw_indir_rw(struct dsa_switch *ds, int op, int addr,
403 int regnum, u16 val)
404{
405 struct bcm_sf2_priv *priv = ds_to_priv(ds);
406 int ret = 0;
407 u32 reg;
408
409 reg = reg_readl(priv, REG_SWITCH_CNTRL);
410 reg |= MDIO_MASTER_SEL;
411 reg_writel(priv, reg, REG_SWITCH_CNTRL);
412
413 /* Page << 8 | offset */
414 reg = 0x70;
415 reg <<= 2;
416 core_writel(priv, addr, reg);
417
418 /* Page << 8 | offset */
419 reg = 0x80 << 8 | regnum << 1;
420 reg <<= 2;
421
422 if (op)
423 ret = core_readl(priv, reg);
424 else
425 core_writel(priv, val, reg);
426
427 reg = reg_readl(priv, REG_SWITCH_CNTRL);
428 reg &= ~MDIO_MASTER_SEL;
429 reg_writel(priv, reg, REG_SWITCH_CNTRL);
430
431 return ret & 0xffff;
432}
433
434static int bcm_sf2_sw_phy_read(struct dsa_switch *ds, int addr, int regnum)
435{
436 /* Intercept reads from the MDIO broadcast address or Broadcom
437 * pseudo-PHY address
438 */
439 switch (addr) {
440 case 0:
441 case 30:
442 return bcm_sf2_sw_indir_rw(ds, 1, addr, regnum, 0);
443 default:
444 return 0xffff;
445 }
446}
447
448static int bcm_sf2_sw_phy_write(struct dsa_switch *ds, int addr, int regnum,
449 u16 val)
450{
451 /* Intercept writes to the MDIO broadcast address or Broadcom
452 * pseudo-PHY address
453 */
454 switch (addr) {
455 case 0:
456 case 30:
457 bcm_sf2_sw_indir_rw(ds, 0, addr, regnum, val);
458 break;
459 }
460
461 return 0;
462}
463
464static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
465 struct phy_device *phydev)
466{
467 struct bcm_sf2_priv *priv = ds_to_priv(ds);
468 u32 id_mode_dis = 0, port_mode;
469 const char *str = NULL;
470 u32 reg;
471
472 switch (phydev->interface) {
473 case PHY_INTERFACE_MODE_RGMII:
474 str = "RGMII (no delay)";
475 id_mode_dis = 1;
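		/* fall through */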
476 case PHY_INTERFACE_MODE_RGMII_TXID:
477 if (!str)
478 str = "RGMII (TX delay)";
479 port_mode = EXT_GPHY;
480 break;
481 case PHY_INTERFACE_MODE_MII:
482 str = "MII";
483 port_mode = EXT_EPHY;
484 break;
485 case PHY_INTERFACE_MODE_REVMII:
486 str = "Reverse MII";
487 port_mode = EXT_REVMII;
488 break;
489 default:
490 goto force_link;
491 }
492
493 /* Clear id_mode_dis bit, and the existing port mode, but
494 * make sure we enable the RGMII block for data to pass
495 */
496 reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
497 reg &= ~ID_MODE_DIS;
498 reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
499 reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
500
501 reg |= port_mode | RGMII_MODE_EN;
502 if (id_mode_dis)
503 reg |= ID_MODE_DIS;
504
505 if (phydev->pause) {
506 if (phydev->asym_pause)
507 reg |= TX_PAUSE_EN;
508 reg |= RX_PAUSE_EN;
509 }
510
511 reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
512
513 pr_info("Port %d configured for %s\n", port, str);
514
515force_link:
516 /* Force link settings detected from the PHY */
517 reg = SW_OVERRIDE;
518 switch (phydev->speed) {
519 case SPEED_1000:
520 reg |= SPDSTS_1000 << SPEED_SHIFT;
521 break;
522 case SPEED_100:
523 reg |= SPDSTS_100 << SPEED_SHIFT;
524 break;
525 }
526
527 if (phydev->link)
528 reg |= LINK_STS;
529 if (phydev->duplex == DUPLEX_FULL)
530 reg |= DUPLX_MODE;
531
532 core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
533}
534
535static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
536 struct fixed_phy_status *status)
537{
538 struct bcm_sf2_priv *priv = ds_to_priv(ds);
539 u32 link, duplex, pause, speed;
540 u32 reg;
541
542 link = core_readl(priv, CORE_LNKSTS);
543 duplex = core_readl(priv, CORE_DUPSTS);
544 pause = core_readl(priv, CORE_PAUSESTS);
545 speed = core_readl(priv, CORE_SPDSTS);
546
547 speed >>= (port * SPDSTS_SHIFT);
548 speed &= SPDSTS_MASK;
549
550 status->link = 0;
551
552 /* Port 7 is special as we do not get link status from CORE_LNKSTS,
553 * which means that we need to force the link at the port override
 554	 * level to get the data to flow. We use the link state that the
 555	 * interrupt handler determined earlier.
556 */
557 if (port == 7) {
558 status->link = priv->port_sts[port].link;
559 reg = core_readl(priv, CORE_STS_OVERRIDE_GMIIP_PORT(7));
560 reg |= SW_OVERRIDE;
561 if (status->link)
562 reg |= LINK_STS;
563 else
564 reg &= ~LINK_STS;
565 core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(7));
566 status->duplex = 1;
567 } else {
568 status->link = !!(link & (1 << port));
569 status->duplex = !!(duplex & (1 << port));
570 }
571
572 switch (speed) {
573 case SPDSTS_10:
574 status->speed = SPEED_10;
575 break;
576 case SPDSTS_100:
577 status->speed = SPEED_100;
578 break;
579 case SPDSTS_1000:
580 status->speed = SPEED_1000;
581 break;
582 }
583
584 if ((pause & (1 << port)) &&
585 (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
586 status->asym_pause = 1;
587 status->pause = 1;
588 }
589
590 if (pause & (1 << port))
591 status->pause = 1;
592}
593
594static struct dsa_switch_driver bcm_sf2_switch_driver = {
595 .tag_protocol = htons(ETH_P_BRCMTAG),
596 .priv_size = sizeof(struct bcm_sf2_priv),
597 .probe = bcm_sf2_sw_probe,
598 .setup = bcm_sf2_sw_setup,
599 .set_addr = bcm_sf2_sw_set_addr,
600 .phy_read = bcm_sf2_sw_phy_read,
601 .phy_write = bcm_sf2_sw_phy_write,
602 .get_strings = bcm_sf2_sw_get_strings,
603 .get_ethtool_stats = bcm_sf2_sw_get_ethtool_stats,
604 .get_sset_count = bcm_sf2_sw_get_sset_count,
605 .adjust_link = bcm_sf2_sw_adjust_link,
606 .fixed_link_update = bcm_sf2_sw_fixed_link_update,
607};
608
609static int __init bcm_sf2_init(void)
610{
611 register_switch_driver(&bcm_sf2_switch_driver);
612
613 return 0;
614}
615module_init(bcm_sf2_init);
616
617static void __exit bcm_sf2_exit(void)
618{
619 unregister_switch_driver(&bcm_sf2_switch_driver);
620}
621module_exit(bcm_sf2_exit);
622
623MODULE_AUTHOR("Broadcom Corporation");
624MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip");
625MODULE_LICENSE("GPL");
626MODULE_ALIAS("platform:brcm-sf2");
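
bcm_sf2_sw_indir_rw() above addresses internal switch registers as a word address of the form (page << 8 | offset), shifted left by 2 to obtain a byte offset into the core window. A small sketch of that computation; the page and register values below are only for illustration:

	#include <stdio.h>

	static unsigned int sf2_core_off(unsigned int page,
					 unsigned int offset)
	{
		/* word address = page << 8 | offset; << 2 yields bytes */
		return ((page << 8) | offset) << 2;
	}

	int main(void)
	{
		/* e.g. page 0x80, regnum 2 (offset = regnum << 1) */
		printf("0x%x\n", sf2_core_off(0x80, 2 << 1)); /* 0x20010 */
		return 0;
	}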
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
new file mode 100644
index 000000000000..260bab313e58
--- /dev/null
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -0,0 +1,140 @@
1/*
 2 * Broadcom Starfighter 2 private context
3 *
4 * Copyright (C) 2014, Broadcom Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#ifndef __BCM_SF2_H
13#define __BCM_SF2_H
14
15#include <linux/platform_device.h>
16#include <linux/kernel.h>
17#include <linux/io.h>
18#include <linux/spinlock.h>
19#include <linux/mutex.h>
20#include <linux/mii.h>
21
22#include <net/dsa.h>
23
24#include "bcm_sf2_regs.h"
25
26struct bcm_sf2_hw_params {
27 u16 top_rev;
28 u16 core_rev;
29 u32 num_gphy;
30 u8 num_acb_queue;
31 u8 num_rgmii;
32 u8 num_ports;
33 u8 fcb_pause_override:1;
34 u8 acb_packets_inflight:1;
35};
36
37#define BCM_SF2_REGS_NAME {\
38 "core", "reg", "intrl2_0", "intrl2_1", "fcb", "acb" \
39}
40
41#define BCM_SF2_REGS_NUM 6
42
43struct bcm_sf2_port_status {
44 unsigned int link;
45};
46
47struct bcm_sf2_priv {
48 /* Base registers, keep those in order with BCM_SF2_REGS_NAME */
49 void __iomem *core;
50 void __iomem *reg;
51 void __iomem *intrl2_0;
52 void __iomem *intrl2_1;
53 void __iomem *fcb;
54 void __iomem *acb;
55
56 /* spinlock protecting access to the indirect registers */
57 spinlock_t indir_lock;
58
59 int irq0;
60 int irq1;
61 u32 irq0_stat;
62 u32 irq0_mask;
63 u32 irq1_stat;
64 u32 irq1_mask;
65
66 /* Mutex protecting access to the MIB counters */
67 struct mutex stats_mutex;
68
69 struct bcm_sf2_hw_params hw_params;
70
71 struct bcm_sf2_port_status port_sts[DSA_MAX_PORTS];
72};
73
74struct bcm_sf2_hw_stats {
75 const char *string;
76 u16 reg;
77 u8 sizeof_stat;
78};
79
80#define SF2_IO_MACRO(name) \
81static inline u32 name##_readl(struct bcm_sf2_priv *priv, u32 off) \
82{ \
83 return __raw_readl(priv->name + off); \
84} \
85static inline void name##_writel(struct bcm_sf2_priv *priv, \
86 u32 val, u32 off) \
87{ \
88 __raw_writel(val, priv->name + off); \
89} \
90
 91/* Accesses to 64-bit registers require us to latch the hi/lo pairs
92 * using the REG_DIR_DATA_{READ,WRITE} ancillary registers. The 'indir_lock'
93 * spinlock is automatically grabbed and released to provide relative
 94 * atomicity with latched reads/writes.
95 */
96#define SF2_IO64_MACRO(name) \
97static inline u64 name##_readq(struct bcm_sf2_priv *priv, u32 off) \
98{ \
99 u32 indir, dir; \
100 spin_lock(&priv->indir_lock); \
101 indir = reg_readl(priv, REG_DIR_DATA_READ); \
102 dir = __raw_readl(priv->name + off); \
103 spin_unlock(&priv->indir_lock); \
104 return (u64)indir << 32 | dir; \
105} \
106static inline void name##_writeq(struct bcm_sf2_priv *priv, u32 off, \
107 u64 val) \
108{ \
109 spin_lock(&priv->indir_lock); \
110 reg_writel(priv, upper_32_bits(val), REG_DIR_DATA_WRITE); \
111 __raw_writel(lower_32_bits(val), priv->name + off); \
112 spin_unlock(&priv->indir_lock); \
113}
114
115#define SWITCH_INTR_L2(which) \
116static inline void intrl2_##which##_mask_clear(struct bcm_sf2_priv *priv, \
117 u32 mask) \
118{ \
119 intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \
120 priv->irq##which##_mask &= ~(mask); \
121} \
122static inline void intrl2_##which##_mask_set(struct bcm_sf2_priv *priv, \
123 u32 mask) \
124{ \
 125	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET);	\
126 priv->irq##which##_mask |= (mask); \
127} \
128
129SF2_IO_MACRO(core);
130SF2_IO_MACRO(reg);
131SF2_IO64_MACRO(core);
132SF2_IO_MACRO(intrl2_0);
133SF2_IO_MACRO(intrl2_1);
134SF2_IO_MACRO(fcb);
135SF2_IO_MACRO(acb);
136
137SWITCH_INTR_L2(0);
138SWITCH_INTR_L2(1);
139
140#endif /* __BCM_SF2_H */
diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h
new file mode 100644
index 000000000000..885c231b03b5
--- /dev/null
+++ b/drivers/net/dsa/bcm_sf2_regs.h
@@ -0,0 +1,227 @@
1/*
2 * Broadcom Starfighter 2 switch register defines
3 *
4 * Copyright (C) 2014, Broadcom Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11#ifndef __BCM_SF2_REGS_H
12#define __BCM_SF2_REGS_H
13
14/* Register set relative to 'REG' */
15#define REG_SWITCH_CNTRL 0x00
16#define MDIO_MASTER_SEL (1 << 0)
17
18#define REG_SWITCH_STATUS 0x04
19#define REG_DIR_DATA_WRITE 0x08
20#define REG_DIR_DATA_READ 0x0C
21
22#define REG_SWITCH_REVISION 0x18
23#define SF2_REV_MASK 0xffff
24#define SWITCH_TOP_REV_SHIFT 16
25#define SWITCH_TOP_REV_MASK 0xffff
26
27#define REG_PHY_REVISION 0x1C
28
29#define REG_SPHY_CNTRL 0x2C
30#define IDDQ_BIAS (1 << 0)
31#define EXT_PWR_DOWN (1 << 1)
32#define FORCE_DLL_EN (1 << 2)
33#define IDDQ_GLOBAL_PWR (1 << 3)
34#define CK25_DIS (1 << 4)
35#define PHY_RESET (1 << 5)
36#define PHY_PHYAD_SHIFT 8
37#define PHY_PHYAD_MASK 0x1F
38
39#define REG_RGMII_0_BASE 0x34
40#define REG_RGMII_CNTRL 0x00
41#define REG_RGMII_IB_STATUS 0x04
42#define REG_RGMII_RX_CLOCK_DELAY_CNTRL 0x08
43#define REG_RGMII_CNTRL_SIZE 0x0C
44#define REG_RGMII_CNTRL_P(x) (REG_RGMII_0_BASE + \
45 ((x) * REG_RGMII_CNTRL_SIZE))
46/* Relative to REG_RGMII_CNTRL */
47#define RGMII_MODE_EN (1 << 0)
48#define ID_MODE_DIS (1 << 1)
49#define PORT_MODE_SHIFT 2
50#define INT_EPHY (0 << PORT_MODE_SHIFT)
51#define INT_GPHY (1 << PORT_MODE_SHIFT)
52#define EXT_EPHY (2 << PORT_MODE_SHIFT)
53#define EXT_GPHY (3 << PORT_MODE_SHIFT)
54#define EXT_REVMII (4 << PORT_MODE_SHIFT)
55#define PORT_MODE_MASK 0x7
56#define RVMII_REF_SEL (1 << 5)
57#define RX_PAUSE_EN (1 << 6)
58#define TX_PAUSE_EN (1 << 7)
59#define TX_CLK_STOP_EN (1 << 8)
60#define LPI_COUNT_SHIFT 9
61#define LPI_COUNT_MASK 0x3F
62
63/* Register set relative to 'INTRL2_0' and 'INTRL2_1' */
64#define INTRL2_CPU_STATUS 0x00
65#define INTRL2_CPU_SET 0x04
66#define INTRL2_CPU_CLEAR 0x08
67#define INTRL2_CPU_MASK_STATUS 0x0c
68#define INTRL2_CPU_MASK_SET 0x10
69#define INTRL2_CPU_MASK_CLEAR 0x14
70
71/* Shared INTRL2_0 and INTRL2_1 interrupt source macros */
72#define P_LINK_UP_IRQ(x) (1 << (0 + (x)))
73#define P_LINK_DOWN_IRQ(x) (1 << (1 + (x)))
74#define P_ENERGY_ON_IRQ(x) (1 << (2 + (x)))
75#define P_ENERGY_OFF_IRQ(x) (1 << (3 + (x)))
76#define P_GPHY_IRQ(x) (1 << (4 + (x)))
77#define P_NUM_IRQ 5
78#define P_IRQ_MASK(x) (P_LINK_UP_IRQ((x)) | \
79 P_LINK_DOWN_IRQ((x)) | \
80 P_ENERGY_ON_IRQ((x)) | \
81 P_ENERGY_OFF_IRQ((x)) | \
82 P_GPHY_IRQ((x)))
83
84/* INTRL2_0 interrupt sources */
85#define P0_IRQ_OFF 0
86#define MEM_DOUBLE_IRQ (1 << 5)
87#define EEE_LPI_IRQ (1 << 6)
88#define P5_CPU_WAKE_IRQ (1 << 7)
89#define P8_CPU_WAKE_IRQ (1 << 8)
90#define P7_CPU_WAKE_IRQ (1 << 9)
91#define IEEE1588_IRQ (1 << 10)
92#define MDIO_ERR_IRQ (1 << 11)
93#define MDIO_DONE_IRQ (1 << 12)
94#define GISB_ERR_IRQ (1 << 13)
95#define UBUS_ERR_IRQ (1 << 14)
96#define FAILOVER_ON_IRQ (1 << 15)
97#define FAILOVER_OFF_IRQ (1 << 16)
98#define TCAM_SOFT_ERR_IRQ (1 << 17)
99
100/* INTRL2_1 interrupt sources */
101#define P7_IRQ_OFF 0
102#define P_IRQ_OFF(x) ((6 - (x)) * P_NUM_IRQ)
103
104/* Register set relative to 'CORE' */
105#define CORE_G_PCTL_PORT0 0x00000
106#define CORE_G_PCTL_PORT(x) (CORE_G_PCTL_PORT0 + (x * 0x4))
107#define CORE_IMP_CTL 0x00020
108#define RX_DIS (1 << 0)
109#define TX_DIS (1 << 1)
110#define RX_BCST_EN (1 << 2)
111#define RX_MCST_EN (1 << 3)
112#define RX_UCST_EN (1 << 4)
113#define G_MISTP_STATE_SHIFT 5
114#define G_MISTP_NO_STP (0 << G_MISTP_STATE_SHIFT)
115#define G_MISTP_DIS_STATE (1 << G_MISTP_STATE_SHIFT)
116#define G_MISTP_BLOCK_STATE (2 << G_MISTP_STATE_SHIFT)
117#define G_MISTP_LISTEN_STATE (3 << G_MISTP_STATE_SHIFT)
118#define G_MISTP_LEARN_STATE (4 << G_MISTP_STATE_SHIFT)
119#define G_MISTP_FWD_STATE (5 << G_MISTP_STATE_SHIFT)
120#define G_MISTP_STATE_MASK 0x7
121
122#define CORE_SWMODE 0x0002c
123#define SW_FWDG_MODE (1 << 0)
124#define SW_FWDG_EN (1 << 1)
125#define RTRY_LMT_DIS (1 << 2)
126
127#define CORE_STS_OVERRIDE_IMP 0x00038
128#define GMII_SPEED_UP_2G (1 << 6)
129#define MII_SW_OR (1 << 7)
130
131#define CORE_NEW_CTRL 0x00084
132#define IP_MC (1 << 0)
133#define OUTRANGEERR_DISCARD (1 << 1)
134#define INRANGEERR_DISCARD (1 << 2)
135#define CABLE_DIAG_LEN (1 << 3)
136#define OVERRIDE_AUTO_PD_WAR (1 << 4)
137#define EN_AUTO_PD_WAR (1 << 5)
138#define UC_FWD_EN (1 << 6)
139#define MC_FWD_EN (1 << 7)
140
141#define CORE_SWITCH_CTRL 0x00088
142#define MII_DUMB_FWDG_EN (1 << 6)
143
144#define CORE_SFT_LRN_CTRL 0x000f8
145#define SW_LEARN_CNTL(x) (1 << (x))
146
147#define CORE_STS_OVERRIDE_GMIIP_PORT(x) (0x160 + (x) * 4)
148#define LINK_STS (1 << 0)
149#define DUPLX_MODE (1 << 1)
150#define SPEED_SHIFT 2
151#define SPEED_MASK 0x3
152#define RXFLOW_CNTL (1 << 4)
153#define TXFLOW_CNTL (1 << 5)
154#define SW_OVERRIDE (1 << 6)
155
156#define CORE_WATCHDOG_CTRL 0x001e4
157#define SOFTWARE_RESET (1 << 7)
158#define EN_CHIP_RST (1 << 6)
159#define EN_SW_RESET (1 << 4)
160
161#define CORE_LNKSTS 0x00400
162#define LNK_STS_MASK 0x1ff
163
164#define CORE_SPDSTS 0x00410
165#define SPDSTS_10 0
166#define SPDSTS_100 1
167#define SPDSTS_1000 2
168#define SPDSTS_SHIFT 2
169#define SPDSTS_MASK 0x3
170
171#define CORE_DUPSTS 0x00420
172#define CORE_DUPSTS_MASK 0x1ff
173
174#define CORE_PAUSESTS 0x00428
175#define PAUSESTS_TX_PAUSE_SHIFT 9
176
177#define CORE_GMNCFGCFG 0x0800
178#define RST_MIB_CNT (1 << 0)
179#define RXBPDU_EN (1 << 1)
180
181#define CORE_IMP0_PRT_ID 0x0804
182
183#define CORE_BRCM_HDR_CTRL 0x0080c
184#define BRCM_HDR_EN_P8 (1 << 0)
185#define BRCM_HDR_EN_P5 (1 << 1)
186#define BRCM_HDR_EN_P7 (1 << 2)
187
188#define CORE_BRCM_HDR_CTRL2 0x0828
189
190#define CORE_HL_PRTC_CTRL 0x0940
191#define ARP_EN (1 << 0)
192#define RARP_EN (1 << 1)
193#define DHCP_EN (1 << 2)
194#define ICMPV4_EN (1 << 3)
195#define ICMPV6_EN (1 << 4)
196#define ICMPV6_FWD_MODE (1 << 5)
197#define IGMP_DIP_EN (1 << 8)
198#define IGMP_RPTLVE_EN (1 << 9)
199#define IGMP_RTPLVE_FWD_MODE (1 << 10)
200#define IGMP_QRY_EN (1 << 11)
201#define IGMP_QRY_FWD_MODE (1 << 12)
202#define IGMP_UKN_EN (1 << 13)
203#define IGMP_UKN_FWD_MODE (1 << 14)
204#define MLD_RPTDONE_EN (1 << 15)
205#define MLD_RPTDONE_FWD_MODE (1 << 16)
206#define MLD_QRY_EN (1 << 17)
207#define MLD_QRY_FWD_MODE (1 << 18)
208
209#define CORE_RST_MIB_CNT_EN 0x0950
210
211#define CORE_BRCM_HDR_RX_DIS 0x0980
212#define CORE_BRCM_HDR_TX_DIS 0x0988
213
214#define CORE_MEM_PSM_VDD_CTRL 0x2380
215#define P_TXQ_PSM_VDD_SHIFT 2
216#define P_TXQ_PSM_VDD_MASK 0x3
217#define P_TXQ_PSM_VDD(x) (P_TXQ_PSM_VDD_MASK << \
218 ((x) * P_TXQ_PSM_VDD_SHIFT))
219
220#define CORE_P0_MIB_OFFSET 0x8000
221#define P_MIB_SIZE 0x400
222#define CORE_P_MIB_OFFSET(x) (CORE_P0_MIB_OFFSET + (x) * P_MIB_SIZE)
223
224#define CORE_PORT_VLAN_CTL_PORT(x) (0xc400 + ((x) * 0x8))
225#define PORT_VLAN_CTRL_MASK 0x1ff
226
227#endif /* __BCM_SF2_REGS_H */
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index cc25a3a9e7cf..caade30820d5 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -271,7 +271,6 @@
271#define DMA_PBL_X8_DISABLE 0x00 271#define DMA_PBL_X8_DISABLE 0x00
272#define DMA_PBL_X8_ENABLE 0x01 272#define DMA_PBL_X8_ENABLE 0x01
273 273
274
275/* MAC register offsets */ 274/* MAC register offsets */
276#define MAC_TCR 0x0000 275#define MAC_TCR 0x0000
277#define MAC_RCR 0x0004 276#define MAC_RCR 0x0004
@@ -792,7 +791,6 @@
792#define MTL_Q_DISABLED 0x00 791#define MTL_Q_DISABLED 0x00
793#define MTL_Q_ENABLED 0x02 792#define MTL_Q_ENABLED 0x02
794 793
795
796/* MTL traffic class register offsets 794/* MTL traffic class register offsets
797 * Multiple traffic classes can be active. The first class has registers 795 * Multiple traffic classes can be active. The first class has registers
798 * that begin at 0x1100. Each subsequent queue has registers that 796 * that begin at 0x1100. Each subsequent queue has registers that
@@ -815,7 +813,6 @@
815#define MTL_TSA_SP 0x00 813#define MTL_TSA_SP 0x00
816#define MTL_TSA_ETS 0x02 814#define MTL_TSA_ETS 0x02
817 815
818
819/* PCS MMD select register offset 816/* PCS MMD select register offset
820 * The MMD select register is used for accessing PCS registers 817 * The MMD select register is used for accessing PCS registers
821 * when the underlying APB3 interface is using indirect addressing. 818 * when the underlying APB3 interface is using indirect addressing.
@@ -825,7 +822,6 @@
825 */ 822 */
826#define PCS_MMD_SELECT 0xff 823#define PCS_MMD_SELECT 0xff
827 824
828
829/* Descriptor/Packet entry bit positions and sizes */ 825/* Descriptor/Packet entry bit positions and sizes */
830#define RX_PACKET_ERRORS_CRC_INDEX 2 826#define RX_PACKET_ERRORS_CRC_INDEX 2
831#define RX_PACKET_ERRORS_CRC_WIDTH 1 827#define RX_PACKET_ERRORS_CRC_WIDTH 1
@@ -929,7 +925,6 @@
929#define MDIO_AN_COMP_STAT 0x0030 925#define MDIO_AN_COMP_STAT 0x0030
930#endif 926#endif
931 927
932
933/* Bit setting and getting macros 928/* Bit setting and getting macros
934 * The get macro will extract the current bit field value from within 929 * The get macro will extract the current bit field value from within
935 * the variable 930 * the variable
@@ -957,7 +952,6 @@ do { \
957 ((0x1 << (_width)) - 1)) << (_index))); \ 952 ((0x1 << (_width)) - 1)) << (_index))); \
958} while (0) 953} while (0)
959 954
960
961/* Bit setting and getting macros based on register fields 955/* Bit setting and getting macros based on register fields
962 * The get macro uses the bit field definitions formed using the input 956 * The get macro uses the bit field definitions formed using the input
963 * names to extract the current bit field value from within the 957 * names to extract the current bit field value from within the
@@ -986,7 +980,6 @@ do { \
986 _prefix##_##_field##_INDEX, \ 980 _prefix##_##_field##_INDEX, \
987 _prefix##_##_field##_WIDTH, (_val)) 981 _prefix##_##_field##_WIDTH, (_val))
988 982
989
990/* Macros for reading or writing registers 983/* Macros for reading or writing registers
991 * The ioread macros will get bit fields or full values using the 984 * The ioread macros will get bit fields or full values using the
992 * register definitions formed using the input names 985 * register definitions formed using the input names
@@ -1014,7 +1007,6 @@ do { \
1014 XGMAC_IOWRITE((_pdata), _reg, reg_val); \ 1007 XGMAC_IOWRITE((_pdata), _reg, reg_val); \
1015} while (0) 1008} while (0)
1016 1009
1017
1018/* Macros for reading or writing MTL queue or traffic class registers 1010/* Macros for reading or writing MTL queue or traffic class registers
1019 * Similar to the standard read and write macros except that the 1011 * Similar to the standard read and write macros except that the
1020 * base register value is calculated by the queue or traffic class number 1012 * base register value is calculated by the queue or traffic class number
@@ -1041,7 +1033,6 @@ do { \
1041 XGMAC_MTL_IOWRITE((_pdata), (_n), _reg, reg_val); \ 1033 XGMAC_MTL_IOWRITE((_pdata), (_n), _reg, reg_val); \
1042} while (0) 1034} while (0)
1043 1035
1044
1045/* Macros for reading or writing DMA channel registers 1036/* Macros for reading or writing DMA channel registers
1046 * Similar to the standard read and write macros except that the 1037 * Similar to the standard read and write macros except that the
1047 * base register value is obtained from the ring 1038 * base register value is obtained from the ring
@@ -1066,7 +1057,6 @@ do { \
1066 XGMAC_DMA_IOWRITE((_channel), _reg, reg_val); \ 1057 XGMAC_DMA_IOWRITE((_channel), _reg, reg_val); \
1067} while (0) 1058} while (0)
1068 1059
1069
1070/* Macros for building, reading or writing register values or bits 1060/* Macros for building, reading or writing register values or bits
1071 * within the register values of XPCS registers. 1061 * within the register values of XPCS registers.
1072 */ 1062 */
@@ -1076,7 +1066,6 @@ do { \
1076#define XPCS_IOREAD(_pdata, _off) \ 1066#define XPCS_IOREAD(_pdata, _off) \
1077 ioread32((_pdata)->xpcs_regs + (_off)) 1067 ioread32((_pdata)->xpcs_regs + (_off))
1078 1068
1079
1080/* Macros for building, reading or writing register values or bits 1069/* Macros for building, reading or writing register values or bits
1081 * using MDIO. Different from above because of the use of standardized 1070 * using MDIO. Different from above because of the use of standardized
1082 * Linux include values. No shifting is performed with the bit 1071 * Linux include values. No shifting is performed with the bit
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
index 7d6a49b24321..8a50b01c2686 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
@@ -120,7 +120,6 @@
120#include "xgbe.h" 120#include "xgbe.h"
121#include "xgbe-common.h" 121#include "xgbe-common.h"
122 122
123
124static int xgbe_dcb_ieee_getets(struct net_device *netdev, 123static int xgbe_dcb_ieee_getets(struct net_device *netdev,
125 struct ieee_ets *ets) 124 struct ieee_ets *ets)
126{ 125{
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
index a3c11355a34d..76479d04b903 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -121,7 +121,6 @@
121#include "xgbe.h" 121#include "xgbe.h"
122#include "xgbe-common.h" 122#include "xgbe-common.h"
123 123
124
125static ssize_t xgbe_common_read(char __user *buffer, size_t count, 124static ssize_t xgbe_common_read(char __user *buffer, size_t count,
126 loff_t *ppos, unsigned int value) 125 loff_t *ppos, unsigned int value)
127{ 126{
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index 1c5d62e8dab6..6fc5da01437d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -117,7 +117,6 @@
117#include "xgbe.h" 117#include "xgbe.h"
118#include "xgbe-common.h" 118#include "xgbe-common.h"
119 119
120
121static void xgbe_unmap_skb(struct xgbe_prv_data *, struct xgbe_ring_data *); 120static void xgbe_unmap_skb(struct xgbe_prv_data *, struct xgbe_ring_data *);
122 121
123static void xgbe_free_ring(struct xgbe_prv_data *pdata, 122static void xgbe_free_ring(struct xgbe_prv_data *pdata,
@@ -524,11 +523,8 @@ static void xgbe_realloc_skb(struct xgbe_channel *channel)
524 523
525 /* Allocate skb & assign to each rdesc */ 524 /* Allocate skb & assign to each rdesc */
526 skb = dev_alloc_skb(pdata->rx_buf_size); 525 skb = dev_alloc_skb(pdata->rx_buf_size);
527 if (skb == NULL) { 526 if (skb == NULL)
528 netdev_alert(pdata->netdev,
529 "failed to allocate skb\n");
530 break; 527 break;
531 }
532 skb_dma = dma_map_single(pdata->dev, skb->data, 528 skb_dma = dma_map_single(pdata->dev, skb->data,
533 pdata->rx_buf_size, DMA_FROM_DEVICE); 529 pdata->rx_buf_size, DMA_FROM_DEVICE);
534 if (dma_mapping_error(pdata->dev, skb_dma)) { 530 if (dma_mapping_error(pdata->dev, skb_dma)) {
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index ea273836d999..9da3a03e8c07 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -122,7 +122,6 @@
122#include "xgbe.h" 122#include "xgbe.h"
123#include "xgbe-common.h" 123#include "xgbe-common.h"
124 124
125
126static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata, 125static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
127 unsigned int usec) 126 unsigned int usec)
128{ 127{
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index b26d75856553..29554992215a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -126,7 +126,6 @@
126#include "xgbe.h" 126#include "xgbe.h"
127#include "xgbe-common.h" 127#include "xgbe-common.h"
128 128
129
130static int xgbe_poll(struct napi_struct *, int); 129static int xgbe_poll(struct napi_struct *, int);
131static void xgbe_set_rx_mode(struct net_device *); 130static void xgbe_set_rx_mode(struct net_device *);
132 131
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 46f613028e9c..49508ec98b72 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -121,7 +121,6 @@
121#include "xgbe.h" 121#include "xgbe.h"
122#include "xgbe-common.h" 122#include "xgbe-common.h"
123 123
124
125struct xgbe_stats { 124struct xgbe_stats {
126 char stat_string[ETH_GSTRING_LEN]; 125 char stat_string[ETH_GSTRING_LEN];
127 int stat_size; 126 int stat_size;
@@ -173,6 +172,7 @@ static const struct xgbe_stats xgbe_gstring_stats[] = {
 	XGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
 	XGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
 };
+
 #define XGBE_STATS_COUNT	ARRAY_SIZE(xgbe_gstring_stats)
 
 static void xgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index bdf9cfa70e88..f5a8fa03921a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -128,7 +128,6 @@
 #include "xgbe.h"
 #include "xgbe-common.h"
 
-
 MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(XGBE_DRV_VERSION);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 6d2221e023f4..363b210560f3 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -123,7 +123,6 @@
 #include "xgbe.h"
 #include "xgbe-common.h"
 
-
 static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
 {
 	struct xgbe_prv_data *pdata = mii->priv;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
index 37e64cfa5718..a1bf9d1cdae1 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
@@ -122,7 +122,6 @@
 #include "xgbe.h"
 #include "xgbe-common.h"
 
-
 static cycle_t xgbe_cc_read(const struct cyclecounter *cc)
 {
 	struct xgbe_prv_data *pdata = container_of(cc,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index e9fe6e6ddcc3..789957d43a13 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -128,7 +128,6 @@
 #include <linux/net_tstamp.h>
 #include <net/dcbnl.h>
 
-
 #define XGBE_DRV_NAME		"amd-xgbe"
 #define XGBE_DRV_VERSION	"1.0.0-a"
 #define XGBE_DRV_DESC		"AMD 10 Gigabit Ethernet Driver"
@@ -199,7 +198,6 @@
 	((_ring)->rdata +					\
 	 ((_idx) & ((_ring)->rdesc_count - 1)))
 
-
 /* Default coalescing parameters */
 #define XGMAC_INIT_DMA_TX_USECS		50
 #define XGMAC_INIT_DMA_TX_FRAMES	25
diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig
index 514c57fd26f1..89e04fde5f4e 100644
--- a/drivers/net/ethernet/arc/Kconfig
+++ b/drivers/net/ethernet/arc/Kconfig
@@ -17,10 +17,14 @@ config NET_VENDOR_ARC
 
 if NET_VENDOR_ARC
 
-config ARC_EMAC
-	tristate "ARC EMAC support"
+config ARC_EMAC_CORE
+	tristate
 	select MII
 	select PHYLIB
+
+config ARC_EMAC
+	tristate "ARC EMAC support"
+	select ARC_EMAC_CORE
 	depends on OF_IRQ
 	depends on OF_NET
 	---help---
diff --git a/drivers/net/ethernet/arc/Makefile b/drivers/net/ethernet/arc/Makefile
index 00c8657637d5..241bb809f351 100644
--- a/drivers/net/ethernet/arc/Makefile
+++ b/drivers/net/ethernet/arc/Makefile
@@ -3,4 +3,5 @@
 #
 
 arc_emac-objs := emac_main.o emac_mdio.o
-obj-$(CONFIG_ARC_EMAC) += arc_emac.o
+obj-$(CONFIG_ARC_EMAC_CORE) += arc_emac.o
+obj-$(CONFIG_ARC_EMAC) += emac_arc.o
diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h
index 36cc9bd07c47..eb2ba67ac711 100644
--- a/drivers/net/ethernet/arc/emac.h
+++ b/drivers/net/ethernet/arc/emac.h
@@ -124,6 +124,8 @@ struct buffer_state {
  */
 struct arc_emac_priv {
 	/* Devices */
+	const char *drv_name;
+	const char *drv_version;
 	struct device *dev;
 	struct phy_device *phy_dev;
 	struct mii_bus *bus;
@@ -204,7 +206,9 @@ static inline void arc_reg_clr(struct arc_emac_priv *priv, int reg, int mask)
 	arc_reg_set(priv, reg, value & ~mask);
 }
 
-int arc_mdio_probe(struct platform_device *pdev, struct arc_emac_priv *priv);
+int arc_mdio_probe(struct arc_emac_priv *priv);
 int arc_mdio_remove(struct arc_emac_priv *priv);
+int arc_emac_probe(struct net_device *ndev, int interface);
+int arc_emac_remove(struct net_device *ndev);
 
 #endif	/* ARC_EMAC_H */
diff --git a/drivers/net/ethernet/arc/emac_arc.c b/drivers/net/ethernet/arc/emac_arc.c
new file mode 100644
index 000000000000..f9cb99bfb511
--- /dev/null
+++ b/drivers/net/ethernet/arc/emac_arc.c
@@ -0,0 +1,95 @@
+/**
+ * emac_arc.c - ARC EMAC specific glue layer
+ *
+ * Copyright (C) 2014 Romain Perier
+ *
+ * Romain Perier <romain.perier@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/of_net.h>
+#include <linux/platform_device.h>
+
+#include "emac.h"
+
+#define DRV_NAME	"emac_arc"
+#define DRV_VERSION	"1.0"
+
+static int emac_arc_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct net_device *ndev;
+	struct arc_emac_priv *priv;
+	int interface, err;
+
+	if (!dev->of_node)
+		return -ENODEV;
+
+	ndev = alloc_etherdev(sizeof(struct arc_emac_priv));
+	if (!ndev)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, ndev);
+	SET_NETDEV_DEV(ndev, dev);
+
+	priv = netdev_priv(ndev);
+	priv->drv_name = DRV_NAME;
+	priv->drv_version = DRV_VERSION;
+
+	interface = of_get_phy_mode(dev->of_node);
+	if (interface < 0)
+		interface = PHY_INTERFACE_MODE_MII;
+
+	priv->clk = devm_clk_get(dev, "hclk");
+	if (IS_ERR(priv->clk)) {
+		dev_err(dev, "failed to retrieve host clock from device tree\n");
+		err = -EINVAL;
+		goto out_netdev;
+	}
+
+	err = arc_emac_probe(ndev, interface);
+out_netdev:
+	if (err)
+		free_netdev(ndev);
+	return err;
+}
+
+static int emac_arc_remove(struct platform_device *pdev)
+{
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	int err;
+
+	err = arc_emac_remove(ndev);
+	free_netdev(ndev);
+	return err;
+}
+
+static const struct of_device_id emac_arc_dt_ids[] = {
+	{ .compatible = "snps,arc-emac" },
+	{ /* Sentinel */ }
+};
+
+static struct platform_driver emac_arc_driver = {
+	.probe = emac_arc_probe,
+	.remove = emac_arc_remove,
+	.driver = {
+		.name = DRV_NAME,
+		.of_match_table = emac_arc_dt_ids,
+	},
+};
+
+module_platform_driver(emac_arc_driver);
+
+MODULE_AUTHOR("Romain Perier <romain.perier@gmail.com>");
+MODULE_DESCRIPTION("ARC EMAC platform driver");
+MODULE_LICENSE("GPL");
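
The new emac_arc.c above is deliberately thin: all MAC handling stays in the shared core, and the glue layer only allocates the net_device, fills in drv_name/drv_version and priv->clk, and calls the exported arc_emac_probe()/arc_emac_remove(). A second SoC port would follow the same shape. Below is a minimal sketch of such a hypothetical glue layer; the "foo,foo-emac" compatible string and all emac_foo_* names are illustrative only, not part of this patch:

#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>

#include "emac.h"

static int emac_foo_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct arc_emac_priv *priv;
	int interface, err;

	ndev = alloc_etherdev(sizeof(struct arc_emac_priv));
	if (!ndev)
		return -ENOMEM;
	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);

	priv = netdev_priv(ndev);
	priv->drv_name = "emac_foo";	/* reported via ethtool -i */
	priv->drv_version = "0.1";

	/* netdev_priv() memory is zeroed, so priv->clk stays NULL here and
	 * the core falls back to the "clock-frequency" DT property.
	 * SoC-specific clock or pinctrl setup would go in this spot.
	 */
	interface = of_get_phy_mode(pdev->dev.of_node);
	if (interface < 0)
		interface = PHY_INTERFACE_MODE_MII;

	err = arc_emac_probe(ndev, interface);	/* shared core */
	if (err)
		free_netdev(ndev);
	return err;
}

static int emac_foo_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	int err = arc_emac_remove(ndev);

	free_netdev(ndev);
	return err;
}

static const struct of_device_id emac_foo_dt_ids[] = {
	{ .compatible = "foo,foo-emac" },	/* hypothetical binding */
	{ /* Sentinel */ }
};

static struct platform_driver emac_foo_driver = {
	.probe = emac_foo_probe,
	.remove = emac_foo_remove,
	.driver = {
		.name = "emac_foo",
		.of_match_table = emac_foo_dt_ids,
	},
};
module_platform_driver(emac_foo_driver);
MODULE_LICENSE("GPL");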
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index fe5cfeace6e3..a7773923a7a0 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -26,8 +26,6 @@
 
 #include "emac.h"
 
-#define DRV_NAME	"arc_emac"
-#define DRV_VERSION	"1.0"
 
 /**
  * arc_emac_adjust_link - Adjust the PHY link duplex.
@@ -120,8 +118,10 @@ static int arc_emac_set_settings(struct net_device *ndev,
 static void arc_emac_get_drvinfo(struct net_device *ndev,
 				 struct ethtool_drvinfo *info)
 {
-	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
-	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	struct arc_emac_priv *priv = netdev_priv(ndev);
+
+	strlcpy(info->driver, priv->drv_name, sizeof(info->driver));
+	strlcpy(info->version, priv->drv_version, sizeof(info->version));
 }
 
 static const struct ethtool_ops arc_emac_ethtool_ops = {
@@ -671,46 +671,38 @@ static const struct net_device_ops arc_emac_netdev_ops = {
 #endif
 };
 
-static int arc_emac_probe(struct platform_device *pdev)
+int arc_emac_probe(struct net_device *ndev, int interface)
 {
+	struct device *dev = ndev->dev.parent;
 	struct resource res_regs;
 	struct device_node *phy_node;
 	struct arc_emac_priv *priv;
-	struct net_device *ndev;
 	const char *mac_addr;
 	unsigned int id, clock_frequency, irq;
 	int err;
 
-	if (!pdev->dev.of_node)
-		return -ENODEV;
 
 	/* Get PHY from device tree */
-	phy_node = of_parse_phandle(pdev->dev.of_node, "phy", 0);
+	phy_node = of_parse_phandle(dev->of_node, "phy", 0);
 	if (!phy_node) {
-		dev_err(&pdev->dev, "failed to retrieve phy description from device tree\n");
+		dev_err(dev, "failed to retrieve phy description from device tree\n");
 		return -ENODEV;
 	}
 
 	/* Get EMAC registers base address from device tree */
-	err = of_address_to_resource(pdev->dev.of_node, 0, &res_regs);
+	err = of_address_to_resource(dev->of_node, 0, &res_regs);
 	if (err) {
-		dev_err(&pdev->dev, "failed to retrieve registers base from device tree\n");
+		dev_err(dev, "failed to retrieve registers base from device tree\n");
 		return -ENODEV;
 	}
 
 	/* Get IRQ from device tree */
-	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+	irq = irq_of_parse_and_map(dev->of_node, 0);
 	if (!irq) {
-		dev_err(&pdev->dev, "failed to retrieve <irq> value from device tree\n");
+		dev_err(dev, "failed to retrieve <irq> value from device tree\n");
 		return -ENODEV;
 	}
 
-	ndev = alloc_etherdev(sizeof(struct arc_emac_priv));
-	if (!ndev)
-		return -ENOMEM;
-
-	platform_set_drvdata(pdev, ndev);
-	SET_NETDEV_DEV(ndev, &pdev->dev);
 
 	ndev->netdev_ops = &arc_emac_netdev_ops;
 	ndev->ethtool_ops = &arc_emac_ethtool_ops;
@@ -719,60 +711,57 @@ static int arc_emac_probe(struct platform_device *pdev)
 	ndev->flags &= ~IFF_MULTICAST;
 
 	priv = netdev_priv(ndev);
-	priv->dev = &pdev->dev;
+	priv->dev = dev;
 
-	priv->regs = devm_ioremap_resource(&pdev->dev, &res_regs);
+	priv->regs = devm_ioremap_resource(dev, &res_regs);
 	if (IS_ERR(priv->regs)) {
-		err = PTR_ERR(priv->regs);
-		goto out_netdev;
+		return PTR_ERR(priv->regs);
 	}
-	dev_dbg(&pdev->dev, "Registers base address is 0x%p\n", priv->regs);
+	dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs);
 
-	priv->clk = of_clk_get(pdev->dev.of_node, 0);
-	if (IS_ERR(priv->clk)) {
-		/* Get CPU clock frequency from device tree */
-		if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
-					 &clock_frequency)) {
-			dev_err(&pdev->dev, "failed to retrieve <clock-frequency> from device tree\n");
-			err = -EINVAL;
-			goto out_netdev;
-		}
-	} else {
+	if (priv->clk) {
 		err = clk_prepare_enable(priv->clk);
 		if (err) {
-			dev_err(&pdev->dev, "failed to enable clock\n");
-			goto out_clkget;
+			dev_err(dev, "failed to enable clock\n");
+			return err;
 		}
 
 		clock_frequency = clk_get_rate(priv->clk);
+	} else {
+		/* Get CPU clock frequency from device tree */
+		if (of_property_read_u32(dev->of_node, "clock-frequency",
+					 &clock_frequency)) {
+			dev_err(dev, "failed to retrieve <clock-frequency> from device tree\n");
+			return -EINVAL;
+		}
 	}
 
 	id = arc_reg_get(priv, R_ID);
 
 	/* Check for EMAC revision 5 or 7, magic number */
 	if (!(id == 0x0005fd02 || id == 0x0007fd02)) {
-		dev_err(&pdev->dev, "ARC EMAC not detected, id=0x%x\n", id);
+		dev_err(dev, "ARC EMAC not detected, id=0x%x\n", id);
 		err = -ENODEV;
 		goto out_clken;
 	}
-	dev_info(&pdev->dev, "ARC EMAC detected with id: 0x%x\n", id);
+	dev_info(dev, "ARC EMAC detected with id: 0x%x\n", id);
 
 	/* Set poll rate so that it polls every 1 ms */
 	arc_reg_set(priv, R_POLLRATE, clock_frequency / 1000000);
 
 	ndev->irq = irq;
-	dev_info(&pdev->dev, "IRQ is %d\n", ndev->irq);
+	dev_info(dev, "IRQ is %d\n", ndev->irq);
 
 	/* Register interrupt handler for device */
-	err = devm_request_irq(&pdev->dev, ndev->irq, arc_emac_intr, 0,
+	err = devm_request_irq(dev, ndev->irq, arc_emac_intr, 0,
 			       ndev->name, ndev);
 	if (err) {
-		dev_err(&pdev->dev, "could not allocate IRQ\n");
+		dev_err(dev, "could not allocate IRQ\n");
 		goto out_clken;
 	}
 
 	/* Get MAC address from device tree */
-	mac_addr = of_get_mac_address(pdev->dev.of_node);
+	mac_addr = of_get_mac_address(dev->of_node);
 
 	if (mac_addr)
 		memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
@@ -780,14 +769,14 @@ static int arc_emac_probe(struct platform_device *pdev)
 		eth_hw_addr_random(ndev);
 
 	arc_emac_set_address_internal(ndev);
-	dev_info(&pdev->dev, "MAC address is now %pM\n", ndev->dev_addr);
+	dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr);
 
 	/* Do 1 allocation instead of 2 separate ones for Rx and Tx BD rings */
-	priv->rxbd = dmam_alloc_coherent(&pdev->dev, RX_RING_SZ + TX_RING_SZ,
+	priv->rxbd = dmam_alloc_coherent(dev, RX_RING_SZ + TX_RING_SZ,
 					 &priv->rxbd_dma, GFP_KERNEL);
 
 	if (!priv->rxbd) {
-		dev_err(&pdev->dev, "failed to allocate data buffers\n");
+		dev_err(dev, "failed to allocate data buffers\n");
 		err = -ENOMEM;
 		goto out_clken;
 	}
@@ -795,31 +784,31 @@ static int arc_emac_probe(struct platform_device *pdev)
 	priv->txbd = priv->rxbd + RX_BD_NUM;
 
 	priv->txbd_dma = priv->rxbd_dma + RX_RING_SZ;
-	dev_dbg(&pdev->dev, "EMAC Device addr: Rx Ring [0x%x], Tx Ring[%x]\n",
+	dev_dbg(dev, "EMAC Device addr: Rx Ring [0x%x], Tx Ring[%x]\n",
 		(unsigned int)priv->rxbd_dma, (unsigned int)priv->txbd_dma);
 
-	err = arc_mdio_probe(pdev, priv);
+	err = arc_mdio_probe(priv);
 	if (err) {
-		dev_err(&pdev->dev, "failed to probe MII bus\n");
+		dev_err(dev, "failed to probe MII bus\n");
 		goto out_clken;
 	}
 
 	priv->phy_dev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0,
-				       PHY_INTERFACE_MODE_MII);
+				       interface);
 	if (!priv->phy_dev) {
-		dev_err(&pdev->dev, "of_phy_connect() failed\n");
+		dev_err(dev, "of_phy_connect() failed\n");
 		err = -ENODEV;
 		goto out_mdio;
 	}
 
-	dev_info(&pdev->dev, "connected to %s phy with id 0x%x\n",
+	dev_info(dev, "connected to %s phy with id 0x%x\n",
 		 priv->phy_dev->drv->name, priv->phy_dev->phy_id);
 
 	netif_napi_add(ndev, &priv->napi, arc_emac_poll, ARC_EMAC_NAPI_WEIGHT);
 
 	err = register_netdev(ndev);
 	if (err) {
-		dev_err(&pdev->dev, "failed to register network device\n");
+		dev_err(dev, "failed to register network device\n");
 		goto out_netif_api;
 	}
 
@@ -832,19 +821,14 @@ out_netif_api:
 out_mdio:
 	arc_mdio_remove(priv);
 out_clken:
-	if (!IS_ERR(priv->clk))
+	if (priv->clk)
 		clk_disable_unprepare(priv->clk);
-out_clkget:
-	if (!IS_ERR(priv->clk))
-		clk_put(priv->clk);
-out_netdev:
-	free_netdev(ndev);
 	return err;
 }
+EXPORT_SYMBOL_GPL(arc_emac_probe);
 
-static int arc_emac_remove(struct platform_device *pdev)
+int arc_emac_remove(struct net_device *ndev)
 {
-	struct net_device *ndev = platform_get_drvdata(pdev);
 	struct arc_emac_priv *priv = netdev_priv(ndev);
 
 	phy_disconnect(priv->phy_dev);
@@ -855,31 +839,12 @@ static int arc_emac_remove(struct platform_device *pdev)
 
 	if (!IS_ERR(priv->clk)) {
 		clk_disable_unprepare(priv->clk);
-		clk_put(priv->clk);
 	}
 
-	free_netdev(ndev);
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(arc_emac_remove);
-
-static const struct of_device_id arc_emac_dt_ids[] = {
-	{ .compatible = "snps,arc-emac" },
-	{ /* Sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, arc_emac_dt_ids);
-
-static struct platform_driver arc_emac_driver = {
-	.probe = arc_emac_probe,
-	.remove = arc_emac_remove,
-	.driver = {
-		.name = DRV_NAME,
-		.owner = THIS_MODULE,
-		.of_match_table  = arc_emac_dt_ids,
-	},
-};
-
-module_platform_driver(arc_emac_driver);
 
 MODULE_AUTHOR("Alexey Brodkin <abrodkin@synopsys.com>");
 MODULE_DESCRIPTION("ARC EMAC driver");
diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c
index 26ba2423f33a..d5ee986936da 100644
--- a/drivers/net/ethernet/arc/emac_mdio.c
+++ b/drivers/net/ethernet/arc/emac_mdio.c
@@ -100,7 +100,6 @@ static int arc_mdio_write(struct mii_bus *bus, int phy_addr,
 
 /**
  * arc_mdio_probe - MDIO probe function.
- * @pdev: Pointer to platform device.
  * @priv: Pointer to ARC EMAC private data structure.
  *
  * returns: 0 on success, -ENOMEM when mdiobus_alloc
@@ -108,7 +107,7 @@ static int arc_mdio_write(struct mii_bus *bus, int phy_addr,
  *
  * Sets up and registers the MDIO interface.
  */
-int arc_mdio_probe(struct platform_device *pdev, struct arc_emac_priv *priv)
+int arc_mdio_probe(struct arc_emac_priv *priv)
 {
 	struct mii_bus *bus;
 	int error;
@@ -124,9 +123,9 @@ int arc_mdio_probe(struct platform_device *pdev, struct arc_emac_priv *priv)
 	bus->read = &arc_mdio_read;
 	bus->write = &arc_mdio_write;
 
-	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);
+	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", bus->name);
 
-	error = of_mdiobus_register(bus, pdev->dev.of_node);
+	error = of_mdiobus_register(bus, priv->dev->of_node);
 	if (error) {
 		dev_err(priv->dev, "cannot register MDIO bus %s\n", bus->name);
 		mdiobus_free(bus);
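
With the platform_device argument gone, arc_mdio_probe() now reaches the device-tree node through priv->dev, so any glue layer that fills in priv->dev gets MDIO registration for free. A minimal sketch of the same registration pattern in isolation (generic mdiobus/of_mdio API; the example_* names are illustrative):

#include <linux/of_mdio.h>
#include <linux/phy.h>

static int example_mdio_register(struct device *dev, void *ctx,
				 int (*rd)(struct mii_bus *, int, int),
				 int (*wr)(struct mii_bus *, int, int, u16))
{
	struct mii_bus *bus;
	int error;

	bus = mdiobus_alloc();
	if (!bus)
		return -ENOMEM;

	bus->priv = ctx;
	bus->parent = dev;
	bus->name = "example MII bus";	/* hypothetical name */
	bus->read = rd;
	bus->write = wr;
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", bus->name);

	/* PHYs come from the children of dev->of_node; no pdev needed */
	error = of_mdiobus_register(bus, dev->of_node);
	if (error)
		mdiobus_free(bus);
	return error;
}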
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index d8d07a818b89..c3e260c21734 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -122,6 +122,7 @@ config TIGON3
 config BNX2X
 	tristate "Broadcom NetXtremeII 10Gb support"
 	depends on PCI
+	select PTP_1588_CLOCK
 	select FW_LOADER
 	select ZLIB_INFLATE
 	select LIBCRC32C
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 6f4e18644bd4..662cf2222873 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -139,6 +139,15 @@ static int bcm_sysport_set_rx_csum(struct net_device *dev,
 	else
 		reg &= ~RXCHK_SKIP_FCS;
 
+	/* If Broadcom tags are enabled (e.g: using a switch), make
+	 * sure we tell the RXCHK hardware to expect a 4-bytes Broadcom
+	 * tag after the Ethernet MAC Source Address.
+	 */
+	if (netdev_uses_dsa(dev))
+		reg |= RXCHK_BRCM_TAG_EN;
+	else
+		reg &= ~RXCHK_BRCM_TAG_EN;
+
 	rxchk_writel(priv, reg, RXCHK_CONTROL);
 
 	return 0;
@@ -1062,16 +1071,19 @@ static void bcm_sysport_adj_link(struct net_device *dev)
 	if (!phydev->pause)
 		cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
 
-	if (changed) {
+	if (!changed)
+		return;
+
+	if (phydev->link) {
 		reg = umac_readl(priv, UMAC_CMD);
 		reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
 			CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
 			CMD_TX_PAUSE_IGNORE);
 		reg |= cmd_bits;
 		umac_writel(priv, reg, UMAC_CMD);
-
-		phy_print_status(priv->phydev);
 	}
+
+	phy_print_status(priv->phydev);
 }
 
 static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index d777fae86988..86e94517a536 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -20,13 +20,17 @@
 #include <linux/types.h>
 #include <linux/pci_regs.h>
 
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
+#include <linux/clocksource.h>
+
 /* compilation time flags */
 
 /* define this to make the driver freeze on error to allow getting debug info
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION	"1.78.19-0"
+#define DRV_MODULE_VERSION	"1.710.51-0"
 #define DRV_MODULE_RELDATE	"2014/02/10"
 #define BNX2X_BC_VER		0x040200
 
@@ -70,6 +74,7 @@ enum bnx2x_int_mode {
 #define BNX2X_MSG_SP			0x0100000 /* was: NETIF_MSG_INTR */
 #define BNX2X_MSG_FP			0x0200000 /* was: NETIF_MSG_INTR */
 #define BNX2X_MSG_IOV			0x0800000
+#define BNX2X_MSG_PTP			0x1000000
 #define BNX2X_MSG_IDLE			0x2000000 /* used for idle check*/
 #define BNX2X_MSG_ETHTOOL		0x4000000
 #define BNX2X_MSG_DCB			0x8000000
@@ -1587,10 +1592,11 @@ struct bnx2x {
 #define USING_SINGLE_MSIX_FLAG		(1 << 20)
 #define BC_SUPPORTS_DCBX_MSG_NON_PMF	(1 << 21)
 #define IS_VF_FLAG			(1 << 22)
-#define INTERRUPTS_ENABLED_FLAG		(1 << 23)
-#define BC_SUPPORTS_RMMOD_CMD		(1 << 24)
-#define HAS_PHYS_PORT_ID		(1 << 25)
-#define AER_ENABLED			(1 << 26)
+#define BC_SUPPORTS_RMMOD_CMD		(1 << 23)
+#define HAS_PHYS_PORT_ID		(1 << 24)
+#define AER_ENABLED			(1 << 25)
+#define PTP_SUPPORTED			(1 << 26)
+#define TX_TIMESTAMPING_EN		(1 << 27)
 
 #define BP_NOMCP(bp)			((bp)->flags & NO_MCP_FLAG)
 
@@ -1684,13 +1690,9 @@ struct bnx2x {
 #define BNX2X_STATE_ERROR		0xf000
 
 #define BNX2X_MAX_PRIORITY		8
-#define BNX2X_MAX_ENTRIES_PER_PRI	16
-#define BNX2X_MAX_COS			3
-#define BNX2X_MAX_TX_COS		2
 	int			num_queues;
 	uint			num_ethernet_queues;
 	uint			num_cnic_queues;
-	int			num_napi_queues;
 	int			disable_tpa;
 
 	u32			rx_mode;
@@ -1933,6 +1935,19 @@ struct bnx2x {
 
 	u8			phys_port_id[ETH_ALEN];
 
+	/* PTP related context */
+	struct ptp_clock *ptp_clock;
+	struct ptp_clock_info ptp_clock_info;
+	struct work_struct ptp_task;
+	struct cyclecounter cyclecounter;
+	struct timecounter timecounter;
+	bool timecounter_init_done;
+	struct sk_buff *ptp_tx_skb;
+	unsigned long ptp_tx_start;
+	bool hwtstamp_ioctl_called;
+	u16 tx_type;
+	u16 rx_filter;
+
 	struct bnx2x_link_report_data	vf_link_vars;
 };
 
@@ -2559,4 +2574,11 @@ void bnx2x_update_mng_version(struct bnx2x *bp);
 
 #define E1H_MAX_MF_SB_COUNT (HC_SB_MAX_SB_E1X/(E1HVN_MAX * PORT_MAX))
 
+void bnx2x_init_ptp(struct bnx2x *bp);
+int bnx2x_configure_ptp_filters(struct bnx2x *bp);
+void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb);
+
+#define BNX2X_MAX_PHC_DRIFT 31000000
+#define BNX2X_PTP_TX_TIMEOUT
+
 #endif /* bnx2x.h */
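
The new cyclecounter/timecounter members follow the standard kernel pattern for a free-running PHC: the driver supplies a raw counter read callback and the timecounter core turns cycles into nanoseconds. A minimal sketch of that conversion under generic assumptions (not bnx2x's exact code; real mult/shift values are device-specific):

#include <linux/clocksource.h>
#include <linux/ktime.h>

static cycle_t example_phc_read(const struct cyclecounter *cc)
{
	/* read the device's free-running clock register here */
	return 0;	/* stubbed for the sketch */
}

static void example_timecounter_setup(struct cyclecounter *cc,
				      struct timecounter *tc)
{
	cc->read = example_phc_read;
	cc->mask = CLOCKSOURCE_MASK(64);
	cc->mult = 1;	/* device-specific scaling in practice */
	cc->shift = 0;

	/* anchor the counter to the current wall-clock time */
	timecounter_init(tc, cc, ktime_to_ns(ktime_get_real()));
}

static u64 example_cycles_to_ns(struct timecounter *tc, cycle_t raw)
{
	/* nanoseconds since timecounter_init(), derived from raw cycles */
	return timecounter_cyc2time(tc, raw);
}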
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4ccc806b1150..6dc32aee96bf 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -21,6 +21,7 @@
 #include <linux/if_vlan.h>
 #include <linux/interrupt.h>
 #include <linux/ip.h>
+#include <linux/crash_dump.h>
 #include <net/tcp.h>
 #include <net/ipv6.h>
 #include <net/ip6_checksum.h>
@@ -64,7 +65,7 @@ static int bnx2x_calc_num_queues(struct bnx2x *bp)
 	int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
 
 	/* Reduce memory usage in kdump environment by using only one queue */
-	if (reset_devices)
+	if (is_kdump_kernel())
 		nq = 1;
 
 	nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
@@ -1063,6 +1064,11 @@ reuse_rx:
 
 	skb_record_rx_queue(skb, fp->rx_queue);
 
+	/* Check if this packet was timestamped */
+	if (unlikely(cqe->fast_path_cqe.type_error_flags &
+		     (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
+		bnx2x_set_rx_ts(bp, skb);
+
 	if (le16_to_cpu(cqe_fp->pars_flags.flags) &
 	    PARSING_FLAGS_VLAN)
 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
@@ -2078,6 +2084,10 @@ int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
 		__set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
 	if (rss_obj->udp_rss_v6)
 		__set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
+
+	if (!CHIP_IS_E1x(bp))
+		/* valid only for TUNN_MODE_GRE tunnel mode */
+		__set_bit(BNX2X_RSS_GRE_INNER_HDRS, &params.rss_flags);
 	} else {
 		__set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
 	}
@@ -2800,7 +2810,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	/* Initialize Rx filter. */
 	bnx2x_set_rx_mode_inner(bp);
 
-	/* Start the Tx */
+	if (bp->flags & PTP_SUPPORTED) {
+		bnx2x_init_ptp(bp);
+		bnx2x_configure_ptp_filters(bp);
+	}
+	/* Start Tx */
 	switch (load_mode) {
 	case LOAD_NORMAL:
 		/* Tx queue should be only re-enabled */
@@ -3437,26 +3451,6 @@ exit_lbl:
 }
 #endif
 
-static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
-				 u32 xmit_type)
-{
-	struct ipv6hdr *ipv6;
-
-	*parsing_data |= (skb_shinfo(skb)->gso_size <<
-			  ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
-			  ETH_TX_PARSE_BD_E2_LSO_MSS;
-
-	if (xmit_type & XMIT_GSO_ENC_V6)
-		ipv6 = inner_ipv6_hdr(skb);
-	else if (xmit_type & XMIT_GSO_V6)
-		ipv6 = ipv6_hdr(skb);
-	else
-		ipv6 = NULL;
-
-	if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
-		*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
-}
-
 /**
  * bnx2x_set_pbd_gso - update PBD in GSO case.
  *
@@ -3466,7 +3460,6 @@ static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
  */
 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
 			      struct eth_tx_parse_bd_e1x *pbd,
-			      struct eth_tx_start_bd *tx_start_bd,
 			      u32 xmit_type)
 {
 	pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
@@ -3479,9 +3472,6 @@ static void bnx2x_set_pbd_gso(struct sk_buff *skb,
 		bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
 					   ip_hdr(skb)->daddr,
 					   0, IPPROTO_TCP, 0));
-
-		/* GSO on 57710/57711 needs FW to calculate IP checksum */
-		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
 	} else {
 		pbd->tcp_pseudo_csum =
 			bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
@@ -3653,18 +3643,23 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
 		       (__force u32)iph->tot_len -
 		       (__force u32)iph->frag_off;
 
+		outerip_len = iph->ihl << 1;
+
 		pbd2->fw_ip_csum_wo_len_flags_frag =
 			bswab16(csum_fold((__force __wsum)csum));
 	} else {
 		pbd2->fw_ip_hdr_to_payload_w =
 			hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
+		pbd_e2->data.tunnel_data.flags |=
+			ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER;
 	}
 
 	pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
 
 	pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
 
-	if (xmit_type & XMIT_GSO_V4) {
+	/* inner IP header info */
+	if (xmit_type & XMIT_CSUM_ENC_V4) {
 		pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
 
 		pbd_e2->data.tunnel_data.pseudo_csum =
@@ -3672,8 +3667,6 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
 					inner_ip_hdr(skb)->saddr,
 					inner_ip_hdr(skb)->daddr,
 					0, IPPROTO_TCP, 0));
-
-		outerip_len = ip_hdr(skb)->ihl << 1;
 	} else {
 		pbd_e2->data.tunnel_data.pseudo_csum =
 			bswab16(~csum_ipv6_magic(
@@ -3686,8 +3679,6 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
 
 	*global_data |=
 		outerip_off |
-		(!!(xmit_type & XMIT_CSUM_V6) <<
-			ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
 		(outerip_len <<
 			ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
 		((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
@@ -3699,6 +3690,23 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
 	}
 }
 
+static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
+					 u32 xmit_type)
+{
+	struct ipv6hdr *ipv6;
+
+	if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
+		return;
+
+	if (xmit_type & XMIT_GSO_ENC_V6)
+		ipv6 = inner_ipv6_hdr(skb);
+	else /* XMIT_GSO_V6 */
+		ipv6 = ipv6_hdr(skb);
+
+	if (ipv6->nexthdr == NEXTHDR_IPV6)
+		*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
+}
+
 /* called with netif_tx_lock
  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
  * netif_wake_queue()
@@ -3831,6 +3839,20 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
 
+	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+		if (!(bp->flags & TX_TIMESTAMPING_EN)) {
+			BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
+		} else if (bp->ptp_tx_skb) {
+			BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
+		} else {
+			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+			/* schedule check for Tx timestamp */
+			bp->ptp_tx_skb = skb_get(skb);
+			bp->ptp_tx_start = jiffies;
+			schedule_work(&bp->ptp_task);
+		}
+	}
+
 	/* header nbd: indirectly zero other flags! */
 	tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
 
@@ -3852,12 +3874,16 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* when transmitting in a vf, start bd must hold the ethertype
 	 * for fw to enforce it
 	 */
+#ifndef BNX2X_STOP_ON_ERROR
 	if (IS_VF(bp))
+#endif
 		tx_start_bd->vlan_or_ethertype =
 			cpu_to_le16(ntohs(eth->h_proto));
+#ifndef BNX2X_STOP_ON_ERROR
 	else
 		/* used by FW for packet accounting */
 		tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
+#endif
 	}
 
 	nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
@@ -3915,6 +3941,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 					     xmit_type);
 	}
 
+	bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
 	/* Add the macs to the parsing BD if this is a vf or if
 	 * Tx Switching is enabled.
 	 */
@@ -3929,11 +3956,22 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 				      &pbd_e2->data.mac_addr.dst_mid,
 				      &pbd_e2->data.mac_addr.dst_lo,
 				      eth->h_dest);
-	} else if (bp->flags & TX_SWITCHING) {
-		bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
-				      &pbd_e2->data.mac_addr.dst_mid,
-				      &pbd_e2->data.mac_addr.dst_lo,
-				      eth->h_dest);
+	} else {
+		if (bp->flags & TX_SWITCHING)
+			bnx2x_set_fw_mac_addr(
+					&pbd_e2->data.mac_addr.dst_hi,
+					&pbd_e2->data.mac_addr.dst_mid,
+					&pbd_e2->data.mac_addr.dst_lo,
+					eth->h_dest);
+#ifdef BNX2X_STOP_ON_ERROR
+		/* Enforce security is always set in Stop on Error -
+		 * source mac should be present in the parsing BD
+		 */
+		bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
+				      &pbd_e2->data.mac_addr.src_mid,
+				      &pbd_e2->data.mac_addr.src_lo,
+				      eth->h_source);
+#endif
 	}
 
 	SET_FLAG(pbd_e2_parsing_data,
@@ -3980,10 +4018,12 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 						     bd_prod);
 	}
 	if (!CHIP_IS_E1x(bp))
-		bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
-				     xmit_type);
+		pbd_e2_parsing_data |=
+			(skb_shinfo(skb)->gso_size <<
+			 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
+			 ETH_TX_PARSE_BD_E2_LSO_MSS;
 	else
-		bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type);
+		bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
 	}
 
 	/* Set the PBD's parsing_data field if not zero
@@ -4771,11 +4811,15 @@ netdev_features_t bnx2x_fix_features(struct net_device *dev,
 	struct bnx2x *bp = netdev_priv(dev);
 
 	/* TPA requires Rx CSUM offloading */
-	if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
+	if (!(features & NETIF_F_RXCSUM)) {
 		features &= ~NETIF_F_LRO;
 		features &= ~NETIF_F_GRO;
 	}
 
+	/* Note: do not disable SW GRO in kernel when HW GRO is off */
+	if (bp->disable_tpa)
+		features &= ~NETIF_F_LRO;
+
 	return features;
 }
 
@@ -4814,6 +4858,10 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
 	if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
 		changes &= ~GRO_ENABLE_FLAG;
 
+	/* if GRO is changed while HW TPA is off, don't force a reload */
+	if ((changes & GRO_ENABLE_FLAG) && bp->disable_tpa)
+		changes &= ~GRO_ENABLE_FLAG;
+
 	if (changes)
 		bnx2x_reload = true;
 
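
bnx2x_start_xmit() above admits only one outstanding Tx timestamp request at a time: it takes an extra reference on the skb, notes the start jiffies, and defers delivery to ptp_task. A sketch of what such a worker typically does; the field names (ptp_task, ptp_tx_skb) come from this patch, but the body is a generic reconstruction with the hardware timestamp read stubbed out:

static void example_ptp_tx_work(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, ptp_task);
	struct skb_shared_hwtstamps shhwtstamps;
	u64 ns;

	if (!bp->ptp_tx_skb)
		return;

	/* poll the device for the Tx timestamp here; stubbed out */
	ns = 0;

	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps);	/* deliver to socket */

	dev_kfree_skb_any(bp->ptp_tx_skb);	/* drop the skb_get() reference */
	bp->ptp_tx_skb = NULL;			/* allow the next request */
}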
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 571427c7226b..ac63e16829ef 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -932,8 +932,9 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
 	else /* CHIP_IS_E1X */
 		start_params->network_cos_mode = FW_WRR;
 
-	start_params->gre_tunnel_mode = L2GRE_TUNNEL;
-	start_params->gre_tunnel_rss = GRE_INNER_HEADERS_RSS;
+	start_params->tunnel_mode = TUNN_MODE_GRE;
+	start_params->gre_tunnel_type = IPGRE_TUNNEL;
+	start_params->inner_gre_rss_en = 1;
 
 	return bnx2x_func_state_change(bp, &func_params);
 }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index fb26bc4c42a1..6e4294ed1fc9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -2092,7 +2092,6 @@ static void bnx2x_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio,
 static u8 bnx2x_dcbnl_set_all(struct net_device *netdev)
 {
 	struct bnx2x *bp = netdev_priv(netdev);
-	int rc = 0;
 
 	DP(BNX2X_MSG_DCB, "SET-ALL\n");
 
@@ -2110,9 +2109,7 @@ static u8 bnx2x_dcbnl_set_all(struct net_device *netdev)
 					 1);
 		bnx2x_dcbx_init(bp, true);
 	}
-	DP(BNX2X_MSG_DCB, "set_dcbx_params done (%d)\n", rc);
-	if (rc)
-		return 1;
+	DP(BNX2X_MSG_DCB, "set_dcbx_params done\n");
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
index 12eb4baee9f6..741aa130c19f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
@@ -40,7 +40,7 @@ struct dump_header {
 	u32 dump_meta_data; /* OR of CHIP and PATH. */
 };
 
-#define BNX2X_DUMP_VERSION 0x50acff01
+#define BNX2X_DUMP_VERSION 0x61111111
 struct reg_addr {
 	u32 addr;
 	u32 size;
@@ -1464,7 +1464,6 @@ static const struct reg_addr reg_addrs[] = {
 	{ 0x180398, 1, 0x1c, 0x924},
 	{ 0x1803a0, 5, 0x1c, 0x924},
 	{ 0x1803b4, 2, 0x18, 0x924},
-	{ 0x180400, 256, 0x3, 0xfff},
 	{ 0x181000, 4, 0x1f, 0x93c},
 	{ 0x181010, 1020, 0x1f, 0x38},
 	{ 0x182000, 4, 0x18, 0x924},
@@ -1576,7 +1575,6 @@ static const struct reg_addr reg_addrs[] = {
 	{ 0x200398, 1, 0x1c, 0x924},
 	{ 0x2003a0, 1, 0x1c, 0x924},
 	{ 0x2003a8, 2, 0x1c, 0x924},
-	{ 0x200400, 256, 0x3, 0xfff},
 	{ 0x202000, 4, 0x1f, 0x1927},
 	{ 0x202010, 2044, 0x1f, 0x1007},
 	{ 0x204000, 4, 0x18, 0x924},
@@ -1688,7 +1686,6 @@ static const struct reg_addr reg_addrs[] = {
 	{ 0x280398, 1, 0x1c, 0x924},
 	{ 0x2803a0, 1, 0x1c, 0x924},
 	{ 0x2803a8, 2, 0x1c, 0x924},
-	{ 0x280400, 256, 0x3, 0xfff},
 	{ 0x282000, 4, 0x1f, 0x9e4},
 	{ 0x282010, 2044, 0x1f, 0x1c0},
 	{ 0x284000, 4, 0x18, 0x924},
@@ -1800,7 +1797,6 @@ static const struct reg_addr reg_addrs[] = {
 	{ 0x300398, 1, 0x1c, 0x924},
 	{ 0x3003a0, 1, 0x1c, 0x924},
 	{ 0x3003a8, 2, 0x1c, 0x924},
-	{ 0x300400, 256, 0x3, 0xfff},
 	{ 0x302000, 4, 0x1f, 0xf24},
 	{ 0x302010, 2044, 0x1f, 0xe00},
 	{ 0x304000, 4, 0x18, 0x924},
@@ -2206,10 +2202,10 @@ static const struct wreg_addr wreg_addr_e3b0 = {
 	0x1b0c00, 128, 2, read_reg_e3b0, 0x1f, 0x1fff};
 
 static const unsigned int dump_num_registers[NUM_CHIPS][NUM_PRESETS] = {
-	{20782, 18567, 27975, 19729, 18311, 27719, 20836, 32391, 41799, 20812,
-	 26247, 35655, 19074},
-	{32774, 19297, 33277, 31721, 19041, 33021, 32828, 33121, 47101, 32804,
-	 26977, 40957, 35895},
+	{19758, 17543, 26951, 18705, 17287, 26695, 19812, 31367, 40775, 19788,
+	 25223, 34631, 19074},
+	{31750, 18273, 32253, 30697, 18017, 31997, 31804, 32097, 46077, 31780,
+	 25953, 39933, 35895},
 	{36527, 17928, 33697, 35474, 18700, 34466, 36581, 31752, 47521, 36557,
 	 25608, 41377, 43903},
 	{45239, 17936, 34387, 44186, 18708, 35156, 45293, 31760, 48211, 45269,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 92fee842f954..0b173ed20ae9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -3481,6 +3481,46 @@ static int bnx2x_set_channels(struct net_device *dev,
 	return bnx2x_nic_load(bp, LOAD_NORMAL);
 }
 
+static int bnx2x_get_ts_info(struct net_device *dev,
+			     struct ethtool_ts_info *info)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (bp->flags & PTP_SUPPORTED) {
+		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+					SOF_TIMESTAMPING_RX_SOFTWARE |
+					SOF_TIMESTAMPING_SOFTWARE |
+					SOF_TIMESTAMPING_TX_HARDWARE |
+					SOF_TIMESTAMPING_RX_HARDWARE |
+					SOF_TIMESTAMPING_RAW_HARDWARE;
+
+		if (bp->ptp_clock)
+			info->phc_index = ptp_clock_index(bp->ptp_clock);
+		else
+			info->phc_index = -1;
+
+		info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+				   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+				   (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+				   (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+				   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+				   (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+				   (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+				   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+				   (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+				   (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+				   (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+				   (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+				   (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
+
+		info->tx_types = (1 << HWTSTAMP_TX_OFF)|(1 << HWTSTAMP_TX_ON);
+
+		return 0;
+	}
+
+	return ethtool_op_get_ts_info(dev, info);
+}
+
 static const struct ethtool_ops bnx2x_ethtool_ops = {
 	.get_settings = bnx2x_get_settings,
 	.set_settings = bnx2x_set_settings,
@@ -3522,7 +3562,7 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
 	.get_module_eeprom = bnx2x_get_module_eeprom,
 	.get_eee = bnx2x_get_eee,
 	.set_eee = bnx2x_set_eee,
-	.get_ts_info = ethtool_op_get_ts_info,
+	.get_ts_info = bnx2x_get_ts_info,
 };
 
 static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
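
For context, the tx_types and rx_filters advertised by bnx2x_get_ts_info() correspond one-to-one to what userspace requests through the standard SIOCSHWTSTAMP ioctl. A minimal userspace sketch (standard UAPI headers; sock is any AF_INET datagram socket and the interface name is the caller's choice):

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int enable_hw_timestamping(int sock, const char *ifname)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;			/* one of the tx_types above */
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;	/* one of the rx_filters */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	return ioctl(sock, SIOCSHWTSTAMP, &ifr);	/* 0 on success */
}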
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 95dc36543548..7636e3c18771 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -10,170 +10,170 @@
 #ifndef BNX2X_FW_DEFS_H
 #define BNX2X_FW_DEFS_H
 
-#define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[148].base)
+#define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[152].base)
 #define CSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
-	(IRO[147].base + ((assertListEntry) * IRO[147].m1))
+	(IRO[151].base + ((assertListEntry) * IRO[151].m1))
 #define CSTORM_EVENT_RING_DATA_OFFSET(pfId) \
-	(IRO[153].base + (((pfId)>>1) * IRO[153].m1) + (((pfId)&1) * \
-	IRO[153].m2))
+	(IRO[157].base + (((pfId)>>1) * IRO[157].m1) + (((pfId)&1) * \
+	IRO[157].m2))
 #define CSTORM_EVENT_RING_PROD_OFFSET(pfId) \
-	(IRO[154].base + (((pfId)>>1) * IRO[154].m1) + (((pfId)&1) * \
-	IRO[154].m2))
+	(IRO[158].base + (((pfId)>>1) * IRO[158].m1) + (((pfId)&1) * \
+	IRO[158].m2))
 #define CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(funcId) \
-	(IRO[159].base + ((funcId) * IRO[159].m1))
+	(IRO[163].base + ((funcId) * IRO[163].m1))
 #define CSTORM_FUNC_EN_OFFSET(funcId) \
-	(IRO[149].base + ((funcId) * IRO[149].m1))
+	(IRO[153].base + ((funcId) * IRO[153].m1))
 #define CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hcIndex, sbId) \
-	(IRO[139].base + ((hcIndex) * IRO[139].m1) + ((sbId) * IRO[139].m2))
+	(IRO[143].base + ((hcIndex) * IRO[143].m1) + ((sbId) * IRO[143].m2))
 #define CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hcIndex, sbId) \
-	(IRO[138].base + (((hcIndex)>>2) * IRO[138].m1) + (((hcIndex)&3) \
-	* IRO[138].m2) + ((sbId) * IRO[138].m3))
-#define CSTORM_IGU_MODE_OFFSET (IRO[157].base)
+	(IRO[142].base + (((hcIndex)>>2) * IRO[142].m1) + (((hcIndex)&3) \
+	* IRO[142].m2) + ((sbId) * IRO[142].m3))
+#define CSTORM_IGU_MODE_OFFSET (IRO[161].base)
 #define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
-	(IRO[317].base + ((pfId) * IRO[317].m1))
+	(IRO[323].base + ((pfId) * IRO[323].m1))
 #define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
-	(IRO[318].base + ((pfId) * IRO[318].m1))
+	(IRO[324].base + ((pfId) * IRO[324].m1))
 #define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
-	(IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
+	(IRO[316].base + ((pfId) * IRO[316].m1) + ((iscsiEqId) * IRO[316].m2))
 #define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
-	(IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
+	(IRO[318].base + ((pfId) * IRO[318].m1) + ((iscsiEqId) * IRO[318].m2))
 #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
-	(IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
+	(IRO[317].base + ((pfId) * IRO[317].m1) + ((iscsiEqId) * IRO[317].m2))
 #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
-	(IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
+	(IRO[319].base + ((pfId) * IRO[319].m1) + ((iscsiEqId) * IRO[319].m2))
 #define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
-	(IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
-#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
 	(IRO[315].base + ((pfId) * IRO[315].m1) + ((iscsiEqId) * IRO[315].m2))
+#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
+	(IRO[321].base + ((pfId) * IRO[321].m1) + ((iscsiEqId) * IRO[321].m2))
 #define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
-	(IRO[314].base + ((pfId) * IRO[314].m1) + ((iscsiEqId) * IRO[314].m2))
+	(IRO[320].base + ((pfId) * IRO[320].m1) + ((iscsiEqId) * IRO[320].m2))
 #define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
-	(IRO[316].base + ((pfId) * IRO[316].m1))
+	(IRO[322].base + ((pfId) * IRO[322].m1))
 #define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
-	(IRO[308].base + ((pfId) * IRO[308].m1))
+	(IRO[314].base + ((pfId) * IRO[314].m1))
 #define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
-	(IRO[307].base + ((pfId) * IRO[307].m1))
+	(IRO[313].base + ((pfId) * IRO[313].m1))
 #define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
-	(IRO[306].base + ((pfId) * IRO[306].m1))
+	(IRO[312].base + ((pfId) * IRO[312].m1))
 #define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
-	(IRO[151].base + ((funcId) * IRO[151].m1))
+	(IRO[155].base + ((funcId) * IRO[155].m1))
 #define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
-	(IRO[142].base + ((pfId) * IRO[142].m1))
+	(IRO[146].base + ((pfId) * IRO[146].m1))
 #define CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(pfId) \
-	(IRO[143].base + ((pfId) * IRO[143].m1))
+	(IRO[147].base + ((pfId) * IRO[147].m1))
 #define CSTORM_SP_STATUS_BLOCK_OFFSET(pfId) \
-	(IRO[141].base + ((pfId) * IRO[141].m1))
-#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[141].size)
+	(IRO[145].base + ((pfId) * IRO[145].m1))
+#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[145].size)
 #define CSTORM_SP_SYNC_BLOCK_OFFSET(pfId) \
-	(IRO[144].base + ((pfId) * IRO[144].m1))
-#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[144].size)
+	(IRO[148].base + ((pfId) * IRO[148].m1))
+#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[148].size)
 #define CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(sbId, hcIndex) \
-	(IRO[136].base + ((sbId) * IRO[136].m1) + ((hcIndex) * IRO[136].m2))
+	(IRO[140].base + ((sbId) * IRO[140].m1) + ((hcIndex) * IRO[140].m2))
 #define CSTORM_STATUS_BLOCK_DATA_OFFSET(sbId) \
-	(IRO[133].base + ((sbId) * IRO[133].m1))
+	(IRO[137].base + ((sbId) * IRO[137].m1))
 #define CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(sbId) \
-	(IRO[134].base + ((sbId) * IRO[134].m1))
+	(IRO[138].base + ((sbId) * IRO[138].m1))
 #define CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(sbId, hcIndex) \
-	(IRO[135].base + ((sbId) * IRO[135].m1) + ((hcIndex) * IRO[135].m2))
+	(IRO[139].base + ((sbId) * IRO[139].m1) + ((hcIndex) * IRO[139].m2))
 #define CSTORM_STATUS_BLOCK_OFFSET(sbId) \
-	(IRO[132].base + ((sbId) * IRO[132].m1))
-#define CSTORM_STATUS_BLOCK_SIZE (IRO[132].size)
+	(IRO[136].base + ((sbId) * IRO[136].m1))
+#define CSTORM_STATUS_BLOCK_SIZE (IRO[136].size)
 #define CSTORM_SYNC_BLOCK_OFFSET(sbId) \
-	(IRO[137].base + ((sbId) * IRO[137].m1))
-#define CSTORM_SYNC_BLOCK_SIZE (IRO[137].size)
+	(IRO[141].base + ((sbId) * IRO[141].m1))
+#define CSTORM_SYNC_BLOCK_SIZE (IRO[141].size)
84#define CSTORM_VF_PF_CHANNEL_STATE_OFFSET(vfId) \ 84#define CSTORM_VF_PF_CHANNEL_STATE_OFFSET(vfId) \
85 (IRO[155].base + ((vfId) * IRO[155].m1)) 85 (IRO[159].base + ((vfId) * IRO[159].m1))
86#define CSTORM_VF_PF_CHANNEL_VALID_OFFSET(vfId) \ 86#define CSTORM_VF_PF_CHANNEL_VALID_OFFSET(vfId) \
87 (IRO[156].base + ((vfId) * IRO[156].m1)) 87 (IRO[160].base + ((vfId) * IRO[160].m1))
88#define CSTORM_VF_TO_PF_OFFSET(funcId) \ 88#define CSTORM_VF_TO_PF_OFFSET(funcId) \
89 (IRO[150].base + ((funcId) * IRO[150].m1)) 89 (IRO[154].base + ((funcId) * IRO[154].m1))
90#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \ 90#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \
91 (IRO[203].base + ((pfId) * IRO[203].m1)) 91 (IRO[207].base + ((pfId) * IRO[207].m1))
92#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base) 92#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base)
93#define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ 93#define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
94 (IRO[101].base + ((assertListEntry) * IRO[101].m1)) 94 (IRO[101].base + ((assertListEntry) * IRO[101].m1))
95#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \ 95#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \
96 (IRO[201].base + ((pfId) * IRO[201].m1)) 96 (IRO[205].base + ((pfId) * IRO[205].m1))
97#define TSTORM_FUNC_EN_OFFSET(funcId) \ 97#define TSTORM_FUNC_EN_OFFSET(funcId) \
98 (IRO[103].base + ((funcId) * IRO[103].m1)) 98 (IRO[107].base + ((funcId) * IRO[107].m1))
99#define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \ 99#define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
100 (IRO[272].base + ((pfId) * IRO[272].m1)) 100 (IRO[278].base + ((pfId) * IRO[278].m1))
101#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \ 101#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
102 (IRO[273].base + ((pfId) * IRO[273].m1)) 102 (IRO[279].base + ((pfId) * IRO[279].m1))
103#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \ 103#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \
104 (IRO[274].base + ((pfId) * IRO[274].m1)) 104 (IRO[280].base + ((pfId) * IRO[280].m1))
105#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \ 105#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
106 (IRO[275].base + ((pfId) * IRO[275].m1)) 106 (IRO[281].base + ((pfId) * IRO[281].m1))
107#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ 107#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
108 (IRO[271].base + ((pfId) * IRO[271].m1)) 108 (IRO[277].base + ((pfId) * IRO[277].m1))
109#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ 109#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
110 (IRO[270].base + ((pfId) * IRO[270].m1)) 110 (IRO[276].base + ((pfId) * IRO[276].m1))
111#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ 111#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
112 (IRO[269].base + ((pfId) * IRO[269].m1)) 112 (IRO[275].base + ((pfId) * IRO[275].m1))
113#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ 113#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
114 (IRO[268].base + ((pfId) * IRO[268].m1)) 114 (IRO[274].base + ((pfId) * IRO[274].m1))
115#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \ 115#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
116 (IRO[278].base + ((pfId) * IRO[278].m1)) 116 (IRO[284].base + ((pfId) * IRO[284].m1))
117#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ 117#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
118 (IRO[264].base + ((pfId) * IRO[264].m1)) 118 (IRO[270].base + ((pfId) * IRO[270].m1))
119#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ 119#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
120 (IRO[265].base + ((pfId) * IRO[265].m1)) 120 (IRO[271].base + ((pfId) * IRO[271].m1))
121#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \ 121#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \
122 (IRO[266].base + ((pfId) * IRO[266].m1)) 122 (IRO[272].base + ((pfId) * IRO[272].m1))
123#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ 123#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
124 (IRO[267].base + ((pfId) * IRO[267].m1)) 124 (IRO[273].base + ((pfId) * IRO[273].m1))
125#define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \ 125#define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \
126 (IRO[202].base + ((pfId) * IRO[202].m1)) 126 (IRO[206].base + ((pfId) * IRO[206].m1))
127#define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ 127#define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
128 (IRO[105].base + ((funcId) * IRO[105].m1)) 128 (IRO[109].base + ((funcId) * IRO[109].m1))
129#define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \ 129#define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \
130 (IRO[217].base + ((pfId) * IRO[217].m1)) 130 (IRO[223].base + ((pfId) * IRO[223].m1))
131#define TSTORM_VF_TO_PF_OFFSET(funcId) \ 131#define TSTORM_VF_TO_PF_OFFSET(funcId) \
132 (IRO[104].base + ((funcId) * IRO[104].m1)) 132 (IRO[108].base + ((funcId) * IRO[108].m1))
133#define USTORM_AGG_DATA_OFFSET (IRO[206].base) 133#define USTORM_AGG_DATA_OFFSET (IRO[212].base)
134#define USTORM_AGG_DATA_SIZE (IRO[206].size) 134#define USTORM_AGG_DATA_SIZE (IRO[212].size)
135#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[177].base) 135#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[181].base)
136#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \ 136#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
137 (IRO[176].base + ((assertListEntry) * IRO[176].m1)) 137 (IRO[180].base + ((assertListEntry) * IRO[180].m1))
138#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \ 138#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
139 (IRO[183].base + ((portId) * IRO[183].m1)) 139 (IRO[187].base + ((portId) * IRO[187].m1))
140#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \ 140#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
141 (IRO[319].base + ((pfId) * IRO[319].m1)) 141 (IRO[325].base + ((pfId) * IRO[325].m1))
142#define USTORM_FUNC_EN_OFFSET(funcId) \ 142#define USTORM_FUNC_EN_OFFSET(funcId) \
143 (IRO[178].base + ((funcId) * IRO[178].m1)) 143 (IRO[182].base + ((funcId) * IRO[182].m1))
144#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ 144#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
145 (IRO[283].base + ((pfId) * IRO[283].m1)) 145 (IRO[289].base + ((pfId) * IRO[289].m1))
146#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \ 146#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
147 (IRO[284].base + ((pfId) * IRO[284].m1)) 147 (IRO[290].base + ((pfId) * IRO[290].m1))
148#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \ 148#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
149 (IRO[288].base + ((pfId) * IRO[288].m1)) 149 (IRO[294].base + ((pfId) * IRO[294].m1))
150#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \ 150#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
151 (IRO[285].base + ((pfId) * IRO[285].m1)) 151 (IRO[291].base + ((pfId) * IRO[291].m1))
152#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ 152#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
153 (IRO[281].base + ((pfId) * IRO[281].m1)) 153 (IRO[287].base + ((pfId) * IRO[287].m1))
154#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ 154#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
155 (IRO[280].base + ((pfId) * IRO[280].m1)) 155 (IRO[286].base + ((pfId) * IRO[286].m1))
156#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ 156#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
157 (IRO[279].base + ((pfId) * IRO[279].m1)) 157 (IRO[285].base + ((pfId) * IRO[285].m1))
158#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ 158#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
159 (IRO[282].base + ((pfId) * IRO[282].m1)) 159 (IRO[288].base + ((pfId) * IRO[288].m1))
160#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \ 160#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
161 (IRO[286].base + ((pfId) * IRO[286].m1)) 161 (IRO[292].base + ((pfId) * IRO[292].m1))
162#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ 162#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
163 (IRO[287].base + ((pfId) * IRO[287].m1)) 163 (IRO[293].base + ((pfId) * IRO[293].m1))
164#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \ 164#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
165 (IRO[182].base + ((pfId) * IRO[182].m1)) 165 (IRO[186].base + ((pfId) * IRO[186].m1))
166#define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ 166#define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
167 (IRO[180].base + ((funcId) * IRO[180].m1)) 167 (IRO[184].base + ((funcId) * IRO[184].m1))
168#define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \ 168#define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \
169 (IRO[209].base + ((portId) * IRO[209].m1) + ((clientId) * \ 169 (IRO[215].base + ((portId) * IRO[215].m1) + ((clientId) * \
170 IRO[209].m2)) 170 IRO[215].m2))
171#define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \ 171#define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \
172 (IRO[210].base + ((qzoneId) * IRO[210].m1)) 172 (IRO[216].base + ((qzoneId) * IRO[216].m1))
173#define USTORM_TPA_BTR_OFFSET (IRO[207].base) 173#define USTORM_TPA_BTR_OFFSET (IRO[213].base)
174#define USTORM_TPA_BTR_SIZE (IRO[207].size) 174#define USTORM_TPA_BTR_SIZE (IRO[213].size)
175#define USTORM_VF_TO_PF_OFFSET(funcId) \ 175#define USTORM_VF_TO_PF_OFFSET(funcId) \
176 (IRO[179].base + ((funcId) * IRO[179].m1)) 176 (IRO[183].base + ((funcId) * IRO[183].m1))
177#define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[67].base) 177#define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[67].base)
178#define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[66].base) 178#define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[66].base)
179#define XSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[51].base) 179#define XSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[51].base)
@@ -186,39 +186,39 @@
186#define XSTORM_FUNC_EN_OFFSET(funcId) \ 186#define XSTORM_FUNC_EN_OFFSET(funcId) \
187 (IRO[47].base + ((funcId) * IRO[47].m1)) 187 (IRO[47].base + ((funcId) * IRO[47].m1))
188#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ 188#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
189 (IRO[296].base + ((pfId) * IRO[296].m1)) 189 (IRO[302].base + ((pfId) * IRO[302].m1))
190#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \ 190#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
191 (IRO[299].base + ((pfId) * IRO[299].m1)) 191 (IRO[305].base + ((pfId) * IRO[305].m1))
192#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \ 192#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
193 (IRO[300].base + ((pfId) * IRO[300].m1)) 193 (IRO[306].base + ((pfId) * IRO[306].m1))
194#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \ 194#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
195 (IRO[301].base + ((pfId) * IRO[301].m1)) 195 (IRO[307].base + ((pfId) * IRO[307].m1))
196#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \ 196#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
197 (IRO[302].base + ((pfId) * IRO[302].m1)) 197 (IRO[308].base + ((pfId) * IRO[308].m1))
198#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \ 198#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
199 (IRO[303].base + ((pfId) * IRO[303].m1)) 199 (IRO[309].base + ((pfId) * IRO[309].m1))
200#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \ 200#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
201 (IRO[304].base + ((pfId) * IRO[304].m1)) 201 (IRO[310].base + ((pfId) * IRO[310].m1))
202#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \ 202#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
203 (IRO[305].base + ((pfId) * IRO[305].m1)) 203 (IRO[311].base + ((pfId) * IRO[311].m1))
204#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ 204#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
205 (IRO[295].base + ((pfId) * IRO[295].m1)) 205 (IRO[301].base + ((pfId) * IRO[301].m1))
206#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ 206#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
207 (IRO[294].base + ((pfId) * IRO[294].m1)) 207 (IRO[300].base + ((pfId) * IRO[300].m1))
208#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ 208#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
209 (IRO[293].base + ((pfId) * IRO[293].m1)) 209 (IRO[299].base + ((pfId) * IRO[299].m1))
210#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ 210#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
211 (IRO[298].base + ((pfId) * IRO[298].m1)) 211 (IRO[304].base + ((pfId) * IRO[304].m1))
212#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \ 212#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
213 (IRO[297].base + ((pfId) * IRO[297].m1)) 213 (IRO[303].base + ((pfId) * IRO[303].m1))
214#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \ 214#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
215 (IRO[292].base + ((pfId) * IRO[292].m1)) 215 (IRO[298].base + ((pfId) * IRO[298].m1))
216#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ 216#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
217 (IRO[291].base + ((pfId) * IRO[291].m1)) 217 (IRO[297].base + ((pfId) * IRO[297].m1))
218#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \ 218#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
219 (IRO[290].base + ((pfId) * IRO[290].m1)) 219 (IRO[296].base + ((pfId) * IRO[296].m1))
220#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \ 220#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
221 (IRO[289].base + ((pfId) * IRO[289].m1)) 221 (IRO[295].base + ((pfId) * IRO[295].m1))
222#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \ 222#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
223 (IRO[44].base + ((pfId) * IRO[44].m1)) 223 (IRO[44].base + ((pfId) * IRO[44].m1))
224#define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ 224#define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
@@ -231,16 +231,19 @@
231#define XSTORM_SPQ_PROD_OFFSET(funcId) \ 231#define XSTORM_SPQ_PROD_OFFSET(funcId) \
232 (IRO[31].base + ((funcId) * IRO[31].m1)) 232 (IRO[31].base + ((funcId) * IRO[31].m1))
233#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \ 233#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \
234 (IRO[211].base + ((portId) * IRO[211].m1)) 234 (IRO[217].base + ((portId) * IRO[217].m1))
235#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \ 235#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \
236 (IRO[212].base + ((portId) * IRO[212].m1)) 236 (IRO[218].base + ((portId) * IRO[218].m1))
237#define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \ 237#define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \
238 (IRO[214].base + (((pfId)>>1) * IRO[214].m1) + (((pfId)&1) * \ 238 (IRO[220].base + (((pfId)>>1) * IRO[220].m1) + (((pfId)&1) * \
239 IRO[214].m2)) 239 IRO[220].m2))
240#define XSTORM_VF_TO_PF_OFFSET(funcId) \ 240#define XSTORM_VF_TO_PF_OFFSET(funcId) \
241 (IRO[48].base + ((funcId) * IRO[48].m1)) 241 (IRO[48].base + ((funcId) * IRO[48].m1))
242#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0 242#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
243 243
244/* eth hsi version */
245#define ETH_FP_HSI_VERSION (ETH_FP_HSI_VER_2)
246
244/* Ethernet Ring parameters */ 247/* Ethernet Ring parameters */
245#define X_ETH_LOCAL_RING_SIZE 13 248#define X_ETH_LOCAL_RING_SIZE 13
246#define FIRST_BD_IN_PKT 0 249#define FIRST_BD_IN_PKT 0
@@ -356,6 +359,7 @@
356#define XSEMI_CLK1_RESUL_CHIP (1e-3) 359#define XSEMI_CLK1_RESUL_CHIP (1e-3)
357 360
358#define SDM_TIMER_TICK_RESUL_CHIP (4 * (1e-6)) 361#define SDM_TIMER_TICK_RESUL_CHIP (4 * (1e-6))
362#define TSDM_TIMER_TICK_RESUL_CHIP (1 * (1e-6))
359 363
360/**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/ 364/**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
361 365
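
All of the *_OFFSET macros in this hunk share one indirection pattern: the firmware image ships a table of IRO records, and each macro computes base + index * stride. A minimal standalone sketch of that arithmetic, with made-up table values (the real struct iro in bnx2x.h also carries an m3 member; actual values come from the firmware blob, not from this example):

/* Standalone sketch of the IRO indirection used by the *_OFFSET macros
 * above: a macro like CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId)
 * expands to base + pfId*m1 + iscsiEqId*m2, looked up per IRO index.
 */
#include <stdio.h>
#include <stdint.h>

struct iro {
	uint32_t base;
	uint16_t m1, m2, m3;
	uint16_t size;
};

static const struct iro iro_example[] = {
	[0] = { .base = 0x6000, .m1 = 0x40, .m2 = 0x8, .size = 0x4 },
};

#define EXAMPLE_EQ_PROD_OFFSET(pfId, eqId) \
	(iro_example[0].base + ((pfId) * iro_example[0].m1) + \
	 ((eqId) * iro_example[0].m2))

int main(void)
{
	/* offset for PF 2, EQ 3: 0x6000 + 2*0x40 + 3*0x8 = 0x6098 */
	printf("offset = 0x%x\n", EXAMPLE_EQ_PROD_OFFSET(2, 3));
	return 0;
}
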
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index c4daa068f1db..3e0621acdf05 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -2881,8 +2881,8 @@ struct afex_stats {
2881}; 2881};
2882 2882
2883#define BCM_5710_FW_MAJOR_VERSION 7 2883#define BCM_5710_FW_MAJOR_VERSION 7
2884#define BCM_5710_FW_MINOR_VERSION 8 2884#define BCM_5710_FW_MINOR_VERSION 10
2885#define BCM_5710_FW_REVISION_VERSION 19 2885#define BCM_5710_FW_REVISION_VERSION 51
2886#define BCM_5710_FW_ENGINEERING_VERSION 0 2886#define BCM_5710_FW_ENGINEERING_VERSION 0
2887#define BCM_5710_FW_COMPILE_FLAGS 1 2887#define BCM_5710_FW_COMPILE_FLAGS 1
2888 2888
@@ -3451,6 +3451,7 @@ enum classify_rule {
3451 CLASSIFY_RULE_OPCODE_MAC, 3451 CLASSIFY_RULE_OPCODE_MAC,
3452 CLASSIFY_RULE_OPCODE_VLAN, 3452 CLASSIFY_RULE_OPCODE_VLAN,
3453 CLASSIFY_RULE_OPCODE_PAIR, 3453 CLASSIFY_RULE_OPCODE_PAIR,
3454 CLASSIFY_RULE_OPCODE_VXLAN,
3454 MAX_CLASSIFY_RULE 3455 MAX_CLASSIFY_RULE
3455}; 3456};
3456 3457
@@ -3480,7 +3481,8 @@ struct client_init_general_data {
3480 u8 func_id; 3481 u8 func_id;
3481 u8 cos; 3482 u8 cos;
3482 u8 traffic_type; 3483 u8 traffic_type;
3483 u32 reserved0; 3484 u8 fp_hsi_ver;
3485 u8 reserved0[3];
3484}; 3486};
3485 3487
3486 3488
@@ -3550,7 +3552,9 @@ struct client_init_rx_data {
3550 __le16 rx_cos_mask; 3552 __le16 rx_cos_mask;
3551 __le16 silent_vlan_value; 3553 __le16 silent_vlan_value;
3552 __le16 silent_vlan_mask; 3554 __le16 silent_vlan_mask;
3553 __le32 reserved6[2]; 3555 u8 handle_ptp_pkts_flg;
3556 u8 reserved6[3];
3557 __le32 reserved7;
3554}; 3558};
3555 3559
3556/* 3560/*
@@ -3581,7 +3585,7 @@ struct client_init_tx_data {
3581 u8 tunnel_lso_inc_ip_id; 3585 u8 tunnel_lso_inc_ip_id;
3582 u8 refuse_outband_vlan_flg; 3586 u8 refuse_outband_vlan_flg;
3583 u8 tunnel_non_lso_pcsum_location; 3587 u8 tunnel_non_lso_pcsum_location;
3584 u8 reserved1; 3588 u8 tunnel_non_lso_outer_ip_csum_location;
3585}; 3589};
3586 3590
3587/* 3591/*
@@ -3619,7 +3623,9 @@ struct client_update_ramrod_data {
3619 u8 refuse_outband_vlan_change_flg; 3623 u8 refuse_outband_vlan_change_flg;
3620 u8 tx_switching_flg; 3624 u8 tx_switching_flg;
3621 u8 tx_switching_change_flg; 3625 u8 tx_switching_change_flg;
3622 __le32 reserved1; 3626 u8 handle_ptp_pkts_flg;
3627 u8 handle_ptp_pkts_change_flg;
3628 __le16 reserved1;
3623 __le32 echo; 3629 __le32 echo;
3624}; 3630};
3625 3631
@@ -3639,6 +3645,11 @@ struct double_regpair {
3639 u32 regpair1_hi; 3645 u32 regpair1_hi;
3640}; 3646};
3641 3647
3648/* 2nd parse bd type used in ethernet tx BDs */
3649enum eth_2nd_parse_bd_type {
3650 ETH_2ND_PARSE_BD_TYPE_LSO_TUNNEL,
3651 MAX_ETH_2ND_PARSE_BD_TYPE
3652};
3642 3653
3643/* 3654/*
3644 * Ethernet address types used in ethernet tx BDs 3655 * Ethernet address types used in ethernet tx BDs
@@ -3724,12 +3735,25 @@ struct eth_classify_vlan_cmd {
3724}; 3735};
3725 3736
3726/* 3737/*
3738 * Command for adding/removing a VXLAN classification rule
3739 */
3740struct eth_classify_vxlan_cmd {
3741 struct eth_classify_cmd_header header;
3742 __le32 vni;
3743 __le16 inner_mac_lsb;
3744 __le16 inner_mac_mid;
3745 __le16 inner_mac_msb;
3746 __le16 reserved1;
3747};
3748
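
A hedged sketch of how the new VXLAN rule above might be filled: the 24-bit VNI plus the inner MAC split into three 16-bit little-endian words, on the assumption that it follows the same msb/mid/lsb convention as the MAC classify command. This is a userspace model, so htole16()/htole32() stand in for the kernel's cpu_to_le16()/cpu_to_le32(), the command header is omitted, and the MAC and VNI values are illustrative:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* userspace model of struct eth_classify_vxlan_cmd (header omitted) */
struct vxlan_cmd {
	uint32_t vni;           /* __le32 in the real struct */
	uint16_t inner_mac_lsb; /* __le16 in the real struct */
	uint16_t inner_mac_mid;
	uint16_t inner_mac_msb;
	uint16_t reserved1;
};

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xab, 0xcd, 0xef };
	struct vxlan_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.vni = htole32(5001);	/* 24-bit VXLAN network id */
	/* assumed split: msb holds bytes 0-1, mid 2-3, lsb 4-5 */
	cmd.inner_mac_msb = htole16((uint16_t)(mac[0] << 8 | mac[1]));
	cmd.inner_mac_mid = htole16((uint16_t)(mac[2] << 8 | mac[3]));
	cmd.inner_mac_lsb = htole16((uint16_t)(mac[4] << 8 | mac[5]));

	printf("vni=%u msb=%04x mid=%04x lsb=%04x\n",
	       le32toh(cmd.vni), le16toh(cmd.inner_mac_msb),
	       le16toh(cmd.inner_mac_mid), le16toh(cmd.inner_mac_lsb));
	return 0;
}
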
3749/*
3727 * union for eth classification rule 3750 * union for eth classification rule
3728 */ 3751 */
3729union eth_classify_rule_cmd { 3752union eth_classify_rule_cmd {
3730 struct eth_classify_mac_cmd mac; 3753 struct eth_classify_mac_cmd mac;
3731 struct eth_classify_vlan_cmd vlan; 3754 struct eth_classify_vlan_cmd vlan;
3732 struct eth_classify_pair_cmd pair; 3755 struct eth_classify_pair_cmd pair;
3756 struct eth_classify_vxlan_cmd vxlan;
3733}; 3757};
3734 3758
3735/* 3759/*
@@ -3835,8 +3859,10 @@ struct eth_fast_path_rx_cqe {
3835#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG_SHIFT 4 3859#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG_SHIFT 4
3836#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG (0x1<<5) 3860#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG (0x1<<5)
3837#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG_SHIFT 5 3861#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG_SHIFT 5
3838#define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x3<<6) 3862#define ETH_FAST_PATH_RX_CQE_PTP_PKT (0x1<<6)
3839#define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 6 3863#define ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT 6
3864#define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x1<<7)
3865#define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 7
3840 u8 status_flags; 3866 u8 status_flags;
3841#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0) 3867#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0)
3842#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0 3868#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0
@@ -3907,6 +3933,13 @@ struct eth_filter_rules_ramrod_data {
3907 struct eth_filter_rules_cmd rules[FILTER_RULES_COUNT]; 3933 struct eth_filter_rules_cmd rules[FILTER_RULES_COUNT];
3908}; 3934};
3909 3935
3936/* Hsi version */
3937enum eth_fp_hsi_ver {
3938 ETH_FP_HSI_VER_0,
3939 ETH_FP_HSI_VER_1,
3940 ETH_FP_HSI_VER_2,
3941 MAX_ETH_FP_HSI_VER
3942};
3910 3943
3911/* 3944/*
3912 * parameters for eth classification configuration ramrod 3945 * parameters for eth classification configuration ramrod
@@ -3955,29 +3988,17 @@ struct eth_mac_addresses {
3955 3988
3956/* tunneling related data */ 3989/* tunneling related data */
3957struct eth_tunnel_data { 3990struct eth_tunnel_data {
3958#if defined(__BIG_ENDIAN)
3959 __le16 dst_mid;
3960 __le16 dst_lo;
3961#elif defined(__LITTLE_ENDIAN)
3962 __le16 dst_lo; 3991 __le16 dst_lo;
3963 __le16 dst_mid; 3992 __le16 dst_mid;
3964#endif
3965#if defined(__BIG_ENDIAN)
3966 __le16 reserved0;
3967 __le16 dst_hi;
3968#elif defined(__LITTLE_ENDIAN)
3969 __le16 dst_hi; 3993 __le16 dst_hi;
3970 __le16 reserved0; 3994 __le16 fw_ip_hdr_csum;
3971#endif
3972#if defined(__BIG_ENDIAN)
3973 u8 reserved1;
3974 u8 ip_hdr_start_inner_w;
3975 __le16 pseudo_csum;
3976#elif defined(__LITTLE_ENDIAN)
3977 __le16 pseudo_csum; 3995 __le16 pseudo_csum;
3978 u8 ip_hdr_start_inner_w; 3996 u8 ip_hdr_start_inner_w;
3979 u8 reserved1; 3997 u8 flags;
3980#endif 3998#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER (0x1<<0)
3999#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER_SHIFT 0
4000#define ETH_TUNNEL_DATA_RESERVED (0x7F<<1)
4001#define ETH_TUNNEL_DATA_RESERVED_SHIFT 1
3981}; 4002};
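
The rewrite above can drop the __BIG_ENDIAN/__LITTLE_ENDIAN blocks because every member is now an explicit __le16 or a plain u8: the in-memory layout is identical on any host, and only the stores need a byte swap. A small standalone illustration of that point, with htole16() in place of the kernel's cpu_to_le16(); it prints 22 11 44 33 on big- and little-endian hosts alike:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* one fixed layout; no #ifdef per host endianness needed */
struct le_pair {
	uint16_t dst_lo;   /* __le16 in the kernel struct */
	uint16_t dst_mid;
};

int main(void)
{
	struct le_pair p;
	unsigned char *raw = (unsigned char *)&p;

	p.dst_lo  = htole16(0x1122); /* cpu_to_le16() in kernel code */
	p.dst_mid = htole16(0x3344);

	/* byte order in memory is little-endian on any host */
	printf("%02x %02x %02x %02x\n", raw[0], raw[1], raw[2], raw[3]);
	return 0;
}
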
3982 4003
3983/* union for mac addresses and for tunneling data. 4004/* union for mac addresses and for tunneling data.
@@ -4064,31 +4085,41 @@ enum eth_rss_mode {
4064 */ 4085 */
4065struct eth_rss_update_ramrod_data { 4086struct eth_rss_update_ramrod_data {
4066 u8 rss_engine_id; 4087 u8 rss_engine_id;
4067 u8 capabilities; 4088 u8 rss_mode;
4089 __le16 capabilities;
4068#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY (0x1<<0) 4090#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY (0x1<<0)
4069#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY_SHIFT 0 4091#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY_SHIFT 0
4070#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY (0x1<<1) 4092#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY (0x1<<1)
4071#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY_SHIFT 1 4093#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY_SHIFT 1
4072#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY (0x1<<2) 4094#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY (0x1<<2)
4073#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY_SHIFT 2 4095#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY_SHIFT 2
4074#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY (0x1<<3) 4096#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY (0x1<<3)
4075#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY_SHIFT 3 4097#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY_SHIFT 3
4076#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY (0x1<<4) 4098#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY (0x1<<4)
4077#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4 4099#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY_SHIFT 4
4078#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5) 4100#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY (0x1<<5)
4079#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5 4101#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 5
4080#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY (0x1<<6) 4102#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<6)
4081#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY_SHIFT 6 4103#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 6
4082#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<7) 4104#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY (0x1<<7)
4083#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 7 4105#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY_SHIFT 7
4106#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY (0x1<<8)
4107#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY_SHIFT 8
4108#define ETH_RSS_UPDATE_RAMROD_DATA_NVGRE_KEY_ENTROPY_CAPABILITY (0x1<<9)
4109#define ETH_RSS_UPDATE_RAMROD_DATA_NVGRE_KEY_ENTROPY_CAPABILITY_SHIFT 9
4110#define ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY (0x1<<10)
4111#define ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY_SHIFT 10
4112#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<11)
4113#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 11
4114#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED (0xF<<12)
4115#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED_SHIFT 12
4084 u8 rss_result_mask; 4116 u8 rss_result_mask;
4085 u8 rss_mode; 4117 u8 reserved3;
4086 __le16 udp_4tuple_dst_port_mask; 4118 __le16 reserved4;
4087 __le16 udp_4tuple_dst_port_value;
4088 u8 indirection_table[T_ETH_INDIRECTION_TABLE_SIZE]; 4119 u8 indirection_table[T_ETH_INDIRECTION_TABLE_SIZE];
4089 __le32 rss_key[T_ETH_RSS_KEY]; 4120 __le32 rss_key[T_ETH_RSS_KEY];
4090 __le32 echo; 4121 __le32 echo;
4091 __le32 reserved3; 4122 __le32 reserved5;
4092}; 4123};
4093 4124
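
Widening capabilities from u8 to __le16 makes room for the new VXLAN, NVGRE-key-entropy and GRE-inner-headers bits. A sketch of packing such a mask in the shift/mask style used above; the bit positions are copied from the new column, and htole16() models the final __le16 store:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define RSS_IPV4_CAPABILITY      (0x1 << 0)
#define RSS_IPV4_TCP_CAPABILITY  (0x1 << 1)
#define RSS_IPV6_CAPABILITY      (0x1 << 4)
#define RSS_IPV6_TCP_CAPABILITY  (0x1 << 5)
#define RSS_UPDATE_RSS_KEY       (0x1 << 11)

int main(void)
{
	uint16_t caps = 0;

	/* typical TCP/IP RSS configuration plus a key update */
	caps |= RSS_IPV4_CAPABILITY | RSS_IPV4_TCP_CAPABILITY;
	caps |= RSS_IPV6_CAPABILITY | RSS_IPV6_TCP_CAPABILITY;
	caps |= RSS_UPDATE_RSS_KEY;

	/* the ramrod field is __le16, so swap once when storing */
	printf("capabilities = 0x%04x (le: 0x%04x)\n", caps, htole16(caps));
	return 0;
}
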
4094 4125
@@ -4260,10 +4291,10 @@ enum eth_tunnel_lso_inc_ip_id {
4260/* In case tunnel exist and L4 checksum offload, 4291/* In case tunnel exist and L4 checksum offload,
4261 * the pseudo checksum location, on packet or on BD. 4292 * the pseudo checksum location, on packet or on BD.
4262 */ 4293 */
4263enum eth_tunnel_non_lso_pcsum_location { 4294enum eth_tunnel_non_lso_csum_location {
4264 PCSUM_ON_PKT, 4295 CSUM_ON_PKT,
4265 PCSUM_ON_BD, 4296 CSUM_ON_BD,
4266 MAX_ETH_TUNNEL_NON_LSO_PCSUM_LOCATION 4297 MAX_ETH_TUNNEL_NON_LSO_CSUM_LOCATION
4267}; 4298};
4268 4299
4269/* 4300/*
@@ -4310,8 +4341,10 @@ struct eth_tx_start_bd {
4310 __le16 vlan_or_ethertype; 4341 __le16 vlan_or_ethertype;
4311 struct eth_tx_bd_flags bd_flags; 4342 struct eth_tx_bd_flags bd_flags;
4312 u8 general_data; 4343 u8 general_data;
4313#define ETH_TX_START_BD_HDR_NBDS (0xF<<0) 4344#define ETH_TX_START_BD_HDR_NBDS (0x7<<0)
4314#define ETH_TX_START_BD_HDR_NBDS_SHIFT 0 4345#define ETH_TX_START_BD_HDR_NBDS_SHIFT 0
4346#define ETH_TX_START_BD_NO_ADDED_TAGS (0x1<<3)
4347#define ETH_TX_START_BD_NO_ADDED_TAGS_SHIFT 3
4315#define ETH_TX_START_BD_FORCE_VLAN_MODE (0x1<<4) 4348#define ETH_TX_START_BD_FORCE_VLAN_MODE (0x1<<4)
4316#define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4 4349#define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4
4317#define ETH_TX_START_BD_PARSE_NBDS (0x3<<5) 4350#define ETH_TX_START_BD_PARSE_NBDS (0x3<<5)
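
HDR_NBDS shrinks from four bits to three, freeing bit 3 for the new NO_ADDED_TAGS flag. A standalone sketch of reading and writing those general_data subfields with the usual mask-and-shift idiom (field values are illustrative):

#include <stdint.h>
#include <stdio.h>

#define HDR_NBDS_MASK        (0x7 << 0) /* was 0xF before this change */
#define HDR_NBDS_SHIFT       0
#define NO_ADDED_TAGS_MASK   (0x1 << 3) /* bit freed by shrinking NBDS */
#define NO_ADDED_TAGS_SHIFT  3

int main(void)
{
	uint8_t general_data = 0;

	/* header spans 2 BDs; ask FW not to add tags */
	general_data |= (2 << HDR_NBDS_SHIFT) & HDR_NBDS_MASK;
	general_data |= (1 << NO_ADDED_TAGS_SHIFT) & NO_ADDED_TAGS_MASK;

	printf("general_data = 0x%02x, nbds = %u, no_added_tags = %u\n",
	       general_data,
	       (general_data & HDR_NBDS_MASK) >> HDR_NBDS_SHIFT,
	       (general_data & NO_ADDED_TAGS_MASK) >> NO_ADDED_TAGS_SHIFT);
	return 0;
}
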
@@ -4387,8 +4420,8 @@ struct eth_tx_parse_2nd_bd {
4387 __le16 global_data; 4420 __le16 global_data;
4388#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W (0xF<<0) 4421#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W (0xF<<0)
4389#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W_SHIFT 0 4422#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W_SHIFT 0
4390#define ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER (0x1<<4) 4423#define ETH_TX_PARSE_2ND_BD_RESERVED0 (0x1<<4)
4391#define ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT 4 4424#define ETH_TX_PARSE_2ND_BD_RESERVED0_SHIFT 4
4392#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN (0x1<<5) 4425#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN (0x1<<5)
4393#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT 5 4426#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT 5
4394#define ETH_TX_PARSE_2ND_BD_NS_FLG (0x1<<6) 4427#define ETH_TX_PARSE_2ND_BD_NS_FLG (0x1<<6)
@@ -4397,9 +4430,14 @@ struct eth_tx_parse_2nd_bd {
4397#define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST_SHIFT 7 4430#define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST_SHIFT 7
4398#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W (0x1F<<8) 4431#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W (0x1F<<8)
4399#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT 8 4432#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT 8
4400#define ETH_TX_PARSE_2ND_BD_RESERVED0 (0x7<<13) 4433#define ETH_TX_PARSE_2ND_BD_RESERVED1 (0x7<<13)
4401#define ETH_TX_PARSE_2ND_BD_RESERVED0_SHIFT 13 4434#define ETH_TX_PARSE_2ND_BD_RESERVED1_SHIFT 13
4402 __le16 reserved1; 4435 u8 bd_type;
4436#define ETH_TX_PARSE_2ND_BD_TYPE (0xF<<0)
4437#define ETH_TX_PARSE_2ND_BD_TYPE_SHIFT 0
4438#define ETH_TX_PARSE_2ND_BD_RESERVED2 (0xF<<4)
4439#define ETH_TX_PARSE_2ND_BD_RESERVED2_SHIFT 4
4440 u8 reserved3;
4403 u8 tcp_flags; 4441 u8 tcp_flags;
4404#define ETH_TX_PARSE_2ND_BD_FIN_FLG (0x1<<0) 4442#define ETH_TX_PARSE_2ND_BD_FIN_FLG (0x1<<0)
4405#define ETH_TX_PARSE_2ND_BD_FIN_FLG_SHIFT 0 4443#define ETH_TX_PARSE_2ND_BD_FIN_FLG_SHIFT 0
@@ -4417,7 +4455,7 @@ struct eth_tx_parse_2nd_bd {
4417#define ETH_TX_PARSE_2ND_BD_ECE_FLG_SHIFT 6 4455#define ETH_TX_PARSE_2ND_BD_ECE_FLG_SHIFT 6
4418#define ETH_TX_PARSE_2ND_BD_CWR_FLG (0x1<<7) 4456#define ETH_TX_PARSE_2ND_BD_CWR_FLG (0x1<<7)
4419#define ETH_TX_PARSE_2ND_BD_CWR_FLG_SHIFT 7 4457#define ETH_TX_PARSE_2ND_BD_CWR_FLG_SHIFT 7
4420 u8 reserved2; 4458 u8 reserved4;
4421 u8 tunnel_udp_hdr_start_w; 4459 u8 tunnel_udp_hdr_start_w;
4422 u8 fw_ip_hdr_to_payload_w; 4460 u8 fw_ip_hdr_to_payload_w;
4423 __le16 fw_ip_csum_wo_len_flags_frag; 4461 __le16 fw_ip_csum_wo_len_flags_frag;
@@ -5205,10 +5243,18 @@ struct function_start_data {
5205 u8 path_id; 5243 u8 path_id;
5206 u8 network_cos_mode; 5244 u8 network_cos_mode;
5207 u8 dmae_cmd_id; 5245 u8 dmae_cmd_id;
5208 u8 gre_tunnel_mode; 5246 u8 tunnel_mode;
5209 u8 gre_tunnel_rss; 5247 u8 gre_tunnel_type;
5210 u8 nvgre_clss_en; 5248 u8 tunn_clss_en;
5211 __le16 reserved1[2]; 5249 u8 inner_gre_rss_en;
5250 u8 sd_accept_mf_clss_fail;
5251 __le16 vxlan_dst_port;
5252 __le16 sd_accept_mf_clss_fail_ethtype;
5253 __le16 sd_vlan_eth_type;
5254 u8 sd_vlan_force_pri_flg;
5255 u8 sd_vlan_force_pri_val;
5256 u8 sd_accept_mf_clss_fail_match_ethtype;
5257 u8 no_added_tags;
5212}; 5258};
5213 5259
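
function_start_data grows from three GRE-only knobs into a general tunnel configuration: a tunnel_mode selector, the GRE subtype, classification and inner-GRE RSS enables, and a VXLAN destination port. A hedged sketch of a VXLAN setup against a trimmed userspace model of those fields; the tunnel_mode values come from the tunnel_mode enum added later in this diff, and the port is simply the IANA default:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum tunnel_mode { TUNN_MODE_NONE, TUNN_MODE_VXLAN, TUNN_MODE_GRE };

/* trimmed model of the new tunnel fields in function_start_data */
struct func_start_tunnel_cfg {
	uint8_t  tunnel_mode;
	uint8_t  gre_tunnel_type;  /* only meaningful for TUNN_MODE_GRE */
	uint8_t  tunn_clss_en;
	uint8_t  inner_gre_rss_en;
	uint16_t vxlan_dst_port;   /* __le16 in the real struct */
};

int main(void)
{
	struct func_start_tunnel_cfg cfg;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tunnel_mode    = TUNN_MODE_VXLAN;
	cfg.tunn_clss_en   = 1;             /* classify by inner headers */
	cfg.vxlan_dst_port = htole16(4789); /* IANA VXLAN UDP port */

	printf("mode=%u clss=%u port=%u\n", cfg.tunnel_mode,
	       cfg.tunn_clss_en, le16toh(cfg.vxlan_dst_port));
	return 0;
}
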
5214struct function_update_data { 5260struct function_update_data {
@@ -5225,12 +5271,20 @@ struct function_update_data {
5225 u8 tx_switch_suspend_change_flg; 5271 u8 tx_switch_suspend_change_flg;
5226 u8 tx_switch_suspend; 5272 u8 tx_switch_suspend;
5227 u8 echo; 5273 u8 echo;
5274 u8 update_tunn_cfg_flg;
5275 u8 tunnel_mode;
5276 u8 gre_tunnel_type;
5277 u8 tunn_clss_en;
5278 u8 inner_gre_rss_en;
5279 __le16 vxlan_dst_port;
5280 u8 sd_vlan_force_pri_change_flg;
5281 u8 sd_vlan_force_pri_flg;
5282 u8 sd_vlan_force_pri_val;
5283 u8 sd_vlan_tag_change_flg;
5284 u8 sd_vlan_eth_type_change_flg;
5228 u8 reserved1; 5285 u8 reserved1;
5229 u8 update_gre_cfg_flg; 5286 __le16 sd_vlan_tag;
5230 u8 gre_tunnel_mode; 5287 __le16 sd_vlan_eth_type;
5231 u8 gre_tunnel_rss;
5232 u8 nvgre_clss_en;
5233 u32 reserved3;
5234}; 5288};
5235 5289
5236/* 5290/*
@@ -5259,17 +5313,9 @@ struct fw_version {
5259#define __FW_VERSION_RESERVED_SHIFT 4 5313#define __FW_VERSION_RESERVED_SHIFT 4
5260}; 5314};
5261 5315
5262/* GRE RSS Mode */
5263enum gre_rss_mode {
5264 GRE_OUTER_HEADERS_RSS,
5265 GRE_INNER_HEADERS_RSS,
5266 NVGRE_KEY_ENTROPY_RSS,
5267 MAX_GRE_RSS_MODE
5268};
5269 5316
5270/* GRE Tunnel Mode */ 5317/* GRE Tunnel Mode */
5271enum gre_tunnel_type { 5318enum gre_tunnel_type {
5272 NO_GRE_TUNNEL,
5273 NVGRE_TUNNEL, 5319 NVGRE_TUNNEL,
5274 L2GRE_TUNNEL, 5320 L2GRE_TUNNEL,
5275 IPGRE_TUNNEL, 5321 IPGRE_TUNNEL,
@@ -5442,6 +5488,7 @@ enum ip_ver {
5442 * Malicious VF error ID 5488 * Malicious VF error ID
5443 */ 5489 */
5444enum malicious_vf_error_id { 5490enum malicious_vf_error_id {
5491 MALICIOUS_VF_NO_ERROR,
5445 VF_PF_CHANNEL_NOT_READY, 5492 VF_PF_CHANNEL_NOT_READY,
5446 ETH_ILLEGAL_BD_LENGTHS, 5493 ETH_ILLEGAL_BD_LENGTHS,
5447 ETH_PACKET_TOO_SHORT, 5494 ETH_PACKET_TOO_SHORT,
@@ -5602,6 +5649,16 @@ struct protocol_common_spe {
5602 union protocol_common_specific_data data; 5649 union protocol_common_specific_data data;
5603}; 5650};
5604 5651
5652/* The data for the Set Timesync Ramrod */
5653struct set_timesync_ramrod_data {
5654 u8 drift_adjust_cmd;
5655 u8 offset_cmd;
5656 u8 add_sub_drift_adjust_value;
5657 u8 drift_adjust_value;
5658 u32 drift_adjust_period;
5659 struct regpair offset_delta;
5660};
5661
5605/* 5662/*
5606 * The send queue element 5663 * The send queue element
5607 */ 5664 */
@@ -5724,10 +5781,38 @@ struct tstorm_vf_zone_data {
5724 struct regpair reserved; 5781 struct regpair reserved;
5725}; 5782};
5726 5783
5784/* Add or Subtract Value for Set Timesync Ramrod */
5785enum ts_add_sub_value {
5786 TS_SUB_VALUE,
5787 TS_ADD_VALUE,
5788 MAX_TS_ADD_SUB_VALUE
5789};
5727 5790
5728/* 5791/* Drift-Adjust Commands for Set Timesync Ramrod */
5729 * zone A per-queue data 5792enum ts_drift_adjust_cmd {
5730 */ 5793 TS_DRIFT_ADJUST_KEEP,
5794 TS_DRIFT_ADJUST_SET,
5795 TS_DRIFT_ADJUST_RESET,
5796 MAX_TS_DRIFT_ADJUST_CMD
5797};
5798
5799/* Offset Commands for Set Timesync Ramrod */
5800enum ts_offset_cmd {
5801 TS_OFFSET_KEEP,
5802 TS_OFFSET_INC,
5803 TS_OFFSET_DEC,
5804 MAX_TS_OFFSET_CMD
5805};
5806
5807/* Tunnel Mode */
5808enum tunnel_mode {
5809 TUNN_MODE_NONE,
5810 TUNN_MODE_VXLAN,
5811 TUNN_MODE_GRE,
5812 MAX_TUNNEL_MODE
5813};
5814
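
Tying the timesync pieces together: set_timesync_ramrod_data above is driven by these ts_* enums, e.g. a drift adjustment that adds drift_adjust_value ticks every drift_adjust_period cycles. A standalone model with illustrative numbers (the regpair offset_delta member is elided):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum ts_add_sub_value    { TS_SUB_VALUE, TS_ADD_VALUE };
enum ts_drift_adjust_cmd { TS_DRIFT_ADJUST_KEEP, TS_DRIFT_ADJUST_SET,
			   TS_DRIFT_ADJUST_RESET };
enum ts_offset_cmd       { TS_OFFSET_KEEP, TS_OFFSET_INC, TS_OFFSET_DEC };

/* model of set_timesync_ramrod_data (offset_delta regpair elided) */
struct set_timesync_data {
	uint8_t  drift_adjust_cmd;
	uint8_t  offset_cmd;
	uint8_t  add_sub_drift_adjust_value;
	uint8_t  drift_adjust_value;
	uint32_t drift_adjust_period;
};

int main(void)
{
	struct set_timesync_data d;

	memset(&d, 0, sizeof(d));
	d.drift_adjust_cmd           = TS_DRIFT_ADJUST_SET;
	d.offset_cmd                 = TS_OFFSET_KEEP; /* leave offset alone */
	d.add_sub_drift_adjust_value = TS_ADD_VALUE;   /* clock runs slow */
	d.drift_adjust_value         = 3;              /* ticks per period */
	d.drift_adjust_period        = 1000;           /* illustrative */

	printf("cmd=%u add/sub=%u val=%u period=%u\n", d.drift_adjust_cmd,
	       d.add_sub_drift_adjust_value, d.drift_adjust_value,
	       d.drift_adjust_period);
	return 0;
}
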
5815 /* zone A per-queue data */
5731struct ustorm_queue_zone_data { 5816struct ustorm_queue_zone_data {
5732 struct ustorm_eth_rx_producers eth_rx_producers; 5817 struct ustorm_eth_rx_producers eth_rx_producers;
5733 struct regpair reserved[3]; 5818 struct regpair reserved[3];
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index d1c093dcb054..32e2444ab5e1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -41,6 +41,7 @@
41#include <linux/ethtool.h> 41#include <linux/ethtool.h>
42#include <linux/mii.h> 42#include <linux/mii.h>
43#include <linux/if_vlan.h> 43#include <linux/if_vlan.h>
44#include <linux/crash_dump.h>
44#include <net/ip.h> 45#include <net/ip.h>
45#include <net/ipv6.h> 46#include <net/ipv6.h>
46#include <net/tcp.h> 47#include <net/tcp.h>
@@ -63,7 +64,6 @@
63#include "bnx2x_vfpf.h" 64#include "bnx2x_vfpf.h"
64#include "bnx2x_dcb.h" 65#include "bnx2x_dcb.h"
65#include "bnx2x_sp.h" 66#include "bnx2x_sp.h"
66
67#include <linux/firmware.h> 67#include <linux/firmware.h>
68#include "bnx2x_fw_file_hdr.h" 68#include "bnx2x_fw_file_hdr.h"
69/* FW files */ 69/* FW files */
@@ -290,6 +290,8 @@ static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);
290* General service functions 290* General service functions
291****************************************************************************/ 291****************************************************************************/
292 292
293static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr);
294
293static void __storm_memset_dma_mapping(struct bnx2x *bp, 295static void __storm_memset_dma_mapping(struct bnx2x *bp,
294 u32 addr, dma_addr_t mapping) 296 u32 addr, dma_addr_t mapping)
295{ 297{
@@ -523,6 +525,7 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
523 * as long as this code is called both from syscall context and 525 * as long as this code is called both from syscall context and
524 * from ndo_set_rx_mode() flow that may be called from BH. 526 * from ndo_set_rx_mode() flow that may be called from BH.
525 */ 527 */
528
526 spin_lock_bh(&bp->dmae_lock); 529 spin_lock_bh(&bp->dmae_lock);
527 530
528 /* reset completion */ 531 /* reset completion */
@@ -551,7 +554,9 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
551 } 554 }
552 555
553unlock: 556unlock:
557
554 spin_unlock_bh(&bp->dmae_lock); 558 spin_unlock_bh(&bp->dmae_lock);
559
555 return rc; 560 return rc;
556} 561}
557 562
@@ -646,119 +651,98 @@ static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
646 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len); 651 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
647} 652}
648 653
654enum storms {
655 XSTORM,
656 TSTORM,
657 CSTORM,
658 USTORM,
659 MAX_STORMS
660};
661
662#define STORMS_NUM 4
663#define REGS_IN_ENTRY 4
664
665static inline int bnx2x_get_assert_list_entry(struct bnx2x *bp,
666 enum storms storm,
667 int entry)
668{
669 switch (storm) {
670 case XSTORM:
671 return XSTORM_ASSERT_LIST_OFFSET(entry);
672 case TSTORM:
673 return TSTORM_ASSERT_LIST_OFFSET(entry);
674 case CSTORM:
675 return CSTORM_ASSERT_LIST_OFFSET(entry);
676 case USTORM:
677 return USTORM_ASSERT_LIST_OFFSET(entry);
678 case MAX_STORMS:
679 default:
680 BNX2X_ERR("unknown storm\n");
681 }
682 return -EINVAL;
683}
684
649static int bnx2x_mc_assert(struct bnx2x *bp) 685static int bnx2x_mc_assert(struct bnx2x *bp)
650{ 686{
651 char last_idx; 687 char last_idx;
652 int i, rc = 0; 688 int i, j, rc = 0;
653 u32 row0, row1, row2, row3; 689 enum storms storm;
654 690 u32 regs[REGS_IN_ENTRY];
655 /* XSTORM */ 691 u32 bar_storm_intmem[STORMS_NUM] = {
656 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM + 692 BAR_XSTRORM_INTMEM,
657 XSTORM_ASSERT_LIST_INDEX_OFFSET); 693 BAR_TSTRORM_INTMEM,
658 if (last_idx) 694 BAR_CSTRORM_INTMEM,
659 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 695 BAR_USTRORM_INTMEM
660 696 };
661 /* print the asserts */ 697 u32 storm_assert_list_index[STORMS_NUM] = {
662 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { 698 XSTORM_ASSERT_LIST_INDEX_OFFSET,
663 699 TSTORM_ASSERT_LIST_INDEX_OFFSET,
664 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM + 700 CSTORM_ASSERT_LIST_INDEX_OFFSET,
665 XSTORM_ASSERT_LIST_OFFSET(i)); 701 USTORM_ASSERT_LIST_INDEX_OFFSET
666 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM + 702 };
667 XSTORM_ASSERT_LIST_OFFSET(i) + 4); 703 char *storms_string[STORMS_NUM] = {
668 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM + 704 "XSTORM",
669 XSTORM_ASSERT_LIST_OFFSET(i) + 8); 705 "TSTORM",
670 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM + 706 "CSTORM",
671 XSTORM_ASSERT_LIST_OFFSET(i) + 12); 707 "USTORM"
672 708 };
673 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
674 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
675 i, row3, row2, row1, row0);
676 rc++;
677 } else {
678 break;
679 }
680 }
681
682 /* TSTORM */
683 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
684 TSTORM_ASSERT_LIST_INDEX_OFFSET);
685 if (last_idx)
686 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
687
688 /* print the asserts */
689 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
690
691 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
692 TSTORM_ASSERT_LIST_OFFSET(i));
693 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
694 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
695 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
696 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
697 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
698 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
699
700 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
701 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
702 i, row3, row2, row1, row0);
703 rc++;
704 } else {
705 break;
706 }
707 }
708 709
709 /* CSTORM */ 710 for (storm = XSTORM; storm < MAX_STORMS; storm++) {
710 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM + 711 last_idx = REG_RD8(bp, bar_storm_intmem[storm] +
711 CSTORM_ASSERT_LIST_INDEX_OFFSET); 712 storm_assert_list_index[storm]);
712 if (last_idx) 713 if (last_idx)
713 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 714 BNX2X_ERR("%s_ASSERT_LIST_INDEX 0x%x\n",
714 715 storms_string[storm], last_idx);
715 /* print the asserts */ 716
716 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { 717 /* print the asserts */
717 718 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
718 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM + 719 /* read a single assert entry */
719 CSTORM_ASSERT_LIST_OFFSET(i)); 720 for (j = 0; j < REGS_IN_ENTRY; j++)
720 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM + 721 regs[j] = REG_RD(bp, bar_storm_intmem[storm] +
721 CSTORM_ASSERT_LIST_OFFSET(i) + 4); 722 bnx2x_get_assert_list_entry(bp,
722 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM + 723 storm,
723 CSTORM_ASSERT_LIST_OFFSET(i) + 8); 724 i) +
724 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM + 725 sizeof(u32) * j);
725 CSTORM_ASSERT_LIST_OFFSET(i) + 12); 726
726 727 /* log entry if it contains a valid assert */
727 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 728 if (regs[0] != COMMON_ASM_INVALID_ASSERT_OPCODE) {
728 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", 729 BNX2X_ERR("%s_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
729 i, row3, row2, row1, row0); 730 storms_string[storm], i, regs[3],
730 rc++; 731 regs[2], regs[1], regs[0]);
731 } else { 732 rc++;
732 break; 733 } else {
734 break;
735 }
733 } 736 }
734 } 737 }
735 738
736 /* USTORM */ 739 BNX2X_ERR("Chip Revision: %s, FW Version: %d_%d_%d\n",
737 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM + 740 CHIP_IS_E1(bp) ? "everest1" :
738 USTORM_ASSERT_LIST_INDEX_OFFSET); 741 CHIP_IS_E1H(bp) ? "everest1h" :
739 if (last_idx) 742 CHIP_IS_E2(bp) ? "everest2" : "everest3",
740 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); 743 BCM_5710_FW_MAJOR_VERSION,
741 744 BCM_5710_FW_MINOR_VERSION,
742 /* print the asserts */ 745 BCM_5710_FW_REVISION_VERSION);
743 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
744
745 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
746 USTORM_ASSERT_LIST_OFFSET(i));
747 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
748 USTORM_ASSERT_LIST_OFFSET(i) + 4);
749 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
750 USTORM_ASSERT_LIST_OFFSET(i) + 8);
751 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
752 USTORM_ASSERT_LIST_OFFSET(i) + 12);
753
754 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
755 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
756 i, row3, row2, row1, row0);
757 rc++;
758 } else {
759 break;
760 }
761 }
762 746
763 return rc; 747 return rc;
764} 748}
@@ -983,6 +967,12 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
983 u32 *sb_data_p; 967 u32 *sb_data_p;
984 struct bnx2x_fp_txdata txdata; 968 struct bnx2x_fp_txdata txdata;
985 969
970 if (!bp->fp)
971 break;
972
973 if (!fp->rx_cons_sb)
974 continue;
975
986 /* Rx */ 976 /* Rx */
987 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n", 977 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
988 i, fp->rx_bd_prod, fp->rx_bd_cons, 978 i, fp->rx_bd_prod, fp->rx_bd_cons,
@@ -995,7 +985,14 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
995 /* Tx */ 985 /* Tx */
996 for_each_cos_in_tx_queue(fp, cos) 986 for_each_cos_in_tx_queue(fp, cos)
997 { 987 {
988 if (!fp->txdata_ptr[cos])
989 break;
990
998 txdata = *fp->txdata_ptr[cos]; 991 txdata = *fp->txdata_ptr[cos];
992
993 if (!txdata.tx_cons_sb)
994 continue;
995
999 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n", 996 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n",
1000 i, txdata.tx_pkt_prod, 997 i, txdata.tx_pkt_prod,
1001 txdata.tx_pkt_cons, txdata.tx_bd_prod, 998 txdata.tx_pkt_cons, txdata.tx_bd_prod,
@@ -1097,6 +1094,12 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
1097 for_each_valid_rx_queue(bp, i) { 1094 for_each_valid_rx_queue(bp, i) {
1098 struct bnx2x_fastpath *fp = &bp->fp[i]; 1095 struct bnx2x_fastpath *fp = &bp->fp[i];
1099 1096
1097 if (!bp->fp)
1098 break;
1099
1100 if (!fp->rx_cons_sb)
1101 continue;
1102
1100 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10); 1103 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
1101 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503); 1104 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
1102 for (j = start; j != end; j = RX_BD(j + 1)) { 1105 for (j = start; j != end; j = RX_BD(j + 1)) {
@@ -1130,9 +1133,19 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
1130 /* Tx */ 1133 /* Tx */
1131 for_each_valid_tx_queue(bp, i) { 1134 for_each_valid_tx_queue(bp, i) {
1132 struct bnx2x_fastpath *fp = &bp->fp[i]; 1135 struct bnx2x_fastpath *fp = &bp->fp[i];
1136
1137 if (!bp->fp)
1138 break;
1139
1133 for_each_cos_in_tx_queue(fp, cos) { 1140 for_each_cos_in_tx_queue(fp, cos) {
1134 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; 1141 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1135 1142
1143 if (!fp->txdata_ptr[cos])
1144 break;
1145
1146 if (!txdata->tx_cons_sb)
1147 continue;
1148
1136 start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10); 1149 start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
1137 end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245); 1150 end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
1138 for (j = start; j != end; j = TX_BD(j + 1)) { 1151 for (j = start; j != end; j = TX_BD(j + 1)) {
@@ -2071,8 +2084,6 @@ int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
2071 else 2084 else
2072 value = 0; 2085 value = 0;
2073 2086
2074 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
2075
2076 return value; 2087 return value;
2077} 2088}
2078 2089
@@ -4678,7 +4689,7 @@ static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
4678 for (i = 0; sig; i++) { 4689 for (i = 0; sig; i++) {
4679 cur_bit = (0x1UL << i); 4690 cur_bit = (0x1UL << i);
4680 if (sig & cur_bit) { 4691 if (sig & cur_bit) {
4681 res |= true; /* Each bit is real error! */ 4692 res = true; /* Each bit is real error! */
4682 if (print) { 4693 if (print) {
4683 switch (cur_bit) { 4694 switch (cur_bit) {
4684 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: 4695 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
@@ -4757,21 +4768,21 @@ static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
4757 _print_next_block((*par_num)++, 4768 _print_next_block((*par_num)++,
4758 "MCP ROM"); 4769 "MCP ROM");
4759 *global = true; 4770 *global = true;
4760 res |= true; 4771 res = true;
4761 break; 4772 break;
4762 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: 4773 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
4763 if (print) 4774 if (print)
4764 _print_next_block((*par_num)++, 4775 _print_next_block((*par_num)++,
4765 "MCP UMP RX"); 4776 "MCP UMP RX");
4766 *global = true; 4777 *global = true;
4767 res |= true; 4778 res = true;
4768 break; 4779 break;
4769 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: 4780 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
4770 if (print) 4781 if (print)
4771 _print_next_block((*par_num)++, 4782 _print_next_block((*par_num)++,
4772 "MCP UMP TX"); 4783 "MCP UMP TX");
4773 *global = true; 4784 *global = true;
4774 res |= true; 4785 res = true;
4775 break; 4786 break;
4776 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: 4787 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
4777 if (print) 4788 if (print)
@@ -4803,7 +4814,7 @@ static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
4803 for (i = 0; sig; i++) { 4814 for (i = 0; sig; i++) {
4804 cur_bit = (0x1UL << i); 4815 cur_bit = (0x1UL << i);
4805 if (sig & cur_bit) { 4816 if (sig & cur_bit) {
4806 res |= true; /* Each bit is real error! */ 4817 res = true; /* Each bit is real error! */
4807 if (print) { 4818 if (print) {
4808 switch (cur_bit) { 4819 switch (cur_bit) {
4809 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: 4820 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
@@ -5452,6 +5463,14 @@ static void bnx2x_eq_int(struct bnx2x *bp)
5452 break; 5463 break;
5453 5464
5454 goto next_spqe; 5465 goto next_spqe;
5466
5467 case EVENT_RING_OPCODE_SET_TIMESYNC:
5468 DP(BNX2X_MSG_SP | BNX2X_MSG_PTP,
5469 "got set_timesync ramrod completion\n");
5470 if (f_obj->complete_cmd(bp, f_obj,
5471 BNX2X_F_CMD_SET_TIMESYNC))
5472 break;
5473 goto next_spqe;
5455 } 5474 }
5456 5475
5457 switch (opcode | bp->state) { 5476 switch (opcode | bp->state) {
@@ -6102,7 +6121,7 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
6102 } 6121 }
6103 6122
6104 /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */ 6123 /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
6105 if (bp->rx_mode != BNX2X_RX_MODE_NONE) { 6124 if (rx_mode != BNX2X_RX_MODE_NONE) {
6106 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags); 6125 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
6107 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags); 6126 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
6108 } 6127 }
@@ -7662,7 +7681,11 @@ static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend)
7662 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE; 7681 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
7663 7682
7664 /* Function parameters */ 7683 /* Function parameters */
7665 switch_update_params->suspend = suspend; 7684 __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
7685 &switch_update_params->changes);
7686 if (suspend)
7687 __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
7688 &switch_update_params->changes);
7666 7689
7667 rc = bnx2x_func_state_change(bp, &func_params); 7690 rc = bnx2x_func_state_change(bp, &func_params);
7668 7691
@@ -9025,7 +9048,7 @@ static int bnx2x_func_wait_started(struct bnx2x *bp)
9025 struct bnx2x_func_state_params func_params = {NULL}; 9048 struct bnx2x_func_state_params func_params = {NULL};
9026 9049
9027 DP(NETIF_MSG_IFDOWN, 9050 DP(NETIF_MSG_IFDOWN,
9028 "Hmmm... Unexpected function state! Forcing STARTED-->TX_ST0PPED-->STARTED\n"); 9051 "Hmmm... Unexpected function state! Forcing STARTED-->TX_STOPPED-->STARTED\n");
9029 9052
9030 func_params.f_obj = &bp->func_obj; 9053 func_params.f_obj = &bp->func_obj;
9031 __set_bit(RAMROD_DRV_CLR_ONLY, 9054 __set_bit(RAMROD_DRV_CLR_ONLY,
@@ -9044,6 +9067,48 @@ static int bnx2x_func_wait_started(struct bnx2x *bp)
9044 return 0; 9067 return 0;
9045} 9068}
9046 9069
9070static void bnx2x_disable_ptp(struct bnx2x *bp)
9071{
9072 int port = BP_PORT(bp);
9073
9074 /* Disable sending PTP packets to host */
9075 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
9076 NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);
9077
9078 /* Reset PTP event detection rules */
9079 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
9080 NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
9081 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
9082 NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
9083 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
9084 NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
9085 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
9086 NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);
9087
9088 /* Disable the PTP feature */
9089 REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
9090 NIG_REG_P0_PTP_EN, 0x0);
9091}
9092
9093/* Called during unload, to stop PTP-related stuff */
9094void bnx2x_stop_ptp(struct bnx2x *bp)
9095{
9096 /* Cancel PTP work queue. Should be done after the Tx queues are
9097 * drained to prevent additional scheduling.
9098 */
9099 cancel_work_sync(&bp->ptp_task);
9100
9101 if (bp->ptp_tx_skb) {
9102 dev_kfree_skb_any(bp->ptp_tx_skb);
9103 bp->ptp_tx_skb = NULL;
9104 }
9105
9106 /* Disable PTP in HW */
9107 bnx2x_disable_ptp(bp);
9108
9109 DP(BNX2X_MSG_PTP, "PTP stop ended successfully\n");
9110}
9111
9047void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link) 9112void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
9048{ 9113{
9049 int port = BP_PORT(bp); 9114 int port = BP_PORT(bp);
@@ -9162,6 +9227,13 @@ unload_error:
9162#endif 9227#endif
9163 } 9228 }
9164 9229
9230 /* stop_ptp should be after the Tx queues are drained to prevent
9231 * scheduling to the cancelled PTP work queue. It should also be after
9232 * function stop ramrod is sent, since as part of this ramrod FW access
9233 * PTP registers.
9234 */
9235 bnx2x_stop_ptp(bp);
9236
9165 /* Disable HW interrupts, NAPI */ 9237 /* Disable HW interrupts, NAPI */
9166 bnx2x_netif_stop(bp, 1); 9238 bnx2x_netif_stop(bp, 1);
9167 /* Delete all NAPI objects */ 9239 /* Delete all NAPI objects */
@@ -11900,7 +11972,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
11900 bp->disable_tpa = disable_tpa; 11972 bp->disable_tpa = disable_tpa;
11901 bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp); 11973 bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp);
11902 /* Reduce memory usage in kdump environment by disabling TPA */ 11974 /* Reduce memory usage in kdump environment by disabling TPA */
11903 bp->disable_tpa |= reset_devices; 11975 bp->disable_tpa |= is_kdump_kernel();
11904 11976
11905 /* Set TPA flags */ 11977 /* Set TPA flags */
11906 if (bp->disable_tpa) { 11978 if (bp->disable_tpa) {
@@ -11976,6 +12048,9 @@ static int bnx2x_init_bp(struct bnx2x *bp)
11976 12048
11977 bp->dump_preset_idx = 1; 12049 bp->dump_preset_idx = 1;
11978 12050
12051 if (CHIP_IS_E3B0(bp))
12052 bp->flags |= PTP_SUPPORTED;
12053
11979 return rc; 12054 return rc;
11980} 12055}
11981 12056
@@ -12308,13 +12383,17 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12308 struct bnx2x *bp = netdev_priv(dev); 12383 struct bnx2x *bp = netdev_priv(dev);
12309 struct mii_ioctl_data *mdio = if_mii(ifr); 12384 struct mii_ioctl_data *mdio = if_mii(ifr);
12310 12385
12311 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
12312 mdio->phy_id, mdio->reg_num, mdio->val_in);
12313
12314 if (!netif_running(dev)) 12386 if (!netif_running(dev))
12315 return -EAGAIN; 12387 return -EAGAIN;
12316 12388
12317 return mdio_mii_ioctl(&bp->mdio, mdio, cmd); 12389 switch (cmd) {
12390 case SIOCSHWTSTAMP:
12391 return bnx2x_hwtstamp_ioctl(bp, ifr);
12392 default:
12393 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
12394 mdio->phy_id, mdio->reg_num, mdio->val_in);
12395 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
12396 }
12318} 12397}
12319 12398
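
With the new switch, SIOCSHWTSTAMP is routed to bnx2x_hwtstamp_ioctl() instead of falling through to the MDIO handler. The userspace request that branch serves is the standard hwtstamp_config ioctl, roughly as below (the interface name and filter choice are illustrative):

#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) { perror("socket"); return 1; }

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type   = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* illustrative name */
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else
		printf("hw timestamping enabled\n");

	close(fd);
	return 0;
}
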
12320#ifdef CONFIG_NET_POLL_CONTROLLER 12399#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -12958,6 +13037,191 @@ static int set_is_vf(int chip_id)
12958 } 13037 }
12959} 13038}
12960 13039
13040/* nig_tsgen registers relative address */
13041#define tsgen_ctrl 0x0
13042#define tsgen_freecount 0x10
13043#define tsgen_synctime_t0 0x20
13044#define tsgen_offset_t0 0x28
13045#define tsgen_drift_t0 0x30
13046#define tsgen_synctime_t1 0x58
13047#define tsgen_offset_t1 0x60
13048#define tsgen_drift_t1 0x68
13049
13050/* FW workaround for setting drift */
13051static int bnx2x_send_update_drift_ramrod(struct bnx2x *bp, int drift_dir,
13052 int best_val, int best_period)
13053{
13054 struct bnx2x_func_state_params func_params = {NULL};
13055 struct bnx2x_func_set_timesync_params *set_timesync_params =
13056 &func_params.params.set_timesync;
13057
13058 /* Prepare parameters for function state transitions */
13059 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
13060 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
13061
13062 func_params.f_obj = &bp->func_obj;
13063 func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;
13064
13065 /* Function parameters */
13066 set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_SET;
13067 set_timesync_params->offset_cmd = TS_OFFSET_KEEP;
13068 set_timesync_params->add_sub_drift_adjust_value =
13069 drift_dir ? TS_ADD_VALUE : TS_SUB_VALUE;
13070 set_timesync_params->drift_adjust_value = best_val;
13071 set_timesync_params->drift_adjust_period = best_period;
13072
13073 return bnx2x_func_state_change(bp, &func_params);
13074}
13075
13076static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
13077{
13078 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13079 int rc;
13080 int drift_dir = 1;
13081 int val, period, period1, period2, dif, dif1, dif2;
13082 int best_dif = BNX2X_MAX_PHC_DRIFT, best_period = 0, best_val = 0;
13083
13084 DP(BNX2X_MSG_PTP, "PTP adjfreq called, ppb = %d\n", ppb);
13085
13086 if (!netif_running(bp->dev)) {
13087 DP(BNX2X_MSG_PTP,
13088 "PTP adjfreq called while the interface is down\n");
13089 return -EFAULT;
13090 }
13091
13092 if (ppb < 0) {
13093 ppb = -ppb;
13094 drift_dir = 0;
13095 }
13096
13097 if (ppb == 0) {
13098 best_val = 1;
13099 best_period = 0x1FFFFFF;
13100 } else if (ppb >= BNX2X_MAX_PHC_DRIFT) {
13101 best_val = 31;
13102 best_period = 1;
13103 } else {
13104 /* Changed not to allow val = 8, 16, 24 as these values
13105 * are not supported in the workaround.
13106 */
13107 for (val = 0; val <= 31; val++) {
13108 if ((val & 0x7) == 0)
13109 continue;
13110 period1 = val * 1000000 / ppb;
13111 period2 = period1 + 1;
13112 if (period1 != 0)
13113 dif1 = ppb - (val * 1000000 / period1);
13114 else
13115 dif1 = BNX2X_MAX_PHC_DRIFT;
13116 if (dif1 < 0)
13117 dif1 = -dif1;
13118 dif2 = ppb - (val * 1000000 / period2);
13119 if (dif2 < 0)
13120 dif2 = -dif2;
13121 dif = (dif1 < dif2) ? dif1 : dif2;
13122 period = (dif1 < dif2) ? period1 : period2;
13123 if (dif < best_dif) {
13124 best_dif = dif;
13125 best_val = val;
13126 best_period = period;
13127 }
13128 }
13129 }
13130
13131 rc = bnx2x_send_update_drift_ramrod(bp, drift_dir, best_val,
13132 best_period);
13133 if (rc) {
13134 BNX2X_ERR("Failed to set drift\n");
13135 return -EFAULT;
13136 }
13137
13138 DP(BNX2X_MSG_PTP, "Configured val = %d, period = %d\n", best_val,
13139 best_period);
13140
13141 return 0;
13142}
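
The PHC has no fractional frequency divider, so adjfreq() approximates the requested ppb by picking integers val (ticks to add or drop) and period such that val * 1000000 / period comes closest to ppb, skipping val = 8, 16, 24 which the FW workaround cannot program. A standalone sketch of the same search (the value of BNX2X_MAX_PHC_DRIFT is an assumption here):

#include <stdio.h>
#include <stdlib.h>

#define MAX_PHC_DRIFT 31000000	/* assumed stand-in for BNX2X_MAX_PHC_DRIFT */

int main(void)
{
	int ppb = 12345;	/* example request */
	int best_dif = MAX_PHC_DRIFT, best_val = 0, best_period = 0;

	for (int val = 1; val <= 31; val++) {
		if ((val & 0x7) == 0)
			continue;	/* 8, 16, 24 unsupported by the workaround */
		int p1 = val * 1000000 / ppb;
		int p2 = p1 + 1;
		int d1 = p1 ? abs(ppb - val * 1000000 / p1) : MAX_PHC_DRIFT;
		int d2 = abs(ppb - val * 1000000 / p2);
		int dif = d1 < d2 ? d1 : d2;
		int period = d1 < d2 ? p1 : p2;

		if (dif < best_dif) {
			best_dif = dif;
			best_val = val;
			best_period = period;
		}
	}
	printf("ppb=%d -> val=%d period=%d (error %d ppb)\n",
	       ppb, best_val, best_period, best_dif);
	return 0;
}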
13143
13144static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
13145{
13146 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13147 u64 now;
13148
13149 DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);
13150
13151 now = timecounter_read(&bp->timecounter);
13152 now += delta;
13153 /* Re-init the timecounter */
13154 timecounter_init(&bp->timecounter, &bp->cyclecounter, now);
13155
13156 return 0;
13157}
13158
13159static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
13160{
13161 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13162 u64 ns;
13163 u32 remainder;
13164
13165 ns = timecounter_read(&bp->timecounter);
13166
13167 DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns);
13168
13169 ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
13170 ts->tv_nsec = remainder;
13171
13172 return 0;
13173}
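
gettime converts the 64-bit nanosecond count into a timespec with div_u64_rem(), the kernel helper that avoids an expensive 64-bit division on 32-bit hosts. The arithmetic itself is just a division with remainder; a plain C equivalent:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t ns = 1700000000123456789ULL;	/* arbitrary example */
	uint64_t sec = ns / 1000000000ULL;
	uint32_t nsec = (uint32_t)(ns % 1000000000ULL);

	printf("%llu ns = %llu s + %u ns\n",
	       (unsigned long long)ns, (unsigned long long)sec, nsec);
	return 0;
}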
13174
13175static int bnx2x_ptp_settime(struct ptp_clock_info *ptp,
13176 const struct timespec *ts)
13177{
13178 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13179 u64 ns;
13180
13181 ns = ts->tv_sec * 1000000000ULL;
13182 ns += ts->tv_nsec;
13183
13184 DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns);
13185
13186 /* Re-init the timecounter */
13187 timecounter_init(&bp->timecounter, &bp->cyclecounter, ns);
13188
13189 return 0;
13190}
13191
13192/* Enable (or disable) ancillary features of the phc subsystem */
13193static int bnx2x_ptp_enable(struct ptp_clock_info *ptp,
13194 struct ptp_clock_request *rq, int on)
13195{
13196 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13197
13198 BNX2X_ERR("PHC ancillary features are not supported\n");
13199 return -ENOTSUPP;
13200}
13201
13202void bnx2x_register_phc(struct bnx2x *bp)
13203{
13204 /* Fill the ptp_clock_info struct and register PTP clock */
13205 bp->ptp_clock_info.owner = THIS_MODULE;
13206 snprintf(bp->ptp_clock_info.name, 16, "%s", bp->dev->name);
13207 bp->ptp_clock_info.max_adj = BNX2X_MAX_PHC_DRIFT; /* In PPB */
13208 bp->ptp_clock_info.n_alarm = 0;
13209 bp->ptp_clock_info.n_ext_ts = 0;
13210 bp->ptp_clock_info.n_per_out = 0;
13211 bp->ptp_clock_info.pps = 0;
13212 bp->ptp_clock_info.adjfreq = bnx2x_ptp_adjfreq;
13213 bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime;
13214 bp->ptp_clock_info.gettime = bnx2x_ptp_gettime;
13215 bp->ptp_clock_info.settime = bnx2x_ptp_settime;
13216 bp->ptp_clock_info.enable = bnx2x_ptp_enable;
13217
13218 bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev);
13219 if (IS_ERR(bp->ptp_clock)) {
13220 bp->ptp_clock = NULL;
13221 BNX2X_ERR("PTP clock registration failed\n");
13222 }
13223}
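
Once registration succeeds, the PHC shows up as a /dev/ptpN character device and can be read as a dynamic POSIX clock. A usage sketch following the FD_TO_CLOCKID convention from the kernel's testptp.c example (/dev/ptp0 is an assumption; the actual index depends on probe order):

#include <stdio.h>
#include <fcntl.h>
#include <time.h>
#include <unistd.h>

#define CLOCKFD 3
#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | CLOCKFD)

int main(void)
{
	struct timespec ts;
	int fd = open("/dev/ptp0", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/ptp0");
		return 1;
	}
	if (clock_gettime(FD_TO_CLOCKID(fd), &ts))
		perror("clock_gettime");
	else
		printf("PHC time: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	close(fd);
	return 0;
}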
13224
12961static int bnx2x_init_one(struct pci_dev *pdev, 13225static int bnx2x_init_one(struct pci_dev *pdev,
12962 const struct pci_device_id *ent) 13226 const struct pci_device_id *ent)
12963{ 13227{
@@ -13129,6 +13393,8 @@ static int bnx2x_init_one(struct pci_dev *pdev,
13129 "Unknown", 13393 "Unknown",
13130 dev->base_addr, bp->pdev->irq, dev->dev_addr); 13394 dev->base_addr, bp->pdev->irq, dev->dev_addr);
13131 13395
13396 bnx2x_register_phc(bp);
13397
13132 return 0; 13398 return 0;
13133 13399
13134init_one_exit: 13400init_one_exit:
@@ -13155,6 +13421,11 @@ static void __bnx2x_remove(struct pci_dev *pdev,
13155 struct bnx2x *bp, 13421 struct bnx2x *bp,
13156 bool remove_netdev) 13422 bool remove_netdev)
13157{ 13423{
13424 if (bp->ptp_clock) {
13425 ptp_clock_unregister(bp->ptp_clock);
13426 bp->ptp_clock = NULL;
13427 }
13428
13158 /* Delete storage MAC address */ 13429 /* Delete storage MAC address */
13159 if (!NO_FCOE(bp)) { 13430 if (!NO_FCOE(bp)) {
13160 rtnl_lock(); 13431 rtnl_lock();
@@ -14136,3 +14407,332 @@ int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
14136 REG_RD(bp, pretend_reg); 14407 REG_RD(bp, pretend_reg);
14137 return 0; 14408 return 0;
14138} 14409}
14410
14411static void bnx2x_ptp_task(struct work_struct *work)
14412{
14413 struct bnx2x *bp = container_of(work, struct bnx2x, ptp_task);
14414 int port = BP_PORT(bp);
14415 u32 val_seq;
14416 u64 timestamp, ns;
14417 struct skb_shared_hwtstamps shhwtstamps;
14418
14419 /* Read Tx timestamp registers */
14420 val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
14421 NIG_REG_P0_TLLH_PTP_BUF_SEQID);
14422 if (val_seq & 0x10000) {
14423 /* There is a valid timestamp value */
14424 timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB :
14425 NIG_REG_P0_TLLH_PTP_BUF_TS_MSB);
14426 timestamp <<= 32;
14427 timestamp |= REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_LSB :
14428 NIG_REG_P0_TLLH_PTP_BUF_TS_LSB);
14429 /* Reset timestamp register to allow new timestamp */
14430 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
14431 NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);
14432 ns = timecounter_cyc2time(&bp->timecounter, timestamp);
14433
14434 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
14435 shhwtstamps.hwtstamp = ns_to_ktime(ns);
14436 skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps);
14437 dev_kfree_skb_any(bp->ptp_tx_skb);
14438 bp->ptp_tx_skb = NULL;
14439
14440 DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
14441 timestamp, ns);
14442 } else {
14443 DP(BNX2X_MSG_PTP, "There is no valid Tx timestamp yet\n");
14444 /* Reschedule to keep checking for a valid timestamp value */
14445 schedule_work(&bp->ptp_task);
14446 }
14447}
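
The Tx path uses a one-deep hardware buffer: bit 16 of the SEQID register reports a valid capture, the 64-bit cycle count is assembled from the MSB/LSB pair, and writing bit 16 back frees the buffer for the next packet (which is why the work item reschedules itself until the bit is set). A stripped-down model of that drain, with a fake register file standing in for REG_RD()/REG_WR():

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* fake NIG registers (assumption: the real code reads the device BAR) */
static uint32_t seqid = (1u << 16) | 0x1234;	/* valid bit + sequence ID */
static uint32_t ts_msb = 0x00000001;
static uint32_t ts_lsb = 0x89abcdef;

static bool read_tx_timestamp(uint64_t *cycles)
{
	if (!(seqid & (1u << 16)))
		return false;	/* nothing captured yet; caller reschedules */
	*cycles = (uint64_t)ts_msb << 32 | ts_lsb;
	seqid = 0;	/* writing 1 to bit 16 clears the buffer */
	return true;
}

int main(void)
{
	uint64_t cycles;

	if (read_tx_timestamp(&cycles))
		printf("Tx timestamp cycles = %llu\n", (unsigned long long)cycles);
	return 0;
}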
14448
14449void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
14450{
14451 int port = BP_PORT(bp);
14452 u64 timestamp, ns;
14453
14454 timestamp = REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB :
14455 NIG_REG_P0_LLH_PTP_HOST_BUF_TS_MSB);
14456 timestamp <<= 32;
14457 timestamp |= REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB :
14458 NIG_REG_P0_LLH_PTP_HOST_BUF_TS_LSB);
14459
14460 /* Reset timestamp register to allow new timestamp */
14461 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
14462 NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);
14463
14464 ns = timecounter_cyc2time(&bp->timecounter, timestamp);
14465
14466 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
14467
14468 DP(BNX2X_MSG_PTP, "Rx timestamp, timestamp cycles = %llu, ns = %llu\n",
14469 timestamp, ns);
14470}
14471
14472/* Read the PHC */
14473static cycle_t bnx2x_cyclecounter_read(const struct cyclecounter *cc)
14474{
14475 struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter);
14476 int port = BP_PORT(bp);
14477 u32 wb_data[2];
14478 u64 phc_cycles;
14479
14480 REG_RD_DMAE(bp, port ? NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t1 :
14481 NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t0, wb_data, 2);
14482 phc_cycles = wb_data[1];
14483 phc_cycles = (phc_cycles << 32) + wb_data[0];
14484
14485 DP(BNX2X_MSG_PTP, "PHC read cycles = %llu\n", phc_cycles);
14486
14487 return phc_cycles;
14488}
14489
14490static void bnx2x_init_cyclecounter(struct bnx2x *bp)
14491{
14492 memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter));
14493 bp->cyclecounter.read = bnx2x_cyclecounter_read;
14494 bp->cyclecounter.mask = CLOCKSOURCE_MASK(64);
14495 bp->cyclecounter.shift = 1;
14496 bp->cyclecounter.mult = 1;
14497}
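
The timecounter layer converts raw counter deltas to nanoseconds as ns = (cycles * mult) >> shift; with mult = 1 and shift = 1 as set above, two counter ticks make one nanosecond (suggesting a 2 GHz free-running clock — an inference, not stated in the patch). The conversion in isolation:

#include <stdio.h>
#include <stdint.h>

static uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	return (cycles * mult) >> shift;	/* overflow handling elided */
}

int main(void)
{
	uint64_t cycles = 4000000000ULL;	/* two seconds at an assumed 2 GHz */

	printf("%llu cycles -> %llu ns\n",
	       (unsigned long long)cycles,
	       (unsigned long long)cyc2ns(cycles, 1, 1));
	return 0;
}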
14498
14499static int bnx2x_send_reset_timesync_ramrod(struct bnx2x *bp)
14500{
14501 struct bnx2x_func_state_params func_params = {NULL};
14502 struct bnx2x_func_set_timesync_params *set_timesync_params =
14503 &func_params.params.set_timesync;
14504
14505 /* Prepare parameters for function state transitions */
14506 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
14507 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
14508
14509 func_params.f_obj = &bp->func_obj;
14510 func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;
14511
14512 /* Function parameters */
14513 set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_RESET;
14514 set_timesync_params->offset_cmd = TS_OFFSET_KEEP;
14515
14516 return bnx2x_func_state_change(bp, &func_params);
14517}
14518
14519int bnx2x_enable_ptp_packets(struct bnx2x *bp)
14520{
14521 struct bnx2x_queue_state_params q_params;
14522 int rc, i;
14523
14524 /* send queue update ramrod to enable PTP packets */
14525 memset(&q_params, 0, sizeof(q_params));
14526 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
14527 q_params.cmd = BNX2X_Q_CMD_UPDATE;
14528 __set_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG,
14529 &q_params.params.update.update_flags);
14530 __set_bit(BNX2X_Q_UPDATE_PTP_PKTS,
14531 &q_params.params.update.update_flags);
14532
14533 /* send the ramrod on all the queues of the PF */
14534 for_each_eth_queue(bp, i) {
14535 struct bnx2x_fastpath *fp = &bp->fp[i];
14536
14537 /* Set the appropriate Queue object */
14538 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
14539
14540 /* Update the Queue state */
14541 rc = bnx2x_queue_state_change(bp, &q_params);
14542 if (rc) {
14543 BNX2X_ERR("Failed to enable PTP packets\n");
14544 return rc;
14545 }
14546 }
14547
14548 return 0;
14549}
14550
14551int bnx2x_configure_ptp_filters(struct bnx2x *bp)
14552{
14553 int port = BP_PORT(bp);
14554 int rc;
14555
14556 if (!bp->hwtstamp_ioctl_called)
14557 return 0;
14558
14559 switch (bp->tx_type) {
14560 case HWTSTAMP_TX_ON:
14561 bp->flags |= TX_TIMESTAMPING_EN;
14562 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
14563 NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x6AA);
14564 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
14565 NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3EEE);
14566 break;
14567 case HWTSTAMP_TX_ONESTEP_SYNC:
14568 BNX2X_ERR("One-step timestamping is not supported\n");
14569 return -ERANGE;
14570 }
14571
14572 switch (bp->rx_filter) {
14573 case HWTSTAMP_FILTER_NONE:
14574 break;
14575 case HWTSTAMP_FILTER_ALL:
14576 case HWTSTAMP_FILTER_SOME:
14577 bp->rx_filter = HWTSTAMP_FILTER_NONE;
14578 break;
14579 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
14580 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
14581 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
14582 bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
14583 /* Initialize PTP detection for UDP/IPv4 events */
14584 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
14585 NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EE);
14586 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
14587 NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFE);
14588 break;
14589 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
14590 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
14591 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
14592 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
14593 /* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
14594 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
14595 NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EA);
14596 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
14597 NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FEE);
14598 break;
14599 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
14600 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
14601 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
14602 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
14603 /* Initialize PTP detection L2 events */
14604 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
14605 NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6BF);
14606 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
14607 NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EFF);
14608
14609 break;
14610 case HWTSTAMP_FILTER_PTP_V2_EVENT:
14611 case HWTSTAMP_FILTER_PTP_V2_SYNC:
14612 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
14613 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
14614 /* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */
14615 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
14616 NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6AA);
14617 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
14618 NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EEE);
14619 break;
14620 }
14621
14622 /* Indicate to FW that this PF expects recorded PTP packets */
14623 rc = bnx2x_enable_ptp_packets(bp);
14624 if (rc)
14625 return rc;
14626
14627 /* Enable sending PTP packets to host */
14628 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
14629 NIG_REG_P0_LLH_PTP_TO_HOST, 0x1);
14630
14631 return 0;
14632}
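
The rx_filter switch above grants a broader class than requested where the NIG parser cannot distinguish (any V2/L4 request becomes V2_L4_EVENT, and so on), then reports the granted value back through the ioctl. A compact sketch of that coarsening for a subset of the cases, using the uapi constants:

#include <stdio.h>
#include <linux/net_tstamp.h>

static int coarsen(int requested)
{
	switch (requested) {
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		return HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		return HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		return HWTSTAMP_FILTER_PTP_V2_EVENT;
	default:
		return requested;
	}
}

int main(void)
{
	printf("granted: %d\n", coarsen(HWTSTAMP_FILTER_PTP_V2_SYNC));
	return 0;
}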
14633
14634static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
14635{
14636 struct hwtstamp_config config;
14637 int rc;
14638
14639 DP(BNX2X_MSG_PTP, "HWTSTAMP IOCTL called\n");
14640
14641 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
14642 return -EFAULT;
14643
14644 DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n",
14645 config.tx_type, config.rx_filter);
14646
14647 if (config.flags) {
14648 BNX2X_ERR("config.flags is reserved for future use\n");
14649 return -EINVAL;
14650 }
14651
14652 bp->hwtstamp_ioctl_called = 1;
14653 bp->tx_type = config.tx_type;
14654 bp->rx_filter = config.rx_filter;
14655
14656 rc = bnx2x_configure_ptp_filters(bp);
14657 if (rc)
14658 return rc;
14659
14660 config.rx_filter = bp->rx_filter;
14661
14662 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
14663 -EFAULT : 0;
14664}
14665
14666/* Configures HW for PTP */
14667static int bnx2x_configure_ptp(struct bnx2x *bp)
14668{
14669 int rc, port = BP_PORT(bp);
14670 u32 wb_data[2];
14671
14672 /* Reset PTP event detection rules - will be configured in the IOCTL */
14673 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
14674 NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
14675 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
14676 NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
14677 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
14678 NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
14679 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
14680 NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);
14681
14682 /* Disable PTP packets to host - will be configured in the IOCTL */
14683 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
14684 NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);
14685
14686 /* Enable the PTP feature */
14687 REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
14688 NIG_REG_P0_PTP_EN, 0x3F);
14689
14690 /* Enable the free-running counter */
14691 wb_data[0] = 0;
14692 wb_data[1] = 0;
14693 REG_WR_DMAE(bp, NIG_REG_TIMESYNC_GEN_REG + tsgen_ctrl, wb_data, 2);
14694
14695 /* Reset drift register (offset register is not reset) */
14696 rc = bnx2x_send_reset_timesync_ramrod(bp);
14697 if (rc) {
14698 BNX2X_ERR("Failed to reset PHC drift register\n");
14699 return -EFAULT;
14700 }
14701
14702 /* Reset possibly old timestamps */
14703 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
14704 NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);
14705 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
14706 NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);
14707
14708 return 0;
14709}
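
Per the register descriptions added to bnx2x_reg.h, each clear bit in a PARAM_MASK enables one match parameter (set means masked out), so the 0x7FF written above disables detection entirely and the ioctl later clears only the bits it needs. A decoder sketch showing which parameters the 0x6AA value (used for HWTSTAMP_FILTER_PTP_V2_EVENT) leaves active:

#include <stdio.h>

static const char *param[11] = {
	"IPv4 DA 224.0.1.129",       "IPv4 DA 224.0.0.107",
	"IPv6 DA FF0*::181",         "IPv6 DA FF02::6B",
	"UDP dport 319",             "UDP dport 320",
	"Ethertype 0x88F7",          "configurable Ethertype",
	"MAC DA 01-1B-19-00-00-00",  "MAC DA 01-80-C2-00-00-0E",
	"configurable MAC DA",
};

int main(void)
{
	unsigned int mask = 0x6AA;

	for (int bit = 0; bit < 11; bit++)
		if (!(mask & (1u << bit)))
			printf("enabled: %s\n", param[bit]);
	return 0;
}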
14710
14711/* Called during load, to initialize PTP-related stuff */
14712void bnx2x_init_ptp(struct bnx2x *bp)
14713{
14714 int rc;
14715
14716 /* Configure PTP in HW */
14717 rc = bnx2x_configure_ptp(bp);
14718 if (rc) {
14719 BNX2X_ERR("Stopping PTP initialization\n");
14720 return;
14721 }
14722
14723 /* Init work queue for Tx timestamping */
14724 INIT_WORK(&bp->ptp_task, bnx2x_ptp_task);
14725
14726 /* Init cyclecounter and timecounter. This is done only in the first
14727 * load. If done on every load, a PTP application will fail when doing
14728 * unload / load (e.g. MTU change) while it is running.
14729 */
14730 if (!bp->timecounter_init_done) {
14731 bnx2x_init_cyclecounter(bp);
14732 timecounter_init(&bp->timecounter, &bp->cyclecounter,
14733 ktime_to_ns(ktime_get_real()));
14734 bp->timecounter_init_done = 1;
14735 }
14736
14737 DP(BNX2X_MSG_PTP, "PTP initialization ended successfully\n");
14738}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 2beb5430b876..b0779d773343 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -2182,6 +2182,45 @@
2182#define NIG_REG_P0_HWPFC_ENABLE 0x18078 2182#define NIG_REG_P0_HWPFC_ENABLE 0x18078
2183#define NIG_REG_P0_LLH_FUNC_MEM2 0x18480 2183#define NIG_REG_P0_LLH_FUNC_MEM2 0x18480
2184#define NIG_REG_P0_LLH_FUNC_MEM2_ENABLE 0x18440 2184#define NIG_REG_P0_LLH_FUNC_MEM2_ENABLE 0x18440
2185/* [RW 17] Packet TimeSync information that is buffered in 1-deep FIFOs for
2186 * the host. Bits [15:0] return the sequence ID of the packet. Bit 16
2187 * indicates the validity of the data in the buffer. Writing a 1 to bit 16
2188 * will clear the buffer.
2189 */
2190#define NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID 0x1875c
2191/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
2192 * the host. This location returns the lower 32 bits of timestamp value.
2193 */
2194#define NIG_REG_P0_LLH_PTP_HOST_BUF_TS_LSB 0x18754
2195/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
2196 * the host. This location returns the upper 32 bits of timestamp value.
2197 */
2198#define NIG_REG_P0_LLH_PTP_HOST_BUF_TS_MSB 0x18758
2199/* [RW 11] Mask register for the various parameters used in determining PTP
2200 * packet presence. Set each bit to 1 to mask out the particular parameter.
2201 * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of
2202 * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP
2203 * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC
2204 * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of
2205 * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable
2206 * MAC DA 2. The reset default is set to mask out all parameters.
2207 */
2208#define NIG_REG_P0_LLH_PTP_PARAM_MASK 0x187a0
2209/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
2210 * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
2211 * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
2212 * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
2213 * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC
2214 * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype
2215 * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset
2216 * default is to mask out all of the rules. Note that rules 0-3 are for IPv4
2217 * packets only and require that the packet is IPv4 for the rules to match.
2218 * Note that rules 4-7 are for IPv6 packets only and require that the packet
2219 * is IPv6 for the rules to match.
2220 */
2221#define NIG_REG_P0_LLH_PTP_RULE_MASK 0x187a4
2222/* [RW 1] Set to 1 to enable PTP packets to be forwarded to the host. */
2223#define NIG_REG_P0_LLH_PTP_TO_HOST 0x187ac
2185/* [RW 1] Input enable for RX MAC interface. */ 2224/* [RW 1] Input enable for RX MAC interface. */
2186#define NIG_REG_P0_MAC_IN_EN 0x185ac 2225#define NIG_REG_P0_MAC_IN_EN 0x185ac
2187/* [RW 1] Output enable for TX MAC interface */ 2226/* [RW 1] Output enable for TX MAC interface */
@@ -2194,6 +2233,17 @@
2194 * priority field is extracted from the outer-most VLAN in receive packet. 2233 * priority field is extracted from the outer-most VLAN in receive packet.
2195 * Only COS 0 and COS 1 are supported in E2. */ 2234 * Only COS 0 and COS 1 are supported in E2. */
2196#define NIG_REG_P0_PKT_PRIORITY_TO_COS 0x18054 2235#define NIG_REG_P0_PKT_PRIORITY_TO_COS 0x18054
2236/* [RW 6] Enable for TimeSync feature. Bits [2:0] are for RX side. Bits
2237 * [5:3] are for TX side. Bit 0 enables TimeSync on RX side. Bit 1 enables
2238 * V1 frame format in timesync event detection on RX side. Bit 2 enables V2
2239 * frame format in timesync event detection on RX side. Bit 3 enables
2240 * TimeSync on TX side. Bit 4 enables V1 frame format in timesync event
2241 * detection on TX side. Bit 5 enables V2 frame format in timesync event
2242 * detection on TX side. Note that for HW to detect PTP packet and extract
2243 * data from the packet, at least one of the version bits of that traffic
2244 * direction has to be enabled.
2245 */
2246#define NIG_REG_P0_PTP_EN 0x18788
2197/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A 2247/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A
2198 * priority is mapped to COS 0 when the corresponding mask bit is 1. More 2248 * priority is mapped to COS 0 when the corresponding mask bit is 1. More
2199 * than one bit may be set; allowing multiple priorities to be mapped to one 2249 * than one bit may be set; allowing multiple priorities to be mapped to one
@@ -2300,7 +2350,46 @@
2300 * Ethernet header. */ 2350 * Ethernet header. */
2301#define NIG_REG_P1_HDRS_AFTER_BASIC 0x1818c 2351#define NIG_REG_P1_HDRS_AFTER_BASIC 0x1818c
2302#define NIG_REG_P1_LLH_FUNC_MEM2 0x184c0 2352#define NIG_REG_P1_LLH_FUNC_MEM2 0x184c0
2303#define NIG_REG_P1_LLH_FUNC_MEM2_ENABLE 0x18460 2353#define NIG_REG_P1_LLH_FUNC_MEM2_ENABLE 0x18460
2354/* [RW 17] Packet TimeSync information that is buffered in 1-deep FIFOs for
2355 * the host. Bits [15:0] return the sequence ID of the packet. Bit 16
2356 * indicates the validity of the data in the buffer. Writing a 1 to bit 16
2357 * will clear the buffer.
2358 */
2359#define NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID 0x18774
2360/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
2361 * the host. This location returns the lower 32 bits of timestamp value.
2362 */
2363#define NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB 0x1876c
2364/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
2365 * the host. This location returns the upper 32 bits of timestamp value.
2366 */
2367#define NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB 0x18770
2368/* [RW 11] Mask register for the various parameters used in determining PTP
2369 * packet presence. Set each bit to 1 to mask out the particular parameter.
2370 * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of
2371 * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP
2372 * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC
2373 * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of
2374 * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable
2375 * MAC DA 2. The reset default is set to mask out all parameters.
2376 */
2377#define NIG_REG_P1_LLH_PTP_PARAM_MASK 0x187c8
2378/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
2379 * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
2380 * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
2381 * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
2382 * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC
2383 * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype
2384 * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset
2385 * default is to mask out all of the rules. Note that rules 0-3 are for IPv4
2386 * packets only and require that the packet is IPv4 for the rules to match.
2387 * Note that rules 4-7 are for IPv6 packets only and require that the packet
2388 * is IPv6 for the rules to match.
2389 */
2390#define NIG_REG_P1_LLH_PTP_RULE_MASK 0x187cc
2391/* [RW 1] Set to 1 to enable PTP packets to be forwarded to the host. */
2392#define NIG_REG_P1_LLH_PTP_TO_HOST 0x187d4
2304/* [RW 32] Specify the client number to be assigned to each priority of the 2393/* [RW 32] Specify the client number to be assigned to each priority of the
2305 * strict priority arbiter. This register specifies bits 31:0 of the 36-bit 2394 * strict priority arbiter. This register specifies bits 31:0 of the 36-bit
2306 * value. Priority 0 is the highest priority. Bits [3:0] are for priority 0 2395 * value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
@@ -2342,6 +2431,17 @@
2342 * priority field is extracted from the outer-most VLAN in receive packet. 2431 * priority field is extracted from the outer-most VLAN in receive packet.
2343 * Only COS 0 and COS 1 are supported in E2. */ 2432 * Only COS 0 and COS 1 are supported in E2. */
2344#define NIG_REG_P1_PKT_PRIORITY_TO_COS 0x181a8 2433#define NIG_REG_P1_PKT_PRIORITY_TO_COS 0x181a8
2434/* [RW 6] Enable for TimeSync feature. Bits [2:0] are for RX side. Bits
2435 * [5:3] are for TX side. Bit 0 enables TimeSync on RX side. Bit 1 enables
2436 * V1 frame format in timesync event detection on RX side. Bit 2 enables V2
2437 * frame format in timesync event detection on RX side. Bit 3 enables
2438 * TimeSync on TX side. Bit 4 enables V1 frame format in timesync event
2439 * detection on TX side. Bit 5 enables V2 frame format in timesync event
2440 * detection on TX side. Note that for HW to detect PTP packet and extract
2441 * data from the packet, at least one of the version bits of that traffic
2442 * direction has to be enabled.
2443 */
2444#define NIG_REG_P1_PTP_EN 0x187b0
2345/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A 2445/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A
2346 * priority is mapped to COS 0 when the corresponding mask bit is 1. More 2446 * priority is mapped to COS 0 when the corresponding mask bit is 1. More
2347 * than one bit may be set; allowing multiple priorities to be mapped to one 2447 * than one bit may be set; allowing multiple priorities to be mapped to one
@@ -2361,6 +2461,78 @@
2361#define NIG_REG_P1_RX_MACFIFO_EMPTY 0x1858c 2461#define NIG_REG_P1_RX_MACFIFO_EMPTY 0x1858c
2362/* [R 1] TLLH FIFO is empty. */ 2462/* [R 1] TLLH FIFO is empty. */
2363#define NIG_REG_P1_TLLH_FIFO_EMPTY 0x18338 2463#define NIG_REG_P1_TLLH_FIFO_EMPTY 0x18338
2464/* [RW 19] Packet TimeSync information that is buffered in 1-deep FIFOs for
2465 * TX side. Bits [15:0] reflect the sequence ID of the packet. Bit 16
2466 * indicates the validity of the data in the buffer. Bit 17 indicates that
2467 * the sequence ID is valid and it is waiting for the TX timestamp value.
2468 * Bit 18 indicates whether the timestamp is from a SW request (value of 1)
2469 * or HW request (value of 0). Writing a 1 to bit 16 will clear the buffer.
2470 */
2471#define NIG_REG_P0_TLLH_PTP_BUF_SEQID 0x187e0
2472/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
2473 * MCP. This location returns the lower 32 bits of timestamp value.
2474 */
2475#define NIG_REG_P0_TLLH_PTP_BUF_TS_LSB 0x187d8
2476/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
2477 * MCP. This location returns the upper 32 bits of timestamp value.
2478 */
2479#define NIG_REG_P0_TLLH_PTP_BUF_TS_MSB 0x187dc
2480/* [RW 11] Mask register for the various parameters used in determining PTP
2481 * packet presence. Set each bit to 1 to mask out the particular parameter.
2482 * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of
2483 * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP
2484 * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC
2485 * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of
2486 * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable
2487 * MAC DA 2. The reset default is set to mask out all parameters.
2488 */
2489#define NIG_REG_P0_TLLH_PTP_PARAM_MASK 0x187f0
2490/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
2491 * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
2492 * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
2493 * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
2494 * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC
2495 * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype
2496 * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset
2497 * default is to mask out all of the rules.
2498 */
2499#define NIG_REG_P0_TLLH_PTP_RULE_MASK 0x187f4
2500/* [RW 19] Packet TimeSync information that is buffered in 1-deep FIFOs for
2501 * TX side. Bits [15:0] reflect the sequence ID of the packet. Bit 16
2502 * indicates the validity of the data in the buffer. Bit 17 indicates that
2503 * the sequence ID is valid and it is waiting for the TX timestamp value.
2504 * Bit 18 indicates whether the timestamp is from a SW request (value of 1)
2505 * or HW request (value of 0). Writing a 1 to bit 16 will clear the buffer.
2506 */
2507#define NIG_REG_P1_TLLH_PTP_BUF_SEQID 0x187ec
2508/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
2509 * MCP. This location returns the lower 32 bits of timestamp value.
2510 */
2511#define NIG_REG_P1_TLLH_PTP_BUF_TS_LSB 0x187e4
2512/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for
2513 * MCP. This location returns the upper 32 bits of timestamp value.
2514 */
2515#define NIG_REG_P1_TLLH_PTP_BUF_TS_MSB 0x187e8
2516/* [RW 11] Mask register for the various parameters used in determining PTP
2517 * packet presence. Set each bit to 1 to mask out the particular parameter.
2518 * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of
2519 * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP
2520 * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC
2521 * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of
2522 * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable
2523 * MAC DA 2. The reset default is set to mask out all parameters.
2524 */
2525#define NIG_REG_P1_TLLH_PTP_PARAM_MASK 0x187f8
2526/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
2527 * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
2528 * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
2529 * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
2530 * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC
2531 * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype
2532 * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset
2533 * default is to mask out all of the rules.
2534 */
2535#define NIG_REG_P1_TLLH_PTP_RULE_MASK 0x187fc
2364/* [RW 32] Specify which of the credit registers the client is to be mapped 2536/* [RW 32] Specify which of the credit registers the client is to be mapped
2365 * to. This register specifies bits 31:0 of the 36-bit value. Bits[3:0] are 2537 * to. This register specifies bits 31:0 of the 36-bit value. Bits[3:0] are
2366 * for client 0; bits [35:32] are for client 8. For clients that are not 2538 * for client 0; bits [35:32] are for client 8. For clients that are not
@@ -2513,6 +2685,10 @@
2513 swap is equal to SPIO pin that inputs from ifmux_serdes_swap. If 1 then 2685 swap is equal to SPIO pin that inputs from ifmux_serdes_swap. If 1 then
2514 port swap is equal to ~nig_registers_port_swap.port_swap */ 2686
2515#define NIG_REG_STRAP_OVERRIDE 0x10398 2687#define NIG_REG_STRAP_OVERRIDE 0x10398
2688/* [WB 64] Addresses for TimeSync-related registers in the timesync
2689 * generator sub-module.
2690 */
2691#define NIG_REG_TIMESYNC_GEN_REG 0x18800
2516/* [RW 1] output enable for RX_XCM0 IF */ 2692/* [RW 1] output enable for RX_XCM0 IF */
2517#define NIG_REG_XCM0_OUT_EN 0x100f0 2693#define NIG_REG_XCM0_OUT_EN 0x100f0
2518/* [RW 1] output enable for RX_XCM1 IF */ 2694/* [RW 1] output enable for RX_XCM1 IF */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index b1936044767a..19d0c1152434 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -4019,6 +4019,7 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
4019 struct bnx2x_raw_obj *r = &o->raw; 4019 struct bnx2x_raw_obj *r = &o->raw;
4020 struct eth_rss_update_ramrod_data *data = 4020 struct eth_rss_update_ramrod_data *data =
4021 (struct eth_rss_update_ramrod_data *)(r->rdata); 4021 (struct eth_rss_update_ramrod_data *)(r->rdata);
4022 u16 caps = 0;
4022 u8 rss_mode = 0; 4023 u8 rss_mode = 0;
4023 int rc; 4024 int rc;
4024 4025
@@ -4042,28 +4043,34 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
4042 4043
4043 /* RSS capabilities */ 4044 /* RSS capabilities */
4044 if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags)) 4045 if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
4045 data->capabilities |= 4046 caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
4046 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
4047 4047
4048 if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags)) 4048 if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
4049 data->capabilities |= 4049 caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4050 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4051 4050
4052 if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags)) 4051 if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
4053 data->capabilities |= 4052 caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
4054 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
4055 4053
4056 if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags)) 4054 if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
4057 data->capabilities |= 4055 caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
4058 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
4059 4056
4060 if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags)) 4057 if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
4061 data->capabilities |= 4058 caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4062 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4063 4059
4064 if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags)) 4060 if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
4065 data->capabilities |= 4061 caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
4066 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY; 4062
4063 if (test_bit(BNX2X_RSS_GRE_INNER_HDRS, &p->rss_flags))
4064 caps |= ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY;
4065
4066 /* RSS keys */
4067 if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
4068 memcpy(&data->rss_key[0], &p->rss_key[0],
4069 sizeof(data->rss_key));
4070 caps |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4071 }
4072
4073 data->capabilities = cpu_to_le16(caps);
4067 4074
4068 /* Hashing mask */ 4075 /* Hashing mask */
4069 data->rss_result_mask = p->rss_result_mask; 4076 data->rss_result_mask = p->rss_result_mask;
@@ -4084,13 +4091,6 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
4084 if (netif_msg_ifup(bp)) 4091 if (netif_msg_ifup(bp))
4085 bnx2x_debug_print_ind_table(bp, p); 4092 bnx2x_debug_print_ind_table(bp, p);
4086 4093
4087 /* RSS keys */
4088 if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
4089 memcpy(&data->rss_key[0], &p->rss_key[0],
4090 sizeof(data->rss_key));
4091 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4092 }
4093
4094 /* No need for an explicit memory barrier here as long as we 4094 /* No need for an explicit memory barrier here as long as we
4095 * ensure the ordering of writing to the SPQ element 4095 * ensure the ordering of writing to the SPQ element
4096 * and updating of the SPQ producer which involves a memory 4096 * and updating of the SPQ producer which involves a memory
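
The rework above is an endianness fix as much as a cleanup: capability bits are now accumulated in a host-order u16 and converted once with cpu_to_le16(), where the old code OR-ed host-order bits straight into a little-endian field and would produce a wrong wire value on big-endian machines. The same pattern in a userspace illustration (the bit names are stand-ins):

#include <stdio.h>
#include <stdint.h>

#define CAP_IPV4	(1u << 0)	/* stand-ins for the ramrod capability bits */
#define CAP_IPV4_TCP	(1u << 1)

static uint16_t to_le16(uint16_t v)
{
	const union { uint16_t u; uint8_t b[2]; } probe = { .u = 1 };

	return probe.b[0] ? v : (uint16_t)((v << 8) | (v >> 8));	/* swap on BE */
}

int main(void)
{
	uint16_t caps = 0;

	caps |= CAP_IPV4;	/* build in host order... */
	caps |= CAP_IPV4_TCP;
	printf("wire value: 0x%04x\n", to_le16(caps));	/* ...convert once */
	return 0;
}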
@@ -4336,6 +4336,8 @@ static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
4336 test_bit(BNX2X_Q_FLG_FCOE, flags) ? 4336 test_bit(BNX2X_Q_FLG_FCOE, flags) ?
4337 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW; 4337 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4338 4338
4339 gen_data->fp_hsi_ver = ETH_FP_HSI_VERSION;
4340
4339 DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n", 4341 DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
4340 gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg); 4342 gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4341} 4343}
@@ -4357,12 +4359,13 @@ static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
4357 test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags); 4359 test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
4358 tx_data->force_default_pri_flg = 4360 tx_data->force_default_pri_flg =
4359 test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags); 4361 test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
4360 4362 tx_data->refuse_outband_vlan_flg =
4363 test_bit(BNX2X_Q_FLG_REFUSE_OUTBAND_VLAN, flags);
4361 tx_data->tunnel_lso_inc_ip_id = 4364 tx_data->tunnel_lso_inc_ip_id =
4362 test_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, flags); 4365 test_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, flags);
4363 tx_data->tunnel_non_lso_pcsum_location = 4366 tx_data->tunnel_non_lso_pcsum_location =
4364 test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? PCSUM_ON_PKT : 4367 test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? CSUM_ON_PKT :
4365 PCSUM_ON_BD; 4368 CSUM_ON_BD;
4366 4369
4367 tx_data->tx_status_block_id = params->fw_sb_id; 4370 tx_data->tx_status_block_id = params->fw_sb_id;
4368 tx_data->tx_sb_index_number = params->sb_cq_index; 4371 tx_data->tx_sb_index_number = params->sb_cq_index;
@@ -4722,6 +4725,12 @@ static void bnx2x_q_fill_update_data(struct bnx2x *bp,
4722 data->tx_switching_change_flg = 4725 data->tx_switching_change_flg =
4723 test_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG, 4726 test_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
4724 &params->update_flags); 4727 &params->update_flags);
4728
4729 /* PTP */
4730 data->handle_ptp_pkts_flg =
4731 test_bit(BNX2X_Q_UPDATE_PTP_PKTS, &params->update_flags);
4732 data->handle_ptp_pkts_change_flg =
4733 test_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG, &params->update_flags);
4725} 4734}
4726 4735
4727static inline int bnx2x_q_send_update(struct bnx2x *bp, 4736static inline int bnx2x_q_send_update(struct bnx2x *bp,
@@ -5376,6 +5385,10 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp,
5376 (!test_bit(BNX2X_F_CMD_STOP, &o->pending))) 5385 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5377 next_state = BNX2X_F_STATE_STARTED; 5386 next_state = BNX2X_F_STATE_STARTED;
5378 5387
5388 else if ((cmd == BNX2X_F_CMD_SET_TIMESYNC) &&
5389 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5390 next_state = BNX2X_F_STATE_STARTED;
5391
5379 else if (cmd == BNX2X_F_CMD_TX_STOP) 5392 else if (cmd == BNX2X_F_CMD_TX_STOP)
5380 next_state = BNX2X_F_STATE_TX_STOPPED; 5393 next_state = BNX2X_F_STATE_TX_STOPPED;
5381 5394
@@ -5385,6 +5398,10 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp,
5385 (!test_bit(BNX2X_F_CMD_STOP, &o->pending))) 5398 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5386 next_state = BNX2X_F_STATE_TX_STOPPED; 5399 next_state = BNX2X_F_STATE_TX_STOPPED;
5387 5400
5401 else if ((cmd == BNX2X_F_CMD_SET_TIMESYNC) &&
5402 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5403 next_state = BNX2X_F_STATE_TX_STOPPED;
5404
5388 else if (cmd == BNX2X_F_CMD_TX_START) 5405 else if (cmd == BNX2X_F_CMD_TX_START)
5389 next_state = BNX2X_F_STATE_STARTED; 5406 next_state = BNX2X_F_STATE_STARTED;
5390 5407
@@ -5652,8 +5669,11 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
5652 rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag); 5669 rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
5653 rdata->path_id = BP_PATH(bp); 5670 rdata->path_id = BP_PATH(bp);
5654 rdata->network_cos_mode = start_params->network_cos_mode; 5671 rdata->network_cos_mode = start_params->network_cos_mode;
5655 rdata->gre_tunnel_mode = start_params->gre_tunnel_mode; 5672 rdata->tunnel_mode = start_params->tunnel_mode;
5656 rdata->gre_tunnel_rss = start_params->gre_tunnel_rss; 5673 rdata->gre_tunnel_type = start_params->gre_tunnel_type;
5674 rdata->inner_gre_rss_en = start_params->inner_gre_rss_en;
5675 rdata->vxlan_dst_port = cpu_to_le16(4789);
5676 rdata->sd_vlan_eth_type = cpu_to_le16(0x8100);
5657 5677
5658 /* No need for an explicit memory barrier here as long as we would 5678
5659 * need to ensure the ordering of writing to the SPQ element 5679 * need to ensure the ordering of writing to the SPQ element
@@ -5680,8 +5700,28 @@ static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
5680 memset(rdata, 0, sizeof(*rdata)); 5700 memset(rdata, 0, sizeof(*rdata));
5681 5701
5682 /* Fill the ramrod data with provided parameters */ 5702 /* Fill the ramrod data with provided parameters */
5683 rdata->tx_switch_suspend_change_flg = 1; 5703 if (test_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
5684 rdata->tx_switch_suspend = switch_update_params->suspend; 5704 &switch_update_params->changes)) {
5705 rdata->tx_switch_suspend_change_flg = 1;
5706 rdata->tx_switch_suspend =
5707 test_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
5708 &switch_update_params->changes);
5709 }
5710
5711 if (test_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
5712 &switch_update_params->changes)) {
5713 rdata->update_tunn_cfg_flg = 1;
5714 if (test_bit(BNX2X_F_UPDATE_TUNNEL_CLSS_EN,
5715 &switch_update_params->changes))
5716 rdata->tunn_clss_en = 1;
5717 if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN,
5718 &switch_update_params->changes))
5719 rdata->inner_gre_rss_en = 1;
5720 rdata->tunnel_mode = switch_update_params->tunnel_mode;
5721 rdata->gre_tunnel_type = switch_update_params->gre_tunnel_type;
5722 rdata->vxlan_dst_port = cpu_to_le16(4789);
5723 }
5724
5685 rdata->echo = SWITCH_UPDATE; 5725 rdata->echo = SWITCH_UPDATE;
5686 5726
5687 /* No need for an explicit memory barrier here as long as we 5727 /* No need for an explicit memory barrier here as long as we
@@ -5817,6 +5857,42 @@ static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
5817 U64_LO(data_mapping), NONE_CONNECTION_TYPE); 5857 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5818} 5858}
5819 5859
5860static inline
5861int bnx2x_func_send_set_timesync(struct bnx2x *bp,
5862 struct bnx2x_func_state_params *params)
5863{
5864 struct bnx2x_func_sp_obj *o = params->f_obj;
5865 struct set_timesync_ramrod_data *rdata =
5866 (struct set_timesync_ramrod_data *)o->rdata;
5867 dma_addr_t data_mapping = o->rdata_mapping;
5868 struct bnx2x_func_set_timesync_params *set_timesync_params =
5869 &params->params.set_timesync;
5870
5871 memset(rdata, 0, sizeof(*rdata));
5872
5873 /* Fill the ramrod data with provided parameters */
5874 rdata->drift_adjust_cmd = set_timesync_params->drift_adjust_cmd;
5875 rdata->offset_cmd = set_timesync_params->offset_cmd;
5876 rdata->add_sub_drift_adjust_value =
5877 set_timesync_params->add_sub_drift_adjust_value;
5878 rdata->drift_adjust_value = set_timesync_params->drift_adjust_value;
5879 rdata->drift_adjust_period = set_timesync_params->drift_adjust_period;
5880 rdata->offset_delta.lo =
5881 cpu_to_le32(U64_LO(set_timesync_params->offset_delta));
5882 rdata->offset_delta.hi =
5883 cpu_to_le32(U64_HI(set_timesync_params->offset_delta));
5884
5885 DP(BNX2X_MSG_SP, "Set timesync command params: drift_cmd = %d, offset_cmd = %d, add_sub_drift = %d, drift_val = %d, drift_period = %d, offset_lo = %d, offset_hi = %d\n",
5886 rdata->drift_adjust_cmd, rdata->offset_cmd,
5887 rdata->add_sub_drift_adjust_value, rdata->drift_adjust_value,
5888 rdata->drift_adjust_period, rdata->offset_delta.lo,
5889 rdata->offset_delta.hi);
5890
5891 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_TIMESYNC, 0,
5892 U64_HI(data_mapping),
5893 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5894}
5895
5820static int bnx2x_func_send_cmd(struct bnx2x *bp, 5896static int bnx2x_func_send_cmd(struct bnx2x *bp,
5821 struct bnx2x_func_state_params *params) 5897 struct bnx2x_func_state_params *params)
5822{ 5898{
@@ -5839,6 +5915,8 @@ static int bnx2x_func_send_cmd(struct bnx2x *bp,
5839 return bnx2x_func_send_tx_start(bp, params); 5915 return bnx2x_func_send_tx_start(bp, params);
5840 case BNX2X_F_CMD_SWITCH_UPDATE: 5916 case BNX2X_F_CMD_SWITCH_UPDATE:
5841 return bnx2x_func_send_switch_update(bp, params); 5917 return bnx2x_func_send_switch_update(bp, params);
5918 case BNX2X_F_CMD_SET_TIMESYNC:
5919 return bnx2x_func_send_set_timesync(bp, params);
5842 default: 5920 default:
5843 BNX2X_ERR("Unknown command: %d\n", params->cmd); 5921 BNX2X_ERR("Unknown command: %d\n", params->cmd);
5844 return -EINVAL; 5922 return -EINVAL;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 718ecd294661..21c8f6fb89e5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -711,6 +711,7 @@ enum {
711 BNX2X_RSS_IPV6, 711 BNX2X_RSS_IPV6,
712 BNX2X_RSS_IPV6_TCP, 712 BNX2X_RSS_IPV6_TCP,
713 BNX2X_RSS_IPV6_UDP, 713 BNX2X_RSS_IPV6_UDP,
714 BNX2X_RSS_GRE_INNER_HDRS,
714}; 715};
715 716
716struct bnx2x_config_rss_params { 717struct bnx2x_config_rss_params {
@@ -769,7 +770,9 @@ enum {
769 BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, 770 BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
770 BNX2X_Q_UPDATE_SILENT_VLAN_REM, 771 BNX2X_Q_UPDATE_SILENT_VLAN_REM,
771 BNX2X_Q_UPDATE_TX_SWITCHING_CHNG, 772 BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
772 BNX2X_Q_UPDATE_TX_SWITCHING 773 BNX2X_Q_UPDATE_TX_SWITCHING,
774 BNX2X_Q_UPDATE_PTP_PKTS_CHNG,
775 BNX2X_Q_UPDATE_PTP_PKTS,
773}; 776};
774 777
775/* Allowed Queue states */ 778/* Allowed Queue states */
@@ -831,6 +834,7 @@ enum {
831 BNX2X_Q_FLG_ANTI_SPOOF, 834 BNX2X_Q_FLG_ANTI_SPOOF,
832 BNX2X_Q_FLG_SILENT_VLAN_REM, 835 BNX2X_Q_FLG_SILENT_VLAN_REM,
833 BNX2X_Q_FLG_FORCE_DEFAULT_PRI, 836 BNX2X_Q_FLG_FORCE_DEFAULT_PRI,
837 BNX2X_Q_FLG_REFUSE_OUTBAND_VLAN,
834 BNX2X_Q_FLG_PCSUM_ON_PKT, 838 BNX2X_Q_FLG_PCSUM_ON_PKT,
835 BNX2X_Q_FLG_TUN_INC_INNER_IP_ID 839 BNX2X_Q_FLG_TUN_INC_INNER_IP_ID
836}; 840};
@@ -851,6 +855,10 @@ enum bnx2x_q_type {
851#define BNX2X_MULTI_TX_COS 3 /* Maximum possible */ 855#define BNX2X_MULTI_TX_COS 3 /* Maximum possible */
852 856
853#define MAC_PAD (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN) 857#define MAC_PAD (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
858/* DMAE channel to be used by FW for timesync workaround. A driver that sends
859 * timesync-related ramrods must not use this DMAE command ID.
860 */
861#define FW_DMAE_CMD_ID 6
854 862
855struct bnx2x_queue_init_params { 863struct bnx2x_queue_init_params {
856 struct { 864 struct {
@@ -1085,6 +1093,16 @@ struct bnx2x_queue_sp_obj {
1085}; 1093};
1086 1094
1087/********************** Function state update *********************************/ 1095/********************** Function state update *********************************/
1096
1097/* UPDATE command options */
1098enum {
1099 BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
1100 BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
1101 BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
1102 BNX2X_F_UPDATE_TUNNEL_CLSS_EN,
1103 BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN,
1104};
1105
1088/* Allowed Function states */ 1106/* Allowed Function states */
1089enum bnx2x_func_state { 1107enum bnx2x_func_state {
1090 BNX2X_F_STATE_RESET, 1108 BNX2X_F_STATE_RESET,
@@ -1105,6 +1123,7 @@ enum bnx2x_func_cmd {
1105 BNX2X_F_CMD_TX_STOP, 1123 BNX2X_F_CMD_TX_STOP,
1106 BNX2X_F_CMD_TX_START, 1124 BNX2X_F_CMD_TX_START,
1107 BNX2X_F_CMD_SWITCH_UPDATE, 1125 BNX2X_F_CMD_SWITCH_UPDATE,
1126 BNX2X_F_CMD_SET_TIMESYNC,
1108 BNX2X_F_CMD_MAX, 1127 BNX2X_F_CMD_MAX,
1109}; 1128};
1110 1129
@@ -1146,18 +1165,25 @@ struct bnx2x_func_start_params {
1146 /* Function cos mode */ 1165 /* Function cos mode */
1147 u8 network_cos_mode; 1166 u8 network_cos_mode;
1148 1167
1149 /* NVGRE classification enablement */ 1168 /* TUNN_MODE_NONE/TUNN_MODE_VXLAN/TUNN_MODE_GRE */
1150 u8 nvgre_clss_en; 1169 u8 tunnel_mode;
1151 1170
1152 /* NO_GRE_TUNNEL/NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */ 1171 /* tunneling classification enablement */
1153 u8 gre_tunnel_mode; 1172 u8 tunn_clss_en;
1154 1173
1155 /* GRE_OUTER_HEADERS_RSS/GRE_INNER_HEADERS_RSS/NVGRE_KEY_ENTROPY_RSS */ 1174 /* NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */
1156 u8 gre_tunnel_rss; 1175 u8 gre_tunnel_type;
1176
1177 /* Enables Inner GRE RSS on the function, depends on the client RSS
1178 * capailities
1179 */
1180 u8 inner_gre_rss_en;
1157}; 1181};
1158 1182
1159struct bnx2x_func_switch_update_params { 1183struct bnx2x_func_switch_update_params {
1160 u8 suspend; 1184 unsigned long changes; /* BNX2X_F_UPDATE_XX bits */
1185 u8 tunnel_mode;
1186 u8 gre_tunnel_type;
1161}; 1187};
1162 1188
1163struct bnx2x_func_afex_update_params { 1189struct bnx2x_func_afex_update_params {
@@ -1172,6 +1198,7 @@ struct bnx2x_func_afex_viflists_params {
1172 u8 afex_vif_list_command; 1198 u8 afex_vif_list_command;
1173 u8 func_to_clear; 1199 u8 func_to_clear;
1174}; 1200};
1201
1175struct bnx2x_func_tx_start_params { 1202struct bnx2x_func_tx_start_params {
1176 struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES]; 1203 struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES];
1177 u8 dcb_enabled; 1204 u8 dcb_enabled;
@@ -1179,6 +1206,24 @@ struct bnx2x_func_tx_start_params {
1179 u8 dont_add_pri_0_en; 1206 u8 dont_add_pri_0_en;
1180}; 1207};
1181 1208
1209struct bnx2x_func_set_timesync_params {
1210 /* Reset, set or keep the current drift value */
1211 u8 drift_adjust_cmd;
1212
1213 /* Dec, inc or keep the current offset */
1214 u8 offset_cmd;
1215
1216 /* Drift value direction */
1217 u8 add_sub_drift_adjust_value;
1218
1219 /* Drift, period and offset values to be used according to the commands
1220 * above.
1221 */
1222 u8 drift_adjust_value;
1223 u32 drift_adjust_period;
1224 u64 offset_delta;
1225};
1226
1182struct bnx2x_func_state_params { 1227struct bnx2x_func_state_params {
1183 struct bnx2x_func_sp_obj *f_obj; 1228 struct bnx2x_func_sp_obj *f_obj;
1184 1229
@@ -1197,6 +1242,7 @@ struct bnx2x_func_state_params {
1197 struct bnx2x_func_afex_update_params afex_update; 1242 struct bnx2x_func_afex_update_params afex_update;
1198 struct bnx2x_func_afex_viflists_params afex_viflists; 1243 struct bnx2x_func_afex_viflists_params afex_viflists;
1199 struct bnx2x_func_tx_start_params tx_start; 1244 struct bnx2x_func_tx_start_params tx_start;
1245 struct bnx2x_func_set_timesync_params set_timesync;
1200 } params; 1246 } params;
1201}; 1247};
1202 1248
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 662310c5f4e9..c88b20af87df 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1125,7 +1125,7 @@ static int bnx2x_ari_enabled(struct pci_dev *dev)
1125 return dev->bus->self && dev->bus->self->ari_enabled; 1125 return dev->bus->self && dev->bus->self->ari_enabled;
1126} 1126}
1127 1127
1128static void 1128static int
1129bnx2x_get_vf_igu_cam_info(struct bnx2x *bp) 1129bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
1130{ 1130{
1131 int sb_id; 1131 int sb_id;
@@ -1150,6 +1150,7 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
1150 GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)); 1150 GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
1151 } 1151 }
1152 DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool); 1152 DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
1153 return BP_VFDB(bp)->vf_sbs_pool;
1153} 1154}
1154 1155
1155static void __bnx2x_iov_free_vfdb(struct bnx2x *bp) 1156static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
@@ -1314,15 +1315,17 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
1314 } 1315 }
1315 1316
1316 /* re-read the IGU CAM for VFs - index and abs_vfid must be set */ 1317 /* re-read the IGU CAM for VFs - index and abs_vfid must be set */
1317 bnx2x_get_vf_igu_cam_info(bp); 1318 if (!bnx2x_get_vf_igu_cam_info(bp)) {
1319 BNX2X_ERR("No entries in IGU CAM for vfs\n");
1320 err = -EINVAL;
1321 goto failed;
1322 }
1318 1323
1319 /* allocate the queue arrays for all VFs */ 1324 /* allocate the queue arrays for all VFs */
1320 bp->vfdb->vfqs = kzalloc( 1325 bp->vfdb->vfqs = kzalloc(
1321 BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue), 1326 BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue),
1322 GFP_KERNEL); 1327 GFP_KERNEL);
1323 1328
1324 DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs);
1325
1326 if (!bp->vfdb->vfqs) { 1329 if (!bp->vfdb->vfqs) {
1327 BNX2X_ERR("failed to allocate vf queue array\n"); 1330 BNX2X_ERR("failed to allocate vf queue array\n");
1328 err = -ENOMEM; 1331 err = -ENOMEM;
@@ -1349,9 +1352,7 @@ void bnx2x_iov_remove_one(struct bnx2x *bp)
1349 if (!IS_SRIOV(bp)) 1352 if (!IS_SRIOV(bp))
1350 return; 1353 return;
1351 1354
1352 DP(BNX2X_MSG_IOV, "about to call disable sriov\n"); 1355 bnx2x_disable_sriov(bp);
1353 pci_disable_sriov(bp->pdev);
1354 DP(BNX2X_MSG_IOV, "sriov disabled\n");
1355 1356
1356 /* disable access to all VFs */ 1357 /* disable access to all VFs */
1357 for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) { 1358 for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) {
@@ -1985,21 +1986,6 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
1985 bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count; 1986 bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
1986} 1987}
1987 1988
1988static inline
1989struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
1990{
1991 int i;
1992 struct bnx2x_virtf *vf = NULL;
1993
1994 for_each_vf(bp, i) {
1995 vf = BP_VF(bp, i);
1996 if (stat_id >= vf->igu_base_id &&
1997 stat_id < vf->igu_base_id + vf_sb_count(vf))
1998 break;
1999 }
2000 return vf;
2001}
2002
2003/* VF API helpers */ 1989/* VF API helpers */
2004static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid, 1990static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
2005 u8 enable) 1991 u8 enable)
@@ -2362,12 +2348,6 @@ int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
2362 return rc; 2348 return rc;
2363} 2349}
2364 2350
2365static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
2366 struct bnx2x_virtf *vf, u32 *sbdf)
2367{
2368 *sbdf = vf->devfn | (vf->bus << 8);
2369}
2370
2371void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, 2351void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
2372 enum channel_tlvs tlv) 2352 enum channel_tlvs tlv)
2373{ 2353{
@@ -2416,7 +2396,7 @@ void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
2416 2396
2417 /* log the unlock */ 2397 /* log the unlock */
2418 DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n", 2398 DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
2419 vf->abs_vfid, vf->op_current); 2399 vf->abs_vfid, current_tlv);
2420} 2400}
2421 2401
2422static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable) 2402static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
@@ -2501,7 +2481,7 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
2501 bp->requested_nr_virtfn = num_vfs_param; 2481 bp->requested_nr_virtfn = num_vfs_param;
2502 if (num_vfs_param == 0) { 2482 if (num_vfs_param == 0) {
2503 bnx2x_set_pf_tx_switching(bp, false); 2483 bnx2x_set_pf_tx_switching(bp, false);
2504 pci_disable_sriov(dev); 2484 bnx2x_disable_sriov(bp);
2505 return 0; 2485 return 0;
2506 } else { 2486 } else {
2507 return bnx2x_enable_sriov(bp); 2487 return bnx2x_enable_sriov(bp);
@@ -2614,6 +2594,12 @@ void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
2614 2594
2615void bnx2x_disable_sriov(struct bnx2x *bp) 2595void bnx2x_disable_sriov(struct bnx2x *bp)
2616{ 2596{
2597 if (pci_vfs_assigned(bp->pdev)) {
2598 DP(BNX2X_MSG_IOV,
2599 "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
2600 return;
2601 }
2602
2617 pci_disable_sriov(bp->pdev); 2603 pci_disable_sriov(bp->pdev);
2618} 2604}
2619 2605
@@ -2628,7 +2614,7 @@ static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
2628 } 2614 }
2629 2615
2630 if (!IS_SRIOV(bp)) { 2616 if (!IS_SRIOV(bp)) {
2631 BNX2X_ERR("sriov is disabled - can't utilize iov-realted functionality\n"); 2617 BNX2X_ERR("sriov is disabled - can't utilize iov-related functionality\n");
2632 return -EINVAL; 2618 return -EINVAL;
2633 } 2619 }
2634 2620
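
Both teardown paths above now funnel through bnx2x_disable_sriov(), which refuses to touch the hardware while a guest still owns a VF. A minimal sketch of that guard pattern, assuming a hypothetical driver context struct foo_dev (pci_vfs_assigned() and pci_disable_sriov() are the real PCI core helpers):

/* Skip SR-IOV teardown while any VF is assigned to a guest;
 * disabling SR-IOV underneath an assigned VF would yank the
 * device out from under the VM.
 */
static void foo_disable_sriov(struct foo_dev *fd)
{
	if (pci_vfs_assigned(fd->pdev)) {
		dev_warn(&fd->pdev->dev,
			 "VFs still assigned - not disabling SR-IOV\n");
		return;
	}
	pci_disable_sriov(fd->pdev);
}
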
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index ca1055f3d8af..01bafa4ac045 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -299,7 +299,8 @@ struct bnx2x_vfdb {
299#define BP_VFDB(bp) ((bp)->vfdb) 299#define BP_VFDB(bp) ((bp)->vfdb)
300 /* vf array */ 300 /* vf array */
301 struct bnx2x_virtf *vfs; 301 struct bnx2x_virtf *vfs;
302#define BP_VF(bp, idx) (&((bp)->vfdb->vfs[idx])) 302#define BP_VF(bp, idx) ((BP_VFDB(bp) && (bp)->vfdb->vfs) ? \
303 &((bp)->vfdb->vfs[idx]) : NULL)
303#define bnx2x_vf(bp, idx, var) ((bp)->vfdb->vfs[idx].var) 304#define bnx2x_vf(bp, idx, var) ((bp)->vfdb->vfs[idx].var)
304 305
305 /* queue array - for all vfs */ 306 /* queue array - for all vfs */
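
Since BP_VF() can now evaluate to NULL whenever the vfdb or its vf array is absent, callers are expected to test the result instead of dereferencing it blindly. A hedged sketch of the intended call pattern (hypothetical caller, not code from the driver):

	struct bnx2x_virtf *vf = BP_VF(bp, vfidx);

	if (!vf)	/* vfdb or vf array not allocated yet */
		return -EINVAL;
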
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index ca47665f94bf..d1608297c773 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -137,7 +137,7 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp)
137 cpu_to_le16(bp->stats_counter++); 137 cpu_to_le16(bp->stats_counter++);
138 138
139 DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n", 139 DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
140 bp->fw_stats_req->hdr.drv_stats_counter); 140 le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter));
141 141
142 /* adjust the ramrod to include VF queues statistics */ 142 /* adjust the ramrod to include VF queues statistics */
143 bnx2x_iov_adjust_stats_req(bp); 143 bnx2x_iov_adjust_stats_req(bp);
@@ -200,7 +200,7 @@ static void bnx2x_hw_stats_post(struct bnx2x *bp)
200 } 200 }
201} 201}
202 202
203static int bnx2x_stats_comp(struct bnx2x *bp) 203static void bnx2x_stats_comp(struct bnx2x *bp)
204{ 204{
205 u32 *stats_comp = bnx2x_sp(bp, stats_comp); 205 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
206 int cnt = 10; 206 int cnt = 10;
@@ -214,7 +214,6 @@ static int bnx2x_stats_comp(struct bnx2x *bp)
214 cnt--; 214 cnt--;
215 usleep_range(1000, 2000); 215 usleep_range(1000, 2000);
216 } 216 }
217 return 1;
218} 217}
219 218
220/* 219/*
@@ -1630,6 +1629,11 @@ void bnx2x_stats_init(struct bnx2x *bp)
1630 int /*abs*/port = BP_PORT(bp); 1629 int /*abs*/port = BP_PORT(bp);
1631 int mb_idx = BP_FW_MB_IDX(bp); 1630 int mb_idx = BP_FW_MB_IDX(bp);
1632 1631
1632 if (IS_VF(bp)) {
1633 bnx2x_memset_stats(bp);
1634 return;
1635 }
1636
1633 bp->stats_pending = 0; 1637 bp->stats_pending = 0;
1634 bp->executer_idx = 0; 1638 bp->executer_idx = 0;
1635 bp->stats_counter = 0; 1639 bp->stats_counter = 0;
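
The one-liner in bnx2x_storm_stats_post() only matters on big-endian hosts: drv_stats_counter is stored in wire order via cpu_to_le16(), so printing the raw field there would show a byte-swapped value. A sketch of the round trip the fix assumes:

static u16 printable_counter(u16 counter)
{
	__le16 wire = cpu_to_le16(counter);	/* as stored in the request */

	return le16_to_cpu(wire);	/* equals counter on any endianness */
}
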
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 54e0427a9ee6..b1d9c44aa56c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -583,7 +583,6 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
583 flags |= VFPF_QUEUE_FLG_STATS; 583 flags |= VFPF_QUEUE_FLG_STATS;
584 flags |= VFPF_QUEUE_FLG_CACHE_ALIGN; 584 flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
585 flags |= VFPF_QUEUE_FLG_VLAN; 585 flags |= VFPF_QUEUE_FLG_VLAN;
586 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
587 586
588 /* Common */ 587 /* Common */
589 req->vf_qid = fp_idx; 588 req->vf_qid = fp_idx;
@@ -952,14 +951,6 @@ static void storm_memset_vf_mbx_valid(struct bnx2x *bp, u16 abs_fid)
952 REG_WR8(bp, addr, 1); 951 REG_WR8(bp, addr, 1);
953} 952}
954 953
955static inline void bnx2x_set_vf_mbxs_valid(struct bnx2x *bp)
956{
957 int i;
958
959 for_each_vf(bp, i)
960 storm_memset_vf_mbx_valid(bp, bnx2x_vf(bp, i, abs_vfid));
961}
962
963/* enable vf_pf mailbox (aka vf-pf-channel) */ 954/* enable vf_pf mailbox (aka vf-pf-channel) */
964void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid) 955void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid)
965{ 956{
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index a6a9f284c8dd..23f23c97c2ad 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -383,7 +383,7 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
383 break; 383 break;
384 384
385 rcu_read_lock(); 385 rcu_read_lock();
386 if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) { 386 if (!rcu_access_pointer(cp->ulp_ops[CNIC_ULP_L4])) {
387 rc = -ENODEV; 387 rc = -ENODEV;
388 rcu_read_unlock(); 388 rcu_read_unlock();
389 break; 389 break;
@@ -527,7 +527,7 @@ int cnic_unregister_driver(int ulp_type)
527 list_for_each_entry(dev, &cnic_dev_list, list) { 527 list_for_each_entry(dev, &cnic_dev_list, list) {
528 struct cnic_local *cp = dev->cnic_priv; 528 struct cnic_local *cp = dev->cnic_priv;
529 529
530 if (rcu_dereference(cp->ulp_ops[ulp_type])) { 530 if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
531 pr_err("%s: Type %d still has devices registered\n", 531 pr_err("%s: Type %d still has devices registered\n",
532 __func__, ulp_type); 532 __func__, ulp_type);
533 read_unlock(&cnic_dev_lock); 533 read_unlock(&cnic_dev_lock);
@@ -575,7 +575,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
575 mutex_unlock(&cnic_lock); 575 mutex_unlock(&cnic_lock);
576 return -EAGAIN; 576 return -EAGAIN;
577 } 577 }
578 if (rcu_dereference(cp->ulp_ops[ulp_type])) { 578 if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
579 pr_err("%s: Type %d has already been registered to this device\n", 579 pr_err("%s: Type %d has already been registered to this device\n",
580 __func__, ulp_type); 580 __func__, ulp_type);
581 mutex_unlock(&cnic_lock); 581 mutex_unlock(&cnic_lock);
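
All three cnic.c conversions apply the same RCU rule: rcu_dereference() is for pointers that will actually be dereferenced inside a read-side critical section, while rcu_access_pointer() is the lighter accessor for merely inspecting the pointer value, e.g. comparing it against NULL. A sketch with a hypothetical RCU-protected slot:

struct my_ops __rcu *slot;	/* hypothetical RCU-protected pointer */

static bool slot_in_use(void)
{
	/* the value is only compared, never dereferenced */
	return rcu_access_pointer(slot) != NULL;
}
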
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 962510f391df..5ba5ad071bb6 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -186,6 +186,7 @@ struct enic {
186 ____cacheline_aligned struct vnic_cq cq[ENIC_CQ_MAX]; 186 ____cacheline_aligned struct vnic_cq cq[ENIC_CQ_MAX];
187 unsigned int cq_count; 187 unsigned int cq_count;
188 struct enic_rfs_flw_tbl rfs_h; 188 struct enic_rfs_flw_tbl rfs_h;
189 u32 rx_copybreak;
189}; 190};
190 191
191static inline struct device *enic_get_dev(struct enic *enic) 192static inline struct device *enic_get_dev(struct enic *enic)
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 523c9ceb04c0..85173d620758 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -379,6 +379,43 @@ static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
379 return ret; 379 return ret;
380} 380}
381 381
382static int enic_get_tunable(struct net_device *dev,
383 const struct ethtool_tunable *tuna, void *data)
384{
385 struct enic *enic = netdev_priv(dev);
386 int ret = 0;
387
388 switch (tuna->id) {
389 case ETHTOOL_RX_COPYBREAK:
390 *(u32 *)data = enic->rx_copybreak;
391 break;
392 default:
393 ret = -EINVAL;
394 break;
395 }
396
397 return ret;
398}
399
400static int enic_set_tunable(struct net_device *dev,
401 const struct ethtool_tunable *tuna,
402 const void *data)
403{
404 struct enic *enic = netdev_priv(dev);
405 int ret = 0;
406
407 switch (tuna->id) {
408 case ETHTOOL_RX_COPYBREAK:
409 enic->rx_copybreak = *(u32 *)data;
410 break;
411 default:
412 ret = -EINVAL;
413 break;
414 }
415
416 return ret;
417}
418
382static const struct ethtool_ops enic_ethtool_ops = { 419static const struct ethtool_ops enic_ethtool_ops = {
383 .get_settings = enic_get_settings, 420 .get_settings = enic_get_settings,
384 .get_drvinfo = enic_get_drvinfo, 421 .get_drvinfo = enic_get_drvinfo,
@@ -391,6 +428,8 @@ static const struct ethtool_ops enic_ethtool_ops = {
391 .get_coalesce = enic_get_coalesce, 428 .get_coalesce = enic_get_coalesce,
392 .set_coalesce = enic_set_coalesce, 429 .set_coalesce = enic_set_coalesce,
393 .get_rxnfc = enic_get_rxnfc, 430 .get_rxnfc = enic_get_rxnfc,
431 .get_tunable = enic_get_tunable,
432 .set_tunable = enic_set_tunable,
394}; 433};
395 434
396void enic_set_ethtool_ops(struct net_device *netdev) 435void enic_set_ethtool_ops(struct net_device *netdev)
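
With the two hooks wired into enic_ethtool_ops, the copybreak threshold becomes adjustable at runtime through the generic ethtool tunable interface. Assuming an interface named eth0 and a sufficiently recent ethtool binary that exposes ETHTOOL_RX_COPYBREAK as rx-copybreak:

	ethtool --get-tunable eth0 rx-copybreak
	ethtool --set-tunable eth0 rx-copybreak 128
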
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index c8832bc1c5f7..929bfe70080a 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -66,6 +66,8 @@
66#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN 0x0044 /* enet dynamic vnic */ 66#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN 0x0044 /* enet dynamic vnic */
67#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF 0x0071 /* enet SRIOV VF */ 67#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF 0x0071 /* enet SRIOV VF */
68 68
69#define RX_COPYBREAK_DEFAULT 256
70
69/* Supported devices */ 71/* Supported devices */
70static const struct pci_device_id enic_id_table[] = { 72static const struct pci_device_id enic_id_table[] = {
71 { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) }, 73 { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
@@ -924,6 +926,7 @@ static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
924 pci_unmap_single(enic->pdev, buf->dma_addr, 926 pci_unmap_single(enic->pdev, buf->dma_addr,
925 buf->len, PCI_DMA_FROMDEVICE); 927 buf->len, PCI_DMA_FROMDEVICE);
926 dev_kfree_skb_any(buf->os_buf); 928 dev_kfree_skb_any(buf->os_buf);
929 buf->os_buf = NULL;
927} 930}
928 931
929static int enic_rq_alloc_buf(struct vnic_rq *rq) 932static int enic_rq_alloc_buf(struct vnic_rq *rq)
@@ -934,7 +937,24 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
934 unsigned int len = netdev->mtu + VLAN_ETH_HLEN; 937 unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
935 unsigned int os_buf_index = 0; 938 unsigned int os_buf_index = 0;
936 dma_addr_t dma_addr; 939 dma_addr_t dma_addr;
940 struct vnic_rq_buf *buf = rq->to_use;
941
942 if (buf->os_buf) {
943 buf = buf->next;
944 rq->to_use = buf;
945 rq->ring.desc_avail--;
946 if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
947 /* Adding write memory barrier prevents compiler and/or
948 * CPU reordering, thus avoiding descriptor posting
949 * before descriptor is initialized. Otherwise, hardware
950 * can read stale descriptor fields.
951 */
952 wmb();
953 iowrite32(buf->index, &rq->ctrl->posted_index);
954 }
937 955
956 return 0;
957 }
938 skb = netdev_alloc_skb_ip_align(netdev, len); 958 skb = netdev_alloc_skb_ip_align(netdev, len);
939 if (!skb) 959 if (!skb)
940 return -ENOMEM; 960 return -ENOMEM;
@@ -957,6 +977,25 @@ static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
957 pkt_size->small_pkt_bytes_cnt += pkt_len; 977 pkt_size->small_pkt_bytes_cnt += pkt_len;
958} 978}
959 979
980static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb,
981 struct vnic_rq_buf *buf, u16 len)
982{
983 struct enic *enic = netdev_priv(netdev);
984 struct sk_buff *new_skb;
985
986 if (len > enic->rx_copybreak)
987 return false;
988 new_skb = netdev_alloc_skb_ip_align(netdev, len);
989 if (!new_skb)
990 return false;
991 pci_dma_sync_single_for_cpu(enic->pdev, buf->dma_addr, len,
992 DMA_FROM_DEVICE);
993 memcpy(new_skb->data, (*skb)->data, len);
994 *skb = new_skb;
995
996 return true;
997}
998
960static void enic_rq_indicate_buf(struct vnic_rq *rq, 999static void enic_rq_indicate_buf(struct vnic_rq *rq,
961 struct cq_desc *cq_desc, struct vnic_rq_buf *buf, 1000 struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
962 int skipped, void *opaque) 1001 int skipped, void *opaque)
@@ -978,9 +1017,6 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
978 return; 1017 return;
979 1018
980 skb = buf->os_buf; 1019 skb = buf->os_buf;
981 prefetch(skb->data - NET_IP_ALIGN);
982 pci_unmap_single(enic->pdev, buf->dma_addr,
983 buf->len, PCI_DMA_FROMDEVICE);
984 1020
985 cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, 1021 cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
986 &type, &color, &q_number, &completed_index, 1022 &type, &color, &q_number, &completed_index,
@@ -1011,6 +1047,13 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
1011 /* Good receive 1047 /* Good receive
1012 */ 1048 */
1013 1049
1050 if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
1051 buf->os_buf = NULL;
1052 pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
1053 PCI_DMA_FROMDEVICE);
1054 }
1055 prefetch(skb->data - NET_IP_ALIGN);
1056
1014 skb_put(skb, bytes_written); 1057 skb_put(skb, bytes_written);
1015 skb->protocol = eth_type_trans(skb, netdev); 1058 skb->protocol = eth_type_trans(skb, netdev);
1016 skb_record_rx_queue(skb, q_number); 1059 skb_record_rx_queue(skb, q_number);
@@ -2531,6 +2574,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2531 dev_err(dev, "Cannot register net device, aborting\n"); 2574 dev_err(dev, "Cannot register net device, aborting\n");
2532 goto err_out_dev_deinit; 2575 goto err_out_dev_deinit;
2533 } 2576 }
2577 enic->rx_copybreak = RX_COPYBREAK_DEFAULT;
2534 2578
2535 return 0; 2579 return 0;
2536 2580
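
Taken together, the enic_main.c hunks implement a classic copybreak receive path: frames no larger than rx_copybreak are copied into a freshly allocated skb, so the original buffer keeps its DMA mapping and is simply reposted (the buf->os_buf branch in enic_rq_alloc_buf(), including the wmb() that orders descriptor initialization before the posted_index write); larger frames are unmapped and handed up as before. A generic sketch of the decision, with illustrative names rather than the enic API:

/* Copy small frames; let the caller steal the buffer for large ones. */
static struct sk_buff *rx_copybreak(struct net_device *dev,
				    struct sk_buff *orig, dma_addr_t dma,
				    u16 len, u32 thresh)
{
	struct sk_buff *copy;

	if (len > thresh)
		return NULL;	/* caller unmaps; orig goes up the stack */

	copy = netdev_alloc_skb_ip_align(dev, len);
	if (!copy)
		return NULL;	/* allocation failed: fall back to stealing */

	dma_sync_single_for_cpu(dev->dev.parent, dma, len, DMA_FROM_DEVICE);
	memcpy(copy->data, orig->data, len);
	return copy;		/* orig stays mapped for immediate reuse */
}
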
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 322213d901d5..c8205606c775 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -328,10 +328,10 @@ static void allocate_rx_buffer(struct net_device *);
328static void update_cr6(u32, void __iomem *); 328static void update_cr6(u32, void __iomem *);
329static void send_filter_frame(struct DEVICE *); 329static void send_filter_frame(struct DEVICE *);
330static void dm9132_id_table(struct DEVICE *); 330static void dm9132_id_table(struct DEVICE *);
331static u16 phy_read(void __iomem *, u8, u8, u32); 331static u16 dmfe_phy_read(void __iomem *, u8, u8, u32);
332static void phy_write(void __iomem *, u8, u8, u16, u32); 332static void dmfe_phy_write(void __iomem *, u8, u8, u16, u32);
333static void phy_write_1bit(void __iomem *, u32); 333static void dmfe_phy_write_1bit(void __iomem *, u32);
334static u16 phy_read_1bit(void __iomem *); 334static u16 dmfe_phy_read_1bit(void __iomem *);
335static u8 dmfe_sense_speed(struct dmfe_board_info *); 335static u8 dmfe_sense_speed(struct dmfe_board_info *);
336static void dmfe_process_mode(struct dmfe_board_info *); 336static void dmfe_process_mode(struct dmfe_board_info *);
337static void dmfe_timer(unsigned long); 337static void dmfe_timer(unsigned long);
@@ -770,7 +770,7 @@ static int dmfe_stop(struct DEVICE *dev)
770 /* Reset & stop DM910X board */ 770 /* Reset & stop DM910X board */
771 dw32(DCR0, DM910X_RESET); 771 dw32(DCR0, DM910X_RESET);
772 udelay(100); 772 udelay(100);
773 phy_write(ioaddr, db->phy_addr, 0, 0x8000, db->chip_id); 773 dmfe_phy_write(ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);
774 774
775 /* free interrupt */ 775 /* free interrupt */
776 free_irq(db->pdev->irq, dev); 776 free_irq(db->pdev->irq, dev);
@@ -1154,7 +1154,7 @@ static void dmfe_timer(unsigned long data)
1154 if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) { 1154 if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) {
1155 db->cr6_data &= ~0x40000; 1155 db->cr6_data &= ~0x40000;
1156 update_cr6(db->cr6_data, ioaddr); 1156 update_cr6(db->cr6_data, ioaddr);
1157 phy_write(ioaddr, db->phy_addr, 0, 0x1000, db->chip_id); 1157 dmfe_phy_write(ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
1158 db->cr6_data |= 0x40000; 1158 db->cr6_data |= 0x40000;
1159 update_cr6(db->cr6_data, ioaddr); 1159 update_cr6(db->cr6_data, ioaddr);
1160 db->timer.expires = DMFE_TIMER_WUT + HZ * 2; 1160 db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
@@ -1230,9 +1230,9 @@ static void dmfe_timer(unsigned long data)
1230 */ 1230 */
1231 1231
1232 /* need a dummy read because of PHY's register latch*/ 1232 /* need a dummy read because of PHY's register latch*/
1233 phy_read (db->ioaddr, db->phy_addr, 1, db->chip_id); 1233 dmfe_phy_read (db->ioaddr, db->phy_addr, 1, db->chip_id);
1234 link_ok_phy = (phy_read (db->ioaddr, 1234 link_ok_phy = (dmfe_phy_read (db->ioaddr,
1235 db->phy_addr, 1, db->chip_id) & 0x4) ? 1 : 0; 1235 db->phy_addr, 1, db->chip_id) & 0x4) ? 1 : 0;
1236 1236
1237 if (link_ok_phy != link_ok) { 1237 if (link_ok_phy != link_ok) {
1238 DMFE_DBUG (0, "PHY and chip report different link status", 0); 1238 DMFE_DBUG (0, "PHY and chip report different link status", 0);
@@ -1247,8 +1247,8 @@ static void dmfe_timer(unsigned long data)
1247 /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */ 1247 /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
1248 /* AUTO or force 1M Homerun/Longrun don't need */ 1248 /* AUTO or force 1M Homerun/Longrun don't need */
1249 if ( !(db->media_mode & 0x38) ) 1249 if ( !(db->media_mode & 0x38) )
1250 phy_write(db->ioaddr, db->phy_addr, 1250 dmfe_phy_write(db->ioaddr, db->phy_addr,
1251 0, 0x1000, db->chip_id); 1251 0, 0x1000, db->chip_id);
1252 1252
1253 /* AUTO mode, if INT phyxcer link failed, select EXT device */ 1253 /* AUTO mode, if INT phyxcer link failed, select EXT device */
1254 if (db->media_mode & DMFE_AUTO) { 1254 if (db->media_mode & DMFE_AUTO) {
@@ -1649,16 +1649,16 @@ static u8 dmfe_sense_speed(struct dmfe_board_info *db)
1649 /* CR6 bit18=0, select 10/100M */ 1649 /* CR6 bit18=0, select 10/100M */
1650 update_cr6(db->cr6_data & ~0x40000, ioaddr); 1650 update_cr6(db->cr6_data & ~0x40000, ioaddr);
1651 1651
1652 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); 1652 phy_mode = dmfe_phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1653 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); 1653 phy_mode = dmfe_phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1654 1654
1655 if ( (phy_mode & 0x24) == 0x24 ) { 1655 if ( (phy_mode & 0x24) == 0x24 ) {
1656 if (db->chip_id == PCI_DM9132_ID) /* DM9132 */ 1656 if (db->chip_id == PCI_DM9132_ID) /* DM9132 */
1657 phy_mode = phy_read(db->ioaddr, 1657 phy_mode = dmfe_phy_read(db->ioaddr,
1658 db->phy_addr, 7, db->chip_id) & 0xf000; 1658 db->phy_addr, 7, db->chip_id) & 0xf000;
1659 else /* DM9102/DM9102A */ 1659 else /* DM9102/DM9102A */
1660 phy_mode = phy_read(db->ioaddr, 1660 phy_mode = dmfe_phy_read(db->ioaddr,
1661 db->phy_addr, 17, db->chip_id) & 0xf000; 1661 db->phy_addr, 17, db->chip_id) & 0xf000;
1662 switch (phy_mode) { 1662 switch (phy_mode) {
1663 case 0x1000: db->op_mode = DMFE_10MHF; break; 1663 case 0x1000: db->op_mode = DMFE_10MHF; break;
1664 case 0x2000: db->op_mode = DMFE_10MFD; break; 1664 case 0x2000: db->op_mode = DMFE_10MFD; break;
@@ -1695,15 +1695,15 @@ static void dmfe_set_phyxcer(struct dmfe_board_info *db)
1695 1695
1696 /* DM9009 Chip: Phyxcer reg18 bit12=0 */ 1696 /* DM9009 Chip: Phyxcer reg18 bit12=0 */
1697 if (db->chip_id == PCI_DM9009_ID) { 1697 if (db->chip_id == PCI_DM9009_ID) {
1698 phy_reg = phy_read(db->ioaddr, 1698 phy_reg = dmfe_phy_read(db->ioaddr,
1699 db->phy_addr, 18, db->chip_id) & ~0x1000; 1699 db->phy_addr, 18, db->chip_id) & ~0x1000;
1700 1700
1701 phy_write(db->ioaddr, 1701 dmfe_phy_write(db->ioaddr,
1702 db->phy_addr, 18, phy_reg, db->chip_id); 1702 db->phy_addr, 18, phy_reg, db->chip_id);
1703 } 1703 }
1704 1704
1705 /* Phyxcer capability setting */ 1705 /* Phyxcer capability setting */
1706 phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0; 1706 phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;
1707 1707
1708 if (db->media_mode & DMFE_AUTO) { 1708 if (db->media_mode & DMFE_AUTO) {
1709 /* AUTO Mode */ 1709 /* AUTO Mode */
@@ -1724,13 +1724,13 @@ static void dmfe_set_phyxcer(struct dmfe_board_info *db)
1724 phy_reg|=db->PHY_reg4; 1724 phy_reg|=db->PHY_reg4;
1725 db->media_mode|=DMFE_AUTO; 1725 db->media_mode|=DMFE_AUTO;
1726 } 1726 }
1727 phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id); 1727 dmfe_phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);
1728 1728
1729 /* Restart Auto-Negotiation */ 1729 /* Restart Auto-Negotiation */
1730 if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) ) 1730 if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
1731 phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id); 1731 dmfe_phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id);
1732 if ( !db->chip_type ) 1732 if ( !db->chip_type )
1733 phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id); 1733 dmfe_phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
1734} 1734}
1735 1735
1736 1736
@@ -1762,7 +1762,7 @@ static void dmfe_process_mode(struct dmfe_board_info *db)
1762 /* 10/100M phyxcer force mode need */ 1762 /* 10/100M phyxcer force mode need */
1763 if ( !(db->media_mode & 0x18)) { 1763 if ( !(db->media_mode & 0x18)) {
1764 /* Force Mode */ 1764 /* Force Mode */
1765 phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id); 1765 phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
1766 if ( !(phy_reg & 0x1) ) { 1766 if ( !(phy_reg & 0x1) ) {
1767 /* partner without N-Way capability */ 1767 /* partner without N-Way capability */
1768 phy_reg = 0x0; 1768 phy_reg = 0x0;
@@ -1772,12 +1772,12 @@ static void dmfe_process_mode(struct dmfe_board_info *db)
1772 case DMFE_100MHF: phy_reg = 0x2000; break; 1772 case DMFE_100MHF: phy_reg = 0x2000; break;
1773 case DMFE_100MFD: phy_reg = 0x2100; break; 1773 case DMFE_100MFD: phy_reg = 0x2100; break;
1774 } 1774 }
1775 phy_write(db->ioaddr, 1775 dmfe_phy_write(db->ioaddr,
1776 db->phy_addr, 0, phy_reg, db->chip_id); 1776 db->phy_addr, 0, phy_reg, db->chip_id);
1777 if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) ) 1777 if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
1778 mdelay(20); 1778 mdelay(20);
1779 phy_write(db->ioaddr, 1779 dmfe_phy_write(db->ioaddr,
1780 db->phy_addr, 0, phy_reg, db->chip_id); 1780 db->phy_addr, 0, phy_reg, db->chip_id);
1781 } 1781 }
1782 } 1782 }
1783} 1783}
@@ -1787,8 +1787,8 @@ static void dmfe_process_mode(struct dmfe_board_info *db)
1787 * Write a word to Phy register 1787 * Write a word to Phy register
1788 */ 1788 */
1789 1789
1790static void phy_write(void __iomem *ioaddr, u8 phy_addr, u8 offset, 1790static void dmfe_phy_write(void __iomem *ioaddr, u8 phy_addr, u8 offset,
1791 u16 phy_data, u32 chip_id) 1791 u16 phy_data, u32 chip_id)
1792{ 1792{
1793 u16 i; 1793 u16 i;
1794 1794
@@ -1799,34 +1799,34 @@ static void phy_write(void __iomem *ioaddr, u8 phy_addr, u8 offset,
1799 1799
1800 /* Send 33 synchronization clock to Phy controller */ 1800 /* Send 33 synchronization clock to Phy controller */
1801 for (i = 0; i < 35; i++) 1801 for (i = 0; i < 35; i++)
1802 phy_write_1bit(ioaddr, PHY_DATA_1); 1802 dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1803 1803
1804 /* Send start command(01) to Phy */ 1804 /* Send start command(01) to Phy */
1805 phy_write_1bit(ioaddr, PHY_DATA_0); 1805 dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
1806 phy_write_1bit(ioaddr, PHY_DATA_1); 1806 dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1807 1807
1808 /* Send write command(01) to Phy */ 1808 /* Send write command(01) to Phy */
1809 phy_write_1bit(ioaddr, PHY_DATA_0); 1809 dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
1810 phy_write_1bit(ioaddr, PHY_DATA_1); 1810 dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1811 1811
1812 /* Send Phy address */ 1812 /* Send Phy address */
1813 for (i = 0x10; i > 0; i = i >> 1) 1813 for (i = 0x10; i > 0; i = i >> 1)
1814 phy_write_1bit(ioaddr, 1814 dmfe_phy_write_1bit(ioaddr,
1815 phy_addr & i ? PHY_DATA_1 : PHY_DATA_0); 1815 phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1816 1816
1817 /* Send register address */ 1817 /* Send register address */
1818 for (i = 0x10; i > 0; i = i >> 1) 1818 for (i = 0x10; i > 0; i = i >> 1)
1819 phy_write_1bit(ioaddr, 1819 dmfe_phy_write_1bit(ioaddr,
1820 offset & i ? PHY_DATA_1 : PHY_DATA_0); 1820 offset & i ? PHY_DATA_1 : PHY_DATA_0);
1821 1821
1822 /* write transition */ 1822 /* write transition */
1823 phy_write_1bit(ioaddr, PHY_DATA_1); 1823 dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1824 phy_write_1bit(ioaddr, PHY_DATA_0); 1824 dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
1825 1825
1826 /* Write a word data to PHY controller */ 1826 /* Write a word data to PHY controller */
1827 for ( i = 0x8000; i > 0; i >>= 1) 1827 for ( i = 0x8000; i > 0; i >>= 1)
1828 phy_write_1bit(ioaddr, 1828 dmfe_phy_write_1bit(ioaddr,
1829 phy_data & i ? PHY_DATA_1 : PHY_DATA_0); 1829 phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
1830 } 1830 }
1831} 1831}
1832 1832
@@ -1835,7 +1835,7 @@ static void phy_write(void __iomem *ioaddr, u8 phy_addr, u8 offset,
1835 * Read a word data from phy register 1835 * Read a word data from phy register
1836 */ 1836 */
1837 1837
1838static u16 phy_read(void __iomem *ioaddr, u8 phy_addr, u8 offset, u32 chip_id) 1838static u16 dmfe_phy_read(void __iomem *ioaddr, u8 phy_addr, u8 offset, u32 chip_id)
1839{ 1839{
1840 int i; 1840 int i;
1841 u16 phy_data; 1841 u16 phy_data;
@@ -1848,33 +1848,33 @@ static u16 phy_read(void __iomem *ioaddr, u8 phy_addr, u8 offset, u32 chip_id)
1848 1848
1849 /* Send 33 synchronization clock to Phy controller */ 1849 /* Send 33 synchronization clock to Phy controller */
1850 for (i = 0; i < 35; i++) 1850 for (i = 0; i < 35; i++)
1851 phy_write_1bit(ioaddr, PHY_DATA_1); 1851 dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1852 1852
1853 /* Send start command(01) to Phy */ 1853 /* Send start command(01) to Phy */
1854 phy_write_1bit(ioaddr, PHY_DATA_0); 1854 dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
1855 phy_write_1bit(ioaddr, PHY_DATA_1); 1855 dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1856 1856
1857 /* Send read command(10) to Phy */ 1857 /* Send read command(10) to Phy */
1858 phy_write_1bit(ioaddr, PHY_DATA_1); 1858 dmfe_phy_write_1bit(ioaddr, PHY_DATA_1);
1859 phy_write_1bit(ioaddr, PHY_DATA_0); 1859 dmfe_phy_write_1bit(ioaddr, PHY_DATA_0);
1860 1860
1861 /* Send Phy address */ 1861 /* Send Phy address */
1862 for (i = 0x10; i > 0; i = i >> 1) 1862 for (i = 0x10; i > 0; i = i >> 1)
1863 phy_write_1bit(ioaddr, 1863 dmfe_phy_write_1bit(ioaddr,
1864 phy_addr & i ? PHY_DATA_1 : PHY_DATA_0); 1864 phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1865 1865
1866 /* Send register address */ 1866 /* Send register address */
1867 for (i = 0x10; i > 0; i = i >> 1) 1867 for (i = 0x10; i > 0; i = i >> 1)
1868 phy_write_1bit(ioaddr, 1868 dmfe_phy_write_1bit(ioaddr,
1869 offset & i ? PHY_DATA_1 : PHY_DATA_0); 1869 offset & i ? PHY_DATA_1 : PHY_DATA_0);
1870 1870
1871 /* Skip transition state */ 1871 /* Skip transition state */
1872 phy_read_1bit(ioaddr); 1872 dmfe_phy_read_1bit(ioaddr);
1873 1873
1874 /* read 16bit data */ 1874 /* read 16bit data */
1875 for (phy_data = 0, i = 0; i < 16; i++) { 1875 for (phy_data = 0, i = 0; i < 16; i++) {
1876 phy_data <<= 1; 1876 phy_data <<= 1;
1877 phy_data |= phy_read_1bit(ioaddr); 1877 phy_data |= dmfe_phy_read_1bit(ioaddr);
1878 } 1878 }
1879 } 1879 }
1880 1880
@@ -1886,7 +1886,7 @@ static u16 phy_read(void __iomem *ioaddr, u8 phy_addr, u8 offset, u32 chip_id)
1886 * Write one bit data to Phy Controller 1886 * Write one bit data to Phy Controller
1887 */ 1887 */
1888 1888
1889static void phy_write_1bit(void __iomem *ioaddr, u32 phy_data) 1889static void dmfe_phy_write_1bit(void __iomem *ioaddr, u32 phy_data)
1890{ 1890{
1891 dw32(DCR9, phy_data); /* MII Clock Low */ 1891 dw32(DCR9, phy_data); /* MII Clock Low */
1892 udelay(1); 1892 udelay(1);
@@ -1901,7 +1901,7 @@ static void phy_write_1bit(void __iomem *ioaddr, u32 phy_data)
1901 * Read one bit phy data from PHY controller 1901 * Read one bit phy data from PHY controller
1902 */ 1902 */
1903 1903
1904static u16 phy_read_1bit(void __iomem *ioaddr) 1904static u16 dmfe_phy_read_1bit(void __iomem *ioaddr)
1905{ 1905{
1906 u16 phy_data; 1906 u16 phy_data;
1907 1907
@@ -1995,11 +1995,11 @@ static void dmfe_parse_srom(struct dmfe_board_info * db)
1995 /* Check DM9801 or DM9802 present or not */ 1995 /* Check DM9801 or DM9802 present or not */
1996 db->HPNA_present = 0; 1996 db->HPNA_present = 0;
1997 update_cr6(db->cr6_data | 0x40000, db->ioaddr); 1997 update_cr6(db->cr6_data | 0x40000, db->ioaddr);
1998 tmp_reg = phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id); 1998 tmp_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
1999 if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) { 1999 if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) {
2000 /* DM9801 or DM9802 present */ 2000 /* DM9801 or DM9802 present */
2001 db->HPNA_timer = 8; 2001 db->HPNA_timer = 8;
2002 if ( phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) { 2002 if ( dmfe_phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) {
2003 /* DM9801 HomeRun */ 2003 /* DM9801 HomeRun */
2004 db->HPNA_present = 1; 2004 db->HPNA_present = 1;
2005 dmfe_program_DM9801(db, tmp_reg); 2005 dmfe_program_DM9801(db, tmp_reg);
@@ -2025,29 +2025,29 @@ static void dmfe_program_DM9801(struct dmfe_board_info * db, int HPNA_rev)
2025 switch(HPNA_rev) { 2025 switch(HPNA_rev) {
2026 case 0xb900: /* DM9801 E3 */ 2026 case 0xb900: /* DM9801 E3 */
2027 db->HPNA_command |= 0x1000; 2027 db->HPNA_command |= 0x1000;
2028 reg25 = phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id); 2028 reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id);
2029 reg25 = ( (reg25 + HPNA_NoiseFloor) & 0xff) | 0xf000; 2029 reg25 = ( (reg25 + HPNA_NoiseFloor) & 0xff) | 0xf000;
2030 reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id); 2030 reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
2031 break; 2031 break;
2032 case 0xb901: /* DM9801 E4 */ 2032 case 0xb901: /* DM9801 E4 */
2033 reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id); 2033 reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2034 reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor; 2034 reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor;
2035 reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id); 2035 reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
2036 reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor + 3; 2036 reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor + 3;
2037 break; 2037 break;
2038 case 0xb902: /* DM9801 E5 */ 2038 case 0xb902: /* DM9801 E5 */
2039 case 0xb903: /* DM9801 E6 */ 2039 case 0xb903: /* DM9801 E6 */
2040 default: 2040 default:
2041 db->HPNA_command |= 0x1000; 2041 db->HPNA_command |= 0x1000;
2042 reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id); 2042 reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2043 reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor - 5; 2043 reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor - 5;
2044 reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id); 2044 reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
2045 reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor; 2045 reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor;
2046 break; 2046 break;
2047 } 2047 }
2048 phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id); 2048 dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
2049 phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id); 2049 dmfe_phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id);
2050 phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id); 2050 dmfe_phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id);
2051} 2051}
2052 2052
2053 2053
@@ -2060,10 +2060,10 @@ static void dmfe_program_DM9802(struct dmfe_board_info * db)
2060 uint phy_reg; 2060 uint phy_reg;
2061 2061
2062 if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9802_NOISE_FLOOR; 2062 if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9802_NOISE_FLOOR;
2063 phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id); 2063 dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
2064 phy_reg = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id); 2064 phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2065 phy_reg = ( phy_reg & 0xff00) + HPNA_NoiseFloor; 2065 phy_reg = ( phy_reg & 0xff00) + HPNA_NoiseFloor;
2066 phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id); 2066 dmfe_phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id);
2067} 2067}
2068 2068
2069 2069
@@ -2077,7 +2077,7 @@ static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
2077 uint phy_reg; 2077 uint phy_reg;
2078 2078
2079 /* Got remote device status */ 2079 /* Got remote device status */
2080 phy_reg = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60; 2080 phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60;
2081 switch(phy_reg) { 2081 switch(phy_reg) {
2082 case 0x00: phy_reg = 0x0a00;break; /* LP/LS */ 2082 case 0x00: phy_reg = 0x0a00;break; /* LP/LS */
2083 case 0x20: phy_reg = 0x0900;break; /* LP/HS */ 2083 case 0x20: phy_reg = 0x0900;break; /* LP/HS */
@@ -2087,8 +2087,8 @@ static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
2087 2087
2088 /* Check whether remote device status matches our setting or not */ 2088 /* Check whether remote device status matches our setting or not */
2089 if ( phy_reg != (db->HPNA_command & 0x0f00) ) { 2089 if ( phy_reg != (db->HPNA_command & 0x0f00) ) {
2090 phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, 2090 dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command,
2091 db->chip_id); 2091 db->chip_id);
2092 db->HPNA_timer=8; 2092 db->HPNA_timer=8;
2093 } else 2093 } else
2094 db->HPNA_timer=600; /* Match, every 10 minutes, check */ 2094 db->HPNA_timer=600; /* Match, every 10 minutes, check */
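
The dmfe rename is purely mechanical - prefixing the helpers keeps them from colliding with the kernel-wide phy_read()/phy_write() symbols of the generic PHY layer (an assumption consistent with the hunks, which change no behaviour). What the bit-bang helpers implement is the standard IEEE 802.3 clause-22 MDIO frame, clocked out one bit at a time through CR9:

/*
 * <preamble: 32 or more '1' bits> <start: 01> <op: read 10 / write 01>
 * <5-bit PHY address> <5-bit register address>
 * <2-bit turnaround> <16 data bits>
 *
 * The driver sends 35 preamble bits, which is harmless: anything
 * >= 32 satisfies the preamble requirement.
 */
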
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index 056b44b93477..d1017509b08a 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -1,5 +1,5 @@
1 /* 1 /*
2 * drivers/net/ethernet/beckhoff/ec_bhf.c 2 * drivers/net/ethernet/ec_bhf.c
3 * 3 *
4 * Copyright (C) 2014 Darek Marcinkiewicz <reksio@newterm.pl> 4 * Copyright (C) 2014 Darek Marcinkiewicz <reksio@newterm.pl>
5 * 5 *
@@ -18,9 +18,6 @@
18 * Those can be found on Bechhoff CX50xx industrial PCs. 18 * Those can be found on Bechhoff CX50xx industrial PCs.
19 */ 19 */
20 20
21#if 0
22#define DEBUG
23#endif
24#include <linux/kernel.h> 21#include <linux/kernel.h>
25#include <linux/module.h> 22#include <linux/module.h>
26#include <linux/moduleparam.h> 23#include <linux/moduleparam.h>
@@ -74,6 +71,8 @@
74 71
75#define DMA_WINDOW_SIZE_MASK 0xfffffffc 72#define DMA_WINDOW_SIZE_MASK 0xfffffffc
76 73
74#define ETHERCAT_MASTER_ID 0x14
75
77static struct pci_device_id ids[] = { 76static struct pci_device_id ids[] = {
78 { PCI_DEVICE(0x15ec, 0x5000), }, 77 { PCI_DEVICE(0x15ec, 0x5000), },
79 { 0, } 78 { 0, }
@@ -131,7 +130,6 @@ struct bhf_dma {
131 130
132struct ec_bhf_priv { 131struct ec_bhf_priv {
133 struct net_device *net_dev; 132 struct net_device *net_dev;
134
135 struct pci_dev *dev; 133 struct pci_dev *dev;
136 134
137 void __iomem *io; 135 void __iomem *io;
@@ -162,32 +160,6 @@ struct ec_bhf_priv {
162 160
163#define PRIV_TO_DEV(priv) (&(priv)->dev->dev) 161#define PRIV_TO_DEV(priv) (&(priv)->dev->dev)
164 162
165#define ETHERCAT_MASTER_ID 0x14
166
167static void ec_bhf_print_status(struct ec_bhf_priv *priv)
168{
169 struct device *dev = PRIV_TO_DEV(priv);
170
171 dev_dbg(dev, "Frame error counter: %d\n",
172 ioread8(priv->mac_io + MAC_FRAME_ERR_CNT));
173 dev_dbg(dev, "RX error counter: %d\n",
174 ioread8(priv->mac_io + MAC_RX_ERR_CNT));
175 dev_dbg(dev, "CRC error counter: %d\n",
176 ioread8(priv->mac_io + MAC_CRC_ERR_CNT));
177 dev_dbg(dev, "TX frame counter: %d\n",
178 ioread32(priv->mac_io + MAC_TX_FRAME_CNT));
179 dev_dbg(dev, "RX frame counter: %d\n",
180 ioread32(priv->mac_io + MAC_RX_FRAME_CNT));
181 dev_dbg(dev, "TX fifo level: %d\n",
182 ioread8(priv->mac_io + MAC_TX_FIFO_LVL));
183 dev_dbg(dev, "Dropped frames: %d\n",
184 ioread8(priv->mac_io + MAC_DROPPED_FRMS));
185 dev_dbg(dev, "Connected with CCAT slot: %d\n",
186 ioread8(priv->mac_io + MAC_CONNECTED_CCAT_FLAG));
187 dev_dbg(dev, "Link status: %d\n",
188 ioread8(priv->mii_io + MII_LINK_STATUS));
189}
190
191static void ec_bhf_reset(struct ec_bhf_priv *priv) 163static void ec_bhf_reset(struct ec_bhf_priv *priv)
192{ 164{
193 iowrite8(0, priv->mac_io + MAC_FRAME_ERR_CNT); 165 iowrite8(0, priv->mac_io + MAC_FRAME_ERR_CNT);
@@ -210,8 +182,6 @@ static void ec_bhf_send_packet(struct ec_bhf_priv *priv, struct tx_desc *desc)
210 u32 addr = (u8 *)desc - priv->tx_buf.buf; 182 u32 addr = (u8 *)desc - priv->tx_buf.buf;
211 183
212 iowrite32((ALIGN(len, 8) << 24) | addr, priv->fifo_io + FIFO_TX_REG); 184 iowrite32((ALIGN(len, 8) << 24) | addr, priv->fifo_io + FIFO_TX_REG);
213
214 dev_dbg(PRIV_TO_DEV(priv), "Done sending packet\n");
215} 185}
216 186
217static int ec_bhf_desc_sent(struct tx_desc *desc) 187static int ec_bhf_desc_sent(struct tx_desc *desc)
@@ -244,7 +214,6 @@ static void ec_bhf_add_rx_desc(struct ec_bhf_priv *priv, struct rx_desc *desc)
244static void ec_bhf_process_rx(struct ec_bhf_priv *priv) 214static void ec_bhf_process_rx(struct ec_bhf_priv *priv)
245{ 215{
246 struct rx_desc *desc = &priv->rx_descs[priv->rx_dnext]; 216 struct rx_desc *desc = &priv->rx_descs[priv->rx_dnext];
247 struct device *dev = PRIV_TO_DEV(priv);
248 217
249 while (ec_bhf_pkt_received(desc)) { 218 while (ec_bhf_pkt_received(desc)) {
250 int pkt_size = (le16_to_cpu(desc->header.len) & 219 int pkt_size = (le16_to_cpu(desc->header.len) &
@@ -253,20 +222,16 @@ static void ec_bhf_process_rx(struct ec_bhf_priv *priv)
253 struct sk_buff *skb; 222 struct sk_buff *skb;
254 223
255 skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size); 224 skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size);
256 dev_dbg(dev, "Received packet, size: %d\n", pkt_size);
257
258 if (skb) { 225 if (skb) {
259 memcpy(skb_put(skb, pkt_size), data, pkt_size); 226 memcpy(skb_put(skb, pkt_size), data, pkt_size);
260 skb->protocol = eth_type_trans(skb, priv->net_dev); 227 skb->protocol = eth_type_trans(skb, priv->net_dev);
261 dev_dbg(dev, "Protocol type: %x\n", skb->protocol);
262
263 priv->stat_rx_bytes += pkt_size; 228 priv->stat_rx_bytes += pkt_size;
264 229
265 netif_rx(skb); 230 netif_rx(skb);
266 } else { 231 } else {
267 dev_err_ratelimited(dev, 232 dev_err_ratelimited(PRIV_TO_DEV(priv),
268 "Couldn't allocate a skb_buff for a packet of size %u\n", 233 "Couldn't allocate a skb_buff for a packet of size %u\n",
269 pkt_size); 234 pkt_size);
270 } 235 }
271 236
272 desc->header.recv = 0; 237 desc->header.recv = 0;
@@ -276,7 +241,6 @@ static void ec_bhf_process_rx(struct ec_bhf_priv *priv)
276 priv->rx_dnext = (priv->rx_dnext + 1) % priv->rx_dcount; 241 priv->rx_dnext = (priv->rx_dnext + 1) % priv->rx_dcount;
277 desc = &priv->rx_descs[priv->rx_dnext]; 242 desc = &priv->rx_descs[priv->rx_dnext];
278 } 243 }
279
280} 244}
281 245
282static enum hrtimer_restart ec_bhf_timer_fun(struct hrtimer *timer) 246static enum hrtimer_restart ec_bhf_timer_fun(struct hrtimer *timer)
@@ -299,14 +263,7 @@ static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv)
299 unsigned block_count, i; 263 unsigned block_count, i;
300 void __iomem *ec_info; 264 void __iomem *ec_info;
301 265
302 dev_dbg(dev, "Info block:\n");
303 dev_dbg(dev, "Type of function: %x\n", (unsigned)ioread16(priv->io));
304 dev_dbg(dev, "Revision of function: %x\n",
305 (unsigned)ioread16(priv->io + INFO_BLOCK_REV));
306
307 block_count = ioread8(priv->io + INFO_BLOCK_BLK_CNT); 266 block_count = ioread8(priv->io + INFO_BLOCK_BLK_CNT);
308 dev_dbg(dev, "Number of function blocks: %x\n", block_count);
309
310 for (i = 0; i < block_count; i++) { 267 for (i = 0; i < block_count; i++) {
311 u16 type = ioread16(priv->io + i * INFO_BLOCK_SIZE + 268 u16 type = ioread16(priv->io + i * INFO_BLOCK_SIZE +
312 INFO_BLOCK_TYPE); 269 INFO_BLOCK_TYPE);
@@ -317,29 +274,17 @@ static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv)
317 dev_err(dev, "EtherCAT master with DMA block not found\n"); 274 dev_err(dev, "EtherCAT master with DMA block not found\n");
318 return -ENODEV; 275 return -ENODEV;
319 } 276 }
320 dev_dbg(dev, "EtherCAT master with DMA block found at pos: %d\n", i);
321 277
322 ec_info = priv->io + i * INFO_BLOCK_SIZE; 278 ec_info = priv->io + i * INFO_BLOCK_SIZE;
323 dev_dbg(dev, "EtherCAT master revision: %d\n",
324 ioread16(ec_info + INFO_BLOCK_REV));
325 279
326 priv->tx_dma_chan = ioread8(ec_info + INFO_BLOCK_TX_CHAN); 280 priv->tx_dma_chan = ioread8(ec_info + INFO_BLOCK_TX_CHAN);
327 dev_dbg(dev, "EtherCAT master tx dma channel: %d\n",
328 priv->tx_dma_chan);
329
330 priv->rx_dma_chan = ioread8(ec_info + INFO_BLOCK_RX_CHAN); 281 priv->rx_dma_chan = ioread8(ec_info + INFO_BLOCK_RX_CHAN);
331 dev_dbg(dev, "EtherCAT master rx dma channel: %d\n",
332 priv->rx_dma_chan);
333 282
334 priv->ec_io = priv->io + ioread32(ec_info + INFO_BLOCK_OFFSET); 283 priv->ec_io = priv->io + ioread32(ec_info + INFO_BLOCK_OFFSET);
335 priv->mii_io = priv->ec_io + ioread32(priv->ec_io + EC_MII_OFFSET); 284 priv->mii_io = priv->ec_io + ioread32(priv->ec_io + EC_MII_OFFSET);
336 priv->fifo_io = priv->ec_io + ioread32(priv->ec_io + EC_FIFO_OFFSET); 285 priv->fifo_io = priv->ec_io + ioread32(priv->ec_io + EC_FIFO_OFFSET);
337 priv->mac_io = priv->ec_io + ioread32(priv->ec_io + EC_MAC_OFFSET); 286 priv->mac_io = priv->ec_io + ioread32(priv->ec_io + EC_MAC_OFFSET);
338 287
339 dev_dbg(dev,
340 "EtherCAT block addres: %p, fifo address: %p, mii address: %p, mac address: %p\n",
341 priv->ec_io, priv->fifo_io, priv->mii_io, priv->mac_io);
342
343 return 0; 288 return 0;
344} 289}
345 290
@@ -350,8 +295,6 @@ static netdev_tx_t ec_bhf_start_xmit(struct sk_buff *skb,
350 struct tx_desc *desc; 295 struct tx_desc *desc;
351 unsigned len; 296 unsigned len;
352 297
353 dev_dbg(PRIV_TO_DEV(priv), "Starting xmit\n");
354
355 desc = &priv->tx_descs[priv->tx_dnext]; 298 desc = &priv->tx_descs[priv->tx_dnext];
356 299
357 skb_copy_and_csum_dev(skb, desc->data); 300 skb_copy_and_csum_dev(skb, desc->data);
@@ -366,15 +309,12 @@ static netdev_tx_t ec_bhf_start_xmit(struct sk_buff *skb,
366 priv->tx_dnext = (priv->tx_dnext + 1) % priv->tx_dcount; 309 priv->tx_dnext = (priv->tx_dnext + 1) % priv->tx_dcount;
367 310
368 if (!ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) { 311 if (!ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) {
369 /* Make sure that update updates to tx_dnext are perceived 312 /* Make sure that updates to tx_dnext are perceived
370 * by timer routine. 313 * by timer routine.
371 */ 314 */
372 smp_wmb(); 315 smp_wmb();
373 316
374 netif_stop_queue(net_dev); 317 netif_stop_queue(net_dev);
375
376 dev_dbg(PRIV_TO_DEV(priv), "Stopping netif queue\n");
377 ec_bhf_print_status(priv);
378 } 318 }
379 319
380 priv->stat_tx_bytes += len; 320 priv->stat_tx_bytes += len;
@@ -397,7 +337,6 @@ static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv,
397 337
398 mask = ioread32(priv->dma_io + offset); 338 mask = ioread32(priv->dma_io + offset);
399 mask &= DMA_WINDOW_SIZE_MASK; 339 mask &= DMA_WINDOW_SIZE_MASK;
400 dev_dbg(dev, "Read mask %x for channel %d\n", mask, channel);
401 340
402 /* We want to allocate a chunk of memory that is: 341 /* We want to allocate a chunk of memory that is:
403 * - aligned to the mask we just read 342 * - aligned to the mask we just read
@@ -408,12 +347,10 @@ static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv,
408 buf->len = min_t(int, ~mask + 1, size); 347 buf->len = min_t(int, ~mask + 1, size);
409 buf->alloc_len = 2 * buf->len; 348 buf->alloc_len = 2 * buf->len;
410 349
411 dev_dbg(dev, "Allocating %d bytes for channel %d",
412 (int)buf->alloc_len, channel);
413 buf->alloc = dma_alloc_coherent(dev, buf->alloc_len, &buf->alloc_phys, 350 buf->alloc = dma_alloc_coherent(dev, buf->alloc_len, &buf->alloc_phys,
414 GFP_KERNEL); 351 GFP_KERNEL);
415 if (buf->alloc == NULL) { 352 if (buf->alloc == NULL) {
416 dev_info(dev, "Failed to allocate buffer\n"); 353 dev_err(dev, "Failed to allocate buffer\n");
417 return -ENOMEM; 354 return -ENOMEM;
418 } 355 }
419 356
@@ -422,8 +359,6 @@ static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv,
422 359
423 iowrite32(0, priv->dma_io + offset + 4); 360 iowrite32(0, priv->dma_io + offset + 4);
424 iowrite32(buf->buf_phys, priv->dma_io + offset); 361 iowrite32(buf->buf_phys, priv->dma_io + offset);
425 dev_dbg(dev, "Buffer: %x and read from dev: %x",
426 (unsigned)buf->buf_phys, ioread32(priv->dma_io + offset));
427 362
428 return 0; 363 return 0;
429} 364}
@@ -433,7 +368,7 @@ static void ec_bhf_setup_tx_descs(struct ec_bhf_priv *priv)
433 int i = 0; 368 int i = 0;
434 369
435 priv->tx_dcount = priv->tx_buf.len / sizeof(struct tx_desc); 370 priv->tx_dcount = priv->tx_buf.len / sizeof(struct tx_desc);
436 priv->tx_descs = (struct tx_desc *) priv->tx_buf.buf; 371 priv->tx_descs = (struct tx_desc *)priv->tx_buf.buf;
437 priv->tx_dnext = 0; 372 priv->tx_dnext = 0;
438 373
439 for (i = 0; i < priv->tx_dcount; i++) 374 for (i = 0; i < priv->tx_dcount; i++)
@@ -445,7 +380,7 @@ static void ec_bhf_setup_rx_descs(struct ec_bhf_priv *priv)
445 int i; 380 int i;
446 381
447 priv->rx_dcount = priv->rx_buf.len / sizeof(struct rx_desc); 382 priv->rx_dcount = priv->rx_buf.len / sizeof(struct rx_desc);
448 priv->rx_descs = (struct rx_desc *) priv->rx_buf.buf; 383 priv->rx_descs = (struct rx_desc *)priv->rx_buf.buf;
449 priv->rx_dnext = 0; 384 priv->rx_dnext = 0;
450 385
451 for (i = 0; i < priv->rx_dcount; i++) { 386 for (i = 0; i < priv->rx_dcount; i++) {
@@ -469,8 +404,6 @@ static int ec_bhf_open(struct net_device *net_dev)
469 struct device *dev = PRIV_TO_DEV(priv); 404 struct device *dev = PRIV_TO_DEV(priv);
470 int err = 0; 405 int err = 0;
471 406
472 dev_info(dev, "Opening device\n");
473
474 ec_bhf_reset(priv); 407 ec_bhf_reset(priv);
475 408
476 err = ec_bhf_alloc_dma_mem(priv, &priv->rx_buf, priv->rx_dma_chan, 409 err = ec_bhf_alloc_dma_mem(priv, &priv->rx_buf, priv->rx_dma_chan,
@@ -481,20 +414,13 @@ static int ec_bhf_open(struct net_device *net_dev)
481 } 414 }
482 ec_bhf_setup_rx_descs(priv); 415 ec_bhf_setup_rx_descs(priv);
483 416
484 dev_info(dev, "RX buffer allocated, address: %x\n",
485 (unsigned)priv->rx_buf.buf_phys);
486
487 err = ec_bhf_alloc_dma_mem(priv, &priv->tx_buf, priv->tx_dma_chan, 417 err = ec_bhf_alloc_dma_mem(priv, &priv->tx_buf, priv->tx_dma_chan,
488 FIFO_SIZE * sizeof(struct tx_desc)); 418 FIFO_SIZE * sizeof(struct tx_desc));
489 if (err) { 419 if (err) {
490 dev_err(dev, "Failed to allocate tx buffer\n"); 420 dev_err(dev, "Failed to allocate tx buffer\n");
491 goto error_rx_free; 421 goto error_rx_free;
492 } 422 }
493 dev_dbg(dev, "TX buffer allocated, addres: %x\n",
494 (unsigned)priv->tx_buf.buf_phys);
495
496 iowrite8(0, priv->mii_io + MII_MAC_FILT_FLAG); 423 iowrite8(0, priv->mii_io + MII_MAC_FILT_FLAG);
497
498 ec_bhf_setup_tx_descs(priv); 424 ec_bhf_setup_tx_descs(priv);
499 425
500 netif_start_queue(net_dev); 426 netif_start_queue(net_dev);
@@ -504,10 +430,6 @@ static int ec_bhf_open(struct net_device *net_dev)
504 hrtimer_start(&priv->hrtimer, ktime_set(0, polling_frequency), 430 hrtimer_start(&priv->hrtimer, ktime_set(0, polling_frequency),
505 HRTIMER_MODE_REL); 431 HRTIMER_MODE_REL);
506 432
507 dev_info(PRIV_TO_DEV(priv), "Device open\n");
508
509 ec_bhf_print_status(priv);
510
511 return 0; 433 return 0;
512 434
513error_rx_free: 435error_rx_free:
@@ -640,9 +562,6 @@ static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
640 562
641 memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, 6); 563 memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, 6);
642 564
643 dev_dbg(&dev->dev, "CX5020 Ethercat master address: %pM\n",
644 net_dev->dev_addr);
645
646 err = register_netdev(net_dev); 565 err = register_netdev(net_dev);
647 if (err < 0) 566 if (err < 0)
648 goto err_free_net_dev; 567 goto err_free_net_dev;
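
Beyond the debug-print cleanup, ec_bhf_alloc_dma_mem() keeps its alignment trick intact: the mask read from the device yields the DMA window length as ~mask + 1, which doubles as the required alignment, and allocating twice that length guarantees an aligned sub-buffer exists somewhere inside the allocation. A sketch of the arithmetic, assuming the struct bhf_dma field names; how the driver picks the aligned window (the last two lines) is an inference from the doubled allocation, not quoted code:

	buf->len = min_t(int, ~mask + 1, size);	/* window length == alignment */
	buf->alloc_len = 2 * buf->len;		/* room to slide into alignment */
	buf->alloc = dma_alloc_coherent(dev, buf->alloc_len,
					&buf->alloc_phys, GFP_KERNEL);
	if (!buf->alloc)
		return -ENOMEM;

	/* pick the aligned window inside the oversized allocation */
	buf->buf_phys = ALIGN(buf->alloc_phys, buf->len);
	buf->buf = buf->alloc + (buf->buf_phys - buf->alloc_phys);
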
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 43e08d0bc3d3..a9f239adc3e3 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -86,6 +86,8 @@ static inline char *nic_name(struct pci_dev *pdev)
86 86
87#define BE_MAX_JUMBO_FRAME_SIZE 9018 87#define BE_MAX_JUMBO_FRAME_SIZE 9018
88#define BE_MIN_MTU 256 88#define BE_MIN_MTU 256
89#define BE_MAX_MTU (BE_MAX_JUMBO_FRAME_SIZE - \
90 (ETH_HLEN + ETH_FCS_LEN))
89 91
90#define BE_NUM_VLANS_SUPPORTED 64 92#define BE_NUM_VLANS_SUPPORTED 64
91#define BE_MAX_EQD 128u 93#define BE_MAX_EQD 128u
@@ -112,7 +114,6 @@ static inline char *nic_name(struct pci_dev *pdev)
112#define MAX_ROCE_EQS 5 114#define MAX_ROCE_EQS 5
113#define MAX_MSIX_VECTORS 32 115#define MAX_MSIX_VECTORS 32
114#define MIN_MSIX_VECTORS 1 116#define MIN_MSIX_VECTORS 1
115#define BE_TX_BUDGET 256
116#define BE_NAPI_WEIGHT 64 117#define BE_NAPI_WEIGHT 64
117#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */ 118#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
118#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST) 119#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
@@ -198,7 +199,6 @@ struct be_eq_obj {
198 199
199 u8 idx; /* array index */ 200 u8 idx; /* array index */
200 u8 msix_idx; 201 u8 msix_idx;
201 u16 tx_budget;
202 u16 spurious_intr; 202 u16 spurious_intr;
203 struct napi_struct napi; 203 struct napi_struct napi;
204 struct be_adapter *adapter; 204 struct be_adapter *adapter;
@@ -248,6 +248,13 @@ struct be_tx_stats {
248 ulong tx_jiffies; 248 ulong tx_jiffies;
249 u32 tx_stops; 249 u32 tx_stops;
250 u32 tx_drv_drops; /* pkts dropped by driver */ 250 u32 tx_drv_drops; /* pkts dropped by driver */
251 /* the error counters are described in be_ethtool.c */
252 u32 tx_hdr_parse_err;
253 u32 tx_dma_err;
254 u32 tx_tso_err;
255 u32 tx_spoof_check_err;
256 u32 tx_qinq_err;
257 u32 tx_internal_parity_err;
251 struct u64_stats_sync sync; 258 struct u64_stats_sync sync;
252 struct u64_stats_sync sync_compl; 259 struct u64_stats_sync sync_compl;
253}; 260};
@@ -316,6 +323,7 @@ struct be_rx_obj {
316struct be_drv_stats { 323struct be_drv_stats {
317 u32 be_on_die_temperature; 324 u32 be_on_die_temperature;
318 u32 eth_red_drops; 325 u32 eth_red_drops;
326 u32 dma_map_errors;
319 u32 rx_drops_no_pbuf; 327 u32 rx_drops_no_pbuf;
320 u32 rx_drops_no_txpb; 328 u32 rx_drops_no_txpb;
321 u32 rx_drops_no_erx_descr; 329 u32 rx_drops_no_erx_descr;
@@ -613,6 +621,10 @@ extern const struct ethtool_ops be_ethtool_ops;
613 for (i = eqo->idx, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs;\ 621 for (i = eqo->idx, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs;\
614 i += adapter->num_evt_qs, rxo += adapter->num_evt_qs) 622 i += adapter->num_evt_qs, rxo += adapter->num_evt_qs)
615 623
624#define for_all_tx_queues_on_eq(adapter, eqo, txo, i) \
625 for (i = eqo->idx, txo = &adapter->tx_obj[i]; i < adapter->num_tx_qs;\
626 i += adapter->num_evt_qs, txo += adapter->num_evt_qs)
627
616#define is_mcc_eqo(eqo) (eqo->idx == 0) 628#define is_mcc_eqo(eqo) (eqo->idx == 0)
617#define mcc_eqo(adapter) (&adapter->eq_obj[0]) 629#define mcc_eqo(adapter) (&adapter->eq_obj[0])
618 630
@@ -661,6 +673,18 @@ static inline u32 amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
661 amap_mask(sizeof(((_struct *)0)->field)), \ 673 amap_mask(sizeof(((_struct *)0)->field)), \
662 AMAP_BIT_OFFSET(_struct, field)) 674 AMAP_BIT_OFFSET(_struct, field))
663 675
676#define GET_RX_COMPL_V0_BITS(field, ptr) \
677 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, field, ptr)
678
679#define GET_RX_COMPL_V1_BITS(field, ptr) \
680 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, field, ptr)
681
682#define GET_TX_COMPL_BITS(field, ptr) \
683 AMAP_GET_BITS(struct amap_eth_tx_compl, field, ptr)
684
685#define SET_TX_WRB_HDR_BITS(field, ptr, val) \
686 AMAP_SET_BITS(struct amap_eth_hdr_wrb, field, ptr, val)
687
664#define be_dws_cpu_to_le(wrb, len) swap_dws(wrb, len) 688#define be_dws_cpu_to_le(wrb, len) swap_dws(wrb, len)
665#define be_dws_le_to_cpu(wrb, len) swap_dws(wrb, len) 689#define be_dws_le_to_cpu(wrb, len) swap_dws(wrb, len)
666static inline void swap_dws(void *wrb, int len) 690static inline void swap_dws(void *wrb, int len)
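
Worked out, the new macro gives BE_MAX_MTU = 9018 - (ETH_HLEN + ETH_FCS_LEN) = 9018 - (14 + 4) = 9000 bytes: the 9018-byte on-wire jumbo limit minus the Ethernet header and FCS, which do not count toward the MTU.
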
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 4370ec1952ac..5be100d1bc0a 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1681,17 +1681,17 @@ err:
1681 return status; 1681 return status;
1682} 1682}
1683 1683
1684void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf) 1684int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1685{ 1685{
1686 struct be_dma_mem get_fat_cmd; 1686 struct be_dma_mem get_fat_cmd;
1687 struct be_mcc_wrb *wrb; 1687 struct be_mcc_wrb *wrb;
1688 struct be_cmd_req_get_fat *req; 1688 struct be_cmd_req_get_fat *req;
1689 u32 offset = 0, total_size, buf_size, 1689 u32 offset = 0, total_size, buf_size,
1690 log_offset = sizeof(u32), payload_len; 1690 log_offset = sizeof(u32), payload_len;
1691 int status; 1691 int status = 0;
1692 1692
1693 if (buf_len == 0) 1693 if (buf_len == 0)
1694 return; 1694 return -EIO;
1695 1695
1696 total_size = buf_len; 1696 total_size = buf_len;
1697 1697
@@ -1700,10 +1700,9 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1700 get_fat_cmd.size, 1700 get_fat_cmd.size,
1701 &get_fat_cmd.dma); 1701 &get_fat_cmd.dma);
1702 if (!get_fat_cmd.va) { 1702 if (!get_fat_cmd.va) {
1703 status = -ENOMEM;
1704 dev_err(&adapter->pdev->dev, 1703 dev_err(&adapter->pdev->dev,
1705 "Memory allocation failure while retrieving FAT data\n"); 1704 "Memory allocation failure while retrieving FAT data\n");
1706 return; 1705 return -ENOMEM;
1707 } 1706 }
1708 1707
1709 spin_lock_bh(&adapter->mcc_lock); 1708 spin_lock_bh(&adapter->mcc_lock);
@@ -1746,6 +1745,7 @@ err:
1746 pci_free_consistent(adapter->pdev, get_fat_cmd.size, 1745 pci_free_consistent(adapter->pdev, get_fat_cmd.size,
1747 get_fat_cmd.va, get_fat_cmd.dma); 1746 get_fat_cmd.va, get_fat_cmd.dma);
1748 spin_unlock_bh(&adapter->mcc_lock); 1747 spin_unlock_bh(&adapter->mcc_lock);
1748 return status;
1749} 1749}
1750 1750
1751/* Uses synchronous mcc */ 1751/* Uses synchronous mcc */
@@ -1771,6 +1771,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter)
1771 status = be_mcc_notify_wait(adapter); 1771 status = be_mcc_notify_wait(adapter);
1772 if (!status) { 1772 if (!status) {
1773 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb); 1773 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
1774
1774 strcpy(adapter->fw_ver, resp->firmware_version_string); 1775 strcpy(adapter->fw_ver, resp->firmware_version_string);
1775 strcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string); 1776 strcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string);
1776 } 1777 }
@@ -2018,6 +2019,9 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter)
2018 adapter->function_mode = le32_to_cpu(resp->function_mode); 2019 adapter->function_mode = le32_to_cpu(resp->function_mode);
2019 adapter->function_caps = le32_to_cpu(resp->function_caps); 2020 adapter->function_caps = le32_to_cpu(resp->function_caps);
2020 adapter->asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF; 2021 adapter->asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
2022 dev_info(&adapter->pdev->dev,
2023 "FW config: function_mode=0x%x, function_caps=0x%x\n",
2024 adapter->function_mode, adapter->function_caps);
2021 } 2025 }
2022 2026
2023 mutex_unlock(&adapter->mbox_lock); 2027 mutex_unlock(&adapter->mbox_lock);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 5284b825bba2..0e1186856aa6 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -2101,7 +2101,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter);
2101int be_cmd_get_cntl_attributes(struct be_adapter *adapter); 2101int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
2102int be_cmd_req_native_mode(struct be_adapter *adapter); 2102int be_cmd_req_native_mode(struct be_adapter *adapter);
2103int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size); 2103int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
2104void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf); 2104int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
2105int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege, 2105int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
2106 u32 domain); 2106 u32 domain);
2107int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges, 2107int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 0cd3311409a8..2fd38261bedb 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -78,6 +78,11 @@ static const struct be_ethtool_stat et_stats[] = {
 	 * fifo must never overflow.
 	 */
 	{DRVSTAT_INFO(rxpp_fifo_overflow_drop)},
+	/* Received packets dropped when the RX block runs out of space in
+	 * one of its input FIFOs. This could happen due to a long burst of
+	 * minimum-sized (64-byte) frames in the receive path.
+	 * This counter may also rarely be incremented erroneously.
+	 */
 	{DRVSTAT_INFO(rx_input_fifo_overflow_drop)},
 	{DRVSTAT_INFO(rx_ip_checksum_errs)},
 	{DRVSTAT_INFO(rx_tcp_checksum_errs)},
@@ -114,6 +119,8 @@ static const struct be_ethtool_stat et_stats[] = {
 	 * is more than 9018 bytes
 	 */
 	{DRVSTAT_INFO(rx_drops_mtu)},
+	/* Number of DMA mapping errors */
+	{DRVSTAT_INFO(dma_map_errors)},
 	/* Number of packets dropped due to random early drop function */
 	{DRVSTAT_INFO(eth_red_drops)},
 	{DRVSTAT_INFO(be_on_die_temperature)},
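
dma_map_errors is a single driver-level counter: later hunks in this patch bump it both on the TX path (when mapping an skb fragment fails) and on the RX path (when mapping a receive page fails), so one ethtool statistic covers both directions. Both increment sites follow the usual dma_mapping_error() pattern, roughly:

	/* Pattern shared by the increment sites added below: check the mapping
	 * before use and account the failure in the shared driver stat.
	 */
	dma_addr_t busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, busaddr)) {
		adapter->drv_stats.dma_map_errors++;
		return 0;	/* caller treats this as "nothing queued" */
	}
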
@@ -152,6 +159,34 @@ static const struct be_ethtool_stat et_rx_stats[] = {
 	 */
 static const struct be_ethtool_stat et_tx_stats[] = {
 	{DRVSTAT_TX_INFO(tx_compl)}, /* If moving this member see above note */
+	/* This counter is incremented when the HW encounters an error while
+	 * parsing the packet header of an outgoing TX request. This counter is
+	 * applicable only for BE2, BE3 and Skyhawk-based adapters.
+	 */
+	{DRVSTAT_TX_INFO(tx_hdr_parse_err)},
+	/* This counter is incremented when an error occurs in the DMA
+	 * operation associated with the TX request from the host to the device.
+	 */
+	{DRVSTAT_TX_INFO(tx_dma_err)},
+	/* This counter is incremented when MAC or VLAN spoof checking is
+	 * enabled on the interface and the TX request fails the spoof check
+	 * in HW.
+	 */
+	{DRVSTAT_TX_INFO(tx_spoof_check_err)},
+	/* This counter is incremented when the HW encounters an error while
+	 * performing TSO offload. This counter is applicable only for Lancer
+	 * adapters.
+	 */
+	{DRVSTAT_TX_INFO(tx_tso_err)},
+	/* This counter is incremented when the HW detects Q-in-Q style VLAN
+	 * tagging in a packet and such tagging is not expected on the outgoing
+	 * interface. This counter is applicable only for Lancer adapters.
+	 */
+	{DRVSTAT_TX_INFO(tx_qinq_err)},
+	/* This counter is incremented when the HW detects parity errors in the
+	 * packet data. This counter is applicable only for Lancer adapters.
+	 */
+	{DRVSTAT_TX_INFO(tx_internal_parity_err)},
 	{DRVSTAT_TX_INFO(tx_bytes)},
 	{DRVSTAT_TX_INFO(tx_pkts)},
 	/* Number of skbs queued for transmission by the driver */
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 8840c64aaeca..295ee0835ba0 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -315,6 +315,18 @@ struct be_eth_hdr_wrb {
 	u32 dw[4];
 };
 
+/********* Tx Compl Status Encoding *********/
+#define BE_TX_COMP_HDR_PARSE_ERR	0x2
+#define BE_TX_COMP_NDMA_ERR		0x3
+#define BE_TX_COMP_ACL_ERR		0x5
+
+#define LANCER_TX_COMP_LSO_ERR			0x1
+#define LANCER_TX_COMP_HSW_DROP_MAC_ERR		0x3
+#define LANCER_TX_COMP_HSW_DROP_VLAN_ERR	0x5
+#define LANCER_TX_COMP_QINQ_ERR			0x7
+#define LANCER_TX_COMP_PARITY_ERR		0xb
+#define LANCER_TX_COMP_DMA_ERR			0xd
+
 /* TX Compl Queue Descriptor */
 
 /* Pseudo amap definition for eth_tx_compl in which each bit of the
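
These values decode the status field of a TX completion entry; two namespaces are needed because BE2/BE3/Skyhawk and Lancer report different encodings for the same kinds of failure (0x3, for instance, is an internal DMA error on BEx but a MAC spoof-check drop on Lancer). The be_main.c hunks below consume them via GET_TX_COMPL_BITS(status, txcp), along these lines:

	/* Sketch of the decode path added later in this patch: pull the status
	 * out of the completion and route it to the chip-specific counter.
	 */
	u32 compl_status = GET_TX_COMPL_BITS(status, txcp);

	if (compl_status) {
		if (lancer_chip(adapter))
			lancer_update_tx_err(txo, compl_status);
		else
			be_update_tx_err(txo, compl_status);
	}
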
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 93ff8ef39352..5b26c4c9ab2b 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -738,38 +738,37 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
 
 	memset(hdr, 0, sizeof(*hdr));
 
-	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
+	SET_TX_WRB_HDR_BITS(crc, hdr, 1);
 
 	if (skb_is_gso(skb)) {
-		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
-		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
-			hdr, skb_shinfo(skb)->gso_size);
+		SET_TX_WRB_HDR_BITS(lso, hdr, 1);
+		SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size);
 		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
-			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
+			SET_TX_WRB_HDR_BITS(lso6, hdr, 1);
 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		if (skb->encapsulation) {
-			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
+			SET_TX_WRB_HDR_BITS(ipcs, hdr, 1);
 			proto = skb_inner_ip_proto(skb);
 		} else {
 			proto = skb_ip_proto(skb);
 		}
 		if (proto == IPPROTO_TCP)
-			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
+			SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1);
 		else if (proto == IPPROTO_UDP)
-			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
+			SET_TX_WRB_HDR_BITS(udpcs, hdr, 1);
 	}
 
 	if (vlan_tx_tag_present(skb)) {
-		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
+		SET_TX_WRB_HDR_BITS(vlan, hdr, 1);
 		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
-		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
+		SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag);
 	}
 
 	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
-	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
-	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
-	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
-	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
+	SET_TX_WRB_HDR_BITS(complete, hdr, !skip_hw_vlan);
+	SET_TX_WRB_HDR_BITS(event, hdr, 1);
+	SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt);
+	SET_TX_WRB_HDR_BITS(len, hdr, len);
 }
 
 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
@@ -850,6 +849,7 @@ dma_err:
 		unmap_tx_frag(dev, wrb, map_single);
 		map_single = false;
 		copied -= wrb->frag_len;
+		adapter->drv_stats.dma_map_errors++;
 		queue_head_inc(txq);
 	}
 	return 0;
@@ -1073,15 +1073,15 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
 static int be_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
-	if (new_mtu < BE_MIN_MTU ||
-	    new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
-		dev_info(&adapter->pdev->dev,
-			 "MTU must be between %d and %d bytes\n",
-			 BE_MIN_MTU,
-			 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
+	struct device *dev = &adapter->pdev->dev;
+
+	if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
+		dev_info(dev, "MTU must be between %d and %d bytes\n",
+			 BE_MIN_MTU, BE_MAX_MTU);
 		return -EINVAL;
 	}
-	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
+
+	dev_info(dev, "MTU changed from %d to %d bytes\n",
 		 netdev->mtu, new_mtu);
 	netdev->mtu = new_mtu;
 	return 0;
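
The BE_MAX_MTU macro folds the old open-coded upper bound into one name. Its definition is not shown in this patch; presumably it lives in be.h and matches the expression it replaces, along the lines of:

	/* Inferred definition, not quoted from this patch: the jumbo-frame
	 * ceiling minus the Ethernet header and FCS, i.e. exactly the bound
	 * the old code computed inline.
	 */
	#define BE_MAX_MTU	(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))
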
@@ -1683,7 +1683,7 @@ static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
 	if (netdev->features & NETIF_F_RXHASH)
 		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
 
-	skb->encapsulation = rxcp->tunneled;
+	skb->csum_level = rxcp->tunneled;
 	skb_mark_napi_id(skb, napi);
 
 	if (rxcp->vlanf)
@@ -1741,7 +1741,7 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
 	if (adapter->netdev->features & NETIF_F_RXHASH)
 		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
 
-	skb->encapsulation = rxcp->tunneled;
+	skb->csum_level = rxcp->tunneled;
 	skb_mark_napi_id(skb, napi);
 
 	if (rxcp->vlanf)
@@ -1753,65 +1753,46 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
 				 struct be_rx_compl_info *rxcp)
 {
-	rxcp->pkt_size =
-		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
-	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
-	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
-	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
-	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
-	rxcp->ip_csum =
-		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
-	rxcp->l4_csum =
-		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
-	rxcp->ipv6 =
-		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
-	rxcp->num_rcvd =
-		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
-	rxcp->pkt_type =
-		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
-	rxcp->rss_hash =
-		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
+	rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
+	rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
+	rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
+	rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
+	rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
+	rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
+	rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
+	rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
+	rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
+	rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
+	rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
 	if (rxcp->vlanf) {
-		rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
-					  compl);
-		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
-					       vlan_tag, compl);
+		rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
+		rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
 	}
-	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
+	rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
 	rxcp->tunneled =
-		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tunneled, compl);
+		GET_RX_COMPL_V1_BITS(tunneled, compl);
 }
 
 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
 				 struct be_rx_compl_info *rxcp)
 {
-	rxcp->pkt_size =
-		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
-	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
-	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
-	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
-	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
-	rxcp->ip_csum =
-		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
-	rxcp->l4_csum =
-		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
-	rxcp->ipv6 =
-		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
-	rxcp->num_rcvd =
-		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
-	rxcp->pkt_type =
-		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
-	rxcp->rss_hash =
-		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
+	rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
+	rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
+	rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
+	rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
+	rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
+	rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
+	rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
+	rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
+	rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
+	rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
+	rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
 	if (rxcp->vlanf) {
-		rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
-					  compl);
-		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
-					       vlan_tag, compl);
+		rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
+		rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
 	}
-	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
-	rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
-				      ip_frag, compl);
+	rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
+	rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
 }
 
 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
@@ -1897,7 +1878,7 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
 		if (dma_mapping_error(dev, page_dmaaddr)) {
 			put_page(pagep);
 			pagep = NULL;
-			rx_stats(rxo)->rx_post_fail++;
+			adapter->drv_stats.dma_map_errors++;
 			break;
 		}
 		page_offset = 0;
@@ -1991,7 +1972,7 @@ static u16 be_tx_compl_process(struct be_adapter *adapter,
 		queue_tail_inc(txq);
 	} while (cur_index != last_index);
 
-	dev_kfree_skb_any(sent_skb);
+	dev_consume_skb_any(sent_skb);
 	return num_wrbs;
 }
1997 1978
@@ -2091,9 +2072,7 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
2091 num_wrbs = 0; 2072 num_wrbs = 0;
2092 txq = &txo->q; 2073 txq = &txo->q;
2093 while ((txcp = be_tx_compl_get(&txo->cq))) { 2074 while ((txcp = be_tx_compl_get(&txo->cq))) {
2094 end_idx = 2075 end_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
2095 AMAP_GET_BITS(struct amap_eth_tx_compl,
2096 wrb_index, txcp);
2097 num_wrbs += be_tx_compl_process(adapter, txo, 2076 num_wrbs += be_tx_compl_process(adapter, txo,
2098 end_idx); 2077 end_idx);
2099 cmpl++; 2078 cmpl++;
@@ -2164,7 +2143,6 @@ static int be_evt_queues_create(struct be_adapter *adapter)
 		napi_hash_add(&eqo->napi);
 		aic = &adapter->aic_obj[i];
 		eqo->adapter = adapter;
-		eqo->tx_budget = BE_TX_BUDGET;
 		eqo->idx = i;
 		aic->max_eqd = BE_MAX_EQD;
 		aic->enable = true;
@@ -2443,20 +2421,63 @@ loop_continue:
 	return work_done;
 }
 
-static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
-			  int budget, int idx)
+static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status)
+{
+	switch (status) {
+	case BE_TX_COMP_HDR_PARSE_ERR:
+		tx_stats(txo)->tx_hdr_parse_err++;
+		break;
+	case BE_TX_COMP_NDMA_ERR:
+		tx_stats(txo)->tx_dma_err++;
+		break;
+	case BE_TX_COMP_ACL_ERR:
+		tx_stats(txo)->tx_spoof_check_err++;
+		break;
+	}
+}
+
+static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status)
+{
+	switch (status) {
+	case LANCER_TX_COMP_LSO_ERR:
+		tx_stats(txo)->tx_tso_err++;
+		break;
+	case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
+	case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
+		tx_stats(txo)->tx_spoof_check_err++;
+		break;
+	case LANCER_TX_COMP_QINQ_ERR:
+		tx_stats(txo)->tx_qinq_err++;
+		break;
+	case LANCER_TX_COMP_PARITY_ERR:
+		tx_stats(txo)->tx_internal_parity_err++;
+		break;
+	case LANCER_TX_COMP_DMA_ERR:
+		tx_stats(txo)->tx_dma_err++;
+		break;
+	}
+}
+
+static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
+			  int idx)
 {
 	struct be_eth_tx_compl *txcp;
-	int num_wrbs = 0, work_done;
+	int num_wrbs = 0, work_done = 0;
+	u32 compl_status;
+	u16 last_idx;
 
-	for (work_done = 0; work_done < budget; work_done++) {
-		txcp = be_tx_compl_get(&txo->cq);
-		if (!txcp)
-			break;
-		num_wrbs += be_tx_compl_process(adapter, txo,
-						AMAP_GET_BITS(struct
-							      amap_eth_tx_compl,
-							      wrb_index, txcp));
+	while ((txcp = be_tx_compl_get(&txo->cq))) {
+		last_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
+		num_wrbs += be_tx_compl_process(adapter, txo, last_idx);
+		work_done++;
+
+		compl_status = GET_TX_COMPL_BITS(status, txcp);
+		if (compl_status) {
+			if (lancer_chip(adapter))
+				lancer_update_tx_err(txo, compl_status);
+			else
+				be_update_tx_err(txo, compl_status);
+		}
 	}
 
 	if (work_done) {
@@ -2474,7 +2495,6 @@ static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
 		tx_stats(txo)->tx_compl += work_done;
 		u64_stats_update_end(&tx_stats(txo)->sync_compl);
 	}
-	return (work_done < budget); /* Done */
 }
 
 int be_poll(struct napi_struct *napi, int budget)
@@ -2483,17 +2503,12 @@ int be_poll(struct napi_struct *napi, int budget)
 	struct be_adapter *adapter = eqo->adapter;
 	int max_work = 0, work, i, num_evts;
 	struct be_rx_obj *rxo;
-	bool tx_done;
+	struct be_tx_obj *txo;
 
 	num_evts = events_get(eqo);
 
-	/* Process all TXQs serviced by this EQ */
-	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
-		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
-					eqo->tx_budget, i);
-		if (!tx_done)
-			max_work = budget;
-	}
+	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
+		be_process_tx(adapter, txo, i);
 
 	if (be_lock_napi(eqo)) {
 		/* This loop will iterate twice for EQ0 in which
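
for_all_tx_queues_on_eq() replaces the open-coded stride loop above. Its definition is not part of this hunk; a sketch inferred from the loop it replaces (not quoted from be.h) would look roughly like:

	/* Sketch: walk every TX queue serviced by this event queue, starting
	 * at the EQ's index and striding by the number of event queues,
	 * exactly as the removed open-coded loop did.
	 */
	#define for_all_tx_queues_on_eq(adapter, eqo, txo, i)			\
		for (i = (eqo)->idx, txo = &(adapter)->tx_obj[i];		\
		     i < (adapter)->num_tx_qs;					\
		     i += (adapter)->num_evt_qs, txo += (adapter)->num_evt_qs)
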
@@ -3309,10 +3324,20 @@ static void BEx_get_resources(struct be_adapter *adapter,
 	 */
 	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
 	    !be_physfn(adapter) || (be_is_mc(adapter) &&
-	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS)))
+	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
 		res->max_tx_qs = 1;
-	else
+	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
+		struct be_resources super_nic_res = {0};
+
+		/* On a SuperNIC profile, the driver needs to use the
+		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
+		 */
+		be_cmd_get_profile_config(adapter, &super_nic_res, 0);
+		/* Some old versions of BE3 FW don't report max_tx_qs value */
+		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
+	} else {
 		res->max_tx_qs = BE3_MAX_TX_QS;
+	}
 
 	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
 	    !use_sriov && be_physfn(adapter))
@@ -3413,16 +3438,16 @@ static int be_get_resources(struct be_adapter *adapter)
 		if (be_roce_supported(adapter))
 			res.max_evt_qs /= 2;
 		adapter->res = res;
-
-		dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
-			 be_max_txqs(adapter), be_max_rxqs(adapter),
-			 be_max_rss(adapter), be_max_eqs(adapter),
-			 be_max_vfs(adapter));
-		dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
-			 be_max_uc(adapter), be_max_mc(adapter),
-			 be_max_vlans(adapter));
 	}
 
+	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
+		 be_max_txqs(adapter), be_max_rxqs(adapter),
+		 be_max_rss(adapter), be_max_eqs(adapter),
+		 be_max_vfs(adapter));
+	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
+		 be_max_uc(adapter), be_max_mc(adapter),
+		 be_max_vlans(adapter));
+
 	return 0;
 }
 
@@ -3633,6 +3658,7 @@ static int be_setup(struct be_adapter *adapter)
 		goto err;
 
 	be_cmd_get_fw_ver(adapter);
+	dev_info(dev, "FW version is %s\n", adapter->fw_ver);
 
 	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
 		dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
@@ -4052,6 +4078,7 @@ static int lancer_fw_download(struct be_adapter *adapter,
 {
 #define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
 #define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
+	struct device *dev = &adapter->pdev->dev;
 	struct be_dma_mem flash_cmd;
 	const u8 *data_ptr = NULL;
 	u8 *dest_image_ptr = NULL;
@@ -4064,21 +4091,16 @@ static int lancer_fw_download(struct be_adapter *adapter,
 	u8 change_status;
 
 	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
-		dev_err(&adapter->pdev->dev,
-			"FW Image not properly aligned. "
-			"Length must be 4 byte aligned.\n");
-		status = -EINVAL;
-		goto lancer_fw_exit;
+		dev_err(dev, "FW image size should be multiple of 4\n");
+		return -EINVAL;
 	}
 
 	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
 			 + LANCER_FW_DOWNLOAD_CHUNK;
-	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
+	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
 					  &flash_cmd.dma, GFP_KERNEL);
-	if (!flash_cmd.va) {
-		status = -ENOMEM;
-		goto lancer_fw_exit;
-	}
+	if (!flash_cmd.va)
+		return -ENOMEM;
 
 	dest_image_ptr = flash_cmd.va +
 			 sizeof(struct lancer_cmd_req_write_object);
@@ -4113,35 +4135,27 @@ static int lancer_fw_download(struct be_adapter *adapter,
 						 &add_status);
 	}
 
-	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
-			  flash_cmd.dma);
+	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
 	if (status) {
-		dev_err(&adapter->pdev->dev,
-			"Firmware load error. "
-			"Status code: 0x%x Additional Status: 0x%x\n",
-			status, add_status);
-		goto lancer_fw_exit;
+		dev_err(dev, "Firmware load error\n");
+		return be_cmd_status(status);
 	}
 
+	dev_info(dev, "Firmware flashed successfully\n");
+
 	if (change_status == LANCER_FW_RESET_NEEDED) {
-		dev_info(&adapter->pdev->dev,
-			 "Resetting adapter to activate new FW\n");
+		dev_info(dev, "Resetting adapter to activate new FW\n");
 		status = lancer_physdev_ctrl(adapter,
 					     PHYSDEV_CONTROL_FW_RESET_MASK);
 		if (status) {
-			dev_err(&adapter->pdev->dev,
-				"Adapter busy for FW reset.\n"
-				"New FW will not be active.\n");
-			goto lancer_fw_exit;
+			dev_err(dev, "Adapter busy, could not reset FW\n");
+			dev_err(dev, "Reboot server to activate new FW\n");
 		}
 	} else if (change_status != LANCER_NO_RESET_NEEDED) {
-		dev_err(&adapter->pdev->dev,
-			"System reboot required for new FW to be active\n");
+		dev_info(dev, "Reboot server to activate new FW\n");
 	}
 
-	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
-lancer_fw_exit:
-	return status;
+	return 0;
 }
 
 #define UFI_TYPE2		2
@@ -4506,6 +4520,7 @@ static int be_map_pci_bars(struct be_adapter *adapter)
 	return 0;
 
 pci_map_err:
+	dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n");
 	be_unmap_pci_bars(adapter);
 	return -ENOMEM;
 }
@@ -4822,6 +4837,8 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
 	struct net_device *netdev;
 	char port_name;
 
+	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);
+
 	status = pci_enable_device(pdev);
 	if (status)
 		goto do_none;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index cca5bca44e73..9b50272824a1 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1,35 +1,30 @@
 /*******************************************************************************
-
-  Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2006 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  Linux NICS <linux.nics@intel.com>
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+ * Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2006 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
 
 /* ethtool support for e1000 */
 
 #include "e1000.h"
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 enum {NETDEV_STATS, E1000_STATS};
 
@@ -42,7 +37,7 @@ struct e1000_stats {
 
 #define E1000_STAT(m)		E1000_STATS, \
-			sizeof(((struct e1000_adapter *)0)->m), \
+				sizeof(((struct e1000_adapter *)0)->m), \
 				offsetof(struct e1000_adapter, m)
 #define E1000_NETDEV_STAT(m)	NETDEV_STATS, \
 				sizeof(((struct net_device *)0)->m), \
 				offsetof(struct net_device, m)
@@ -104,6 +99,7 @@ static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
104 "Interrupt test (offline)", "Loopback test (offline)", 99 "Interrupt test (offline)", "Loopback test (offline)",
105 "Link test (on/offline)" 100 "Link test (on/offline)"
106}; 101};
102
107#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test) 103#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)
108 104
109static int e1000_get_settings(struct net_device *netdev, 105static int e1000_get_settings(struct net_device *netdev,
@@ -113,7 +109,6 @@ static int e1000_get_settings(struct net_device *netdev,
 	struct e1000_hw *hw = &adapter->hw;
 
 	if (hw->media_type == e1000_media_type_copper) {
-
 		ecmd->supported = (SUPPORTED_10baseT_Half |
 				   SUPPORTED_10baseT_Full |
 				   SUPPORTED_100baseT_Half |
@@ -155,9 +150,8 @@ static int e1000_get_settings(struct net_device *netdev,
 	}
 
 	if (er32(STATUS) & E1000_STATUS_LU) {
-
 		e1000_get_speed_and_duplex(hw, &adapter->link_speed,
 					   &adapter->link_duplex);
 		ethtool_cmd_speed_set(ecmd, adapter->link_speed);
 
 		/* unfortunately FULL_DUPLEX != DUPLEX_FULL
@@ -247,9 +241,9 @@ static int e1000_set_settings(struct net_device *netdev,
 	if (netif_running(adapter->netdev)) {
 		e1000_down(adapter);
 		e1000_up(adapter);
-	} else
+	} else {
 		e1000_reset(adapter);
-
+	}
 	clear_bit(__E1000_RESETTING, &adapter->flags);
 	return 0;
 }
@@ -279,11 +273,11 @@ static void e1000_get_pauseparam(struct net_device *netdev,
 	pause->autoneg =
 		(adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
 
-	if (hw->fc == E1000_FC_RX_PAUSE)
+	if (hw->fc == E1000_FC_RX_PAUSE) {
 		pause->rx_pause = 1;
-	else if (hw->fc == E1000_FC_TX_PAUSE)
+	} else if (hw->fc == E1000_FC_TX_PAUSE) {
 		pause->tx_pause = 1;
-	else if (hw->fc == E1000_FC_FULL) {
+	} else if (hw->fc == E1000_FC_FULL) {
 		pause->rx_pause = 1;
 		pause->tx_pause = 1;
 	}
@@ -316,8 +310,9 @@ static int e1000_set_pauseparam(struct net_device *netdev,
 		if (netif_running(adapter->netdev)) {
 			e1000_down(adapter);
 			e1000_up(adapter);
-		} else
+		} else {
 			e1000_reset(adapter);
+		}
 	} else
 		retval = ((hw->media_type == e1000_media_type_fiber) ?
 			  e1000_setup_link(hw) : e1000_force_mac_fc(hw));
@@ -329,12 +324,14 @@ static int e1000_set_pauseparam(struct net_device *netdev,
 static u32 e1000_get_msglevel(struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
+
 	return adapter->msg_enable;
 }
 
 static void e1000_set_msglevel(struct net_device *netdev, u32 data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
+
 	adapter->msg_enable = data;
 }
 
@@ -526,7 +523,7 @@ static int e1000_set_eeprom(struct net_device *netdev,
 		 * only the first byte of the word is being modified
 		 */
 		ret_val = e1000_read_eeprom(hw, last_word, 1,
-				&eeprom_buff[last_word - first_word]);
+					    &eeprom_buff[last_word - first_word]);
 	}
 
 	/* Device's eeprom is always little-endian, word addressable */
@@ -618,13 +615,12 @@ static int e1000_set_ringparam(struct net_device *netdev,
 	adapter->tx_ring = txdr;
 	adapter->rx_ring = rxdr;
 
-	rxdr->count = max(ring->rx_pending,(u32)E1000_MIN_RXD);
-	rxdr->count = min(rxdr->count,(u32)(mac_type < e1000_82544 ?
+	rxdr->count = max(ring->rx_pending, (u32)E1000_MIN_RXD);
+	rxdr->count = min(rxdr->count, (u32)(mac_type < e1000_82544 ?
 			  E1000_MAX_RXD : E1000_MAX_82544_RXD));
 	rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE);
-
-	txdr->count = max(ring->tx_pending,(u32)E1000_MIN_TXD);
-	txdr->count = min(txdr->count,(u32)(mac_type < e1000_82544 ?
+	txdr->count = max(ring->tx_pending, (u32)E1000_MIN_TXD);
+	txdr->count = min(txdr->count, (u32)(mac_type < e1000_82544 ?
 			  E1000_MAX_TXD : E1000_MAX_82544_TXD));
 	txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
 
@@ -680,8 +676,9 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg,
 			    u32 mask, u32 write)
 {
 	struct e1000_hw *hw = &adapter->hw;
-	static const u32 test[] =
-		{0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+	static const u32 test[] = {
+		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
+	};
 	u8 __iomem *address = hw->hw_addr + reg;
 	u32 read;
 	int i;
@@ -793,8 +790,8 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
 		REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF);
 		value = E1000_RAR_ENTRIES;
 		for (i = 0; i < value; i++) {
-			REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
-					 0xFFFFFFFF);
+			REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2),
+					 0x8003FFFF, 0xFFFFFFFF);
 		}
 	} else {
 		REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF);
@@ -877,7 +874,6 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
 
 	/* Test each interrupt */
 	for (; i < 10; i++) {
-
 		/* Interrupt to test */
 		mask = 1 << i;
 
@@ -1149,8 +1145,7 @@ static void e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter)
 	 */
 	e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
 	phy_reg |= M88E1000_EPSCR_TX_CLK_25;
-	e1000_write_phy_reg(hw,
-		M88E1000_EXT_PHY_SPEC_CTRL, phy_reg);
+	e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_reg);
 
 	/* In addition, because of the s/w reset above, we need to enable
 	 * CRS on TX. This must be set for both full and half duplex
@@ -1158,8 +1153,7 @@ static void e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter)
 	 */
 	e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg);
 	phy_reg |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
-	e1000_write_phy_reg(hw,
-		M88E1000_PHY_SPEC_CTRL, phy_reg);
+	e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg);
 }
 
 static int e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
@@ -1216,7 +1210,7 @@ static int e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
 	/* Check Phy Configuration */
 	e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
 	if (phy_reg != 0x4100)
-		 return 9;
+		return 9;
 
 	e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
 	if (phy_reg != 0x0070)
@@ -1261,7 +1255,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
 			    E1000_CTRL_FD); /* Force Duplex to FULL */
 
 	if (hw->media_type == e1000_media_type_copper &&
-	   hw->phy_type == e1000_phy_m88)
+	    hw->phy_type == e1000_phy_m88)
 		ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
 	else {
 		/* Set the ILOS bit on the fiber Nic is half
@@ -1299,7 +1293,7 @@ static int e1000_set_phy_loopback(struct e1000_adapter *adapter)
 		 * attempt this 10 times.
 		 */
 		while (e1000_nonintegrated_phy_loopback(adapter) &&
-		      count++ < 10);
+		       count++ < 10);
 		if (count < 11)
 			return 0;
 	}
@@ -1348,8 +1342,9 @@ static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
 			ew32(RCTL, rctl);
 			return 0;
 		}
-	} else if (hw->media_type == e1000_media_type_copper)
+	} else if (hw->media_type == e1000_media_type_copper) {
 		return e1000_set_phy_loopback(adapter);
+	}
 
 	return 7;
 }
@@ -1395,9 +1390,9 @@ static int e1000_check_lbtest_frame(struct sk_buff *skb,
 				     unsigned int frame_size)
 {
 	frame_size &= ~1;
-	if (*(skb->data + 3) == 0xFF) {
-		if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
-		    (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
+	if (skb->data[3] == 0xFF) {
+		if (skb->data[frame_size / 2 + 10] == 0xBE &&
+		    skb->data[frame_size / 2 + 12] == 0xAF) {
 			return 0;
 		}
 	}
@@ -1410,7 +1405,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
 	struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
 	struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
 	struct pci_dev *pdev = adapter->pdev;
-	int i, j, k, l, lc, good_cnt, ret_val=0;
+	int i, j, k, l, lc, good_cnt, ret_val = 0;
 	unsigned long time;
 
 	ew32(RDT, rxdr->count - 1);
@@ -1429,12 +1424,13 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
 	for (j = 0; j <= lc; j++) { /* loop count loop */
 		for (i = 0; i < 64; i++) { /* send the packets */
 			e1000_create_lbtest_frame(txdr->buffer_info[i].skb,
-					1024);
+						  1024);
 			dma_sync_single_for_device(&pdev->dev,
 						   txdr->buffer_info[k].dma,
 						   txdr->buffer_info[k].length,
 						   DMA_TO_DEVICE);
-			if (unlikely(++k == txdr->count)) k = 0;
+			if (unlikely(++k == txdr->count))
+				k = 0;
 		}
 		ew32(TDT, k);
 		E1000_WRITE_FLUSH();
@@ -1452,7 +1448,8 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
 							   1024);
 			if (!ret_val)
 				good_cnt++;
-			if (unlikely(++l == rxdr->count)) l = 0;
+			if (unlikely(++l == rxdr->count))
+				l = 0;
 			/* time + 20 msecs (200 msecs on 2.4) is more than
 			 * enough time to complete the receives, if it's
 			 * exceeded, break and error off
@@ -1494,6 +1491,7 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
 	*data = 0;
 	if (hw->media_type == e1000_media_type_internal_serdes) {
 		int i = 0;
+
 		hw->serdes_has_link = false;
 
 		/* On some blade server designs, link establishment
@@ -1512,9 +1510,8 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
 		if (hw->autoneg) /* if auto_neg is set wait for it */
 			msleep(4000);
 
-		if (!(er32(STATUS) & E1000_STATUS_LU)) {
+		if (!(er32(STATUS) & E1000_STATUS_LU))
 			*data = 1;
-		}
 	}
 	return *data;
 }
@@ -1665,8 +1662,7 @@ static void e1000_get_wol(struct net_device *netdev,
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 
-	wol->supported = WAKE_UCAST | WAKE_MCAST |
-			 WAKE_BCAST | WAKE_MAGIC;
+	wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
 	wol->wolopts = 0;
 
 	/* this function will set ->supported = 0 and return 1 if wol is not
@@ -1819,6 +1815,7 @@ static int e1000_set_coalesce(struct net_device *netdev,
 static int e1000_nway_reset(struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
+
 	if (netif_running(netdev))
 		e1000_reinit_locked(adapter);
 	return 0;
@@ -1830,22 +1827,29 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	int i;
 	char *p = NULL;
+	const struct e1000_stats *stat = e1000_gstrings_stats;
 
 	e1000_update_stats(adapter);
 	for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
-		switch (e1000_gstrings_stats[i].type) {
+		switch (stat->type) {
 		case NETDEV_STATS:
-			p = (char *) netdev +
-				e1000_gstrings_stats[i].stat_offset;
+			p = (char *)netdev + stat->stat_offset;
 			break;
 		case E1000_STATS:
-			p = (char *) adapter +
-				e1000_gstrings_stats[i].stat_offset;
+			p = (char *)adapter + stat->stat_offset;
+			break;
+		default:
+			WARN_ONCE(1, "Invalid E1000 stat type: %u index %d\n",
+				  stat->type, i);
 			break;
 		}
 
-		data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
-			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+		if (stat->sizeof_stat == sizeof(u64))
+			data[i] = *(u64 *)p;
+		else
+			data[i] = *(u32 *)p;
+
+		stat++;
 	}
 /*	BUG_ON(i != E1000_STATS_LEN); */
 }
@@ -1858,8 +1862,7 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
 
 	switch (stringset) {
 	case ETH_SS_TEST:
-		memcpy(data, *e1000_gstrings_test,
-		       sizeof(e1000_gstrings_test));
+		memcpy(data, e1000_gstrings_test, sizeof(e1000_gstrings_test));
 		break;
 	case ETH_SS_STATS:
 		for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 801da392a20e..f1e33f896439 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -144,6 +144,8 @@ enum i40e_state_t {
 	__I40E_PTP_TX_IN_PROGRESS,
 	__I40E_BAD_EEPROM,
 	__I40E_DOWN_REQUESTED,
+	__I40E_FD_FLUSH_REQUESTED,
+	__I40E_RESET_FAILED,
 };
 
 enum i40e_interrupt_policy {
@@ -250,6 +252,11 @@ struct i40e_pf {
 	u16 fdir_pf_active_filters;
 	u16 fd_sb_cnt_idx;
 	u16 fd_atr_cnt_idx;
+	unsigned long fd_flush_timestamp;
+	u32 fd_flush_cnt;
+	u32 fd_add_err;
+	u32 fd_atr_cnt;
+	u32 fd_tcp_rule;
 
 #ifdef CONFIG_I40E_VXLAN
 	__be16 vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
@@ -310,6 +317,7 @@ struct i40e_pf {
 	u32 tx_timeout_count;
 	u32 tx_timeout_recovery_level;
 	unsigned long tx_timeout_last_recovery;
+	u32 tx_sluggish_count;
 	u32 hw_csum_rx_error;
 	u32 led_status;
 	u16 corer_count; /* Core reset count */
@@ -608,6 +616,7 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
 void i40e_fdir_check_and_reenable(struct i40e_pf *pf);
 int i40e_get_current_fd_count(struct i40e_pf *pf);
 int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf);
+int i40e_get_current_atr_cnt(struct i40e_pf *pf);
 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features);
 void i40e_set_ethtool_ops(struct net_device *netdev);
 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index b29c157b1f57..72f5d25a222f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -840,7 +840,8 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
 
 	/* bump the tail */
 	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
-	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff);
+	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
+		      buff, buff_size);
 	(hw->aq.asq.next_to_use)++;
 	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
 		hw->aq.asq.next_to_use = 0;
@@ -891,7 +892,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
 
 	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
 		   "AQTX: desc and buffer writeback:\n");
-	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff);
+	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
 
 	/* update the error if time out occurred */
 	if ((!cmd_completed) &&
@@ -987,7 +988,8 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
 		       e->msg_size);
 
 	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
-	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf);
+	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
+		      hw->aq.arq_buf_size);
 
 	/* Restore the original datalen and buffer address in the desc,
 	 * FW updates datalen to indicate the event message
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index df43e7c6777c..30056b25d94e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -75,13 +75,15 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
  * @mask: debug mask
  * @desc: pointer to admin queue descriptor
  * @buffer: pointer to command buffer
+ * @buf_len: max length of buffer
  *
  * Dumps debug log about adminq command with descriptor contents.
  **/
 void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
-		   void *buffer)
+		   void *buffer, u16 buf_len)
 {
 	struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
+	u16 len = le16_to_cpu(aq_desc->datalen);
 	u8 *aq_buffer = (u8 *)buffer;
 	u32 data[4];
 	u32 i = 0;
@@ -105,7 +107,9 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
 	if ((buffer != NULL) && (aq_desc->datalen != 0)) {
 		memset(data, 0, sizeof(data));
 		i40e_debug(hw, mask, "AQ CMD Buffer:\n");
-		for (i = 0; i < le16_to_cpu(aq_desc->datalen); i++) {
+		if (buf_len < len)
+			len = buf_len;
+		for (i = 0; i < len; i++) {
 			data[((i % 16) / 4)] |=
 				((u32)aq_buffer[i]) << (8 * (i % 4));
 			if ((i % 16) == 15) {
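
The clamp matters because datalen comes from the descriptor (ultimately firmware-controlled), while buf_len is what the caller actually allocated; without it the dump loop could read past the end of the buffer. The added guard is equivalent to the usual min() idiom:

	/* Equivalent formulation of the added guard: never dump more bytes
	 * than the caller's buffer can hold, whatever the descriptor claims.
	 */
	u16 len = min_t(u16, le16_to_cpu(aq_desc->datalen), buf_len);
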
@@ -748,6 +752,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
 	switch (hw->phy.link_info.phy_type) {
 	case I40E_PHY_TYPE_10GBASE_SR:
 	case I40E_PHY_TYPE_10GBASE_LR:
+	case I40E_PHY_TYPE_1000BASE_SX:
+	case I40E_PHY_TYPE_1000BASE_LX:
 	case I40E_PHY_TYPE_40GBASE_SR4:
 	case I40E_PHY_TYPE_40GBASE_LR4:
 		media = I40E_MEDIA_TYPE_FIBER;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 5a0cabeb35ed..7067f4b9159c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1356,6 +1356,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1356 "emp reset count: %d\n", pf->empr_count); 1356 "emp reset count: %d\n", pf->empr_count);
1357 dev_info(&pf->pdev->dev, 1357 dev_info(&pf->pdev->dev,
1358 "pf reset count: %d\n", pf->pfr_count); 1358 "pf reset count: %d\n", pf->pfr_count);
1359 dev_info(&pf->pdev->dev,
1360 "pf tx sluggish count: %d\n",
1361 pf->tx_sluggish_count);
1359 } else if (strncmp(&cmd_buf[5], "port", 4) == 0) { 1362 } else if (strncmp(&cmd_buf[5], "port", 4) == 0) {
1360 struct i40e_aqc_query_port_ets_config_resp *bw_data; 1363 struct i40e_aqc_query_port_ets_config_resp *bw_data;
1361 struct i40e_dcbx_config *cfg = 1364 struct i40e_dcbx_config *cfg =
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index e8ba7470700a..1dda467ae1ac 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -145,6 +145,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {
 	I40E_PF_STAT("rx_jabber", stats.rx_jabber),
 	I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
 	I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
+	I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt),
 	I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
 	I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),
 
@@ -312,7 +313,10 @@ static int i40e_get_settings(struct net_device *netdev,
312 break; 313 break;
313 case I40E_PHY_TYPE_10GBASE_SR: 314 case I40E_PHY_TYPE_10GBASE_SR:
314 case I40E_PHY_TYPE_10GBASE_LR: 315 case I40E_PHY_TYPE_10GBASE_LR:
316 case I40E_PHY_TYPE_1000BASE_SX:
317 case I40E_PHY_TYPE_1000BASE_LX:
315 ecmd->supported = SUPPORTED_10000baseT_Full; 318 ecmd->supported = SUPPORTED_10000baseT_Full;
319 ecmd->supported |= SUPPORTED_1000baseT_Full;
316 break; 320 break;
317 case I40E_PHY_TYPE_10GBASE_CR1_CU: 321 case I40E_PHY_TYPE_10GBASE_CR1_CU:
318 case I40E_PHY_TYPE_10GBASE_CR1: 322 case I40E_PHY_TYPE_10GBASE_CR1:
@@ -351,7 +355,8 @@ static int i40e_get_settings(struct net_device *netdev,
351 break; 355 break;
352 default: 356 default:
353 /* if we got here and link is up something bad is afoot */ 357 /* if we got here and link is up something bad is afoot */
354 WARN_ON(link_up); 358 netdev_info(netdev, "WARNING: Link is up but PHY type 0x%x is not recognized.\n",
359 hw_link_info->phy_type);
355 } 360 }
356 361
357no_valid_phy_type: 362no_valid_phy_type:
@@ -461,7 +466,8 @@ static int i40e_set_settings(struct net_device *netdev,
461 466
462 if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET && 467 if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET &&
463 hw->phy.media_type != I40E_MEDIA_TYPE_FIBER && 468 hw->phy.media_type != I40E_MEDIA_TYPE_FIBER &&
464 hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE) 469 hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE &&
470 hw->phy.link_info.link_info & I40E_AQ_LINK_UP)
465 return -EOPNOTSUPP; 471 return -EOPNOTSUPP;
466 472
467 /* get our own copy of the bits to check against */ 473 /* get our own copy of the bits to check against */
@@ -492,11 +498,10 @@ static int i40e_set_settings(struct net_device *netdev,
492 if (status) 498 if (status)
493 return -EAGAIN; 499 return -EAGAIN;
494 500
495 /* Copy link_speed and abilities to config in case they are not 501 /* Copy abilities to config in case autoneg is not
496 * set below 502 * set below
497 */ 503 */
498 memset(&config, 0, sizeof(struct i40e_aq_set_phy_config)); 504 memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
499 config.link_speed = abilities.link_speed;
500 config.abilities = abilities.abilities; 505 config.abilities = abilities.abilities;
501 506
502 /* Check autoneg */ 507 /* Check autoneg */
@@ -533,42 +538,38 @@ static int i40e_set_settings(struct net_device *netdev,
533 return -EINVAL; 538 return -EINVAL;
534 539
535 if (advertise & ADVERTISED_100baseT_Full) 540 if (advertise & ADVERTISED_100baseT_Full)
536 if (!(abilities.link_speed & I40E_LINK_SPEED_100MB)) { 541 config.link_speed |= I40E_LINK_SPEED_100MB;
537 config.link_speed |= I40E_LINK_SPEED_100MB;
538 change = true;
539 }
540 if (advertise & ADVERTISED_1000baseT_Full || 542 if (advertise & ADVERTISED_1000baseT_Full ||
541 advertise & ADVERTISED_1000baseKX_Full) 543 advertise & ADVERTISED_1000baseKX_Full)
542 if (!(abilities.link_speed & I40E_LINK_SPEED_1GB)) { 544 config.link_speed |= I40E_LINK_SPEED_1GB;
543 config.link_speed |= I40E_LINK_SPEED_1GB;
544 change = true;
545 }
546 if (advertise & ADVERTISED_10000baseT_Full || 545 if (advertise & ADVERTISED_10000baseT_Full ||
547 advertise & ADVERTISED_10000baseKX4_Full || 546 advertise & ADVERTISED_10000baseKX4_Full ||
548 advertise & ADVERTISED_10000baseKR_Full) 547 advertise & ADVERTISED_10000baseKR_Full)
549 if (!(abilities.link_speed & I40E_LINK_SPEED_10GB)) { 548 config.link_speed |= I40E_LINK_SPEED_10GB;
550 config.link_speed |= I40E_LINK_SPEED_10GB;
551 change = true;
552 }
553 if (advertise & ADVERTISED_40000baseKR4_Full || 549 if (advertise & ADVERTISED_40000baseKR4_Full ||
554 advertise & ADVERTISED_40000baseCR4_Full || 550 advertise & ADVERTISED_40000baseCR4_Full ||
555 advertise & ADVERTISED_40000baseSR4_Full || 551 advertise & ADVERTISED_40000baseSR4_Full ||
556 advertise & ADVERTISED_40000baseLR4_Full) 552 advertise & ADVERTISED_40000baseLR4_Full)
557 if (!(abilities.link_speed & I40E_LINK_SPEED_40GB)) { 553 config.link_speed |= I40E_LINK_SPEED_40GB;
558 config.link_speed |= I40E_LINK_SPEED_40GB;
559 change = true;
560 }
561 554
562 if (change) { 555 if (change || (abilities.link_speed != config.link_speed)) {
563 /* copy over the rest of the abilities */ 556 /* copy over the rest of the abilities */
564 config.phy_type = abilities.phy_type; 557 config.phy_type = abilities.phy_type;
565 config.eee_capability = abilities.eee_capability; 558 config.eee_capability = abilities.eee_capability;
566 config.eeer = abilities.eeer_val; 559 config.eeer = abilities.eeer_val;
567 config.low_power_ctrl = abilities.d3_lpan; 560 config.low_power_ctrl = abilities.d3_lpan;
568 561
569 /* If link is up set link and an so changes take effect */ 562 /* set link and auto negotiation so changes take effect */
570 if (hw->phy.link_info.link_info & I40E_AQ_LINK_UP) 563 config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
571 config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; 564 /* If link is up put link down */
565 if (hw->phy.link_info.link_info & I40E_AQ_LINK_UP) {
566 /* Tell the OS link is going down, the link will go
567 * back up when fw says it is ready asynchronously
568 */
569 netdev_info(netdev, "PHY settings change requested, NIC Link is going down.\n");
570 netif_carrier_off(netdev);
571 netif_tx_stop_all_queues(netdev);
572 }
572 573
573 /* make the aq call */ 574 /* make the aq call */
574 status = i40e_aq_set_phy_config(hw, &config, NULL); 575 status = i40e_aq_set_phy_config(hw, &config, NULL);
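The sequence this hunk introduces is worth calling out: the driver takes the carrier down and stops the Tx queues before issuing the AQ command, because firmware re-establishes link asynchronously and the stack must not keep queuing packets in the meantime. A condensed sketch using only the calls shown above (error handling elided):

/* Sketch only: quiesce the netdev before an asynchronous PHY reconfig. */
if (hw->phy.link_info.link_info & I40E_AQ_LINK_UP) {
	netdev_info(netdev, "PHY settings change requested, NIC Link is going down.\n");
	netif_carrier_off(netdev);        /* stop the stack's view of the link */
	netif_tx_stop_all_queues(netdev); /* no new Tx while fw reconfigures */
}
status = i40e_aq_set_phy_config(hw, &config, NULL); /* fw re-links when ready */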
@@ -685,6 +686,13 @@ static int i40e_set_pauseparam(struct net_device *netdev,
685 else 686 else
686 return -EINVAL; 687 return -EINVAL;
687 688
689 /* Tell the OS link is going down, the link will go back up when fw
690 * says it is ready asynchronously
691 */
692 netdev_info(netdev, "Flow control settings change requested, NIC Link is going down.\n");
693 netif_carrier_off(netdev);
694 netif_tx_stop_all_queues(netdev);
695
688 /* Set the fc mode and only restart AN if link is up */ 696
689 status = i40e_set_fc(hw, &aq_failures, link_up); 697 status = i40e_set_fc(hw, &aq_failures, link_up);
690 698
@@ -1977,6 +1985,13 @@ static int i40e_del_fdir_entry(struct i40e_vsi *vsi,
1977 struct i40e_pf *pf = vsi->back; 1985 struct i40e_pf *pf = vsi->back;
1978 int ret = 0; 1986 int ret = 0;
1979 1987
1988 if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
1989 test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
1990 return -EBUSY;
1991
1992 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
1993 return -EBUSY;
1994
1980 ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd); 1995 ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd);
1981 1996
1982 i40e_fdir_check_and_reenable(pf); 1997 i40e_fdir_check_and_reenable(pf);
@@ -2010,6 +2025,13 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
2010 if (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED) 2025 if (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)
2011 return -ENOSPC; 2026 return -ENOSPC;
2012 2027
2028 if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
2029 test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
2030 return -EBUSY;
2031
2032 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
2033 return -EBUSY;
2034
2013 fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; 2035 fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
2014 2036
2015 if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort + 2037 if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort +
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index eddec6ba095b..ed5f1c15fb0f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -37,9 +37,9 @@ static const char i40e_driver_string[] =
37 37
38#define DRV_KERN "-k" 38#define DRV_KERN "-k"
39 39
40#define DRV_VERSION_MAJOR 0 40#define DRV_VERSION_MAJOR 1
41#define DRV_VERSION_MINOR 4 41#define DRV_VERSION_MINOR 0
42#define DRV_VERSION_BUILD 21 42#define DRV_VERSION_BUILD 11
43#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ 43#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
44 __stringify(DRV_VERSION_MINOR) "." \ 44 __stringify(DRV_VERSION_MINOR) "." \
45 __stringify(DRV_VERSION_BUILD) DRV_KERN 45 __stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -1239,8 +1239,11 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
1239 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM 1239 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
1240 * @vsi: the PF Main VSI - inappropriate for any other VSI 1240 * @vsi: the PF Main VSI - inappropriate for any other VSI
1241 * @macaddr: the MAC address 1241 * @macaddr: the MAC address
1242 *
1243 * Some older firmware configurations set up a default promiscuous VLAN
1244 * filter that needs to be removed.
1242 **/ 1245 **/
1243static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr) 1246static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
1244{ 1247{
1245 struct i40e_aqc_remove_macvlan_element_data element; 1248 struct i40e_aqc_remove_macvlan_element_data element;
1246 struct i40e_pf *pf = vsi->back; 1249 struct i40e_pf *pf = vsi->back;
@@ -1248,15 +1251,18 @@ static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
1248 1251
1249 /* Only appropriate for the PF main VSI */ 1252 /* Only appropriate for the PF main VSI */
1250 if (vsi->type != I40E_VSI_MAIN) 1253 if (vsi->type != I40E_VSI_MAIN)
1251 return; 1254 return -EINVAL;
1252 1255
1256 memset(&element, 0, sizeof(element));
1253 ether_addr_copy(element.mac_addr, macaddr); 1257 ether_addr_copy(element.mac_addr, macaddr);
1254 element.vlan_tag = 0; 1258 element.vlan_tag = 0;
1255 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH | 1259 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1256 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; 1260 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1257 aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); 1261 aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1258 if (aq_ret) 1262 if (aq_ret)
1259 dev_err(&pf->pdev->dev, "Could not remove default MAC-VLAN\n"); 1263 return -ENOENT;
1264
1265 return 0;
1260} 1266}
1261 1267
1262/** 1268/**
@@ -1385,18 +1391,30 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
1385{ 1391{
1386 struct i40e_netdev_priv *np = netdev_priv(netdev); 1392 struct i40e_netdev_priv *np = netdev_priv(netdev);
1387 struct i40e_vsi *vsi = np->vsi; 1393 struct i40e_vsi *vsi = np->vsi;
1394 struct i40e_pf *pf = vsi->back;
1395 struct i40e_hw *hw = &pf->hw;
1388 struct sockaddr *addr = p; 1396 struct sockaddr *addr = p;
1389 struct i40e_mac_filter *f; 1397 struct i40e_mac_filter *f;
1390 1398
1391 if (!is_valid_ether_addr(addr->sa_data)) 1399 if (!is_valid_ether_addr(addr->sa_data))
1392 return -EADDRNOTAVAIL; 1400 return -EADDRNOTAVAIL;
1393 1401
1394 netdev_info(netdev, "set mac address=%pM\n", addr->sa_data); 1402 if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
1403 netdev_info(netdev, "already using mac address %pM\n",
1404 addr->sa_data);
1405 return 0;
1406 }
1395 1407
1396 if (test_bit(__I40E_DOWN, &vsi->back->state) || 1408 if (test_bit(__I40E_DOWN, &vsi->back->state) ||
1397 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) 1409 test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
1398 return -EADDRNOTAVAIL; 1410 return -EADDRNOTAVAIL;
1399 1411
1412 if (ether_addr_equal(hw->mac.addr, addr->sa_data))
1413 netdev_info(netdev, "returning to hw mac address %pM\n",
1414 hw->mac.addr);
1415 else
1416 netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
1417
1400 if (vsi->type == I40E_VSI_MAIN) { 1418 if (vsi->type == I40E_VSI_MAIN) {
1401 i40e_status ret; 1419 i40e_status ret;
1402 ret = i40e_aq_mac_address_write(&vsi->back->hw, 1420 ret = i40e_aq_mac_address_write(&vsi->back->hw,
@@ -1410,25 +1428,34 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
1410 } 1428 }
1411 } 1429 }
1412 1430
1413 f = i40e_find_mac(vsi, addr->sa_data, false, true); 1431 if (ether_addr_equal(netdev->dev_addr, hw->mac.addr)) {
1414 if (!f) { 1432 struct i40e_aqc_remove_macvlan_element_data element;
1415 /* In order to be sure to not drop any packets, add the
1416 * new address first then delete the old one.
1417 */
1418 f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
1419 false, false);
1420 if (!f)
1421 return -ENOMEM;
1422 1433
1423 i40e_sync_vsi_filters(vsi); 1434 memset(&element, 0, sizeof(element));
1435 ether_addr_copy(element.mac_addr, netdev->dev_addr);
1436 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1437 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1438 } else {
1424 i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, 1439 i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
1425 false, false); 1440 false, false);
1426 i40e_sync_vsi_filters(vsi);
1427 } 1441 }
1428 1442
1429 f->is_laa = true; 1443 if (ether_addr_equal(addr->sa_data, hw->mac.addr)) {
1430 if (!ether_addr_equal(netdev->dev_addr, addr->sa_data)) 1444 struct i40e_aqc_add_macvlan_element_data element;
1431 ether_addr_copy(netdev->dev_addr, addr->sa_data); 1445
1446 memset(&element, 0, sizeof(element));
1447 ether_addr_copy(element.mac_addr, hw->mac.addr);
1448 element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
1449 i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1450 } else {
1451 f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
1452 false, false);
1453 if (f)
1454 f->is_laa = true;
1455 }
1456
1457 i40e_sync_vsi_filters(vsi);
1458 ether_addr_copy(netdev->dev_addr, addr->sa_data);
1432 1459
1433 return 0; 1460 return 0;
1434} 1461}
@@ -1796,9 +1823,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1796 kfree(add_list); 1823 kfree(add_list);
1797 add_list = NULL; 1824 add_list = NULL;
1798 1825
1799 if (add_happened && (!aq_ret)) { 1826 if (add_happened && aq_ret &&
1800 /* do nothing */; 1827 pf->hw.aq.asq_last_status != I40E_AQ_RC_EINVAL) {
1801 } else if (add_happened && (aq_ret)) {
1802 dev_info(&pf->pdev->dev, 1828 dev_info(&pf->pdev->dev,
1803 "add filter failed, err %d, aq_err %d\n", 1829 "add filter failed, err %d, aq_err %d\n",
1804 aq_ret, pf->hw.aq.asq_last_status); 1830 aq_ret, pf->hw.aq.asq_last_status);
@@ -4480,11 +4506,26 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
4480 netif_carrier_on(vsi->netdev); 4506 netif_carrier_on(vsi->netdev);
4481 } else if (vsi->netdev) { 4507 } else if (vsi->netdev) {
4482 i40e_print_link_message(vsi, false); 4508 i40e_print_link_message(vsi, false);
 4509 /* need to check for qualified module here */
4510 if ((pf->hw.phy.link_info.link_info &
4511 I40E_AQ_MEDIA_AVAILABLE) &&
4512 (!(pf->hw.phy.link_info.an_info &
4513 I40E_AQ_QUALIFIED_MODULE)))
4514 netdev_err(vsi->netdev,
 4515 "The driver failed to link because an unqualified module was detected.\n");
4483 } 4516 }
4484 4517
4485 /* replay FDIR SB filters */ 4518 /* replay FDIR SB filters */
4486 if (vsi->type == I40E_VSI_FDIR) 4519 if (vsi->type == I40E_VSI_FDIR) {
4520 /* reset fd counters */
4521 pf->fd_add_err = pf->fd_atr_cnt = 0;
4522 if (pf->fd_tcp_rule > 0) {
4523 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
4524 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
4525 pf->fd_tcp_rule = 0;
4526 }
4487 i40e_fdir_filter_restore(vsi); 4527 i40e_fdir_filter_restore(vsi);
4528 }
4488 i40e_service_event_schedule(pf); 4529 i40e_service_event_schedule(pf);
4489 4530
4490 return 0; 4531 return 0;
@@ -5125,6 +5166,7 @@ int i40e_get_current_fd_count(struct i40e_pf *pf)
5125 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT); 5166 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
5126 return fcnt_prog; 5167 return fcnt_prog;
5127} 5168}
5169
5128/** 5170/**
 5129 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled 5171
5130 * @pf: board private structure 5172 * @pf: board private structure
@@ -5133,15 +5175,17 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
5133{ 5175{
5134 u32 fcnt_prog, fcnt_avail; 5176 u32 fcnt_prog, fcnt_avail;
5135 5177
5178 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
5179 return;
5180
5136 /* Check if, FD SB or ATR was auto disabled and if there is enough room 5181 /* Check if, FD SB or ATR was auto disabled and if there is enough room
5137 * to re-enable 5182 * to re-enable
5138 */ 5183 */
5139 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
5140 (pf->flags & I40E_FLAG_FD_SB_ENABLED))
5141 return;
5142 fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf); 5184 fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf);
5143 fcnt_avail = pf->fdir_pf_filter_count; 5185 fcnt_avail = pf->fdir_pf_filter_count;
5144 if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) { 5186 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
5187 (pf->fd_add_err == 0) ||
5188 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
5145 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && 5189 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
5146 (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) { 5190 (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
5147 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED; 5191 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
@@ -5158,23 +5202,84 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
5158 } 5202 }
5159} 5203}
5160 5204
5205#define I40E_MIN_FD_FLUSH_INTERVAL 10
5206/**
5207 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
5208 * @pf: board private structure
5209 **/
5210static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
5211{
5212 int flush_wait_retry = 50;
5213 int reg;
5214
5215 if (time_after(jiffies, pf->fd_flush_timestamp +
5216 (I40E_MIN_FD_FLUSH_INTERVAL * HZ))) {
5217 set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
5218 pf->fd_flush_timestamp = jiffies;
5219 pf->auto_disable_flags |= I40E_FLAG_FD_SB_ENABLED;
5220 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
5221 /* flush all filters */
5222 wr32(&pf->hw, I40E_PFQF_CTL_1,
5223 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
5224 i40e_flush(&pf->hw);
5225 pf->fd_flush_cnt++;
5226 pf->fd_add_err = 0;
5227 do {
5228 /* Check FD flush status every 5-6msec */
5229 usleep_range(5000, 6000);
5230 reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
5231 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
5232 break;
5233 } while (flush_wait_retry--);
5234 if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
5235 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
5236 } else {
5237 /* replay sideband filters */
5238 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
5239
5240 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
5241 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
5242 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
5243 clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
5244 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
5245 }
5246 }
5247}
5248
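The flush above is gated on time_after() so repeated programming errors cannot trigger back-to-back table flushes. A small userspace sketch of the same rate-limit idea, with the kernel's wraparound-safe time_after() restated for a plain tick counter (illustrative names):

#include <stdio.h>

/* Userspace restatement of the kernel's time_after(): the unsigned
 * subtraction cast to signed keeps the comparison correct across
 * counter wraparound.
 */
#define time_after(a, b) ((long)((b) - (a)) < 0)

#define MIN_FLUSH_INTERVAL 10UL	/* ticks; stands in for I40E_MIN_FD_FLUSH_INTERVAL * HZ */

static unsigned long fd_flush_timestamp;	/* last time a flush started */

/* Allow a flush only if at least MIN_FLUSH_INTERVAL ticks have passed. */
static int flush_allowed(unsigned long now)
{
	if (time_after(now, fd_flush_timestamp + MIN_FLUSH_INTERVAL)) {
		fd_flush_timestamp = now;
		return 1;
	}
	return 0;
}

int main(void)
{
	unsigned long t;

	for (t = 0; t <= 40; t += 5)
		printf("t=%2lu flush_allowed=%d\n", t, flush_allowed(t));
	return 0;
}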
5249/**
 5250 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
5251 * @pf: board private structure
5252 **/
5253int i40e_get_current_atr_cnt(struct i40e_pf *pf)
5254{
5255 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
5256}
5257
 5258/* When filters are applied very quickly, up to 256 filter programming
 5259 * descriptors can be in flight before the first filter miss error shows
 5260 * up on Rx queue 0. Accumulating enough error messages before reacting
 5261 * ensures we don't trigger a flush too often.
5262 */
5263#define I40E_MAX_FD_PROGRAM_ERROR 256
5264
5161/** 5265/**
5162 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table 5266 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
5163 * @pf: board private structure 5267 * @pf: board private structure
5164 **/ 5268 **/
5165static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) 5269static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
5166{ 5270{
5167 if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT))
5168 return;
5169 5271
5170 /* if interface is down do nothing */ 5272 /* if interface is down do nothing */
5171 if (test_bit(__I40E_DOWN, &pf->state)) 5273 if (test_bit(__I40E_DOWN, &pf->state))
5172 return; 5274 return;
5275
5276 if ((pf->fd_add_err >= I40E_MAX_FD_PROGRAM_ERROR) &&
5277 (i40e_get_current_atr_cnt(pf) >= pf->fd_atr_cnt) &&
5278 (i40e_get_current_atr_cnt(pf) > pf->fdir_pf_filter_count))
5279 i40e_fdir_flush_and_replay(pf);
5280
5173 i40e_fdir_check_and_reenable(pf); 5281 i40e_fdir_check_and_reenable(pf);
5174 5282
5175 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
5176 (pf->flags & I40E_FLAG_FD_SB_ENABLED))
5177 pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT;
5178} 5283}
5179 5284
5180/** 5285/**
@@ -5184,7 +5289,7 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
5184 **/ 5289 **/
5185static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up) 5290static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
5186{ 5291{
5187 if (!vsi) 5292 if (!vsi || test_bit(__I40E_DOWN, &vsi->state))
5188 return; 5293 return;
5189 5294
5190 switch (vsi->type) { 5295 switch (vsi->type) {
@@ -5420,6 +5525,13 @@ static void i40e_handle_link_event(struct i40e_pf *pf,
5420 memcpy(&pf->hw.phy.link_info_old, hw_link_info, 5525 memcpy(&pf->hw.phy.link_info_old, hw_link_info,
5421 sizeof(pf->hw.phy.link_info_old)); 5526 sizeof(pf->hw.phy.link_info_old));
5422 5527
5528 /* check for unqualified module, if link is down */
5529 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
5530 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
5531 (!(status->link_info & I40E_AQ_LINK_UP)))
5532 dev_err(&pf->pdev->dev,
5533 "The driver failed to link because an unqualified module was detected.\n");
5534
5423 /* update link status */ 5535 /* update link status */
5424 hw_link_info->phy_type = (enum i40e_aq_phy_type)status->phy_type; 5536 hw_link_info->phy_type = (enum i40e_aq_phy_type)status->phy_type;
5425 hw_link_info->link_speed = (enum i40e_aq_link_speed)status->link_speed; 5537 hw_link_info->link_speed = (enum i40e_aq_link_speed)status->link_speed;
@@ -5456,6 +5568,10 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
5456 u32 oldval; 5568 u32 oldval;
5457 u32 val; 5569 u32 val;
5458 5570
5571 /* Do not run clean AQ when PF reset fails */
5572 if (test_bit(__I40E_RESET_FAILED, &pf->state))
5573 return;
5574
5459 /* check for error indications */ 5575 /* check for error indications */
5460 val = rd32(&pf->hw, pf->hw.aq.arq.len); 5576 val = rd32(&pf->hw, pf->hw.aq.arq.len);
5461 oldval = val; 5577 oldval = val;
@@ -5861,19 +5977,20 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
5861 ret = i40e_pf_reset(hw); 5977 ret = i40e_pf_reset(hw);
5862 if (ret) { 5978 if (ret) {
5863 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret); 5979 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
5864 goto end_core_reset; 5980 set_bit(__I40E_RESET_FAILED, &pf->state);
5981 goto clear_recovery;
5865 } 5982 }
5866 pf->pfr_count++; 5983 pf->pfr_count++;
5867 5984
5868 if (test_bit(__I40E_DOWN, &pf->state)) 5985 if (test_bit(__I40E_DOWN, &pf->state))
5869 goto end_core_reset; 5986 goto clear_recovery;
5870 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n"); 5987 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
5871 5988
5872 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */ 5989 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
5873 ret = i40e_init_adminq(&pf->hw); 5990 ret = i40e_init_adminq(&pf->hw);
5874 if (ret) { 5991 if (ret) {
5875 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret); 5992 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
5876 goto end_core_reset; 5993 goto clear_recovery;
5877 } 5994 }
5878 5995
5879 /* re-verify the eeprom if we just had an EMP reset */ 5996 /* re-verify the eeprom if we just had an EMP reset */
@@ -5991,6 +6108,8 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
5991 i40e_send_version(pf); 6108 i40e_send_version(pf);
5992 6109
5993end_core_reset: 6110end_core_reset:
6111 clear_bit(__I40E_RESET_FAILED, &pf->state);
6112clear_recovery:
5994 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state); 6113 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
5995} 6114}
5996 6115
@@ -6036,9 +6155,9 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
6036 I40E_GL_MDET_TX_EVENT_SHIFT; 6155 I40E_GL_MDET_TX_EVENT_SHIFT;
6037 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >> 6156 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
6038 I40E_GL_MDET_TX_QUEUE_SHIFT; 6157 I40E_GL_MDET_TX_QUEUE_SHIFT;
6039 dev_info(&pf->pdev->dev, 6158 if (netif_msg_tx_err(pf))
6040 "Malicious Driver Detection event 0x%02x on TX queue %d pf number 0x%02x vf number 0x%02x\n", 6159 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d pf number 0x%02x vf number 0x%02x\n",
6041 event, queue, pf_num, vf_num); 6160 event, queue, pf_num, vf_num);
6042 wr32(hw, I40E_GL_MDET_TX, 0xffffffff); 6161 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
6043 mdd_detected = true; 6162 mdd_detected = true;
6044 } 6163 }
@@ -6050,9 +6169,9 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
6050 I40E_GL_MDET_RX_EVENT_SHIFT; 6169 I40E_GL_MDET_RX_EVENT_SHIFT;
6051 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >> 6170 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
6052 I40E_GL_MDET_RX_QUEUE_SHIFT; 6171 I40E_GL_MDET_RX_QUEUE_SHIFT;
6053 dev_info(&pf->pdev->dev, 6172 if (netif_msg_rx_err(pf))
6054 "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n", 6173 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
6055 event, queue, func); 6174 event, queue, func);
6056 wr32(hw, I40E_GL_MDET_RX, 0xffffffff); 6175 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
6057 mdd_detected = true; 6176 mdd_detected = true;
6058 } 6177 }
@@ -6061,17 +6180,13 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
6061 reg = rd32(hw, I40E_PF_MDET_TX); 6180 reg = rd32(hw, I40E_PF_MDET_TX);
6062 if (reg & I40E_PF_MDET_TX_VALID_MASK) { 6181 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
6063 wr32(hw, I40E_PF_MDET_TX, 0xFFFF); 6182 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
6064 dev_info(&pf->pdev->dev, 6183 dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
6065 "MDD TX event is for this function 0x%08x, requesting PF reset.\n",
6066 reg);
6067 pf_mdd_detected = true; 6184 pf_mdd_detected = true;
6068 } 6185 }
6069 reg = rd32(hw, I40E_PF_MDET_RX); 6186 reg = rd32(hw, I40E_PF_MDET_RX);
6070 if (reg & I40E_PF_MDET_RX_VALID_MASK) { 6187 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
6071 wr32(hw, I40E_PF_MDET_RX, 0xFFFF); 6188 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
6072 dev_info(&pf->pdev->dev, 6189 dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
6073 "MDD RX event is for this function 0x%08x, requesting PF reset.\n",
6074 reg);
6075 pf_mdd_detected = true; 6190 pf_mdd_detected = true;
6076 } 6191 }
6077 /* Queue belongs to the PF, initiate a reset */ 6192 /* Queue belongs to the PF, initiate a reset */
@@ -6088,14 +6203,16 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
6088 if (reg & I40E_VP_MDET_TX_VALID_MASK) { 6203 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
6089 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF); 6204 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
6090 vf->num_mdd_events++; 6205 vf->num_mdd_events++;
6091 dev_info(&pf->pdev->dev, "MDD TX event on VF %d\n", i); 6206 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
6207 i);
6092 } 6208 }
6093 6209
6094 reg = rd32(hw, I40E_VP_MDET_RX(i)); 6210 reg = rd32(hw, I40E_VP_MDET_RX(i));
6095 if (reg & I40E_VP_MDET_RX_VALID_MASK) { 6211 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
6096 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF); 6212 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
6097 vf->num_mdd_events++; 6213 vf->num_mdd_events++;
6098 dev_info(&pf->pdev->dev, "MDD RX event on VF %d\n", i); 6214 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
6215 i);
6099 } 6216 }
6100 6217
6101 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) { 6218 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
@@ -7086,6 +7203,11 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
7086 } 7203 }
7087 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; 7204 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7088 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED; 7205 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
7206 /* reset fd counters */
7207 pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
7208 pf->fdir_pf_active_filters = 0;
7209 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
7210 dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
7089 /* if ATR was auto disabled it can be re-enabled. */ 7211 /* if ATR was auto disabled it can be re-enabled. */
7090 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && 7212 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
7091 (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) 7213 (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
@@ -7352,7 +7474,7 @@ static const struct net_device_ops i40e_netdev_ops = {
7352 .ndo_set_vf_rate = i40e_ndo_set_vf_bw, 7474 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
7353 .ndo_get_vf_config = i40e_ndo_get_vf_config, 7475 .ndo_get_vf_config = i40e_ndo_get_vf_config,
7354 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state, 7476 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
7355 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofck, 7477 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
7356#ifdef CONFIG_I40E_VXLAN 7478#ifdef CONFIG_I40E_VXLAN
7357 .ndo_add_vxlan_port = i40e_add_vxlan_port, 7479 .ndo_add_vxlan_port = i40e_add_vxlan_port,
7358 .ndo_del_vxlan_port = i40e_del_vxlan_port, 7480 .ndo_del_vxlan_port = i40e_del_vxlan_port,
@@ -7421,14 +7543,14 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
7421 if (vsi->type == I40E_VSI_MAIN) { 7543 if (vsi->type == I40E_VSI_MAIN) {
7422 SET_NETDEV_DEV(netdev, &pf->pdev->dev); 7544 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
7423 ether_addr_copy(mac_addr, hw->mac.perm_addr); 7545 ether_addr_copy(mac_addr, hw->mac.perm_addr);
7424 /* The following two steps are necessary to prevent reception 7546 /* The following steps are necessary to prevent reception
7425 * of tagged packets - by default the NVM loads a MAC-VLAN 7547 * of tagged packets - some older NVM configurations load a
 7426 * filter that will accept any tagged packet. This is to 7548 * default MAC-VLAN filter that accepts any tagged packet
7427 * prevent that during normal operations until a specific 7549 * which must be replaced by a normal filter.
7428 * VLAN tag filter has been set.
7429 */ 7550 */
7430 i40e_rm_default_mac_filter(vsi, mac_addr); 7551 if (!i40e_rm_default_mac_filter(vsi, mac_addr))
7431 i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, true); 7552 i40e_add_filter(vsi, mac_addr,
7553 I40E_VLAN_ANY, false, true);
7432 } else { 7554 } else {
7433 /* relate the VSI_VMDQ name to the VSI_MAIN name */ 7555 /* relate the VSI_VMDQ name to the VSI_MAIN name */
7434 snprintf(netdev->name, IFNAMSIZ, "%sv%%d", 7556 snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
@@ -7644,7 +7766,22 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
7644 f_count++; 7766 f_count++;
7645 7767
7646 if (f->is_laa && vsi->type == I40E_VSI_MAIN) { 7768 if (f->is_laa && vsi->type == I40E_VSI_MAIN) {
7647 i40e_aq_mac_address_write(&vsi->back->hw, 7769 struct i40e_aqc_remove_macvlan_element_data element;
7770
7771 memset(&element, 0, sizeof(element));
7772 ether_addr_copy(element.mac_addr, f->macaddr);
7773 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
7774 ret = i40e_aq_remove_macvlan(hw, vsi->seid,
7775 &element, 1, NULL);
7776 if (ret) {
7777 /* some older FW has a different default */
7778 element.flags |=
7779 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
7780 i40e_aq_remove_macvlan(hw, vsi->seid,
7781 &element, 1, NULL);
7782 }
7783
7784 i40e_aq_mac_address_write(hw,
7648 I40E_AQC_WRITE_TYPE_LAA_WOL, 7785 I40E_AQC_WRITE_TYPE_LAA_WOL,
7649 f->macaddr, NULL); 7786 f->macaddr, NULL);
7650 } 7787 }
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 949a9a01778b..0988b5c1fe87 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -52,10 +52,8 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
52 struct i40e_asq_cmd_details *cmd_details); 52 struct i40e_asq_cmd_details *cmd_details);
53 53
54/* debug function for adminq */ 54/* debug function for adminq */
55void i40e_debug_aq(struct i40e_hw *hw, 55void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
56 enum i40e_debug_mask mask, 56 void *desc, void *buffer, u16 buf_len);
57 void *desc,
58 void *buffer);
59 57
60void i40e_idle_aq(struct i40e_hw *hw); 58void i40e_idle_aq(struct i40e_hw *hw);
61bool i40e_check_asq_alive(struct i40e_hw *hw); 59bool i40e_check_asq_alive(struct i40e_hw *hw);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 369848e107f8..be039dd6114d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -224,15 +224,19 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
224 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add); 224 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
225 if (ret) { 225 if (ret) {
226 dev_info(&pf->pdev->dev, 226 dev_info(&pf->pdev->dev,
227 "Filter command send failed for PCTYPE %d (ret = %d)\n", 227 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
228 fd_data->pctype, ret); 228 fd_data->pctype, fd_data->fd_id, ret);
229 err = true; 229 err = true;
230 } else { 230 } else {
231 dev_info(&pf->pdev->dev, 231 if (add)
232 "Filter OK for PCTYPE %d (ret = %d)\n", 232 dev_info(&pf->pdev->dev,
233 fd_data->pctype, ret); 233 "Filter OK for PCTYPE %d loc = %d\n",
234 fd_data->pctype, fd_data->fd_id);
235 else
236 dev_info(&pf->pdev->dev,
237 "Filter deleted for PCTYPE %d loc = %d\n",
238 fd_data->pctype, fd_data->fd_id);
234 } 239 }
235
236 return err ? -EOPNOTSUPP : 0; 240 return err ? -EOPNOTSUPP : 0;
237} 241}
238 242
@@ -276,10 +280,18 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
276 tcp->source = fd_data->src_port; 280 tcp->source = fd_data->src_port;
277 281
278 if (add) { 282 if (add) {
283 pf->fd_tcp_rule++;
279 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) { 284 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
280 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n"); 285 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
281 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; 286 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
282 } 287 }
288 } else {
289 pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
290 (pf->fd_tcp_rule - 1) : 0;
291 if (pf->fd_tcp_rule == 0) {
292 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
293 dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
294 }
283 } 295 }
284 296
285 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; 297 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
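fd_tcp_rule turns the earlier one-way "force ATR off" into simple reference counting: ATR stays off while any sideband TCP/IPv4 rule exists and comes back when the last one is deleted. A compact userspace sketch of that accounting (illustrative names; in the driver this state lives in struct i40e_pf):

#include <stdbool.h>
#include <stdio.h>

struct fd_state {
	unsigned int fd_tcp_rule;	/* sideband TCP/IPv4 rule count */
	bool atr_enabled;		/* I40E_FLAG_FD_ATR_ENABLED analogue */
};

static void fdir_tcp_rule(struct fd_state *st, bool add)
{
	if (add) {
		st->fd_tcp_rule++;
		if (st->atr_enabled) {
			printf("Forcing ATR off, sideband TCP/IPv4 rule applied\n");
			st->atr_enabled = false;
		}
	} else {
		st->fd_tcp_rule = st->fd_tcp_rule ? st->fd_tcp_rule - 1 : 0;
		if (st->fd_tcp_rule == 0) {
			printf("ATR re-enabled, no sideband TCP/IPv4 rules left\n");
			st->atr_enabled = true;
		}
	}
}

int main(void)
{
	struct fd_state st = { .fd_tcp_rule = 0, .atr_enabled = true };

	fdir_tcp_rule(&st, true);	/* add rule #1: ATR forced off */
	fdir_tcp_rule(&st, true);	/* add rule #2: stays off */
	fdir_tcp_rule(&st, false);	/* delete #2: count 1, still off */
	fdir_tcp_rule(&st, false);	/* delete #1: count 0, ATR back on */
	return 0;
}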
@@ -287,12 +299,17 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
287 299
288 if (ret) { 300 if (ret) {
289 dev_info(&pf->pdev->dev, 301 dev_info(&pf->pdev->dev,
290 "Filter command send failed for PCTYPE %d (ret = %d)\n", 302 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
291 fd_data->pctype, ret); 303 fd_data->pctype, fd_data->fd_id, ret);
292 err = true; 304 err = true;
293 } else { 305 } else {
294 dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n", 306 if (add)
 295 fd_data->pctype, ret); 307 dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n",
308 fd_data->pctype, fd_data->fd_id);
309 else
310 dev_info(&pf->pdev->dev,
311 "Filter deleted for PCTYPE %d loc = %d\n",
312 fd_data->pctype, fd_data->fd_id);
296 } 313 }
297 314
298 return err ? -EOPNOTSUPP : 0; 315 return err ? -EOPNOTSUPP : 0;
@@ -355,13 +372,18 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
355 372
356 if (ret) { 373 if (ret) {
357 dev_info(&pf->pdev->dev, 374 dev_info(&pf->pdev->dev,
358 "Filter command send failed for PCTYPE %d (ret = %d)\n", 375 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
359 fd_data->pctype, ret); 376 fd_data->pctype, fd_data->fd_id, ret);
360 err = true; 377 err = true;
361 } else { 378 } else {
362 dev_info(&pf->pdev->dev, 379 if (add)
363 "Filter OK for PCTYPE %d (ret = %d)\n", 380 dev_info(&pf->pdev->dev,
364 fd_data->pctype, ret); 381 "Filter OK for PCTYPE %d loc = %d\n",
382 fd_data->pctype, fd_data->fd_id);
383 else
384 dev_info(&pf->pdev->dev,
385 "Filter deleted for PCTYPE %d loc = %d\n",
386 fd_data->pctype, fd_data->fd_id);
365 } 387 }
366 } 388 }
367 389
@@ -443,8 +465,14 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
443 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT; 465 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
444 466
445 if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) { 467 if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
446 dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n", 468 if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
447 rx_desc->wb.qword0.hi_dword.fd_id); 469 (I40E_DEBUG_FD & pf->hw.debug_mask))
470 dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
471 rx_desc->wb.qword0.hi_dword.fd_id);
472
473 pf->fd_add_err++;
474 /* store the current atr filter count */
475 pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
448 476
449 /* filter programming failed most likely due to table full */ 477 /* filter programming failed most likely due to table full */
450 fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf); 478 fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf);
@@ -454,29 +482,21 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
454 * FD ATR/SB and then re-enable it when there is room. 482 * FD ATR/SB and then re-enable it when there is room.
455 */ 483 */
456 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) { 484 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
457 /* Turn off ATR first */ 485 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
458 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
459 !(pf->auto_disable_flags & 486 !(pf->auto_disable_flags &
460 I40E_FLAG_FD_ATR_ENABLED)) {
461 dev_warn(&pdev->dev, "FD filter space full, ATR for further flows will be turned off\n");
462 pf->auto_disable_flags |=
463 I40E_FLAG_FD_ATR_ENABLED;
464 pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
465 } else if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
466 !(pf->auto_disable_flags &
467 I40E_FLAG_FD_SB_ENABLED)) { 487 I40E_FLAG_FD_SB_ENABLED)) {
468 dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n"); 488 dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
469 pf->auto_disable_flags |= 489 pf->auto_disable_flags |=
470 I40E_FLAG_FD_SB_ENABLED; 490 I40E_FLAG_FD_SB_ENABLED;
471 pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
472 } 491 }
473 } else { 492 } else {
474 dev_info(&pdev->dev, "FD filter programming error\n"); 493 dev_info(&pdev->dev,
494 "FD filter programming failed due to incorrect filter parameters\n");
475 } 495 }
476 } else if (error == 496 } else if (error ==
477 (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) { 497 (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
478 if (I40E_DEBUG_FD & pf->hw.debug_mask) 498 if (I40E_DEBUG_FD & pf->hw.debug_mask)
479 dev_info(&pdev->dev, "ntuple filter loc = %d, could not be removed\n", 499 dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
480 rx_desc->wb.qword0.hi_dword.fd_id); 500 rx_desc->wb.qword0.hi_dword.fd_id);
481 } 501 }
482} 502}
@@ -587,6 +607,7 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
587static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) 607static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
588{ 608{
589 u32 tx_pending = i40e_get_tx_pending(tx_ring); 609 u32 tx_pending = i40e_get_tx_pending(tx_ring);
610 struct i40e_pf *pf = tx_ring->vsi->back;
590 bool ret = false; 611 bool ret = false;
591 612
592 clear_check_for_tx_hang(tx_ring); 613 clear_check_for_tx_hang(tx_ring);
@@ -603,10 +624,17 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
603 * pending but without time to complete it yet. 624 * pending but without time to complete it yet.
604 */ 625 */
605 if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && 626 if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
606 tx_pending) { 627 (tx_pending >= I40E_MIN_DESC_PENDING)) {
607 /* make sure it is true for two checks in a row */ 628 /* make sure it is true for two checks in a row */
608 ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, 629 ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
609 &tx_ring->state); 630 &tx_ring->state);
631 } else if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
632 (tx_pending < I40E_MIN_DESC_PENDING) &&
633 (tx_pending > 0)) {
634 if (I40E_DEBUG_FLOW & pf->hw.debug_mask)
 635 dev_info(tx_ring->dev, "HW needs some more descriptors to do a cacheline flush. tx_pending %d, queue %d\n",
636 tx_pending, tx_ring->queue_index);
637 pf->tx_sluggish_count++;
610 } else { 638 } else {
611 /* update completed stats and disarm the hang check */ 639 /* update completed stats and disarm the hang check */
612 tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets; 640 tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
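The hang check now distinguishes a genuinely stuck ring (no progress with at least I40E_MIN_DESC_PENDING descriptors outstanding, reported only on the second consecutive check) from a merely sluggish one (a tiny residue the hardware holds back for a cacheline flush, which only bumps a counter). A userspace sketch of that two-strike logic (illustrative names):

#include <stdbool.h>
#include <stdio.h>

#define MIN_DESC_PENDING 4	/* mirrors I40E_MIN_DESC_PENDING */

struct ring_state {
	unsigned long packets;		/* completed packet counter */
	unsigned long tx_done_old;	/* snapshot from the previous check */
	unsigned int pending;		/* descriptors still owned by HW */
	bool armed;			/* __I40E_HANG_CHECK_ARMED analogue */
	unsigned long sluggish;		/* pf->tx_sluggish_count analogue */
};

/* Returns true only when no progress was made across two consecutive
 * checks with a meaningful number of descriptors pending.
 */
static bool check_tx_hang(struct ring_state *r)
{
	bool hang = false;

	if (r->tx_done_old == r->packets && r->pending >= MIN_DESC_PENDING) {
		hang = r->armed;	/* second strike reports the hang */
		r->armed = true;	/* first strike only arms */
	} else if (r->tx_done_old == r->packets &&
		   r->pending > 0 && r->pending < MIN_DESC_PENDING) {
		r->sluggish++;		/* HW wants a few more descs, not hung */
	} else {
		r->tx_done_old = r->packets;	/* progress made: disarm */
		r->armed = false;
	}
	return hang;
}

int main(void)
{
	struct ring_state r = { .packets = 100, .tx_done_old = 100, .pending = 8 };

	printf("first check: %d\n", check_tx_hang(&r));		/* arms, no hang */
	printf("second check: %d\n", check_tx_hang(&r));	/* reports hang */
	return 0;
}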
@@ -1213,7 +1241,6 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1213 ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && 1241 ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
1214 (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); 1242 (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
1215 1243
1216 skb->encapsulation = ipv4_tunnel || ipv6_tunnel;
1217 skb->ip_summed = CHECKSUM_NONE; 1244 skb->ip_summed = CHECKSUM_NONE;
1218 1245
1219 /* Rx csum enabled and ip headers found? */ 1246 /* Rx csum enabled and ip headers found? */
@@ -1287,6 +1314,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1287 } 1314 }
1288 1315
1289 skb->ip_summed = CHECKSUM_UNNECESSARY; 1316 skb->ip_summed = CHECKSUM_UNNECESSARY;
1317 skb->csum_level = ipv4_tunnel || ipv6_tunnel;
1290 1318
1291 return; 1319 return;
1292 1320
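For context on the two checksum hunks: with ip_summed == CHECKSUM_UNNECESSARY, skb->csum_level records the number of verified checksums minus one, which is why setting skb->encapsulation on receive is no longer needed for checksum reporting. Restated as a sketch (these are the real sk_buff fields; no new driver code):

/* ipv4_tunnel/ipv6_tunnel are computed from the Rx ptype as above. */
skb->ip_summed = CHECKSUM_UNNECESSARY;
/* csum_level 0: one checksum verified (plain traffic);
 * csum_level 1: two verified (outer + inner of an encapsulated frame).
 */
skb->csum_level = ipv4_tunnel || ipv6_tunnel;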
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 73f4fa425697..d7a625a6a14f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -121,6 +121,7 @@ enum i40e_dyn_idx_t {
121/* Tx Descriptors needed, worst case */ 121/* Tx Descriptors needed, worst case */
122#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD) 122#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
123#define DESC_NEEDED (MAX_SKB_FRAGS + 4) 123#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
124#define I40E_MIN_DESC_PENDING 4
124 125
125#define I40E_TX_FLAGS_CSUM (u32)(1) 126#define I40E_TX_FLAGS_CSUM (u32)(1)
126#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1) 127#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 3ac6a0d2f143..4eeed267e4b7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -73,7 +73,7 @@ static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u8 vsi_id,
73{ 73{
74 struct i40e_pf *pf = vf->pf; 74 struct i40e_pf *pf = vf->pf;
75 75
76 return qid < pf->vsi[vsi_id]->num_queue_pairs; 76 return qid < pf->vsi[vsi_id]->alloc_queue_pairs;
77} 77}
78 78
79/** 79/**
@@ -350,6 +350,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
350 rx_ctx.lrxqthresh = 2; 350 rx_ctx.lrxqthresh = 2;
351 rx_ctx.crcstrip = 1; 351 rx_ctx.crcstrip = 1;
352 rx_ctx.prefena = 1; 352 rx_ctx.prefena = 1;
353 rx_ctx.l2tsel = 1;
353 354
354 /* clear the context in the HMC */ 355 /* clear the context in the HMC */
355 ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id); 356 ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
@@ -468,7 +469,7 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf)
468 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg); 469 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);
469 470
470 /* map PF queues to VF queues */ 471 /* map PF queues to VF queues */
471 for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) { 472 for (j = 0; j < pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs; j++) {
472 u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j); 473 u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
473 reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK); 474 reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
474 wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg); 475 wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
@@ -477,7 +478,7 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf)
477 478
478 /* map PF queues to VSI */ 479 /* map PF queues to VSI */
479 for (j = 0; j < 7; j++) { 480 for (j = 0; j < 7; j++) {
480 if (j * 2 >= pf->vsi[vf->lan_vsi_index]->num_queue_pairs) { 481 if (j * 2 >= pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs) {
481 reg = 0x07FF07FF; /* unused */ 482 reg = 0x07FF07FF; /* unused */
482 } else { 483 } else {
483 u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, 484 u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
@@ -584,7 +585,7 @@ static int i40e_alloc_vf_res(struct i40e_vf *vf)
584 ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV); 585 ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
585 if (ret) 586 if (ret)
586 goto error_alloc; 587 goto error_alloc;
587 total_queue_pairs += pf->vsi[vf->lan_vsi_index]->num_queue_pairs; 588 total_queue_pairs += pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs;
588 set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); 589 set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
589 590
590 /* store the total qps number for the runtime 591 /* store the total qps number for the runtime
@@ -706,35 +707,6 @@ complete_reset:
706 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE); 707 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
707 i40e_flush(hw); 708 i40e_flush(hw);
708} 709}
709
710/**
711 * i40e_vfs_are_assigned
712 * @pf: pointer to the pf structure
713 *
714 * Determine if any VFs are assigned to VMs
715 **/
716static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
717{
718 struct pci_dev *pdev = pf->pdev;
719 struct pci_dev *vfdev;
720
721 /* loop through all the VFs to see if we own any that are assigned */
722 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_VF , NULL);
723 while (vfdev) {
724 /* if we don't own it we don't care */
725 if (vfdev->is_virtfn && pci_physfn(vfdev) == pdev) {
726 /* if it is assigned we cannot release it */
727 if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
728 return true;
729 }
730
731 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
732 I40E_DEV_ID_VF,
733 vfdev);
734 }
735
736 return false;
737}
738#ifdef CONFIG_PCI_IOV 710#ifdef CONFIG_PCI_IOV
739 711
740/** 712/**
@@ -842,7 +814,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
842 * assigned. Setting the number of VFs to 0 through sysfs is caught 814 * assigned. Setting the number of VFs to 0 through sysfs is caught
843 * before this function ever gets called. 815 * before this function ever gets called.
844 */ 816 */
845 if (!i40e_vfs_are_assigned(pf)) { 817 if (!pci_vfs_assigned(pf->pdev)) {
846 pci_disable_sriov(pf->pdev); 818 pci_disable_sriov(pf->pdev);
847 /* Acknowledge VFLR for all VFS. Without this, VFs will fail to 819 /* Acknowledge VFLR for all VFS. Without this, VFs will fail to
848 * work correctly when SR-IOV gets re-enabled. 820 * work correctly when SR-IOV gets re-enabled.
@@ -979,7 +951,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
979 if (num_vfs) 951 if (num_vfs)
980 return i40e_pci_sriov_enable(pdev, num_vfs); 952 return i40e_pci_sriov_enable(pdev, num_vfs);
981 953
982 if (!i40e_vfs_are_assigned(pf)) { 954 if (!pci_vfs_assigned(pf->pdev)) {
983 i40e_free_vfs(pf); 955 i40e_free_vfs(pf);
984 } else { 956 } else {
985 dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n"); 957 dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
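pci_vfs_assigned() is the PCI core helper from <linux/pci.h> that the deleted i40e_vfs_are_assigned() open-coded: it returns how many of the PF's VFs are currently assigned to guests, zero meaning SR-IOV can be torn down safely. Sketch of the call-site pattern used in both hunks (error handling elided):

/* Sketch: only tear down SR-IOV when no VF is passed through to a guest. */
if (!pci_vfs_assigned(pf->pdev)) {
	i40e_free_vfs(pf);
	pci_disable_sriov(pf->pdev);
} else {
	dev_warn(&pf->pdev->dev,
		 "Unable to free VFs because some are assigned to VMs.\n");
}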
@@ -1123,7 +1095,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
1123 vfres->vsi_res[i].vsi_id = vf->lan_vsi_index; 1095 vfres->vsi_res[i].vsi_id = vf->lan_vsi_index;
1124 vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV; 1096 vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
1125 vfres->vsi_res[i].num_queue_pairs = 1097 vfres->vsi_res[i].num_queue_pairs =
1126 pf->vsi[vf->lan_vsi_index]->num_queue_pairs; 1098 pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs;
1127 memcpy(vfres->vsi_res[i].default_mac_addr, 1099 memcpy(vfres->vsi_res[i].default_mac_addr,
1128 vf->default_lan_addr.addr, ETH_ALEN); 1100 vf->default_lan_addr.addr, ETH_ALEN);
1129 i++; 1101 i++;
@@ -1209,6 +1181,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1209 struct i40e_virtchnl_vsi_queue_config_info *qci = 1181 struct i40e_virtchnl_vsi_queue_config_info *qci =
1210 (struct i40e_virtchnl_vsi_queue_config_info *)msg; 1182 (struct i40e_virtchnl_vsi_queue_config_info *)msg;
1211 struct i40e_virtchnl_queue_pair_info *qpi; 1183 struct i40e_virtchnl_queue_pair_info *qpi;
1184 struct i40e_pf *pf = vf->pf;
1212 u16 vsi_id, vsi_queue_id; 1185 u16 vsi_id, vsi_queue_id;
1213 i40e_status aq_ret = 0; 1186 i40e_status aq_ret = 0;
1214 int i; 1187 int i;
@@ -1242,6 +1215,8 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1242 goto error_param; 1215 goto error_param;
1243 } 1216 }
1244 } 1217 }
1218 /* set vsi num_queue_pairs in use to num configured by vf */
1219 pf->vsi[vf->lan_vsi_index]->num_queue_pairs = qci->num_queue_pairs;
1245 1220
1246error_param: 1221error_param:
1247 /* send the response to the vf */ 1222 /* send the response to the vf */
@@ -2094,7 +2069,6 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
2094 /* Force the VF driver stop so it has to reload with new MAC address */ 2069 /* Force the VF driver stop so it has to reload with new MAC address */
2095 i40e_vc_disable_vf(pf, vf); 2070 i40e_vc_disable_vf(pf, vf);
2096 dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n"); 2071 dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
2097 ret = 0;
2098 2072
2099error_param: 2073error_param:
2100 return ret; 2074 return ret;
@@ -2419,7 +2393,7 @@ error_out:
2419 * 2393 *
2420 * Enable or disable VF spoof checking 2394 * Enable or disable VF spoof checking
2421 **/ 2395 **/
2422int i40e_ndo_set_vf_spoofck(struct net_device *netdev, int vf_id, bool enable) 2396int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
2423{ 2397{
2424 struct i40e_netdev_priv *np = netdev_priv(netdev); 2398 struct i40e_netdev_priv *np = netdev_priv(netdev);
2425 struct i40e_vsi *vsi = np->vsi; 2399 struct i40e_vsi *vsi = np->vsi;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 63e7e0d81ad2..0adc61e1052d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -122,7 +122,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
122int i40e_ndo_get_vf_config(struct net_device *netdev, 122int i40e_ndo_get_vf_config(struct net_device *netdev,
123 int vf_id, struct ifla_vf_info *ivi); 123 int vf_id, struct ifla_vf_info *ivi);
124int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link); 124int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
125int i40e_ndo_set_vf_spoofck(struct net_device *netdev, int vf_id, bool enable); 125int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable);
126 126
127void i40e_vc_notify_link_state(struct i40e_pf *pf); 127void i40e_vc_notify_link_state(struct i40e_pf *pf);
128void i40e_vc_notify_reset(struct i40e_pf *pf); 128void i40e_vc_notify_reset(struct i40e_pf *pf);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index 003006033614..f206be917842 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -788,7 +788,8 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
788 788
789 /* bump the tail */ 789 /* bump the tail */
790 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n"); 790 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
791 i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff); 791 i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
792 buff, buff_size);
792 (hw->aq.asq.next_to_use)++; 793 (hw->aq.asq.next_to_use)++;
793 if (hw->aq.asq.next_to_use == hw->aq.asq.count) 794 if (hw->aq.asq.next_to_use == hw->aq.asq.count)
794 hw->aq.asq.next_to_use = 0; 795 hw->aq.asq.next_to_use = 0;
@@ -842,7 +843,8 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
842 843
843 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, 844 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
844 "AQTX: desc and buffer writeback:\n"); 845 "AQTX: desc and buffer writeback:\n");
845 i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff); 846 i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff,
847 buff_size);
846 848
847 /* update the error if time out occurred */ 849 /* update the error if time out occurred */
848 if ((!cmd_completed) && 850 if ((!cmd_completed) &&
@@ -938,7 +940,8 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
938 hw->aq.nvm_busy = false; 940 hw->aq.nvm_busy = false;
939 941
940 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n"); 942 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
941 i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf); 943 i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
944 hw->aq.arq_buf_size);
942 945
943 /* Restore the original datalen and buffer address in the desc, 946 /* Restore the original datalen and buffer address in the desc,
944 * FW updates datalen to indicate the event message 947 * FW updates datalen to indicate the event message
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 4ea90bf239bb..952560551964 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -75,13 +75,15 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
75 * @mask: debug mask 75 * @mask: debug mask
76 * @desc: pointer to admin queue descriptor 76 * @desc: pointer to admin queue descriptor
77 * @buffer: pointer to command buffer 77 * @buffer: pointer to command buffer
78 * @buf_len: max length of buffer
78 * 79 *
79 * Dumps debug log about adminq command with descriptor contents. 80 * Dumps debug log about adminq command with descriptor contents.
80 **/ 81 **/
81void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, 82void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
82 void *buffer) 83 void *buffer, u16 buf_len)
83{ 84{
84 struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; 85 struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
86 u16 len = le16_to_cpu(aq_desc->datalen);
85 u8 *aq_buffer = (u8 *)buffer; 87 u8 *aq_buffer = (u8 *)buffer;
86 u32 data[4]; 88 u32 data[4];
87 u32 i = 0; 89 u32 i = 0;
@@ -105,7 +107,9 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
105 if ((buffer != NULL) && (aq_desc->datalen != 0)) { 107 if ((buffer != NULL) && (aq_desc->datalen != 0)) {
106 memset(data, 0, sizeof(data)); 108 memset(data, 0, sizeof(data));
107 i40e_debug(hw, mask, "AQ CMD Buffer:\n"); 109 i40e_debug(hw, mask, "AQ CMD Buffer:\n");
108 for (i = 0; i < le16_to_cpu(aq_desc->datalen); i++) { 110 if (buf_len < len)
111 len = buf_len;
112 for (i = 0; i < len; i++) {
109 data[((i % 16) / 4)] |= 113 data[((i % 16) / 4)] |=
110 ((u32)aq_buffer[i]) << (8 * (i % 4)); 114 ((u32)aq_buffer[i]) << (8 * (i % 4));
111 if ((i % 16) == 15) { 115 if ((i % 16) == 15) {
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index 849edcc2e398..9173834825ac 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -53,10 +53,8 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
53bool i40evf_asq_done(struct i40e_hw *hw); 53bool i40evf_asq_done(struct i40e_hw *hw);
54 54
55/* debug function for adminq */ 55/* debug function for adminq */
56void i40evf_debug_aq(struct i40e_hw *hw, 56void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
57 enum i40e_debug_mask mask, 57 void *desc, void *buffer, u16 buf_len);
58 void *desc,
59 void *buffer);
60 58
61void i40e_idle_aq(struct i40e_hw *hw); 59void i40e_idle_aq(struct i40e_hw *hw);
62void i40evf_resume_aq(struct i40e_hw *hw); 60void i40evf_resume_aq(struct i40e_hw *hw);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 95a3ec236b49..04c7c1557a0c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -163,11 +163,13 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
163 * pending but without time to complete it yet. 163 * pending but without time to complete it yet.
164 */ 164 */
165 if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && 165 if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
166 tx_pending) { 166 (tx_pending >= I40E_MIN_DESC_PENDING)) {
167 /* make sure it is true for two checks in a row */ 167 /* make sure it is true for two checks in a row */
168 ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, 168 ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
169 &tx_ring->state); 169 &tx_ring->state);
170 } else { 170 } else if (!(tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) ||
171 !(tx_pending < I40E_MIN_DESC_PENDING) ||
172 !(tx_pending > 0)) {
171 /* update completed stats and disarm the hang check */ 173 /* update completed stats and disarm the hang check */
172 tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets; 174 tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
173 clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); 175 clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
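
The reworked hang check only arms when the completion counter has not moved between two consecutive checks and at least I40E_MIN_DESC_PENDING descriptors are still outstanding, so a handful of stragglers no longer triggers a false hang report. A simplified standalone sketch of the two-strikes logic (the driver additionally leaves the armed state untouched when 0 < pending < threshold; that case is omitted here):

    #include <stdbool.h>
    #include <stdint.h>

    #define MIN_DESC_PENDING 4

    struct ring_model {
        uint64_t packets;   /* lifetime completion count */
        uint64_t done_old;  /* snapshot from the previous check */
        bool     armed;     /* set after the first suspicious check */
    };

    /* Report a hang only on the second consecutive check with no
     * progress and a meaningful number of descriptors pending. */
    static bool check_tx_hang(struct ring_model *r, uint32_t pending)
    {
        if (r->done_old == r->packets && pending >= MIN_DESC_PENDING) {
            bool was_armed = r->armed;  /* models test_and_set_bit() */
            r->armed = true;
            return was_armed;
        }
        r->done_old = r->packets;       /* progress made: disarm */
        r->armed = false;
        return false;
    }
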
@@ -744,7 +746,6 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
744 ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && 746 ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
745 (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); 747 (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
746 748
747 skb->encapsulation = ipv4_tunnel || ipv6_tunnel;
748 skb->ip_summed = CHECKSUM_NONE; 749 skb->ip_summed = CHECKSUM_NONE;
749 750
750 /* Rx csum enabled and ip headers found? */ 751 /* Rx csum enabled and ip headers found? */
@@ -818,6 +819,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
818 } 819 }
819 820
820 skb->ip_summed = CHECKSUM_UNNECESSARY; 821 skb->ip_summed = CHECKSUM_UNNECESSARY;
822 skb->csum_level = ipv4_tunnel || ipv6_tunnel;
821 823
822 return; 824 return;
823 825
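
Dropping skb->encapsulation in favor of skb->csum_level changes the contract with the stack: CHECKSUM_UNNECESSARY plus a nonzero csum_level says checksums were verified through that many levels of encapsulation, rather than merely flagging that a tunnel header is present. A tiny model of the receive-side marking (struct and enum are stand-ins, not the kernel's):

    #include <stdbool.h>

    enum { CHECKSUM_NONE = 0, CHECKSUM_UNNECESSARY = 1 };

    struct skb_model {
        int ip_summed;
        int csum_level;   /* encapsulation depth the HW verified */
    };

    /* On a good checksum, record how deep the verification went so the
     * stack can trust the inner checksum of tunneled packets too. */
    static void rx_csum_good(struct skb_model *skb, bool tunneled)
    {
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        skb->csum_level = tunneled ? 1 : 0;
    }
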
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 8bc6858163b0..f6dcf9dd9290 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -121,6 +121,7 @@ enum i40e_dyn_idx_t {
121/* Tx Descriptors needed, worst case */ 121/* Tx Descriptors needed, worst case */
122#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD) 122#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
123#define DESC_NEEDED (MAX_SKB_FRAGS + 4) 123#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
124#define I40E_MIN_DESC_PENDING 4
124 125
125#define I40E_TX_FLAGS_CSUM (u32)(1) 126#define I40E_TX_FLAGS_CSUM (u32)(1)
126#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1) 127#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1)
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 38429fae4fcf..c51bc7a33bc5 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -36,7 +36,7 @@ char i40evf_driver_name[] = "i40evf";
36static const char i40evf_driver_string[] = 36static const char i40evf_driver_string[] =
37 "Intel(R) XL710/X710 Virtual Function Network Driver"; 37 "Intel(R) XL710/X710 Virtual Function Network Driver";
38 38
39#define DRV_VERSION "0.9.40" 39#define DRV_VERSION "1.0.5"
40const char i40evf_driver_version[] = DRV_VERSION; 40const char i40evf_driver_version[] = DRV_VERSION;
41static const char i40evf_copyright[] = 41static const char i40evf_copyright[] =
42 "Copyright (c) 2013 - 2014 Intel Corporation."; 42 "Copyright (c) 2013 - 2014 Intel Corporation.";
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 236a6183a865..051ea94bdcd3 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -2548,11 +2548,13 @@ s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
2548/** 2548/**
2549 * igb_set_eee_i350 - Enable/disable EEE support 2549 * igb_set_eee_i350 - Enable/disable EEE support
2550 * @hw: pointer to the HW structure 2550 * @hw: pointer to the HW structure
2551 * @adv1G: boolean flag enabling 1G EEE advertisement
 2552 * @adv100M: boolean flag enabling 100M EEE advertisement
2551 * 2553 *
2552 * Enable/disable EEE based on setting in dev_spec structure. 2554 * Enable/disable EEE based on setting in dev_spec structure.
2553 * 2555 *
2554 **/ 2556 **/
2555s32 igb_set_eee_i350(struct e1000_hw *hw) 2557s32 igb_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M)
2556{ 2558{
2557 u32 ipcnfg, eeer; 2559 u32 ipcnfg, eeer;
2558 2560
@@ -2566,7 +2568,16 @@ s32 igb_set_eee_i350(struct e1000_hw *hw)
2566 if (!(hw->dev_spec._82575.eee_disable)) { 2568 if (!(hw->dev_spec._82575.eee_disable)) {
2567 u32 eee_su = rd32(E1000_EEE_SU); 2569 u32 eee_su = rd32(E1000_EEE_SU);
2568 2570
2569 ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN); 2571 if (adv100M)
2572 ipcnfg |= E1000_IPCNFG_EEE_100M_AN;
2573 else
2574 ipcnfg &= ~E1000_IPCNFG_EEE_100M_AN;
2575
2576 if (adv1G)
2577 ipcnfg |= E1000_IPCNFG_EEE_1G_AN;
2578 else
2579 ipcnfg &= ~E1000_IPCNFG_EEE_1G_AN;
2580
2570 eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | 2581 eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
2571 E1000_EEER_LPI_FC); 2582 E1000_EEER_LPI_FC);
2572 2583
@@ -2593,11 +2604,13 @@ out:
2593/** 2604/**
2594 * igb_set_eee_i354 - Enable/disable EEE support 2605 * igb_set_eee_i354 - Enable/disable EEE support
2595 * @hw: pointer to the HW structure 2606 * @hw: pointer to the HW structure
2607 * @adv1G: boolean flag enabling 1G EEE advertisement
 2608 * @adv100M: boolean flag enabling 100M EEE advertisement
2596 * 2609 *
2597 * Enable/disable EEE legacy mode based on setting in dev_spec structure. 2610 * Enable/disable EEE legacy mode based on setting in dev_spec structure.
2598 * 2611 *
2599 **/ 2612 **/
2600s32 igb_set_eee_i354(struct e1000_hw *hw) 2613s32 igb_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M)
2601{ 2614{
2602 struct e1000_phy_info *phy = &hw->phy; 2615 struct e1000_phy_info *phy = &hw->phy;
2603 s32 ret_val = 0; 2616 s32 ret_val = 0;
@@ -2636,8 +2649,16 @@ s32 igb_set_eee_i354(struct e1000_hw *hw)
2636 if (ret_val) 2649 if (ret_val)
2637 goto out; 2650 goto out;
2638 2651
2639 phy_data |= E1000_EEE_ADV_100_SUPPORTED | 2652 if (adv100M)
2640 E1000_EEE_ADV_1000_SUPPORTED; 2653 phy_data |= E1000_EEE_ADV_100_SUPPORTED;
2654 else
2655 phy_data &= ~E1000_EEE_ADV_100_SUPPORTED;
2656
2657 if (adv1G)
2658 phy_data |= E1000_EEE_ADV_1000_SUPPORTED;
2659 else
2660 phy_data &= ~E1000_EEE_ADV_1000_SUPPORTED;
2661
2641 ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, 2662 ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
2642 E1000_EEE_ADV_DEV_I354, 2663 E1000_EEE_ADV_DEV_I354,
2643 phy_data); 2664 phy_data);
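
Both the i350 register path and the i354 PHY path now set or clear each advertisement bit independently instead of unconditionally OR-ing both in, which is what lets the new adv1G/adv100M parameters disable one speed while keeping the other. The read-modify-write pattern in isolation (bit positions are illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    #define ADV_100M (1u << 0)   /* stand-ins for the EEE advert bits */
    #define ADV_1G   (1u << 1)

    static uint32_t set_eee_advert(uint32_t reg, bool adv1g, bool adv100m)
    {
        if (adv100m)
            reg |= ADV_100M;
        else
            reg &= ~ADV_100M;    /* clearing is now possible */

        if (adv1g)
            reg |= ADV_1G;
        else
            reg &= ~ADV_1G;

        return reg;              /* all other bits are preserved */
    }
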
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index b407c55738fa..2154aea7aa7e 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -263,8 +263,8 @@ void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool);
263void igb_vmdq_set_replication_pf(struct e1000_hw *, bool); 263void igb_vmdq_set_replication_pf(struct e1000_hw *, bool);
264u16 igb_rxpbs_adjust_82580(u32 data); 264u16 igb_rxpbs_adjust_82580(u32 data);
265s32 igb_read_emi_reg(struct e1000_hw *, u16 addr, u16 *data); 265s32 igb_read_emi_reg(struct e1000_hw *, u16 addr, u16 *data);
266s32 igb_set_eee_i350(struct e1000_hw *); 266s32 igb_set_eee_i350(struct e1000_hw *, bool adv1G, bool adv100M);
267s32 igb_set_eee_i354(struct e1000_hw *); 267s32 igb_set_eee_i354(struct e1000_hw *, bool adv1G, bool adv100M);
268s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status); 268s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status);
269 269
270#define E1000_I2C_THERMAL_SENSOR_ADDR 0xF8 270#define E1000_I2C_THERMAL_SENSOR_ADDR 0xF8
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index c737d1f40838..02cfd3b14762 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2675,6 +2675,7 @@ static int igb_set_eee(struct net_device *netdev,
2675 struct igb_adapter *adapter = netdev_priv(netdev); 2675 struct igb_adapter *adapter = netdev_priv(netdev);
2676 struct e1000_hw *hw = &adapter->hw; 2676 struct e1000_hw *hw = &adapter->hw;
2677 struct ethtool_eee eee_curr; 2677 struct ethtool_eee eee_curr;
2678 bool adv1g_eee = true, adv100m_eee = true;
2678 s32 ret_val; 2679 s32 ret_val;
2679 2680
2680 if ((hw->mac.type < e1000_i350) || 2681 if ((hw->mac.type < e1000_i350) ||
@@ -2701,12 +2702,14 @@ static int igb_set_eee(struct net_device *netdev,
2701 return -EINVAL; 2702 return -EINVAL;
2702 } 2703 }
2703 2704
2704 if (edata->advertised & 2705 if (!edata->advertised || (edata->advertised &
2705 ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) { 2706 ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL))) {
2706 dev_err(&adapter->pdev->dev, 2707 dev_err(&adapter->pdev->dev,
2707 "EEE Advertisement supports only 100Tx and or 100T full duplex\n"); 2708 "EEE Advertisement supports only 100Tx and/or 100T full duplex\n");
2708 return -EINVAL; 2709 return -EINVAL;
2709 } 2710 }
2711 adv100m_eee = !!(edata->advertised & ADVERTISE_100_FULL);
2712 adv1g_eee = !!(edata->advertised & ADVERTISE_1000_FULL);
2710 2713
2711 } else if (!edata->eee_enabled) { 2714 } else if (!edata->eee_enabled) {
2712 dev_err(&adapter->pdev->dev, 2715 dev_err(&adapter->pdev->dev,
@@ -2718,10 +2721,6 @@ static int igb_set_eee(struct net_device *netdev,
2718 if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) { 2721 if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) {
2719 hw->dev_spec._82575.eee_disable = !edata->eee_enabled; 2722 hw->dev_spec._82575.eee_disable = !edata->eee_enabled;
2720 adapter->flags |= IGB_FLAG_EEE; 2723 adapter->flags |= IGB_FLAG_EEE;
2721 if (hw->mac.type == e1000_i350)
2722 igb_set_eee_i350(hw);
2723 else
2724 igb_set_eee_i354(hw);
2725 2724
2726 /* reset link */ 2725 /* reset link */
2727 if (netif_running(netdev)) 2726 if (netif_running(netdev))
@@ -2730,6 +2729,17 @@ static int igb_set_eee(struct net_device *netdev,
2730 igb_reset(adapter); 2729 igb_reset(adapter);
2731 } 2730 }
2732 2731
2732 if (hw->mac.type == e1000_i354)
2733 ret_val = igb_set_eee_i354(hw, adv1g_eee, adv100m_eee);
2734 else
2735 ret_val = igb_set_eee_i350(hw, adv1g_eee, adv100m_eee);
2736
2737 if (ret_val) {
2738 dev_err(&adapter->pdev->dev,
2739 "Problem setting EEE advertisement options\n");
2740 return -EINVAL;
2741 }
2742
2733 return 0; 2743 return 0;
2734} 2744}
2735 2745
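
The ethtool path now rejects an empty advertisement mask up front and derives the two per-speed booleans from it before programming either chip, propagating any hardware failure back as -EINVAL. A compact model of that validation step (the ADV_* values are illustrative, not the uapi constants):

    #include <stdbool.h>
    #include <stdint.h>
    #include <errno.h>

    #define ADV_100_FULL  (1u << 3)
    #define ADV_1000_FULL (1u << 5)

    static int parse_eee_advert(uint32_t advertised,
                                bool *adv1g, bool *adv100m)
    {
        /* nothing advertised, or anything beyond the two supported
         * speeds, is reported back to userspace as invalid */
        if (!advertised ||
            (advertised & ~(ADV_100_FULL | ADV_1000_FULL)))
            return -EINVAL;

        *adv100m = !!(advertised & ADV_100_FULL);
        *adv1g   = !!(advertised & ADV_1000_FULL);
        return 0;
    }
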
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index cb14bbdfb056..6cf0c17ad9c4 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2012,10 +2012,10 @@ void igb_reset(struct igb_adapter *adapter)
2012 case e1000_i350: 2012 case e1000_i350:
2013 case e1000_i210: 2013 case e1000_i210:
2014 case e1000_i211: 2014 case e1000_i211:
2015 igb_set_eee_i350(hw); 2015 igb_set_eee_i350(hw, true, true);
2016 break; 2016 break;
2017 case e1000_i354: 2017 case e1000_i354:
2018 igb_set_eee_i354(hw); 2018 igb_set_eee_i354(hw, true, true);
2019 break; 2019 break;
2020 default: 2020 default:
2021 break; 2021 break;
@@ -2619,7 +2619,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2619 case e1000_i210: 2619 case e1000_i210:
2620 case e1000_i211: 2620 case e1000_i211:
2621 /* Enable EEE for internal copper PHY devices */ 2621 /* Enable EEE for internal copper PHY devices */
2622 err = igb_set_eee_i350(hw); 2622 err = igb_set_eee_i350(hw, true, true);
2623 if ((!err) && 2623 if ((!err) &&
2624 (!hw->dev_spec._82575.eee_disable)) { 2624 (!hw->dev_spec._82575.eee_disable)) {
2625 adapter->eee_advert = 2625 adapter->eee_advert =
@@ -2630,7 +2630,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2630 case e1000_i354: 2630 case e1000_i354:
2631 if ((rd32(E1000_CTRL_EXT) & 2631 if ((rd32(E1000_CTRL_EXT) &
2632 E1000_CTRL_EXT_LINK_MODE_SGMII)) { 2632 E1000_CTRL_EXT_LINK_MODE_SGMII)) {
2633 err = igb_set_eee_i354(hw); 2633 err = igb_set_eee_i354(hw, true, true);
2634 if ((!err) && 2634 if ((!err) &&
2635 (!hw->dev_spec._82575.eee_disable)) { 2635 (!hw->dev_spec._82575.eee_disable)) {
2636 adapter->eee_advert = 2636 adapter->eee_advert =
@@ -4813,6 +4813,41 @@ static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
4813 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 4813 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
4814} 4814}
4815 4815
4816static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
4817{
4818 struct net_device *netdev = tx_ring->netdev;
4819
4820 netif_stop_subqueue(netdev, tx_ring->queue_index);
4821
4822 /* Herbert's original patch had:
4823 * smp_mb__after_netif_stop_queue();
4824 * but since that doesn't exist yet, just open code it.
4825 */
4826 smp_mb();
4827
4828 /* We need to check again in a case another CPU has just
4829 * made room available.
4830 */
4831 if (igb_desc_unused(tx_ring) < size)
4832 return -EBUSY;
4833
4834 /* A reprieve! */
4835 netif_wake_subqueue(netdev, tx_ring->queue_index);
4836
4837 u64_stats_update_begin(&tx_ring->tx_syncp2);
4838 tx_ring->tx_stats.restart_queue2++;
4839 u64_stats_update_end(&tx_ring->tx_syncp2);
4840
4841 return 0;
4842}
4843
4844static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
4845{
4846 if (igb_desc_unused(tx_ring) >= size)
4847 return 0;
4848 return __igb_maybe_stop_tx(tx_ring, size);
4849}
4850
4816static void igb_tx_map(struct igb_ring *tx_ring, 4851static void igb_tx_map(struct igb_ring *tx_ring,
4817 struct igb_tx_buffer *first, 4852 struct igb_tx_buffer *first,
4818 const u8 hdr_len) 4853 const u8 hdr_len)
@@ -4915,13 +4950,17 @@ static void igb_tx_map(struct igb_ring *tx_ring,
4915 4950
4916 tx_ring->next_to_use = i; 4951 tx_ring->next_to_use = i;
4917 4952
4918 writel(i, tx_ring->tail); 4953 /* Make sure there is space in the ring for the next send. */
4954 igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
4919 4955
4920 /* we need this if more than one processor can write to our tail 4956 if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
4921 * at a time, it synchronizes IO on IA64/Altix systems 4957 writel(i, tx_ring->tail);
4922 */
4923 mmiowb();
4924 4958
4959 /* we need this if more than one processor can write to our tail
4960 * at a time, it synchronizes IO on IA64/Altix systems
4961 */
4962 mmiowb();
4963 }
4925 return; 4964 return;
4926 4965
4927dma_error: 4966dma_error:
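
Reordering the stop check ahead of the doorbell and gating the tail write on netif_xmit_stopped() || !skb->xmit_more is the usual xmit_more batching shape: descriptors for several queued skbs are posted, but the MMIO tail write (and the mmiowb ordering that goes with it) is paid once per batch. Checking for a stopped queue first matters because a stopped queue will see no further transmits to flush the tail for it. A userspace model of the decision:

    #include <stdbool.h>
    #include <stdint.h>

    struct txq_model {
        uint16_t next_to_use;
        uint16_t tail;      /* last value "written" to the doorbell */
        bool     stopped;   /* out of descriptors */
    };

    /* Ring the doorbell on the last skb of a batch, or immediately if
     * the queue just stopped and nothing else will come to flush it. */
    static void maybe_ring_doorbell(struct txq_model *q, bool xmit_more)
    {
        if (q->stopped || !xmit_more)
            q->tail = q->next_to_use;   /* models writel(i, tail) */
    }
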
@@ -4941,41 +4980,6 @@ dma_error:
4941 tx_ring->next_to_use = i; 4980 tx_ring->next_to_use = i;
4942} 4981}
4943 4982
4944static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
4945{
4946 struct net_device *netdev = tx_ring->netdev;
4947
4948 netif_stop_subqueue(netdev, tx_ring->queue_index);
4949
4950 /* Herbert's original patch had:
4951 * smp_mb__after_netif_stop_queue();
4952 * but since that doesn't exist yet, just open code it.
4953 */
4954 smp_mb();
4955
4956 /* We need to check again in a case another CPU has just
4957 * made room available.
4958 */
4959 if (igb_desc_unused(tx_ring) < size)
4960 return -EBUSY;
4961
4962 /* A reprieve! */
4963 netif_wake_subqueue(netdev, tx_ring->queue_index);
4964
4965 u64_stats_update_begin(&tx_ring->tx_syncp2);
4966 tx_ring->tx_stats.restart_queue2++;
4967 u64_stats_update_end(&tx_ring->tx_syncp2);
4968
4969 return 0;
4970}
4971
4972static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
4973{
4974 if (igb_desc_unused(tx_ring) >= size)
4975 return 0;
4976 return __igb_maybe_stop_tx(tx_ring, size);
4977}
4978
4979netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, 4983netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
4980 struct igb_ring *tx_ring) 4984 struct igb_ring *tx_ring)
4981{ 4985{
@@ -5046,9 +5050,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
5046 5050
5047 igb_tx_map(tx_ring, first, hdr_len); 5051 igb_tx_map(tx_ring, first, hdr_len);
5048 5052
5049 /* Make sure there is space in the ring for the next send. */
5050 igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
5051
5052 return NETDEV_TX_OK; 5053 return NETDEV_TX_OK;
5053 5054
5054out_drop: 5055out_drop:
@@ -6768,113 +6769,6 @@ static bool igb_is_non_eop(struct igb_ring *rx_ring,
6768} 6769}
6769 6770
6770/** 6771/**
6771 * igb_get_headlen - determine size of header for LRO/GRO
6772 * @data: pointer to the start of the headers
6773 * @max_len: total length of section to find headers in
6774 *
6775 * This function is meant to determine the length of headers that will
6776 * be recognized by hardware for LRO, and GRO offloads. The main
6777 * motivation of doing this is to only perform one pull for IPv4 TCP
6778 * packets so that we can do basic things like calculating the gso_size
6779 * based on the average data per packet.
6780 **/
6781static unsigned int igb_get_headlen(unsigned char *data,
6782 unsigned int max_len)
6783{
6784 union {
6785 unsigned char *network;
6786 /* l2 headers */
6787 struct ethhdr *eth;
6788 struct vlan_hdr *vlan;
6789 /* l3 headers */
6790 struct iphdr *ipv4;
6791 struct ipv6hdr *ipv6;
6792 } hdr;
6793 __be16 protocol;
6794 u8 nexthdr = 0; /* default to not TCP */
6795 u8 hlen;
6796
6797 /* this should never happen, but better safe than sorry */
6798 if (max_len < ETH_HLEN)
6799 return max_len;
6800
6801 /* initialize network frame pointer */
6802 hdr.network = data;
6803
6804 /* set first protocol and move network header forward */
6805 protocol = hdr.eth->h_proto;
6806 hdr.network += ETH_HLEN;
6807
6808 /* handle any vlan tag if present */
6809 if (protocol == htons(ETH_P_8021Q)) {
6810 if ((hdr.network - data) > (max_len - VLAN_HLEN))
6811 return max_len;
6812
6813 protocol = hdr.vlan->h_vlan_encapsulated_proto;
6814 hdr.network += VLAN_HLEN;
6815 }
6816
6817 /* handle L3 protocols */
6818 if (protocol == htons(ETH_P_IP)) {
6819 if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
6820 return max_len;
6821
6822 /* access ihl as a u8 to avoid unaligned access on ia64 */
6823 hlen = (hdr.network[0] & 0x0F) << 2;
6824
6825 /* verify hlen meets minimum size requirements */
6826 if (hlen < sizeof(struct iphdr))
6827 return hdr.network - data;
6828
6829 /* record next protocol if header is present */
6830 if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
6831 nexthdr = hdr.ipv4->protocol;
6832 } else if (protocol == htons(ETH_P_IPV6)) {
6833 if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
6834 return max_len;
6835
6836 /* record next protocol */
6837 nexthdr = hdr.ipv6->nexthdr;
6838 hlen = sizeof(struct ipv6hdr);
6839 } else {
6840 return hdr.network - data;
6841 }
6842
6843 /* relocate pointer to start of L4 header */
6844 hdr.network += hlen;
6845
6846 /* finally sort out TCP */
6847 if (nexthdr == IPPROTO_TCP) {
6848 if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
6849 return max_len;
6850
6851 /* access doff as a u8 to avoid unaligned access on ia64 */
6852 hlen = (hdr.network[12] & 0xF0) >> 2;
6853
6854 /* verify hlen meets minimum size requirements */
6855 if (hlen < sizeof(struct tcphdr))
6856 return hdr.network - data;
6857
6858 hdr.network += hlen;
6859 } else if (nexthdr == IPPROTO_UDP) {
6860 if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
6861 return max_len;
6862
6863 hdr.network += sizeof(struct udphdr);
6864 }
6865
6866 /* If everything has gone correctly hdr.network should be the
6867 * data section of the packet and will be the end of the header.
6868 * If not then it probably represents the end of the last recognized
6869 * header.
6870 */
6871 if ((hdr.network - data) < max_len)
6872 return hdr.network - data;
6873 else
6874 return max_len;
6875}
6876
6877/**
6878 * igb_pull_tail - igb specific version of skb_pull_tail 6772 * igb_pull_tail - igb specific version of skb_pull_tail
6879 * @rx_ring: rx descriptor ring packet is being transacted on 6773 * @rx_ring: rx descriptor ring packet is being transacted on
6880 * @rx_desc: pointer to the EOP Rx descriptor 6774 * @rx_desc: pointer to the EOP Rx descriptor
@@ -6918,7 +6812,7 @@ static void igb_pull_tail(struct igb_ring *rx_ring,
6918 /* we need the header to contain the greater of either ETH_HLEN or 6812 /* we need the header to contain the greater of either ETH_HLEN or
6919 * 60 bytes if the skb->len is less than 60 for skb_pad. 6813 * 60 bytes if the skb->len is less than 60 for skb_pad.
6920 */ 6814 */
6921 pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN); 6815 pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN);
6922 6816
6923 /* align pull length to size of long to optimize memcpy performance */ 6817 /* align pull length to size of long to optimize memcpy performance */
6924 skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); 6818 skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
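
With the private parsers gone, igb, ixgbe, and mlx4 all size the header pull with the shared eth_get_headlen() helper, which returns the length of the recognizable L2/L3/L4 headers capped at the budget it is given. A runnable userspace model of what the removed code computed for the common untagged IPv4/TCP case (the real helper also handles VLAN, IPv6, UDP, and FCoE):

    #include <stdint.h>

    #define ETH_HLEN 14

    static unsigned int headlen_model(const uint8_t *data,
                                      unsigned int max_len)
    {
        unsigned int off = ETH_HLEN;

        if (max_len < ETH_HLEN)
            return max_len;
        if (data[12] != 0x08 || data[13] != 0x00)  /* not ETH_P_IP */
            return off;
        if (off + 20 > max_len)
            return max_len;

        unsigned int ihl = (data[off] & 0x0F) * 4; /* IPv4 hdr length */
        if (ihl < 20)
            return off;
        unsigned int proto = data[off + 9];
        off += ihl;

        if (proto == 6) {                          /* IPPROTO_TCP */
            if (off + 20 > max_len)
                return max_len;
            unsigned int doff = ((data[off + 12] & 0xF0) >> 4) * 4;
            if (doff < 20)
                return off;
            off += doff;
        }

        return off < max_len ? off : max_len;      /* cap at budget */
    }
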
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 2d9451e39686..ae36fd61a3aa 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -1086,6 +1086,11 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
1086 return; 1086 return;
1087 } 1087 }
1088 1088
1089 /* At this point, we do not have MSI-X capabilities. We need to
1090 * reconfigure or disable various features which require MSI-X
1091 * capability.
1092 */
1093
1089 /* disable DCB if number of TCs exceeds 1 */ 1094 /* disable DCB if number of TCs exceeds 1 */
1090 if (netdev_get_num_tc(adapter->netdev) > 1) { 1095 if (netdev_get_num_tc(adapter->netdev) > 1) {
1091 e_err(probe, "num TCs exceeds number of queues - disabling DCB\n"); 1096 e_err(probe, "num TCs exceeds number of queues - disabling DCB\n");
@@ -1107,6 +1112,9 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
1107 /* disable RSS */ 1112 /* disable RSS */
1108 adapter->ring_feature[RING_F_RSS].limit = 1; 1113 adapter->ring_feature[RING_F_RSS].limit = 1;
1109 1114
1115 /* recalculate number of queues now that many features have been
1116 * changed or disabled.
1117 */
1110 ixgbe_set_num_queues(adapter); 1118 ixgbe_set_num_queues(adapter);
1111 adapter->num_q_vectors = 1; 1119 adapter->num_q_vectors = 1;
1112 1120
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 87bd53fdd209..166dc0015a5e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1521,120 +1521,6 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
1521 ixgbe_release_rx_desc(rx_ring, i); 1521 ixgbe_release_rx_desc(rx_ring, i);
1522} 1522}
1523 1523
1524/**
1525 * ixgbe_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
1526 * @data: pointer to the start of the headers
1527 * @max_len: total length of section to find headers in
1528 *
1529 * This function is meant to determine the length of headers that will
1530 * be recognized by hardware for LRO, GRO, and RSC offloads. The main
1531 * motivation of doing this is to only perform one pull for IPv4 TCP
1532 * packets so that we can do basic things like calculating the gso_size
1533 * based on the average data per packet.
1534 **/
1535static unsigned int ixgbe_get_headlen(unsigned char *data,
1536 unsigned int max_len)
1537{
1538 union {
1539 unsigned char *network;
1540 /* l2 headers */
1541 struct ethhdr *eth;
1542 struct vlan_hdr *vlan;
1543 /* l3 headers */
1544 struct iphdr *ipv4;
1545 struct ipv6hdr *ipv6;
1546 } hdr;
1547 __be16 protocol;
1548 u8 nexthdr = 0; /* default to not TCP */
1549 u8 hlen;
1550
1551 /* this should never happen, but better safe than sorry */
1552 if (max_len < ETH_HLEN)
1553 return max_len;
1554
1555 /* initialize network frame pointer */
1556 hdr.network = data;
1557
1558 /* set first protocol and move network header forward */
1559 protocol = hdr.eth->h_proto;
1560 hdr.network += ETH_HLEN;
1561
1562 /* handle any vlan tag if present */
1563 if (protocol == htons(ETH_P_8021Q)) {
1564 if ((hdr.network - data) > (max_len - VLAN_HLEN))
1565 return max_len;
1566
1567 protocol = hdr.vlan->h_vlan_encapsulated_proto;
1568 hdr.network += VLAN_HLEN;
1569 }
1570
1571 /* handle L3 protocols */
1572 if (protocol == htons(ETH_P_IP)) {
1573 if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
1574 return max_len;
1575
1576 /* access ihl as a u8 to avoid unaligned access on ia64 */
1577 hlen = (hdr.network[0] & 0x0F) << 2;
1578
1579 /* verify hlen meets minimum size requirements */
1580 if (hlen < sizeof(struct iphdr))
1581 return hdr.network - data;
1582
1583 /* record next protocol if header is present */
1584 if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
1585 nexthdr = hdr.ipv4->protocol;
1586 } else if (protocol == htons(ETH_P_IPV6)) {
1587 if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
1588 return max_len;
1589
1590 /* record next protocol */
1591 nexthdr = hdr.ipv6->nexthdr;
1592 hlen = sizeof(struct ipv6hdr);
1593#ifdef IXGBE_FCOE
1594 } else if (protocol == htons(ETH_P_FCOE)) {
1595 if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN))
1596 return max_len;
1597 hlen = FCOE_HEADER_LEN;
1598#endif
1599 } else {
1600 return hdr.network - data;
1601 }
1602
1603 /* relocate pointer to start of L4 header */
1604 hdr.network += hlen;
1605
1606 /* finally sort out TCP/UDP */
1607 if (nexthdr == IPPROTO_TCP) {
1608 if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
1609 return max_len;
1610
1611 /* access doff as a u8 to avoid unaligned access on ia64 */
1612 hlen = (hdr.network[12] & 0xF0) >> 2;
1613
1614 /* verify hlen meets minimum size requirements */
1615 if (hlen < sizeof(struct tcphdr))
1616 return hdr.network - data;
1617
1618 hdr.network += hlen;
1619 } else if (nexthdr == IPPROTO_UDP) {
1620 if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
1621 return max_len;
1622
1623 hdr.network += sizeof(struct udphdr);
1624 }
1625
1626 /*
1627 * If everything has gone correctly hdr.network should be the
1628 * data section of the packet and will be the end of the header.
1629 * If not then it probably represents the end of the last recognized
1630 * header.
1631 */
1632 if ((hdr.network - data) < max_len)
1633 return hdr.network - data;
1634 else
1635 return max_len;
1636}
1637
1638static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring, 1524static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
1639 struct sk_buff *skb) 1525 struct sk_buff *skb)
1640{ 1526{
@@ -1793,7 +1679,7 @@ static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
1793 * we need the header to contain the greater of either ETH_HLEN or 1679 * we need the header to contain the greater of either ETH_HLEN or
1794 * 60 bytes if the skb->len is less than 60 for skb_pad. 1680 * 60 bytes if the skb->len is less than 60 for skb_pad.
1795 */ 1681 */
1796 pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE); 1682 pull_len = eth_get_headlen(va, IXGBE_RX_HDR_SIZE);
1797 1683
1798 /* align pull length to size of long to optimize memcpy performance */ 1684 /* align pull length to size of long to optimize memcpy performance */
1799 skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); 1685 skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
@@ -6319,25 +6205,55 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
6319 ixgbe_ping_all_vfs(adapter); 6205 ixgbe_ping_all_vfs(adapter);
6320} 6206}
6321 6207
6208static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter)
6209{
6210 int i;
6211
6212 for (i = 0; i < adapter->num_tx_queues; i++) {
6213 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
6214
6215 if (tx_ring->next_to_use != tx_ring->next_to_clean)
6216 return true;
6217 }
6218
6219 return false;
6220}
6221
6222static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter)
6223{
6224 struct ixgbe_hw *hw = &adapter->hw;
6225 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
6226 u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
6227
6228 int i, j;
6229
6230 if (!adapter->num_vfs)
6231 return false;
6232
6233 for (i = 0; i < adapter->num_vfs; i++) {
6234 for (j = 0; j < q_per_pool; j++) {
6235 u32 h, t;
6236
6237 h = IXGBE_READ_REG(hw, IXGBE_PVFTDHN(q_per_pool, i, j));
6238 t = IXGBE_READ_REG(hw, IXGBE_PVFTDTN(q_per_pool, i, j));
6239
6240 if (h != t)
6241 return true;
6242 }
6243 }
6244
6245 return false;
6246}
6247
6322/** 6248/**
6323 * ixgbe_watchdog_flush_tx - flush queues on link down 6249 * ixgbe_watchdog_flush_tx - flush queues on link down
6324 * @adapter: pointer to the device adapter structure 6250 * @adapter: pointer to the device adapter structure
6325 **/ 6251 **/
6326static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter) 6252static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
6327{ 6253{
6328 int i;
6329 int some_tx_pending = 0;
6330
6331 if (!netif_carrier_ok(adapter->netdev)) { 6254 if (!netif_carrier_ok(adapter->netdev)) {
6332 for (i = 0; i < adapter->num_tx_queues; i++) { 6255 if (ixgbe_ring_tx_pending(adapter) ||
6333 struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; 6256 ixgbe_vf_tx_pending(adapter)) {
6334 if (tx_ring->next_to_use != tx_ring->next_to_clean) {
6335 some_tx_pending = 1;
6336 break;
6337 }
6338 }
6339
6340 if (some_tx_pending) {
6341 /* We've lost link, so the controller stops DMA, 6257 /* We've lost link, so the controller stops DMA,
6342 * but we've got queued Tx work that's never going 6258 * but we've got queued Tx work that's never going
6343 * to get done, so reset controller to flush Tx. 6259 * to get done, so reset controller to flush Tx.
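
The flush-on-link-down test is split into two predicates because the PF has a software view (next_to_use vs next_to_clean) only of its own rings; for VF rings it must compare the hardware head and tail registers via the new PVFTDHN/PVFTDTN accessors. The core predicate, modeled standalone:

    #include <stdbool.h>
    #include <stdint.h>

    struct txring_model {
        uint16_t next_to_use;    /* producer index */
        uint16_t next_to_clean;  /* consumer index */
    };

    /* Any ring where the producer is ahead of the consumer still holds
     * queued work; with the link down that work can never complete, so
     * the caller resets the controller to flush it. */
    static bool any_tx_pending(const struct txring_model *rings, int n)
    {
        for (int i = 0; i < n; i++)
            if (rings[i].next_to_use != rings[i].next_to_clean)
                return true;
        return false;
    }
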
@@ -6837,6 +6753,36 @@ static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
6837 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 6753 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
6838} 6754}
6839 6755
6756static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
6757{
6758 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
6759
6760 /* Herbert's original patch had:
6761 * smp_mb__after_netif_stop_queue();
6762 * but since that doesn't exist yet, just open code it.
6763 */
6764 smp_mb();
6765
6766 /* We need to check again in a case another CPU has just
6767 * made room available.
6768 */
6769 if (likely(ixgbe_desc_unused(tx_ring) < size))
6770 return -EBUSY;
6771
6772 /* A reprieve! - use start_queue because it doesn't call schedule */
6773 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
6774 ++tx_ring->tx_stats.restart_queue;
6775 return 0;
6776}
6777
6778static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
6779{
6780 if (likely(ixgbe_desc_unused(tx_ring) >= size))
6781 return 0;
6782
6783 return __ixgbe_maybe_stop_tx(tx_ring, size);
6784}
6785
6840#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \ 6786#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
6841 IXGBE_TXD_CMD_RS) 6787 IXGBE_TXD_CMD_RS)
6842 6788
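
The relocated helper keeps the classic stop-then-recheck shape: stop the subqueue, force a full memory barrier so the completion path's frees become visible, then re-test the descriptor count and wake if space opened up in the window. A userspace model using a C11 fence in place of smp_mb() (illustrative, not the kernel primitives):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct q_model {
        atomic_uint free_desc;
        atomic_bool stopped;
    };

    /* The fence pairs with the completion path, which frees descriptors
     * and then re-checks "stopped"; together they close the window in
     * which a wakeup could be lost. */
    static int maybe_stop(struct q_model *q, unsigned int need)
    {
        if (atomic_load(&q->free_desc) >= need)
            return 0;

        atomic_store(&q->stopped, true);
        atomic_thread_fence(memory_order_seq_cst);  /* models smp_mb() */

        if (atomic_load(&q->free_desc) < need)
            return -1;                              /* genuinely full */

        atomic_store(&q->stopped, false);           /* a reprieve */
        return 0;
    }
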
@@ -6958,8 +6904,12 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
6958 6904
6959 tx_ring->next_to_use = i; 6905 tx_ring->next_to_use = i;
6960 6906
6961 /* notify HW of packet */ 6907 ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
6962 ixgbe_write_tail(tx_ring, i); 6908
6909 if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
6910 /* notify HW of packet */
6911 ixgbe_write_tail(tx_ring, i);
6912 }
6963 6913
6964 return; 6914 return;
6965dma_error: 6915dma_error:
@@ -7067,32 +7017,6 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
7067 input, common, ring->queue_index); 7017 input, common, ring->queue_index);
7068} 7018}
7069 7019
7070static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
7071{
7072 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
7073 /* Herbert's original patch had:
7074 * smp_mb__after_netif_stop_queue();
7075 * but since that doesn't exist yet, just open code it. */
7076 smp_mb();
7077
7078 /* We need to check again in a case another CPU has just
7079 * made room available. */
7080 if (likely(ixgbe_desc_unused(tx_ring) < size))
7081 return -EBUSY;
7082
7083 /* A reprieve! - use start_queue because it doesn't call schedule */
7084 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
7085 ++tx_ring->tx_stats.restart_queue;
7086 return 0;
7087}
7088
7089static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
7090{
7091 if (likely(ixgbe_desc_unused(tx_ring) >= size))
7092 return 0;
7093 return __ixgbe_maybe_stop_tx(tx_ring, size);
7094}
7095
7096static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, 7020static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
7097 void *accel_priv, select_queue_fallback_t fallback) 7021 void *accel_priv, select_queue_fallback_t fallback)
7098{ 7022{
@@ -7261,8 +7185,6 @@ xmit_fcoe:
7261#endif /* IXGBE_FCOE */ 7185#endif /* IXGBE_FCOE */
7262 ixgbe_tx_map(tx_ring, first, hdr_len); 7186 ixgbe_tx_map(tx_ring, first, hdr_len);
7263 7187
7264 ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
7265
7266 return NETDEV_TX_OK; 7188 return NETDEV_TX_OK;
7267 7189
7268out_drop: 7190out_drop:
@@ -7735,39 +7657,13 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
7735 const unsigned char *addr, 7657 const unsigned char *addr,
7736 u16 flags) 7658 u16 flags)
7737{ 7659{
7738 struct ixgbe_adapter *adapter = netdev_priv(dev); 7660 /* guarantee we can provide a unique filter for the unicast address */
7739 int err;
7740
7741 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
7742 return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags);
7743
7744 /* Hardware does not support aging addresses so if a
7745 * ndm_state is given only allow permanent addresses
7746 */
7747 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
7748 pr_info("%s: FDB only supports static addresses\n",
7749 ixgbe_driver_name);
7750 return -EINVAL;
7751 }
7752
7753 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { 7661 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
7754 u32 rar_uc_entries = IXGBE_MAX_PF_MACVLANS; 7662 if (IXGBE_MAX_PF_MACVLANS <= netdev_uc_count(dev))
7755 7663 return -ENOMEM;
7756 if (netdev_uc_count(dev) < rar_uc_entries)
7757 err = dev_uc_add_excl(dev, addr);
7758 else
7759 err = -ENOMEM;
7760 } else if (is_multicast_ether_addr(addr)) {
7761 err = dev_mc_add_excl(dev, addr);
7762 } else {
7763 err = -EINVAL;
7764 } 7664 }
7765 7665
7766 /* Only return duplicate errors if NLM_F_EXCL is set */ 7666 return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags);
7767 if (err == -EEXIST && !(flags & NLM_F_EXCL))
7768 err = 0;
7769
7770 return err;
7771} 7667}
7772 7668
7773static int ixgbe_ndo_bridge_setlink(struct net_device *dev, 7669static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
@@ -7830,9 +7726,17 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
7830{ 7726{
7831 struct ixgbe_fwd_adapter *fwd_adapter = NULL; 7727 struct ixgbe_fwd_adapter *fwd_adapter = NULL;
7832 struct ixgbe_adapter *adapter = netdev_priv(pdev); 7728 struct ixgbe_adapter *adapter = netdev_priv(pdev);
7729 int used_pools = adapter->num_vfs + adapter->num_rx_pools;
7833 unsigned int limit; 7730 unsigned int limit;
7834 int pool, err; 7731 int pool, err;
7835 7732
 7733 /* Hardware has a limited number of available pools. Each VF and the
 7734 * PF require a pool. Check to ensure we don't attempt to use more
 7735 * than the available number of pools.
7736 */
7737 if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
7738 return ERR_PTR(-EINVAL);
7739
7836#ifdef CONFIG_RPS 7740#ifdef CONFIG_RPS
7837 if (vdev->num_rx_queues != vdev->num_tx_queues) { 7741 if (vdev->num_rx_queues != vdev->num_tx_queues) {
7838 netdev_info(pdev, "%s: Only supports a single queue count for TX and RX\n", 7742 netdev_info(pdev, "%s: Only supports a single queue count for TX and RX\n",
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index c14d4d89672f..706fc69aa0c5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -250,13 +250,15 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
250 if (err) 250 if (err)
251 return err; 251 return err;
252 252
253 /* While the SR-IOV capability structure reports total VFs to be 253 /* While the SR-IOV capability structure reports total VFs to be 64,
254 * 64 we limit the actual number that can be allocated to 63 so 254 * we have to limit the actual number allocated based on two factors.
255 * that some transmit/receive resources can be reserved to the 255 * First, we reserve some transmit/receive resources for the PF.
256 * PF. The PCI bus driver already checks for other values out of 256 * Second, VMDQ also uses the same pools that SR-IOV does. We need to
257 * range. 257 * account for this, so that we don't accidentally allocate more VFs
258 * than we have available pools. The PCI bus driver already checks for
259 * other values out of range.
258 */ 260 */
259 if (num_vfs > IXGBE_MAX_VFS_DRV_LIMIT) 261 if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VF_FUNCTIONS)
260 return -EPERM; 262 return -EPERM;
261 263
262 adapter->num_vfs = num_vfs; 264 adapter->num_vfs = num_vfs;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index e6b07c2a01fe..dfd55d83bc03 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -2194,6 +2194,8 @@ enum {
2194#define IXGBE_VFLRE(_i) ((((_i) & 1) ? 0x001C0 : 0x00600)) 2194#define IXGBE_VFLRE(_i) ((((_i) & 1) ? 0x001C0 : 0x00600))
2195#define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4)) 2195#define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4))
2196/* Translated register #defines */ 2196/* Translated register #defines */
2197#define IXGBE_PVFTDH(P) (0x06010 + (0x40 * (P)))
2198#define IXGBE_PVFTDT(P) (0x06018 + (0x40 * (P)))
2197#define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P))) 2199#define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P)))
2198#define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P))) 2200#define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P)))
2199 2201
@@ -2202,6 +2204,11 @@ enum {
2202#define IXGBE_PVFTDWBAHn(q_per_pool, vf_number, vf_q_index) \ 2204#define IXGBE_PVFTDWBAHn(q_per_pool, vf_number, vf_q_index) \
2203 (IXGBE_PVFTDWBAH((q_per_pool)*(vf_number) + (vf_q_index))) 2205 (IXGBE_PVFTDWBAH((q_per_pool)*(vf_number) + (vf_q_index)))
2204 2206
2207#define IXGBE_PVFTDHN(q_per_pool, vf_number, vf_q_index) \
2208 (IXGBE_PVFTDH((q_per_pool)*(vf_number) + (vf_q_index)))
2209#define IXGBE_PVFTDTN(q_per_pool, vf_number, vf_q_index) \
2210 (IXGBE_PVFTDT((q_per_pool)*(vf_number) + (vf_q_index)))
2211
2205enum ixgbe_fdir_pballoc_type { 2212enum ixgbe_fdir_pballoc_type {
2206 IXGBE_FDIR_PBALLOC_NONE = 0, 2213 IXGBE_FDIR_PBALLOC_NONE = 0,
2207 IXGBE_FDIR_PBALLOC_64K = 1, 2214 IXGBE_FDIR_PBALLOC_64K = 1,
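
The new head/tail macros follow the existing PVFTDWBALn convention: the per-pool register file is indexed by flattening (vf_number, vf_q_index) into q_per_pool * vf_number + vf_q_index, with each queue's register block 0x40 bytes apart. The arithmetic spelled out (offsets copied from the #defines above):

    #include <stdint.h>

    static uint32_t pvftdh_offset(uint32_t q_per_pool, uint32_t vf,
                                  uint32_t q)
    {
        uint32_t p = q_per_pool * vf + q;  /* flatten (vf, queue) */
        return 0x06010 + 0x40 * p;         /* IXGBE_PVFTDH(P) */
    }

    static uint32_t pvftdt_offset(uint32_t q_per_pool, uint32_t vf,
                                  uint32_t q)
    {
        uint32_t p = q_per_pool * vf + q;
        return 0x06018 + 0x40 * p;         /* IXGBE_PVFTDT(P) */
    }
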
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 4d44d64ae387..9cddd56d02c3 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -434,6 +434,21 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
434 if (!(links_reg & IXGBE_LINKS_UP)) 434 if (!(links_reg & IXGBE_LINKS_UP))
435 goto out; 435 goto out;
436 436
437 /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
438 * before the link status is correct
439 */
440 if (mac->type == ixgbe_mac_82599_vf) {
441 int i;
442
443 for (i = 0; i < 5; i++) {
444 udelay(100);
445 links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
446
447 if (!(links_reg & IXGBE_LINKS_UP))
448 goto out;
449 }
450 }
451
437 switch (links_reg & IXGBE_LINKS_SPEED_82599) { 452 switch (links_reg & IXGBE_LINKS_SPEED_82599) {
438 case IXGBE_LINKS_SPEED_10G_82599: 453 case IXGBE_LINKS_SPEED_10G_82599:
439 *speed = IXGBE_LINK_SPEED_10GB_FULL; 454 *speed = IXGBE_LINK_SPEED_10GB_FULL;
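
On 82599 VFs the link bit can read as up before an SFP+ or DA link has actually settled, so the check re-samples the register five times at 100us intervals and only trusts a link that stays up throughout (covering the ~500us the comment cites). The debounce loop in isolation (the callbacks stand in for the IXGBE_VFLINKS read and udelay()):

    #include <stdbool.h>

    static bool link_stable(bool (*read_link_up)(void),
                            void (*delay_us)(unsigned int))
    {
        if (!read_link_up())
            return false;

        for (int i = 0; i < 5; i++) {
            delay_us(100);
            if (!read_link_up())
                return false;   /* link flapped while settling */
        }
        return true;            /* stayed up for the whole window */
    }
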
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 9c909d23f14c..14686b6f4bc5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -588,6 +588,8 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
588 skb_copy_to_linear_data(skb, va, length); 588 skb_copy_to_linear_data(skb, va, length);
589 skb->tail += length; 589 skb->tail += length;
590 } else { 590 } else {
591 unsigned int pull_len;
592
591 /* Move relevant fragments to skb */ 593 /* Move relevant fragments to skb */
592 used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, frags, 594 used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, frags,
593 skb, length); 595 skb, length);
@@ -597,16 +599,17 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
597 } 599 }
598 skb_shinfo(skb)->nr_frags = used_frags; 600 skb_shinfo(skb)->nr_frags = used_frags;
599 601
602 pull_len = eth_get_headlen(va, SMALL_PACKET_SIZE);
600 /* Copy headers into the skb linear buffer */ 603 /* Copy headers into the skb linear buffer */
601 memcpy(skb->data, va, HEADER_COPY_SIZE); 604 memcpy(skb->data, va, pull_len);
602 skb->tail += HEADER_COPY_SIZE; 605 skb->tail += pull_len;
603 606
604 /* Skip headers in first fragment */ 607 /* Skip headers in first fragment */
605 skb_shinfo(skb)->frags[0].page_offset += HEADER_COPY_SIZE; 608 skb_shinfo(skb)->frags[0].page_offset += pull_len;
606 609
607 /* Adjust size of first fragment */ 610 /* Adjust size of first fragment */
608 skb_frag_size_sub(&skb_shinfo(skb)->frags[0], HEADER_COPY_SIZE); 611 skb_frag_size_sub(&skb_shinfo(skb)->frags[0], pull_len);
609 skb->data_len = length - HEADER_COPY_SIZE; 612 skb->data_len = length - pull_len;
610 } 613 }
611 return skb; 614 return skb;
612} 615}
@@ -769,7 +772,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
769 gro_skb->ip_summed = CHECKSUM_UNNECESSARY; 772 gro_skb->ip_summed = CHECKSUM_UNNECESSARY;
770 773
771 if (l2_tunnel) 774 if (l2_tunnel)
772 gro_skb->encapsulation = 1; 775 gro_skb->csum_level = 1;
773 if ((cqe->vlan_my_qpn & 776 if ((cqe->vlan_my_qpn &
774 cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) && 777 cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) &&
775 (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { 778 (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
@@ -823,8 +826,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
823 skb->protocol = eth_type_trans(skb, dev); 826 skb->protocol = eth_type_trans(skb, dev);
824 skb_record_rx_queue(skb, cq->ring); 827 skb_record_rx_queue(skb, cq->ring);
825 828
826 if (l2_tunnel) 829 if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY)
827 skb->encapsulation = 1; 830 skb->csum_level = 1;
828 831
829 if (dev->features & NETIF_F_RXHASH) 832 if (dev->features & NETIF_F_RXHASH)
830 skb_set_hash(skb, 833 skb_set_hash(skb,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index dae3da6d8dd0..bc8f51c77d80 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -319,7 +319,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
319 } 319 }
320 } 320 }
321 } 321 }
322 dev_kfree_skb_any(skb); 322 dev_consume_skb_any(skb);
323 return tx_info->nr_txbb; 323 return tx_info->nr_txbb;
324} 324}
325 325
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 925b296d8ab8..f39cae620f61 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1481,7 +1481,7 @@ static int phy_init(struct net_device *dev)
1481 } 1481 }
1482 1482
1483 /* phy vendor specific configuration */ 1483 /* phy vendor specific configuration */
1484 if ((np->phy_oui == PHY_OUI_CICADA)) { 1484 if (np->phy_oui == PHY_OUI_CICADA) {
1485 if (init_cicada(dev, np, phyinterface)) { 1485 if (init_cicada(dev, np, phyinterface)) {
1486 netdev_info(dev, "%s: phy init failed\n", 1486 netdev_info(dev, "%s: phy init failed\n",
1487 pci_name(np->pci_dev)); 1487 pci_name(np->pci_dev));
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index b84f5ea3d659..e56c1bb36141 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -39,8 +39,8 @@
39 39
40#define _QLCNIC_LINUX_MAJOR 5 40#define _QLCNIC_LINUX_MAJOR 5
41#define _QLCNIC_LINUX_MINOR 3 41#define _QLCNIC_LINUX_MINOR 3
42#define _QLCNIC_LINUX_SUBVERSION 61 42#define _QLCNIC_LINUX_SUBVERSION 62
43#define QLCNIC_LINUX_VERSIONID "5.3.61" 43#define QLCNIC_LINUX_VERSIONID "5.3.62"
44#define QLCNIC_DRV_IDC_VER 0x01 44#define QLCNIC_DRV_IDC_VER 0x01
45#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 45#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
46 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) 46 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -540,6 +540,8 @@ struct qlcnic_hardware_context {
540 u8 lb_mode; 540 u8 lb_mode;
541 u16 vxlan_port; 541 u16 vxlan_port;
542 struct device *hwmon_dev; 542 struct device *hwmon_dev;
543 u32 post_mode;
544 bool run_post;
543}; 545};
544 546
545struct qlcnic_adapter_stats { 547struct qlcnic_adapter_stats {
@@ -2283,6 +2285,7 @@ extern const struct ethtool_ops qlcnic_ethtool_failed_ops;
2283 2285
2284#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020 2286#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
2285#define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030 2287#define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030
2288#define PCI_DEVICE_ID_QLOGIC_QLE8830 0x8830
2286#define PCI_DEVICE_ID_QLOGIC_VF_QLE834X 0x8430 2289#define PCI_DEVICE_ID_QLOGIC_VF_QLE834X 0x8430
2287#define PCI_DEVICE_ID_QLOGIC_QLE844X 0x8040 2290#define PCI_DEVICE_ID_QLOGIC_QLE844X 0x8040
2288#define PCI_DEVICE_ID_QLOGIC_VF_QLE844X 0x8440 2291#define PCI_DEVICE_ID_QLOGIC_VF_QLE844X 0x8440
@@ -2307,6 +2310,7 @@ static inline bool qlcnic_83xx_check(struct qlcnic_adapter *adapter)
2307 bool status; 2310 bool status;
2308 2311
2309 status = ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) || 2312 status = ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) ||
2313 (device == PCI_DEVICE_ID_QLOGIC_QLE8830) ||
2310 (device == PCI_DEVICE_ID_QLOGIC_QLE844X) || 2314 (device == PCI_DEVICE_ID_QLOGIC_QLE844X) ||
2311 (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X) || 2315 (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X) ||
2312 (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X)) ? true : false; 2316 (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X)) ? true : false;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 476e4998ef99..840bf36b5e9d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -35,6 +35,35 @@ static void qlcnic_83xx_get_beacon_state(struct qlcnic_adapter *);
35#define QLC_SKIP_INACTIVE_PCI_REGS 7 35#define QLC_SKIP_INACTIVE_PCI_REGS 7
36#define QLC_MAX_LEGACY_FUNC_SUPP 8 36#define QLC_MAX_LEGACY_FUNC_SUPP 8
37 37
38/* 83xx Module type */
39#define QLC_83XX_MODULE_FIBRE_10GBASE_LRM 0x1 /* 10GBase-LRM */
40#define QLC_83XX_MODULE_FIBRE_10GBASE_LR 0x2 /* 10GBase-LR */
41#define QLC_83XX_MODULE_FIBRE_10GBASE_SR 0x3 /* 10GBase-SR */
42#define QLC_83XX_MODULE_DA_10GE_PASSIVE_CP 0x4 /* 10GE passive
 43 * copper (compliant)
44 */
45#define QLC_83XX_MODULE_DA_10GE_ACTIVE_CP 0x5 /* 10GE active limiting
 46 * copper (compliant)
47 */
48#define QLC_83XX_MODULE_DA_10GE_LEGACY_CP 0x6 /* 10GE passive copper
49 * (legacy, best effort)
50 */
51#define QLC_83XX_MODULE_FIBRE_1000BASE_SX 0x7 /* 1000Base-SX */
52#define QLC_83XX_MODULE_FIBRE_1000BASE_LX 0x8 /* 1000Base-LX */
53#define QLC_83XX_MODULE_FIBRE_1000BASE_CX 0x9 /* 1000Base-CX */
 54#define QLC_83XX_MODULE_TP_1000BASE_T 0xa /* 1000Base-T */
55#define QLC_83XX_MODULE_DA_1GE_PASSIVE_CP 0xb /* 1GE passive copper
56 * (legacy, best effort)
57 */
58#define QLC_83XX_MODULE_UNKNOWN 0xf /* Unknown module type */
59
60/* Port types */
61#define QLC_83XX_10_CAPABLE BIT_8
62#define QLC_83XX_100_CAPABLE BIT_9
63#define QLC_83XX_1G_CAPABLE BIT_10
64#define QLC_83XX_10G_CAPABLE BIT_11
65#define QLC_83XX_AUTONEG_ENABLE BIT_15
66
38static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = { 67static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
39 {QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1}, 68 {QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1},
40 {QLCNIC_CMD_CONFIG_INTRPT, 18, 34}, 69 {QLCNIC_CMD_CONFIG_INTRPT, 18, 34},
@@ -667,6 +696,7 @@ void qlcnic_83xx_write_crb(struct qlcnic_adapter *adapter, char *buf,
667 696
668int qlcnic_83xx_get_port_info(struct qlcnic_adapter *adapter) 697int qlcnic_83xx_get_port_info(struct qlcnic_adapter *adapter)
669{ 698{
699 struct qlcnic_hardware_context *ahw = adapter->ahw;
670 int status; 700 int status;
671 701
672 status = qlcnic_83xx_get_port_config(adapter); 702 status = qlcnic_83xx_get_port_config(adapter);
@@ -674,13 +704,20 @@ int qlcnic_83xx_get_port_info(struct qlcnic_adapter *adapter)
674 dev_err(&adapter->pdev->dev, 704 dev_err(&adapter->pdev->dev,
675 "Get Port Info failed\n"); 705 "Get Port Info failed\n");
676 } else { 706 } else {
677 if (QLC_83XX_SFP_10G_CAPABLE(adapter->ahw->port_config))
678 adapter->ahw->port_type = QLCNIC_XGBE;
679 else
680 adapter->ahw->port_type = QLCNIC_GBE;
681 707
682 if (QLC_83XX_AUTONEG(adapter->ahw->port_config)) 708 if (ahw->port_config & QLC_83XX_10G_CAPABLE) {
683 adapter->ahw->link_autoneg = AUTONEG_ENABLE; 709 ahw->port_type = QLCNIC_XGBE;
710 } else if (ahw->port_config & QLC_83XX_10_CAPABLE ||
711 ahw->port_config & QLC_83XX_100_CAPABLE ||
712 ahw->port_config & QLC_83XX_1G_CAPABLE) {
713 ahw->port_type = QLCNIC_GBE;
714 } else {
715 ahw->port_type = QLCNIC_XGBE;
716 }
717
718 if (QLC_83XX_AUTONEG(ahw->port_config))
719 ahw->link_autoneg = AUTONEG_ENABLE;
720
684 } 721 }
685 return status; 722 return status;
686} 723}
@@ -2664,7 +2701,7 @@ static int qlcnic_83xx_poll_flash_status_reg(struct qlcnic_adapter *adapter)
2664 QLC_83XX_FLASH_STATUS_READY) 2701 QLC_83XX_FLASH_STATUS_READY)
2665 break; 2702 break;
2666 2703
2667 msleep(QLC_83XX_FLASH_STATUS_REG_POLL_DELAY); 2704 usleep_range(1000, 1100);
2668 } while (--retries); 2705 } while (--retries);
2669 2706
2670 if (!retries) 2707 if (!retries)
@@ -3176,22 +3213,33 @@ int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter)
3176 break; 3213 break;
3177 } 3214 }
3178 config = cmd.rsp.arg[3]; 3215 config = cmd.rsp.arg[3];
3179 if (QLC_83XX_SFP_PRESENT(config)) { 3216 switch (QLC_83XX_SFP_MODULE_TYPE(config)) {
3180 switch (ahw->module_type) { 3217 case QLC_83XX_MODULE_FIBRE_10GBASE_LRM:
3181 case LINKEVENT_MODULE_OPTICAL_UNKNOWN: 3218 case QLC_83XX_MODULE_FIBRE_10GBASE_LR:
3182 case LINKEVENT_MODULE_OPTICAL_SRLR: 3219 case QLC_83XX_MODULE_FIBRE_10GBASE_SR:
3183 case LINKEVENT_MODULE_OPTICAL_LRM: 3220 ahw->supported_type = PORT_FIBRE;
3184 case LINKEVENT_MODULE_OPTICAL_SFP_1G: 3221 ahw->port_type = QLCNIC_XGBE;
3185 ahw->supported_type = PORT_FIBRE; 3222 break;
3186 break; 3223 case QLC_83XX_MODULE_FIBRE_1000BASE_SX:
3187 case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE: 3224 case QLC_83XX_MODULE_FIBRE_1000BASE_LX:
3188 case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN: 3225 case QLC_83XX_MODULE_FIBRE_1000BASE_CX:
3189 case LINKEVENT_MODULE_TWINAX: 3226 ahw->supported_type = PORT_FIBRE;
3190 ahw->supported_type = PORT_TP; 3227 ahw->port_type = QLCNIC_GBE;
3191 break; 3228 break;
3192 default: 3229 case QLC_83XX_MODULE_TP_1000BASE_T:
3193 ahw->supported_type = PORT_OTHER; 3230 ahw->supported_type = PORT_TP;
3194 } 3231 ahw->port_type = QLCNIC_GBE;
3232 break;
3233 case QLC_83XX_MODULE_DA_10GE_PASSIVE_CP:
3234 case QLC_83XX_MODULE_DA_10GE_ACTIVE_CP:
3235 case QLC_83XX_MODULE_DA_10GE_LEGACY_CP:
3236 case QLC_83XX_MODULE_DA_1GE_PASSIVE_CP:
3237 ahw->supported_type = PORT_DA;
3238 ahw->port_type = QLCNIC_XGBE;
3239 break;
3240 default:
3241 ahw->supported_type = PORT_OTHER;
3242 ahw->port_type = QLCNIC_XGBE;
3195 } 3243 }
3196 if (config & 1) 3244 if (config & 1)
3197 err = 1; 3245 err = 1;
@@ -3204,9 +3252,9 @@ out:
3204int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter, 3252int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter,
3205 struct ethtool_cmd *ecmd) 3253 struct ethtool_cmd *ecmd)
3206{ 3254{
3255 struct qlcnic_hardware_context *ahw = adapter->ahw;
3207 u32 config = 0; 3256 u32 config = 0;
3208 int status = 0; 3257 int status = 0;
3209 struct qlcnic_hardware_context *ahw = adapter->ahw;
3210 3258
3211 if (!test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) { 3259 if (!test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) {
3212 /* Get port configuration info */ 3260 /* Get port configuration info */
@@ -3229,20 +3277,41 @@ int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter,
3229 ecmd->autoneg = AUTONEG_DISABLE; 3277 ecmd->autoneg = AUTONEG_DISABLE;
3230 } 3278 }
3231 3279
3232 if (ahw->port_type == QLCNIC_XGBE) { 3280 ecmd->supported = (SUPPORTED_10baseT_Full |
3233 ecmd->supported = SUPPORTED_10000baseT_Full; 3281 SUPPORTED_100baseT_Full |
3234 ecmd->advertising = ADVERTISED_10000baseT_Full; 3282 SUPPORTED_1000baseT_Full |
3283 SUPPORTED_10000baseT_Full |
3284 SUPPORTED_Autoneg);
3285
3286 if (ecmd->autoneg == AUTONEG_ENABLE) {
3287 if (ahw->port_config & QLC_83XX_10_CAPABLE)
3288 ecmd->advertising |= SUPPORTED_10baseT_Full;
3289 if (ahw->port_config & QLC_83XX_100_CAPABLE)
3290 ecmd->advertising |= SUPPORTED_100baseT_Full;
3291 if (ahw->port_config & QLC_83XX_1G_CAPABLE)
3292 ecmd->advertising |= SUPPORTED_1000baseT_Full;
3293 if (ahw->port_config & QLC_83XX_10G_CAPABLE)
3294 ecmd->advertising |= SUPPORTED_10000baseT_Full;
3295 if (ahw->port_config & QLC_83XX_AUTONEG_ENABLE)
3296 ecmd->advertising |= ADVERTISED_Autoneg;
3235 } else { 3297 } else {
3236 ecmd->supported = (SUPPORTED_10baseT_Half | 3298 switch (ahw->link_speed) {
3237 SUPPORTED_10baseT_Full | 3299 case SPEED_10:
3238 SUPPORTED_100baseT_Half | 3300 ecmd->advertising = SUPPORTED_10baseT_Full;
3239 SUPPORTED_100baseT_Full | 3301 break;
3240 SUPPORTED_1000baseT_Half | 3302 case SPEED_100:
3241 SUPPORTED_1000baseT_Full); 3303 ecmd->advertising = SUPPORTED_100baseT_Full;
3242 ecmd->advertising = (ADVERTISED_100baseT_Half | 3304 break;
3243 ADVERTISED_100baseT_Full | 3305 case SPEED_1000:
3244 ADVERTISED_1000baseT_Half | 3306 ecmd->advertising = SUPPORTED_1000baseT_Full;
3245 ADVERTISED_1000baseT_Full); 3307 break;
3308 case SPEED_10000:
3309 ecmd->advertising = SUPPORTED_10000baseT_Full;
3310 break;
3311 default:
3312 break;
3313 }
3314
3246 } 3315 }
3247 3316
3248 switch (ahw->supported_type) { 3317 switch (ahw->supported_type) {
@@ -3258,6 +3327,12 @@ int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter,
3258 ecmd->port = PORT_TP; 3327 ecmd->port = PORT_TP;
3259 ecmd->transceiver = XCVR_INTERNAL; 3328 ecmd->transceiver = XCVR_INTERNAL;
3260 break; 3329 break;
3330 case PORT_DA:
3331 ecmd->supported |= SUPPORTED_FIBRE;
3332 ecmd->advertising |= ADVERTISED_FIBRE;
3333 ecmd->port = PORT_DA;
3334 ecmd->transceiver = XCVR_EXTERNAL;
3335 break;
3261 default: 3336 default:
3262 ecmd->supported |= SUPPORTED_FIBRE; 3337 ecmd->supported |= SUPPORTED_FIBRE;
3263 ecmd->advertising |= ADVERTISED_FIBRE; 3338 ecmd->advertising |= ADVERTISED_FIBRE;
@@ -3272,35 +3347,60 @@ int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter,
3272int qlcnic_83xx_set_settings(struct qlcnic_adapter *adapter, 3347int qlcnic_83xx_set_settings(struct qlcnic_adapter *adapter,
3273 struct ethtool_cmd *ecmd) 3348 struct ethtool_cmd *ecmd)
3274{ 3349{
3275 int status = 0; 3350 struct qlcnic_hardware_context *ahw = adapter->ahw;
3276 	u32 config = adapter->ahw->port_config;	 3351 	u32 config = ahw->port_config;
3352 int status = 0;
3277 3353
3278 if (ecmd->autoneg) 3354 /* 83xx devices do not support Half duplex */
3279 adapter->ahw->port_config |= BIT_15; 3355 if (ecmd->duplex == DUPLEX_HALF) {
3280 3356 netdev_info(adapter->netdev,
3281 switch (ethtool_cmd_speed(ecmd)) { 3357 "Half duplex mode not supported\n");
3282 case SPEED_10: 3358 return -EINVAL;
3283 adapter->ahw->port_config |= BIT_8;
3284 break;
3285 case SPEED_100:
3286 adapter->ahw->port_config |= BIT_9;
3287 break;
3288 case SPEED_1000:
3289 adapter->ahw->port_config |= BIT_10;
3290 break;
3291 case SPEED_10000:
3292 adapter->ahw->port_config |= BIT_11;
3293 break;
3294 default:
3295 return -EINVAL;
3296 } 3359 }
3297 3360
3361 if (ecmd->autoneg) {
3362 ahw->port_config |= QLC_83XX_AUTONEG_ENABLE;
3363 ahw->port_config |= (QLC_83XX_100_CAPABLE |
3364 QLC_83XX_1G_CAPABLE |
3365 QLC_83XX_10G_CAPABLE);
3366 } else { /* force speed */
3367 ahw->port_config &= ~QLC_83XX_AUTONEG_ENABLE;
3368 switch (ethtool_cmd_speed(ecmd)) {
3369 case SPEED_10:
3370 ahw->port_config &= ~(QLC_83XX_100_CAPABLE |
3371 QLC_83XX_1G_CAPABLE |
3372 QLC_83XX_10G_CAPABLE);
3373 ahw->port_config |= QLC_83XX_10_CAPABLE;
3374 break;
3375 case SPEED_100:
3376 ahw->port_config &= ~(QLC_83XX_10_CAPABLE |
3377 QLC_83XX_1G_CAPABLE |
3378 QLC_83XX_10G_CAPABLE);
3379 ahw->port_config |= QLC_83XX_100_CAPABLE;
3380 break;
3381 case SPEED_1000:
3382 ahw->port_config &= ~(QLC_83XX_10_CAPABLE |
3383 QLC_83XX_100_CAPABLE |
3384 QLC_83XX_10G_CAPABLE);
3385 ahw->port_config |= QLC_83XX_1G_CAPABLE;
3386 break;
3387 case SPEED_10000:
3388 ahw->port_config &= ~(QLC_83XX_10_CAPABLE |
3389 QLC_83XX_100_CAPABLE |
3390 QLC_83XX_1G_CAPABLE);
3391 ahw->port_config |= QLC_83XX_10G_CAPABLE;
3392 break;
3393 default:
3394 return -EINVAL;
3395 }
3396 }
3298 status = qlcnic_83xx_set_port_config(adapter); 3397 status = qlcnic_83xx_set_port_config(adapter);
3299 if (status) { 3398 if (status) {
3300 dev_info(&adapter->pdev->dev, 3399 netdev_info(adapter->netdev,
3301 "Failed to Set Link Speed and autoneg.\n"); 3400 "Failed to Set Link Speed and autoneg.\n");
3302 adapter->ahw->port_config = config; 3401 ahw->port_config = config;
3303 } 3402 }
3403
3304 return status; 3404 return status;
3305} 3405}
3306 3406
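The set_settings path above forces one speed by clearing the other three capability bits before setting the requested one. A minimal sketch of that mapping, assuming only the QLC_83XX_*_CAPABLE masks from qlcnic_83xx_hw.h; the helper name and the ALL_SPEED macro are hypothetical, not driver API:

/* Hypothetical helper: translate an ethtool speed into the single
 * 83xx capability bit to keep; the QLC_83XX_*_CAPABLE masks are the
 * real definitions from qlcnic_83xx_hw.h.
 */
#define QLC_83XX_ALL_SPEED_CAPS	(QLC_83XX_10_CAPABLE  | \
				 QLC_83XX_100_CAPABLE | \
				 QLC_83XX_1G_CAPABLE  | \
				 QLC_83XX_10G_CAPABLE)

static int qlc_83xx_speed_to_cap(u32 speed, u32 *cap)
{
	switch (speed) {
	case SPEED_10:
		*cap = QLC_83XX_10_CAPABLE;
		break;
	case SPEED_100:
		*cap = QLC_83XX_100_CAPABLE;
		break;
	case SPEED_1000:
		*cap = QLC_83XX_1G_CAPABLE;
		break;
	case SPEED_10000:
		*cap = QLC_83XX_10G_CAPABLE;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

With such a helper the four near-identical case bodies collapse into "port_config &= ~QLC_83XX_ALL_SPEED_CAPS; port_config |= cap;".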
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 2bf101a47d02..f3346a3779d3 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -83,6 +83,7 @@
83/* Firmware image definitions */ 83/* Firmware image definitions */
84#define QLC_83XX_BOOTLOADER_FLASH_ADDR 0x10000 84#define QLC_83XX_BOOTLOADER_FLASH_ADDR 0x10000
85#define QLC_83XX_FW_FILE_NAME "83xx_fw.bin" 85#define QLC_83XX_FW_FILE_NAME "83xx_fw.bin"
86#define QLC_83XX_POST_FW_FILE_NAME "83xx_post_fw.bin"
86#define QLC_84XX_FW_FILE_NAME "84xx_fw.bin" 87#define QLC_84XX_FW_FILE_NAME "84xx_fw.bin"
87#define QLC_83XX_BOOT_FROM_FLASH 0 88#define QLC_83XX_BOOT_FROM_FLASH 0
88#define QLC_83XX_BOOT_FROM_FILE 0x12345678 89#define QLC_83XX_BOOT_FROM_FILE 0x12345678
@@ -360,7 +361,6 @@ enum qlcnic_83xx_states {
360#define QLC_83XX_SFP_MODULE_TYPE(data) (((data) >> 4) & 0x1F) 361#define QLC_83XX_SFP_MODULE_TYPE(data) (((data) >> 4) & 0x1F)
361#define QLC_83XX_SFP_CU_LENGTH(data) (LSB((data) >> 16)) 362#define QLC_83XX_SFP_CU_LENGTH(data) (LSB((data) >> 16))
362#define QLC_83XX_SFP_TX_FAULT(data) ((data) & BIT_10) 363#define QLC_83XX_SFP_TX_FAULT(data) ((data) & BIT_10)
363#define QLC_83XX_SFP_10G_CAPABLE(data) ((data) & BIT_11)
364#define QLC_83XX_LINK_STATS(data) ((data) & BIT_0) 364#define QLC_83XX_LINK_STATS(data) ((data) & BIT_0)
365#define QLC_83XX_CURRENT_LINK_SPEED(data) (((data) >> 3) & 7) 365#define QLC_83XX_CURRENT_LINK_SPEED(data) (((data) >> 3) & 7)
366#define QLC_83XX_LINK_PAUSE(data) (((data) >> 6) & 3) 366#define QLC_83XX_LINK_PAUSE(data) (((data) >> 6) & 3)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 86783e1afcf7..9a2cfe4efac6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -2075,6 +2075,121 @@ static void qlcnic_83xx_init_hw(struct qlcnic_adapter *p_dev)
2075 dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__); 2075 dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
2076} 2076}
2077 2077
2078/* POST FW related definitions */
2079#define QLC_83XX_POST_SIGNATURE_REG 0x41602014
2080#define QLC_83XX_POST_MODE_REG 0x41602018
2081#define QLC_83XX_POST_FAST_MODE 0
2082#define QLC_83XX_POST_MEDIUM_MODE 1
2083#define QLC_83XX_POST_SLOW_MODE 2
2084
2085/* POST Timeout values in milliseconds */
2086#define QLC_83XX_POST_FAST_MODE_TIMEOUT 690
2087#define QLC_83XX_POST_MED_MODE_TIMEOUT 2930
2088#define QLC_83XX_POST_SLOW_MODE_TIMEOUT 7500
2089
2090/* POST result values */
2091#define QLC_83XX_POST_PASS 0xfffffff0
2092#define QLC_83XX_POST_ASIC_STRESS_TEST_FAIL 0xffffffff
2093#define QLC_83XX_POST_DDR_TEST_FAIL 0xfffffffe
2094#define QLC_83XX_POST_ASIC_MEMORY_TEST_FAIL 0xfffffffc
2095#define QLC_83XX_POST_FLASH_TEST_FAIL 0xfffffff8
2096
2097static int qlcnic_83xx_run_post(struct qlcnic_adapter *adapter)
2098{
2099 struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info;
2100 struct device *dev = &adapter->pdev->dev;
2101 int timeout, count, ret = 0;
2102 u32 signature;
2103
2104 /* Set timeout values with extra 2 seconds of buffer */
2105 switch (adapter->ahw->post_mode) {
2106 case QLC_83XX_POST_FAST_MODE:
2107 timeout = QLC_83XX_POST_FAST_MODE_TIMEOUT + 2000;
2108 break;
2109 case QLC_83XX_POST_MEDIUM_MODE:
2110 timeout = QLC_83XX_POST_MED_MODE_TIMEOUT + 2000;
2111 break;
2112 case QLC_83XX_POST_SLOW_MODE:
2113 timeout = QLC_83XX_POST_SLOW_MODE_TIMEOUT + 2000;
2114 break;
2115 default:
2116 return -EINVAL;
2117 }
2118
2119 strncpy(fw_info->fw_file_name, QLC_83XX_POST_FW_FILE_NAME,
2120 QLC_FW_FILE_NAME_LEN);
2121
2122 ret = request_firmware(&fw_info->fw, fw_info->fw_file_name, dev);
2123 if (ret) {
2124 		dev_err(dev, "POST firmware cannot be loaded, skipping POST\n");
2125 return 0;
2126 }
2127
2128 ret = qlcnic_83xx_copy_fw_file(adapter);
2129 if (ret)
2130 return ret;
2131
2132 /* clear QLC_83XX_POST_SIGNATURE_REG register */
2133 qlcnic_ind_wr(adapter, QLC_83XX_POST_SIGNATURE_REG, 0);
2134
2135 /* Set POST mode */
2136 qlcnic_ind_wr(adapter, QLC_83XX_POST_MODE_REG,
2137 adapter->ahw->post_mode);
2138
2139 QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
2140 QLC_83XX_BOOT_FROM_FILE);
2141
2142 qlcnic_83xx_start_hw(adapter);
2143
2144 count = 0;
2145 do {
2146 msleep(100);
2147 count += 100;
2148
2149 signature = qlcnic_ind_rd(adapter, QLC_83XX_POST_SIGNATURE_REG);
2150 if (signature == QLC_83XX_POST_PASS)
2151 break;
2152 } while (timeout > count);
2153
2154 if (timeout <= count) {
2155 dev_err(dev, "POST timed out, signature = 0x%08x\n", signature);
2156 return -EIO;
2157 }
2158
2159 switch (signature) {
2160 case QLC_83XX_POST_PASS:
2161 dev_info(dev, "POST passed, Signature = 0x%08x\n", signature);
2162 break;
2163 case QLC_83XX_POST_ASIC_STRESS_TEST_FAIL:
2164 dev_err(dev, "POST failed, Test case : ASIC STRESS TEST, Signature = 0x%08x\n",
2165 signature);
2166 ret = -EIO;
2167 break;
2168 case QLC_83XX_POST_DDR_TEST_FAIL:
2169 		dev_err(dev, "POST failed, Test case : DDR TEST, Signature = 0x%08x\n",
2170 signature);
2171 ret = -EIO;
2172 break;
2173 case QLC_83XX_POST_ASIC_MEMORY_TEST_FAIL:
2174 dev_err(dev, "POST failed, Test case : ASIC MEMORY TEST, Signature = 0x%08x\n",
2175 signature);
2176 ret = -EIO;
2177 break;
2178 case QLC_83XX_POST_FLASH_TEST_FAIL:
2179 dev_err(dev, "POST failed, Test case : FLASH TEST, Signature = 0x%08x\n",
2180 signature);
2181 ret = -EIO;
2182 break;
2183 default:
2184 dev_err(dev, "POST failed, Test case : INVALID, Signature = 0x%08x\n",
2185 signature);
2186 ret = -EIO;
2187 break;
2188 }
2189
2190 return ret;
2191}
2192
2078static int qlcnic_83xx_load_fw_image_from_host(struct qlcnic_adapter *adapter) 2193static int qlcnic_83xx_load_fw_image_from_host(struct qlcnic_adapter *adapter)
2079{ 2194{
2080 struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info; 2195 struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info;
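qlcnic_83xx_run_post() above waits on the signature register with a counted msleep() loop and then decodes the result. A stripped-down sketch of that poll-until-value-or-timeout idiom, using the driver's real qlcnic_ind_rd() accessor but a made-up helper name:

/* Illustrative only: poll an indirect register until it reads
 * 'want' or 'timeout_ms' worth of 100 ms slices have elapsed.
 */
static int qlc_poll_reg(struct qlcnic_adapter *adapter, u32 reg,
			u32 want, int timeout_ms)
{
	int waited = 0;

	do {
		msleep(100);
		waited += 100;
		if (qlcnic_ind_rd(adapter, reg) == want)
			return 0;		/* expected value seen */
	} while (waited < timeout_ms);

	return -ETIMEDOUT;			/* budget exhausted */
}

Note that in the driver function the timeout branch and the final switch both see the last sampled signature, which is why it can be printed in either case.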
@@ -2119,8 +2234,27 @@ static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter)
2119 2234
2120 if (qlcnic_83xx_copy_bootloader(adapter)) 2235 if (qlcnic_83xx_copy_bootloader(adapter))
2121 return err; 2236 return err;
2237
2238 /* Check if POST needs to be run */
2239 if (adapter->ahw->run_post) {
2240 err = qlcnic_83xx_run_post(adapter);
2241 if (err)
2242 return err;
2243
2244 /* No need to run POST in next reset sequence */
2245 adapter->ahw->run_post = false;
2246
2247 /* Again reset the adapter to load regular firmware */
2248 qlcnic_83xx_stop_hw(adapter);
2249 qlcnic_83xx_init_hw(adapter);
2250
2251 err = qlcnic_83xx_copy_bootloader(adapter);
2252 if (err)
2253 return err;
2254 }
2255
2122 /* Boot either flash image or firmware image from host file system */ 2256 /* Boot either flash image or firmware image from host file system */
2123 if (qlcnic_load_fw_file) { 2257 if (qlcnic_load_fw_file == 1) {
2124 if (qlcnic_83xx_load_fw_image_from_host(adapter)) 2258 if (qlcnic_83xx_load_fw_image_from_host(adapter))
2125 return err; 2259 return err;
2126 } else { 2260 } else {
@@ -2284,6 +2418,7 @@ static int qlcnic_83xx_get_fw_info(struct qlcnic_adapter *adapter)
2284 fw_info = ahw->fw_info; 2418 fw_info = ahw->fw_info;
2285 switch (pdev->device) { 2419 switch (pdev->device) {
2286 case PCI_DEVICE_ID_QLOGIC_QLE834X: 2420 case PCI_DEVICE_ID_QLOGIC_QLE834X:
2421 case PCI_DEVICE_ID_QLOGIC_QLE8830:
2287 strncpy(fw_info->fw_file_name, QLC_83XX_FW_FILE_NAME, 2422 strncpy(fw_info->fw_file_name, QLC_83XX_FW_FILE_NAME,
2288 QLC_FW_FILE_NAME_LEN); 2423 QLC_FW_FILE_NAME_LEN);
2289 break; 2424 break;
@@ -2328,6 +2463,25 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
2328 adapter->rx_mac_learn = false; 2463 adapter->rx_mac_learn = false;
2329 ahw->msix_supported = !!qlcnic_use_msi_x; 2464 ahw->msix_supported = !!qlcnic_use_msi_x;
2330 2465
2466 /* Check if POST needs to be run */
2467 switch (qlcnic_load_fw_file) {
2468 case 2:
2469 ahw->post_mode = QLC_83XX_POST_FAST_MODE;
2470 ahw->run_post = true;
2471 break;
2472 case 3:
2473 ahw->post_mode = QLC_83XX_POST_MEDIUM_MODE;
2474 ahw->run_post = true;
2475 break;
2476 case 4:
2477 ahw->post_mode = QLC_83XX_POST_SLOW_MODE;
2478 ahw->run_post = true;
2479 break;
2480 default:
2481 ahw->run_post = false;
2482 break;
2483 }
2484
2331 qlcnic_83xx_init_rings(adapter); 2485 qlcnic_83xx_init_rings(adapter);
2332 2486
2333 err = qlcnic_83xx_init_mailbox_work(adapter); 2487 err = qlcnic_83xx_init_mailbox_work(adapter);
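The module-parameter plumbing at the end of this file maps qlcnic_load_fw_file values 2-4 onto the three POST modes. The same mapping as an illustrative lookup table (a sketch, not driver code):

/* Sketch: table form of the load_fw_file -> POST mode switch in
 * qlcnic_83xx_init(); values 0 and 1 keep their original meaning
 * (flash boot and file boot) and leave run_post false.
 */
static const struct {
	int param;	/* module parameter value */
	u32 mode;	/* QLC_83XX_POST_*_MODE   */
} qlc_post_mode_map[] = {
	{ 2, QLC_83XX_POST_FAST_MODE },
	{ 3, QLC_83XX_POST_MEDIUM_MODE },
	{ 4, QLC_83XX_POST_SLOW_MODE },
};

So "modprobe qlcnic load_fw_file=3" runs POST once in medium mode; run_post is then cleared and the adapter is reset again to boot the regular firmware.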
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 851cb4a80d50..8102673cb37f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -341,7 +341,7 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
341 } 341 }
342 return -EIO; 342 return -EIO;
343 } 343 }
344 msleep(1); 344 usleep_range(1000, 1500);
345 } 345 }
346 346
347 if (id_reg) 347 if (id_reg)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index c4262c23ed7c..be41e4c77b65 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -537,7 +537,7 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
537 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0xc, 0); 537 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0xc, 0);
538 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x8, 0); 538 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x8, 0);
539 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0xc, 0); 539 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0xc, 0);
540 msleep(1); 540 usleep_range(1000, 1500);
541 541
542 QLC_SHARED_REG_WR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0); 542 QLC_SHARED_REG_WR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
543 QLC_SHARED_REG_WR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0); 543 QLC_SHARED_REG_WR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
@@ -1198,7 +1198,7 @@ qlcnic_load_firmware(struct qlcnic_adapter *adapter)
1198 flashaddr += 8; 1198 flashaddr += 8;
1199 } 1199 }
1200 } 1200 }
1201 msleep(1); 1201 usleep_range(1000, 1500);
1202 1202
1203 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x18, 0x1020); 1203 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x18, 0x1020);
1204 QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0x80001e); 1204 QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0x80001e);
@@ -1295,7 +1295,7 @@ next:
1295 rc = qlcnic_validate_firmware(adapter); 1295 rc = qlcnic_validate_firmware(adapter);
1296 if (rc != 0) { 1296 if (rc != 0) {
1297 release_firmware(adapter->fw); 1297 release_firmware(adapter->fw);
1298 msleep(1); 1298 usleep_range(1000, 1500);
1299 goto next; 1299 goto next;
1300 } 1300 }
1301 } 1301 }
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index e45bf09af0c9..18e5de72e9b4 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -1753,7 +1753,7 @@ qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
1753 1753
1754 if (qlcnic_encap_length(sts_data[1]) && 1754 if (qlcnic_encap_length(sts_data[1]) &&
1755 skb->ip_summed == CHECKSUM_UNNECESSARY) { 1755 skb->ip_summed == CHECKSUM_UNNECESSARY) {
1756 skb->encapsulation = 1; 1756 skb->csum_level = 1;
1757 adapter->stats.encap_rx_csummed++; 1757 adapter->stats.encap_rx_csummed++;
1758 } 1758 }
1759 1759
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index cf08b2de071e..f5e29f7bdae3 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -52,7 +52,7 @@ MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
52module_param_named(auto_fw_reset, qlcnic_auto_fw_reset, int, 0644); 52module_param_named(auto_fw_reset, qlcnic_auto_fw_reset, int, 0644);
53 53
54int qlcnic_load_fw_file; 54int qlcnic_load_fw_file;
55MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)"); 55MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file, 2=POST in fast mode, 3=POST in medium mode, 4=POST in slow mode)");
56module_param_named(load_fw_file, qlcnic_load_fw_file, int, 0444); 56module_param_named(load_fw_file, qlcnic_load_fw_file, int, 0444);
57 57
58static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent); 58static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
@@ -111,6 +111,7 @@ static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter)
111static const struct pci_device_id qlcnic_pci_tbl[] = { 111static const struct pci_device_id qlcnic_pci_tbl[] = {
112 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X), 112 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
113 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X), 113 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X),
114 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE8830),
114 ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE834X), 115 ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE834X),
115 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE844X), 116 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE844X),
116 ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE844X), 117 ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE844X),
@@ -228,6 +229,11 @@ static const struct qlcnic_board_info qlcnic_boards[] = {
228 PCI_DEVICE_ID_QLOGIC_QLE834X, 229 PCI_DEVICE_ID_QLOGIC_QLE834X,
229 0x0, 0x0, "8300 Series 1/10GbE Controller" }, 230 0x0, 0x0, "8300 Series 1/10GbE Controller" },
230 { PCI_VENDOR_ID_QLOGIC, 231 { PCI_VENDOR_ID_QLOGIC,
232 PCI_DEVICE_ID_QLOGIC_QLE8830,
233 0x0,
234 0x0,
235 "8830 Series 1/10GbE Controller" },
236 { PCI_VENDOR_ID_QLOGIC,
231 PCI_DEVICE_ID_QLOGIC_QLE824X, 237 PCI_DEVICE_ID_QLOGIC_QLE824X,
232 PCI_VENDOR_ID_QLOGIC, 238 PCI_VENDOR_ID_QLOGIC,
233 0x203, 239 0x203,
@@ -1131,6 +1137,7 @@ static void qlcnic_get_bar_length(u32 dev_id, ulong *bar)
1131 *bar = QLCNIC_82XX_BAR0_LENGTH; 1137 *bar = QLCNIC_82XX_BAR0_LENGTH;
1132 break; 1138 break;
1133 case PCI_DEVICE_ID_QLOGIC_QLE834X: 1139 case PCI_DEVICE_ID_QLOGIC_QLE834X:
1140 case PCI_DEVICE_ID_QLOGIC_QLE8830:
1134 case PCI_DEVICE_ID_QLOGIC_QLE844X: 1141 case PCI_DEVICE_ID_QLOGIC_QLE844X:
1135 case PCI_DEVICE_ID_QLOGIC_VF_QLE834X: 1142 case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
1136 case PCI_DEVICE_ID_QLOGIC_VF_QLE844X: 1143 case PCI_DEVICE_ID_QLOGIC_VF_QLE844X:
@@ -2474,6 +2481,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2474 ahw->reg_tbl = (u32 *) qlcnic_reg_tbl; 2481 ahw->reg_tbl = (u32 *) qlcnic_reg_tbl;
2475 break; 2482 break;
2476 case PCI_DEVICE_ID_QLOGIC_QLE834X: 2483 case PCI_DEVICE_ID_QLOGIC_QLE834X:
2484 case PCI_DEVICE_ID_QLOGIC_QLE8830:
2477 case PCI_DEVICE_ID_QLOGIC_QLE844X: 2485 case PCI_DEVICE_ID_QLOGIC_QLE844X:
2478 qlcnic_83xx_register_map(ahw); 2486 qlcnic_83xx_register_map(ahw);
2479 break; 2487 break;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 91652e7235e4..02dd92ac1764 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -52,6 +52,10 @@
52#define FIRMWARE_8106E_2 "rtl_nic/rtl8106e-2.fw" 52#define FIRMWARE_8106E_2 "rtl_nic/rtl8106e-2.fw"
53#define FIRMWARE_8168G_2 "rtl_nic/rtl8168g-2.fw" 53#define FIRMWARE_8168G_2 "rtl_nic/rtl8168g-2.fw"
54#define FIRMWARE_8168G_3 "rtl_nic/rtl8168g-3.fw" 54#define FIRMWARE_8168G_3 "rtl_nic/rtl8168g-3.fw"
55#define FIRMWARE_8168H_1 "rtl_nic/rtl8168h-1.fw"
56#define FIRMWARE_8168H_2 "rtl_nic/rtl8168h-2.fw"
57#define FIRMWARE_8107E_1 "rtl_nic/rtl8107e-1.fw"
58#define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw"
55 59
56#ifdef RTL8169_DEBUG 60#ifdef RTL8169_DEBUG
57#define assert(expr) \ 61#define assert(expr) \
@@ -147,6 +151,10 @@ enum mac_version {
147 RTL_GIGA_MAC_VER_42, 151 RTL_GIGA_MAC_VER_42,
148 RTL_GIGA_MAC_VER_43, 152 RTL_GIGA_MAC_VER_43,
149 RTL_GIGA_MAC_VER_44, 153 RTL_GIGA_MAC_VER_44,
154 RTL_GIGA_MAC_VER_45,
155 RTL_GIGA_MAC_VER_46,
156 RTL_GIGA_MAC_VER_47,
157 RTL_GIGA_MAC_VER_48,
150 RTL_GIGA_MAC_NONE = 0xff, 158 RTL_GIGA_MAC_NONE = 0xff,
151}; 159};
152 160
@@ -282,6 +290,18 @@ static const struct {
282 [RTL_GIGA_MAC_VER_44] = 290 [RTL_GIGA_MAC_VER_44] =
283 _R("RTL8411", RTL_TD_1, FIRMWARE_8411_2, 291 _R("RTL8411", RTL_TD_1, FIRMWARE_8411_2,
284 JUMBO_9K, false), 292 JUMBO_9K, false),
293 [RTL_GIGA_MAC_VER_45] =
294 _R("RTL8168h/8111h", RTL_TD_1, FIRMWARE_8168H_1,
295 JUMBO_9K, false),
296 [RTL_GIGA_MAC_VER_46] =
297 _R("RTL8168h/8111h", RTL_TD_1, FIRMWARE_8168H_2,
298 JUMBO_9K, false),
299 [RTL_GIGA_MAC_VER_47] =
300 _R("RTL8107e", RTL_TD_1, FIRMWARE_8107E_1,
301 JUMBO_1K, false),
302 [RTL_GIGA_MAC_VER_48] =
303 _R("RTL8107e", RTL_TD_1, FIRMWARE_8107E_2,
304 JUMBO_1K, false),
285}; 305};
286#undef _R 306#undef _R
287 307
@@ -410,6 +430,7 @@ enum rtl8168_8101_registers {
410#define EPHYAR_DATA_MASK 0xffff 430#define EPHYAR_DATA_MASK 0xffff
411 DLLPR = 0xd0, 431 DLLPR = 0xd0,
412#define PFM_EN (1 << 6) 432#define PFM_EN (1 << 6)
433#define TX_10M_PS_EN (1 << 7)
413 DBG_REG = 0xd1, 434 DBG_REG = 0xd1,
414#define FIX_NAK_1 (1 << 4) 435#define FIX_NAK_1 (1 << 4)
415#define FIX_NAK_2 (1 << 3) 436#define FIX_NAK_2 (1 << 3)
@@ -429,6 +450,8 @@ enum rtl8168_8101_registers {
429#define EFUSEAR_REG_MASK 0x03ff 450#define EFUSEAR_REG_MASK 0x03ff
430#define EFUSEAR_REG_SHIFT 8 451#define EFUSEAR_REG_SHIFT 8
431#define EFUSEAR_DATA_MASK 0xff 452#define EFUSEAR_DATA_MASK 0xff
453 MISC_1 = 0xf2,
454#define PFM_D3COLD_EN (1 << 6)
432}; 455};
433 456
434enum rtl8168_registers { 457enum rtl8168_registers {
@@ -447,6 +470,7 @@ enum rtl8168_registers {
447#define ERIAR_MASK_SHIFT 12 470#define ERIAR_MASK_SHIFT 12
448#define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT) 471#define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT)
449#define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT) 472#define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT)
473#define ERIAR_MASK_0100 (0x4 << ERIAR_MASK_SHIFT)
450#define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT) 474#define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT)
451#define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT) 475#define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT)
452 EPHY_RXER_NUM = 0x7c, 476 EPHY_RXER_NUM = 0x7c,
@@ -598,6 +622,9 @@ enum rtl_register_content {
598 622
599 /* DumpCounterCommand */ 623 /* DumpCounterCommand */
600 CounterDump = 0x8, 624 CounterDump = 0x8,
625
626 /* magic enable v2 */
627 MagicPacket_v2 = (1 << 16), /* Wake up when receives a Magic Packet */
601}; 628};
602 629
603enum rtl_desc_bit { 630enum rtl_desc_bit {
@@ -823,6 +850,10 @@ MODULE_FIRMWARE(FIRMWARE_8106E_1);
823MODULE_FIRMWARE(FIRMWARE_8106E_2); 850MODULE_FIRMWARE(FIRMWARE_8106E_2);
824MODULE_FIRMWARE(FIRMWARE_8168G_2); 851MODULE_FIRMWARE(FIRMWARE_8168G_2);
825MODULE_FIRMWARE(FIRMWARE_8168G_3); 852MODULE_FIRMWARE(FIRMWARE_8168G_3);
853MODULE_FIRMWARE(FIRMWARE_8168H_1);
854MODULE_FIRMWARE(FIRMWARE_8168H_2);
855MODULE_FIRMWARE(FIRMWARE_8107E_1);
856MODULE_FIRMWARE(FIRMWARE_8107E_2);
826 857
827static void rtl_lock_work(struct rtl8169_private *tp) 858static void rtl_lock_work(struct rtl8169_private *tp)
828{ 859{
@@ -1514,8 +1545,17 @@ static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
1514 options = RTL_R8(Config3); 1545 options = RTL_R8(Config3);
1515 if (options & LinkUp) 1546 if (options & LinkUp)
1516 wolopts |= WAKE_PHY; 1547 wolopts |= WAKE_PHY;
1517 if (options & MagicPacket) 1548 switch (tp->mac_version) {
1518 wolopts |= WAKE_MAGIC; 1549 case RTL_GIGA_MAC_VER_45:
1550 case RTL_GIGA_MAC_VER_46:
1551 if (rtl_eri_read(tp, 0xdc, ERIAR_EXGMAC) & MagicPacket_v2)
1552 wolopts |= WAKE_MAGIC;
1553 break;
1554 default:
1555 if (options & MagicPacket)
1556 wolopts |= WAKE_MAGIC;
1557 break;
1558 }
1519 1559
1520 options = RTL_R8(Config5); 1560 options = RTL_R8(Config5);
1521 if (options & UWF) 1561 if (options & UWF)
@@ -1543,24 +1583,48 @@ static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1543static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) 1583static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
1544{ 1584{
1545 void __iomem *ioaddr = tp->mmio_addr; 1585 void __iomem *ioaddr = tp->mmio_addr;
1546 unsigned int i; 1586 unsigned int i, tmp;
1547 static const struct { 1587 static const struct {
1548 u32 opt; 1588 u32 opt;
1549 u16 reg; 1589 u16 reg;
1550 u8 mask; 1590 u8 mask;
1551 } cfg[] = { 1591 } cfg[] = {
1552 { WAKE_PHY, Config3, LinkUp }, 1592 { WAKE_PHY, Config3, LinkUp },
1553 { WAKE_MAGIC, Config3, MagicPacket },
1554 { WAKE_UCAST, Config5, UWF }, 1593 { WAKE_UCAST, Config5, UWF },
1555 { WAKE_BCAST, Config5, BWF }, 1594 { WAKE_BCAST, Config5, BWF },
1556 { WAKE_MCAST, Config5, MWF }, 1595 { WAKE_MCAST, Config5, MWF },
1557 { WAKE_ANY, Config5, LanWake } 1596 { WAKE_ANY, Config5, LanWake },
1597 { WAKE_MAGIC, Config3, MagicPacket }
1558 }; 1598 };
1559 u8 options; 1599 u8 options;
1560 1600
1561 RTL_W8(Cfg9346, Cfg9346_Unlock); 1601 RTL_W8(Cfg9346, Cfg9346_Unlock);
1562 1602
1563 for (i = 0; i < ARRAY_SIZE(cfg); i++) { 1603 switch (tp->mac_version) {
1604 case RTL_GIGA_MAC_VER_45:
1605 case RTL_GIGA_MAC_VER_46:
1606 tmp = ARRAY_SIZE(cfg) - 1;
1607 if (wolopts & WAKE_MAGIC)
1608 rtl_w1w0_eri(tp,
1609 0x0dc,
1610 ERIAR_MASK_0100,
1611 MagicPacket_v2,
1612 0x0000,
1613 ERIAR_EXGMAC);
1614 else
1615 rtl_w1w0_eri(tp,
1616 0x0dc,
1617 ERIAR_MASK_0100,
1618 0x0000,
1619 MagicPacket_v2,
1620 ERIAR_EXGMAC);
1621 break;
1622 default:
1623 tmp = ARRAY_SIZE(cfg);
1624 break;
1625 }
1626
1627 for (i = 0; i < tmp; i++) {
1564 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask; 1628 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
1565 if (wolopts & cfg[i].opt) 1629 if (wolopts & cfg[i].opt)
1566 options |= cfg[i].mask; 1630 options |= cfg[i].mask;
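On the 8168h (VER_45/46) the Magic Packet enable bit lives in ERI register 0xdc rather than in Config3, which is why the cfg[] walk above stops one entry short and the bit is toggled through rtl_w1w0_eri(). The enable/disable pair reads naturally as one helper; a sketch built only from calls already present in this hunk (the wrapper name is invented):

/* Hypothetical wrapper around the two rtl_w1w0_eri() calls above:
 * set MagicPacket_v2 to enable wake-on-magic, clear it to disable.
 */
static void rtl8168h_set_magic(struct rtl8169_private *tp, bool enable)
{
	u32 set = enable ? MagicPacket_v2 : 0;
	u32 clear = enable ? 0 : MagicPacket_v2;

	rtl_w1w0_eri(tp, 0x0dc, ERIAR_MASK_0100, set, clear, ERIAR_EXGMAC);
}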
@@ -2044,6 +2108,10 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
2044 u32 val; 2108 u32 val;
2045 int mac_version; 2109 int mac_version;
2046 } mac_info[] = { 2110 } mac_info[] = {
2111 /* 8168H family. */
2112 { 0x7cf00000, 0x54100000, RTL_GIGA_MAC_VER_46 },
2113 { 0x7cf00000, 0x54000000, RTL_GIGA_MAC_VER_45 },
2114
2047 /* 8168G family. */ 2115 /* 8168G family. */
2048 { 0x7cf00000, 0x5c800000, RTL_GIGA_MAC_VER_44 }, 2116 { 0x7cf00000, 0x5c800000, RTL_GIGA_MAC_VER_44 },
2049 { 0x7cf00000, 0x50900000, RTL_GIGA_MAC_VER_42 }, 2117 { 0x7cf00000, 0x50900000, RTL_GIGA_MAC_VER_42 },
@@ -2139,6 +2207,14 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
2139 tp->mac_version = tp->mii.supports_gmii ? 2207 tp->mac_version = tp->mii.supports_gmii ?
2140 RTL_GIGA_MAC_VER_42 : 2208 RTL_GIGA_MAC_VER_42 :
2141 RTL_GIGA_MAC_VER_43; 2209 RTL_GIGA_MAC_VER_43;
2210 } else if (tp->mac_version == RTL_GIGA_MAC_VER_45) {
2211 tp->mac_version = tp->mii.supports_gmii ?
2212 RTL_GIGA_MAC_VER_45 :
2213 RTL_GIGA_MAC_VER_47;
2214 } else if (tp->mac_version == RTL_GIGA_MAC_VER_46) {
2215 tp->mac_version = tp->mii.supports_gmii ?
2216 RTL_GIGA_MAC_VER_46 :
2217 RTL_GIGA_MAC_VER_48;
2142 } 2218 }
2143} 2219}
2144 2220
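The new 8168H entries follow the existing { mask, val, mac_version } convention, so detection stays a first-match scan of the TxConfig XID. The scan itself is outside this hunk; a sketch of how such a table is conventionally consumed (entry type shown inline for illustration; the array ends with a catch-all that yields RTL_GIGA_MAC_NONE):

const struct {
	u32 mask;
	u32 val;
	int mac_version;
} *p = mac_info;

/* First entry whose masked XID matches wins; the all-zero
 * sentinel matches everything and reports an unknown chip.
 */
while ((xid & p->mask) != p->val)
	p++;
tp->mac_version = p->mac_version;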
@@ -3464,6 +3540,189 @@ static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp)
3464 rtl_apply_firmware(tp); 3540 rtl_apply_firmware(tp);
3465} 3541}
3466 3542
3543static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp)
3544{
3545 u16 dout_tapbin;
3546 u32 data;
3547
3548 rtl_apply_firmware(tp);
3549
3550 /* CHN EST parameters adjust - giga master */
3551 rtl_writephy(tp, 0x1f, 0x0a43);
3552 rtl_writephy(tp, 0x13, 0x809b);
3553 rtl_w1w0_phy(tp, 0x14, 0x8000, 0xf800);
3554 rtl_writephy(tp, 0x13, 0x80a2);
3555 rtl_w1w0_phy(tp, 0x14, 0x8000, 0xff00);
3556 rtl_writephy(tp, 0x13, 0x80a4);
3557 rtl_w1w0_phy(tp, 0x14, 0x8500, 0xff00);
3558 rtl_writephy(tp, 0x13, 0x809c);
3559 rtl_w1w0_phy(tp, 0x14, 0xbd00, 0xff00);
3560 rtl_writephy(tp, 0x1f, 0x0000);
3561
3562 /* CHN EST parameters adjust - giga slave */
3563 rtl_writephy(tp, 0x1f, 0x0a43);
3564 rtl_writephy(tp, 0x13, 0x80ad);
3565 rtl_w1w0_phy(tp, 0x14, 0x7000, 0xf800);
3566 rtl_writephy(tp, 0x13, 0x80b4);
3567 rtl_w1w0_phy(tp, 0x14, 0x5000, 0xff00);
3568 rtl_writephy(tp, 0x13, 0x80ac);
3569 rtl_w1w0_phy(tp, 0x14, 0x4000, 0xff00);
3570 rtl_writephy(tp, 0x1f, 0x0000);
3571
3572 /* CHN EST parameters adjust - fnet */
3573 rtl_writephy(tp, 0x1f, 0x0a43);
3574 rtl_writephy(tp, 0x13, 0x808e);
3575 rtl_w1w0_phy(tp, 0x14, 0x1200, 0xff00);
3576 rtl_writephy(tp, 0x13, 0x8090);
3577 rtl_w1w0_phy(tp, 0x14, 0xe500, 0xff00);
3578 rtl_writephy(tp, 0x13, 0x8092);
3579 rtl_w1w0_phy(tp, 0x14, 0x9f00, 0xff00);
3580 rtl_writephy(tp, 0x1f, 0x0000);
3581
3582 /* enable R-tune & PGA-retune function */
3583 dout_tapbin = 0;
3584 rtl_writephy(tp, 0x1f, 0x0a46);
3585 data = rtl_readphy(tp, 0x13);
3586 data &= 3;
3587 data <<= 2;
3588 dout_tapbin |= data;
3589 data = rtl_readphy(tp, 0x12);
3590 data &= 0xc000;
3591 data >>= 14;
3592 dout_tapbin |= data;
3593 dout_tapbin = ~(dout_tapbin^0x08);
3594 dout_tapbin <<= 12;
3595 dout_tapbin &= 0xf000;
3596 rtl_writephy(tp, 0x1f, 0x0a43);
3597 rtl_writephy(tp, 0x13, 0x827a);
3598 rtl_w1w0_phy(tp, 0x14, dout_tapbin, 0xf000);
3599 rtl_writephy(tp, 0x13, 0x827b);
3600 rtl_w1w0_phy(tp, 0x14, dout_tapbin, 0xf000);
3601 rtl_writephy(tp, 0x13, 0x827c);
3602 rtl_w1w0_phy(tp, 0x14, dout_tapbin, 0xf000);
3603 rtl_writephy(tp, 0x13, 0x827d);
3604 rtl_w1w0_phy(tp, 0x14, dout_tapbin, 0xf000);
3605
3606 rtl_writephy(tp, 0x1f, 0x0a43);
3607 rtl_writephy(tp, 0x13, 0x0811);
3608 rtl_w1w0_phy(tp, 0x14, 0x0800, 0x0000);
3609 rtl_writephy(tp, 0x1f, 0x0a42);
3610 rtl_w1w0_phy(tp, 0x16, 0x0002, 0x0000);
3611 rtl_writephy(tp, 0x1f, 0x0000);
3612
3613 /* enable GPHY 10M */
3614 rtl_writephy(tp, 0x1f, 0x0a44);
3615 rtl_w1w0_phy(tp, 0x11, 0x0800, 0x0000);
3616 rtl_writephy(tp, 0x1f, 0x0000);
3617
3618 /* SAR ADC performance */
3619 rtl_writephy(tp, 0x1f, 0x0bca);
3620 rtl_w1w0_phy(tp, 0x17, 0x4000, 0x3000);
3621 rtl_writephy(tp, 0x1f, 0x0000);
3622
3623 rtl_writephy(tp, 0x1f, 0x0a43);
3624 rtl_writephy(tp, 0x13, 0x803f);
3625 rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000);
3626 rtl_writephy(tp, 0x13, 0x8047);
3627 rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000);
3628 rtl_writephy(tp, 0x13, 0x804f);
3629 rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000);
3630 rtl_writephy(tp, 0x13, 0x8057);
3631 rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000);
3632 rtl_writephy(tp, 0x13, 0x805f);
3633 rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000);
3634 rtl_writephy(tp, 0x13, 0x8067);
3635 rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000);
3636 rtl_writephy(tp, 0x13, 0x806f);
3637 rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000);
3638 rtl_writephy(tp, 0x1f, 0x0000);
3639
3640 /* disable phy pfm mode */
3641 rtl_writephy(tp, 0x1f, 0x0a44);
3642 rtl_w1w0_phy(tp, 0x14, 0x0000, 0x0080);
3643 rtl_writephy(tp, 0x1f, 0x0000);
3644
3645 /* Check ALDPS bit, disable it if enabled */
3646 rtl_writephy(tp, 0x1f, 0x0a43);
3647 if (rtl_readphy(tp, 0x10) & 0x0004)
3648 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0004);
3649
3650 rtl_writephy(tp, 0x1f, 0x0000);
3651}
3652
3653static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp)
3654{
3655 u16 ioffset_p3, ioffset_p2, ioffset_p1, ioffset_p0;
3656 u16 rlen;
3657 u32 data;
3658
3659 rtl_apply_firmware(tp);
3660
3661 /* CHIN EST parameter update */
3662 rtl_writephy(tp, 0x1f, 0x0a43);
3663 rtl_writephy(tp, 0x13, 0x808a);
3664 rtl_w1w0_phy(tp, 0x14, 0x000a, 0x003f);
3665 rtl_writephy(tp, 0x1f, 0x0000);
3666
3667 /* enable R-tune & PGA-retune function */
3668 rtl_writephy(tp, 0x1f, 0x0a43);
3669 rtl_writephy(tp, 0x13, 0x0811);
3670 rtl_w1w0_phy(tp, 0x14, 0x0800, 0x0000);
3671 rtl_writephy(tp, 0x1f, 0x0a42);
3672 rtl_w1w0_phy(tp, 0x16, 0x0002, 0x0000);
3673 rtl_writephy(tp, 0x1f, 0x0000);
3674
3675 /* enable GPHY 10M */
3676 rtl_writephy(tp, 0x1f, 0x0a44);
3677 rtl_w1w0_phy(tp, 0x11, 0x0800, 0x0000);
3678 rtl_writephy(tp, 0x1f, 0x0000);
3679
3680 r8168_mac_ocp_write(tp, 0xdd02, 0x807d);
3681 data = r8168_mac_ocp_read(tp, 0xdd02);
3682 ioffset_p3 = ((data & 0x80)>>7);
3683 ioffset_p3 <<= 3;
3684
3685 data = r8168_mac_ocp_read(tp, 0xdd00);
3686 ioffset_p3 |= ((data & (0xe000))>>13);
3687 ioffset_p2 = ((data & (0x1e00))>>9);
3688 ioffset_p1 = ((data & (0x01e0))>>5);
3689 ioffset_p0 = ((data & 0x0010)>>4);
3690 ioffset_p0 <<= 3;
3691 ioffset_p0 |= (data & (0x07));
3692 data = (ioffset_p3<<12)|(ioffset_p2<<8)|(ioffset_p1<<4)|(ioffset_p0);
3693
3694 if ((ioffset_p3 != 0x0F) || (ioffset_p2 != 0x0F) ||
3695 	    (ioffset_p1 != 0x0F) || (ioffset_p0 != 0x0F)) {
3696 rtl_writephy(tp, 0x1f, 0x0bcf);
3697 rtl_writephy(tp, 0x16, data);
3698 rtl_writephy(tp, 0x1f, 0x0000);
3699 }
3700
3701 /* Modify rlen (TX LPF corner frequency) level */
3702 rtl_writephy(tp, 0x1f, 0x0bcd);
3703 data = rtl_readphy(tp, 0x16);
3704 data &= 0x000f;
3705 rlen = 0;
3706 if (data > 3)
3707 rlen = data - 3;
3708 data = rlen | (rlen<<4) | (rlen<<8) | (rlen<<12);
3709 rtl_writephy(tp, 0x17, data);
3710 rtl_writephy(tp, 0x1f, 0x0bcd);
3711 rtl_writephy(tp, 0x1f, 0x0000);
3712
3713 /* disable phy pfm mode */
3714 rtl_writephy(tp, 0x1f, 0x0a44);
3715 rtl_w1w0_phy(tp, 0x14, 0x0000, 0x0080);
3716 rtl_writephy(tp, 0x1f, 0x0000);
3717
3718 /* Check ALDPS bit, disable it if enabled */
3719 rtl_writephy(tp, 0x1f, 0x0a43);
3720 if (rtl_readphy(tp, 0x10) & 0x0004)
3721 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0004);
3722
3723 rtl_writephy(tp, 0x1f, 0x0000);
3724}
3725
3467static void rtl8102e_hw_phy_config(struct rtl8169_private *tp) 3726static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
3468{ 3727{
3469 static const struct phy_reg phy_reg_init[] = { 3728 static const struct phy_reg phy_reg_init[] = {
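rtl8168h_2_hw_phy_config() reassembles a 16-bit ioffset word from bits scattered over OCP registers 0xdd00/0xdd02: p3 and p0 are each built from one stray bit plus three adjacent ones, then the four 4-bit fields are packed p3:p2:p1:p0. A worked example of the packing step with arbitrary sample nibbles:

/* Sample values only: ioffset_p3 = 0x9, p2 = 0x4, p1 = 0xa, p0 = 0x3 */
u32 data = (0x9 << 12) | (0x4 << 8) | (0xa << 4) | 0x3;	/* 0x94a3 */

The guard that follows skips the PHY write when every field reads back as 0xF, i.e. when the efuse block is unprogrammed and the PHY default should be left alone.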
@@ -3654,6 +3913,14 @@ static void rtl_hw_phy_config(struct net_device *dev)
3654 case RTL_GIGA_MAC_VER_44: 3913 case RTL_GIGA_MAC_VER_44:
3655 rtl8168g_2_hw_phy_config(tp); 3914 rtl8168g_2_hw_phy_config(tp);
3656 break; 3915 break;
3916 case RTL_GIGA_MAC_VER_45:
3917 case RTL_GIGA_MAC_VER_47:
3918 rtl8168h_1_hw_phy_config(tp);
3919 break;
3920 case RTL_GIGA_MAC_VER_46:
3921 case RTL_GIGA_MAC_VER_48:
3922 rtl8168h_2_hw_phy_config(tp);
3923 break;
3657 3924
3658 case RTL_GIGA_MAC_VER_41: 3925 case RTL_GIGA_MAC_VER_41:
3659 default: 3926 default:
@@ -3865,6 +4132,10 @@ static void rtl_init_mdio_ops(struct rtl8169_private *tp)
3865 case RTL_GIGA_MAC_VER_42: 4132 case RTL_GIGA_MAC_VER_42:
3866 case RTL_GIGA_MAC_VER_43: 4133 case RTL_GIGA_MAC_VER_43:
3867 case RTL_GIGA_MAC_VER_44: 4134 case RTL_GIGA_MAC_VER_44:
4135 case RTL_GIGA_MAC_VER_45:
4136 case RTL_GIGA_MAC_VER_46:
4137 case RTL_GIGA_MAC_VER_47:
4138 case RTL_GIGA_MAC_VER_48:
3868 ops->write = r8168g_mdio_write; 4139 ops->write = r8168g_mdio_write;
3869 ops->read = r8168g_mdio_read; 4140 ops->read = r8168g_mdio_read;
3870 break; 4141 break;
@@ -3919,6 +4190,10 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
3919 case RTL_GIGA_MAC_VER_42: 4190 case RTL_GIGA_MAC_VER_42:
3920 case RTL_GIGA_MAC_VER_43: 4191 case RTL_GIGA_MAC_VER_43:
3921 case RTL_GIGA_MAC_VER_44: 4192 case RTL_GIGA_MAC_VER_44:
4193 case RTL_GIGA_MAC_VER_45:
4194 case RTL_GIGA_MAC_VER_46:
4195 case RTL_GIGA_MAC_VER_47:
4196 case RTL_GIGA_MAC_VER_48:
3922 RTL_W32(RxConfig, RTL_R32(RxConfig) | 4197 RTL_W32(RxConfig, RTL_R32(RxConfig) |
3923 AcceptBroadcast | AcceptMulticast | AcceptMyPhys); 4198 AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
3924 break; 4199 break;
@@ -3987,6 +4262,10 @@ static void r810x_pll_power_up(struct rtl8169_private *tp)
3987 case RTL_GIGA_MAC_VER_13: 4262 case RTL_GIGA_MAC_VER_13:
3988 case RTL_GIGA_MAC_VER_16: 4263 case RTL_GIGA_MAC_VER_16:
3989 break; 4264 break;
4265 case RTL_GIGA_MAC_VER_47:
4266 case RTL_GIGA_MAC_VER_48:
4267 RTL_W8(PMCH, RTL_R8(PMCH) | 0xC0);
4268 break;
3990 default: 4269 default:
3991 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); 4270 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
3992 break; 4271 break;
@@ -4087,6 +4366,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
4087 case RTL_GIGA_MAC_VER_31: 4366 case RTL_GIGA_MAC_VER_31:
4088 case RTL_GIGA_MAC_VER_32: 4367 case RTL_GIGA_MAC_VER_32:
4089 case RTL_GIGA_MAC_VER_33: 4368 case RTL_GIGA_MAC_VER_33:
4369 case RTL_GIGA_MAC_VER_45:
4370 case RTL_GIGA_MAC_VER_46:
4090 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); 4371 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
4091 break; 4372 break;
4092 case RTL_GIGA_MAC_VER_40: 4373 case RTL_GIGA_MAC_VER_40:
@@ -4111,6 +4392,10 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
4111 case RTL_GIGA_MAC_VER_33: 4392 case RTL_GIGA_MAC_VER_33:
4112 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); 4393 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
4113 break; 4394 break;
4395 case RTL_GIGA_MAC_VER_45:
4396 case RTL_GIGA_MAC_VER_46:
4397 RTL_W8(PMCH, RTL_R8(PMCH) | 0xC0);
4398 break;
4114 case RTL_GIGA_MAC_VER_40: 4399 case RTL_GIGA_MAC_VER_40:
4115 case RTL_GIGA_MAC_VER_41: 4400 case RTL_GIGA_MAC_VER_41:
4116 rtl_w1w0_eri(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000, 4401 rtl_w1w0_eri(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000,
@@ -4153,6 +4438,8 @@ static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
4153 case RTL_GIGA_MAC_VER_37: 4438 case RTL_GIGA_MAC_VER_37:
4154 case RTL_GIGA_MAC_VER_39: 4439 case RTL_GIGA_MAC_VER_39:
4155 case RTL_GIGA_MAC_VER_43: 4440 case RTL_GIGA_MAC_VER_43:
4441 case RTL_GIGA_MAC_VER_47:
4442 case RTL_GIGA_MAC_VER_48:
4156 ops->down = r810x_pll_power_down; 4443 ops->down = r810x_pll_power_down;
4157 ops->up = r810x_pll_power_up; 4444 ops->up = r810x_pll_power_up;
4158 break; 4445 break;
@@ -4182,6 +4469,8 @@ static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
4182 case RTL_GIGA_MAC_VER_41: 4469 case RTL_GIGA_MAC_VER_41:
4183 case RTL_GIGA_MAC_VER_42: 4470 case RTL_GIGA_MAC_VER_42:
4184 case RTL_GIGA_MAC_VER_44: 4471 case RTL_GIGA_MAC_VER_44:
4472 case RTL_GIGA_MAC_VER_45:
4473 case RTL_GIGA_MAC_VER_46:
4185 ops->down = r8168_pll_power_down; 4474 ops->down = r8168_pll_power_down;
4186 ops->up = r8168_pll_power_up; 4475 ops->up = r8168_pll_power_up;
4187 break; 4476 break;
@@ -4232,6 +4521,10 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
4232 case RTL_GIGA_MAC_VER_42: 4521 case RTL_GIGA_MAC_VER_42:
4233 case RTL_GIGA_MAC_VER_43: 4522 case RTL_GIGA_MAC_VER_43:
4234 case RTL_GIGA_MAC_VER_44: 4523 case RTL_GIGA_MAC_VER_44:
4524 case RTL_GIGA_MAC_VER_45:
4525 case RTL_GIGA_MAC_VER_46:
4526 case RTL_GIGA_MAC_VER_47:
4527 case RTL_GIGA_MAC_VER_48:
4235 RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF); 4528 RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
4236 break; 4529 break;
4237 default: 4530 default:
@@ -4393,6 +4686,10 @@ static void rtl_init_jumbo_ops(struct rtl8169_private *tp)
4393 case RTL_GIGA_MAC_VER_42: 4686 case RTL_GIGA_MAC_VER_42:
4394 case RTL_GIGA_MAC_VER_43: 4687 case RTL_GIGA_MAC_VER_43:
4395 case RTL_GIGA_MAC_VER_44: 4688 case RTL_GIGA_MAC_VER_44:
4689 case RTL_GIGA_MAC_VER_45:
4690 case RTL_GIGA_MAC_VER_46:
4691 case RTL_GIGA_MAC_VER_47:
4692 case RTL_GIGA_MAC_VER_48:
4396 default: 4693 default:
4397 ops->disable = NULL; 4694 ops->disable = NULL;
4398 ops->enable = NULL; 4695 ops->enable = NULL;
@@ -4495,15 +4792,19 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
4495 tp->mac_version == RTL_GIGA_MAC_VER_31) { 4792 tp->mac_version == RTL_GIGA_MAC_VER_31) {
4496 rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42); 4793 rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
4497 } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 || 4794 } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
4498 tp->mac_version == RTL_GIGA_MAC_VER_35 || 4795 tp->mac_version == RTL_GIGA_MAC_VER_35 ||
4499 tp->mac_version == RTL_GIGA_MAC_VER_36 || 4796 tp->mac_version == RTL_GIGA_MAC_VER_36 ||
4500 tp->mac_version == RTL_GIGA_MAC_VER_37 || 4797 tp->mac_version == RTL_GIGA_MAC_VER_37 ||
4501 tp->mac_version == RTL_GIGA_MAC_VER_40 || 4798 tp->mac_version == RTL_GIGA_MAC_VER_38 ||
4502 tp->mac_version == RTL_GIGA_MAC_VER_41 || 4799 tp->mac_version == RTL_GIGA_MAC_VER_40 ||
4503 tp->mac_version == RTL_GIGA_MAC_VER_42 || 4800 tp->mac_version == RTL_GIGA_MAC_VER_41 ||
4504 tp->mac_version == RTL_GIGA_MAC_VER_43 || 4801 tp->mac_version == RTL_GIGA_MAC_VER_42 ||
4505 tp->mac_version == RTL_GIGA_MAC_VER_44 || 4802 tp->mac_version == RTL_GIGA_MAC_VER_43 ||
4506 tp->mac_version == RTL_GIGA_MAC_VER_38) { 4803 tp->mac_version == RTL_GIGA_MAC_VER_44 ||
4804 tp->mac_version == RTL_GIGA_MAC_VER_45 ||
4805 tp->mac_version == RTL_GIGA_MAC_VER_46 ||
4806 tp->mac_version == RTL_GIGA_MAC_VER_47 ||
4807 tp->mac_version == RTL_GIGA_MAC_VER_48) {
4507 RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq); 4808 RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
4508 rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666); 4809 rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
4509 } else { 4810 } else {
@@ -5330,6 +5631,105 @@ static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
5330 rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2)); 5631 rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2));
5331} 5632}
5332 5633
5634static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
5635{
5636 void __iomem *ioaddr = tp->mmio_addr;
5637 struct pci_dev *pdev = tp->pci_dev;
5638 u16 rg_saw_cnt;
5639 u32 data;
5640 static const struct ephy_info e_info_8168h_1[] = {
5641 { 0x1e, 0x0800, 0x0001 },
5642 { 0x1d, 0x0000, 0x0800 },
5643 { 0x05, 0xffff, 0x2089 },
5644 { 0x06, 0xffff, 0x5881 },
5645 { 0x04, 0xffff, 0x154a },
5646 { 0x01, 0xffff, 0x068b }
5647 };
5648
5649 	/* disable ASPM and clock request before accessing ephy */
5650 RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
5651 RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
5652 rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1));
5653
5654 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5655
5656 rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
5657 rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
5658 rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
5659 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
5660
5661 rtl_csi_access_enable_1(tp);
5662
5663 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5664
5665 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5666 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5667
5668 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_1111, 0x0010, 0x00, ERIAR_EXGMAC);
5669
5670 rtl_w1w0_eri(tp, 0xd4, ERIAR_MASK_1111, 0x1f00, 0x00, ERIAR_EXGMAC);
5671
5672 rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC);
5673
5674 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5675 RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
5676 RTL_W8(MaxTxPacketSize, EarlySize);
5677
5678 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5679 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5680
5681 /* Adjust EEE LED frequency */
5682 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
5683
5684 RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
5685 	RTL_W8(MISC_1, RTL_R8(MISC_1) & ~PFM_D3COLD_EN);
5686
5687 RTL_W8(DLLPR, RTL_R8(DLLPR) & ~TX_10M_PS_EN);
5688
5689 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);
5690
5691 rtl_pcie_state_l2l3_enable(tp, false);
5692
5693 rtl_writephy(tp, 0x1f, 0x0c42);
5694 rg_saw_cnt = rtl_readphy(tp, 0x13);
5695 rtl_writephy(tp, 0x1f, 0x0000);
5696 if (rg_saw_cnt > 0) {
5697 u16 sw_cnt_1ms_ini;
5698
5699 sw_cnt_1ms_ini = 16000000/rg_saw_cnt;
5700 sw_cnt_1ms_ini &= 0x0fff;
5701 data = r8168_mac_ocp_read(tp, 0xd412);
5702 data &= 0x0fff;
5703 data |= sw_cnt_1ms_ini;
5704 r8168_mac_ocp_write(tp, 0xd412, data);
5705 }
5706
5707 data = r8168_mac_ocp_read(tp, 0xe056);
5708 data &= 0xf0;
5709 data |= 0x07;
5710 r8168_mac_ocp_write(tp, 0xe056, data);
5711
5712 data = r8168_mac_ocp_read(tp, 0xe052);
5713 data &= 0x8008;
5714 data |= 0x6000;
5715 r8168_mac_ocp_write(tp, 0xe052, data);
5716
5717 data = r8168_mac_ocp_read(tp, 0xe0d6);
5718 data &= 0x01ff;
5719 data |= 0x017f;
5720 r8168_mac_ocp_write(tp, 0xe0d6, data);
5721
5722 data = r8168_mac_ocp_read(tp, 0xd420);
5723 data &= 0x0fff;
5724 data |= 0x047f;
5725 r8168_mac_ocp_write(tp, 0xd420, data);
5726
5727 r8168_mac_ocp_write(tp, 0xe63e, 0x0001);
5728 r8168_mac_ocp_write(tp, 0xe63e, 0x0000);
5729 r8168_mac_ocp_write(tp, 0xc094, 0x0000);
5730 r8168_mac_ocp_write(tp, 0xc09e, 0x0000);
5731}
5732
5333static void rtl_hw_start_8168(struct net_device *dev) 5733static void rtl_hw_start_8168(struct net_device *dev)
5334{ 5734{
5335 struct rtl8169_private *tp = netdev_priv(dev); 5735 struct rtl8169_private *tp = netdev_priv(dev);
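rtl_hw_start_8168h_1() calibrates a 1 ms software counter from the PHY saw-tooth counter: sw_cnt_1ms_ini = 16000000 / rg_saw_cnt, truncated to the 12-bit field of OCP register 0xd412. A worked instance with an arbitrary sample readback:

/* Sample numbers only: rg_saw_cnt read back as 8000 ticks. */
u16 rg_saw_cnt = 8000;
u16 sw_cnt_1ms_ini = (16000000 / rg_saw_cnt) & 0x0fff;	/* 2000 = 0x07d0 */

which then replaces the low 12 bits of 0xd412 via the read-modify-write shown above.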
@@ -5440,6 +5840,11 @@ static void rtl_hw_start_8168(struct net_device *dev)
5440 rtl_hw_start_8411_2(tp); 5840 rtl_hw_start_8411_2(tp);
5441 break; 5841 break;
5442 5842
5843 case RTL_GIGA_MAC_VER_45:
5844 case RTL_GIGA_MAC_VER_46:
5845 rtl_hw_start_8168h_1(tp);
5846 break;
5847
5443 default: 5848 default:
5444 printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n", 5849 printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
5445 dev->name, tp->mac_version); 5850 dev->name, tp->mac_version);
@@ -5655,6 +6060,10 @@ static void rtl_hw_start_8101(struct net_device *dev)
5655 case RTL_GIGA_MAC_VER_43: 6060 case RTL_GIGA_MAC_VER_43:
5656 rtl_hw_start_8168g_2(tp); 6061 rtl_hw_start_8168g_2(tp);
5657 break; 6062 break;
6063 case RTL_GIGA_MAC_VER_47:
6064 case RTL_GIGA_MAC_VER_48:
6065 rtl_hw_start_8168h_1(tp);
6066 break;
5658 } 6067 }
5659 6068
5660 RTL_W8(Cfg9346, Cfg9346_Lock); 6069 RTL_W8(Cfg9346, Cfg9346_Lock);
@@ -5895,7 +6304,7 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
5895{ 6304{
5896 struct skb_shared_info *info = skb_shinfo(skb); 6305 struct skb_shared_info *info = skb_shinfo(skb);
5897 unsigned int cur_frag, entry; 6306 unsigned int cur_frag, entry;
5898 struct TxDesc * uninitialized_var(txd); 6307 struct TxDesc *uninitialized_var(txd);
5899 struct device *d = &tp->pci_dev->dev; 6308 struct device *d = &tp->pci_dev->dev;
5900 6309
5901 entry = tp->cur_tx; 6310 entry = tp->cur_tx;
@@ -7110,6 +7519,10 @@ static void rtl_hw_initialize(struct rtl8169_private *tp)
7110 case RTL_GIGA_MAC_VER_42: 7519 case RTL_GIGA_MAC_VER_42:
7111 case RTL_GIGA_MAC_VER_43: 7520 case RTL_GIGA_MAC_VER_43:
7112 case RTL_GIGA_MAC_VER_44: 7521 case RTL_GIGA_MAC_VER_44:
7522 case RTL_GIGA_MAC_VER_45:
7523 case RTL_GIGA_MAC_VER_46:
7524 case RTL_GIGA_MAC_VER_47:
7525 case RTL_GIGA_MAC_VER_48:
7113 rtl_hw_init_8168g(tp); 7526 rtl_hw_init_8168g(tp);
7114 break; 7527 break;
7115 7528
@@ -7255,8 +7668,19 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7255 RTL_W8(Cfg9346, Cfg9346_Unlock); 7668 RTL_W8(Cfg9346, Cfg9346_Unlock);
7256 RTL_W8(Config1, RTL_R8(Config1) | PMEnable); 7669 RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
7257 RTL_W8(Config5, RTL_R8(Config5) & (BWF | MWF | UWF | LanWake | PMEStatus)); 7670 RTL_W8(Config5, RTL_R8(Config5) & (BWF | MWF | UWF | LanWake | PMEStatus));
7258 if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0) 7671 switch (tp->mac_version) {
7259 tp->features |= RTL_FEATURE_WOL; 7672 case RTL_GIGA_MAC_VER_45:
7673 case RTL_GIGA_MAC_VER_46:
7674 if (rtl_eri_read(tp, 0xdc, ERIAR_EXGMAC) & MagicPacket_v2)
7675 tp->features |= RTL_FEATURE_WOL;
7676 if ((RTL_R8(Config3) & LinkUp) != 0)
7677 tp->features |= RTL_FEATURE_WOL;
7678 break;
7679 default:
7680 if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
7681 tp->features |= RTL_FEATURE_WOL;
7682 break;
7683 }
7260 if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0) 7684 if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
7261 tp->features |= RTL_FEATURE_WOL; 7685 tp->features |= RTL_FEATURE_WOL;
7262 tp->features |= rtl_try_msi(tp, cfg); 7686 tp->features |= rtl_try_msi(tp, cfg);
@@ -7283,6 +7707,18 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7283 u64_stats_init(&tp->tx_stats.syncp); 7707 u64_stats_init(&tp->tx_stats.syncp);
7284 7708
7285 /* Get MAC address */ 7709 /* Get MAC address */
7710 if (tp->mac_version == RTL_GIGA_MAC_VER_45 ||
7711 tp->mac_version == RTL_GIGA_MAC_VER_46 ||
7712 tp->mac_version == RTL_GIGA_MAC_VER_47 ||
7713 tp->mac_version == RTL_GIGA_MAC_VER_48) {
7714 u16 mac_addr[3];
7715
7716 *(u32 *)&mac_addr[0] = rtl_eri_read(tp, 0xE0, ERIAR_EXGMAC);
7717 *(u16 *)&mac_addr[2] = rtl_eri_read(tp, 0xE4, ERIAR_EXGMAC);
7718
7719 if (is_valid_ether_addr((u8 *)mac_addr))
7720 rtl_rar_set(tp, (u8 *)mac_addr);
7721 }
7286 for (i = 0; i < ETH_ALEN; i++) 7722 for (i = 0; i < ETH_ALEN; i++)
7287 dev->dev_addr[i] = RTL_R8(MAC0 + i); 7723 dev->dev_addr[i] = RTL_R8(MAC0 + i);
7288 7724
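For VER_45..48 the factory MAC address is read as two little-endian ERI words (four bytes at 0xE0, two at 0xE4) into a u16[3] buffer and programmed back through rtl_rar_set() when valid. A standalone userspace demonstration of the byte layout that code relies on (sample address, little-endian host assumed):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t mac_addr[3];
	const uint8_t *p = (const uint8_t *)mac_addr;
	int i;

	*(uint32_t *)&mac_addr[0] = 0x684ce000;	/* stands in for ERI 0xE0 */
	*(uint16_t *)&mac_addr[2] = 0x0100;	/* stands in for ERI 0xE4 */

	for (i = 0; i < 6; i++)
		printf("%02x%c", p[i], i == 5 ? '\n' : ':');
	return 0;	/* prints 00:e0:4c:68:00:01 */
}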
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index ec632e666c56..ddc6115720a3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -17,6 +17,7 @@
17 17
18#include <linux/mfd/syscon.h> 18#include <linux/mfd/syscon.h>
19#include <linux/of.h> 19#include <linux/of.h>
20#include <linux/of_address.h>
20#include <linux/of_net.h> 21#include <linux/of_net.h>
21#include <linux/phy.h> 22#include <linux/phy.h>
22#include <linux/regmap.h> 23#include <linux/regmap.h>
@@ -30,6 +31,12 @@
30#define SYSMGR_EMACGRP_CTRL_PHYSEL_WIDTH 2 31#define SYSMGR_EMACGRP_CTRL_PHYSEL_WIDTH 2
31#define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003 32#define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003
32 33
34#define EMAC_SPLITTER_CTRL_REG 0x0
35#define EMAC_SPLITTER_CTRL_SPEED_MASK 0x3
36#define EMAC_SPLITTER_CTRL_SPEED_10 0x2
37#define EMAC_SPLITTER_CTRL_SPEED_100 0x3
38#define EMAC_SPLITTER_CTRL_SPEED_1000 0x0
39
33struct socfpga_dwmac { 40struct socfpga_dwmac {
34 int interface; 41 int interface;
35 u32 reg_offset; 42 u32 reg_offset;
@@ -37,14 +44,46 @@ struct socfpga_dwmac {
37 struct device *dev; 44 struct device *dev;
38 struct regmap *sys_mgr_base_addr; 45 struct regmap *sys_mgr_base_addr;
39 struct reset_control *stmmac_rst; 46 struct reset_control *stmmac_rst;
47 void __iomem *splitter_base;
40}; 48};
41 49
50static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
51{
52 struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv;
53 void __iomem *splitter_base = dwmac->splitter_base;
54 u32 val;
55
56 if (!splitter_base)
57 return;
58
59 val = readl(splitter_base + EMAC_SPLITTER_CTRL_REG);
60 val &= ~EMAC_SPLITTER_CTRL_SPEED_MASK;
61
62 switch (speed) {
63 case 1000:
64 val |= EMAC_SPLITTER_CTRL_SPEED_1000;
65 break;
66 case 100:
67 val |= EMAC_SPLITTER_CTRL_SPEED_100;
68 break;
69 case 10:
70 val |= EMAC_SPLITTER_CTRL_SPEED_10;
71 break;
72 default:
73 return;
74 }
75
76 writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG);
77}
78
42static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *dev) 79static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *dev)
43{ 80{
44 struct device_node *np = dev->of_node; 81 struct device_node *np = dev->of_node;
45 struct regmap *sys_mgr_base_addr; 82 struct regmap *sys_mgr_base_addr;
46 u32 reg_offset, reg_shift; 83 u32 reg_offset, reg_shift;
47 int ret; 84 int ret;
85 struct device_node *np_splitter;
86 struct resource res_splitter;
48 87
49 dwmac->stmmac_rst = devm_reset_control_get(dev, 88 dwmac->stmmac_rst = devm_reset_control_get(dev,
50 STMMAC_RESOURCE_NAME); 89 STMMAC_RESOURCE_NAME);
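socfpga_dwmac_fix_mac_speed() is a plain read-modify-write: clear the two-bit speed field, OR in the encoding for the negotiated rate, write back, and return silently for unsupported speeds so the splitter keeps its previous setting. The encoding is small enough to tabulate; an illustrative table using only the EMAC_SPLITTER_* macros defined above:

/* Sketch: speed -> splitter control field, per the definitions above. */
static const struct {
	unsigned int speed;	/* Mb/s as passed to fix_mac_speed */
	u32 field;		/* EMAC_SPLITTER_CTRL_SPEED_*      */
} splitter_speed_map[] = {
	{ 1000, EMAC_SPLITTER_CTRL_SPEED_1000 },	/* 0x0 */
	{ 100,  EMAC_SPLITTER_CTRL_SPEED_100 },		/* 0x3 */
	{ 10,   EMAC_SPLITTER_CTRL_SPEED_10 },		/* 0x2 */
};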
@@ -73,6 +112,20 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *
73 return -EINVAL; 112 return -EINVAL;
74 } 113 }
75 114
115 np_splitter = of_parse_phandle(np, "altr,emac-splitter", 0);
116 if (np_splitter) {
117 if (of_address_to_resource(np_splitter, 0, &res_splitter)) {
118 dev_info(dev, "Missing emac splitter address\n");
119 return -EINVAL;
120 }
121
122 dwmac->splitter_base = devm_ioremap_resource(dev, &res_splitter);
123 if (!dwmac->splitter_base) {
124 dev_info(dev, "Failed to mapping emac splitter\n");
125 return -EINVAL;
126 }
127 }
128
76 dwmac->reg_offset = reg_offset; 129 dwmac->reg_offset = reg_offset;
77 dwmac->reg_shift = reg_shift; 130 dwmac->reg_shift = reg_shift;
78 dwmac->sys_mgr_base_addr = sys_mgr_base_addr; 131 dwmac->sys_mgr_base_addr = sys_mgr_base_addr;
@@ -91,6 +144,7 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
91 144
92 switch (phymode) { 145 switch (phymode) {
93 case PHY_INTERFACE_MODE_RGMII: 146 case PHY_INTERFACE_MODE_RGMII:
147 case PHY_INTERFACE_MODE_RGMII_ID:
94 val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII; 148 val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII;
95 break; 149 break;
96 case PHY_INTERFACE_MODE_MII: 150 case PHY_INTERFACE_MODE_MII:
@@ -102,6 +156,13 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
102 return -EINVAL; 156 return -EINVAL;
103 } 157 }
104 158
159 /* Overwrite val to GMII if splitter core is enabled. The phymode here
160 * is the actual phy mode on phy hardware, but phy interface from
161 * EMAC core is GMII.
162 */
163 if (dwmac->splitter_base)
164 val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
165
105 regmap_read(sys_mgr_base_addr, reg_offset, &ctrl); 166 regmap_read(sys_mgr_base_addr, reg_offset, &ctrl);
106 ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift); 167 ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
107 ctrl |= val << reg_shift; 168 ctrl |= val << reg_shift;
@@ -196,4 +257,5 @@ const struct stmmac_of_data socfpga_gmac_data = {
196 .setup = socfpga_dwmac_probe, 257 .setup = socfpga_dwmac_probe,
197 .init = socfpga_dwmac_init, 258 .init = socfpga_dwmac_init,
198 .exit = socfpga_dwmac_exit, 259 .exit = socfpga_dwmac_exit,
260 .fix_mac_speed = socfpga_dwmac_fix_mac_speed,
199}; 261};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index cf4f38db1c0a..3a08a1f78c73 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -261,11 +261,11 @@ static int stmmac_ethtool_getsettings(struct net_device *dev,
261 ethtool_cmd_speed_set(cmd, priv->xstats.pcs_speed); 261 ethtool_cmd_speed_set(cmd, priv->xstats.pcs_speed);
262 262
263 /* Get and convert ADV/LP_ADV from the HW AN registers */ 263 /* Get and convert ADV/LP_ADV from the HW AN registers */
264 if (priv->hw->mac->get_adv) 264 if (!priv->hw->mac->get_adv)
265 priv->hw->mac->get_adv(priv->hw, &adv);
266 else
267 return -EOPNOTSUPP; /* should never happen indeed */ 265 return -EOPNOTSUPP; /* should never happen indeed */
268 266
267 priv->hw->mac->get_adv(priv->hw, &adv);
268
269 /* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */ 269 /* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */
270 270
271 if (adv.pause & STMMAC_PCS_PAUSE) 271 if (adv.pause & STMMAC_PCS_PAUSE)
@@ -340,19 +340,17 @@ static int stmmac_ethtool_setsettings(struct net_device *dev,
340 if (cmd->autoneg != AUTONEG_ENABLE) 340 if (cmd->autoneg != AUTONEG_ENABLE)
341 return -EINVAL; 341 return -EINVAL;
342 342
343 if (cmd->autoneg == AUTONEG_ENABLE) { 343 mask &= (ADVERTISED_1000baseT_Half |
344 mask &= (ADVERTISED_1000baseT_Half |
345 ADVERTISED_1000baseT_Full | 344 ADVERTISED_1000baseT_Full |
346 ADVERTISED_100baseT_Half | 345 ADVERTISED_100baseT_Half |
347 ADVERTISED_100baseT_Full | 346 ADVERTISED_100baseT_Full |
348 ADVERTISED_10baseT_Half | 347 ADVERTISED_10baseT_Half |
349 ADVERTISED_10baseT_Full); 348 ADVERTISED_10baseT_Full);
350 349
351 spin_lock(&priv->lock); 350 spin_lock(&priv->lock);
352 if (priv->hw->mac->ctrl_ane) 351 if (priv->hw->mac->ctrl_ane)
353 priv->hw->mac->ctrl_ane(priv->hw, 1); 352 priv->hw->mac->ctrl_ane(priv->hw, 1);
354 spin_unlock(&priv->lock); 353 spin_unlock(&priv->lock);
355 }
356 354
357 return 0; 355 return 0;
358 } 356 }
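Both stmmac_ethtool.c hunks apply the same guard-clause refactor: fail fast on the unsupported case (missing get_adv, autoneg disabled) and keep the main path unindented. Schematically, with placeholder names:

/* Before: the useful work hides inside a condition that an earlier
 * check already guarantees.
 */
if (cond)
	do_work();
else
	return -EOPNOTSUPP;

/* After: reject first, then work at the top level. */
if (!cond)
	return -EOPNOTSUPP;
do_work();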
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 6e6ee226de04..9dbb02d9d9c2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -834,7 +834,7 @@ static int stmmac_init_phy(struct net_device *dev)
834 /* Stop Advertising 1000BASE Capability if interface is not GMII */ 834 /* Stop Advertising 1000BASE Capability if interface is not GMII */
835 if ((interface == PHY_INTERFACE_MODE_MII) || 835 if ((interface == PHY_INTERFACE_MODE_MII) ||
836 (interface == PHY_INTERFACE_MODE_RMII) || 836 (interface == PHY_INTERFACE_MODE_RMII) ||
837 (max_speed < 1000 && max_speed > 0)) 837 (max_speed < 1000 && max_speed > 0))
838 phydev->advertising &= ~(SUPPORTED_1000baseT_Half | 838 phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
839 SUPPORTED_1000baseT_Full); 839 SUPPORTED_1000baseT_Full);
840 840
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index a5b1e1b776fe..8dd040827c69 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -253,7 +253,7 @@ int stmmac_mdio_register(struct net_device *ndev)
253 } 253 }
254 254
255 /* 255 /*
256 * If we're going to bind the MAC to this PHY bus, 256 * If we're going to bind the MAC to this PHY bus,
257 * and no PHY number was provided to the MAC, 257 * and no PHY number was provided to the MAC,
258 * use the one probed here. 258 * use the one probed here.
259 */ 259 */
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index f7415b6bf141..fef5dec2cffe 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -115,7 +115,7 @@ static const struct pci_device_id gem_pci_tbl[] = {
115 115
116MODULE_DEVICE_TABLE(pci, gem_pci_tbl); 116MODULE_DEVICE_TABLE(pci, gem_pci_tbl);
117 117
118static u16 __phy_read(struct gem *gp, int phy_addr, int reg) 118static u16 __sungem_phy_read(struct gem *gp, int phy_addr, int reg)
119{ 119{
120 u32 cmd; 120 u32 cmd;
121 int limit = 10000; 121 int limit = 10000;
@@ -141,18 +141,18 @@ static u16 __phy_read(struct gem *gp, int phy_addr, int reg)
141 return cmd & MIF_FRAME_DATA; 141 return cmd & MIF_FRAME_DATA;
142} 142}
143 143
144static inline int _phy_read(struct net_device *dev, int mii_id, int reg) 144static inline int _sungem_phy_read(struct net_device *dev, int mii_id, int reg)
145{ 145{
146 struct gem *gp = netdev_priv(dev); 146 struct gem *gp = netdev_priv(dev);
147 return __phy_read(gp, mii_id, reg); 147 return __sungem_phy_read(gp, mii_id, reg);
148} 148}
149 149
150static inline u16 phy_read(struct gem *gp, int reg) 150static inline u16 sungem_phy_read(struct gem *gp, int reg)
151{ 151{
152 return __phy_read(gp, gp->mii_phy_addr, reg); 152 return __sungem_phy_read(gp, gp->mii_phy_addr, reg);
153} 153}
154 154
155static void __phy_write(struct gem *gp, int phy_addr, int reg, u16 val) 155static void __sungem_phy_write(struct gem *gp, int phy_addr, int reg, u16 val)
156{ 156{
157 u32 cmd; 157 u32 cmd;
158 int limit = 10000; 158 int limit = 10000;
@@ -174,15 +174,15 @@ static void __phy_write(struct gem *gp, int phy_addr, int reg, u16 val)
174 } 174 }
175} 175}
176 176
177static inline void _phy_write(struct net_device *dev, int mii_id, int reg, int val) 177static inline void _sungem_phy_write(struct net_device *dev, int mii_id, int reg, int val)
178{ 178{
179 struct gem *gp = netdev_priv(dev); 179 struct gem *gp = netdev_priv(dev);
180 __phy_write(gp, mii_id, reg, val & 0xffff); 180 __sungem_phy_write(gp, mii_id, reg, val & 0xffff);
181} 181}
182 182
183static inline void phy_write(struct gem *gp, int reg, u16 val) 183static inline void sungem_phy_write(struct gem *gp, int reg, u16 val)
184{ 184{
185 __phy_write(gp, gp->mii_phy_addr, reg, val); 185 __sungem_phy_write(gp, gp->mii_phy_addr, reg, val);
186} 186}
187 187
188static inline void gem_enable_ints(struct gem *gp) 188static inline void gem_enable_ints(struct gem *gp)
@@ -1687,9 +1687,9 @@ static void gem_init_phy(struct gem *gp)
1687 /* Some PHYs used by apple have problem getting back to us, 1687 /* Some PHYs used by apple have problem getting back to us,
1688 * we do an additional reset here 1688 * we do an additional reset here
1689 */ 1689 */
1690 phy_write(gp, MII_BMCR, BMCR_RESET); 1690 sungem_phy_write(gp, MII_BMCR, BMCR_RESET);
1691 msleep(20); 1691 msleep(20);
1692 if (phy_read(gp, MII_BMCR) != 0xffff) 1692 if (sungem_phy_read(gp, MII_BMCR) != 0xffff)
1693 break; 1693 break;
1694 if (i == 2) 1694 if (i == 2)
1695 netdev_warn(gp->dev, "GMAC PHY not responding !\n"); 1695 netdev_warn(gp->dev, "GMAC PHY not responding !\n");
@@ -2012,7 +2012,7 @@ static int gem_check_invariants(struct gem *gp)
2012 2012
2013 for (i = 0; i < 32; i++) { 2013 for (i = 0; i < 32; i++) {
2014 gp->mii_phy_addr = i; 2014 gp->mii_phy_addr = i;
2015 if (phy_read(gp, MII_BMCR) != 0xffff) 2015 if (sungem_phy_read(gp, MII_BMCR) != 0xffff)
2016 break; 2016 break;
2017 } 2017 }
2018 if (i == 32) { 2018 if (i == 32) {
@@ -2696,13 +2696,13 @@ static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2696 /* Fallthrough... */ 2696 /* Fallthrough... */
2697 2697
2698 case SIOCGMIIREG: /* Read MII PHY register. */ 2698 case SIOCGMIIREG: /* Read MII PHY register. */
2699 data->val_out = __phy_read(gp, data->phy_id & 0x1f, 2699 data->val_out = __sungem_phy_read(gp, data->phy_id & 0x1f,
2700 data->reg_num & 0x1f); 2700 data->reg_num & 0x1f);
2701 rc = 0; 2701 rc = 0;
2702 break; 2702 break;
2703 2703
2704 case SIOCSMIIREG: /* Write MII PHY register. */ 2704 case SIOCSMIIREG: /* Write MII PHY register. */
2705 __phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f, 2705 __sungem_phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f,
2706 data->val_in); 2706 data->val_in);
2707 rc = 0; 2707 rc = 0;
2708 break; 2708 break;
@@ -2933,8 +2933,8 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2933 2933
2934 /* Fill up the mii_phy structure (even if we won't use it) */ 2934 /* Fill up the mii_phy structure (even if we won't use it) */
2935 gp->phy_mii.dev = dev; 2935 gp->phy_mii.dev = dev;
2936 gp->phy_mii.mdio_read = _phy_read; 2936 gp->phy_mii.mdio_read = _sungem_phy_read;
2937 gp->phy_mii.mdio_write = _phy_write; 2937 gp->phy_mii.mdio_write = _sungem_phy_write;
2938#ifdef CONFIG_PPC_PMAC 2938#ifdef CONFIG_PPC_PMAC
2939 gp->phy_mii.platform_data = gp->of_node; 2939 gp->phy_mii.platform_data = gp->of_node;
2940#endif 2940#endif
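
The sungem hunks are a mechanical rename, done because phylib provides generic phy_read()/phy_write() helpers in <linux/phy.h> and a driver's identically named statics clash with them (sungem_phy.c at the end of this diff gets the same treatment). A minimal sketch of the prefixed-accessor pattern; types are simplified and "mydrv" is a hypothetical driver name.

#include <stdio.h>

struct mydrv_priv {
	int (*mdio_read)(void *dev, int phy_addr, int reg);
	void *dev;
	int phy_addr;
};

/* Driver-prefixed accessor: cannot collide with phylib's phy_read() */
static int mydrv_phy_read(struct mydrv_priv *p, int reg)
{
	return p->mdio_read(p->dev, p->phy_addr, reg);
}

static int fake_mdio_read(void *dev, int phy_addr, int reg)
{
	(void)dev;
	return (phy_addr << 8) | reg;
}

int main(void)
{
	struct mydrv_priv p = { .mdio_read = fake_mdio_read, .phy_addr = 1 };

	printf("BMCR = %#x\n", mydrv_phy_read(&p, 0x00));
	return 0;
}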
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 999fb72688d2..03b409988566 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -2232,18 +2232,24 @@ static int cpsw_probe(struct platform_device *pdev)
2232 } 2232 }
2233 2233
2234 while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { 2234 while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
2235 for (i = res->start; i <= res->end; i++) { 2235 if (k >= ARRAY_SIZE(priv->irqs_table)) {
2236 if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0, 2236 ret = -EINVAL;
2237 dev_name(&pdev->dev), priv)) { 2237 goto clean_ale_ret;
2238 dev_err(priv->dev, "error attaching irq\n");
2239 goto clean_ale_ret;
2240 }
2241 priv->irqs_table[k] = i;
2242 priv->num_irqs = k + 1;
2243 } 2238 }
2239
2240 ret = devm_request_irq(&pdev->dev, res->start, cpsw_interrupt,
2241 0, dev_name(&pdev->dev), priv);
2242 if (ret < 0) {
2243 dev_err(priv->dev, "error attaching irq (%d)\n", ret);
2244 goto clean_ale_ret;
2245 }
2246
2247 priv->irqs_table[k] = res->start;
2244 k++; 2248 k++;
2245 } 2249 }
2246 2250
2251 priv->num_irqs = k;
2252
2247 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 2253 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2248 2254
2249 ndev->netdev_ops = &cpsw_netdev_ops; 2255 ndev->netdev_ops = &cpsw_netdev_ops;
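
The reworked cpsw probe loop fixes two things at once: k is checked against ARRAY_SIZE(priv->irqs_table) before the table is written, and the real devm_request_irq() error code is propagated instead of being discarded. A standalone model of the bounds-checked collection pattern, with the resource iteration stubbed out.

#include <stdio.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Stand-in for platform_get_resource(): one IRQ number per index,
 * -1 when exhausted. Purely illustrative. */
static int get_irq_resource(int k)
{
	static const int irqs[] = { 40, 41, 42, 43, 44 };
	return (k < (int)ARRAY_SIZE(irqs)) ? irqs[k] : -1;
}

int main(void)
{
	int irqs_table[4];
	size_t num_irqs;
	int irq, k = 0;

	while ((irq = get_irq_resource(k)) >= 0) {
		/* Check capacity before writing, as the fixed driver does */
		if ((size_t)k >= ARRAY_SIZE(irqs_table)) {
			fprintf(stderr, "too many IRQ resources\n");
			return 1;
		}
		irqs_table[k] = irq;
		k++;
	}
	num_irqs = k;
	printf("collected %zu IRQs\n", num_irqs);
	return 0;
}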
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index d5e07def6a59..2f48f790c9b4 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -591,7 +591,7 @@ struct nvsp_message {
591 591
592#define NETVSC_RECEIVE_BUFFER_ID 0xcafe 592#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
593 593
594#define NETVSC_PACKET_SIZE 2048 594#define NETVSC_PACKET_SIZE 4096
595 595
596#define VRSS_SEND_TAB_SIZE 16 596#define VRSS_SEND_TAB_SIZE 16
597 597
@@ -642,7 +642,7 @@ struct netvsc_device {
642 int ring_size; 642 int ring_size;
643 643
644 /* The primary channel callback buffer */ 644 /* The primary channel callback buffer */
645 unsigned char cb_buffer[NETVSC_PACKET_SIZE]; 645 unsigned char *cb_buffer;
646 /* The sub channel callback buffer */ 646 /* The sub channel callback buffer */
647 unsigned char *sub_cb_buf; 647 unsigned char *sub_cb_buf;
648}; 648};
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 66979cf7fca6..977984bc238a 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -42,6 +42,12 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device)
42 if (!net_device) 42 if (!net_device)
43 return NULL; 43 return NULL;
44 44
45 net_device->cb_buffer = kzalloc(NETVSC_PACKET_SIZE, GFP_KERNEL);
46 if (!net_device->cb_buffer) {
47 kfree(net_device);
48 return NULL;
49 }
50
45 init_waitqueue_head(&net_device->wait_drain); 51 init_waitqueue_head(&net_device->wait_drain);
46 net_device->start_remove = false; 52 net_device->start_remove = false;
47 net_device->destroy = false; 53 net_device->destroy = false;
@@ -52,6 +58,12 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device)
52 return net_device; 58 return net_device;
53} 59}
54 60
61static void free_netvsc_device(struct netvsc_device *nvdev)
62{
63 kfree(nvdev->cb_buffer);
64 kfree(nvdev);
65}
66
55static struct netvsc_device *get_outbound_net_device(struct hv_device *device) 67static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
56{ 68{
57 struct netvsc_device *net_device; 69 struct netvsc_device *net_device;
@@ -551,7 +563,7 @@ int netvsc_device_remove(struct hv_device *device)
551 if (net_device->sub_cb_buf) 563 if (net_device->sub_cb_buf)
552 vfree(net_device->sub_cb_buf); 564 vfree(net_device->sub_cb_buf);
553 565
554 kfree(net_device); 566 free_netvsc_device(net_device);
555 return 0; 567 return 0;
556} 568}
557 569
@@ -1042,10 +1054,8 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
1042 struct net_device *ndev; 1054 struct net_device *ndev;
1043 1055
1044 net_device = alloc_net_device(device); 1056 net_device = alloc_net_device(device);
1045 if (!net_device) { 1057 if (!net_device)
1046 ret = -ENOMEM; 1058 return -ENOMEM;
1047 goto cleanup;
1048 }
1049 1059
1050 net_device->ring_size = ring_size; 1060 net_device->ring_size = ring_size;
1051 1061
@@ -1093,7 +1103,7 @@ close:
1093 vmbus_close(device->channel); 1103 vmbus_close(device->channel);
1094 1104
1095cleanup: 1105cleanup:
1096 kfree(net_device); 1106 free_netvsc_device(net_device);
1097 1107
1098 return ret; 1108 return ret;
1099} 1109}
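
NETVSC_PACKET_SIZE doubles to 4096 and cb_buffer becomes a separately allocated member, so every teardown path must go through the new free_netvsc_device() helper or the buffer leaks. A standalone model of that paired alloc/free idiom; names are simplified and calloc() stands in for kzalloc().

#include <stdlib.h>

struct nv_dev {
	unsigned char *cb_buffer;
	/* ... other fields ... */
};

static struct nv_dev *alloc_nv_dev(size_t buf_len)
{
	struct nv_dev *d = calloc(1, sizeof(*d));

	if (!d)
		return NULL;
	d->cb_buffer = calloc(1, buf_len);
	if (!d->cb_buffer) {
		free(d);	/* unwind the partial allocation */
		return NULL;
	}
	return d;
}

static void free_nv_dev(struct nv_dev *d)
{
	if (!d)
		return;
	free(d->cb_buffer);	/* member first, then the owner */
	free(d);
}

int main(void)
{
	struct nv_dev *d = alloc_nv_dev(4096);
	int ok = d != NULL;

	free_nv_dev(d);
	return ok ? 0 : 1;
}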
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 65de0cab8d07..14afa4f24424 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -159,8 +159,6 @@ config MDIO_OCTEON
159config MDIO_SUN4I 159config MDIO_SUN4I
160 tristate "Allwinner sun4i MDIO interface support" 160 tristate "Allwinner sun4i MDIO interface support"
161 depends on ARCH_SUNXI 161 depends on ARCH_SUNXI
162 select REGULATOR
163 select REGULATOR_FIXED_VOLTAGE
164 help 162 help
165 This driver supports the MDIO interface found in the network 163 This driver supports the MDIO interface found in the network
166 interface units of the Allwinner SoC that have an EMAC (A10, 164 interface units of the Allwinner SoC that have an EMAC (A10,
@@ -205,6 +203,14 @@ config MDIO_BUS_MUX_MMIOREG
205 203
206 Currently, only 8-bit registers are supported. 204 Currently, only 8-bit registers are supported.
207 205
206config MDIO_BCM_UNIMAC
207 tristate "Broadcom UniMAC MDIO bus controller"
208 help
209 This module provides a driver for the Broadcom UniMAC MDIO busses.
210 This hardware can be found in the Broadcom GENET Ethernet MAC
211 controllers as well as some Broadcom Ethernet switches such as the
212 Starfighter 2 switches.
213
208endif # PHYLIB 214endif # PHYLIB
209 215
210config MICREL_KS8995MA 216config MICREL_KS8995MA
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 7dc3d5b304cf..eb3b18b5978b 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -34,3 +34,4 @@ obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
34obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o 34obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o
35obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o 35obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
36obj-$(CONFIG_AMD_XGBE_PHY) += amd-xgbe-phy.o 36obj-$(CONFIG_AMD_XGBE_PHY) += amd-xgbe-phy.o
37obj-$(CONFIG_MDIO_BCM_UNIMAC) += mdio-bcm-unimac.o
diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c
index f3230eef41fd..c456559f6e7f 100644
--- a/drivers/net/phy/amd-xgbe-phy.c
+++ b/drivers/net/phy/amd-xgbe-phy.c
@@ -75,7 +75,6 @@
75#include <linux/of_device.h> 75#include <linux/of_device.h>
76#include <linux/uaccess.h> 76#include <linux/uaccess.h>
77 77
78
79MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>"); 78MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
80MODULE_LICENSE("Dual BSD/GPL"); 79MODULE_LICENSE("Dual BSD/GPL");
81MODULE_VERSION("1.0.0-a"); 80MODULE_VERSION("1.0.0-a");
@@ -100,9 +99,11 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
100#ifndef MDIO_PMA_10GBR_PMD_CTRL 99#ifndef MDIO_PMA_10GBR_PMD_CTRL
101#define MDIO_PMA_10GBR_PMD_CTRL 0x0096 100#define MDIO_PMA_10GBR_PMD_CTRL 0x0096
102#endif 101#endif
102
103#ifndef MDIO_PMA_10GBR_FEC_CTRL 103#ifndef MDIO_PMA_10GBR_FEC_CTRL
104#define MDIO_PMA_10GBR_FEC_CTRL 0x00ab 104#define MDIO_PMA_10GBR_FEC_CTRL 0x00ab
105#endif 105#endif
106
106#ifndef MDIO_AN_XNP 107#ifndef MDIO_AN_XNP
107#define MDIO_AN_XNP 0x0016 108#define MDIO_AN_XNP 0x0016
108#endif 109#endif
@@ -110,14 +111,23 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
110#ifndef MDIO_AN_INTMASK 111#ifndef MDIO_AN_INTMASK
111#define MDIO_AN_INTMASK 0x8001 112#define MDIO_AN_INTMASK 0x8001
112#endif 113#endif
114
113#ifndef MDIO_AN_INT 115#ifndef MDIO_AN_INT
114#define MDIO_AN_INT 0x8002 116#define MDIO_AN_INT 0x8002
115#endif 117#endif
116 118
119#ifndef MDIO_AN_KR_CTRL
120#define MDIO_AN_KR_CTRL 0x8003
121#endif
122
117#ifndef MDIO_CTRL1_SPEED1G 123#ifndef MDIO_CTRL1_SPEED1G
118#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100) 124#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
119#endif 125#endif
120 126
127#ifndef MDIO_KR_CTRL_PDETECT
128#define MDIO_KR_CTRL_PDETECT 0x01
129#endif
130
121/* SerDes integration register offsets */ 131/* SerDes integration register offsets */
122#define SIR0_KR_RT_1 0x002c 132#define SIR0_KR_RT_1 0x002c
123#define SIR0_STATUS 0x0040 133#define SIR0_STATUS 0x0040
@@ -161,7 +171,6 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
161#define SPEED_1000_TXAMP 0xf 171#define SPEED_1000_TXAMP 0xf
162#define SPEED_1000_WORD 0x1 172#define SPEED_1000_WORD 0x1
163 173
164
165/* SerDes RxTx register offsets */ 174/* SerDes RxTx register offsets */
166#define RXTX_REG20 0x0050 175#define RXTX_REG20 0x0050
167#define RXTX_REG114 0x01c8 176#define RXTX_REG114 0x01c8
@@ -255,7 +264,6 @@ do { \
255 XSIR1_IOWRITE((_priv), _reg, reg_val); \ 264 XSIR1_IOWRITE((_priv), _reg, reg_val); \
256} while (0) 265} while (0)
257 266
258
259/* Macros for reading or writing SerDes RxTx registers 267/* Macros for reading or writing SerDes RxTx registers
260 * The ioread macros will get bit fields or full values using the 268 * The ioread macros will get bit fields or full values using the
261 * register definitions formed using the input names 269 * register definitions formed using the input names
@@ -283,7 +291,6 @@ do { \
283 XRXTX_IOWRITE((_priv), _reg, reg_val); \ 291 XRXTX_IOWRITE((_priv), _reg, reg_val); \
284} while (0) 292} while (0)
285 293
286
287enum amd_xgbe_phy_an { 294enum amd_xgbe_phy_an {
288 AMD_XGBE_AN_READY = 0, 295 AMD_XGBE_AN_READY = 0,
289 AMD_XGBE_AN_START, 296 AMD_XGBE_AN_START,
@@ -331,7 +338,6 @@ struct amd_xgbe_phy_priv {
331 338
332 /* Maintain link status for re-starting auto-negotiation */ 339 /* Maintain link status for re-starting auto-negotiation */
333 unsigned int link; 340 unsigned int link;
334 enum amd_xgbe_phy_mode mode;
335 unsigned int speed_set; 341 unsigned int speed_set;
336 342
337 /* Auto-negotiation state machine support */ 343 /* Auto-negotiation state machine support */
@@ -342,6 +348,7 @@ struct amd_xgbe_phy_priv {
342 enum amd_xgbe_phy_rx kx_state; 348 enum amd_xgbe_phy_rx kx_state;
343 struct work_struct an_work; 349 struct work_struct an_work;
344 struct workqueue_struct *an_workqueue; 350 struct workqueue_struct *an_workqueue;
351 unsigned int parallel_detect;
345}; 352};
346 353
347static int amd_xgbe_an_enable_kr_training(struct phy_device *phydev) 354static int amd_xgbe_an_enable_kr_training(struct phy_device *phydev)
@@ -468,8 +475,6 @@ static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
468 475
469 amd_xgbe_phy_serdes_complete_ratechange(phydev); 476 amd_xgbe_phy_serdes_complete_ratechange(phydev);
470 477
471 priv->mode = AMD_XGBE_MODE_KR;
472
473 return 0; 478 return 0;
474} 479}
475 480
@@ -518,8 +523,6 @@ static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev)
518 523
519 amd_xgbe_phy_serdes_complete_ratechange(phydev); 524 amd_xgbe_phy_serdes_complete_ratechange(phydev);
520 525
521 priv->mode = AMD_XGBE_MODE_KX;
522
523 return 0; 526 return 0;
524} 527}
525 528
@@ -568,18 +571,43 @@ static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev)
568 571
569 amd_xgbe_phy_serdes_complete_ratechange(phydev); 572 amd_xgbe_phy_serdes_complete_ratechange(phydev);
570 573
571 priv->mode = AMD_XGBE_MODE_KX; 574 return 0;
575}
576
577static int amd_xgbe_phy_cur_mode(struct phy_device *phydev,
578 enum amd_xgbe_phy_mode *mode)
579{
580 int ret;
581
582 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
583 if (ret < 0)
584 return ret;
585
586 if ((ret & MDIO_PCS_CTRL2_TYPE) == MDIO_PCS_CTRL2_10GBR)
587 *mode = AMD_XGBE_MODE_KR;
588 else
589 *mode = AMD_XGBE_MODE_KX;
572 590
573 return 0; 591 return 0;
574} 592}
575 593
594static bool amd_xgbe_phy_in_kr_mode(struct phy_device *phydev)
595{
596 enum amd_xgbe_phy_mode mode;
597
598 if (amd_xgbe_phy_cur_mode(phydev, &mode))
599 return false;
600
601 return (mode == AMD_XGBE_MODE_KR);
602}
603
576static int amd_xgbe_phy_switch_mode(struct phy_device *phydev) 604static int amd_xgbe_phy_switch_mode(struct phy_device *phydev)
577{ 605{
578 struct amd_xgbe_phy_priv *priv = phydev->priv; 606 struct amd_xgbe_phy_priv *priv = phydev->priv;
579 int ret; 607 int ret;
580 608
581 /* If we are in KR switch to KX, and vice-versa */ 609 /* If we are in KR switch to KX, and vice-versa */
582 if (priv->mode == AMD_XGBE_MODE_KR) { 610 if (amd_xgbe_phy_in_kr_mode(phydev)) {
583 if (priv->speed_set == AMD_XGBE_PHY_SPEEDSET_1000_10000) 611 if (priv->speed_set == AMD_XGBE_PHY_SPEEDSET_1000_10000)
584 ret = amd_xgbe_phy_gmii_mode(phydev); 612 ret = amd_xgbe_phy_gmii_mode(phydev);
585 else 613 else
@@ -591,15 +619,20 @@ static int amd_xgbe_phy_switch_mode(struct phy_device *phydev)
591 return ret; 619 return ret;
592} 620}
593 621
594static enum amd_xgbe_phy_an amd_xgbe_an_switch_mode(struct phy_device *phydev) 622static int amd_xgbe_phy_set_mode(struct phy_device *phydev,
623 enum amd_xgbe_phy_mode mode)
595{ 624{
625 enum amd_xgbe_phy_mode cur_mode;
596 int ret; 626 int ret;
597 627
598 ret = amd_xgbe_phy_switch_mode(phydev); 628 ret = amd_xgbe_phy_cur_mode(phydev, &cur_mode);
599 if (ret < 0) 629 if (ret)
600 return AMD_XGBE_AN_ERROR; 630 return ret;
601 631
602 return AMD_XGBE_AN_START; 632 if (mode != cur_mode)
633 ret = amd_xgbe_phy_switch_mode(phydev);
634
635 return ret;
603} 636}
604 637
605static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev, 638static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
@@ -610,8 +643,8 @@ static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
610 643
611 *state = AMD_XGBE_RX_COMPLETE; 644 *state = AMD_XGBE_RX_COMPLETE;
612 645
613 /* If we're in KX mode then we're done */ 646 /* If we're not in KR mode then we're done */
614 if (priv->mode == AMD_XGBE_MODE_KX) 647 if (!amd_xgbe_phy_in_kr_mode(phydev))
615 return AMD_XGBE_AN_EVENT; 648 return AMD_XGBE_AN_EVENT;
616 649
617 /* Enable/Disable FEC */ 650 /* Enable/Disable FEC */
@@ -669,7 +702,6 @@ static enum amd_xgbe_phy_an amd_xgbe_an_tx_xnp(struct phy_device *phydev,
669static enum amd_xgbe_phy_an amd_xgbe_an_rx_bpa(struct phy_device *phydev, 702static enum amd_xgbe_phy_an amd_xgbe_an_rx_bpa(struct phy_device *phydev,
670 enum amd_xgbe_phy_rx *state) 703 enum amd_xgbe_phy_rx *state)
671{ 704{
672 struct amd_xgbe_phy_priv *priv = phydev->priv;
673 unsigned int link_support; 705 unsigned int link_support;
674 int ret, ad_reg, lp_reg; 706 int ret, ad_reg, lp_reg;
675 707
@@ -679,9 +711,9 @@ static enum amd_xgbe_phy_an amd_xgbe_an_rx_bpa(struct phy_device *phydev,
679 return AMD_XGBE_AN_ERROR; 711 return AMD_XGBE_AN_ERROR;
680 712
681 /* Check for a supported mode, otherwise restart in a different one */ 713 /* Check for a supported mode, otherwise restart in a different one */
682 link_support = (priv->mode == AMD_XGBE_MODE_KR) ? 0x80 : 0x20; 714 link_support = amd_xgbe_phy_in_kr_mode(phydev) ? 0x80 : 0x20;
683 if (!(ret & link_support)) 715 if (!(ret & link_support))
684 return amd_xgbe_an_switch_mode(phydev); 716 return AMD_XGBE_AN_INCOMPAT_LINK;
685 717
686 /* Check Extended Next Page support */ 718 /* Check Extended Next Page support */
687 ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE); 719 ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
@@ -722,7 +754,7 @@ static enum amd_xgbe_phy_an amd_xgbe_an_start(struct phy_device *phydev)
722 int ret; 754 int ret;
723 755
724 /* Be sure we aren't looping trying to negotiate */ 756 /* Be sure we aren't looping trying to negotiate */
725 if (priv->mode == AMD_XGBE_MODE_KR) { 757 if (amd_xgbe_phy_in_kr_mode(phydev)) {
726 if (priv->kr_state != AMD_XGBE_RX_READY) 758 if (priv->kr_state != AMD_XGBE_RX_READY)
727 return AMD_XGBE_AN_NO_LINK; 759 return AMD_XGBE_AN_NO_LINK;
728 priv->kr_state = AMD_XGBE_RX_BPA; 760 priv->kr_state = AMD_XGBE_RX_BPA;
@@ -785,6 +817,13 @@ static enum amd_xgbe_phy_an amd_xgbe_an_start(struct phy_device *phydev)
785 /* Enable and start auto-negotiation */ 817 /* Enable and start auto-negotiation */
786 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0); 818 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
787 819
820 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_KR_CTRL);
821 if (ret < 0)
822 return AMD_XGBE_AN_ERROR;
823
824 ret |= MDIO_KR_CTRL_PDETECT;
825 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_KR_CTRL, ret);
826
788 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1); 827 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
789 if (ret < 0) 828 if (ret < 0)
790 return AMD_XGBE_AN_ERROR; 829 return AMD_XGBE_AN_ERROR;
@@ -825,8 +864,8 @@ static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev)
825 enum amd_xgbe_phy_rx *state; 864 enum amd_xgbe_phy_rx *state;
826 int ret; 865 int ret;
827 866
828 state = (priv->mode == AMD_XGBE_MODE_KR) ? &priv->kr_state 867 state = amd_xgbe_phy_in_kr_mode(phydev) ? &priv->kr_state
829 : &priv->kx_state; 868 : &priv->kx_state;
830 869
831 switch (*state) { 870 switch (*state) {
832 case AMD_XGBE_RX_BPA: 871 case AMD_XGBE_RX_BPA:
@@ -846,7 +885,13 @@ static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev)
846 885
847static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev) 886static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev)
848{ 887{
849 return amd_xgbe_an_switch_mode(phydev); 888 int ret;
889
890 ret = amd_xgbe_phy_switch_mode(phydev);
891 if (ret)
892 return AMD_XGBE_AN_ERROR;
893
894 return AMD_XGBE_AN_START;
850} 895}
851 896
852static void amd_xgbe_an_state_machine(struct work_struct *work) 897static void amd_xgbe_an_state_machine(struct work_struct *work)
@@ -859,6 +904,10 @@ static void amd_xgbe_an_state_machine(struct work_struct *work)
859 int sleep; 904 int sleep;
860 unsigned int an_supported = 0; 905 unsigned int an_supported = 0;
861 906
907 /* Start in KX mode */
908 if (amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX))
909 priv->an_state = AMD_XGBE_AN_ERROR;
910
862 while (1) { 911 while (1) {
863 mutex_lock(&priv->an_mutex); 912 mutex_lock(&priv->an_mutex);
864 913
@@ -866,8 +915,9 @@ static void amd_xgbe_an_state_machine(struct work_struct *work)
866 915
867 switch (priv->an_state) { 916 switch (priv->an_state) {
868 case AMD_XGBE_AN_START: 917 case AMD_XGBE_AN_START:
869 priv->an_state = amd_xgbe_an_start(phydev);
870 an_supported = 0; 918 an_supported = 0;
919 priv->parallel_detect = 0;
920 priv->an_state = amd_xgbe_an_start(phydev);
871 break; 921 break;
872 922
873 case AMD_XGBE_AN_EVENT: 923 case AMD_XGBE_AN_EVENT:
@@ -884,6 +934,7 @@ static void amd_xgbe_an_state_machine(struct work_struct *work)
884 break; 934 break;
885 935
886 case AMD_XGBE_AN_COMPLETE: 936 case AMD_XGBE_AN_COMPLETE:
937 priv->parallel_detect = an_supported ? 0 : 1;
887 netdev_info(phydev->attached_dev, "%s successful\n", 938 netdev_info(phydev->attached_dev, "%s successful\n",
888 an_supported ? "Auto negotiation" 939 an_supported ? "Auto negotiation"
889 : "Parallel detection"); 940 : "Parallel detection");
@@ -1018,7 +1069,6 @@ static int amd_xgbe_phy_config_aneg(struct phy_device *phydev)
1018{ 1069{
1019 struct amd_xgbe_phy_priv *priv = phydev->priv; 1070 struct amd_xgbe_phy_priv *priv = phydev->priv;
1020 u32 mmd_mask = phydev->c45_ids.devices_in_package; 1071 u32 mmd_mask = phydev->c45_ids.devices_in_package;
1021 int ret;
1022 1072
1023 if (phydev->autoneg != AUTONEG_ENABLE) 1073 if (phydev->autoneg != AUTONEG_ENABLE)
1024 return amd_xgbe_phy_setup_forced(phydev); 1074 return amd_xgbe_phy_setup_forced(phydev);
@@ -1027,11 +1077,6 @@ static int amd_xgbe_phy_config_aneg(struct phy_device *phydev)
1027 if (!(mmd_mask & MDIO_DEVS_AN)) 1077 if (!(mmd_mask & MDIO_DEVS_AN))
1028 return -EINVAL; 1078 return -EINVAL;
1029 1079
1030 /* Get the current speed mode */
1031 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
1032 if (ret < 0)
1033 return ret;
1034
1035 /* Start/Restart the auto-negotiation state machine */ 1080 /* Start/Restart the auto-negotiation state machine */
1036 mutex_lock(&priv->an_mutex); 1081 mutex_lock(&priv->an_mutex);
1037 priv->an_result = AMD_XGBE_AN_READY; 1082 priv->an_result = AMD_XGBE_AN_READY;
@@ -1121,18 +1166,14 @@ static int amd_xgbe_phy_read_status(struct phy_device *phydev)
1121{ 1166{
1122 struct amd_xgbe_phy_priv *priv = phydev->priv; 1167 struct amd_xgbe_phy_priv *priv = phydev->priv;
1123 u32 mmd_mask = phydev->c45_ids.devices_in_package; 1168 u32 mmd_mask = phydev->c45_ids.devices_in_package;
1124 int ret, mode, ad_ret, lp_ret; 1169 int ret, ad_ret, lp_ret;
1125 1170
1126 ret = amd_xgbe_phy_update_link(phydev); 1171 ret = amd_xgbe_phy_update_link(phydev);
1127 if (ret) 1172 if (ret)
1128 return ret; 1173 return ret;
1129 1174
1130 mode = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2); 1175 if ((phydev->autoneg == AUTONEG_ENABLE) &&
1131 if (mode < 0) 1176 !priv->parallel_detect) {
1132 return mode;
1133 mode &= MDIO_PCS_CTRL2_TYPE;
1134
1135 if (phydev->autoneg == AUTONEG_ENABLE) {
1136 if (!(mmd_mask & MDIO_DEVS_AN)) 1177 if (!(mmd_mask & MDIO_DEVS_AN))
1137 return -EINVAL; 1178 return -EINVAL;
1138 1179
@@ -1163,40 +1204,39 @@ static int amd_xgbe_phy_read_status(struct phy_device *phydev)
1163 ad_ret &= lp_ret; 1204 ad_ret &= lp_ret;
1164 if (ad_ret & 0x80) { 1205 if (ad_ret & 0x80) {
1165 phydev->speed = SPEED_10000; 1206 phydev->speed = SPEED_10000;
1166 if (mode != MDIO_PCS_CTRL2_10GBR) { 1207 ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
1167 ret = amd_xgbe_phy_xgmii_mode(phydev); 1208 if (ret)
1168 if (ret < 0) 1209 return ret;
1169 return ret;
1170 }
1171 } else { 1210 } else {
1172 int (*mode_fcn)(struct phy_device *); 1211 switch (priv->speed_set) {
1173 1212 case AMD_XGBE_PHY_SPEEDSET_1000_10000:
1174 if (priv->speed_set ==
1175 AMD_XGBE_PHY_SPEEDSET_1000_10000) {
1176 phydev->speed = SPEED_1000; 1213 phydev->speed = SPEED_1000;
1177 mode_fcn = amd_xgbe_phy_gmii_mode; 1214 break;
1178 } else { 1215
1216 case AMD_XGBE_PHY_SPEEDSET_2500_10000:
1179 phydev->speed = SPEED_2500; 1217 phydev->speed = SPEED_2500;
1180 mode_fcn = amd_xgbe_phy_gmii_2500_mode; 1218 break;
1181 } 1219 }
1182 1220
1183 if (mode == MDIO_PCS_CTRL2_10GBR) { 1221 ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
1184 ret = mode_fcn(phydev); 1222 if (ret)
1185 if (ret < 0) 1223 return ret;
1186 return ret;
1187 }
1188 } 1224 }
1189 1225
1190 phydev->duplex = DUPLEX_FULL; 1226 phydev->duplex = DUPLEX_FULL;
1191 } else { 1227 } else {
1192 if (mode == MDIO_PCS_CTRL2_10GBR) { 1228 if (amd_xgbe_phy_in_kr_mode(phydev)) {
1193 phydev->speed = SPEED_10000; 1229 phydev->speed = SPEED_10000;
1194 } else { 1230 } else {
1195 if (priv->speed_set == 1231 switch (priv->speed_set) {
1196 AMD_XGBE_PHY_SPEEDSET_1000_10000) 1232 case AMD_XGBE_PHY_SPEEDSET_1000_10000:
1197 phydev->speed = SPEED_1000; 1233 phydev->speed = SPEED_1000;
1198 else 1234 break;
1235
1236 case AMD_XGBE_PHY_SPEEDSET_2500_10000:
1199 phydev->speed = SPEED_2500; 1237 phydev->speed = SPEED_2500;
1238 break;
1239 }
1200 } 1240 }
1201 phydev->duplex = DUPLEX_FULL; 1241 phydev->duplex = DUPLEX_FULL;
1202 phydev->pause = 0; 1242 phydev->pause = 0;
@@ -1329,14 +1369,6 @@ static int amd_xgbe_phy_probe(struct phy_device *phydev)
1329 1369
1330 priv->link = 1; 1370 priv->link = 1;
1331 1371
1332 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
1333 if (ret < 0)
1334 goto err_sir1;
1335 if ((ret & MDIO_PCS_CTRL2_TYPE) == MDIO_PCS_CTRL2_10GBR)
1336 priv->mode = AMD_XGBE_MODE_KR;
1337 else
1338 priv->mode = AMD_XGBE_MODE_KX;
1339
1340 mutex_init(&priv->an_mutex); 1372 mutex_init(&priv->an_mutex);
1341 INIT_WORK(&priv->an_work, amd_xgbe_an_state_machine); 1373 INIT_WORK(&priv->an_work, amd_xgbe_an_state_machine);
1342 priv->an_workqueue = create_singlethread_workqueue(wq_name); 1374 priv->an_workqueue = create_singlethread_workqueue(wq_name);
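
The thread running through the amd-xgbe-phy hunks is the removal of the cached priv->mode: amd_xgbe_phy_cur_mode() now derives KR vs KX from the PCS MDIO_CTRL2 type field on demand, and amd_xgbe_phy_set_mode() switches only when the requested mode differs from what the hardware reports. A standalone model of that query-instead-of-cache pattern; the register access is stubbed and the bit values are illustrative.

#include <stdio.h>

enum xgbe_mode { MODE_KR, MODE_KX };

#define PCS_CTRL2_TYPE  0x3
#define PCS_CTRL2_10GBR 0x0	/* illustrative values */

/* Stub for the MDIO read of the PCS control 2 register */
static int read_pcs_ctrl2(void)
{
	return PCS_CTRL2_10GBR;	/* pretend the hardware is in 10GBase-R */
}

static int cur_mode(enum xgbe_mode *mode)
{
	int ret = read_pcs_ctrl2();

	if (ret < 0)
		return ret;
	*mode = ((ret & PCS_CTRL2_TYPE) == PCS_CTRL2_10GBR) ? MODE_KR
							    : MODE_KX;
	return 0;
}

static int set_mode(enum xgbe_mode want)
{
	enum xgbe_mode cur;
	int ret = cur_mode(&cur);

	if (ret)
		return ret;
	if (want != cur)
		printf("switching mode\n");	/* would call the switch routine */
	return 0;
}

int main(void)
{
	return set_mode(MODE_KX);
}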
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index fdce1ea28790..09dd6e1dc6e1 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -14,6 +14,7 @@
14#include <linux/delay.h> 14#include <linux/delay.h>
15#include <linux/bitops.h> 15#include <linux/bitops.h>
16#include <linux/brcmphy.h> 16#include <linux/brcmphy.h>
17#include <linux/mdio.h>
17 18
18/* Broadcom BCM7xxx internal PHY registers */ 19/* Broadcom BCM7xxx internal PHY registers */
19#define MII_BCM7XXX_CHANNEL_WIDTH 0x2000 20#define MII_BCM7XXX_CHANNEL_WIDTH 0x2000
@@ -146,6 +147,53 @@ static int bcm7xxx_28nm_afe_config_init(struct phy_device *phydev)
146 return 0; 147 return 0;
147} 148}
148 149
150static int bcm7xxx_apd_enable(struct phy_device *phydev)
151{
152 int val;
153
154 /* Enable powering down of the DLL during auto-power down */
155 val = bcm54xx_shadow_read(phydev, BCM54XX_SHD_SCR3);
156 if (val < 0)
157 return val;
158
159 val |= BCM54XX_SHD_SCR3_DLLAPD_DIS;
160 bcm54xx_shadow_write(phydev, BCM54XX_SHD_SCR3, val);
161
162 /* Enable auto-power down */
163 val = bcm54xx_shadow_read(phydev, BCM54XX_SHD_APD);
164 if (val < 0)
165 return val;
166
167 val |= BCM54XX_SHD_APD_EN;
168 return bcm54xx_shadow_write(phydev, BCM54XX_SHD_APD, val);
169}
170
171static int bcm7xxx_eee_enable(struct phy_device *phydev)
172{
173 int val;
174
175 val = phy_read_mmd_indirect(phydev, BRCM_CL45VEN_EEE_CONTROL,
176 MDIO_MMD_AN, phydev->addr);
177 if (val < 0)
178 return val;
179
180 /* Enable general EEE feature at the PHY level */
181 val |= LPI_FEATURE_EN | LPI_FEATURE_EN_DIG1000X;
182
183 phy_write_mmd_indirect(phydev, BRCM_CL45VEN_EEE_CONTROL,
184 MDIO_MMD_AN, phydev->addr, val);
185
186 /* Advertise supported modes */
187 val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV,
188 MDIO_MMD_AN, phydev->addr);
189
190 val |= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T);
191 phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV,
192 MDIO_MMD_AN, phydev->addr, val);
193
194 return 0;
195}
196
149static int bcm7xxx_28nm_config_init(struct phy_device *phydev) 197static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
150{ 198{
151 int ret; 199 int ret;
@@ -154,7 +202,15 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
154 if (ret) 202 if (ret)
155 return ret; 203 return ret;
156 204
157 return bcm7xxx_28nm_afe_config_init(phydev); 205 ret = bcm7xxx_28nm_afe_config_init(phydev);
206 if (ret)
207 return ret;
208
209 ret = bcm7xxx_eee_enable(phydev);
210 if (ret)
211 return ret;
212
213 return bcm7xxx_apd_enable(phydev);
158} 214}
159 215
160static int bcm7xxx_28nm_resume(struct phy_device *phydev) 216static int bcm7xxx_28nm_resume(struct phy_device *phydev)
@@ -263,44 +319,28 @@ static int bcm7xxx_dummy_config_init(struct phy_device *phydev)
263 return 0; 319 return 0;
264} 320}
265 321
322#define BCM7XXX_28NM_GPHY(_oui, _name) \
323{ \
324 .phy_id = (_oui), \
325 .phy_id_mask = 0xfffffff0, \
326 .name = _name, \
327 .features = PHY_GBIT_FEATURES | \
328 SUPPORTED_Pause | SUPPORTED_Asym_Pause, \
329 .flags = PHY_IS_INTERNAL, \
330 .config_init = bcm7xxx_28nm_afe_config_init, \
331 .config_aneg = genphy_config_aneg, \
332 .read_status = genphy_read_status, \
333 .resume = bcm7xxx_28nm_resume, \
334 .driver = { .owner = THIS_MODULE }, \
335}
336
266static struct phy_driver bcm7xxx_driver[] = { 337static struct phy_driver bcm7xxx_driver[] = {
338 BCM7XXX_28NM_GPHY(PHY_ID_BCM7250, "Broadcom BCM7250"),
339 BCM7XXX_28NM_GPHY(PHY_ID_BCM7364, "Broadcom BCM7364"),
340 BCM7XXX_28NM_GPHY(PHY_ID_BCM7366, "Broadcom BCM7366"),
341 BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"),
342 BCM7XXX_28NM_GPHY(PHY_ID_BCM7445, "Broadcom BCM7445"),
267{ 343{
268 .phy_id = PHY_ID_BCM7366,
269 .phy_id_mask = 0xfffffff0,
270 .name = "Broadcom BCM7366",
271 .features = PHY_GBIT_FEATURES |
272 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
273 .flags = PHY_IS_INTERNAL,
274 .config_init = bcm7xxx_28nm_afe_config_init,
275 .config_aneg = genphy_config_aneg,
276 .read_status = genphy_read_status,
277 .resume = bcm7xxx_28nm_resume,
278 .driver = { .owner = THIS_MODULE },
279}, {
280 .phy_id = PHY_ID_BCM7439,
281 .phy_id_mask = 0xfffffff0,
282 .name = "Broadcom BCM7439",
283 .features = PHY_GBIT_FEATURES |
284 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
285 .flags = PHY_IS_INTERNAL,
286 .config_init = bcm7xxx_28nm_afe_config_init,
287 .config_aneg = genphy_config_aneg,
288 .read_status = genphy_read_status,
289 .resume = bcm7xxx_28nm_resume,
290 .driver = { .owner = THIS_MODULE },
291}, {
292 .phy_id = PHY_ID_BCM7445,
293 .phy_id_mask = 0xfffffff0,
294 .name = "Broadcom BCM7445",
295 .features = PHY_GBIT_FEATURES |
296 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
297 .flags = PHY_IS_INTERNAL,
298 .config_init = bcm7xxx_28nm_config_init,
299 .config_aneg = genphy_config_aneg,
300 .read_status = genphy_read_status,
301 .resume = bcm7xxx_28nm_afe_config_init,
302 .driver = { .owner = THIS_MODULE },
303}, {
304 .phy_id = PHY_BCM_OUI_4, 344 .phy_id = PHY_BCM_OUI_4,
305 .phy_id_mask = 0xffff0000, 345 .phy_id_mask = 0xffff0000,
306 .name = "Broadcom BCM7XXX 40nm", 346 .name = "Broadcom BCM7XXX 40nm",
@@ -329,6 +369,8 @@ static struct phy_driver bcm7xxx_driver[] = {
329} }; 369} };
330 370
331static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = { 371static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
372 { PHY_ID_BCM7250, 0xfffffff0, },
373 { PHY_ID_BCM7364, 0xfffffff0, },
332 { PHY_ID_BCM7366, 0xfffffff0, }, 374 { PHY_ID_BCM7366, 0xfffffff0, },
333 { PHY_ID_BCM7439, 0xfffffff0, }, 375 { PHY_ID_BCM7439, 0xfffffff0, },
334 { PHY_ID_BCM7445, 0xfffffff0, }, 376 { PHY_ID_BCM7445, 0xfffffff0, },
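
The BCM7XXX_28NM_GPHY() macro collapses three nearly identical phy_driver entries into one definition, which is what makes adding the BCM7250 and BCM7364 entries a one-liner each. The same table-generation idiom in standalone form; the IDs below are invented for the sketch.

#include <stdio.h>
#include <stddef.h>

struct drv_entry {
	unsigned int id;
	const char *name;
	int (*init)(void);
};

static int common_init(void) { return 0; }

/* One macro instantiates the boilerplate shared by every entry;
 * only the ID and the name vary. */
#define GPHY_ENTRY(_id, _name) \
	{ .id = (_id), .name = (_name), .init = common_init }

static const struct drv_entry table[] = {
	GPHY_ENTRY(0x00001250, "BCM7250"),
	GPHY_ENTRY(0x00001364, "BCM7364"),
	GPHY_ENTRY(0x00001366, "BCM7366"),
};

int main(void)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		printf("%#010x %s\n", table[i].id, table[i].name);
	return 0;
}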
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 34088d60da74..854f2c9a7b2b 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -25,132 +25,10 @@
25#define BRCM_PHY_REV(phydev) \ 25#define BRCM_PHY_REV(phydev) \
26 ((phydev)->drv->phy_id & ~((phydev)->drv->phy_id_mask)) 26 ((phydev)->drv->phy_id & ~((phydev)->drv->phy_id_mask))
27 27
28/*
29 * Broadcom LED source encodings. These are used in BCM5461, BCM5481,
30 * BCM5482, and possibly some others.
31 */
32#define BCM_LED_SRC_LINKSPD1 0x0
33#define BCM_LED_SRC_LINKSPD2 0x1
34#define BCM_LED_SRC_XMITLED 0x2
35#define BCM_LED_SRC_ACTIVITYLED 0x3
36#define BCM_LED_SRC_FDXLED 0x4
37#define BCM_LED_SRC_SLAVE 0x5
38#define BCM_LED_SRC_INTR 0x6
39#define BCM_LED_SRC_QUALITY 0x7
40#define BCM_LED_SRC_RCVLED 0x8
41#define BCM_LED_SRC_MULTICOLOR1 0xa
42#define BCM_LED_SRC_OPENSHORT 0xb
43#define BCM_LED_SRC_OFF 0xe /* Tied high */
44#define BCM_LED_SRC_ON 0xf /* Tied low */
45
46
47/*
48 * BCM5482: Shadow registers
49 * Shadow values go into bits [14:10] of register 0x1c to select a shadow
50 * register to access.
51 */
52/* 00101: Spare Control Register 3 */
53#define BCM54XX_SHD_SCR3 0x05
54#define BCM54XX_SHD_SCR3_DEF_CLK125 0x0001
55#define BCM54XX_SHD_SCR3_DLLAPD_DIS 0x0002
56#define BCM54XX_SHD_SCR3_TRDDAPD 0x0004
57
58/* 01010: Auto Power-Down */
59#define BCM54XX_SHD_APD 0x0a
60#define BCM54XX_SHD_APD_EN 0x0020
61
62#define BCM5482_SHD_LEDS1 0x0d /* 01101: LED Selector 1 */
63 /* LED3 / ~LINKSPD[2] selector */
64#define BCM5482_SHD_LEDS1_LED3(src) ((src & 0xf) << 4)
65 /* LED1 / ~LINKSPD[1] selector */
66#define BCM5482_SHD_LEDS1_LED1(src) ((src & 0xf) << 0)
67#define BCM54XX_SHD_RGMII_MODE 0x0b /* 01011: RGMII Mode Selector */
68#define BCM5482_SHD_SSD 0x14 /* 10100: Secondary SerDes control */
69#define BCM5482_SHD_SSD_LEDM 0x0008 /* SSD LED Mode enable */
70#define BCM5482_SHD_SSD_EN 0x0001 /* SSD enable */
71#define BCM5482_SHD_MODE 0x1f /* 11111: Mode Control Register */
72#define BCM5482_SHD_MODE_1000BX 0x0001 /* Enable 1000BASE-X registers */
73
74
75/*
76 * EXPANSION SHADOW ACCESS REGISTERS. (PHY REG 0x15, 0x16, and 0x17)
77 */
78#define MII_BCM54XX_EXP_AADJ1CH0 0x001f
79#define MII_BCM54XX_EXP_AADJ1CH0_SWP_ABCD_OEN 0x0200
80#define MII_BCM54XX_EXP_AADJ1CH0_SWSEL_THPF 0x0100
81#define MII_BCM54XX_EXP_AADJ1CH3 0x601f
82#define MII_BCM54XX_EXP_AADJ1CH3_ADCCKADJ 0x0002
83#define MII_BCM54XX_EXP_EXP08 0x0F08
84#define MII_BCM54XX_EXP_EXP08_RJCT_2MHZ 0x0001
85#define MII_BCM54XX_EXP_EXP08_EARLY_DAC_WAKE 0x0200
86#define MII_BCM54XX_EXP_EXP75 0x0f75
87#define MII_BCM54XX_EXP_EXP75_VDACCTRL 0x003c
88#define MII_BCM54XX_EXP_EXP75_CM_OSC 0x0001
89#define MII_BCM54XX_EXP_EXP96 0x0f96
90#define MII_BCM54XX_EXP_EXP96_MYST 0x0010
91#define MII_BCM54XX_EXP_EXP97 0x0f97
92#define MII_BCM54XX_EXP_EXP97_MYST 0x0c0c
93
94/*
95 * BCM5482: Secondary SerDes registers
96 */
97#define BCM5482_SSD_1000BX_CTL 0x00 /* 1000BASE-X Control */
98#define BCM5482_SSD_1000BX_CTL_PWRDOWN 0x0800 /* Power-down SSD */
99#define BCM5482_SSD_SGMII_SLAVE 0x15 /* SGMII Slave Register */
100#define BCM5482_SSD_SGMII_SLAVE_EN 0x0002 /* Slave mode enable */
101#define BCM5482_SSD_SGMII_SLAVE_AD 0x0001 /* Slave auto-detection */
102
103
104/*****************************************************************************/
105/* Fast Ethernet Transceiver definitions. */
106/*****************************************************************************/
107
108#define MII_BRCM_FET_INTREG 0x1a /* Interrupt register */
109#define MII_BRCM_FET_IR_MASK 0x0100 /* Mask all interrupts */
110#define MII_BRCM_FET_IR_LINK_EN 0x0200 /* Link status change enable */
111#define MII_BRCM_FET_IR_SPEED_EN 0x0400 /* Link speed change enable */
112#define MII_BRCM_FET_IR_DUPLEX_EN 0x0800 /* Duplex mode change enable */
113#define MII_BRCM_FET_IR_ENABLE 0x4000 /* Interrupt enable */
114
115#define MII_BRCM_FET_BRCMTEST 0x1f /* Brcm test register */
116#define MII_BRCM_FET_BT_SRE 0x0080 /* Shadow register enable */
117
118
119/*** Shadow register definitions ***/
120
121#define MII_BRCM_FET_SHDW_MISCCTRL 0x10 /* Shadow misc ctrl */
122#define MII_BRCM_FET_SHDW_MC_FAME 0x4000 /* Force Auto MDIX enable */
123
124#define MII_BRCM_FET_SHDW_AUXMODE4 0x1a /* Auxiliary mode 4 */
125#define MII_BRCM_FET_SHDW_AM4_LED_MASK 0x0003
126#define MII_BRCM_FET_SHDW_AM4_LED_MODE1 0x0001
127
128#define MII_BRCM_FET_SHDW_AUXSTAT2 0x1b /* Auxiliary status 2 */
129#define MII_BRCM_FET_SHDW_AS2_APDE 0x0020 /* Auto power down enable */
130
131
132MODULE_DESCRIPTION("Broadcom PHY driver"); 28MODULE_DESCRIPTION("Broadcom PHY driver");
133MODULE_AUTHOR("Maciej W. Rozycki"); 29MODULE_AUTHOR("Maciej W. Rozycki");
134MODULE_LICENSE("GPL"); 30MODULE_LICENSE("GPL");
135 31
136/*
137 * Indirect register access functions for the 1000BASE-T/100BASE-TX/10BASE-T
138 * 0x1c shadow registers.
139 */
140static int bcm54xx_shadow_read(struct phy_device *phydev, u16 shadow)
141{
142 phy_write(phydev, MII_BCM54XX_SHD, MII_BCM54XX_SHD_VAL(shadow));
143 return MII_BCM54XX_SHD_DATA(phy_read(phydev, MII_BCM54XX_SHD));
144}
145
146static int bcm54xx_shadow_write(struct phy_device *phydev, u16 shadow, u16 val)
147{
148 return phy_write(phydev, MII_BCM54XX_SHD,
149 MII_BCM54XX_SHD_WRITE |
150 MII_BCM54XX_SHD_VAL(shadow) |
151 MII_BCM54XX_SHD_DATA(val));
152}
153
154/* Indirect register access functions for the Expansion Registers */ 32/* Indirect register access functions for the Expansion Registers */
155static int bcm54xx_exp_read(struct phy_device *phydev, u16 regnum) 33static int bcm54xx_exp_read(struct phy_device *phydev, u16 regnum)
156{ 34{
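
Everything deleted from broadcom.c here is a move rather than a loss: the LED, shadow-register, and APD definitions leave the .c file for a shared header (bcm7xxx.c, which now calls bcm54xx_shadow_read()/write() in its APD hunk above, already includes <linux/brcmphy.h>). The encoding those helpers implement is spelled out in the removed comments: bits [14:10] of register 0x1c select a shadow register, bit 15 flags a write, and the low bits carry data. A standalone model of that encoding follows.

#include <stdio.h>
#include <stdint.h>

#define SHD_WRITE        (1u << 15)
#define SHD_VAL(shadow)  (((shadow) & 0x1f) << 10)
#define SHD_DATA(v)      ((v) & 0x3ff)

/* Build the value written to register 0x1c for a shadow write */
static uint16_t encode_shadow_write(uint16_t shadow, uint16_t val)
{
	return SHD_WRITE | SHD_VAL(shadow) | SHD_DATA(val);
}

int main(void)
{
	/* Select Auto Power-Down (shadow 0x0a, per the removed
	 * BCM54XX_SHD_APD) and set its enable bit (0x0020) */
	printf("reg 0x1c <- %#06x\n", encode_shadow_write(0x0a, 0x0020));
	return 0;
}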
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index c301e4cb37ca..87648b306551 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -721,7 +721,7 @@ static inline u16 exts_chan_to_edata(int ch)
721} 721}
722 722
723static int decode_evnt(struct dp83640_private *dp83640, 723static int decode_evnt(struct dp83640_private *dp83640,
724 void *data, u16 ests) 724 void *data, int len, u16 ests)
725{ 725{
726 struct phy_txts *phy_txts; 726 struct phy_txts *phy_txts;
727 struct ptp_clock_event event; 727 struct ptp_clock_event event;
@@ -729,6 +729,16 @@ static int decode_evnt(struct dp83640_private *dp83640,
729 int words = (ests >> EVNT_TS_LEN_SHIFT) & EVNT_TS_LEN_MASK; 729 int words = (ests >> EVNT_TS_LEN_SHIFT) & EVNT_TS_LEN_MASK;
730 u16 ext_status = 0; 730 u16 ext_status = 0;
731 731
732 /* calculate length of the event timestamp status message */
733 if (ests & MULT_EVNT)
734 parsed = (words + 2) * sizeof(u16);
735 else
736 parsed = (words + 1) * sizeof(u16);
737
738 /* check if enough data is available */
739 if (len < parsed)
740 return len;
741
732 if (ests & MULT_EVNT) { 742 if (ests & MULT_EVNT) {
733 ext_status = *(u16 *) data; 743 ext_status = *(u16 *) data;
734 data += sizeof(ext_status); 744 data += sizeof(ext_status);
@@ -747,10 +757,7 @@ static int decode_evnt(struct dp83640_private *dp83640,
747 dp83640->edata.ns_lo = phy_txts->ns_lo; 757 dp83640->edata.ns_lo = phy_txts->ns_lo;
748 } 758 }
749 759
750 if (ext_status) { 760 if (!ext_status) {
751 parsed = words + 2;
752 } else {
753 parsed = words + 1;
754 i = ((ests >> EVNT_NUM_SHIFT) & EVNT_NUM_MASK) - EXT_EVENT; 761 i = ((ests >> EVNT_NUM_SHIFT) & EVNT_NUM_MASK) - EXT_EVENT;
755 ext_status = exts_chan_to_edata(i); 762 ext_status = exts_chan_to_edata(i);
756 } 763 }
@@ -768,7 +775,7 @@ static int decode_evnt(struct dp83640_private *dp83640,
768 } 775 }
769 } 776 }
770 777
771 return parsed * sizeof(u16); 778 return parsed;
772} 779}
773 780
774static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts) 781static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts)
@@ -905,9 +912,9 @@ static void decode_status_frame(struct dp83640_private *dp83640,
905 decode_txts(dp83640, phy_txts); 912 decode_txts(dp83640, phy_txts);
906 size = sizeof(*phy_txts); 913 size = sizeof(*phy_txts);
907 914
908 } else if (PSF_EVNT == type && len >= sizeof(*phy_txts)) { 915 } else if (PSF_EVNT == type) {
909 916
910 size = decode_evnt(dp83640, ptr, ests); 917 size = decode_evnt(dp83640, ptr, len, ests);
911 918
912 } else { 919 } else {
913 size = 0; 920 size = 0;
@@ -1141,7 +1148,7 @@ static void dp83640_remove(struct phy_device *phydev)
1141 kfree_skb(skb); 1148 kfree_skb(skb);
1142 1149
1143 while ((skb = skb_dequeue(&dp83640->tx_queue)) != NULL) 1150 while ((skb = skb_dequeue(&dp83640->tx_queue)) != NULL)
1144 skb_complete_tx_timestamp(skb, NULL); 1151 kfree_skb(skb);
1145 1152
1146 clock = dp83640_clock_get(dp83640->clock); 1153 clock = dp83640_clock_get(dp83640->clock);
1147 1154
@@ -1398,7 +1405,7 @@ static void dp83640_txtstamp(struct phy_device *phydev,
1398 1405
1399 case HWTSTAMP_TX_ONESTEP_SYNC: 1406 case HWTSTAMP_TX_ONESTEP_SYNC:
1400 if (is_sync(skb, type)) { 1407 if (is_sync(skb, type)) {
1401 skb_complete_tx_timestamp(skb, NULL); 1408 kfree_skb(skb);
1402 return; 1409 return;
1403 } 1410 }
1404 /* fall through */ 1411 /* fall through */
@@ -1409,7 +1416,7 @@ static void dp83640_txtstamp(struct phy_device *phydev,
1409 1416
1410 case HWTSTAMP_TX_OFF: 1417 case HWTSTAMP_TX_OFF:
1411 default: 1418 default:
1412 skb_complete_tx_timestamp(skb, NULL); 1419 kfree_skb(skb);
1413 break; 1420 break;
1414 } 1421 }
1415} 1422}
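
The decode_evnt() change applies a standard defensive-parsing rule: compute the full record length first ((words + 1) u16s, one more when MULT_EVNT adds an extended-status word), refuse to parse a shorter buffer, and return the bytes actually consumed so decode_status_frame() can keep walking the frame. A standalone sketch of the check; the field layout is simplified.

#include <stdio.h>
#include <stdint.h>

/* Parse only if the buffer holds the whole record; report consumption */
static int decode_event(const uint8_t *data, int len, int words, int has_ext)
{
	int needed = (words + (has_ext ? 2 : 1)) * (int)sizeof(uint16_t);

	(void)data;
	if (len < needed)
		return len;	/* short buffer: stop, as the fix does */

	/* ... parse 'needed' bytes of timestamp data here ... */
	return needed;
}

int main(void)
{
	uint8_t buf[16] = { 0 };

	printf("consumed %d of %zu\n",
	       decode_event(buf, (int)sizeof(buf), 3, 1), sizeof(buf));
	return 0;
}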
diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c
index d60d875cb445..5b19fbbda6d4 100644
--- a/drivers/net/phy/fixed.c
+++ b/drivers/net/phy/fixed.c
@@ -124,6 +124,17 @@ static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num)
124 if (reg_num >= MII_REGS_NUM) 124 if (reg_num >= MII_REGS_NUM)
125 return -1; 125 return -1;
126 126
127 /* We do not support emulating Clause 45 over Clause 22 register reads
128 * return an error instead of bogus data.
129 */
130 switch (reg_num) {
131 case MII_MMD_CTRL:
132 case MII_MMD_DATA:
133 return -1;
134 default:
135 break;
136 }
137
127 list_for_each_entry(fp, &fmb->phys, node) { 138 list_for_each_entry(fp, &fmb->phys, node) {
128 if (fp->addr == phy_addr) { 139 if (fp->addr == phy_addr) {
129 /* Issue callback if user registered it. */ 140 /* Issue callback if user registered it. */
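
fixed.c emulates a Clause 22 register file in software, and as the new comment says it cannot model the Clause 45 indirection behind MII_MMD_CTRL/MII_MMD_DATA, so those two registers now read as an error rather than stale array contents. The guard in standalone form; the fallback value is a stand-in for the emulated register file.

#include <stdio.h>

#define MII_MMD_CTRL 0x0d	/* Clause 45 indirection registers */
#define MII_MMD_DATA 0x0e

static int fixed_read(int reg_num)
{
	/* Refuse the MMD indirection registers instead of returning
	 * bogus emulated data */
	switch (reg_num) {
	case MII_MMD_CTRL:
	case MII_MMD_DATA:
		return -1;
	default:
		break;
	}
	return 0xffff & reg_num;	/* stand-in for the register file */
}

int main(void)
{
	printf("read(0x0d) = %d\n", fixed_read(MII_MMD_CTRL));
	printf("read(0x01) = %#x\n", fixed_read(0x01));
	return 0;
}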
diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c
new file mode 100644
index 000000000000..5b643e588e8f
--- /dev/null
+++ b/drivers/net/phy/mdio-bcm-unimac.c
@@ -0,0 +1,213 @@
1/*
2 * Broadcom UniMAC MDIO bus controller driver
3 *
4 * Copyright (C) 2014, Broadcom Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/kernel.h>
13#include <linux/phy.h>
14#include <linux/platform_device.h>
15#include <linux/sched.h>
16#include <linux/module.h>
17#include <linux/io.h>
18#include <linux/delay.h>
19
20#include <linux/of.h>
21#include <linux/of_platform.h>
22#include <linux/of_mdio.h>
23
24#define MDIO_CMD 0x00
25#define MDIO_START_BUSY (1 << 29)
26#define MDIO_READ_FAIL (1 << 28)
27#define MDIO_RD (2 << 26)
28#define MDIO_WR (1 << 26)
29#define MDIO_PMD_SHIFT 21
30#define MDIO_PMD_MASK 0x1F
31#define MDIO_REG_SHIFT 16
32#define MDIO_REG_MASK 0x1F
33
34#define MDIO_CFG 0x04
35#define MDIO_C22 (1 << 0)
36#define MDIO_C45 0
37#define MDIO_CLK_DIV_SHIFT 4
38#define MDIO_CLK_DIV_MASK 0x3F
39#define MDIO_SUPP_PREAMBLE (1 << 12)
40
41struct unimac_mdio_priv {
42 struct mii_bus *mii_bus;
43 void __iomem *base;
44};
45
46static inline void unimac_mdio_start(struct unimac_mdio_priv *priv)
47{
48 u32 reg;
49
50 reg = __raw_readl(priv->base + MDIO_CMD);
51 reg |= MDIO_START_BUSY;
52 __raw_writel(reg, priv->base + MDIO_CMD);
53}
54
55static inline unsigned int unimac_mdio_busy(struct unimac_mdio_priv *priv)
56{
57 return __raw_readl(priv->base + MDIO_CMD) & MDIO_START_BUSY;
58}
59
60static int unimac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
61{
62 struct unimac_mdio_priv *priv = bus->priv;
63 unsigned int timeout = 1000;
64 u32 cmd;
65
66 /* Prepare the read operation */
67 cmd = MDIO_RD | (phy_id << MDIO_PMD_SHIFT) | (reg << MDIO_REG_SHIFT);
68 __raw_writel(cmd, priv->base + MDIO_CMD);
69
70 /* Start MDIO transaction */
71 unimac_mdio_start(priv);
72
73 do {
74 if (!unimac_mdio_busy(priv))
75 break;
76
77 usleep_range(1000, 2000);
78 } while (timeout--);
79
80 if (!timeout)
81 return -ETIMEDOUT;
82
83 cmd = __raw_readl(priv->base + MDIO_CMD);
84 if (cmd & MDIO_READ_FAIL)
85 return -EIO;
86
87 return cmd & 0xffff;
88}
89
90static int unimac_mdio_write(struct mii_bus *bus, int phy_id,
91 int reg, u16 val)
92{
93 struct unimac_mdio_priv *priv = bus->priv;
94 unsigned int timeout = 1000;
95 u32 cmd;
96
97 /* Prepare the write operation */
98 cmd = MDIO_WR | (phy_id << MDIO_PMD_SHIFT) |
99 (reg << MDIO_REG_SHIFT) | (0xffff & val);
100 __raw_writel(cmd, priv->base + MDIO_CMD);
101
102 unimac_mdio_start(priv);
103
104 do {
105 if (!unimac_mdio_busy(priv))
106 break;
107
108 usleep_range(1000, 2000);
109 } while (timeout--);
110
111 if (!timeout)
112 return -ETIMEDOUT;
113
114 return 0;
115}
116
117static int unimac_mdio_probe(struct platform_device *pdev)
118{
119 struct unimac_mdio_priv *priv;
120 struct device_node *np;
121 struct mii_bus *bus;
122 struct resource *r;
123 int ret;
124
125 np = pdev->dev.of_node;
126
127 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
128 if (!priv)
129 return -ENOMEM;
130
131 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
132
133 /* Just ioremap, as this MDIO block is usually integrated into an
134 * Ethernet MAC controller register range
135 */
136 priv->base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
137 if (!priv->base) {
138 dev_err(&pdev->dev, "failed to remap register\n");
139 return -ENOMEM;
140 }
141
142 priv->mii_bus = mdiobus_alloc();
143 if (!priv->mii_bus)
144 return -ENOMEM;
145
146 bus = priv->mii_bus;
147 bus->priv = priv;
148 bus->name = "unimac MII bus";
149 bus->parent = &pdev->dev;
150 bus->read = unimac_mdio_read;
151 bus->write = unimac_mdio_write;
152 snprintf(bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);
153
154 bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
155 if (!bus->irq) {
156 ret = -ENOMEM;
157 goto out_mdio_free;
158 }
159
160 ret = of_mdiobus_register(bus, np);
161 if (ret) {
162 dev_err(&pdev->dev, "MDIO bus registration failed\n");
163 goto out_mdio_irq;
164 }
165
166 platform_set_drvdata(pdev, priv);
167
168 dev_info(&pdev->dev, "Broadcom UniMAC MDIO bus at 0x%p\n", priv->base);
169
170 return 0;
171
172out_mdio_irq:
173 kfree(bus->irq);
174out_mdio_free:
175 mdiobus_free(bus);
176 return ret;
177}
178
179static int unimac_mdio_remove(struct platform_device *pdev)
180{
181 struct unimac_mdio_priv *priv = platform_get_drvdata(pdev);
182
183 mdiobus_unregister(priv->mii_bus);
184 kfree(priv->mii_bus->irq);
185 mdiobus_free(priv->mii_bus);
186
187 return 0;
188}
189
190static struct of_device_id unimac_mdio_ids[] = {
191 { .compatible = "brcm,genet-mdio-v4", },
192 { .compatible = "brcm,genet-mdio-v3", },
193 { .compatible = "brcm,genet-mdio-v2", },
194 { .compatible = "brcm,genet-mdio-v1", },
195 { .compatible = "brcm,unimac-mdio", },
196 { /* sentinel */ },
197};
198
199static struct platform_driver unimac_mdio_driver = {
200 .driver = {
201 .name = "unimac-mdio",
202 .owner = THIS_MODULE,
203 .of_match_table = unimac_mdio_ids,
204 },
205 .probe = unimac_mdio_probe,
206 .remove = unimac_mdio_remove,
207};
208module_platform_driver(unimac_mdio_driver);
209
210MODULE_AUTHOR("Broadcom Corporation");
211MODULE_DESCRIPTION("Broadcom UniMAC MDIO bus controller");
212MODULE_LICENSE("GPL");
213MODULE_ALIAS("platform:unimac-mdio");
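
Both transfer paths in the new driver share one idiom: write the command, set MDIO_START_BUSY to kick the transaction, then poll that bit under a bounded iteration count with usleep_range(), returning -ETIMEDOUT if it never clears. A standalone model of the bounded poll, restructured slightly so the timeout exit is unambiguous; the register read is stubbed.

#include <stdio.h>
#include <errno.h>

#define START_BUSY (1u << 29)

/* Stub for reading MDIO_CMD; clears the busy bit after a few polls to
 * simulate the hardware completing the transaction. */
static unsigned int read_cmd_reg(void)
{
	static int polls;
	return (++polls < 3) ? START_BUSY : 0;
}

static int wait_not_busy(void)
{
	unsigned int timeout = 1000;

	while (timeout--) {
		if (!(read_cmd_reg() & START_BUSY))
			return 0;
		/* the driver sleeps here via usleep_range(1000, 2000) */
	}
	return -ETIMEDOUT;
}

int main(void)
{
	printf("wait_not_busy() = %d\n", wait_not_busy());
	return 0;
}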
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 4eaadcfcb0fe..50051f271b10 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -553,8 +553,14 @@ static ssize_t
553phy_interface_show(struct device *dev, struct device_attribute *attr, char *buf) 553phy_interface_show(struct device *dev, struct device_attribute *attr, char *buf)
554{ 554{
555 struct phy_device *phydev = to_phy_device(dev); 555 struct phy_device *phydev = to_phy_device(dev);
556 const char *mode = NULL;
556 557
557 return sprintf(buf, "%s\n", phy_modes(phydev->interface)); 558 if (phy_is_internal(phydev))
559 mode = "internal";
560 else
561 mode = phy_modes(phydev->interface);
562
563 return sprintf(buf, "%s\n", mode);
558} 564}
559static DEVICE_ATTR_RO(phy_interface); 565static DEVICE_ATTR_RO(phy_interface);
560 566
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index a854d38c231d..1dfffdc9dfc3 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -955,7 +955,7 @@ static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad,
955 * 3) Write reg 13 // MMD Data Command for MMD DEVAD 955 * 3) Write reg 13 // MMD Data Command for MMD DEVAD
956 * 3) Read reg 14 // Read MMD data 956 * 3) Read reg 14 // Read MMD data
957 */ 957 */
958static int phy_read_mmd_indirect(struct phy_device *phydev, int prtad, 958int phy_read_mmd_indirect(struct phy_device *phydev, int prtad,
959 int devad, int addr) 959 int devad, int addr)
960{ 960{
961 struct phy_driver *phydrv = phydev->drv; 961 struct phy_driver *phydrv = phydev->drv;
@@ -971,6 +971,7 @@ static int phy_read_mmd_indirect(struct phy_device *phydev, int prtad,
971 } 971 }
972 return value; 972 return value;
973} 973}
974EXPORT_SYMBOL(phy_read_mmd_indirect);
974 975
975/** 976/**
976 * phy_write_mmd_indirect - writes data to the MMD registers 977 * phy_write_mmd_indirect - writes data to the MMD registers
@@ -988,7 +989,7 @@ static int phy_read_mmd_indirect(struct phy_device *phydev, int prtad,
988 * 3) Write reg 13 // MMD Data Command for MMD DEVAD 989 * 3) Write reg 13 // MMD Data Command for MMD DEVAD
989 * 3) Write reg 14 // Write MMD data 990 * 3) Write reg 14 // Write MMD data
990 */ 991 */
991static void phy_write_mmd_indirect(struct phy_device *phydev, int prtad, 992void phy_write_mmd_indirect(struct phy_device *phydev, int prtad,
992 int devad, int addr, u32 data) 993 int devad, int addr, u32 data)
993{ 994{
994 struct phy_driver *phydrv = phydev->drv; 995 struct phy_driver *phydrv = phydev->drv;
@@ -1002,6 +1003,7 @@ static void phy_write_mmd_indirect(struct phy_device *phydev, int prtad,
1002 phydrv->write_mmd_indirect(phydev, prtad, devad, addr, data); 1003 phydrv->write_mmd_indirect(phydev, prtad, devad, addr, data);
1003 } 1004 }
1004} 1005}
1006EXPORT_SYMBOL(phy_write_mmd_indirect);
1005 1007
1006/** 1008/**
1007 * phy_init_eee - init and check the EEE feature 1009 * phy_init_eee - init and check the EEE feature
@@ -1017,12 +1019,14 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
1017{ 1019{
1018 /* According to 802.3az,the EEE is supported only in full duplex-mode. 1020 /* According to 802.3az,the EEE is supported only in full duplex-mode.
1019 * Also EEE feature is active when core is operating with MII, GMII 1021 * Also EEE feature is active when core is operating with MII, GMII
1020 * or RGMII. 1022 * or RGMII. Internal PHYs are also allowed to proceed and should
1023 * return an error if they do not support EEE.
1021 */ 1024 */
1022 if ((phydev->duplex == DUPLEX_FULL) && 1025 if ((phydev->duplex == DUPLEX_FULL) &&
1023 ((phydev->interface == PHY_INTERFACE_MODE_MII) || 1026 ((phydev->interface == PHY_INTERFACE_MODE_MII) ||
1024 (phydev->interface == PHY_INTERFACE_MODE_GMII) || 1027 (phydev->interface == PHY_INTERFACE_MODE_GMII) ||
1025 (phydev->interface == PHY_INTERFACE_MODE_RGMII))) { 1028 (phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
1029 phy_is_internal(phydev))) {
1026 int eee_lp, eee_cap, eee_adv; 1030 int eee_lp, eee_cap, eee_adv;
1027 u32 lp, cap, adv; 1031 u32 lp, cap, adv;
1028 int status; 1032 int status;
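
Exporting phy_read_mmd_indirect()/phy_write_mmd_indirect() is what lets modular PHY drivers such as the bcm7xxx EEE code earlier in this diff perform Clause 45 accesses over a Clause 22 bus via the register 13/14 sequence described in the comments. A kernel-context sketch of the call shape, mirroring that usage; eee_adv_set() is a hypothetical helper.

#include <linux/phy.h>
#include <linux/mdio.h>

/* Hypothetical helper: OR extra advertisement bits into MDIO_AN_EEE_ADV
 * using the newly exported indirect MMD accessors. */
static int eee_adv_set(struct phy_device *phydev, int extra)
{
	int val;

	val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV,
				    MDIO_MMD_AN, phydev->addr);
	if (val < 0)
		return val;

	phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV,
			       MDIO_MMD_AN, phydev->addr, val | extra);
	return 0;
}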
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index ca5ec3e18d36..3fc91e89f5a5 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -230,13 +230,13 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
230 for (i = 1; 230 for (i = 1;
231 i < num_ids && c45_ids->devices_in_package == 0; 231 i < num_ids && c45_ids->devices_in_package == 0;
232 i++) { 232 i++) {
233 reg_addr = MII_ADDR_C45 | i << 16 | 6; 233 reg_addr = MII_ADDR_C45 | i << 16 | MDIO_DEVS2;
234 phy_reg = mdiobus_read(bus, addr, reg_addr); 234 phy_reg = mdiobus_read(bus, addr, reg_addr);
235 if (phy_reg < 0) 235 if (phy_reg < 0)
236 return -EIO; 236 return -EIO;
237 c45_ids->devices_in_package = (phy_reg & 0xffff) << 16; 237 c45_ids->devices_in_package = (phy_reg & 0xffff) << 16;
238 238
239 reg_addr = MII_ADDR_C45 | i << 16 | 5; 239 reg_addr = MII_ADDR_C45 | i << 16 | MDIO_DEVS1;
240 phy_reg = mdiobus_read(bus, addr, reg_addr); 240 phy_reg = mdiobus_read(bus, addr, reg_addr);
241 if (phy_reg < 0) 241 if (phy_reg < 0)
242 return -EIO; 242 return -EIO;
diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c
index ae7cd7f3656d..92578d72e4ee 100644
--- a/drivers/net/sungem_phy.c
+++ b/drivers/net/sungem_phy.c
@@ -47,22 +47,22 @@ static const int phy_BCM5400_link_table[8][3] = {
47 { 1, 0, 1 }, /* 1000BT */ 47 { 1, 0, 1 }, /* 1000BT */
48}; 48};
49 49
50static inline int __phy_read(struct mii_phy* phy, int id, int reg) 50static inline int __sungem_phy_read(struct mii_phy* phy, int id, int reg)
51{ 51{
52 return phy->mdio_read(phy->dev, id, reg); 52 return phy->mdio_read(phy->dev, id, reg);
53} 53}
54 54
55static inline void __phy_write(struct mii_phy* phy, int id, int reg, int val) 55static inline void __sungem_phy_write(struct mii_phy* phy, int id, int reg, int val)
56{ 56{
57 phy->mdio_write(phy->dev, id, reg, val); 57 phy->mdio_write(phy->dev, id, reg, val);
58} 58}
59 59
60static inline int phy_read(struct mii_phy* phy, int reg) 60static inline int sungem_phy_read(struct mii_phy* phy, int reg)
61{ 61{
62 return phy->mdio_read(phy->dev, phy->mii_id, reg); 62 return phy->mdio_read(phy->dev, phy->mii_id, reg);
63} 63}
64 64
65static inline void phy_write(struct mii_phy* phy, int reg, int val) 65static inline void sungem_phy_write(struct mii_phy* phy, int reg, int val)
66{ 66{
67 phy->mdio_write(phy->dev, phy->mii_id, reg, val); 67 phy->mdio_write(phy->dev, phy->mii_id, reg, val);
68} 68}
@@ -72,21 +72,21 @@ static int reset_one_mii_phy(struct mii_phy* phy, int phy_id)
72 u16 val; 72 u16 val;
73 int limit = 10000; 73 int limit = 10000;
74 74
75 val = __phy_read(phy, phy_id, MII_BMCR); 75 val = __sungem_phy_read(phy, phy_id, MII_BMCR);
76 val &= ~(BMCR_ISOLATE | BMCR_PDOWN); 76 val &= ~(BMCR_ISOLATE | BMCR_PDOWN);
77 val |= BMCR_RESET; 77 val |= BMCR_RESET;
78 __phy_write(phy, phy_id, MII_BMCR, val); 78 __sungem_phy_write(phy, phy_id, MII_BMCR, val);
79 79
80 udelay(100); 80 udelay(100);
81 81
82 while (--limit) { 82 while (--limit) {
83 val = __phy_read(phy, phy_id, MII_BMCR); 83 val = __sungem_phy_read(phy, phy_id, MII_BMCR);
84 if ((val & BMCR_RESET) == 0) 84 if ((val & BMCR_RESET) == 0)
85 break; 85 break;
86 udelay(10); 86 udelay(10);
87 } 87 }
88 if ((val & BMCR_ISOLATE) && limit > 0) 88 if ((val & BMCR_ISOLATE) && limit > 0)
89 __phy_write(phy, phy_id, MII_BMCR, val & ~BMCR_ISOLATE); 89 __sungem_phy_write(phy, phy_id, MII_BMCR, val & ~BMCR_ISOLATE);
90 90
91 return limit <= 0; 91 return limit <= 0;
92} 92}
@@ -95,19 +95,19 @@ static int bcm5201_init(struct mii_phy* phy)
95{ 95{
96 u16 data; 96 u16 data;
97 97
98 data = phy_read(phy, MII_BCM5201_MULTIPHY); 98 data = sungem_phy_read(phy, MII_BCM5201_MULTIPHY);
99 data &= ~MII_BCM5201_MULTIPHY_SUPERISOLATE; 99 data &= ~MII_BCM5201_MULTIPHY_SUPERISOLATE;
100 phy_write(phy, MII_BCM5201_MULTIPHY, data); 100 sungem_phy_write(phy, MII_BCM5201_MULTIPHY, data);
101 101
102 phy_write(phy, MII_BCM5201_INTERRUPT, 0); 102 sungem_phy_write(phy, MII_BCM5201_INTERRUPT, 0);
103 103
104 return 0; 104 return 0;
105} 105}
106 106
107static int bcm5201_suspend(struct mii_phy* phy) 107static int bcm5201_suspend(struct mii_phy* phy)
108{ 108{
109 phy_write(phy, MII_BCM5201_INTERRUPT, 0); 109 sungem_phy_write(phy, MII_BCM5201_INTERRUPT, 0);
110 phy_write(phy, MII_BCM5201_MULTIPHY, MII_BCM5201_MULTIPHY_SUPERISOLATE); 110 sungem_phy_write(phy, MII_BCM5201_MULTIPHY, MII_BCM5201_MULTIPHY_SUPERISOLATE);
111 111
112 return 0; 112 return 0;
113} 113}
@@ -116,20 +116,20 @@ static int bcm5221_init(struct mii_phy* phy)
116{ 116{
117 u16 data; 117 u16 data;
118 118
119 data = phy_read(phy, MII_BCM5221_TEST); 119 data = sungem_phy_read(phy, MII_BCM5221_TEST);
120 phy_write(phy, MII_BCM5221_TEST, 120 sungem_phy_write(phy, MII_BCM5221_TEST,
121 data | MII_BCM5221_TEST_ENABLE_SHADOWS); 121 data | MII_BCM5221_TEST_ENABLE_SHADOWS);
122 122
123 data = phy_read(phy, MII_BCM5221_SHDOW_AUX_STAT2); 123 data = sungem_phy_read(phy, MII_BCM5221_SHDOW_AUX_STAT2);
124 phy_write(phy, MII_BCM5221_SHDOW_AUX_STAT2, 124 sungem_phy_write(phy, MII_BCM5221_SHDOW_AUX_STAT2,
125 data | MII_BCM5221_SHDOW_AUX_STAT2_APD); 125 data | MII_BCM5221_SHDOW_AUX_STAT2_APD);
126 126
127 data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4); 127 data = sungem_phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4);
128 phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4, 128 sungem_phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4,
129 data | MII_BCM5221_SHDOW_AUX_MODE4_CLKLOPWR); 129 data | MII_BCM5221_SHDOW_AUX_MODE4_CLKLOPWR);
130 130
131 data = phy_read(phy, MII_BCM5221_TEST); 131 data = sungem_phy_read(phy, MII_BCM5221_TEST);
132 phy_write(phy, MII_BCM5221_TEST, 132 sungem_phy_write(phy, MII_BCM5221_TEST,
133 data & ~MII_BCM5221_TEST_ENABLE_SHADOWS); 133 data & ~MII_BCM5221_TEST_ENABLE_SHADOWS);
134 134
135 return 0; 135 return 0;
@@ -139,12 +139,12 @@ static int bcm5221_suspend(struct mii_phy* phy)
139{ 139{
140 u16 data; 140 u16 data;
141 141
142 data = phy_read(phy, MII_BCM5221_TEST); 142 data = sungem_phy_read(phy, MII_BCM5221_TEST);
143 phy_write(phy, MII_BCM5221_TEST, 143 sungem_phy_write(phy, MII_BCM5221_TEST,
144 data | MII_BCM5221_TEST_ENABLE_SHADOWS); 144 data | MII_BCM5221_TEST_ENABLE_SHADOWS);
145 145
146 data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4); 146 data = sungem_phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4);
147 phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4, 147 sungem_phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4,
148 data | MII_BCM5221_SHDOW_AUX_MODE4_IDDQMODE); 148 data | MII_BCM5221_SHDOW_AUX_MODE4_IDDQMODE);
149 149
150 return 0; 150 return 0;
@@ -154,20 +154,20 @@ static int bcm5241_init(struct mii_phy* phy)
154{ 154{
155 u16 data; 155 u16 data;
156 156
157 data = phy_read(phy, MII_BCM5221_TEST); 157 data = sungem_phy_read(phy, MII_BCM5221_TEST);
158 phy_write(phy, MII_BCM5221_TEST, 158 sungem_phy_write(phy, MII_BCM5221_TEST,
159 data | MII_BCM5221_TEST_ENABLE_SHADOWS); 159 data | MII_BCM5221_TEST_ENABLE_SHADOWS);
160 160
161 data = phy_read(phy, MII_BCM5221_SHDOW_AUX_STAT2); 161 data = sungem_phy_read(phy, MII_BCM5221_SHDOW_AUX_STAT2);
162 phy_write(phy, MII_BCM5221_SHDOW_AUX_STAT2, 162 sungem_phy_write(phy, MII_BCM5221_SHDOW_AUX_STAT2,
163 data | MII_BCM5221_SHDOW_AUX_STAT2_APD); 163 data | MII_BCM5221_SHDOW_AUX_STAT2_APD);
164 164
165 data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4); 165 data = sungem_phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4);
166 phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4, 166 sungem_phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4,
167 data & ~MII_BCM5241_SHDOW_AUX_MODE4_STANDBYPWR); 167 data & ~MII_BCM5241_SHDOW_AUX_MODE4_STANDBYPWR);
168 168
169 data = phy_read(phy, MII_BCM5221_TEST); 169 data = sungem_phy_read(phy, MII_BCM5221_TEST);
170 phy_write(phy, MII_BCM5221_TEST, 170 sungem_phy_write(phy, MII_BCM5221_TEST,
171 data & ~MII_BCM5221_TEST_ENABLE_SHADOWS); 171 data & ~MII_BCM5221_TEST_ENABLE_SHADOWS);
172 172
173 return 0; 173 return 0;
@@ -177,12 +177,12 @@ static int bcm5241_suspend(struct mii_phy* phy)
177{ 177{
178 u16 data; 178 u16 data;
179 179
180 data = phy_read(phy, MII_BCM5221_TEST); 180 data = sungem_phy_read(phy, MII_BCM5221_TEST);
181 phy_write(phy, MII_BCM5221_TEST, 181 sungem_phy_write(phy, MII_BCM5221_TEST,
182 data | MII_BCM5221_TEST_ENABLE_SHADOWS); 182 data | MII_BCM5221_TEST_ENABLE_SHADOWS);
183 183
184 data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4); 184 data = sungem_phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4);
185 phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4, 185 sungem_phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4,
186 data | MII_BCM5241_SHDOW_AUX_MODE4_STANDBYPWR); 186 data | MII_BCM5241_SHDOW_AUX_MODE4_STANDBYPWR);
187 187
188 return 0; 188 return 0;
@@ -193,26 +193,26 @@ static int bcm5400_init(struct mii_phy* phy)
193 u16 data; 193 u16 data;
194 194
195 /* Configure for gigabit full duplex */ 195 /* Configure for gigabit full duplex */
196 data = phy_read(phy, MII_BCM5400_AUXCONTROL); 196 data = sungem_phy_read(phy, MII_BCM5400_AUXCONTROL);
197 data |= MII_BCM5400_AUXCONTROL_PWR10BASET; 197 data |= MII_BCM5400_AUXCONTROL_PWR10BASET;
198 phy_write(phy, MII_BCM5400_AUXCONTROL, data); 198 sungem_phy_write(phy, MII_BCM5400_AUXCONTROL, data);
199 199
200 data = phy_read(phy, MII_BCM5400_GB_CONTROL); 200 data = sungem_phy_read(phy, MII_BCM5400_GB_CONTROL);
201 data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP; 201 data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP;
202 phy_write(phy, MII_BCM5400_GB_CONTROL, data); 202 sungem_phy_write(phy, MII_BCM5400_GB_CONTROL, data);
203 203
204 udelay(100); 204 udelay(100);
205 205
206 /* Reset and configure cascaded 10/100 PHY */ 206 /* Reset and configure cascaded 10/100 PHY */
207 (void)reset_one_mii_phy(phy, 0x1f); 207 (void)reset_one_mii_phy(phy, 0x1f);
208 208
209 data = __phy_read(phy, 0x1f, MII_BCM5201_MULTIPHY); 209 data = __sungem_phy_read(phy, 0x1f, MII_BCM5201_MULTIPHY);
210 data |= MII_BCM5201_MULTIPHY_SERIALMODE; 210 data |= MII_BCM5201_MULTIPHY_SERIALMODE;
211 __phy_write(phy, 0x1f, MII_BCM5201_MULTIPHY, data); 211 __sungem_phy_write(phy, 0x1f, MII_BCM5201_MULTIPHY, data);
212 212
213 data = phy_read(phy, MII_BCM5400_AUXCONTROL); 213 data = sungem_phy_read(phy, MII_BCM5400_AUXCONTROL);
214 data &= ~MII_BCM5400_AUXCONTROL_PWR10BASET; 214 data &= ~MII_BCM5400_AUXCONTROL_PWR10BASET;
215 phy_write(phy, MII_BCM5400_AUXCONTROL, data); 215 sungem_phy_write(phy, MII_BCM5400_AUXCONTROL, data);
216 216
217 return 0; 217 return 0;
218} 218}
@@ -220,7 +220,7 @@ static int bcm5400_init(struct mii_phy* phy)
220static int bcm5400_suspend(struct mii_phy* phy) 220static int bcm5400_suspend(struct mii_phy* phy)
221{ 221{
222#if 0 /* Commented out in Darwin... someone has those Darwin docs ? */ 222#if 0 /* Commented out in Darwin... someone has those Darwin docs ? */
223 phy_write(phy, MII_BMCR, BMCR_PDOWN); 223 sungem_phy_write(phy, MII_BMCR, BMCR_PDOWN);
224#endif 224#endif
225 return 0; 225 return 0;
226} 226}
@@ -230,7 +230,7 @@ static int bcm5401_init(struct mii_phy* phy)
230 u16 data; 230 u16 data;
231 int rev; 231 int rev;
232 232
233 rev = phy_read(phy, MII_PHYSID2) & 0x000f; 233 rev = sungem_phy_read(phy, MII_PHYSID2) & 0x000f;
234 if (rev == 0 || rev == 3) { 234 if (rev == 0 || rev == 3) {
235 /* Some revisions of 5401 appear to need this 235 /* Some revisions of 5401 appear to need this
236 * initialisation sequence to disable, according 236 * initialisation sequence to disable, according
@@ -243,32 +243,32 @@ static int bcm5401_init(struct mii_phy* phy)
243 * Note: This should (and does) match tg3_init_5401phy_dsp 243 * Note: This should (and does) match tg3_init_5401phy_dsp
244 * in the tg3.c driver. -DaveM 244 * in the tg3.c driver. -DaveM
245 */ 245 */
246 phy_write(phy, 0x18, 0x0c20); 246 sungem_phy_write(phy, 0x18, 0x0c20);
247 phy_write(phy, 0x17, 0x0012); 247 sungem_phy_write(phy, 0x17, 0x0012);
248 phy_write(phy, 0x15, 0x1804); 248 sungem_phy_write(phy, 0x15, 0x1804);
249 phy_write(phy, 0x17, 0x0013); 249 sungem_phy_write(phy, 0x17, 0x0013);
250 phy_write(phy, 0x15, 0x1204); 250 sungem_phy_write(phy, 0x15, 0x1204);
251 phy_write(phy, 0x17, 0x8006); 251 sungem_phy_write(phy, 0x17, 0x8006);
252 phy_write(phy, 0x15, 0x0132); 252 sungem_phy_write(phy, 0x15, 0x0132);
253 phy_write(phy, 0x17, 0x8006); 253 sungem_phy_write(phy, 0x17, 0x8006);
254 phy_write(phy, 0x15, 0x0232); 254 sungem_phy_write(phy, 0x15, 0x0232);
255 phy_write(phy, 0x17, 0x201f); 255 sungem_phy_write(phy, 0x17, 0x201f);
256 phy_write(phy, 0x15, 0x0a20); 256 sungem_phy_write(phy, 0x15, 0x0a20);
257 } 257 }
258 258
259 /* Configure for gigabit full duplex */ 259 /* Configure for gigabit full duplex */
260 data = phy_read(phy, MII_BCM5400_GB_CONTROL); 260 data = sungem_phy_read(phy, MII_BCM5400_GB_CONTROL);
261 data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP; 261 data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP;
262 phy_write(phy, MII_BCM5400_GB_CONTROL, data); 262 sungem_phy_write(phy, MII_BCM5400_GB_CONTROL, data);
263 263
264 udelay(10); 264 udelay(10);
265 265
266 /* Reset and configure cascaded 10/100 PHY */ 266 /* Reset and configure cascaded 10/100 PHY */
267 (void)reset_one_mii_phy(phy, 0x1f); 267 (void)reset_one_mii_phy(phy, 0x1f);
268 268
269 data = __phy_read(phy, 0x1f, MII_BCM5201_MULTIPHY); 269 data = __sungem_phy_read(phy, 0x1f, MII_BCM5201_MULTIPHY);
270 data |= MII_BCM5201_MULTIPHY_SERIALMODE; 270 data |= MII_BCM5201_MULTIPHY_SERIALMODE;
271 __phy_write(phy, 0x1f, MII_BCM5201_MULTIPHY, data); 271 __sungem_phy_write(phy, 0x1f, MII_BCM5201_MULTIPHY, data);
272 272
273 return 0; 273 return 0;
274} 274}
@@ -276,7 +276,7 @@ static int bcm5401_init(struct mii_phy* phy)
276static int bcm5401_suspend(struct mii_phy* phy) 276static int bcm5401_suspend(struct mii_phy* phy)
277{ 277{
278#if 0 /* Commented out in Darwin... someone has those Darwin docs ? */ 278#if 0 /* Commented out in Darwin... someone has those Darwin docs ? */
279 phy_write(phy, MII_BMCR, BMCR_PDOWN); 279 sungem_phy_write(phy, MII_BMCR, BMCR_PDOWN);
280#endif 280#endif
281 return 0; 281 return 0;
282} 282}
@@ -288,19 +288,19 @@ static int bcm5411_init(struct mii_phy* phy)
288 /* Here's some more Apple black magic to set up 288 /* Here's some more Apple black magic to set up
289 * some voltage stuff. 289 * some voltage stuff.
290 */ 290 */
291 phy_write(phy, 0x1c, 0x8c23); 291 sungem_phy_write(phy, 0x1c, 0x8c23);
292 phy_write(phy, 0x1c, 0x8ca3); 292 sungem_phy_write(phy, 0x1c, 0x8ca3);
293 phy_write(phy, 0x1c, 0x8c23); 293 sungem_phy_write(phy, 0x1c, 0x8c23);
294 294
295 /* Here, Apple seems to want to reset it, do 295 /* Here, Apple seems to want to reset it, do
296 * it as well 296 * it as well
297 */ 297 */
298 phy_write(phy, MII_BMCR, BMCR_RESET); 298 sungem_phy_write(phy, MII_BMCR, BMCR_RESET);
299 phy_write(phy, MII_BMCR, 0x1340); 299 sungem_phy_write(phy, MII_BMCR, 0x1340);
300 300
301 data = phy_read(phy, MII_BCM5400_GB_CONTROL); 301 data = sungem_phy_read(phy, MII_BCM5400_GB_CONTROL);
302 data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP; 302 data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP;
303 phy_write(phy, MII_BCM5400_GB_CONTROL, data); 303 sungem_phy_write(phy, MII_BCM5400_GB_CONTROL, data);
304 304
305 udelay(10); 305 udelay(10);
306 306
@@ -321,7 +321,7 @@ static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
321 phy->advertising = advertise; 321 phy->advertising = advertise;
322 322
323 /* Setup standard advertise */ 323 /* Setup standard advertise */
324 adv = phy_read(phy, MII_ADVERTISE); 324 adv = sungem_phy_read(phy, MII_ADVERTISE);
325 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); 325 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
326 if (advertise & ADVERTISED_10baseT_Half) 326 if (advertise & ADVERTISED_10baseT_Half)
327 adv |= ADVERTISE_10HALF; 327 adv |= ADVERTISE_10HALF;
@@ -331,12 +331,12 @@ static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
331 adv |= ADVERTISE_100HALF; 331 adv |= ADVERTISE_100HALF;
332 if (advertise & ADVERTISED_100baseT_Full) 332 if (advertise & ADVERTISED_100baseT_Full)
333 adv |= ADVERTISE_100FULL; 333 adv |= ADVERTISE_100FULL;
334 phy_write(phy, MII_ADVERTISE, adv); 334 sungem_phy_write(phy, MII_ADVERTISE, adv);
335 335
336 /* Start/Restart aneg */ 336 /* Start/Restart aneg */
337 ctl = phy_read(phy, MII_BMCR); 337 ctl = sungem_phy_read(phy, MII_BMCR);
338 ctl |= (BMCR_ANENABLE | BMCR_ANRESTART); 338 ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
339 phy_write(phy, MII_BMCR, ctl); 339 sungem_phy_write(phy, MII_BMCR, ctl);
340 340
341 return 0; 341 return 0;
342} 342}
@@ -350,11 +350,11 @@ static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd)
350 phy->duplex = fd; 350 phy->duplex = fd;
351 phy->pause = 0; 351 phy->pause = 0;
352 352
353 ctl = phy_read(phy, MII_BMCR); 353 ctl = sungem_phy_read(phy, MII_BMCR);
354 ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_ANENABLE); 354 ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_ANENABLE);
355 355
356 /* First reset the PHY */ 356 /* First reset the PHY */
357 phy_write(phy, MII_BMCR, ctl | BMCR_RESET); 357 sungem_phy_write(phy, MII_BMCR, ctl | BMCR_RESET);
358 358
359 /* Select speed & duplex */ 359 /* Select speed & duplex */
360 switch(speed) { 360 switch(speed) {
@@ -369,7 +369,7 @@ static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd)
369 } 369 }
370 if (fd == DUPLEX_FULL) 370 if (fd == DUPLEX_FULL)
371 ctl |= BMCR_FULLDPLX; 371 ctl |= BMCR_FULLDPLX;
372 phy_write(phy, MII_BMCR, ctl); 372 sungem_phy_write(phy, MII_BMCR, ctl);
373 373
374 return 0; 374 return 0;
375} 375}
@@ -378,8 +378,8 @@ static int genmii_poll_link(struct mii_phy *phy)
378{ 378{
379 u16 status; 379 u16 status;
380 380
381 (void)phy_read(phy, MII_BMSR); 381 (void)sungem_phy_read(phy, MII_BMSR);
382 status = phy_read(phy, MII_BMSR); 382 status = sungem_phy_read(phy, MII_BMSR);
383 if ((status & BMSR_LSTATUS) == 0) 383 if ((status & BMSR_LSTATUS) == 0)
384 return 0; 384 return 0;
385 if (phy->autoneg && !(status & BMSR_ANEGCOMPLETE)) 385 if (phy->autoneg && !(status & BMSR_ANEGCOMPLETE))
@@ -392,7 +392,7 @@ static int genmii_read_link(struct mii_phy *phy)
392 u16 lpa; 392 u16 lpa;
393 393
394 if (phy->autoneg) { 394 if (phy->autoneg) {
395 lpa = phy_read(phy, MII_LPA); 395 lpa = sungem_phy_read(phy, MII_LPA);
396 396
397 if (lpa & (LPA_10FULL | LPA_100FULL)) 397 if (lpa & (LPA_10FULL | LPA_100FULL))
398 phy->duplex = DUPLEX_FULL; 398 phy->duplex = DUPLEX_FULL;
@@ -413,7 +413,7 @@ static int genmii_read_link(struct mii_phy *phy)
413 413
414static int generic_suspend(struct mii_phy* phy) 414static int generic_suspend(struct mii_phy* phy)
415{ 415{
416 phy_write(phy, MII_BMCR, BMCR_PDOWN); 416 sungem_phy_write(phy, MII_BMCR, BMCR_PDOWN);
417 417
418 return 0; 418 return 0;
419} 419}
@@ -423,27 +423,27 @@ static int bcm5421_init(struct mii_phy* phy)
423 u16 data; 423 u16 data;
424 unsigned int id; 424 unsigned int id;
425 425
426 id = (phy_read(phy, MII_PHYSID1) << 16 | phy_read(phy, MII_PHYSID2)); 426 id = (sungem_phy_read(phy, MII_PHYSID1) << 16 | sungem_phy_read(phy, MII_PHYSID2));
427 427
428 /* Revision 0 of 5421 needs some fixups */ 428 /* Revision 0 of 5421 needs some fixups */
429 if (id == 0x002060e0) { 429 if (id == 0x002060e0) {
430 /* This is borrowed from MacOS 430 /* This is borrowed from MacOS
431 */ 431 */
432 phy_write(phy, 0x18, 0x1007); 432 sungem_phy_write(phy, 0x18, 0x1007);
433 data = phy_read(phy, 0x18); 433 data = sungem_phy_read(phy, 0x18);
434 phy_write(phy, 0x18, data | 0x0400); 434 sungem_phy_write(phy, 0x18, data | 0x0400);
435 phy_write(phy, 0x18, 0x0007); 435 sungem_phy_write(phy, 0x18, 0x0007);
436 data = phy_read(phy, 0x18); 436 data = sungem_phy_read(phy, 0x18);
437 phy_write(phy, 0x18, data | 0x0800); 437 sungem_phy_write(phy, 0x18, data | 0x0800);
438 phy_write(phy, 0x17, 0x000a); 438 sungem_phy_write(phy, 0x17, 0x000a);
439 data = phy_read(phy, 0x15); 439 data = sungem_phy_read(phy, 0x15);
440 phy_write(phy, 0x15, data | 0x0200); 440 sungem_phy_write(phy, 0x15, data | 0x0200);
441 } 441 }
442 442
443 /* Pick up some init code from OF for K2 version */ 443 /* Pick up some init code from OF for K2 version */
444 if ((id & 0xfffffff0) == 0x002062e0) { 444 if ((id & 0xfffffff0) == 0x002062e0) {
445 phy_write(phy, 4, 0x01e1); 445 sungem_phy_write(phy, 4, 0x01e1);
446 phy_write(phy, 9, 0x0300); 446 sungem_phy_write(phy, 9, 0x0300);
447 } 447 }
448 448
449 /* Check if we can enable automatic low power */ 449 /* Check if we can enable automatic low power */
@@ -455,9 +455,9 @@ static int bcm5421_init(struct mii_phy* phy)
455 can_low_power = 0; 455 can_low_power = 0;
456 if (can_low_power) { 456 if (can_low_power) {
457 /* Enable automatic low-power */ 457 /* Enable automatic low-power */
458 phy_write(phy, 0x1c, 0x9002); 458 sungem_phy_write(phy, 0x1c, 0x9002);
459 phy_write(phy, 0x1c, 0xa821); 459 sungem_phy_write(phy, 0x1c, 0xa821);
460 phy_write(phy, 0x1c, 0x941d); 460 sungem_phy_write(phy, 0x1c, 0x941d);
461 } 461 }
462 } 462 }
463#endif /* CONFIG_PPC_PMAC */ 463#endif /* CONFIG_PPC_PMAC */
@@ -476,7 +476,7 @@ static int bcm54xx_setup_aneg(struct mii_phy *phy, u32 advertise)
476 phy->advertising = advertise; 476 phy->advertising = advertise;
477 477
478 /* Setup standard advertise */ 478 /* Setup standard advertise */
479 adv = phy_read(phy, MII_ADVERTISE); 479 adv = sungem_phy_read(phy, MII_ADVERTISE);
480 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); 480 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
481 if (advertise & ADVERTISED_10baseT_Half) 481 if (advertise & ADVERTISED_10baseT_Half)
482 adv |= ADVERTISE_10HALF; 482 adv |= ADVERTISE_10HALF;
@@ -490,21 +490,21 @@ static int bcm54xx_setup_aneg(struct mii_phy *phy, u32 advertise)
490 adv |= ADVERTISE_PAUSE_CAP; 490 adv |= ADVERTISE_PAUSE_CAP;
491 if (advertise & ADVERTISED_Asym_Pause) 491 if (advertise & ADVERTISED_Asym_Pause)
492 adv |= ADVERTISE_PAUSE_ASYM; 492 adv |= ADVERTISE_PAUSE_ASYM;
493 phy_write(phy, MII_ADVERTISE, adv); 493 sungem_phy_write(phy, MII_ADVERTISE, adv);
494 494
495 /* Setup 1000BT advertise */ 495 /* Setup 1000BT advertise */
496 adv = phy_read(phy, MII_1000BASETCONTROL); 496 adv = sungem_phy_read(phy, MII_1000BASETCONTROL);
497 adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP|MII_1000BASETCONTROL_HALFDUPLEXCAP); 497 adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP|MII_1000BASETCONTROL_HALFDUPLEXCAP);
498 if (advertise & SUPPORTED_1000baseT_Half) 498 if (advertise & SUPPORTED_1000baseT_Half)
499 adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP; 499 adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP;
500 if (advertise & SUPPORTED_1000baseT_Full) 500 if (advertise & SUPPORTED_1000baseT_Full)
501 adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP; 501 adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP;
502 phy_write(phy, MII_1000BASETCONTROL, adv); 502 sungem_phy_write(phy, MII_1000BASETCONTROL, adv);
503 503
504 /* Start/Restart aneg */ 504 /* Start/Restart aneg */
505 ctl = phy_read(phy, MII_BMCR); 505 ctl = sungem_phy_read(phy, MII_BMCR);
506 ctl |= (BMCR_ANENABLE | BMCR_ANRESTART); 506 ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
507 phy_write(phy, MII_BMCR, ctl); 507 sungem_phy_write(phy, MII_BMCR, ctl);
508 508
509 return 0; 509 return 0;
510} 510}
@@ -518,11 +518,11 @@ static int bcm54xx_setup_forced(struct mii_phy *phy, int speed, int fd)
518 phy->duplex = fd; 518 phy->duplex = fd;
519 phy->pause = 0; 519 phy->pause = 0;
520 520
521 ctl = phy_read(phy, MII_BMCR); 521 ctl = sungem_phy_read(phy, MII_BMCR);
522 ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_SPD2|BMCR_ANENABLE); 522 ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_SPD2|BMCR_ANENABLE);
523 523
524 /* First reset the PHY */ 524 /* First reset the PHY */
525 phy_write(phy, MII_BMCR, ctl | BMCR_RESET); 525 sungem_phy_write(phy, MII_BMCR, ctl | BMCR_RESET);
526 526
527 /* Select speed & duplex */ 527 /* Select speed & duplex */
528 switch(speed) { 528 switch(speed) {
@@ -539,7 +539,7 @@ static int bcm54xx_setup_forced(struct mii_phy *phy, int speed, int fd)
539 539
540 // XXX Should we set the sungem to GII now on 1000BT ? 540 // XXX Should we set the sungem to GII now on 1000BT ?
541 541
542 phy_write(phy, MII_BMCR, ctl); 542 sungem_phy_write(phy, MII_BMCR, ctl);
543 543
544 return 0; 544 return 0;
545} 545}
@@ -550,7 +550,7 @@ static int bcm54xx_read_link(struct mii_phy *phy)
550 u16 val; 550 u16 val;
551 551
552 if (phy->autoneg) { 552 if (phy->autoneg) {
553 val = phy_read(phy, MII_BCM5400_AUXSTATUS); 553 val = sungem_phy_read(phy, MII_BCM5400_AUXSTATUS);
554 link_mode = ((val & MII_BCM5400_AUXSTATUS_LINKMODE_MASK) >> 554 link_mode = ((val & MII_BCM5400_AUXSTATUS_LINKMODE_MASK) >>
555 MII_BCM5400_AUXSTATUS_LINKMODE_SHIFT); 555 MII_BCM5400_AUXSTATUS_LINKMODE_SHIFT);
556 phy->duplex = phy_BCM5400_link_table[link_mode][0] ? 556 phy->duplex = phy_BCM5400_link_table[link_mode][0] ?
@@ -559,7 +559,7 @@ static int bcm54xx_read_link(struct mii_phy *phy)
559 SPEED_1000 : 559 SPEED_1000 :
560 (phy_BCM5400_link_table[link_mode][1] ? 560 (phy_BCM5400_link_table[link_mode][1] ?
561 SPEED_100 : SPEED_10); 561 SPEED_100 : SPEED_10);
562 val = phy_read(phy, MII_LPA); 562 val = sungem_phy_read(phy, MII_LPA);
563 phy->pause = (phy->duplex == DUPLEX_FULL) && 563 phy->pause = (phy->duplex == DUPLEX_FULL) &&
564 ((val & LPA_PAUSE) != 0); 564 ((val & LPA_PAUSE) != 0);
565 } 565 }
@@ -575,19 +575,19 @@ static int marvell88e1111_init(struct mii_phy* phy)
575 u16 rev; 575 u16 rev;
576 576
577 /* magic init sequence for rev 0 */ 577 /* magic init sequence for rev 0 */
578 rev = phy_read(phy, MII_PHYSID2) & 0x000f; 578 rev = sungem_phy_read(phy, MII_PHYSID2) & 0x000f;
579 if (rev == 0) { 579 if (rev == 0) {
580 phy_write(phy, 0x1d, 0x000a); 580 sungem_phy_write(phy, 0x1d, 0x000a);
581 phy_write(phy, 0x1e, 0x0821); 581 sungem_phy_write(phy, 0x1e, 0x0821);
582 582
583 phy_write(phy, 0x1d, 0x0006); 583 sungem_phy_write(phy, 0x1d, 0x0006);
584 phy_write(phy, 0x1e, 0x8600); 584 sungem_phy_write(phy, 0x1e, 0x8600);
585 585
586 phy_write(phy, 0x1d, 0x000b); 586 sungem_phy_write(phy, 0x1d, 0x000b);
587 phy_write(phy, 0x1e, 0x0100); 587 sungem_phy_write(phy, 0x1e, 0x0100);
588 588
589 phy_write(phy, 0x1d, 0x0004); 589 sungem_phy_write(phy, 0x1d, 0x0004);
590 phy_write(phy, 0x1e, 0x4850); 590 sungem_phy_write(phy, 0x1e, 0x4850);
591 } 591 }
592 return 0; 592 return 0;
593} 593}
@@ -600,8 +600,8 @@ static int bcm5421_poll_link(struct mii_phy* phy)
600 int mode; 600 int mode;
601 601
602 /* find out in what mode we are */ 602 /* find out in what mode we are */
603 phy_write(phy, MII_NCONFIG, 0x1000); 603 sungem_phy_write(phy, MII_NCONFIG, 0x1000);
604 phy_reg = phy_read(phy, MII_NCONFIG); 604 phy_reg = sungem_phy_read(phy, MII_NCONFIG);
605 605
606 mode = (phy_reg & BCM5421_MODE_MASK) >> 5; 606 mode = (phy_reg & BCM5421_MODE_MASK) >> 5;
607 607
@@ -609,8 +609,8 @@ static int bcm5421_poll_link(struct mii_phy* phy)
609 return genmii_poll_link(phy); 609 return genmii_poll_link(phy);
610 610
611 /* try to find out whether we have a link */ 611 /* try to find out whether we have a link */
612 phy_write(phy, MII_NCONFIG, 0x2000); 612 sungem_phy_write(phy, MII_NCONFIG, 0x2000);
613 phy_reg = phy_read(phy, MII_NCONFIG); 613 phy_reg = sungem_phy_read(phy, MII_NCONFIG);
614 614
615 if (phy_reg & 0x0020) 615 if (phy_reg & 0x0020)
616 return 0; 616 return 0;
@@ -624,8 +624,8 @@ static int bcm5421_read_link(struct mii_phy* phy)
624 int mode; 624 int mode;
625 625
626 /* find out in what mode we are */ 626 /* find out in what mode we are */
627 phy_write(phy, MII_NCONFIG, 0x1000); 627 sungem_phy_write(phy, MII_NCONFIG, 0x1000);
628 phy_reg = phy_read(phy, MII_NCONFIG); 628 phy_reg = sungem_phy_read(phy, MII_NCONFIG);
629 629
630 mode = (phy_reg & BCM5421_MODE_MASK ) >> 5; 630 mode = (phy_reg & BCM5421_MODE_MASK ) >> 5;
631 631
@@ -635,8 +635,8 @@ static int bcm5421_read_link(struct mii_phy* phy)
635 phy->speed = SPEED_1000; 635 phy->speed = SPEED_1000;
636 636
637 /* find out whether we are running half- or full duplex */ 637 /* find out whether we are running half- or full duplex */
638 phy_write(phy, MII_NCONFIG, 0x2000); 638 sungem_phy_write(phy, MII_NCONFIG, 0x2000);
639 phy_reg = phy_read(phy, MII_NCONFIG); 639 phy_reg = sungem_phy_read(phy, MII_NCONFIG);
640 640
641 if ( (phy_reg & 0x0080) >> 7) 641 if ( (phy_reg & 0x0080) >> 7)
642 phy->duplex |= DUPLEX_HALF; 642 phy->duplex |= DUPLEX_HALF;
@@ -649,14 +649,14 @@ static int bcm5421_read_link(struct mii_phy* phy)
649static int bcm5421_enable_fiber(struct mii_phy* phy, int autoneg) 649static int bcm5421_enable_fiber(struct mii_phy* phy, int autoneg)
650{ 650{
651 /* enable fiber mode */ 651 /* enable fiber mode */
652 phy_write(phy, MII_NCONFIG, 0x9020); 652 sungem_phy_write(phy, MII_NCONFIG, 0x9020);
653 /* LEDs active in both modes, autosense prio = fiber */ 653 /* LEDs active in both modes, autosense prio = fiber */
654 phy_write(phy, MII_NCONFIG, 0x945f); 654 sungem_phy_write(phy, MII_NCONFIG, 0x945f);
655 655
656 if (!autoneg) { 656 if (!autoneg) {
657 /* switch off fibre autoneg */ 657 /* switch off fibre autoneg */
658 phy_write(phy, MII_NCONFIG, 0xfc01); 658 sungem_phy_write(phy, MII_NCONFIG, 0xfc01);
659 phy_write(phy, 0x0b, 0x0004); 659 sungem_phy_write(phy, 0x0b, 0x0004);
660 } 660 }
661 661
662 phy->autoneg = autoneg; 662 phy->autoneg = autoneg;
@@ -673,8 +673,8 @@ static int bcm5461_poll_link(struct mii_phy* phy)
673 int mode; 673 int mode;
674 674
675 /* find out in what mode we are */ 675 /* find out in what mode we are */
676 phy_write(phy, MII_NCONFIG, 0x7c00); 676 sungem_phy_write(phy, MII_NCONFIG, 0x7c00);
677 phy_reg = phy_read(phy, MII_NCONFIG); 677 phy_reg = sungem_phy_read(phy, MII_NCONFIG);
678 678
679 mode = (phy_reg & BCM5461_MODE_MASK ) >> 1; 679 mode = (phy_reg & BCM5461_MODE_MASK ) >> 1;
680 680
@@ -682,8 +682,8 @@ static int bcm5461_poll_link(struct mii_phy* phy)
682 return genmii_poll_link(phy); 682 return genmii_poll_link(phy);
683 683
684 /* find out whether we have a link */ 684 /* find out whether we have a link */
685 phy_write(phy, MII_NCONFIG, 0x7000); 685 sungem_phy_write(phy, MII_NCONFIG, 0x7000);
686 phy_reg = phy_read(phy, MII_NCONFIG); 686 phy_reg = sungem_phy_read(phy, MII_NCONFIG);
687 687
688 if (phy_reg & BCM5461_FIBER_LINK) 688 if (phy_reg & BCM5461_FIBER_LINK)
689 return 1; 689 return 1;
@@ -699,8 +699,8 @@ static int bcm5461_read_link(struct mii_phy* phy)
699 int mode; 699 int mode;
700 700
701 /* find out in what mode we are */ 701 /* find out in what mode we are */
702 phy_write(phy, MII_NCONFIG, 0x7c00); 702 sungem_phy_write(phy, MII_NCONFIG, 0x7c00);
703 phy_reg = phy_read(phy, MII_NCONFIG); 703 phy_reg = sungem_phy_read(phy, MII_NCONFIG);
704 704
705 mode = (phy_reg & BCM5461_MODE_MASK ) >> 1; 705 mode = (phy_reg & BCM5461_MODE_MASK ) >> 1;
706 706
@@ -711,8 +711,8 @@ static int bcm5461_read_link(struct mii_phy* phy)
711 phy->speed = SPEED_1000; 711 phy->speed = SPEED_1000;
712 712
713 /* find out whether we are running half- or full duplex */ 713 /* find out whether we are running half- or full duplex */
714 phy_write(phy, MII_NCONFIG, 0x7000); 714 sungem_phy_write(phy, MII_NCONFIG, 0x7000);
715 phy_reg = phy_read(phy, MII_NCONFIG); 715 phy_reg = sungem_phy_read(phy, MII_NCONFIG);
716 716
717 if (phy_reg & BCM5461_FIBER_DUPLEX) 717 if (phy_reg & BCM5461_FIBER_DUPLEX)
718 phy->duplex |= DUPLEX_FULL; 718 phy->duplex |= DUPLEX_FULL;
@@ -725,15 +725,15 @@ static int bcm5461_read_link(struct mii_phy* phy)
725static int bcm5461_enable_fiber(struct mii_phy* phy, int autoneg) 725static int bcm5461_enable_fiber(struct mii_phy* phy, int autoneg)
726{ 726{
727 /* select fiber mode, enable 1000 base-X registers */ 727 /* select fiber mode, enable 1000 base-X registers */
728 phy_write(phy, MII_NCONFIG, 0xfc0b); 728 sungem_phy_write(phy, MII_NCONFIG, 0xfc0b);
729 729
730 if (autoneg) { 730 if (autoneg) {
731 /* enable fiber with autonegotiation */ 731 /* enable fiber with autonegotiation */
732 phy_write(phy, MII_ADVERTISE, 0x01e0); 732 sungem_phy_write(phy, MII_ADVERTISE, 0x01e0);
733 phy_write(phy, MII_BMCR, 0x1140); 733 sungem_phy_write(phy, MII_BMCR, 0x1140);
734 } else { 734 } else {
735 /* enable fiber without autonegotiation */ 735 /* enable fiber without autonegotiation */
736 phy_write(phy, MII_BMCR, 0x0140); 736 sungem_phy_write(phy, MII_BMCR, 0x0140);
737 } 737 }
738 738
739 phy->autoneg = autoneg; 739 phy->autoneg = autoneg;
@@ -752,7 +752,7 @@ static int marvell_setup_aneg(struct mii_phy *phy, u32 advertise)
752 phy->advertising = advertise; 752 phy->advertising = advertise;
753 753
754 /* Setup standard advertise */ 754 /* Setup standard advertise */
755 adv = phy_read(phy, MII_ADVERTISE); 755 adv = sungem_phy_read(phy, MII_ADVERTISE);
756 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); 756 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
757 if (advertise & ADVERTISED_10baseT_Half) 757 if (advertise & ADVERTISED_10baseT_Half)
758 adv |= ADVERTISE_10HALF; 758 adv |= ADVERTISE_10HALF;
@@ -766,7 +766,7 @@ static int marvell_setup_aneg(struct mii_phy *phy, u32 advertise)
766 adv |= ADVERTISE_PAUSE_CAP; 766 adv |= ADVERTISE_PAUSE_CAP;
767 if (advertise & ADVERTISED_Asym_Pause) 767 if (advertise & ADVERTISED_Asym_Pause)
768 adv |= ADVERTISE_PAUSE_ASYM; 768 adv |= ADVERTISE_PAUSE_ASYM;
769 phy_write(phy, MII_ADVERTISE, adv); 769 sungem_phy_write(phy, MII_ADVERTISE, adv);
770 770
771 /* Setup 1000BT advertise & enable crossover detect 771 /* Setup 1000BT advertise & enable crossover detect
772 * XXX How do we advertise 1000BT ? Darwin source is 772 * XXX How do we advertise 1000BT ? Darwin source is
@@ -774,7 +774,7 @@ static int marvell_setup_aneg(struct mii_phy *phy, u32 advertise)
774 * write to control... Someone has specs for those 774 * write to control... Someone has specs for those
775 * beasts ? 775 * beasts ?
776 */ 776 */
777 adv = phy_read(phy, MII_M1011_PHY_SPEC_CONTROL); 777 adv = sungem_phy_read(phy, MII_M1011_PHY_SPEC_CONTROL);
778 adv |= MII_M1011_PHY_SPEC_CONTROL_AUTO_MDIX; 778 adv |= MII_M1011_PHY_SPEC_CONTROL_AUTO_MDIX;
779 adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP | 779 adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP |
780 MII_1000BASETCONTROL_HALFDUPLEXCAP); 780 MII_1000BASETCONTROL_HALFDUPLEXCAP);
@@ -782,12 +782,12 @@ static int marvell_setup_aneg(struct mii_phy *phy, u32 advertise)
782 adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP; 782 adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP;
783 if (advertise & SUPPORTED_1000baseT_Full) 783 if (advertise & SUPPORTED_1000baseT_Full)
784 adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP; 784 adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP;
785 phy_write(phy, MII_1000BASETCONTROL, adv); 785 sungem_phy_write(phy, MII_1000BASETCONTROL, adv);
786 786
787 /* Start/Restart aneg */ 787 /* Start/Restart aneg */
788 ctl = phy_read(phy, MII_BMCR); 788 ctl = sungem_phy_read(phy, MII_BMCR);
789 ctl |= (BMCR_ANENABLE | BMCR_ANRESTART); 789 ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
790 phy_write(phy, MII_BMCR, ctl); 790 sungem_phy_write(phy, MII_BMCR, ctl);
791 791
792 return 0; 792 return 0;
793} 793}
@@ -801,7 +801,7 @@ static int marvell_setup_forced(struct mii_phy *phy, int speed, int fd)
801 phy->duplex = fd; 801 phy->duplex = fd;
802 phy->pause = 0; 802 phy->pause = 0;
803 803
804 ctl = phy_read(phy, MII_BMCR); 804 ctl = sungem_phy_read(phy, MII_BMCR);
805 ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_SPD2|BMCR_ANENABLE); 805 ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_SPD2|BMCR_ANENABLE);
806 ctl |= BMCR_RESET; 806 ctl |= BMCR_RESET;
807 807
@@ -824,7 +824,7 @@ static int marvell_setup_forced(struct mii_phy *phy, int speed, int fd)
824 /* Disable crossover. Again, the way Apple does it is strange, 824 /* Disable crossover. Again, the way Apple does it is strange,
825 * though I don't assume they are wrong ;) 825 * though I don't assume they are wrong ;)
826 */ 826 */
827 ctl2 = phy_read(phy, MII_M1011_PHY_SPEC_CONTROL); 827 ctl2 = sungem_phy_read(phy, MII_M1011_PHY_SPEC_CONTROL);
828 ctl2 &= ~(MII_M1011_PHY_SPEC_CONTROL_MANUAL_MDIX | 828 ctl2 &= ~(MII_M1011_PHY_SPEC_CONTROL_MANUAL_MDIX |
829 MII_M1011_PHY_SPEC_CONTROL_AUTO_MDIX | 829 MII_M1011_PHY_SPEC_CONTROL_AUTO_MDIX |
830 MII_1000BASETCONTROL_FULLDUPLEXCAP | 830 MII_1000BASETCONTROL_FULLDUPLEXCAP |
@@ -833,11 +833,11 @@ static int marvell_setup_forced(struct mii_phy *phy, int speed, int fd)
833 ctl2 |= (fd == DUPLEX_FULL) ? 833 ctl2 |= (fd == DUPLEX_FULL) ?
834 MII_1000BASETCONTROL_FULLDUPLEXCAP : 834 MII_1000BASETCONTROL_FULLDUPLEXCAP :
835 MII_1000BASETCONTROL_HALFDUPLEXCAP; 835 MII_1000BASETCONTROL_HALFDUPLEXCAP;
836 phy_write(phy, MII_1000BASETCONTROL, ctl2); 836 sungem_phy_write(phy, MII_1000BASETCONTROL, ctl2);
837 837
838 // XXX Should we set the sungem to GII now on 1000BT ? 838 // XXX Should we set the sungem to GII now on 1000BT ?
839 839
840 phy_write(phy, MII_BMCR, ctl); 840 sungem_phy_write(phy, MII_BMCR, ctl);
841 841
842 return 0; 842 return 0;
843} 843}
@@ -847,7 +847,7 @@ static int marvell_read_link(struct mii_phy *phy)
847 u16 status, pmask; 847 u16 status, pmask;
848 848
849 if (phy->autoneg) { 849 if (phy->autoneg) {
850 status = phy_read(phy, MII_M1011_PHY_SPEC_STATUS); 850 status = sungem_phy_read(phy, MII_M1011_PHY_SPEC_STATUS);
851 if ((status & MII_M1011_PHY_SPEC_STATUS_RESOLVED) == 0) 851 if ((status & MII_M1011_PHY_SPEC_STATUS_RESOLVED) == 0)
852 return -EAGAIN; 852 return -EAGAIN;
853 if (status & MII_M1011_PHY_SPEC_STATUS_1000) 853 if (status & MII_M1011_PHY_SPEC_STATUS_1000)
@@ -1174,7 +1174,7 @@ int sungem_phy_probe(struct mii_phy *phy, int mii_id)
1174 goto fail; 1174 goto fail;
1175 1175
1176 /* Read ID and find matching entry */ 1176 /* Read ID and find matching entry */
1177 id = (phy_read(phy, MII_PHYSID1) << 16 | phy_read(phy, MII_PHYSID2)); 1177 id = (sungem_phy_read(phy, MII_PHYSID1) << 16 | sungem_phy_read(phy, MII_PHYSID2));
1178 printk(KERN_DEBUG KBUILD_MODNAME ": " "PHY ID: %x, addr: %x\n", 1178 printk(KERN_DEBUG KBUILD_MODNAME ": " "PHY ID: %x, addr: %x\n",
1179 id, mii_id); 1179 id, mii_id);
1180 for (i=0; (def = mii_phy_table[i]) != NULL; i++) 1180 for (i=0; (def = mii_phy_table[i]) != NULL; i++)
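The sungem_phy.c rename above is pure namespacing: the driver's private phy_read()/phy_write() accessors shadowed the generic phylib helpers of the same names, which gets in the way once both are visible in one build. For contrast, a sketch of the phylib pair from include/linux/phy.h of this era (my recollection, hedged — not text from this patch):

	static inline int phy_read(struct phy_device *phydev, u32 regnum)
	{
		return mdiobus_read(phydev->bus, phydev->addr, regnum);
	}

	static inline int phy_write(struct phy_device *phydev, u32 regnum, u16 val)
	{
		return mdiobus_write(phydev->bus, phydev->addr, regnum, val);
	}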
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index ef10302ec936..2277c3679a51 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1003,7 +1003,6 @@ static int team_port_enter(struct team *team, struct team_port *port)
1003 int err = 0; 1003 int err = 0;
1004 1004
1005 dev_hold(team->dev); 1005 dev_hold(team->dev);
1006 port->dev->priv_flags |= IFF_TEAM_PORT;
1007 if (team->ops.port_enter) { 1006 if (team->ops.port_enter) {
1008 err = team->ops.port_enter(team, port); 1007 err = team->ops.port_enter(team, port);
1009 if (err) { 1008 if (err) {
@@ -1016,7 +1015,6 @@ static int team_port_enter(struct team *team, struct team_port *port)
1016 return 0; 1015 return 0;
1017 1016
1018err_port_enter: 1017err_port_enter:
1019 port->dev->priv_flags &= ~IFF_TEAM_PORT;
1020 dev_put(team->dev); 1018 dev_put(team->dev);
1021 1019
1022 return err; 1020 return err;
@@ -1026,7 +1024,6 @@ static void team_port_leave(struct team *team, struct team_port *port)
1026{ 1024{
1027 if (team->ops.port_leave) 1025 if (team->ops.port_leave)
1028 team->ops.port_leave(team, port); 1026 team->ops.port_leave(team, port);
1029 port->dev->priv_flags &= ~IFF_TEAM_PORT;
1030 dev_put(team->dev); 1027 dev_put(team->dev);
1031} 1028}
1032 1029
@@ -1075,6 +1072,25 @@ static void team_port_disable_netpoll(struct team_port *port)
1075} 1072}
1076#endif 1073#endif
1077 1074
1075static int team_upper_dev_link(struct net_device *dev,
1076 struct net_device *port_dev)
1077{
1078 int err;
1079
1080 err = netdev_master_upper_dev_link(port_dev, dev);
1081 if (err)
1082 return err;
1083 port_dev->priv_flags |= IFF_TEAM_PORT;
1084 return 0;
1085}
1086
1087static void team_upper_dev_unlink(struct net_device *dev,
1088 struct net_device *port_dev)
1089{
1090 netdev_upper_dev_unlink(port_dev, dev);
1091 port_dev->priv_flags &= ~IFF_TEAM_PORT;
1092}
1093
1078static void __team_port_change_port_added(struct team_port *port, bool linkup); 1094static void __team_port_change_port_added(struct team_port *port, bool linkup);
1079static int team_dev_type_check_change(struct net_device *dev, 1095static int team_dev_type_check_change(struct net_device *dev,
1080 struct net_device *port_dev); 1096 struct net_device *port_dev);
@@ -1161,13 +1177,6 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
1161 goto err_enable_netpoll; 1177 goto err_enable_netpoll;
1162 } 1178 }
1163 1179
1164 err = netdev_master_upper_dev_link(port_dev, dev);
1165 if (err) {
1166 netdev_err(dev, "Device %s failed to set upper link\n",
1167 portname);
1168 goto err_set_upper_link;
1169 }
1170
1171 err = netdev_rx_handler_register(port_dev, team_handle_frame, 1180 err = netdev_rx_handler_register(port_dev, team_handle_frame,
1172 port); 1181 port);
1173 if (err) { 1182 if (err) {
@@ -1176,6 +1185,13 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
1176 goto err_handler_register; 1185 goto err_handler_register;
1177 } 1186 }
1178 1187
1188 err = team_upper_dev_link(dev, port_dev);
1189 if (err) {
1190 netdev_err(dev, "Device %s failed to set upper link\n",
1191 portname);
1192 goto err_set_upper_link;
1193 }
1194
1179 err = __team_option_inst_add_port(team, port); 1195 err = __team_option_inst_add_port(team, port);
1180 if (err) { 1196 if (err) {
1181 netdev_err(dev, "Device %s failed to add per-port options\n", 1197 netdev_err(dev, "Device %s failed to add per-port options\n",
@@ -1195,12 +1211,12 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
1195 return 0; 1211 return 0;
1196 1212
1197err_option_port_add: 1213err_option_port_add:
1214 team_upper_dev_unlink(dev, port_dev);
1215
1216err_set_upper_link:
1198 netdev_rx_handler_unregister(port_dev); 1217 netdev_rx_handler_unregister(port_dev);
1199 1218
1200err_handler_register: 1219err_handler_register:
1201 netdev_upper_dev_unlink(port_dev, dev);
1202
1203err_set_upper_link:
1204 team_port_disable_netpoll(port); 1220 team_port_disable_netpoll(port);
1205 1221
1206err_enable_netpoll: 1222err_enable_netpoll:
@@ -1239,8 +1255,8 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
1239 1255
1240 team_port_disable(team, port); 1256 team_port_disable(team, port);
1241 list_del_rcu(&port->list); 1257 list_del_rcu(&port->list);
1258 team_upper_dev_unlink(dev, port_dev);
1242 netdev_rx_handler_unregister(port_dev); 1259 netdev_rx_handler_unregister(port_dev);
1243 netdev_upper_dev_unlink(port_dev, dev);
1244 team_port_disable_netpoll(port); 1260 team_port_disable_netpoll(port);
1245 vlan_vids_del_by_dev(port_dev, dev); 1261 vlan_vids_del_by_dev(port_dev, dev);
1246 dev_uc_unsync(port_dev, dev); 1262 dev_uc_unsync(port_dev, dev);
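The team.c rework above ties IFF_TEAM_PORT to the lifetime of the master upper-dev link: the flag is set only after netdev_master_upper_dev_link() succeeds, cleared before the link is removed, and the error-unwind labels are reordered to mirror the new setup order. A hypothetical predicate (not in the patch) shows the invariant this buys:

	/* True exactly while the port is linked as a team slave; with the
	 * new helpers no caller can observe the flag on a half-constructed
	 * port.
	 */
	static bool team_port_is_linked(const struct net_device *port_dev)
	{
		return port_dev->priv_flags & IFF_TEAM_PORT;
	}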
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 87f710476217..f95e678cb69d 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -424,7 +424,7 @@ enum rtl_register_content {
424 FULL_DUP = 0x01, 424 FULL_DUP = 0x01,
425}; 425};
426 426
427#define RTL8152_MAX_TX 10 427#define RTL8152_MAX_TX 4
428#define RTL8152_MAX_RX 10 428#define RTL8152_MAX_RX 10
429#define INTBUFSIZE 2 429#define INTBUFSIZE 2
430#define CRC_SIZE 4 430#define CRC_SIZE 4
@@ -607,9 +607,9 @@ enum tx_csum_stat {
607 * The RTL chips use a 64 element hash table based on the Ethernet CRC. 607 * The RTL chips use a 64 element hash table based on the Ethernet CRC.
608 */ 608 */
609static const int multicast_filter_limit = 32; 609static const int multicast_filter_limit = 32;
610static unsigned int rx_buf_sz = 16384; 610static unsigned int agg_buf_sz = 16384;
611 611
612#define RTL_LIMITED_TSO_SIZE (rx_buf_sz - sizeof(struct tx_desc) - \ 612#define RTL_LIMITED_TSO_SIZE (agg_buf_sz - sizeof(struct tx_desc) - \
613 VLAN_ETH_HLEN - VLAN_HLEN) 613 VLAN_ETH_HLEN - VLAN_HLEN)
614 614
615static 615static
@@ -623,8 +623,8 @@ int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
623 return -ENOMEM; 623 return -ENOMEM;
624 624
625 ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0), 625 ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0),
626 RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, 626 RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
627 value, index, tmp, size, 500); 627 value, index, tmp, size, 500);
628 628
629 memcpy(data, tmp, size); 629 memcpy(data, tmp, size);
630 kfree(tmp); 630 kfree(tmp);
@@ -643,8 +643,8 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
643 return -ENOMEM; 643 return -ENOMEM;
644 644
645 ret = usb_control_msg(tp->udev, usb_sndctrlpipe(tp->udev, 0), 645 ret = usb_control_msg(tp->udev, usb_sndctrlpipe(tp->udev, 0),
646 RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE, 646 RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE,
647 value, index, tmp, size, 500); 647 value, index, tmp, size, 500);
648 648
649 kfree(tmp); 649 kfree(tmp);
650 650
@@ -652,7 +652,7 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
652} 652}
653 653
654static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size, 654static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
655 void *data, u16 type) 655 void *data, u16 type)
656{ 656{
657 u16 limit = 64; 657 u16 limit = 64;
658 int ret = 0; 658 int ret = 0;
@@ -692,7 +692,7 @@ static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
692} 692}
693 693
694static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen, 694static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen,
695 u16 size, void *data, u16 type) 695 u16 size, void *data, u16 type)
696{ 696{
697 int ret; 697 int ret;
698 u16 byteen_start, byteen_end, byen; 698 u16 byteen_start, byteen_end, byen;
@@ -726,8 +726,8 @@ static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen,
726 while (size) { 726 while (size) {
727 if (size > limit) { 727 if (size > limit) {
728 ret = set_registers(tp, index, 728 ret = set_registers(tp, index,
729 type | BYTE_EN_DWORD, 729 type | BYTE_EN_DWORD,
730 limit, data); 730 limit, data);
731 if (ret < 0) 731 if (ret < 0)
732 goto error1; 732 goto error1;
733 733
@@ -736,8 +736,8 @@ static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen,
736 size -= limit; 736 size -= limit;
737 } else { 737 } else {
738 ret = set_registers(tp, index, 738 ret = set_registers(tp, index,
739 type | BYTE_EN_DWORD, 739 type | BYTE_EN_DWORD,
740 size, data); 740 size, data);
741 if (ret < 0) 741 if (ret < 0)
742 goto error1; 742 goto error1;
743 743
@@ -972,36 +972,8 @@ void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val)
972 usb_autopm_put_interface(tp->intf); 972 usb_autopm_put_interface(tp->intf);
973} 973}
974 974
975static 975static int
976int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags); 976r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags);
977
978static inline void set_ethernet_addr(struct r8152 *tp)
979{
980 struct net_device *dev = tp->netdev;
981 int ret;
982 u8 node_id[8] = {0};
983
984 if (tp->version == RTL_VER_01)
985 ret = pla_ocp_read(tp, PLA_IDR, sizeof(node_id), node_id);
986 else
987 ret = pla_ocp_read(tp, PLA_BACKUP, sizeof(node_id), node_id);
988
989 if (ret < 0) {
990 netif_notice(tp, probe, dev, "inet addr fail\n");
991 } else {
992 if (tp->version != RTL_VER_01) {
993 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR,
994 CRWECR_CONFIG);
995 pla_ocp_write(tp, PLA_IDR, BYTE_EN_SIX_BYTES,
996 sizeof(node_id), node_id);
997 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR,
998 CRWECR_NORAML);
999 }
1000
1001 memcpy(dev->dev_addr, node_id, dev->addr_len);
1002 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1003 }
1004}
1005 977
1006static int rtl8152_set_mac_address(struct net_device *netdev, void *p) 978static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
1007{ 979{
@@ -1020,6 +992,37 @@ static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
1020 return 0; 992 return 0;
1021} 993}
1022 994
995static int set_ethernet_addr(struct r8152 *tp)
996{
997 struct net_device *dev = tp->netdev;
998 struct sockaddr sa;
999 int ret;
1000
1001 if (tp->version == RTL_VER_01)
1002 ret = pla_ocp_read(tp, PLA_IDR, 8, sa.sa_data);
1003 else
1004 ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa.sa_data);
1005
1006 if (ret < 0) {
1007 netif_err(tp, probe, dev, "Get ether addr fail\n");
1008 } else if (!is_valid_ether_addr(sa.sa_data)) {
1009 netif_err(tp, probe, dev, "Invalid ether addr %pM\n",
1010 sa.sa_data);
1011 eth_hw_addr_random(dev);
1012 ether_addr_copy(sa.sa_data, dev->dev_addr);
1013 ret = rtl8152_set_mac_address(dev, &sa);
1014 netif_info(tp, probe, dev, "Random ether addr %pM\n",
1015 sa.sa_data);
1016 } else {
1017 if (tp->version == RTL_VER_01)
1018 ether_addr_copy(dev->dev_addr, sa.sa_data);
1019 else
1020 ret = rtl8152_set_mac_address(dev, &sa);
1021 }
1022
1023 return ret;
1024}
1025
1023static void read_bulk_callback(struct urb *urb) 1026static void read_bulk_callback(struct urb *urb)
1024{ 1027{
1025 struct net_device *netdev; 1028 struct net_device *netdev;
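The reworked set_ethernet_addr() above no longer trusts what it reads from the chip: an invalid address triggers a random fallback via eth_hw_addr_random(), and the result is pushed back through rtl8152_set_mac_address(). The gate is is_valid_ether_addr(); a sketch of its rule, per include/linux/etherdevice.h (hedged recollection):

	/* Valid means: not the all-zero address and not a multicast address
	 * (low bit of the first octet clear).
	 */
	static inline bool is_valid_ether_addr(const u8 *addr)
	{
		return !is_multicast_ether_addr(addr) && !is_zero_ether_addr(addr);
	}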
@@ -1248,13 +1251,13 @@ static int alloc_all_mem(struct r8152 *tp)
1248 skb_queue_head_init(&tp->tx_queue); 1251 skb_queue_head_init(&tp->tx_queue);
1249 1252
1250 for (i = 0; i < RTL8152_MAX_RX; i++) { 1253 for (i = 0; i < RTL8152_MAX_RX; i++) {
1251 buf = kmalloc_node(rx_buf_sz, GFP_KERNEL, node); 1254 buf = kmalloc_node(agg_buf_sz, GFP_KERNEL, node);
1252 if (!buf) 1255 if (!buf)
1253 goto err1; 1256 goto err1;
1254 1257
1255 if (buf != rx_agg_align(buf)) { 1258 if (buf != rx_agg_align(buf)) {
1256 kfree(buf); 1259 kfree(buf);
1257 buf = kmalloc_node(rx_buf_sz + RX_ALIGN, GFP_KERNEL, 1260 buf = kmalloc_node(agg_buf_sz + RX_ALIGN, GFP_KERNEL,
1258 node); 1261 node);
1259 if (!buf) 1262 if (!buf)
1260 goto err1; 1263 goto err1;
@@ -1274,13 +1277,13 @@ static int alloc_all_mem(struct r8152 *tp)
1274 } 1277 }
1275 1278
1276 for (i = 0; i < RTL8152_MAX_TX; i++) { 1279 for (i = 0; i < RTL8152_MAX_TX; i++) {
1277 buf = kmalloc_node(rx_buf_sz, GFP_KERNEL, node); 1280 buf = kmalloc_node(agg_buf_sz, GFP_KERNEL, node);
1278 if (!buf) 1281 if (!buf)
1279 goto err1; 1282 goto err1;
1280 1283
1281 if (buf != tx_agg_align(buf)) { 1284 if (buf != tx_agg_align(buf)) {
1282 kfree(buf); 1285 kfree(buf);
1283 buf = kmalloc_node(rx_buf_sz + TX_ALIGN, GFP_KERNEL, 1286 buf = kmalloc_node(agg_buf_sz + TX_ALIGN, GFP_KERNEL,
1284 node); 1287 node);
1285 if (!buf) 1288 if (!buf)
1286 goto err1; 1289 goto err1;
@@ -1311,8 +1314,8 @@ static int alloc_all_mem(struct r8152 *tp)
1311 1314
1312 tp->intr_interval = (int)ep_intr->desc.bInterval; 1315 tp->intr_interval = (int)ep_intr->desc.bInterval;
1313 usb_fill_int_urb(tp->intr_urb, tp->udev, usb_rcvintpipe(tp->udev, 3), 1316 usb_fill_int_urb(tp->intr_urb, tp->udev, usb_rcvintpipe(tp->udev, 3),
1314 tp->intr_buff, INTBUFSIZE, intr_callback, 1317 tp->intr_buff, INTBUFSIZE, intr_callback,
1315 tp, tp->intr_interval); 1318 tp, tp->intr_interval);
1316 1319
1317 return 0; 1320 return 0;
1318 1321
@@ -1354,8 +1357,7 @@ static inline __be16 get_protocol(struct sk_buff *skb)
1354 return protocol; 1357 return protocol;
1355} 1358}
1356 1359
1357/* 1360/* r8152_csum_workaround()
1358 * r8152_csum_workaround()
1359 * The hw limits the value of the transport offset. When the offset is out of the 1361 * The hw limits the value of the transport offset. When the offset is out of the
1360 * range, calculate the checksum by sw. 1362 * range, calculate the checksum by sw.
1361 */ 1363 */
@@ -1398,8 +1400,7 @@ drop:
1398 } 1400 }
1399} 1401}
1400 1402
1401/* 1403/* msdn_giant_send_check()
1402 * msdn_giant_send_check()
1403 * According to Microsoft's documentation, the TCP Pseudo Header excludes the 1404 * According to Microsoft's documentation, the TCP Pseudo Header excludes the
1404 * packet length for IPv6 TCP large packets. 1405 * packet length for IPv6 TCP large packets.
1405 */ 1406 */
@@ -1518,8 +1519,9 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
1518 spin_unlock(&tx_queue->lock); 1519 spin_unlock(&tx_queue->lock);
1519 1520
1520 tx_data = agg->head; 1521 tx_data = agg->head;
1521 agg->skb_num = agg->skb_len = 0; 1522 agg->skb_num = 0;
1522 remain = rx_buf_sz; 1523 agg->skb_len = 0;
1524 remain = agg_buf_sz;
1523 1525
1524 while (remain >= ETH_ZLEN + sizeof(struct tx_desc)) { 1526 while (remain >= ETH_ZLEN + sizeof(struct tx_desc)) {
1525 struct tx_desc *tx_desc; 1527 struct tx_desc *tx_desc;
@@ -1566,7 +1568,7 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
1566 1568
1567 dev_kfree_skb_any(skb); 1569 dev_kfree_skb_any(skb);
1568 1570
1569 remain = rx_buf_sz - (int)(tx_agg_align(tx_data) - agg->head); 1571 remain = agg_buf_sz - (int)(tx_agg_align(tx_data) - agg->head);
1570 } 1572 }
1571 1573
1572 if (!skb_queue_empty(&skb_head)) { 1574 if (!skb_queue_empty(&skb_head)) {
@@ -1772,8 +1774,8 @@ static
1772int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags) 1774int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
1773{ 1775{
1774 usb_fill_bulk_urb(agg->urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1), 1776 usb_fill_bulk_urb(agg->urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1),
1775 agg->head, rx_buf_sz, 1777 agg->head, agg_buf_sz,
1776 (usb_complete_t)read_bulk_callback, agg); 1778 (usb_complete_t)read_bulk_callback, agg);
1777 1779
1778 return usb_submit_urb(agg->urb, mem_flags); 1780 return usb_submit_urb(agg->urb, mem_flags);
1779} 1781}
@@ -1835,18 +1837,22 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev)
1835 /* Unconditionally log net taps. */ 1837 /* Unconditionally log net taps. */
1836 netif_notice(tp, link, netdev, "Promiscuous mode enabled\n"); 1838 netif_notice(tp, link, netdev, "Promiscuous mode enabled\n");
1837 ocp_data |= RCR_AM | RCR_AAP; 1839 ocp_data |= RCR_AM | RCR_AAP;
1838 mc_filter[1] = mc_filter[0] = 0xffffffff; 1840 mc_filter[1] = 0xffffffff;
1841 mc_filter[0] = 0xffffffff;
1839 } else if ((netdev_mc_count(netdev) > multicast_filter_limit) || 1842 } else if ((netdev_mc_count(netdev) > multicast_filter_limit) ||
1840 (netdev->flags & IFF_ALLMULTI)) { 1843 (netdev->flags & IFF_ALLMULTI)) {
1841 /* Too many to filter perfectly -- accept all multicasts. */ 1844 /* Too many to filter perfectly -- accept all multicasts. */
1842 ocp_data |= RCR_AM; 1845 ocp_data |= RCR_AM;
1843 mc_filter[1] = mc_filter[0] = 0xffffffff; 1846 mc_filter[1] = 0xffffffff;
1847 mc_filter[0] = 0xffffffff;
1844 } else { 1848 } else {
1845 struct netdev_hw_addr *ha; 1849 struct netdev_hw_addr *ha;
1846 1850
1847 mc_filter[1] = mc_filter[0] = 0; 1851 mc_filter[1] = 0;
1852 mc_filter[0] = 0;
1848 netdev_for_each_mc_addr(ha, netdev) { 1853 netdev_for_each_mc_addr(ha, netdev) {
1849 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; 1854 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
1855
1850 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); 1856 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
1851 ocp_data |= RCR_AM; 1857 ocp_data |= RCR_AM;
1852 } 1858 }
@@ -1861,7 +1867,7 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev)
1861} 1867}
1862 1868
1863static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb, 1869static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
1864 struct net_device *netdev) 1870 struct net_device *netdev)
1865{ 1871{
1866 struct r8152 *tp = netdev_priv(netdev); 1872 struct r8152 *tp = netdev_priv(netdev);
1867 1873
@@ -1877,8 +1883,9 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
1877 usb_mark_last_busy(tp->udev); 1883 usb_mark_last_busy(tp->udev);
1878 tasklet_schedule(&tp->tl); 1884 tasklet_schedule(&tp->tl);
1879 } 1885 }
1880 } else if (skb_queue_len(&tp->tx_queue) > tp->tx_qlen) 1886 } else if (skb_queue_len(&tp->tx_queue) > tp->tx_qlen) {
1881 netif_stop_queue(netdev); 1887 netif_stop_queue(netdev);
1888 }
1882 1889
1883 return NETDEV_TX_OK; 1890 return NETDEV_TX_OK;
1884} 1891}
@@ -1903,7 +1910,7 @@ static void rtl8152_nic_reset(struct r8152 *tp)
1903 for (i = 0; i < 1000; i++) { 1910 for (i = 0; i < 1000; i++) {
1904 if (!(ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR) & CR_RST)) 1911 if (!(ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR) & CR_RST))
1905 break; 1912 break;
1906 udelay(100); 1913 usleep_range(100, 400);
1907 } 1914 }
1908} 1915}
1909 1916
@@ -1911,8 +1918,8 @@ static void set_tx_qlen(struct r8152 *tp)
1911{ 1918{
1912 struct net_device *netdev = tp->netdev; 1919 struct net_device *netdev = tp->netdev;
1913 1920
1914 tp->tx_qlen = rx_buf_sz / (netdev->mtu + VLAN_ETH_HLEN + VLAN_HLEN + 1921 tp->tx_qlen = agg_buf_sz / (netdev->mtu + VLAN_ETH_HLEN + VLAN_HLEN +
1915 sizeof(struct tx_desc)); 1922 sizeof(struct tx_desc));
1916} 1923}
1917 1924
1918static inline u8 rtl8152_get_speed(struct r8152 *tp) 1925static inline u8 rtl8152_get_speed(struct r8152 *tp)
@@ -2861,8 +2868,7 @@ static int rtl8152_close(struct net_device *netdev)
2861 if (res < 0) { 2868 if (res < 0) {
2862 rtl_drop_queued_tx(tp); 2869 rtl_drop_queued_tx(tp);
2863 } else { 2870 } else {
2864 /* 2871 /* The autosuspend may have been enabled and wouldn't
2865 * The autosuspend may have been enabled and wouldn't
2866 * be disabled when autoresume occurs, because the 2872 * be disabled when autoresume occurs, because the
2867 * netif_running() would be false. 2873 * netif_running() would be false.
2868 */ 2874 */
@@ -3085,8 +3091,9 @@ static int rtl8152_resume(struct usb_interface *intf)
3085 } else { 3091 } else {
3086 tp->rtl_ops.up(tp); 3092 tp->rtl_ops.up(tp);
3087 rtl8152_set_speed(tp, AUTONEG_ENABLE, 3093 rtl8152_set_speed(tp, AUTONEG_ENABLE,
3088 tp->mii.supports_gmii ? SPEED_1000 : SPEED_100, 3094 tp->mii.supports_gmii ?
3089 DUPLEX_FULL); 3095 SPEED_1000 : SPEED_100,
3096 DUPLEX_FULL);
3090 } 3097 }
3091 tp->speed = 0; 3098 tp->speed = 0;
3092 netif_carrier_off(tp->netdev); 3099 netif_carrier_off(tp->netdev);
@@ -3147,8 +3154,8 @@ static void rtl8152_get_drvinfo(struct net_device *netdev,
3147{ 3154{
3148 struct r8152 *tp = netdev_priv(netdev); 3155 struct r8152 *tp = netdev_priv(netdev);
3149 3156
3150 strncpy(info->driver, MODULENAME, ETHTOOL_BUSINFO_LEN); 3157 strlcpy(info->driver, MODULENAME, sizeof(info->driver));
3151 strncpy(info->version, DRIVER_VERSION, ETHTOOL_BUSINFO_LEN); 3158 strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
3152 usb_make_path(tp->udev, info->bus_info, sizeof(info->bus_info)); 3159 usb_make_path(tp->udev, info->bus_info, sizeof(info->bus_info));
3153} 3160}
3154 3161
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 59caa06f34a6..9359a13d285a 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -934,7 +934,6 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
934 dev_kfree_skb_any(skb); 934 dev_kfree_skb_any(skb);
935 return NETDEV_TX_OK; 935 return NETDEV_TX_OK;
936 } 936 }
937 virtqueue_kick(sq->vq);
938 937
939 /* Don't wait up for transmitted skbs to be freed. */ 938 /* Don't wait up for transmitted skbs to be freed. */
940 skb_orphan(skb); 939 skb_orphan(skb);
@@ -954,6 +953,9 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
954 } 953 }
955 } 954 }
956 955
956 if (__netif_subqueue_stopped(dev, qnum) || !skb->xmit_more)
957 virtqueue_kick(sq->vq);
958
957 return NETDEV_TX_OK; 959 return NETDEV_TX_OK;
958} 960}
959 961
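The virtio_net change above defers the virtqueue doorbell using the new skb->xmit_more hint: when the stack has more skbs queued for the same txq it sets xmit_more, so the driver may batch them under a single kick — provided it still kicks when the subqueue stops, since no later xmit would flush the ring. The generic shape of the pattern, as a sketch with hypothetical names:

	static netdev_tx_t sample_start_xmit(struct sk_buff *skb,
					     struct net_device *dev)
	{
		int qnum = skb_get_queue_mapping(skb);

		/* ... post skb to the hardware ring here ... */

		/* Ring the doorbell only on the last skb of a burst, or when
		 * the queue just stopped and no later xmit will do it.
		 */
		if (!skb->xmit_more || __netif_subqueue_stopped(dev, qnum))
			; /* kick the device */

		return NETDEV_TX_OK;
	}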
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index beb377b2d4b7..53c3ec19807c 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1158,8 +1158,6 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
1158 if (!vs) 1158 if (!vs)
1159 goto drop; 1159 goto drop;
1160 1160
1161 skb_pop_rcv_encapsulation(skb);
1162
1163 vs->rcv(vs, skb, vxh->vx_vni); 1161 vs->rcv(vs, skb, vxh->vx_vni);
1164 return 0; 1162 return 0;
1165 1163
@@ -2372,6 +2370,8 @@ static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
2372 /* Disable multicast loopback */ 2370 /* Disable multicast loopback */
2373 inet_sk(sock->sk)->mc_loop = 0; 2371 inet_sk(sock->sk)->mc_loop = 0;
2374 2372
2373 udp_set_convert_csum(sock->sk, true);
2374
2375 return sock; 2375 return sock;
2376} 2376}
2377 2377
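The two vxlan hunks pair up: skb_pop_rcv_encapsulation() is dropped from the receive path, and the tunnel socket instead opts into checksum conversion. udp_set_convert_csum() lets UDP receive convert CHECKSUM_UNNECESSARY into CHECKSUM_COMPLETE for encapsulated packets, so inner checksums remain verifiable. Its implementation is a one-line flag set, roughly (hedged recollection of include/net/udp.h):

	static inline void udp_set_convert_csum(struct sock *sk, bool val)
	{
		udp_sk(sk)->convert_csum = val;
	}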
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
index 43c9960dce1c..ae6ecf401189 100644
--- a/drivers/net/wan/dlci.c
+++ b/drivers/net/wan/dlci.c
@@ -192,8 +192,10 @@ static netdev_tx_t dlci_transmit(struct sk_buff *skb, struct net_device *dev)
192{ 192{
193 struct dlci_local *dlp = netdev_priv(dev); 193 struct dlci_local *dlp = netdev_priv(dev);
194 194
195 if (skb) 195 if (skb) {
196 dlp->slave->netdev_ops->ndo_start_xmit(skb, dlp->slave); 196 struct netdev_queue *txq = skb_get_tx_queue(dev, skb);
197 netdev_start_xmit(skb, dlp->slave, txq, false);
198 }
197 return NETDEV_TX_OK; 199 return NETDEV_TX_OK;
198} 200}
199 201
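dlci now goes through netdev_start_xmit() instead of calling the slave's ndo_start_xmit() directly, funnelling every transmit through one helper that owns the xmit_more bookkeeping. A sketch of the inner helper from include/linux/netdevice.h in this series (hedged recollection):

	static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
						      struct sk_buff *skb,
						      struct net_device *dev,
						      bool more)
	{
		skb->xmit_more = more ? 1 : 0;
		return ops->ndo_start_xmit(skb, dev);
	}

dlci passes more = false, so behaviour is unchanged; only the call site is unified.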
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index bcdc882cd415..146f48cc65d7 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -1101,7 +1101,15 @@ static void ncm_tx_tasklet(unsigned long data)
1101 /* Only send if data is available. */ 1101 /* Only send if data is available. */
1102 if (ncm->skb_tx_data) { 1102 if (ncm->skb_tx_data) {
1103 ncm->timer_force_tx = true; 1103 ncm->timer_force_tx = true;
1104
1105 /* XXX This allowance of a NULL skb argument to ndo_start_xmit
1106 * XXX is not sane. The gadget layer should be redesigned so
1107 * XXX that the dev->wrap() invocations to build SKBs are transparent
1108 * XXX and performed in some way outside of the ndo_start_xmit
1109 * XXX interface.
1110 */
1104 ncm->netdev->netdev_ops->ndo_start_xmit(NULL, ncm->netdev); 1111 ncm->netdev->netdev_ops->ndo_start_xmit(NULL, ncm->netdev);
1112
1105 ncm->timer_force_tx = false; 1113 ncm->timer_force_tx = false;
1106 } 1114 }
1107} 1115}