author     Nobuhiro Iwamatsu <nobuhiro.iwamatsu.yj@renesas.com>   2011-02-15 16:17:32 -0500
committer  David S. Miller <davem@davemloft.net>                  2011-02-22 13:10:09 -0500
commit     dc19e4e5e02fb6b46cccb08b2735e38b997a6ddf
tree       715d1a105d4b42201ea5f6466a3a89b4d97b8c52
parent     59ed5aba9ca1c799e272b352d5d2d7fe12bd32e8
sh: sh_eth: Add ethtool support
This commit adds support for the following ethtool operations:

- get_settings
- set_settings
- nway_reset
- get_msglevel
- set_msglevel
- get_link
- get_strings
- get_ethtool_stats
- get_sset_count

The remaining ethtool operations are not supported by this device.

Signed-off-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
Signed-off-by: Nobuhiro Iwamatsu <nobuhiro.iwamatsu.yj@renesas.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/net/sh_eth.c | 208
1 file changed, 189 insertions(+), 19 deletions(-)
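The operations listed in the commit message are reached from user space through the ethtool utility or the SIOCETHTOOL ioctl. As an editorial illustration only (not part of this patch), the minimal user-space sketch below queries the link settings that the new sh_eth_get_settings handler serves via phy_ethtool_gset(); the interface name "eth0" is an assumption made for the example.

/* Editorial illustration -- not part of the patch.  Queries link settings
 * through the SIOCETHTOOL ioctl; the kernel dispatches ETHTOOL_GSET to the
 * driver's .get_settings handler (sh_eth_get_settings in the diff below).
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed interface name */
	ifr.ifr_data = (char *)&ecmd;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("SIOCETHTOOL");
		close(fd);
		return 1;
	}

	printf("speed: %u Mb/s, duplex: %s\n", ecmd.speed,
	       ecmd.duplex == DUPLEX_FULL ? "full" : "half");
	close(fd);
	return 0;
}

The same information is what "ethtool eth0" prints; "ethtool -S eth0" exercises get_sset_count/get_strings/get_ethtool_stats, and "ethtool -s eth0 msglvl <mask>" drives the new set_msglevel handler.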
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 819c1750e2ab..095e52580884 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -32,10 +32,17 @@
 #include <linux/io.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
+#include <linux/ethtool.h>
 #include <asm/cacheflush.h>
 
 #include "sh_eth.h"
 
+#define SH_ETH_DEF_MSG_ENABLE \
+		(NETIF_MSG_LINK	| \
+		NETIF_MSG_TIMER	| \
+		NETIF_MSG_RX_ERR| \
+		NETIF_MSG_TX_ERR)
+
 /* There is CPU dependent code */
 #if defined(CONFIG_CPU_SUBTYPE_SH7724)
 #define SH_ETH_RESET_DEFAULT	1
@@ -817,6 +824,20 @@ static int sh_eth_rx(struct net_device *ndev)
 	return 0;
 }
 
+static void sh_eth_rcv_snd_disable(u32 ioaddr)
+{
+	/* disable tx and rx */
+	writel(readl(ioaddr + ECMR) &
+		~(ECMR_RE | ECMR_TE), ioaddr + ECMR);
+}
+
+static void sh_eth_rcv_snd_enable(u32 ioaddr)
+{
+	/* enable tx and rx */
+	writel(readl(ioaddr + ECMR) |
+		(ECMR_RE | ECMR_TE), ioaddr + ECMR);
+}
+
 /* error control function */
 static void sh_eth_error(struct net_device *ndev, int intr_status)
 {
@@ -843,11 +864,9 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
 				if (mdp->ether_link_active_low)
 					link_stat = ~link_stat;
 			}
-			if (!(link_stat & PHY_ST_LINK)) {
-				/* Link Down : disable tx and rx */
-				writel(readl(ioaddr + ECMR) &
-					  ~(ECMR_RE | ECMR_TE), ioaddr + ECMR);
-			} else {
+			if (!(link_stat & PHY_ST_LINK))
+				sh_eth_rcv_snd_disable(ioaddr);
+			else {
 				/* Link Up */
 				writel(readl(ioaddr + EESIPR) &
 					  ~DMAC_M_ECI, ioaddr + EESIPR);
@@ -857,8 +876,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
 				writel(readl(ioaddr + EESIPR) |
 					  DMAC_M_ECI, ioaddr + EESIPR);
 				/* enable tx and rx */
-				writel(readl(ioaddr + ECMR) |
-					(ECMR_RE | ECMR_TE), ioaddr + ECMR);
+				sh_eth_rcv_snd_enable(ioaddr);
 			}
 		}
 	}
@@ -867,6 +885,8 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
 		/* Write buck end. unused write back interrupt */
 		if (intr_status & EESR_TABT)	/* Transmit Abort int */
 			mdp->stats.tx_aborted_errors++;
+			if (netif_msg_tx_err(mdp))
+				dev_err(&ndev->dev, "Transmit Abort\n");
 	}
 
 	if (intr_status & EESR_RABT) {
@@ -874,14 +894,23 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
 		if (intr_status & EESR_RFRMER) {
 			/* Receive Frame Overflow int */
 			mdp->stats.rx_frame_errors++;
-			dev_err(&ndev->dev, "Receive Frame Overflow\n");
+			if (netif_msg_rx_err(mdp))
+				dev_err(&ndev->dev, "Receive Abort\n");
 		}
 	}
 
-	if (!mdp->cd->no_ade) {
-		if (intr_status & EESR_ADE && intr_status & EESR_TDE &&
-		    intr_status & EESR_TFE)
-			mdp->stats.tx_fifo_errors++;
+	if (intr_status & EESR_TDE) {
+		/* Transmit Descriptor Empty int */
+		mdp->stats.tx_fifo_errors++;
+		if (netif_msg_tx_err(mdp))
+			dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
+	}
+
+	if (intr_status & EESR_TFE) {
+		/* FIFO under flow */
+		mdp->stats.tx_fifo_errors++;
+		if (netif_msg_tx_err(mdp))
+			dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
 	}
 
 	if (intr_status & EESR_RDE) {
@@ -890,12 +919,22 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
 
 		if (readl(ioaddr + EDRRR) ^ EDRRR_R)
 			writel(EDRRR_R, ioaddr + EDRRR);
-		dev_err(&ndev->dev, "Receive Descriptor Empty\n");
+		if (netif_msg_rx_err(mdp))
+			dev_err(&ndev->dev, "Receive Descriptor Empty\n");
 	}
+
 	if (intr_status & EESR_RFE) {
 		/* Receive FIFO Overflow int */
 		mdp->stats.rx_fifo_errors++;
-		dev_err(&ndev->dev, "Receive FIFO Overflow\n");
+		if (netif_msg_rx_err(mdp))
+			dev_err(&ndev->dev, "Receive FIFO Overflow\n");
+	}
+
+	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
+		/* Address Error */
+		mdp->stats.tx_fifo_errors++;
+		if (netif_msg_tx_err(mdp))
+			dev_err(&ndev->dev, "Address Error\n");
 	}
 
 	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
@@ -1012,7 +1051,7 @@ static void sh_eth_adjust_link(struct net_device *ndev)
 		mdp->duplex = -1;
 	}
 
-	if (new_state)
+	if (new_state && netif_msg_link(mdp))
 		phy_print_status(phydev);
 }
 
@@ -1063,6 +1102,132 @@ static int sh_eth_phy_start(struct net_device *ndev)
 	return 0;
 }
 
+static int sh_eth_get_settings(struct net_device *ndev,
+			struct ethtool_cmd *ecmd)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&mdp->lock, flags);
+	ret = phy_ethtool_gset(mdp->phydev, ecmd);
+	spin_unlock_irqrestore(&mdp->lock, flags);
+
+	return ret;
+}
+
+static int sh_eth_set_settings(struct net_device *ndev,
+		struct ethtool_cmd *ecmd)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	unsigned long flags;
+	int ret;
+	u32 ioaddr = ndev->base_addr;
+
+	spin_lock_irqsave(&mdp->lock, flags);
+
+	/* disable tx and rx */
+	sh_eth_rcv_snd_disable(ioaddr);
+
+	ret = phy_ethtool_sset(mdp->phydev, ecmd);
+	if (ret)
+		goto error_exit;
+
+	if (ecmd->duplex == DUPLEX_FULL)
+		mdp->duplex = 1;
+	else
+		mdp->duplex = 0;
+
+	if (mdp->cd->set_duplex)
+		mdp->cd->set_duplex(ndev);
+
+error_exit:
+	mdelay(1);
+
+	/* enable tx and rx */
+	sh_eth_rcv_snd_enable(ioaddr);
+
+	spin_unlock_irqrestore(&mdp->lock, flags);
+
+	return ret;
+}
+
+static int sh_eth_nway_reset(struct net_device *ndev)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&mdp->lock, flags);
+	ret = phy_start_aneg(mdp->phydev);
+	spin_unlock_irqrestore(&mdp->lock, flags);
+
+	return ret;
+}
+
+static u32 sh_eth_get_msglevel(struct net_device *ndev)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	return mdp->msg_enable;
+}
+
+static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	mdp->msg_enable = value;
+}
+
+static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
+	"rx_current", "tx_current",
+	"rx_dirty", "tx_dirty",
+};
+#define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)
+
+static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return SH_ETH_STATS_LEN;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void sh_eth_get_ethtool_stats(struct net_device *ndev,
+			struct ethtool_stats *stats, u64 *data)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	int i = 0;
+
+	/* device-specific stats */
+	data[i++] = mdp->cur_rx;
+	data[i++] = mdp->cur_tx;
+	data[i++] = mdp->dirty_rx;
+	data[i++] = mdp->dirty_tx;
+}
+
+static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+	switch (stringset) {
+	case ETH_SS_STATS:
+		memcpy(data, *sh_eth_gstrings_stats,
+					sizeof(sh_eth_gstrings_stats));
+		break;
+	}
+}
+
+static struct ethtool_ops sh_eth_ethtool_ops = {
+	.get_settings		= sh_eth_get_settings,
+	.set_settings		= sh_eth_set_settings,
+	.nway_reset		= sh_eth_nway_reset,
+	.get_msglevel		= sh_eth_get_msglevel,
+	.set_msglevel		= sh_eth_set_msglevel,
+	.get_link		= ethtool_op_get_link,
+	.get_strings		= sh_eth_get_strings,
+	.get_ethtool_stats	= sh_eth_get_ethtool_stats,
+	.get_sset_count		= sh_eth_get_sset_count,
+};
+
 /* network device open function */
 static int sh_eth_open(struct net_device *ndev)
 {
@@ -1073,8 +1238,8 @@ static int sh_eth_open(struct net_device *ndev)
 
 	ret = request_irq(ndev->irq, sh_eth_interrupt,
 #if defined(CONFIG_CPU_SUBTYPE_SH7763) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7764) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7757)
+	defined(CONFIG_CPU_SUBTYPE_SH7764) || \
+	defined(CONFIG_CPU_SUBTYPE_SH7757)
 				IRQF_SHARED,
 #else
 				0,
@@ -1123,8 +1288,8 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
 
 	netif_stop_queue(ndev);
 
-	/* worning message out. */
-	printk(KERN_WARNING "%s: transmit timed out, status %8.8x,"
+	if (netif_msg_timer(mdp))
+		dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x,"
 		" resetting...\n", ndev->name, (int)readl(ioaddr + EESR));
 
 	/* tx_errors count up */
@@ -1167,6 +1332,8 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	spin_lock_irqsave(&mdp->lock, flags);
 	if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
 		if (!sh_eth_txfree(ndev)) {
+			if (netif_msg_tx_queued(mdp))
+				dev_warn(&ndev->dev, "TxFD exhausted.\n");
 			netif_stop_queue(ndev);
 			spin_unlock_irqrestore(&mdp->lock, flags);
 			return NETDEV_TX_BUSY;
@@ -1497,8 +1664,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
 
 	/* set function */
 	ndev->netdev_ops = &sh_eth_netdev_ops;
+	SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
 	ndev->watchdog_timeo = TX_TIMEOUT;
 
+	/* debug message level */
+	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
 	mdp->post_rx = POST_RX >> (devno << 1);
 	mdp->post_fw = POST_FW >> (devno << 1);
 
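Editorial note on the message-level gating used throughout this patch: netif_msg_tx_err(mdp), netif_msg_rx_err(mdp), netif_msg_timer(mdp), netif_msg_tx_queued(mdp) and netif_msg_link(mdp) are bit tests of mdp->msg_enable, which probe initialises to SH_ETH_DEF_MSG_ENABLE and which the new get_msglevel/set_msglevel handlers expose to "ethtool -s ethX msglvl <mask>". The stand-alone sketch below models that pattern; the bit values are placeholders, not the kernel's actual NETIF_MSG_* constants, which live in <linux/netdevice.h> along with the real netif_msg_*() helpers.

/* Stand-alone model of the msg_enable gating pattern (illustration only). */
#include <stdio.h>

#define MSG_LINK	(1u << 0)	/* placeholder for NETIF_MSG_LINK */
#define MSG_TIMER	(1u << 1)	/* placeholder for NETIF_MSG_TIMER */
#define MSG_RX_ERR	(1u << 2)	/* placeholder for NETIF_MSG_RX_ERR */
#define MSG_TX_ERR	(1u << 3)	/* placeholder for NETIF_MSG_TX_ERR */

struct priv {
	unsigned int msg_enable;	/* what get/set_msglevel read and write */
};

#define msg_tx_err(p)	((p)->msg_enable & MSG_TX_ERR)

int main(void)
{
	struct priv p = {
		.msg_enable = MSG_LINK | MSG_TIMER | MSG_RX_ERR | MSG_TX_ERR,
	};

	/* mirrors: if (netif_msg_tx_err(mdp)) dev_err(...); */
	if (msg_tx_err(&p))
		printf("Transmit Abort\n");

	p.msg_enable &= ~MSG_TX_ERR;	/* what set_msglevel would do */
	if (!msg_tx_err(&p))
		printf("tx error messages now silenced\n");

	return 0;
}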