Diffstat (limited to 'drivers/net/jme.c')
 drivers/net/jme.c | 120
 1 file changed, 76 insertions(+), 44 deletions(-)
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index 1d2a32544ed2..b705ad3a53a7 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -37,6 +37,7 @@
 #include <linux/tcp.h>
 #include <linux/udp.h>
 #include <linux/if_vlan.h>
+#include <linux/slab.h>
 #include <net/ip6_checksum.h>
 #include "jme.h"
 
@@ -288,7 +289,7 @@ jme_set_rx_pcc(struct jme_adapter *jme, int p)
 	wmb();
 
 	if (!(test_bit(JME_FLAG_POLL, &jme->flags)))
-		msg_rx_status(jme, "Switched to PCC_P%d\n", p);
+		netif_info(jme, rx_status, jme->dev, "Switched to PCC_P%d\n", p);
 }
 
 static void
@@ -483,13 +484,13 @@ jme_check_link(struct net_device *netdev, int testonly)
 		strcat(linkmsg, (phylink & PHY_LINK_MDI_STAT) ?
 					"MDI-X" :
 					"MDI");
-		msg_link(jme, "Link is up at %s.\n", linkmsg);
+		netif_info(jme, link, jme->dev, "Link is up at %s.\n", linkmsg);
 		netif_carrier_on(netdev);
 	} else {
 		if (testonly)
 			goto out;
 
-		msg_link(jme, "Link is down.\n");
+		netif_info(jme, link, jme->dev, "Link is down.\n");
 		jme->phylink = 0;
 		netif_carrier_off(netdev);
 	}
@@ -883,20 +884,20 @@ jme_rxsum_ok(struct jme_adapter *jme, u16 flags)
 	if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_TCPON | RXWBFLAG_TCPCS))
 			== RXWBFLAG_TCPON)) {
 		if (flags & RXWBFLAG_IPV4)
-			msg_rx_err(jme, "TCP Checksum error\n");
+			netif_err(jme, rx_err, jme->dev, "TCP Checksum error\n");
 		return false;
 	}
 
 	if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS))
 			== RXWBFLAG_UDPON)) {
 		if (flags & RXWBFLAG_IPV4)
-			msg_rx_err(jme, "UDP Checksum error.\n");
+			netif_err(jme, rx_err, jme->dev, "UDP Checksum error.\n");
 		return false;
 	}
 
 	if (unlikely((flags & (RXWBFLAG_IPV4 | RXWBFLAG_IPCS))
 			== RXWBFLAG_IPV4)) {
-		msg_rx_err(jme, "IPv4 Checksum error.\n");
+		netif_err(jme, rx_err, jme->dev, "IPv4 Checksum error.\n");
 		return false;
 	}
 
@@ -946,6 +947,8 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
 			jme->jme_vlan_rx(skb, jme->vlgrp,
 				le16_to_cpu(rxdesc->descwb.vlan));
 			NET_STAT(jme).rx_bytes += 4;
+		} else {
+			dev_kfree_skb(skb);
 		}
 	} else {
 		jme->jme_rx(skb);
@@ -1050,8 +1053,8 @@ jme_dynamic_pcc(struct jme_adapter *jme)
 
 	if ((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD)
 		jme_attempt_pcc(dpi, PCC_P3);
-	else if ((NET_STAT(jme).rx_packets - dpi->last_pkts) > PCC_P2_THRESHOLD
-	|| dpi->intr_cnt > PCC_INTR_THRESHOLD)
+	else if ((NET_STAT(jme).rx_packets - dpi->last_pkts) > PCC_P2_THRESHOLD ||
+		 dpi->intr_cnt > PCC_INTR_THRESHOLD)
 		jme_attempt_pcc(dpi, PCC_P2);
 	else
 		jme_attempt_pcc(dpi, PCC_P1);
@@ -1186,9 +1189,9 @@ jme_link_change_tasklet(unsigned long arg)
 
 	while (!atomic_dec_and_test(&jme->link_changing)) {
 		atomic_inc(&jme->link_changing);
-		msg_intr(jme, "Get link change lock failed.\n");
+		netif_info(jme, intr, jme->dev, "Get link change lock failed.\n");
 		while (atomic_read(&jme->link_changing) != 1)
-			msg_intr(jme, "Waiting link change lock.\n");
+			netif_info(jme, intr, jme->dev, "Waiting link change lock.\n");
 	}
 
 	if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu)
@@ -1305,7 +1308,7 @@ jme_rx_empty_tasklet(unsigned long arg)
 	if (unlikely(!netif_carrier_ok(jme->dev)))
 		return;
 
-	msg_rx_status(jme, "RX Queue Full!\n");
+	netif_info(jme, rx_status, jme->dev, "RX Queue Full!\n");
 
 	jme_rx_clean_tasklet(arg);
 
@@ -1325,7 +1328,7 @@ jme_wake_queue_if_stopped(struct jme_adapter *jme)
 	smp_wmb();
 	if (unlikely(netif_queue_stopped(jme->dev) &&
 	atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) {
-		msg_tx_done(jme, "TX Queue Waked.\n");
+		netif_info(jme, tx_done, jme->dev, "TX Queue Waked.\n");
 		netif_wake_queue(jme->dev);
 	}
 
@@ -1835,7 +1838,7 @@ jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
 			*flags |= TXFLAG_UDPCS;
 			break;
 		default:
-			msg_tx_err(jme, "Error upper layer protocol.\n");
+			netif_err(jme, tx_err, jme->dev, "Error upper layer protocol.\n");
 			break;
 		}
 	}
@@ -1910,12 +1913,12 @@ jme_stop_queue_if_full(struct jme_adapter *jme)
 	smp_wmb();
 	if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) {
 		netif_stop_queue(jme->dev);
-		msg_tx_queued(jme, "TX Queue Paused.\n");
+		netif_info(jme, tx_queued, jme->dev, "TX Queue Paused.\n");
 		smp_wmb();
 		if (atomic_read(&txring->nr_free)
 			>= (jme->tx_wake_threshold)) {
 			netif_wake_queue(jme->dev);
-			msg_tx_queued(jme, "TX Queue Fast Waked.\n");
+			netif_info(jme, tx_queued, jme->dev, "TX Queue Fast Waked.\n");
 		}
 	}
 
@@ -1923,7 +1926,7 @@ jme_stop_queue_if_full(struct jme_adapter *jme)
 			(jiffies - txbi->start_xmit) >= TX_TIMEOUT &&
 			txbi->skb)) {
 		netif_stop_queue(jme->dev);
-		msg_tx_queued(jme, "TX Queue Stopped %d@%lu.\n", idx, jiffies);
+		netif_info(jme, tx_queued, jme->dev, "TX Queue Stopped %d@%lu.\n", idx, jiffies);
 	}
 }
 
@@ -1946,7 +1949,7 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	if (unlikely(idx < 0)) {
 		netif_stop_queue(netdev);
-		msg_tx_err(jme, "BUG! Tx ring full when queue awake!\n");
+		netif_err(jme, tx_err, jme->dev, "BUG! Tx ring full when queue awake!\n");
 
 		return NETDEV_TX_BUSY;
 	}
@@ -1997,7 +2000,6 @@ jme_set_multi(struct net_device *netdev)
 {
 	struct jme_adapter *jme = netdev_priv(netdev);
 	u32 mc_hash[2] = {};
-	int i;
 
 	spin_lock_bh(&jme->rxmcs_lock);
 
@@ -2012,10 +2014,7 @@ jme_set_multi(struct net_device *netdev)
 		int bit_nr;
 
 		jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
-		for (i = 0, mclist = netdev->mc_list;
-			mclist && i < netdev->mc_count;
-			++i, mclist = mclist->next) {
-
+		netdev_for_each_mc_addr(mclist, netdev) {
 			bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F;
 			mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
 		}
@@ -2085,12 +2084,45 @@ jme_tx_timeout(struct net_device *netdev)
 	jme_reset_link(jme);
 }
 
+static inline void jme_pause_rx(struct jme_adapter *jme)
+{
+	atomic_dec(&jme->link_changing);
+
+	jme_set_rx_pcc(jme, PCC_OFF);
+	if (test_bit(JME_FLAG_POLL, &jme->flags)) {
+		JME_NAPI_DISABLE(jme);
+	} else {
+		tasklet_disable(&jme->rxclean_task);
+		tasklet_disable(&jme->rxempty_task);
+	}
+}
+
+static inline void jme_resume_rx(struct jme_adapter *jme)
+{
+	struct dynpcc_info *dpi = &(jme->dpi);
+
+	if (test_bit(JME_FLAG_POLL, &jme->flags)) {
+		JME_NAPI_ENABLE(jme);
+	} else {
+		tasklet_hi_enable(&jme->rxclean_task);
+		tasklet_hi_enable(&jme->rxempty_task);
+	}
+	dpi->cur = PCC_P1;
+	dpi->attempt = PCC_P1;
+	dpi->cnt = 0;
+	jme_set_rx_pcc(jme, PCC_P1);
+
+	atomic_inc(&jme->link_changing);
+}
+
 static void
 jme_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 {
 	struct jme_adapter *jme = netdev_priv(netdev);
 
+	jme_pause_rx(jme);
 	jme->vlgrp = grp;
+	jme_resume_rx(jme);
 }
 
 static void
@@ -2199,8 +2231,8 @@ jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
 	if (netif_running(netdev))
 		return -EBUSY;
 
-	if (ecmd->use_adaptive_rx_coalesce
-	&& test_bit(JME_FLAG_POLL, &jme->flags)) {
+	if (ecmd->use_adaptive_rx_coalesce &&
+	    test_bit(JME_FLAG_POLL, &jme->flags)) {
 		clear_bit(JME_FLAG_POLL, &jme->flags);
 		jme->jme_rx = netif_rx;
 		jme->jme_vlan_rx = vlan_hwaccel_rx;
@@ -2209,8 +2241,8 @@ jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
 		dpi->cnt = 0;
 		jme_set_rx_pcc(jme, PCC_P1);
 		jme_interrupt_mode(jme);
-	} else if (!(ecmd->use_adaptive_rx_coalesce)
-	&& !(test_bit(JME_FLAG_POLL, &jme->flags))) {
+	} else if (!(ecmd->use_adaptive_rx_coalesce) &&
+		   !(test_bit(JME_FLAG_POLL, &jme->flags))) {
 		set_bit(JME_FLAG_POLL, &jme->flags);
 		jme->jme_rx = netif_receive_skb;
 		jme->jme_vlan_rx = vlan_hwaccel_receive_skb;
@@ -2473,7 +2505,7 @@ jme_smb_read(struct jme_adapter *jme, unsigned int addr)
 		val = jread32(jme, JME_SMBCSR);
 	}
 	if (!to) {
-		msg_hw(jme, "SMB Bus Busy.\n");
+		netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n");
 		return 0xFF;
 	}
 
@@ -2489,7 +2521,7 @@ jme_smb_read(struct jme_adapter *jme, unsigned int addr)
 		val = jread32(jme, JME_SMBINTF);
 	}
 	if (!to) {
-		msg_hw(jme, "SMB Bus Busy.\n");
+		netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n");
 		return 0xFF;
 	}
 
@@ -2509,7 +2541,7 @@ jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data)
 		val = jread32(jme, JME_SMBCSR);
 	}
 	if (!to) {
-		msg_hw(jme, "SMB Bus Busy.\n");
+		netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n");
 		return;
 	}
 
@@ -2526,7 +2558,7 @@ jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data)
 		val = jread32(jme, JME_SMBINTF);
 	}
 	if (!to) {
-		msg_hw(jme, "SMB Bus Busy.\n");
+		netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n");
 		return;
 	}
 
@@ -2764,19 +2796,19 @@ jme_init_one(struct pci_dev *pdev,
 	atomic_set(&jme->rx_empty, 1);
 
 	tasklet_init(&jme->pcc_task,
-		     &jme_pcc_tasklet,
+		     jme_pcc_tasklet,
 		     (unsigned long) jme);
 	tasklet_init(&jme->linkch_task,
-		     &jme_link_change_tasklet,
+		     jme_link_change_tasklet,
 		     (unsigned long) jme);
 	tasklet_init(&jme->txclean_task,
-		     &jme_tx_clean_tasklet,
+		     jme_tx_clean_tasklet,
 		     (unsigned long) jme);
 	tasklet_init(&jme->rxclean_task,
-		     &jme_rx_clean_tasklet,
+		     jme_rx_clean_tasklet,
 		     (unsigned long) jme);
 	tasklet_init(&jme->rxempty_task,
-		     &jme_rx_empty_tasklet,
+		     jme_rx_empty_tasklet,
 		     (unsigned long) jme);
 	tasklet_disable_nosync(&jme->linkch_task);
 	tasklet_disable_nosync(&jme->txclean_task);
@@ -2876,14 +2908,14 @@ jme_init_one(struct pci_dev *pdev,
 		goto err_out_unmap;
 	}
 
-	msg_probe(jme, "%s%s ver:%x rev:%x macaddr:%pM\n",
+	netif_info(jme, probe, jme->dev, "%s%s ver:%x rev:%x macaddr:%pM\n",
 		(jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
 			"JMC250 Gigabit Ethernet" :
 		(jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
 			"JMC260 Fast Ethernet" : "Unknown",
 		(jme->fpgaver != 0) ? " (FPGA)" : "",
 		(jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
 		jme->rev, netdev->dev_addr);
 
 	return 0;
 
@@ -2994,7 +3026,7 @@ jme_resume(struct pci_dev *pdev)
 }
 #endif
 
-static struct pci_device_id jme_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(jme_pci_tbl) = {
 	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC250) },
 	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC260) },
 	{ }