Diffstat (limited to 'drivers/net/8139cp.c')
-rw-r--r--	drivers/net/8139cp.c	92
1 file changed, 38 insertions(+), 54 deletions(-)
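Most of the hunks below apply one mechanical conversion: the driver's open-coded message-level checks and name-prefixed printks are replaced by the generic netdev logging helpers (netif_dbg, netdev_err, netdev_warn, netdev_info). A minimal before/after sketch of the pattern, taken from the cp_rx_err_acct() hunk in this diff — the helper is the in-tree netif_dbg() macro, not something introduced here:

	/* before: test the msg_enable bit by hand, prefix with dev->name */
	if (netif_msg_rx_err(cp))
		pr_debug("%s: rx err, slot %d status 0x%x len %d\n",
			 cp->dev->name, rx_tail, status, len);

	/* after: netif_dbg() performs the same netif_msg_rx_err() test and
	 * prints through netdev_dbg(), which prefixes the device name itself */
	netif_dbg(cp, rx_err, cp->dev, "rx err, slot %d status 0x%x len %d\n",
		  rx_tail, status, len);

The same substitution removes the hand-rolled PFX prefix, since pr_fmt()/netdev_*() now supply the module and device names.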
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 83a1922e68e0..a09e6ce3eaa0 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -46,6 +46,8 @@

  */

+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #define DRV_NAME		"8139cp"
 #define DRV_VERSION		"1.3"
 #define DRV_RELDATE		"Mar 22, 2004"
@@ -62,6 +64,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/delay.h>
 #include <linux/ethtool.h>
+#include <linux/gfp.h>
 #include <linux/mii.h>
 #include <linux/if_vlan.h>
 #include <linux/crc32.h>
@@ -104,8 +107,6 @@ static int multicast_filter_limit = 32;
 module_param(multicast_filter_limit, int, 0);
 MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");

-#define PFX			DRV_NAME ": "
-
 #define CP_DEF_MSG_ENABLE	(NETIF_MSG_DRV		| \
				 NETIF_MSG_PROBE	| \
				 NETIF_MSG_LINK)
@@ -394,7 +395,7 @@ static int cp_get_eeprom(struct net_device *dev,
 static int cp_set_eeprom(struct net_device *dev,
			 struct ethtool_eeprom *eeprom, u8 *data);

-static struct pci_device_id cp_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(cp_pci_tbl) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139), },
	{ PCI_DEVICE(PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322), },
	{ },
@@ -470,9 +471,8 @@ static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
 static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
			    u32 status, u32 len)
 {
-	if (netif_msg_rx_err (cp))
-		pr_debug("%s: rx err, slot %d status 0x%x len %d\n",
-			cp->dev->name, rx_tail, status, len);
+	netif_dbg(cp, rx_err, cp->dev, "rx err, slot %d status 0x%x len %d\n",
+		  rx_tail, status, len);
	cp->dev->stats.rx_errors++;
	if (status & RxErrFrame)
		cp->dev->stats.rx_frame_errors++;
@@ -545,18 +545,15 @@ rx_status_loop:
			goto rx_next;
		}

-		if (netif_msg_rx_status(cp))
-			pr_debug("%s: rx slot %d status 0x%x len %d\n",
-				dev->name, rx_tail, status, len);
+		netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n",
+			  rx_tail, status, len);

-		new_skb = netdev_alloc_skb(dev, buflen + NET_IP_ALIGN);
+		new_skb = netdev_alloc_skb_ip_align(dev, buflen);
		if (!new_skb) {
			dev->stats.rx_dropped++;
			goto rx_next;
		}

-		skb_reserve(new_skb, NET_IP_ALIGN);
-
		dma_unmap_single(&cp->pdev->dev, mapping,
				 buflen, PCI_DMA_FROMDEVICE);

@@ -623,9 +620,8 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
	if (!status || (status == 0xFFFF))
		return IRQ_NONE;

-	if (netif_msg_intr(cp))
-		pr_debug("%s: intr, status %04x cmd %02x cpcmd %04x\n",
-			dev->name, status, cpr8(Cmd), cpr16(CpCmd));
+	netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n",
+		  status, cpr8(Cmd), cpr16(CpCmd));

	cpw16(IntrStatus, status & ~cp_rx_intr_mask);

@@ -656,8 +652,8 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)

		pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
		pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
-		pr_err("%s: PCI bus error, status=%04x, PCI status=%04x\n",
-		       dev->name, status, pci_status);
+		netdev_err(dev, "PCI bus error, status=%04x, PCI status=%04x\n",
+			   status, pci_status);

		/* TODO: reset hardware */
	}
@@ -702,9 +698,8 @@ static void cp_tx (struct cp_private *cp)

		if (status & LastFrag) {
			if (status & (TxError | TxFIFOUnder)) {
-				if (netif_msg_tx_err(cp))
-					pr_debug("%s: tx err, status 0x%x\n",
-						cp->dev->name, status);
+				netif_dbg(cp, tx_err, cp->dev,
+					  "tx err, status 0x%x\n", status);
				cp->dev->stats.tx_errors++;
				if (status & TxOWC)
					cp->dev->stats.tx_window_errors++;
@@ -719,8 +714,8 @@ static void cp_tx (struct cp_private *cp)
					((status >> TxColCntShift) & TxColCntMask);
				cp->dev->stats.tx_packets++;
				cp->dev->stats.tx_bytes += skb->len;
-				if (netif_msg_tx_done(cp))
-					pr_debug("%s: tx done, slot %d\n", cp->dev->name, tx_tail);
+				netif_dbg(cp, tx_done, cp->dev,
+					  "tx done, slot %d\n", tx_tail);
			}
			dev_kfree_skb_irq(skb);
		}
@@ -754,8 +749,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
	if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&cp->lock, intr_flags);
-		pr_err(PFX "%s: BUG! Tx Ring full when queue awake!\n",
-		       dev->name);
+		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

@@ -880,9 +874,8 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
		wmb();
	}
	cp->tx_head = entry;
-	if (netif_msg_tx_queued(cp))
-		pr_debug("%s: tx queued, slot %d, skblen %d\n",
-			dev->name, entry, skb->len);
+	netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
+		  entry, skb->len);
	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

@@ -901,7 +894,7 @@ static void __cp_set_rx_mode (struct net_device *dev)
 {
	struct cp_private *cp = netdev_priv(dev);
	u32 mc_filter[2];	/* Multicast hash filter */
-	int i, rx_mode;
+	int rx_mode;
	u32 tmp;

	/* Note: do not reorder, GCC is clever about common statements. */
@@ -911,8 +904,8 @@ static void __cp_set_rx_mode (struct net_device *dev)
			  AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
			  AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
-	} else if ((dev->mc_count > multicast_filter_limit)
-		   || (dev->flags & IFF_ALLMULTI)) {
+	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
+		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
@@ -920,8 +913,7 @@ static void __cp_set_rx_mode (struct net_device *dev)
		struct dev_mc_list *mclist;
		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
-		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
-		     i++, mclist = mclist->next) {
+		netdev_for_each_mc_addr(mclist, dev) {
			int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
@@ -995,7 +987,7 @@ static void cp_reset_hw (struct cp_private *cp)
		schedule_timeout_uninterruptible(10);
	}

-	pr_err("%s: hardware reset timeout\n", cp->dev->name);
+	netdev_err(cp->dev, "hardware reset timeout\n");
 }

 static inline void cp_start_hw (struct cp_private *cp)
@@ -1057,12 +1049,10 @@ static int cp_refill_rx(struct cp_private *cp)
		struct sk_buff *skb;
		dma_addr_t mapping;

-		skb = netdev_alloc_skb(dev, cp->rx_buf_sz + NET_IP_ALIGN);
+		skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz);
		if (!skb)
			goto err_out;

-		skb_reserve(skb, NET_IP_ALIGN);
-
		mapping = dma_map_single(&cp->pdev->dev, skb->data,
					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
		cp->rx_skb[i] = skb;
@@ -1164,8 +1154,7 @@ static int cp_open (struct net_device *dev)
	struct cp_private *cp = netdev_priv(dev);
	int rc;

-	if (netif_msg_ifup(cp))
-		pr_debug("%s: enabling interface\n", dev->name);
+	netif_dbg(cp, ifup, dev, "enabling interface\n");

	rc = cp_alloc_rings(cp);
	if (rc)
@@ -1199,8 +1188,7 @@ static int cp_close (struct net_device *dev)

	napi_disable(&cp->napi);

-	if (netif_msg_ifdown(cp))
-		pr_debug("%s: disabling interface\n", dev->name);
+	netif_dbg(cp, ifdown, dev, "disabling interface\n");

	spin_lock_irqsave(&cp->lock, flags);

@@ -1223,9 +1211,9 @@ static void cp_tx_timeout(struct net_device *dev)
	unsigned long flags;
	int rc;

-	pr_warning("%s: Transmit timeout, status %2x %4x %4x %4x\n",
-		   dev->name, cpr8(Cmd), cpr16(CpCmd),
-		   cpr16(IntrStatus), cpr16(IntrMask));
+	netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
+		    cpr8(Cmd), cpr16(CpCmd),
+		    cpr16(IntrStatus), cpr16(IntrMask));

	spin_lock_irqsave(&cp->lock, flags);

@@ -1878,8 +1866,8 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
	if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
	    pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
		dev_info(&pdev->dev,
-			"This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
-			pdev->vendor, pdev->device, pdev->revision);
+			 "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
+			 pdev->vendor, pdev->device, pdev->revision);
		return -ENODEV;
	}

@@ -1937,14 +1925,13 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pdev->dev,
-				"No usable DMA configuration, aborting.\n");
+				"No usable DMA configuration, aborting\n");
			goto err_out_res;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pdev->dev,
-				"No usable consistent DMA configuration, "
-				"aborting.\n");
+				"No usable consistent DMA configuration, aborting\n");
			goto err_out_res;
		}
	}
@@ -1956,7 +1943,7 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
	if (!regs) {
		rc = -EIO;
		dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
-		       (unsigned long long)pci_resource_len(pdev, 1),
+			(unsigned long long)pci_resource_len(pdev, 1),
		       (unsigned long long)pciaddr);
		goto err_out_res;
	}
@@ -1994,11 +1981,8 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
	if (rc)
		goto err_out_iomap;

-	pr_info("%s: RTL-8139C+ at 0x%lx, %pM, IRQ %d\n",
-		dev->name,
-		dev->base_addr,
-		dev->dev_addr,
-		dev->irq);
+	netdev_info(dev, "RTL-8139C+ at 0x%lx, %pM, IRQ %d\n",
+		    dev->base_addr, dev->dev_addr, dev->irq);

	pci_set_drvdata(pdev, dev);

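For reference, the two RX-buffer hunks above (the receive loop and cp_refill_rx) are a single substitution as well: netdev_alloc_skb_ip_align() is the allocate-plus-reserve sequence folded into one helper. A rough equivalence sketch, using the driver's own variables and assuming the usual NET_IP_ALIGN headroom:

	/* old, open-coded form */
	skb = netdev_alloc_skb(dev, cp->rx_buf_sz + NET_IP_ALIGN);
	if (skb)
		skb_reserve(skb, NET_IP_ALIGN);

	/* new helper: allocates len + NET_IP_ALIGN and reserves the
	 * alignment headroom before returning the skb */
	skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz);

Either way the IP header of received frames ends up aligned; the helper just removes the duplicated boilerplate at both call sites.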