Diffstat (limited to 'drivers/net/8139cp.c')
-rw-r--r--	drivers/net/8139cp.c	167
1 file changed, 78 insertions(+), 89 deletions(-)
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 0cdc830449d8..5a4990ae3730 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -48,11 +48,10 @@
  */
 
 #define DRV_NAME		"8139cp"
-#define DRV_VERSION		"1.2"
+#define DRV_VERSION		"1.3"
 #define DRV_RELDATE		"Mar 22, 2004"
 
 
-#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/kernel.h>
@@ -315,12 +314,6 @@ struct cp_desc {
 	u64		addr;
 };
 
-struct ring_info {
-	struct sk_buff		*skb;
-	dma_addr_t		mapping;
-	u32			len;
-};
-
 struct cp_dma_stats {
 	u64			tx_ok;
 	u64			rx_ok;
@@ -354,23 +347,23 @@ struct cp_private {
 	struct net_device_stats net_stats;
 	struct cp_extra_stats	cp_stats;
 
-	unsigned		rx_tail		____cacheline_aligned;
+	unsigned		rx_head		____cacheline_aligned;
+	unsigned		rx_tail;
 	struct cp_desc		*rx_ring;
-	struct ring_info	rx_skb[CP_RX_RING_SIZE];
-	unsigned		rx_buf_sz;
+	struct sk_buff		*rx_skb[CP_RX_RING_SIZE];
 
 	unsigned		tx_head		____cacheline_aligned;
 	unsigned		tx_tail;
-
 	struct cp_desc		*tx_ring;
-	struct ring_info	tx_skb[CP_TX_RING_SIZE];
-	dma_addr_t		ring_dma;
+	struct sk_buff		*tx_skb[CP_TX_RING_SIZE];
+
+	unsigned		rx_buf_sz;
+	unsigned		wol_enabled : 1; /* Is Wake-on-LAN enabled? */
 
 #if CP_VLAN_TAG_USED
 	struct vlan_group	*vlgrp;
 #endif
-
-	unsigned int		wol_enabled : 1; /* Is Wake-on-LAN enabled? */
+	dma_addr_t		ring_dma;
 
 	struct mii_if_info	mii_if;
 };
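The theme of this hunk, and of most below: struct ring_info shadowed state the hardware descriptor already carries, so the per-slot bookkeeping shrinks to a bare sk_buff pointer array. The DMA address lives in cp_desc.addr and the mapped length in the low 16 bits of opts1, both written by the driver when it posts a buffer. A minimal sketch of the recovery (cp_desc is as declared earlier in this file; the helper names are illustrative, not part of the patch):

	/* Sketch only: recover unmap parameters from the descriptor itself,
	 * now that struct ring_info is gone.  desc_dma()/desc_len() are
	 * sketch names, not functions added by this patch. */
	static inline dma_addr_t desc_dma(const struct cp_desc *desc)
	{
		return le64_to_cpu(desc->addr);		/* set at map time */
	}

	static inline u32 desc_len(const struct cp_desc *desc)
	{
		return le32_to_cpu(desc->opts1) & 0xffff; /* length bits 15:0 */
	}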
@@ -408,10 +401,8 @@ static int cp_set_eeprom(struct net_device *dev,
 			 struct ethtool_eeprom *eeprom, u8 *data);
 
 static struct pci_device_id cp_pci_tbl[] = {
-	{ PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
-	{ PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322), },
 	{ },
 };
 MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
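For reference, PCI_DEVICE() from <linux/pci.h> expands to the same initializers the table spelled out by hand, wildcarding the subsystem IDs:

	#define PCI_DEVICE(vend,dev) \
		.vendor = (vend), .device = (dev), \
		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID

so the conversion is purely mechanical.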
@@ -543,7 +534,7 @@ rx_status_loop:
 		struct cp_desc *desc;
 		unsigned buflen;
 
-		skb = cp->rx_skb[rx_tail].skb;
+		skb = cp->rx_skb[rx_tail];
 		BUG_ON(!skb);
 
 		desc = &cp->rx_ring[rx_tail];
@@ -552,7 +543,7 @@ rx_status_loop:
 			break;
 
 		len = (status & 0x1fff) - 4;
-		mapping = cp->rx_skb[rx_tail].mapping;
+		mapping = le64_to_cpu(desc->addr);
 
 		if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
 			/* we don't support incoming fragmented frames.
@@ -573,7 +564,7 @@ rx_status_loop:
 
 		if (netif_msg_rx_status(cp))
 			printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d\n",
-			       cp->dev->name, rx_tail, status, len);
+			       dev->name, rx_tail, status, len);
 
 		buflen = cp->rx_buf_sz + RX_OFFSET;
 		new_skb = dev_alloc_skb (buflen);
@@ -583,7 +574,7 @@ rx_status_loop:
 		}
 
 		skb_reserve(new_skb, RX_OFFSET);
-		new_skb->dev = cp->dev;
+		new_skb->dev = dev;
 
 		pci_unmap_single(cp->pdev, mapping,
 				 buflen, PCI_DMA_FROMDEVICE);
@@ -596,11 +587,9 @@ rx_status_loop:
 
 		skb_put(skb, len);
 
-		mapping =
-		cp->rx_skb[rx_tail].mapping =
-			pci_map_single(cp->pdev, new_skb->data,
-				       buflen, PCI_DMA_FROMDEVICE);
-		cp->rx_skb[rx_tail].skb = new_skb;
+		mapping = pci_map_single(cp->pdev, new_skb->data, buflen,
+					 PCI_DMA_FROMDEVICE);
+		cp->rx_skb[rx_tail] = new_skb;
 
 		cp_rx_skb(cp, skb, desc);
 		rx++;
@@ -718,19 +707,21 @@ static void cp_tx (struct cp_private *cp)
 	unsigned tx_tail = cp->tx_tail;
 
 	while (tx_tail != tx_head) {
+		struct cp_desc *txd = cp->tx_ring + tx_tail;
 		struct sk_buff *skb;
 		u32 status;
 
 		rmb();
-		status = le32_to_cpu(cp->tx_ring[tx_tail].opts1);
+		status = le32_to_cpu(txd->opts1);
 		if (status & DescOwn)
 			break;
 
-		skb = cp->tx_skb[tx_tail].skb;
+		skb = cp->tx_skb[tx_tail];
 		BUG_ON(!skb);
 
-		pci_unmap_single(cp->pdev, cp->tx_skb[tx_tail].mapping,
-				 cp->tx_skb[tx_tail].len, PCI_DMA_TODEVICE);
+		pci_unmap_single(cp->pdev, le64_to_cpu(txd->addr),
+				 le32_to_cpu(txd->opts1) & 0xffff,
+				 PCI_DMA_TODEVICE);
 
 		if (status & LastFrag) {
 			if (status & (TxError | TxFIFOUnder)) {
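One observation on the new unmap call: `status` already holds le32_to_cpu(txd->opts1), read after the rmb() and checked against DescOwn, so the length argument re-converts a value the loop has in hand. An equivalent form, assuming (as the patch does) that the hardware leaves the length bits of opts1 intact when it completes a descriptor:

	pci_unmap_single(cp->pdev, le64_to_cpu(txd->addr),
			 status & 0xffff, PCI_DMA_TODEVICE);

This works only because cp_start_xmit() below stores each fragment's length in bits 15:0 of opts1 when posting it.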
@@ -757,7 +748,7 @@ static void cp_tx (struct cp_private *cp)
 			dev_kfree_skb_irq(skb);
 		}
 
-		cp->tx_skb[tx_tail].skb = NULL;
+		cp->tx_skb[tx_tail] = NULL;
 
 		tx_tail = NEXT_TX(tx_tail);
 	}
@@ -814,7 +805,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 
 		if (mss)
 			flags |= LargeSend | ((mss & MSSMask) << MSSShift);
-		else if (skb->ip_summed == CHECKSUM_HW) {
+		else if (skb->ip_summed == CHECKSUM_PARTIAL) {
 			const struct iphdr *ip = skb->nh.iph;
 			if (ip->protocol == IPPROTO_TCP)
 				flags |= IPCS | TCPCS;
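CHECKSUM_HW was split in 2.6.19: CHECKSUM_PARTIAL is the transmit-side meaning (the stack computed a partial checksum and the device must finish it) and CHECKSUM_COMPLETE the receive-side one. Only the transmit sense occurs in this driver, so the rename is one-for-one here and in the hunks below. For orientation, the skbuff.h values of this era:

	#define CHECKSUM_NONE		0	/* no checksum offload */
	#define CHECKSUM_UNNECESSARY	1	/* rx: already verified */
	#define CHECKSUM_COMPLETE	2	/* rx: hw summed the packet */
	#define CHECKSUM_PARTIAL	3	/* tx: hw must finish the sum */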
@@ -827,9 +818,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 		txd->opts1 = cpu_to_le32(flags);
 		wmb();
 
-		cp->tx_skb[entry].skb = skb;
-		cp->tx_skb[entry].mapping = mapping;
-		cp->tx_skb[entry].len = len;
+		cp->tx_skb[entry] = skb;
 		entry = NEXT_TX(entry);
 	} else {
 		struct cp_desc *txd;
@@ -845,9 +834,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 		first_len = skb_headlen(skb);
 		first_mapping = pci_map_single(cp->pdev, skb->data,
 					       first_len, PCI_DMA_TODEVICE);
-		cp->tx_skb[entry].skb = skb;
-		cp->tx_skb[entry].mapping = first_mapping;
-		cp->tx_skb[entry].len = first_len;
+		cp->tx_skb[entry] = skb;
 		entry = NEXT_TX(entry);
 
 		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
@@ -868,7 +855,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 			if (mss)
 				ctrl |= LargeSend |
 					((mss & MSSMask) << MSSShift);
-			else if (skb->ip_summed == CHECKSUM_HW) {
+			else if (skb->ip_summed == CHECKSUM_PARTIAL) {
 				if (ip->protocol == IPPROTO_TCP)
 					ctrl |= IPCS | TCPCS;
 				else if (ip->protocol == IPPROTO_UDP)
@@ -888,9 +875,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 			txd->opts1 = cpu_to_le32(ctrl);
 			wmb();
 
-			cp->tx_skb[entry].skb = skb;
-			cp->tx_skb[entry].mapping = mapping;
-			cp->tx_skb[entry].len = len;
+			cp->tx_skb[entry] = skb;
 			entry = NEXT_TX(entry);
 		}
 
@@ -899,7 +884,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 		txd->addr = cpu_to_le64(first_mapping);
 		wmb();
 
-		if (skb->ip_summed == CHECKSUM_HW) {
+		if (skb->ip_summed == CHECKSUM_PARTIAL) {
 			if (ip->protocol == IPPROTO_TCP)
 				txd->opts1 = cpu_to_le32(first_eor | first_len |
 							 FirstFrag | DescOwn |
@@ -943,8 +928,6 @@ static void __cp_set_rx_mode (struct net_device *dev)
 	/* Note: do not reorder, GCC is clever about common statements. */
 	if (dev->flags & IFF_PROMISC) {
 		/* Unconditionally log net taps. */
-		printk (KERN_NOTICE "%s: Promiscuous mode enabled.\n",
-			dev->name);
 		rx_mode =
 		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
 		    AcceptAllPhys;
@@ -1092,6 +1075,7 @@ static int cp_refill_rx (struct cp_private *cp)
 
 	for (i = 0; i < CP_RX_RING_SIZE; i++) {
 		struct sk_buff *skb;
+		dma_addr_t mapping;
 
 		skb = dev_alloc_skb(cp->rx_buf_sz + RX_OFFSET);
 		if (!skb)
@@ -1100,12 +1084,12 @@ static int cp_refill_rx (struct cp_private *cp)
 		skb->dev = cp->dev;
 		skb_reserve(skb, RX_OFFSET);
 
-		cp->rx_skb[i].mapping = pci_map_single(cp->pdev,
-			skb->data, cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
-		cp->rx_skb[i].skb = skb;
+		mapping = pci_map_single(cp->pdev, skb->data, cp->rx_buf_sz,
+					 PCI_DMA_FROMDEVICE);
+		cp->rx_skb[i] = skb;
 
 		cp->rx_ring[i].opts2 = 0;
-		cp->rx_ring[i].addr = cpu_to_le64(cp->rx_skb[i].mapping);
+		cp->rx_ring[i].addr = cpu_to_le64(mapping);
 		if (i == (CP_RX_RING_SIZE - 1))
 			cp->rx_ring[i].opts1 =
 				cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
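The refill logic is unchanged apart from the local `mapping`: every slot is handed to the NIC by setting DescOwn with the buffer size in the low bits of opts1, and the last slot additionally carries RingEnd so the chip wraps back to slot 0. Distilled (cp_mark_slot is a sketch name, not a function in the driver):

	static void cp_mark_slot(struct cp_private *cp, unsigned i, dma_addr_t mapping)
	{
		u32 opts1 = DescOwn | cp->rx_buf_sz;

		if (i == CP_RX_RING_SIZE - 1)
			opts1 |= RingEnd;	/* tell the NIC to wrap here */
		cp->rx_ring[i].opts2 = 0;
		cp->rx_ring[i].addr = cpu_to_le64(mapping);
		cp->rx_ring[i].opts1 = cpu_to_le32(opts1);
	}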
@@ -1153,23 +1137,27 @@ static int cp_alloc_rings (struct cp_private *cp)
 
 static void cp_clean_rings (struct cp_private *cp)
 {
+	struct cp_desc *desc;
 	unsigned i;
 
 	for (i = 0; i < CP_RX_RING_SIZE; i++) {
-		if (cp->rx_skb[i].skb) {
-			pci_unmap_single(cp->pdev, cp->rx_skb[i].mapping,
+		if (cp->rx_skb[i]) {
+			desc = cp->rx_ring + i;
+			pci_unmap_single(cp->pdev, le64_to_cpu(desc->addr),
 					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
-			dev_kfree_skb(cp->rx_skb[i].skb);
+			dev_kfree_skb(cp->rx_skb[i]);
 		}
 	}
 
 	for (i = 0; i < CP_TX_RING_SIZE; i++) {
-		if (cp->tx_skb[i].skb) {
-			struct sk_buff *skb = cp->tx_skb[i].skb;
+		if (cp->tx_skb[i]) {
+			struct sk_buff *skb = cp->tx_skb[i];
 
-			pci_unmap_single(cp->pdev, cp->tx_skb[i].mapping,
-					 cp->tx_skb[i].len, PCI_DMA_TODEVICE);
-			if (le32_to_cpu(cp->tx_ring[i].opts1) & LastFrag)
+			desc = cp->tx_ring + i;
+			pci_unmap_single(cp->pdev, le64_to_cpu(desc->addr),
+					 le32_to_cpu(desc->opts1) & 0xffff,
+					 PCI_DMA_TODEVICE);
+			if (le32_to_cpu(desc->opts1) & LastFrag)
 				dev_kfree_skb(skb);
 			cp->net_stats.tx_dropped++;
 		}
@@ -1178,8 +1166,8 @@ static void cp_clean_rings (struct cp_private *cp)
 	memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
 	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
 
-	memset(&cp->rx_skb, 0, sizeof(struct ring_info) * CP_RX_RING_SIZE);
-	memset(&cp->tx_skb, 0, sizeof(struct ring_info) * CP_TX_RING_SIZE);
+	memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
+	memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
 }
 
 static void cp_free_rings (struct cp_private *cp)
@@ -1204,7 +1192,7 @@ static int cp_open (struct net_device *dev)
 
 	cp_init_hw(cp);
 
-	rc = request_irq(dev->irq, cp_interrupt, SA_SHIRQ, dev->name, dev);
+	rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
 	if (rc)
 		goto err_out_hw;
 
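SA_SHIRQ and the other SA_* interrupt flags were renamed to IRQF_* in 2.6.18, so this is a spelling change only. The contract behind IRQF_SHARED is worth recalling: the handler runs for every interrupt on the line and must return IRQ_NONE when its device shows nothing pending. A sketch of that shape, using the 3-argument handler signature of this era and the driver's cpr16() register accessor (abbreviated, not the full handler):

	static irqreturn_t cp_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
	{
		struct net_device *dev = dev_instance;
		struct cp_private *cp = netdev_priv(dev);
		u16 status = cpr16(IntrStatus);

		if (!status || status == 0xFFFF)
			return IRQ_NONE;	/* not ours, or device gone */

		/* ... acknowledge and service rx/tx/error events ... */
		return IRQ_HANDLED;
	}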
@@ -1558,7 +1546,7 @@ static void cp_get_ethtool_stats (struct net_device *dev,
 	pci_free_consistent(cp->pdev, sizeof(*nic_stats), nic_stats, dma);
 }
 
-static struct ethtool_ops cp_ethtool_ops = {
+static const struct ethtool_ops cp_ethtool_ops = {
 	.get_drvinfo		= cp_get_drvinfo,
 	.get_regs_len		= cp_get_regs_len,
 	.get_stats_count	= cp_get_stats_count,
@@ -1823,7 +1811,7 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct cp_private *cp;
 	int rc;
 	void __iomem *regs;
-	long pciaddr;
+	resource_size_t pciaddr;
 	unsigned int addr_len, i, pci_using_dac;
 	u8 pci_rev;
 
@@ -1837,9 +1825,10 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
 	    pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pci_rev < 0x20) {
-		printk(KERN_ERR PFX "pci dev %s (id %04x:%04x rev %02x) is not an 8139C+ compatible chip\n",
-		       pci_name(pdev), pdev->vendor, pdev->device, pci_rev);
-		printk(KERN_ERR PFX "Try the \"8139too\" driver instead.\n");
+		dev_err(&pdev->dev,
+			"This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip\n",
+			pdev->vendor, pdev->device, pci_rev);
+		dev_err(&pdev->dev, "Try the \"8139too\" driver instead.\n");
 		return -ENODEV;
 	}
 
@@ -1877,14 +1866,13 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	pciaddr = pci_resource_start(pdev, 1);
 	if (!pciaddr) {
 		rc = -EIO;
-		printk(KERN_ERR PFX "no MMIO resource for pci dev %s\n",
-		       pci_name(pdev));
+		dev_err(&pdev->dev, "no MMIO resource\n");
 		goto err_out_res;
 	}
 	if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
 		rc = -EIO;
-		printk(KERN_ERR PFX "MMIO resource (%lx) too small on pci dev %s\n",
-		       pci_resource_len(pdev, 1), pci_name(pdev));
+		dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
+			(unsigned long long)pci_resource_len(pdev, 1));
 		goto err_out_res;
 	}
 
@@ -1898,14 +1886,15 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 
 		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
 		if (rc) {
-			printk(KERN_ERR PFX "No usable DMA configuration, "
-			       "aborting.\n");
+			dev_err(&pdev->dev,
+				"No usable DMA configuration, aborting.\n");
 			goto err_out_res;
 		}
 		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
 		if (rc) {
-			printk(KERN_ERR PFX "No usable consistent DMA configuration, "
-			       "aborting.\n");
+			dev_err(&pdev->dev,
+				"No usable consistent DMA configuration, "
+				"aborting.\n");
 			goto err_out_res;
 		}
 	}
@@ -1916,8 +1905,9 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	regs = ioremap(pciaddr, CP_REGS_SIZE);
 	if (!regs) {
 		rc = -EIO;
-		printk(KERN_ERR PFX "Cannot map PCI MMIO (%lx@%lx) on pci dev %s\n",
-		       pci_resource_len(pdev, 1), pciaddr, pci_name(pdev));
+		dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
+			(unsigned long long)pci_resource_len(pdev, 1),
+			(unsigned long long)pciaddr);
 		goto err_out_res;
 	}
 	dev->base_addr = (unsigned long) regs;
@@ -1986,7 +1976,8 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* enable busmastering and memory-write-invalidate */
 	pci_set_master(pdev);
 
-	if (cp->wol_enabled) cp_set_d3_state (cp);
+	if (cp->wol_enabled)
+		cp_set_d3_state (cp);
 
 	return 0;
 
@@ -2008,10 +1999,10 @@ static void cp_remove_one (struct pci_dev *pdev)
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct cp_private *cp = netdev_priv(dev);
 
-	BUG_ON(!dev);
 	unregister_netdev(dev);
 	iounmap(cp->regs);
-	if (cp->wol_enabled) pci_set_power_state (pdev, PCI_D0);
+	if (cp->wol_enabled)
+		pci_set_power_state (pdev, PCI_D0);
 	pci_release_regions(pdev);
 	pci_clear_mwi(pdev);
 	pci_disable_device(pdev);
@@ -2022,14 +2013,12 @@ static void cp_remove_one (struct pci_dev *pdev)
 #ifdef CONFIG_PM
 static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
 {
-	struct net_device *dev;
-	struct cp_private *cp;
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct cp_private *cp = netdev_priv(dev);
 	unsigned long flags;
 
-	dev = pci_get_drvdata (pdev);
-	cp = netdev_priv(dev);
-
-	if (!dev || !netif_running (dev)) return 0;
+	if (!netif_running(dev))
+		return 0;
 
 	netif_device_detach (dev);
 	netif_stop_queue (dev);
@@ -2095,7 +2084,7 @@ static int __init cp_init (void)
 #ifdef MODULE
 	printk("%s", version);
 #endif
-	return pci_module_init (&cp_driver);
+	return pci_register_driver(&cp_driver);
 }
 
 static void __exit cp_exit (void)
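pci_module_init() had long been a thin compatibility alias for pci_register_driver() and was being removed tree-wide, so the call converts directly. For context, the surrounding module scaffolding, abbreviated from this driver (the real cp_driver also wires up suspend/resume under CONFIG_PM):

	static struct pci_driver cp_driver = {
		.name		= DRV_NAME,
		.id_table	= cp_pci_tbl,
		.probe		= cp_init_one,
		.remove		= cp_remove_one,
	};

	static int __init cp_init (void)
	{
		return pci_register_driver(&cp_driver);
	}

	static void __exit cp_exit (void)
	{
		pci_unregister_driver(&cp_driver);
	}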