diff options
author | Linus Torvalds <torvalds@g5.osdl.org> | 2006-09-24 13:15:13 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-09-24 13:15:13 -0400 |
commit | a319a2773a13bab56a0d0b3744ba8703324313b5 (patch) | |
tree | f02c86acabd1031439fd422a167784007e84ebb1 /drivers/net/8139cp.c | |
parent | e18fa700c9a31360bc8f193aa543b7ef7b39a06b (diff) | |
parent | 183798799216fad36c7219fe8d4d6dee6b8fa755 (diff) |
Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6
* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6: (217 commits)
net/ieee80211: fix more crypto-related build breakage
[PATCH] Spidernet: add ethtool -S (show statistics)
[NET] GT96100: Delete bitrotting ethernet driver
[PATCH] mv643xx_eth: restrict to 32-bit PPC_MULTIPLATFORM
[PATCH] Cirrus Logic ep93xx ethernet driver
r8169: the MMIO region of the 8167 stands behind BAR#1
e1000, ixgb: Remove pointless wrappers
[PATCH] Remove powerpc specific parts of 3c509 driver
[PATCH] s2io: Switch to pci_get_device
[PATCH] gt96100: move to pci_get_device API
[PATCH] ehea: bugfix for register access functions
[PATCH] e1000 disable device on PCI error
drivers/net/phy/fixed: #if 0 some incomplete code
drivers/net: const-ify ethtool_ops declarations
[PATCH] ethtool: allow const ethtool_ops
[PATCH] sky2: big endian
[PATCH] sky2: fiber support
[PATCH] sky2: tx pause bug fix
drivers/net: Trim trailing whitespace
[PATCH] ehea: IBM eHEA Ethernet Device Driver
...
Manually resolved conflicts in drivers/net/ixgb/ixgb_main.c and
drivers/net/sky2.c related to CHECKSUM_HW/CHECKSUM_PARTIAL changes by
commit 84fa7933a33f806bbbaae6775e87459b1ec584c0 that just happened to be
next to unrelated changes in this update.
Diffstat (limited to 'drivers/net/8139cp.c')
-rw-r--r-- | drivers/net/8139cp.c | 122 |
1 file changed, 54 insertions, 68 deletions
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c index a48b211c489d..5a4990ae3730 100644 --- a/drivers/net/8139cp.c +++ b/drivers/net/8139cp.c | |||
@@ -48,7 +48,7 @@ | |||
48 | */ | 48 | */ |
49 | 49 | ||
50 | #define DRV_NAME "8139cp" | 50 | #define DRV_NAME "8139cp" |
51 | #define DRV_VERSION "1.2" | 51 | #define DRV_VERSION "1.3" |
52 | #define DRV_RELDATE "Mar 22, 2004" | 52 | #define DRV_RELDATE "Mar 22, 2004" |
53 | 53 | ||
54 | 54 | ||
@@ -314,12 +314,6 @@ struct cp_desc { | |||
314 | u64 addr; | 314 | u64 addr; |
315 | }; | 315 | }; |
316 | 316 | ||
317 | struct ring_info { | ||
318 | struct sk_buff *skb; | ||
319 | dma_addr_t mapping; | ||
320 | u32 len; | ||
321 | }; | ||
322 | |||
323 | struct cp_dma_stats { | 317 | struct cp_dma_stats { |
324 | u64 tx_ok; | 318 | u64 tx_ok; |
325 | u64 rx_ok; | 319 | u64 rx_ok; |
@@ -353,23 +347,23 @@ struct cp_private { | |||
353 | struct net_device_stats net_stats; | 347 | struct net_device_stats net_stats; |
354 | struct cp_extra_stats cp_stats; | 348 | struct cp_extra_stats cp_stats; |
355 | 349 | ||
356 | unsigned rx_tail ____cacheline_aligned; | 350 | unsigned rx_head ____cacheline_aligned; |
351 | unsigned rx_tail; | ||
357 | struct cp_desc *rx_ring; | 352 | struct cp_desc *rx_ring; |
358 | struct ring_info rx_skb[CP_RX_RING_SIZE]; | 353 | struct sk_buff *rx_skb[CP_RX_RING_SIZE]; |
359 | unsigned rx_buf_sz; | ||
360 | 354 | ||
361 | unsigned tx_head ____cacheline_aligned; | 355 | unsigned tx_head ____cacheline_aligned; |
362 | unsigned tx_tail; | 356 | unsigned tx_tail; |
363 | |||
364 | struct cp_desc *tx_ring; | 357 | struct cp_desc *tx_ring; |
365 | struct ring_info tx_skb[CP_TX_RING_SIZE]; | 358 | struct sk_buff *tx_skb[CP_TX_RING_SIZE]; |
366 | dma_addr_t ring_dma; | 359 | |
360 | unsigned rx_buf_sz; | ||
361 | unsigned wol_enabled : 1; /* Is Wake-on-LAN enabled? */ | ||
367 | 362 | ||
368 | #if CP_VLAN_TAG_USED | 363 | #if CP_VLAN_TAG_USED |
369 | struct vlan_group *vlgrp; | 364 | struct vlan_group *vlgrp; |
370 | #endif | 365 | #endif |
371 | 366 | dma_addr_t ring_dma; | |
372 | unsigned int wol_enabled : 1; /* Is Wake-on-LAN enabled? */ | ||
373 | 367 | ||
374 | struct mii_if_info mii_if; | 368 | struct mii_if_info mii_if; |
375 | }; | 369 | }; |
@@ -407,10 +401,8 @@ static int cp_set_eeprom(struct net_device *dev, | |||
407 | struct ethtool_eeprom *eeprom, u8 *data); | 401 | struct ethtool_eeprom *eeprom, u8 *data); |
408 | 402 | ||
409 | static struct pci_device_id cp_pci_tbl[] = { | 403 | static struct pci_device_id cp_pci_tbl[] = { |
410 | { PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139, | 404 | { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139), }, |
411 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, | 405 | { PCI_DEVICE(PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322), }, |
412 | { PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322, | ||
413 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, | ||
414 | { }, | 406 | { }, |
415 | }; | 407 | }; |
416 | MODULE_DEVICE_TABLE(pci, cp_pci_tbl); | 408 | MODULE_DEVICE_TABLE(pci, cp_pci_tbl); |
@@ -542,7 +534,7 @@ rx_status_loop: | |||
542 | struct cp_desc *desc; | 534 | struct cp_desc *desc; |
543 | unsigned buflen; | 535 | unsigned buflen; |
544 | 536 | ||
545 | skb = cp->rx_skb[rx_tail].skb; | 537 | skb = cp->rx_skb[rx_tail]; |
546 | BUG_ON(!skb); | 538 | BUG_ON(!skb); |
547 | 539 | ||
548 | desc = &cp->rx_ring[rx_tail]; | 540 | desc = &cp->rx_ring[rx_tail]; |
@@ -551,7 +543,7 @@ rx_status_loop: | |||
551 | break; | 543 | break; |
552 | 544 | ||
553 | len = (status & 0x1fff) - 4; | 545 | len = (status & 0x1fff) - 4; |
554 | mapping = cp->rx_skb[rx_tail].mapping; | 546 | mapping = le64_to_cpu(desc->addr); |
555 | 547 | ||
556 | if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) { | 548 | if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) { |
557 | /* we don't support incoming fragmented frames. | 549 | /* we don't support incoming fragmented frames. |
@@ -572,7 +564,7 @@ rx_status_loop: | |||
572 | 564 | ||
573 | if (netif_msg_rx_status(cp)) | 565 | if (netif_msg_rx_status(cp)) |
574 | printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d\n", | 566 | printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d\n", |
575 | cp->dev->name, rx_tail, status, len); | 567 | dev->name, rx_tail, status, len); |
576 | 568 | ||
577 | buflen = cp->rx_buf_sz + RX_OFFSET; | 569 | buflen = cp->rx_buf_sz + RX_OFFSET; |
578 | new_skb = dev_alloc_skb (buflen); | 570 | new_skb = dev_alloc_skb (buflen); |
@@ -582,7 +574,7 @@ rx_status_loop: | |||
582 | } | 574 | } |
583 | 575 | ||
584 | skb_reserve(new_skb, RX_OFFSET); | 576 | skb_reserve(new_skb, RX_OFFSET); |
585 | new_skb->dev = cp->dev; | 577 | new_skb->dev = dev; |
586 | 578 | ||
587 | pci_unmap_single(cp->pdev, mapping, | 579 | pci_unmap_single(cp->pdev, mapping, |
588 | buflen, PCI_DMA_FROMDEVICE); | 580 | buflen, PCI_DMA_FROMDEVICE); |
@@ -595,11 +587,9 @@ rx_status_loop: | |||
595 | 587 | ||
596 | skb_put(skb, len); | 588 | skb_put(skb, len); |
597 | 589 | ||
598 | mapping = | 590 | mapping = pci_map_single(cp->pdev, new_skb->data, buflen, |
599 | cp->rx_skb[rx_tail].mapping = | 591 | PCI_DMA_FROMDEVICE); |
600 | pci_map_single(cp->pdev, new_skb->data, | 592 | cp->rx_skb[rx_tail] = new_skb; |
601 | buflen, PCI_DMA_FROMDEVICE); | ||
602 | cp->rx_skb[rx_tail].skb = new_skb; | ||
603 | 593 | ||
604 | cp_rx_skb(cp, skb, desc); | 594 | cp_rx_skb(cp, skb, desc); |
605 | rx++; | 595 | rx++; |
@@ -717,19 +707,21 @@ static void cp_tx (struct cp_private *cp) | |||
717 | unsigned tx_tail = cp->tx_tail; | 707 | unsigned tx_tail = cp->tx_tail; |
718 | 708 | ||
719 | while (tx_tail != tx_head) { | 709 | while (tx_tail != tx_head) { |
710 | struct cp_desc *txd = cp->tx_ring + tx_tail; | ||
720 | struct sk_buff *skb; | 711 | struct sk_buff *skb; |
721 | u32 status; | 712 | u32 status; |
722 | 713 | ||
723 | rmb(); | 714 | rmb(); |
724 | status = le32_to_cpu(cp->tx_ring[tx_tail].opts1); | 715 | status = le32_to_cpu(txd->opts1); |
725 | if (status & DescOwn) | 716 | if (status & DescOwn) |
726 | break; | 717 | break; |
727 | 718 | ||
728 | skb = cp->tx_skb[tx_tail].skb; | 719 | skb = cp->tx_skb[tx_tail]; |
729 | BUG_ON(!skb); | 720 | BUG_ON(!skb); |
730 | 721 | ||
731 | pci_unmap_single(cp->pdev, cp->tx_skb[tx_tail].mapping, | 722 | pci_unmap_single(cp->pdev, le64_to_cpu(txd->addr), |
732 | cp->tx_skb[tx_tail].len, PCI_DMA_TODEVICE); | 723 | le32_to_cpu(txd->opts1) & 0xffff, |
724 | PCI_DMA_TODEVICE); | ||
733 | 725 | ||
734 | if (status & LastFrag) { | 726 | if (status & LastFrag) { |
735 | if (status & (TxError | TxFIFOUnder)) { | 727 | if (status & (TxError | TxFIFOUnder)) { |
@@ -756,7 +748,7 @@ static void cp_tx (struct cp_private *cp) | |||
756 | dev_kfree_skb_irq(skb); | 748 | dev_kfree_skb_irq(skb); |
757 | } | 749 | } |
758 | 750 | ||
759 | cp->tx_skb[tx_tail].skb = NULL; | 751 | cp->tx_skb[tx_tail] = NULL; |
760 | 752 | ||
761 | tx_tail = NEXT_TX(tx_tail); | 753 | tx_tail = NEXT_TX(tx_tail); |
762 | } | 754 | } |
@@ -826,9 +818,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev) | |||
826 | txd->opts1 = cpu_to_le32(flags); | 818 | txd->opts1 = cpu_to_le32(flags); |
827 | wmb(); | 819 | wmb(); |
828 | 820 | ||
829 | cp->tx_skb[entry].skb = skb; | 821 | cp->tx_skb[entry] = skb; |
830 | cp->tx_skb[entry].mapping = mapping; | ||
831 | cp->tx_skb[entry].len = len; | ||
832 | entry = NEXT_TX(entry); | 822 | entry = NEXT_TX(entry); |
833 | } else { | 823 | } else { |
834 | struct cp_desc *txd; | 824 | struct cp_desc *txd; |
@@ -844,9 +834,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev) | |||
844 | first_len = skb_headlen(skb); | 834 | first_len = skb_headlen(skb); |
845 | first_mapping = pci_map_single(cp->pdev, skb->data, | 835 | first_mapping = pci_map_single(cp->pdev, skb->data, |
846 | first_len, PCI_DMA_TODEVICE); | 836 | first_len, PCI_DMA_TODEVICE); |
847 | cp->tx_skb[entry].skb = skb; | 837 | cp->tx_skb[entry] = skb; |
848 | cp->tx_skb[entry].mapping = first_mapping; | ||
849 | cp->tx_skb[entry].len = first_len; | ||
850 | entry = NEXT_TX(entry); | 838 | entry = NEXT_TX(entry); |
851 | 839 | ||
852 | for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { | 840 | for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { |
@@ -887,9 +875,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev) | |||
887 | txd->opts1 = cpu_to_le32(ctrl); | 875 | txd->opts1 = cpu_to_le32(ctrl); |
888 | wmb(); | 876 | wmb(); |
889 | 877 | ||
890 | cp->tx_skb[entry].skb = skb; | 878 | cp->tx_skb[entry] = skb; |
891 | cp->tx_skb[entry].mapping = mapping; | ||
892 | cp->tx_skb[entry].len = len; | ||
893 | entry = NEXT_TX(entry); | 879 | entry = NEXT_TX(entry); |
894 | } | 880 | } |
895 | 881 | ||
@@ -942,8 +928,6 @@ static void __cp_set_rx_mode (struct net_device *dev) | |||
942 | /* Note: do not reorder, GCC is clever about common statements. */ | 928 | /* Note: do not reorder, GCC is clever about common statements. */ |
943 | if (dev->flags & IFF_PROMISC) { | 929 | if (dev->flags & IFF_PROMISC) { |
944 | /* Unconditionally log net taps. */ | 930 | /* Unconditionally log net taps. */ |
945 | printk (KERN_NOTICE "%s: Promiscuous mode enabled.\n", | ||
946 | dev->name); | ||
947 | rx_mode = | 931 | rx_mode = |
948 | AcceptBroadcast | AcceptMulticast | AcceptMyPhys | | 932 | AcceptBroadcast | AcceptMulticast | AcceptMyPhys | |
949 | AcceptAllPhys; | 933 | AcceptAllPhys; |
@@ -1091,6 +1075,7 @@ static int cp_refill_rx (struct cp_private *cp) | |||
1091 | 1075 | ||
1092 | for (i = 0; i < CP_RX_RING_SIZE; i++) { | 1076 | for (i = 0; i < CP_RX_RING_SIZE; i++) { |
1093 | struct sk_buff *skb; | 1077 | struct sk_buff *skb; |
1078 | dma_addr_t mapping; | ||
1094 | 1079 | ||
1095 | skb = dev_alloc_skb(cp->rx_buf_sz + RX_OFFSET); | 1080 | skb = dev_alloc_skb(cp->rx_buf_sz + RX_OFFSET); |
1096 | if (!skb) | 1081 | if (!skb) |
@@ -1099,12 +1084,12 @@ static int cp_refill_rx (struct cp_private *cp) | |||
1099 | skb->dev = cp->dev; | 1084 | skb->dev = cp->dev; |
1100 | skb_reserve(skb, RX_OFFSET); | 1085 | skb_reserve(skb, RX_OFFSET); |
1101 | 1086 | ||
1102 | cp->rx_skb[i].mapping = pci_map_single(cp->pdev, | 1087 | mapping = pci_map_single(cp->pdev, skb->data, cp->rx_buf_sz, |
1103 | skb->data, cp->rx_buf_sz, PCI_DMA_FROMDEVICE); | 1088 | PCI_DMA_FROMDEVICE); |
1104 | cp->rx_skb[i].skb = skb; | 1089 | cp->rx_skb[i] = skb; |
1105 | 1090 | ||
1106 | cp->rx_ring[i].opts2 = 0; | 1091 | cp->rx_ring[i].opts2 = 0; |
1107 | cp->rx_ring[i].addr = cpu_to_le64(cp->rx_skb[i].mapping); | 1092 | cp->rx_ring[i].addr = cpu_to_le64(mapping); |
1108 | if (i == (CP_RX_RING_SIZE - 1)) | 1093 | if (i == (CP_RX_RING_SIZE - 1)) |
1109 | cp->rx_ring[i].opts1 = | 1094 | cp->rx_ring[i].opts1 = |
1110 | cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz); | 1095 | cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz); |
@@ -1152,23 +1137,27 @@ static int cp_alloc_rings (struct cp_private *cp) | |||
1152 | 1137 | ||
1153 | static void cp_clean_rings (struct cp_private *cp) | 1138 | static void cp_clean_rings (struct cp_private *cp) |
1154 | { | 1139 | { |
1140 | struct cp_desc *desc; | ||
1155 | unsigned i; | 1141 | unsigned i; |
1156 | 1142 | ||
1157 | for (i = 0; i < CP_RX_RING_SIZE; i++) { | 1143 | for (i = 0; i < CP_RX_RING_SIZE; i++) { |
1158 | if (cp->rx_skb[i].skb) { | 1144 | if (cp->rx_skb[i]) { |
1159 | pci_unmap_single(cp->pdev, cp->rx_skb[i].mapping, | 1145 | desc = cp->rx_ring + i; |
1146 | pci_unmap_single(cp->pdev, le64_to_cpu(desc->addr), | ||
1160 | cp->rx_buf_sz, PCI_DMA_FROMDEVICE); | 1147 | cp->rx_buf_sz, PCI_DMA_FROMDEVICE); |
1161 | dev_kfree_skb(cp->rx_skb[i].skb); | 1148 | dev_kfree_skb(cp->rx_skb[i]); |
1162 | } | 1149 | } |
1163 | } | 1150 | } |
1164 | 1151 | ||
1165 | for (i = 0; i < CP_TX_RING_SIZE; i++) { | 1152 | for (i = 0; i < CP_TX_RING_SIZE; i++) { |
1166 | if (cp->tx_skb[i].skb) { | 1153 | if (cp->tx_skb[i]) { |
1167 | struct sk_buff *skb = cp->tx_skb[i].skb; | 1154 | struct sk_buff *skb = cp->tx_skb[i]; |
1168 | 1155 | ||
1169 | pci_unmap_single(cp->pdev, cp->tx_skb[i].mapping, | 1156 | desc = cp->tx_ring + i; |
1170 | cp->tx_skb[i].len, PCI_DMA_TODEVICE); | 1157 | pci_unmap_single(cp->pdev, le64_to_cpu(desc->addr), |
1171 | if (le32_to_cpu(cp->tx_ring[i].opts1) & LastFrag) | 1158 | le32_to_cpu(desc->opts1) & 0xffff, |
1159 | PCI_DMA_TODEVICE); | ||
1160 | if (le32_to_cpu(desc->opts1) & LastFrag) | ||
1172 | dev_kfree_skb(skb); | 1161 | dev_kfree_skb(skb); |
1173 | cp->net_stats.tx_dropped++; | 1162 | cp->net_stats.tx_dropped++; |
1174 | } | 1163 | } |
@@ -1177,8 +1166,8 @@ static void cp_clean_rings (struct cp_private *cp) | |||
1177 | memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE); | 1166 | memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE); |
1178 | memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); | 1167 | memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); |
1179 | 1168 | ||
1180 | memset(&cp->rx_skb, 0, sizeof(struct ring_info) * CP_RX_RING_SIZE); | 1169 | memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE); |
1181 | memset(&cp->tx_skb, 0, sizeof(struct ring_info) * CP_TX_RING_SIZE); | 1170 | memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE); |
1182 | } | 1171 | } |
1183 | 1172 | ||
1184 | static void cp_free_rings (struct cp_private *cp) | 1173 | static void cp_free_rings (struct cp_private *cp) |
@@ -1557,7 +1546,7 @@ static void cp_get_ethtool_stats (struct net_device *dev, | |||
1557 | pci_free_consistent(cp->pdev, sizeof(*nic_stats), nic_stats, dma); | 1546 | pci_free_consistent(cp->pdev, sizeof(*nic_stats), nic_stats, dma); |
1558 | } | 1547 | } |
1559 | 1548 | ||
1560 | static struct ethtool_ops cp_ethtool_ops = { | 1549 | static const struct ethtool_ops cp_ethtool_ops = { |
1561 | .get_drvinfo = cp_get_drvinfo, | 1550 | .get_drvinfo = cp_get_drvinfo, |
1562 | .get_regs_len = cp_get_regs_len, | 1551 | .get_regs_len = cp_get_regs_len, |
1563 | .get_stats_count = cp_get_stats_count, | 1552 | .get_stats_count = cp_get_stats_count, |
@@ -2010,7 +1999,6 @@ static void cp_remove_one (struct pci_dev *pdev) | |||
2010 | struct net_device *dev = pci_get_drvdata(pdev); | 1999 | struct net_device *dev = pci_get_drvdata(pdev); |
2011 | struct cp_private *cp = netdev_priv(dev); | 2000 | struct cp_private *cp = netdev_priv(dev); |
2012 | 2001 | ||
2013 | BUG_ON(!dev); | ||
2014 | unregister_netdev(dev); | 2002 | unregister_netdev(dev); |
2015 | iounmap(cp->regs); | 2003 | iounmap(cp->regs); |
2016 | if (cp->wol_enabled) | 2004 | if (cp->wol_enabled) |
@@ -2025,14 +2013,12 @@ static void cp_remove_one (struct pci_dev *pdev) | |||
2025 | #ifdef CONFIG_PM | 2013 | #ifdef CONFIG_PM |
2026 | static int cp_suspend (struct pci_dev *pdev, pm_message_t state) | 2014 | static int cp_suspend (struct pci_dev *pdev, pm_message_t state) |
2027 | { | 2015 | { |
2028 | struct net_device *dev; | 2016 | struct net_device *dev = pci_get_drvdata(pdev); |
2029 | struct cp_private *cp; | 2017 | struct cp_private *cp = netdev_priv(dev); |
2030 | unsigned long flags; | 2018 | unsigned long flags; |
2031 | 2019 | ||
2032 | dev = pci_get_drvdata (pdev); | 2020 | if (!netif_running(dev)) |
2033 | cp = netdev_priv(dev); | 2021 | return 0; |
2034 | |||
2035 | if (!dev || !netif_running (dev)) return 0; | ||
2036 | 2022 | ||
2037 | netif_device_detach (dev); | 2023 | netif_device_detach (dev); |
2038 | netif_stop_queue (dev); | 2024 | netif_stop_queue (dev); |
@@ -2098,7 +2084,7 @@ static int __init cp_init (void) | |||
2098 | #ifdef MODULE | 2084 | #ifdef MODULE |
2099 | printk("%s", version); | 2085 | printk("%s", version); |
2100 | #endif | 2086 | #endif |
2101 | return pci_module_init (&cp_driver); | 2087 | return pci_register_driver(&cp_driver); |
2102 | } | 2088 | } |
2103 | 2089 | ||
2104 | static void __exit cp_exit (void) | 2090 | static void __exit cp_exit (void) |