Diffstat (limited to 'drivers/net/sundance.c')
 -rw-r--r--   drivers/net/sundance.c | 164
 1 file changed, 114 insertions(+), 50 deletions(-)
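
In substance, the patch converts sundance.c from the legacy pci_* DMA helpers to the generic DMA API, adds dma_mapping_error() checks on the new streaming mappings, drops the in_interrupt() special case in reset_tx() in favour of dev_kfree_skb_any(), and wires up CONFIG_PM suspend/resume hooks. For orientation only (this correspondence table is illustrative and not part of the patch; the legacy helpers are thin wrappers around the generic calls), the mechanical replacements are:

	pci_alloc_consistent(pdev, size, &dma)        ->  dma_alloc_coherent(&pdev->dev, size, &dma, GFP_KERNEL)
	pci_free_consistent(pdev, size, cpu, dma)     ->  dma_free_coherent(&pdev->dev, size, cpu, dma)
	pci_map_single(pdev, buf, len, PCI_DMA_*)     ->  dma_map_single(&pdev->dev, buf, len, DMA_*)
	pci_unmap_single(pdev, dma, len, PCI_DMA_*)   ->  dma_unmap_single(&pdev->dev, dma, len, DMA_*)
	pci_dma_sync_single_for_{cpu,device}(...)     ->  dma_sync_single_for_{cpu,device}(&pdev->dev, ...)

Unlike the pci_* wrappers, addresses returned by dma_map_single() are expected to be checked with dma_mapping_error() before being handed to the hardware, which is what the new error paths below implement.
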
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 2678588ea4b2..8b5aeca24d5d 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -96,6 +96,7 @@ static char *media[MAX_UNITS];
 #include <asm/io.h>
 #include <linux/delay.h>
 #include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
 #ifndef _COMPAT_WITH_OLD_KERNEL
 #include <linux/crc32.h>
 #include <linux/ethtool.h>
@@ -523,13 +524,15 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
 	tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
 	tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
 
-	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
+	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
+			&ring_dma, GFP_KERNEL);
 	if (!ring_space)
 		goto err_out_cleardev;
 	np->tx_ring = (struct netdev_desc *)ring_space;
 	np->tx_ring_dma = ring_dma;
 
-	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
+	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
+			&ring_dma, GFP_KERNEL);
 	if (!ring_space)
 		goto err_out_unmap_tx;
 	np->rx_ring = (struct netdev_desc *)ring_space;
@@ -663,9 +666,11 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
 err_out_unregister:
 	unregister_netdev(dev);
 err_out_unmap_rx:
-	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
+	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
+		np->rx_ring, np->rx_ring_dma);
 err_out_unmap_tx:
-	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
+	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
+		np->tx_ring, np->tx_ring_dma);
 err_out_cleardev:
 	pci_set_drvdata(pdev, NULL);
 	pci_iounmap(pdev, ioaddr);
@@ -874,7 +879,7 @@ static int netdev_open(struct net_device *dev)
 	init_timer(&np->timer);
 	np->timer.expires = jiffies + 3*HZ;
 	np->timer.data = (unsigned long)dev;
-	np->timer.function = &netdev_timer;	/* timer handler */
+	np->timer.function = netdev_timer;	/* timer handler */
 	add_timer(&np->timer);
 
 	/* Enable interrupts by setting the interrupt mask. */
@@ -1011,8 +1016,14 @@ static void init_ring(struct net_device *dev)
 		skb->dev = dev;		/* Mark as being used by this device. */
 		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
 		np->rx_ring[i].frag[0].addr = cpu_to_le32(
-			pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz,
-				PCI_DMA_FROMDEVICE));
+			dma_map_single(&np->pci_dev->dev, skb->data,
+				np->rx_buf_sz, DMA_FROM_DEVICE));
+		if (dma_mapping_error(&np->pci_dev->dev,
+					np->rx_ring[i].frag[0].addr)) {
+			dev_kfree_skb(skb);
+			np->rx_skbuff[i] = NULL;
+			break;
+		}
 		np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
 	}
 	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
@@ -1063,9 +1074,11 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
 
 	txdesc->next_desc = 0;
 	txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
-	txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data,
-							skb->len,
-							PCI_DMA_TODEVICE));
+	txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
+				skb->data, skb->len, DMA_TO_DEVICE));
+	if (dma_mapping_error(&np->pci_dev->dev,
+				txdesc->frag[0].addr))
+		goto drop_frame;
 	txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
 
 	/* Increment cur_tx before tasklet_schedule() */
@@ -1087,6 +1100,12 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
 			dev->name, np->cur_tx, entry);
 	}
 	return NETDEV_TX_OK;
+
+drop_frame:
+	dev_kfree_skb(skb);
+	np->tx_skbuff[entry] = NULL;
+	dev->stats.tx_dropped++;
+	return NETDEV_TX_OK;
 }
 
 /* Reset hardware tx and free all of tx buffers */
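
For reference, a minimal sketch of the TX-path pattern the two start_tx() hunks above introduce: map the skb, verify the mapping with dma_mapping_error(), and drop the frame (still returning NETDEV_TX_OK) rather than queue an invalid DMA address for the NIC. The helper below is illustrative only; its name, parameters, and the dev_kfree_skb_any() call are assumptions, not copied from the driver.

	#include <linux/dma-mapping.h>
	#include <linux/netdevice.h>
	#include <linux/pci.h>
	#include <linux/skbuff.h>

	/* Illustrative only: map one skb for transmission and bail out
	 * cleanly on mapping failure instead of queueing a bogus descriptor. */
	static netdev_tx_t example_xmit_map(struct sk_buff *skb,
					    struct net_device *dev,
					    struct pci_dev *pdev,
					    __le32 *frag_addr)
	{
		dma_addr_t dma = dma_map_single(&pdev->dev, skb->data,
						skb->len, DMA_TO_DEVICE);

		if (dma_mapping_error(&pdev->dev, dma)) {
			/* The skb is consumed either way: free it, count the
			 * drop, and report TX_OK so the stack does not retry. */
			dev_kfree_skb_any(skb);
			dev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}

		*frag_addr = cpu_to_le32(dma);
		/* ... fill in the rest of the descriptor and kick the NIC ... */
		return NETDEV_TX_OK;
	}
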
@@ -1097,7 +1116,6 @@ reset_tx (struct net_device *dev)
 	void __iomem *ioaddr = np->base;
 	struct sk_buff *skb;
 	int i;
-	int irq = in_interrupt();
 
 	/* Reset tx logic, TxListPtr will be cleaned */
 	iowrite16 (TxDisable, ioaddr + MACCtrl1);
@@ -1109,13 +1127,10 @@ reset_tx (struct net_device *dev)
 
 		skb = np->tx_skbuff[i];
 		if (skb) {
-			pci_unmap_single(np->pci_dev,
+			dma_unmap_single(&np->pci_dev->dev,
 				le32_to_cpu(np->tx_ring[i].frag[0].addr),
-				skb->len, PCI_DMA_TODEVICE);
-			if (irq)
-				dev_kfree_skb_irq (skb);
-			else
-				dev_kfree_skb (skb);
+				skb->len, DMA_TO_DEVICE);
+			dev_kfree_skb_any(skb);
 			np->tx_skbuff[i] = NULL;
 			dev->stats.tx_dropped++;
 		}
@@ -1233,9 +1248,9 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
 				break;
 			skb = np->tx_skbuff[entry];
 			/* Free the original skb. */
-			pci_unmap_single(np->pci_dev,
+			dma_unmap_single(&np->pci_dev->dev,
 				le32_to_cpu(np->tx_ring[entry].frag[0].addr),
-				skb->len, PCI_DMA_TODEVICE);
+				skb->len, DMA_TO_DEVICE);
 			dev_kfree_skb_irq (np->tx_skbuff[entry]);
 			np->tx_skbuff[entry] = NULL;
 			np->tx_ring[entry].frag[0].addr = 0;
@@ -1252,9 +1267,9 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
 				break;
 			skb = np->tx_skbuff[entry];
 			/* Free the original skb. */
-			pci_unmap_single(np->pci_dev,
+			dma_unmap_single(&np->pci_dev->dev,
 				le32_to_cpu(np->tx_ring[entry].frag[0].addr),
-				skb->len, PCI_DMA_TODEVICE);
+				skb->len, DMA_TO_DEVICE);
 			dev_kfree_skb_irq (np->tx_skbuff[entry]);
 			np->tx_skbuff[entry] = NULL;
 			np->tx_ring[entry].frag[0].addr = 0;
@@ -1334,22 +1349,18 @@ static void rx_poll(unsigned long data)
 			if (pkt_len < rx_copybreak &&
 			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
 				skb_reserve(skb, 2);	/* 16 byte align the IP header */
-				pci_dma_sync_single_for_cpu(np->pci_dev,
+				dma_sync_single_for_cpu(&np->pci_dev->dev,
 					le32_to_cpu(desc->frag[0].addr),
-					np->rx_buf_sz,
-					PCI_DMA_FROMDEVICE);
-
+					np->rx_buf_sz, DMA_FROM_DEVICE);
 				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
-				pci_dma_sync_single_for_device(np->pci_dev,
+				dma_sync_single_for_device(&np->pci_dev->dev,
 					le32_to_cpu(desc->frag[0].addr),
-					np->rx_buf_sz,
-					PCI_DMA_FROMDEVICE);
+					np->rx_buf_sz, DMA_FROM_DEVICE);
 				skb_put(skb, pkt_len);
 			} else {
-				pci_unmap_single(np->pci_dev,
+				dma_unmap_single(&np->pci_dev->dev,
 					le32_to_cpu(desc->frag[0].addr),
-					np->rx_buf_sz,
-					PCI_DMA_FROMDEVICE);
+					np->rx_buf_sz, DMA_FROM_DEVICE);
 				skb_put(skb = np->rx_skbuff[entry], pkt_len);
 				np->rx_skbuff[entry] = NULL;
 			}
@@ -1396,8 +1407,14 @@ static void refill_rx (struct net_device *dev)
 			skb->dev = dev;		/* Mark as being used by this device. */
 			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
 			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
-				pci_map_single(np->pci_dev, skb->data,
-					np->rx_buf_sz, PCI_DMA_FROMDEVICE));
+				dma_map_single(&np->pci_dev->dev, skb->data,
+					np->rx_buf_sz, DMA_FROM_DEVICE));
+			if (dma_mapping_error(&np->pci_dev->dev,
+				    np->rx_ring[entry].frag[0].addr)) {
+				dev_kfree_skb_irq(skb);
+				np->rx_skbuff[entry] = NULL;
+				break;
+			}
 		}
 		/* Perhaps we need not reset this field. */
 		np->rx_ring[entry].frag[0].length =
@@ -1715,9 +1732,9 @@ static int netdev_close(struct net_device *dev)
 		np->rx_ring[i].status = 0;
 		skb = np->rx_skbuff[i];
 		if (skb) {
-			pci_unmap_single(np->pci_dev,
+			dma_unmap_single(&np->pci_dev->dev,
 				le32_to_cpu(np->rx_ring[i].frag[0].addr),
-				np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+				np->rx_buf_sz, DMA_FROM_DEVICE);
 			dev_kfree_skb(skb);
 			np->rx_skbuff[i] = NULL;
 		}
@@ -1727,9 +1744,9 @@ static int netdev_close(struct net_device *dev)
 		np->tx_ring[i].next_desc = 0;
 		skb = np->tx_skbuff[i];
 		if (skb) {
-			pci_unmap_single(np->pci_dev,
+			dma_unmap_single(&np->pci_dev->dev,
 				le32_to_cpu(np->tx_ring[i].frag[0].addr),
-				skb->len, PCI_DMA_TODEVICE);
+				skb->len, DMA_TO_DEVICE);
 			dev_kfree_skb(skb);
 			np->tx_skbuff[i] = NULL;
 		}
@@ -1743,25 +1760,72 @@ static void __devexit sundance_remove1 (struct pci_dev *pdev)
 	struct net_device *dev = pci_get_drvdata(pdev);
 
 	if (dev) {
 		struct netdev_private *np = netdev_priv(dev);
-
-		unregister_netdev(dev);
-		pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
-			np->rx_ring_dma);
-		pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
-			np->tx_ring_dma);
-		pci_iounmap(pdev, np->base);
-		pci_release_regions(pdev);
-		free_netdev(dev);
-		pci_set_drvdata(pdev, NULL);
+		unregister_netdev(dev);
+		dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
+				np->rx_ring, np->rx_ring_dma);
+		dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
+				np->tx_ring, np->tx_ring_dma);
+		pci_iounmap(pdev, np->base);
+		pci_release_regions(pdev);
+		free_netdev(dev);
+		pci_set_drvdata(pdev, NULL);
 	}
 }
 
+#ifdef CONFIG_PM
+
+static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
+{
+	struct net_device *dev = pci_get_drvdata(pci_dev);
+
+	if (!netif_running(dev))
+		return 0;
+
+	netdev_close(dev);
+	netif_device_detach(dev);
+
+	pci_save_state(pci_dev);
+	pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
+
+	return 0;
+}
+
+static int sundance_resume(struct pci_dev *pci_dev)
+{
+	struct net_device *dev = pci_get_drvdata(pci_dev);
+	int err = 0;
+
+	if (!netif_running(dev))
+		return 0;
+
+	pci_set_power_state(pci_dev, PCI_D0);
+	pci_restore_state(pci_dev);
+
+	err = netdev_open(dev);
+	if (err) {
+		printk(KERN_ERR "%s: Can't resume interface!\n",
+			dev->name);
+		goto out;
+	}
+
+	netif_device_attach(dev);
+
+out:
+	return err;
+}
+
+#endif /* CONFIG_PM */
+
 static struct pci_driver sundance_driver = {
 	.name		= DRV_NAME,
 	.id_table	= sundance_pci_tbl,
 	.probe		= sundance_probe1,
 	.remove		= __devexit_p(sundance_remove1),
+#ifdef CONFIG_PM
+	.suspend	= sundance_suspend,
+	.resume		= sundance_resume,
+#endif /* CONFIG_PM */
 };
 
 static int __init sundance_init(void)