Diffstat (limited to 'drivers/net/ethernet/via')
 drivers/net/ethernet/via/via-velocity.c | 51 +++++++++++++++++++--------------
 drivers/net/ethernet/via/via-velocity.h |  1 +
 2 files changed, 26 insertions(+), 26 deletions(-)
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 187eef33df11..5996cee0ffa7 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -46,6 +46,7 @@
 #include <linux/types.h>
 #include <linux/bitops.h>
 #include <linux/init.h>
+#include <linux/dma-mapping.h>
 #include <linux/mm.h>
 #include <linux/errno.h>
 #include <linux/ioport.h>
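
The only change in the include block is the new <linux/dma-mapping.h>, which declares the generic DMA API the rest of the patch switches to. The legacy pci_* DMA helpers were thin wrappers around these calls, so most of the conversion is mechanical. A minimal sketch of the correspondence (velocity_example_map() is a hypothetical helper, not driver code):

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>

	/* What the legacy wrapper expands to: same operation, expressed
	 * through the bus-independent struct device. */
	static dma_addr_t velocity_example_map(struct pci_dev *pdev,
					       void *buf, size_t len)
	{
		/* Old: pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE); */
		return dma_map_single(&pdev->dev, buf, len, DMA_FROM_DEVICE);
	}
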
@@ -1459,7 +1460,6 @@ static int velocity_init_dma_rings(struct velocity_info *vptr)
 	struct velocity_opt *opt = &vptr->options;
 	const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
 	const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc);
-	struct pci_dev *pdev = vptr->pdev;
 	dma_addr_t pool_dma;
 	void *pool;
 	unsigned int i;
@@ -1467,13 +1467,13 @@ static int velocity_init_dma_rings(struct velocity_info *vptr)
 	/*
 	 * Allocate all RD/TD rings a single pool.
 	 *
-	 * pci_alloc_consistent() fulfills the requirement for 64 bytes
+	 * dma_alloc_coherent() fulfills the requirement for 64 bytes
 	 * alignment
 	 */
-	pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq +
-			rx_ring_size, &pool_dma);
+	pool = dma_alloc_coherent(vptr->dev, tx_ring_size * vptr->tx.numq +
+			rx_ring_size, &pool_dma, GFP_ATOMIC);
 	if (!pool) {
-		dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n",
+		dev_err(vptr->dev, "%s : DMA memory allocation failed.\n",
 			vptr->netdev->name);
 		return -ENOMEM;
 	}
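
Two details here keep behavior identical: pci_alloc_consistent() always allocated with GFP_ATOMIC internally, so the flag is now passed explicitly, and dma_alloc_coherent() returns at-least-page-aligned memory, which still satisfies the 64-byte alignment the comment mentions. A minimal sketch of the allocate/release pairing this hunk sets up (condensed from the driver; pool_size stands in for the expression above):

	dma_addr_t pool_dma;
	size_t pool_size = tx_ring_size * vptr->tx.numq + rx_ring_size;

	/* One coherent allocation backs all TX and RX descriptor rings. */
	void *pool = dma_alloc_coherent(vptr->dev, pool_size, &pool_dma,
					GFP_ATOMIC);
	if (!pool)
		return -ENOMEM;
	/* ... carve pool into the RX ring and per-queue TX rings ... */
	/* Teardown must pass the same device, size, vaddr and handle: */
	dma_free_coherent(vptr->dev, pool_size, pool, pool_dma);
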
@@ -1524,8 +1524,8 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
 	 */
 	skb_reserve(rd_info->skb,
 		    64 - ((unsigned long) rd_info->skb->data & 63));
-	rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data,
-					vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
+	rd_info->skb_dma = dma_map_single(vptr->dev, rd_info->skb->data,
+					vptr->rx.buf_sz, DMA_FROM_DEVICE);
 
 	/*
 	 * Fill in the descriptor to match
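
This is a streaming mapping: the buffer belongs to the device from here until it is synced back or unmapped. The patch keeps the driver's existing behavior of not checking the mapping result; a sketch of the stricter pattern the generic API supports (the dma_mapping_error() check is an addition of this sketch, not of the patch):

	rd_info->skb_dma = dma_map_single(vptr->dev, rd_info->skb->data,
					  vptr->rx.buf_sz, DMA_FROM_DEVICE);
	if (dma_mapping_error(vptr->dev, rd_info->skb_dma)) {
		/* Don't hand a bad address to the hardware. */
		dev_kfree_skb(rd_info->skb);
		rd_info->skb = NULL;
		return -ENOMEM;
	}
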
@@ -1588,8 +1588,8 @@ static void velocity_free_rd_ring(struct velocity_info *vptr)
 
 		if (!rd_info->skb)
 			continue;
-		pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
-				 PCI_DMA_FROMDEVICE);
+		dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
+				 DMA_FROM_DEVICE);
 		rd_info->skb_dma = 0;
 
 		dev_kfree_skb(rd_info->skb);
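
The unmap must mirror the map exactly: same struct device, same size, same direction. A condensed sketch of one RX slot's mapping lifetime (field names from the driver):

	/* Map: ownership of the buffer passes to the device. */
	rd_info->skb_dma = dma_map_single(vptr->dev, rd_info->skb->data,
					  vptr->rx.buf_sz, DMA_FROM_DEVICE);
	/* ... the NIC DMAs a received frame into the buffer ... */
	/* Unmap with identical size/direction: ownership returns to the CPU. */
	dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
			 DMA_FROM_DEVICE);
	rd_info->skb_dma = 0;
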
@@ -1670,7 +1670,7 @@ static void velocity_free_dma_rings(struct velocity_info *vptr)
 	const int size = vptr->options.numrx * sizeof(struct rx_desc) +
 		vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
 
-	pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma);
+	dma_free_coherent(vptr->dev, size, vptr->rx.ring, vptr->rx.pool_dma);
 }
 
 static int velocity_init_rings(struct velocity_info *vptr, int mtu)
@@ -1727,8 +1727,8 @@ static void velocity_free_tx_buf(struct velocity_info *vptr,
 				pktlen = max_t(size_t, pktlen,
 						td->td_buf[i].size & ~TD_QUEUE);
 
-			pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i],
-					le16_to_cpu(pktlen), PCI_DMA_TODEVICE);
+			dma_unmap_single(vptr->dev, tdinfo->skb_dma[i],
+					le16_to_cpu(pktlen), DMA_TO_DEVICE);
 		}
 	}
 	dev_kfree_skb_irq(skb);
@@ -1750,8 +1750,8 @@ static void velocity_free_td_ring_entry(struct velocity_info *vptr,
 	if (td_info->skb) {
 		for (i = 0; i < td_info->nskb_dma; i++) {
 			if (td_info->skb_dma[i]) {
-				pci_unmap_single(vptr->pdev, td_info->skb_dma[i],
-					td_info->skb->len, PCI_DMA_TODEVICE);
+				dma_unmap_single(vptr->dev, td_info->skb_dma[i],
+					td_info->skb->len, DMA_TO_DEVICE);
 				td_info->skb_dma[i] = 0;
 			}
 		}
@@ -2029,7 +2029,6 @@ static inline void velocity_iph_realign(struct velocity_info *vptr,
  */
 static int velocity_receive_frame(struct velocity_info *vptr, int idx)
 {
-	void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
 	struct net_device_stats *stats = &vptr->netdev->stats;
 	struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
 	struct rx_desc *rd = &(vptr->rx.ring[idx]);
@@ -2047,8 +2046,8 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
 
 	skb = rd_info->skb;
 
-	pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,
-				    vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
+	dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma,
+				vptr->rx.buf_sz, DMA_FROM_DEVICE);
 
 	/*
 	 * Drop frame not meeting IEEE 802.3
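
dma_sync_single_for_cpu() hands the still-mapped buffer to the CPU so the frame can be inspected; until a matching for_device sync (or an unmap) the device must not touch it. A minimal sketch of the ownership handshake on this RX path:

	/* Device -> CPU: make the received frame visible to the CPU. */
	dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma,
				vptr->rx.buf_sz, DMA_FROM_DEVICE);

	/* ... checksum, length checks, possible copy ... */

	/* CPU -> device: give the buffer back for the next receive. */
	dma_sync_single_for_device(vptr->dev, rd_info->skb_dma,
				   vptr->rx.buf_sz, DMA_FROM_DEVICE);
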
@@ -2061,19 +2060,18 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
 		}
 	}
 
-	pci_action = pci_dma_sync_single_for_device;
-
 	velocity_rx_csum(rd, skb);
 
 	if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
 		velocity_iph_realign(vptr, skb, pkt_len);
-		pci_action = pci_unmap_single;
 		rd_info->skb = NULL;
+		dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
+				 DMA_FROM_DEVICE);
+	} else {
+		dma_sync_single_for_device(vptr->dev, rd_info->skb_dma,
+					   vptr->rx.buf_sz, DMA_FROM_DEVICE);
 	}
 
-	pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
-		   PCI_DMA_FROMDEVICE);
-
 	skb_put(skb, pkt_len - 4);
 	skb->protocol = eth_type_trans(skb, vptr->netdev);
 
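
This hunk is the one place where the conversion is more than renaming. The removed pci_action pointer worked because pci_dma_sync_single_for_device() and pci_unmap_single() shared the prototype captured by the deleted declaration; the generic replacements take a struct device * plus an enum dma_data_direction and are commonly inline helpers, so no single function pointer fits both. The indirect call therefore becomes an explicit branch; a condensed sketch of the resulting RX-path decision (simplified from the hunk above):

	if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
		/* The skb itself goes up the stack: take it out of the
		 * ring and unmap it; a replacement is mapped later. */
		rd_info->skb = NULL;
		dma_unmap_single(vptr->dev, rd_info->skb_dma,
				 vptr->rx.buf_sz, DMA_FROM_DEVICE);
	} else {
		/* The frame was copied: the original buffer stays in the
		 * ring, so just return ownership to the device. */
		dma_sync_single_for_device(vptr->dev, rd_info->skb_dma,
					   vptr->rx.buf_sz, DMA_FROM_DEVICE);
	}
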
@@ -2550,7 +2548,8 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
 	 * add it to the transmit ring.
 	 */
 	tdinfo->skb = skb;
-	tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
+	tdinfo->skb_dma[0] = dma_map_single(vptr->dev, skb->data, pktlen,
+					    DMA_TO_DEVICE);
 	td_ptr->tdesc0.len = cpu_to_le16(pktlen);
 	td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
 	td_ptr->td_buf[0].pa_high = 0;
@@ -2560,7 +2559,7 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
-		tdinfo->skb_dma[i + 1] = skb_frag_dma_map(&vptr->pdev->dev,
+		tdinfo->skb_dma[i + 1] = skb_frag_dma_map(vptr->dev,
 							  frag, 0,
 							  skb_frag_size(frag),
 							  DMA_TO_DEVICE);
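
The fragment loop needs only a one-token change because skb_frag_dma_map() was already part of the generic API and took a struct device *; the patch merely swaps the open-coded &vptr->pdev->dev for the cached vptr->dev. A condensed sketch of the full TX mapping sequence (names from the driver, error handling elided as in the original):

	/* Linear part first, then each page fragment; every mapping uses
	 * the same device and DMA_TO_DEVICE direction. */
	tdinfo->skb_dma[0] = dma_map_single(vptr->dev, skb->data, pktlen,
					    DMA_TO_DEVICE);
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		tdinfo->skb_dma[i + 1] = skb_frag_dma_map(vptr->dev, frag, 0,
							  skb_frag_size(frag),
							  DMA_TO_DEVICE);
	}
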
@@ -2637,6 +2636,7 @@ static void velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr,
 {
 	memset(vptr, 0, sizeof(struct velocity_info));
 
+	vptr->dev = &pdev->dev;
 	vptr->pdev = pdev;
 	vptr->chip_id = info->chip_id;
 	vptr->tx.numq = info->txqueue;
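
Caching &pdev->dev here is what lets every call site above say vptr->dev, keeping the DMA paths bus-agnostic while vptr->pdev stays around for PCI-specific setup. A sketch of the kind of helper this enables (velocity_map_tx_buf() is hypothetical, not in the driver):

	/* Works for any bus providing a struct device, not just PCI. */
	static int velocity_map_tx_buf(struct velocity_info *vptr, void *buf,
				       size_t len, dma_addr_t *handle)
	{
		*handle = dma_map_single(vptr->dev, buf, len, DMA_TO_DEVICE);
		return dma_mapping_error(vptr->dev, *handle) ? -ENOMEM : 0;
	}
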
@@ -2744,7 +2744,6 @@ static int velocity_found1(struct pci_dev *pdev,
 	SET_NETDEV_DEV(dev, &pdev->dev);
 	vptr = netdev_priv(dev);
 
-
 	if (first) {
 		printk(KERN_INFO "%s Ver. %s\n",
 		       VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION);
diff --git a/drivers/net/ethernet/via/via-velocity.h b/drivers/net/ethernet/via/via-velocity.h
index ff8d7828aa1b..c38bbaed4d12 100644
--- a/drivers/net/ethernet/via/via-velocity.h
+++ b/drivers/net/ethernet/via/via-velocity.h
@@ -1434,6 +1434,7 @@ struct velocity_opt {
 #define GET_RD_BY_IDX(vptr, idx)   (vptr->rd_ring[idx])
 
 struct velocity_info {
+	struct device *dev;
 	struct pci_dev *pdev;
 	struct net_device *netdev;
 
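
The matching structure change: the generic device pointer sits ahead of the PCI-specific one, and everything DMA-related now needs only the former. Abridged layout after the patch (only the leading fields shown):

	struct velocity_info {
		struct device *dev;	/* generic device, used for all DMA */
		struct pci_dev *pdev;	/* PCI specifics (config, BARs) */
		struct net_device *netdev;
		/* ... */
	};
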