about summary refs log tree commit diff stats
path: root/drivers
diff options
context:
space:
mode:
authorDave Jones <davej@redhat.com>2009-07-21 05:15:49 -0400
committerDavid S. Miller <davem@davemloft.net>2009-07-23 21:01:37 -0400
commit83c98a8cd04dd0f848574370594886ba3bf56750 (patch)
tree4dd7df11014b83bb1e4251a26f9a0df408562d17 /drivers
parent67edfef78639573e9b01c26295a935349aab6fa3 (diff)
Remove unused zero-copy code from velocity NIC driver.
This code hasn't been enabled in forever. Signed-off-by: Dave Jones <davej@redhat.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/net/via-velocity.c77
1 file changed, 11 insertions, 66 deletions
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 42365e5f931b..4ebe2cacf50c 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -976,9 +976,6 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
976 dev->netdev_ops = &velocity_netdev_ops; 976 dev->netdev_ops = &velocity_netdev_ops;
977 dev->ethtool_ops = &velocity_ethtool_ops; 977 dev->ethtool_ops = &velocity_ethtool_ops;
978 978
979#ifdef VELOCITY_ZERO_COPY_SUPPORT
980 dev->features |= NETIF_F_SG;
981#endif
982 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | 979 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
983 NETIF_F_HW_VLAN_RX; 980 NETIF_F_HW_VLAN_RX;
984 981
@@ -1849,11 +1846,7 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_
1849 1846
1850 pktlen = max_t(unsigned int, skb->len, ETH_ZLEN); 1847 pktlen = max_t(unsigned int, skb->len, ETH_ZLEN);
1851 for (i = 0; i < tdinfo->nskb_dma; i++) { 1848 for (i = 0; i < tdinfo->nskb_dma; i++) {
1852#ifdef VELOCITY_ZERO_COPY_SUPPORT
1853 pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], le16_to_cpu(td->tdesc1.len), PCI_DMA_TODEVICE);
1854#else
1855 pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], pktlen, PCI_DMA_TODEVICE); 1849 pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], pktlen, PCI_DMA_TODEVICE);
1856#endif
1857 tdinfo->skb_dma[i] = 0; 1850 tdinfo->skb_dma[i] = 0;
1858 } 1851 }
1859 } 1852 }
@@ -2095,13 +2088,6 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
2095 2088
2096 len = cpu_to_le16(pktlen); 2089 len = cpu_to_le16(pktlen);
2097 2090
2098#ifdef VELOCITY_ZERO_COPY_SUPPORT
2099 if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
2100 kfree_skb(skb);
2101 return NETDEV_TX_OK;
2102 }
2103#endif
2104
2105 spin_lock_irqsave(&vptr->lock, flags); 2091 spin_lock_irqsave(&vptr->lock, flags);
2106 2092
2107 index = vptr->tx.curr[qnum]; 2093 index = vptr->tx.curr[qnum];
@@ -2111,59 +2097,18 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
2111 td_ptr->tdesc1.TCR = TCR0_TIC; 2097 td_ptr->tdesc1.TCR = TCR0_TIC;
2112 td_ptr->td_buf[0].size &= ~TD_QUEUE; 2098 td_ptr->td_buf[0].size &= ~TD_QUEUE;
2113 2099
2114#ifdef VELOCITY_ZERO_COPY_SUPPORT 2100 /*
2115 if (skb_shinfo(skb)->nr_frags > 0) { 2101 * Map the linear network buffer into PCI space and
2116 int nfrags = skb_shinfo(skb)->nr_frags; 2102 * add it to the transmit ring.
2117 tdinfo->skb = skb; 2103 */
2118 if (nfrags > 6) { 2104 tdinfo->skb = skb;
2119 skb_copy_from_linear_data(skb, tdinfo->buf, skb->len); 2105 tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
2120 tdinfo->skb_dma[0] = tdinfo->buf_dma; 2106 td_ptr->tdesc0.len = len;
2121 td_ptr->tdesc0.len = len; 2107 td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
2122 td_ptr->tx.buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); 2108 td_ptr->td_buf[0].pa_high = 0;
2123 td_ptr->tx.buf[0].pa_high = 0; 2109 td_ptr->td_buf[0].size = len;
2124 td_ptr->tx.buf[0].size = len; /* queue is 0 anyway */ 2110 tdinfo->nskb_dma = 1;
2125 tdinfo->nskb_dma = 1;
2126 } else {
2127 int i = 0;
2128 tdinfo->nskb_dma = 0;
2129 tdinfo->skb_dma[i] = pci_map_single(vptr->pdev, skb->data,
2130 skb_headlen(skb), PCI_DMA_TODEVICE);
2131
2132 td_ptr->tdesc0.len = len;
2133
2134 /* FIXME: support 48bit DMA later */
2135 td_ptr->tx.buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma);
2136 td_ptr->tx.buf[i].pa_high = 0;
2137 td_ptr->tx.buf[i].size = cpu_to_le16(skb_headlen(skb));
2138
2139 for (i = 0; i < nfrags; i++) {
2140 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2141 void *addr = (void *)page_address(frag->page) + frag->page_offset;
2142
2143 tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE);
2144
2145 td_ptr->tx.buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
2146 td_ptr->tx.buf[i + 1].pa_high = 0;
2147 td_ptr->tx.buf[i + 1].size = cpu_to_le16(frag->size);
2148 }
2149 tdinfo->nskb_dma = i - 1;
2150 }
2151 2111
2152 } else
2153#endif
2154 {
2155 /*
2156 * Map the linear network buffer into PCI space and
2157 * add it to the transmit ring.
2158 */
2159 tdinfo->skb = skb;
2160 tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
2161 td_ptr->tdesc0.len = len;
2162 td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
2163 td_ptr->td_buf[0].pa_high = 0;
2164 td_ptr->td_buf[0].size = len;
2165 tdinfo->nskb_dma = 1;
2166 }
2167 td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16; 2112 td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
2168 2113
2169 if (vptr->vlgrp && vlan_tx_tag_present(skb)) { 2114 if (vptr->vlgrp && vlan_tx_tag_present(skb)) {