author	Francois Romieu <romieu@fr.zoreil.com>	2008-07-10 18:03:44 -0400
committer	Jeff Garzik <jgarzik@redhat.com>	2008-07-11 01:10:13 -0400
commit	580a690208321ed45addef5ef12e25b87f9f5dec (patch)
tree	19971ea8e723dd0d4d98c1ba36125cae43f201dd /drivers/net/via-velocity.c
parent	79d16385c7f287a33ea771c4dbe60ae43f791b49 (diff)
via-velocity: remove the bounce buffers
Executive summary: the bounce buffers are in my way.
- they use something like a 64 * 1500 byte area of PCI consistent memory
- they are not resized when the MTU changes
- they are used:
  - to hand-pad undersized packets (skb_pad anyone?)
  - to linearize fragmented skbs whose fragment count goes beyond the
    7-fragment hardware limit, in order to claim scatter-gather support

Actually the SG code is commented out, and I wonder whether it could be
implemented by (ab)using the large send feature of the chipset, since the
latter should support some multi-descriptor packet transmitting.

Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
Fixed-by: Séguier Régis <rseguier@e-teleport.net>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Diffstat (limited to 'drivers/net/via-velocity.c')
-rw-r--r--	drivers/net/via-velocity.c	72
1 file changed, 18 insertions(+), 54 deletions(-)
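Before the diff, a minimal sketch of the short-frame handling this patch switches to: pad undersized frames in the transmit path with skb_padto() instead of copying them into a driver-owned bounce buffer. The function below is illustrative only (the real change lives in velocity_xmit(), shown in the diff); DMA mapping and descriptor setup are elided.

static int example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int pktlen = skb->len;

	if (skb->len < ETH_ZLEN) {
		/* skb_padto() zero-pads the frame up to ETH_ZLEN and frees
		 * the skb itself on failure, so on error we just report the
		 * packet as handled. */
		if (skb_padto(skb, ETH_ZLEN))
			goto out;
		pktlen = ETH_ZLEN;
	}

	/* ... map skb->data for DMA and fill the tx descriptor using pktlen ... */
out:
	return NETDEV_TX_OK;
}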
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index bcbf2fa9b94a..fce2dfd0e9e6 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1104,7 +1104,6 @@ static int velocity_init_rings(struct velocity_info *vptr)
 {
 	int i;
 	unsigned int psize;
-	unsigned int tsize;
 	dma_addr_t pool_dma;
 	u8 *pool;
 
@@ -1133,19 +1132,6 @@ static int velocity_init_rings(struct velocity_info *vptr)
 
 	vptr->rd_pool_dma = pool_dma;
 
-	tsize = vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq;
-	vptr->tx_bufs = pci_alloc_consistent(vptr->pdev, tsize,
-					     &vptr->tx_bufs_dma);
-
-	if (vptr->tx_bufs == NULL) {
-		printk(KERN_ERR "%s: DMA memory allocation failed.\n",
-		       vptr->dev->name);
-		pci_free_consistent(vptr->pdev, psize, pool, pool_dma);
-		return -ENOMEM;
-	}
-
-	memset(vptr->tx_bufs, 0, vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq);
-
 	i = vptr->options.numrx * sizeof(struct rx_desc);
 	pool += i;
 	pool_dma += i;
@@ -1169,16 +1155,10 @@ static int velocity_init_rings(struct velocity_info *vptr)
 
 static void velocity_free_rings(struct velocity_info *vptr)
 {
-	int size;
-
-	size = vptr->options.numrx * sizeof(struct rx_desc) +
-		vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq;
+	const int size = vptr->options.numrx * sizeof(struct rx_desc) +
+		vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq;
 
 	pci_free_consistent(vptr->pdev, size, vptr->rd_ring, vptr->rd_pool_dma);
-
-	size = vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq;
-
-	pci_free_consistent(vptr->pdev, size, vptr->tx_bufs, vptr->tx_bufs_dma);
 }
 
 static inline void velocity_give_many_rx_descs(struct velocity_info *vptr)
@@ -1313,10 +1293,8 @@ static void velocity_free_rd_ring(struct velocity_info *vptr)
 
 static int velocity_init_td_ring(struct velocity_info *vptr)
 {
-	int i, j;
 	dma_addr_t curr;
-	struct tx_desc *td;
-	struct velocity_td_info *td_info;
+	unsigned int j;
 
 	/* Init the TD ring entries */
 	for (j = 0; j < vptr->num_txq; j++) {
@@ -1331,14 +1309,6 @@ static int velocity_init_td_ring(struct velocity_info *vptr)
 			return -ENOMEM;
 		}
 
-		for (i = 0; i < vptr->options.numtx; i++, curr += sizeof(struct tx_desc)) {
-			td = &(vptr->td_rings[j][i]);
-			td_info = &(vptr->td_infos[j][i]);
-			td_info->buf = vptr->tx_bufs +
-				(j * vptr->options.numtx + i) * PKT_BUF_SZ;
-			td_info->buf_dma = vptr->tx_bufs_dma +
-				(j * vptr->options.numtx + i) * PKT_BUF_SZ;
-		}
 		vptr->td_tail[j] = vptr->td_curr[j] = vptr->td_used[j] = 0;
 	}
 	return 0;
@@ -1867,7 +1837,7 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_
 	/*
 	 *	Don't unmap the pre-allocated tx_bufs
 	 */
-	if (tdinfo->skb_dma && (tdinfo->skb_dma[0] != tdinfo->buf_dma)) {
+	if (tdinfo->skb_dma) {
 
 		for (i = 0; i < tdinfo->nskb_dma; i++) {
 #ifdef VELOCITY_ZERO_COPY_SUPPORT
@@ -2063,9 +2033,19 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct tx_desc *td_ptr;
 	struct velocity_td_info *tdinfo;
 	unsigned long flags;
-	int index;
 	int pktlen = skb->len;
-	__le16 len = cpu_to_le16(pktlen);
+	__le16 len;
+	int index;
+
+
+
+	if (skb->len < ETH_ZLEN) {
+		if (skb_padto(skb, ETH_ZLEN))
+			goto out;
+		pktlen = ETH_ZLEN;
+	}
+
+	len = cpu_to_le16(pktlen);
 
 #ifdef VELOCITY_ZERO_COPY_SUPPORT
 	if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
@@ -2083,23 +2063,6 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
 	td_ptr->tdesc1.TCR = TCR0_TIC;
 	td_ptr->td_buf[0].size &= ~TD_QUEUE;
 
-	/*
-	 *	Pad short frames.
-	 */
-	if (pktlen < ETH_ZLEN) {
-		/* Cannot occur until ZC support */
-		pktlen = ETH_ZLEN;
-		len = cpu_to_le16(ETH_ZLEN);
-		skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
-		memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len);
-		tdinfo->skb = skb;
-		tdinfo->skb_dma[0] = tdinfo->buf_dma;
-		td_ptr->tdesc0.len = len;
-		td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
-		td_ptr->td_buf[0].pa_high = 0;
-		td_ptr->td_buf[0].size = len;	/* queue is 0 anyway */
-		tdinfo->nskb_dma = 1;
-	} else
 #ifdef VELOCITY_ZERO_COPY_SUPPORT
 	if (skb_shinfo(skb)->nr_frags > 0) {
 		int nfrags = skb_shinfo(skb)->nr_frags;
@@ -2191,7 +2154,8 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 	dev->trans_start = jiffies;
 	spin_unlock_irqrestore(&vptr->lock, flags);
-	return 0;
+out:
+	return NETDEV_TX_OK;
 }
 
 /**