Diffstat (limited to 'drivers/net/via-velocity.c')
 -rw-r--r--  drivers/net/via-velocity.c | 183
 1 file changed, 68 insertions(+), 115 deletions(-)
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 6b8d882d197b..370ce30f2f45 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1102,61 +1102,41 @@ static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pc
 
 static int velocity_init_rings(struct velocity_info *vptr)
 {
-	int i;
-	unsigned int psize;
-	unsigned int tsize;
+	struct velocity_opt *opt = &vptr->options;
+	const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
+	const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc);
+	struct pci_dev *pdev = vptr->pdev;
 	dma_addr_t pool_dma;
-	u8 *pool;
-
-	/*
-	 * Allocate all RD/TD rings a single pool
-	 */
-
-	psize = vptr->options.numrx * sizeof(struct rx_desc) +
-		vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq;
+	void *pool;
+	unsigned int i;
 
 	/*
+	 * Allocate all RD/TD rings a single pool.
+	 *
 	 * pci_alloc_consistent() fulfills the requirement for 64 bytes
 	 * alignment
 	 */
-	pool = pci_alloc_consistent(vptr->pdev, psize, &pool_dma);
-
-	if (pool == NULL) {
-		printk(KERN_ERR "%s : DMA memory allocation failed.\n",
+	pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->num_txq +
+				    rx_ring_size, &pool_dma);
+	if (!pool) {
+		dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n",
 			vptr->dev->name);
 		return -ENOMEM;
 	}
 
-	memset(pool, 0, psize);
-
-	vptr->rd_ring = (struct rx_desc *) pool;
-
+	vptr->rd_ring = pool;
 	vptr->rd_pool_dma = pool_dma;
 
-	tsize = vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq;
-	vptr->tx_bufs = pci_alloc_consistent(vptr->pdev, tsize,
-		&vptr->tx_bufs_dma);
+	pool += rx_ring_size;
+	pool_dma += rx_ring_size;
 
-	if (vptr->tx_bufs == NULL) {
-		printk(KERN_ERR "%s: DMA memory allocation failed.\n",
-			vptr->dev->name);
-		pci_free_consistent(vptr->pdev, psize, pool, pool_dma);
-		return -ENOMEM;
-	}
-
-	memset(vptr->tx_bufs, 0, vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq);
-
-	i = vptr->options.numrx * sizeof(struct rx_desc);
-	pool += i;
-	pool_dma += i;
 	for (i = 0; i < vptr->num_txq; i++) {
-		int offset = vptr->options.numtx * sizeof(struct tx_desc);
-
+		vptr->td_rings[i] = pool;
 		vptr->td_pool_dma[i] = pool_dma;
-		vptr->td_rings[i] = (struct tx_desc *) pool;
-		pool += offset;
-		pool_dma += offset;
+		pool += tx_ring_size;
+		pool_dma += tx_ring_size;
 	}
+
 	return 0;
 }
 
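The hunk above folds what used to be two coherent allocations (the descriptor rings plus a set of tx bounce buffers) into a single pool holding only the rings; the bounce buffers disappear entirely, with short-frame padding handled later in velocity_xmit() (see the hunks near the end). For reference, a sketch of the pool layout the new code produces, using only names introduced by the patch:

	/*
	 * Single pci_alloc_consistent() pool, 64-byte aligned at its base:
	 *
	 *   pool_dma + 0                               -> vptr->rd_ring (numrx rx_desc)
	 *   pool_dma + rx_ring_size                    -> vptr->td_rings[0] (numtx tx_desc)
	 *   pool_dma + rx_ring_size + 1 * tx_ring_size -> vptr->td_rings[1]
	 *   ...
	 *   pool_dma + rx_ring_size + i * tx_ring_size -> vptr->td_rings[i], i < num_txq
	 */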
@@ -1169,19 +1149,13 @@ static int velocity_init_rings(struct velocity_info *vptr)
 
 static void velocity_free_rings(struct velocity_info *vptr)
 {
-	int size;
-
-	size = vptr->options.numrx * sizeof(struct rx_desc) +
+	const int size = vptr->options.numrx * sizeof(struct rx_desc) +
 		vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq;
 
 	pci_free_consistent(vptr->pdev, size, vptr->rd_ring, vptr->rd_pool_dma);
-
-	size = vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq;
-
-	pci_free_consistent(vptr->pdev, size, vptr->tx_bufs, vptr->tx_bufs_dma);
 }
 
-static inline void velocity_give_many_rx_descs(struct velocity_info *vptr)
+static void velocity_give_many_rx_descs(struct velocity_info *vptr)
 {
 	struct mac_regs __iomem *regs = vptr->mac_regs;
 	int avail, dirty, unusable;
@@ -1208,7 +1182,7 @@ static inline void velocity_give_many_rx_descs(struct velocity_info *vptr)
 
 static int velocity_rx_refill(struct velocity_info *vptr)
 {
-	int dirty = vptr->rd_dirty, done = 0, ret = 0;
+	int dirty = vptr->rd_dirty, done = 0;
 
 	do {
 		struct rx_desc *rd = vptr->rd_ring + dirty;
@@ -1218,8 +1192,7 @@ static int velocity_rx_refill(struct velocity_info *vptr)
 			break;
 
 		if (!vptr->rd_info[dirty].skb) {
-			ret = velocity_alloc_rx_buf(vptr, dirty);
-			if (ret < 0)
+			if (velocity_alloc_rx_buf(vptr, dirty) < 0)
 				break;
 		}
 		done++;
@@ -1229,10 +1202,14 @@ static int velocity_rx_refill(struct velocity_info *vptr)
 	if (done) {
 		vptr->rd_dirty = dirty;
 		vptr->rd_filled += done;
-		velocity_give_many_rx_descs(vptr);
 	}
 
-	return ret;
+	return done;
+}
+
+static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
+{
+	vptr->rx_buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
 }
 
 /**
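Two behavioral changes land in this hunk: velocity_rx_refill() now returns the number of descriptors it managed to refill rather than an error code, and it no longer hands the refilled descriptors to the hardware itself — velocity_give_many_rx_descs() moves out to the callers (see the velocity_rx_srv() and velocity_open() hunks below). The new velocity_set_rxbufsize() helper likewise pulls the MTU-based buffer sizing out of velocity_init_rd_ring() so it can run before any ring work. The resulting caller pattern, as this patch uses it:

	/* refill returns a count; the caller decides when to tell the NIC */
	if (velocity_rx_refill(vptr) > 0)
		velocity_give_many_rx_descs(vptr);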
@@ -1245,25 +1222,24 @@ static int velocity_rx_refill(struct velocity_info *vptr)
 
 static int velocity_init_rd_ring(struct velocity_info *vptr)
 {
-	int ret;
-	int mtu = vptr->dev->mtu;
-
-	vptr->rx_buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
+	int ret = -ENOMEM;
 
 	vptr->rd_info = kcalloc(vptr->options.numrx,
 				sizeof(struct velocity_rd_info), GFP_KERNEL);
 	if (!vptr->rd_info)
-		return -ENOMEM;
+		goto out;
 
 	vptr->rd_filled = vptr->rd_dirty = vptr->rd_curr = 0;
 
-	ret = velocity_rx_refill(vptr);
-	if (ret < 0) {
+	if (velocity_rx_refill(vptr) != vptr->options.numrx) {
 		VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
 			"%s: failed to allocate RX buffer.\n", vptr->dev->name);
 		velocity_free_rd_ring(vptr);
+		goto out;
 	}
 
+	ret = 0;
+out:
 	return ret;
 }
 
@@ -1313,10 +1289,8 @@ static void velocity_free_rd_ring(struct velocity_info *vptr)
 
 static int velocity_init_td_ring(struct velocity_info *vptr)
 {
-	int i, j;
 	dma_addr_t curr;
-	struct tx_desc *td;
-	struct velocity_td_info *td_info;
+	unsigned int j;
 
 	/* Init the TD ring entries */
 	for (j = 0; j < vptr->num_txq; j++) {
@@ -1331,14 +1305,6 @@ static int velocity_init_td_ring(struct velocity_info *vptr)
 			return -ENOMEM;
 		}
 
-		for (i = 0; i < vptr->options.numtx; i++, curr += sizeof(struct tx_desc)) {
-			td = &(vptr->td_rings[j][i]);
-			td_info = &(vptr->td_infos[j][i]);
-			td_info->buf = vptr->tx_bufs +
-				(j * vptr->options.numtx + i) * PKT_BUF_SZ;
-			td_info->buf_dma = vptr->tx_bufs_dma +
-				(j * vptr->options.numtx + i) * PKT_BUF_SZ;
-		}
 		vptr->td_tail[j] = vptr->td_curr[j] = vptr->td_used[j] = 0;
 	}
 	return 0;
@@ -1448,10 +1414,8 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status)
 
 	vptr->rd_curr = rd_curr;
 
-	if (works > 0 && velocity_rx_refill(vptr) < 0) {
-		VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
-			"%s: rx buf allocation failure\n", vptr->dev->name);
-	}
+	if ((works > 0) && (velocity_rx_refill(vptr) > 0))
+		velocity_give_many_rx_descs(vptr);
 
 	VAR_USED(stats);
 	return works;
@@ -1495,24 +1459,18 @@ static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
  * enough. This function returns a negative value if the received
  * packet is too big or if memory is exhausted.
  */
-static inline int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
-				   struct velocity_info *vptr)
+static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
+			    struct velocity_info *vptr)
 {
 	int ret = -1;
-
 	if (pkt_size < rx_copybreak) {
 		struct sk_buff *new_skb;
 
-		new_skb = dev_alloc_skb(pkt_size + 2);
+		new_skb = netdev_alloc_skb(vptr->dev, pkt_size + 2);
 		if (new_skb) {
-			new_skb->dev = vptr->dev;
 			new_skb->ip_summed = rx_skb[0]->ip_summed;
-
-			if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN)
-				skb_reserve(new_skb, 2);
-
-			skb_copy_from_linear_data(rx_skb[0], new_skb->data,
-						  pkt_size);
+			skb_reserve(new_skb, 2);
+			skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
 			*rx_skb = new_skb;
 			ret = 0;
 		}
@@ -1533,12 +1491,8 @@ static inline int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
 static inline void velocity_iph_realign(struct velocity_info *vptr,
 					struct sk_buff *skb, int pkt_size)
 {
-	/* FIXME - memmove ? */
 	if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
-		int i;
-
-		for (i = pkt_size; i >= 0; i--)
-			*(skb->data + i + 2) = *(skb->data + i);
+		memmove(skb->data + 2, skb->data, pkt_size);
 		skb_reserve(skb, 2);
 	}
 }
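This resolves the old FIXME: memmove() is defined for overlapping buffers, which is exactly why the original code used a descending open-coded loop instead of memcpy(). One subtlety worth noting: the loop ran from i = pkt_size down to 0 and therefore moved pkt_size + 1 bytes, one more than the memmove() above; that trailing byte lies past the packet payload, so the packet data itself is handled identically:

	/* old loop, equivalent to memmove(skb->data + 2, skb->data, pkt_size + 1) */
	for (i = pkt_size; i >= 0; i--)
		*(skb->data + i + 2) = *(skb->data + i);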
@@ -1629,7 +1583,7 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
 	struct rx_desc *rd = &(vptr->rd_ring[idx]);
 	struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);
 
-	rd_info->skb = dev_alloc_skb(vptr->rx_buf_sz + 64);
+	rd_info->skb = netdev_alloc_skb(vptr->dev, vptr->rx_buf_sz + 64);
 	if (rd_info->skb == NULL)
 		return -ENOMEM;
 
@@ -1638,7 +1592,6 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
 	 * 64byte alignment.
 	 */
 	skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63);
-	rd_info->skb->dev = vptr->dev;
 	rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);
 
 	/*
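Both allocation sites switch from dev_alloc_skb() to netdev_alloc_skb(), which records the owning device itself — hence the `new_skb->dev` and `rd_info->skb->dev` assignments removed above. A rough sketch of the difference (simplified; not the exact mainline implementation):

	/* netdev_alloc_skb(dev, len) behaves approximately like:
	 *
	 *	skb = dev_alloc_skb(len);
	 *	if (skb)
	 *		skb->dev = dev;
	 *	return skb;
	 */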
@@ -1878,7 +1831,7 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_
 	/*
 	 * Don't unmap the pre-allocated tx_bufs
 	 */
-	if (tdinfo->skb_dma && (tdinfo->skb_dma[0] != tdinfo->buf_dma)) {
+	if (tdinfo->skb_dma) {
 
 		for (i = 0; i < tdinfo->nskb_dma; i++) {
 #ifdef VELOCITY_ZERO_COPY_SUPPORT
@@ -1909,6 +1862,8 @@ static int velocity_open(struct net_device *dev)
 	struct velocity_info *vptr = netdev_priv(dev);
 	int ret;
 
+	velocity_set_rxbufsize(vptr, dev->mtu);
+
 	ret = velocity_init_rings(vptr);
 	if (ret < 0)
 		goto out;
@@ -1924,6 +1879,8 @@ static int velocity_open(struct net_device *dev)
 	/* Ensure chip is running */
 	pci_set_power_state(vptr->pdev, PCI_D0);
 
+	velocity_give_many_rx_descs(vptr);
+
 	velocity_init_registers(vptr, VELOCITY_INIT_COLD);
 
 	ret = request_irq(vptr->pdev->irq, &velocity_intr, IRQF_SHARED,
@@ -1988,6 +1945,8 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
 
 	dev->mtu = new_mtu;
 
+	velocity_set_rxbufsize(vptr, new_mtu);
+
 	ret = velocity_init_rd_ring(vptr);
 	if (ret < 0)
 		goto out_unlock;
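Both velocity_open() and velocity_change_mtu() now call the new helper before (re)building the rx ring, since velocity_alloc_rx_buf() sizes and maps its skbs from vptr->rx_buf_sz:

	velocity_set_rxbufsize(vptr, new_mtu);	/* must precede ring init */
	ret = velocity_init_rd_ring(vptr);	/* alloc_rx_buf reads rx_buf_sz */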
@@ -2074,9 +2033,19 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct tx_desc *td_ptr;
 	struct velocity_td_info *tdinfo;
 	unsigned long flags;
-	int index;
 	int pktlen = skb->len;
-	__le16 len = cpu_to_le16(pktlen);
+	__le16 len;
+	int index;
+
+
+
+	if (skb->len < ETH_ZLEN) {
+		if (skb_padto(skb, ETH_ZLEN))
+			goto out;
+		pktlen = ETH_ZLEN;
+	}
+
+	len = cpu_to_le16(pktlen);
 
 #ifdef VELOCITY_ZERO_COPY_SUPPORT
 	if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
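skb_padto() zero-pads the frame's tailroom out to ETH_ZLEN; on allocation failure it frees the skb and returns an error, so jumping to the new `out:` label (added in the final hunk) and returning NETDEV_TX_OK without touching the skb again is the correct way to consume it. This replaces the bounce-buffer padding path removed in the next hunk. Annotated:

	if (skb->len < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			goto out;	/* skb_padto() already freed the skb */
		pktlen = ETH_ZLEN;	/* skb->len is unchanged; the pad bytes are zeroed tailroom */
	}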
@@ -2094,23 +2063,6 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
 	td_ptr->tdesc1.TCR = TCR0_TIC;
 	td_ptr->td_buf[0].size &= ~TD_QUEUE;
 
-	/*
-	 *	Pad short frames.
-	 */
-	if (pktlen < ETH_ZLEN) {
-		/* Cannot occur until ZC support */
-		pktlen = ETH_ZLEN;
-		len = cpu_to_le16(ETH_ZLEN);
-		skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
-		memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len);
-		tdinfo->skb = skb;
-		tdinfo->skb_dma[0] = tdinfo->buf_dma;
-		td_ptr->tdesc0.len = len;
-		td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
-		td_ptr->td_buf[0].pa_high = 0;
-		td_ptr->td_buf[0].size = len;	/* queue is 0 anyway */
-		tdinfo->nskb_dma = 1;
-	} else
 #ifdef VELOCITY_ZERO_COPY_SUPPORT
 	if (skb_shinfo(skb)->nr_frags > 0) {
 		int nfrags = skb_shinfo(skb)->nr_frags;
@@ -2202,7 +2154,8 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 	dev->trans_start = jiffies;
 	spin_unlock_irqrestore(&vptr->lock, flags);
-	return 0;
+out:
+	return NETDEV_TX_OK;
 }
 
 /**