Diffstat (limited to 'drivers/net/via-velocity.c')
-rw-r--r--  drivers/net/via-velocity.c | 468
1 file changed, 231 insertions(+), 237 deletions(-)
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 6b8d882d197b..2dced383bcfb 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -12,7 +12,7 @@
  * Scatter gather
  * More testing
  *
- * The changes are (c) Copyright 2004, Red Hat Inc. <alan@redhat.com>
+ * The changes are (c) Copyright 2004, Red Hat Inc. <alan@lxorguk.ukuu.org.uk>
  * Additional fixes and clean up: Francois Romieu
  *
  * This source has not been verified for use in safety critical systems.
@@ -662,6 +662,10 @@ static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid
 	spin_unlock_irq(&vptr->lock);
 }
 
+static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
+{
+	vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0;
+}
 
 /**
  * velocity_rx_reset - handle a receive reset
@@ -677,16 +681,16 @@ static void velocity_rx_reset(struct velocity_info *vptr)
 	struct mac_regs __iomem * regs = vptr->mac_regs;
 	int i;
 
-	vptr->rd_dirty = vptr->rd_filled = vptr->rd_curr = 0;
+	velocity_init_rx_ring_indexes(vptr);
 
 	/*
 	 * Init state, all RD entries belong to the NIC
 	 */
 	for (i = 0; i < vptr->options.numrx; ++i)
-		vptr->rd_ring[i].rdesc0.len |= OWNED_BY_NIC;
+		vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC;
 
 	writew(vptr->options.numrx, &regs->RBRDU);
-	writel(vptr->rd_pool_dma, &regs->RDBaseLo);
+	writel(vptr->rx.pool_dma, &regs->RDBaseLo);
 	writew(0, &regs->RDIdx);
 	writew(vptr->options.numrx - 1, &regs->RDCSize);
 }
@@ -779,15 +783,15 @@ static void velocity_init_registers(struct velocity_info *vptr,
 
 	vptr->int_mask = INT_MASK_DEF;
 
-	writel(vptr->rd_pool_dma, &regs->RDBaseLo);
+	writel(vptr->rx.pool_dma, &regs->RDBaseLo);
 	writew(vptr->options.numrx - 1, &regs->RDCSize);
 	mac_rx_queue_run(regs);
 	mac_rx_queue_wake(regs);
 
 	writew(vptr->options.numtx - 1, &regs->TDCSize);
 
-	for (i = 0; i < vptr->num_txq; i++) {
-		writel(vptr->td_pool_dma[i], &regs->TDBaseLo[i]);
+	for (i = 0; i < vptr->tx.numq; i++) {
+		writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]);
 		mac_tx_queue_run(regs, i);
 	}
 
@@ -1047,7 +1051,7 @@ static void __devinit velocity_init_info(struct pci_dev *pdev,
 
 	vptr->pdev = pdev;
 	vptr->chip_id = info->chip_id;
-	vptr->num_txq = info->txqueue;
+	vptr->tx.numq = info->txqueue;
 	vptr->multicast_limit = MCAM_SIZE;
 	spin_lock_init(&vptr->lock);
 	INIT_LIST_HEAD(&vptr->list);
@@ -1093,95 +1097,69 @@ static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pc
 }
 
 /**
- * velocity_init_rings - set up DMA rings
+ * velocity_init_dma_rings - set up DMA rings
  * @vptr: Velocity to set up
  *
  * Allocate PCI mapped DMA rings for the receive and transmit layer
  * to use.
  */
 
-static int velocity_init_rings(struct velocity_info *vptr)
+static int velocity_init_dma_rings(struct velocity_info *vptr)
 {
-	int i;
-	unsigned int psize;
-	unsigned int tsize;
+	struct velocity_opt *opt = &vptr->options;
+	const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
+	const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc);
+	struct pci_dev *pdev = vptr->pdev;
 	dma_addr_t pool_dma;
-	u8 *pool;
-
-	/*
-	 * Allocate all RD/TD rings a single pool
-	 */
-
-	psize = vptr->options.numrx * sizeof(struct rx_desc) +
-		vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq;
+	void *pool;
+	unsigned int i;
 
 	/*
+	 * Allocate all RD/TD rings a single pool.
+	 *
 	 * pci_alloc_consistent() fulfills the requirement for 64 bytes
 	 * alignment
 	 */
-	pool = pci_alloc_consistent(vptr->pdev, psize, &pool_dma);
-
-	if (pool == NULL) {
-		printk(KERN_ERR "%s : DMA memory allocation failed.\n",
+	pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq +
+		rx_ring_size, &pool_dma);
+	if (!pool) {
+		dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n",
 			vptr->dev->name);
 		return -ENOMEM;
 	}
 
-	memset(pool, 0, psize);
-
-	vptr->rd_ring = (struct rx_desc *) pool;
-
-	vptr->rd_pool_dma = pool_dma;
-
-	tsize = vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq;
-	vptr->tx_bufs = pci_alloc_consistent(vptr->pdev, tsize,
-		&vptr->tx_bufs_dma);
-
-	if (vptr->tx_bufs == NULL) {
-		printk(KERN_ERR "%s: DMA memory allocation failed.\n",
-			vptr->dev->name);
-		pci_free_consistent(vptr->pdev, psize, pool, pool_dma);
-		return -ENOMEM;
+	vptr->rx.ring = pool;
+	vptr->rx.pool_dma = pool_dma;
+
+	pool += rx_ring_size;
+	pool_dma += rx_ring_size;
+
+	for (i = 0; i < vptr->tx.numq; i++) {
+		vptr->tx.rings[i] = pool;
+		vptr->tx.pool_dma[i] = pool_dma;
+		pool += tx_ring_size;
+		pool_dma += tx_ring_size;
 	}
 
-	memset(vptr->tx_bufs, 0, vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq);
-
-	i = vptr->options.numrx * sizeof(struct rx_desc);
-	pool += i;
-	pool_dma += i;
-	for (i = 0; i < vptr->num_txq; i++) {
-		int offset = vptr->options.numtx * sizeof(struct tx_desc);
-
-		vptr->td_pool_dma[i] = pool_dma;
-		vptr->td_rings[i] = (struct tx_desc *) pool;
-		pool += offset;
-		pool_dma += offset;
-	}
 	return 0;
 }
 
 /**
- * velocity_free_rings - free PCI ring pointers
+ * velocity_free_dma_rings - free PCI ring pointers
  * @vptr: Velocity to free from
  *
  * Clean up the PCI ring buffers allocated to this velocity.
  */
 
-static void velocity_free_rings(struct velocity_info *vptr)
+static void velocity_free_dma_rings(struct velocity_info *vptr)
 {
-	int size;
-
-	size = vptr->options.numrx * sizeof(struct rx_desc) +
-		vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq;
+	const int size = vptr->options.numrx * sizeof(struct rx_desc) +
+		vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
 
-	pci_free_consistent(vptr->pdev, size, vptr->rd_ring, vptr->rd_pool_dma);
-
-	size = vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq;
-
-	pci_free_consistent(vptr->pdev, size, vptr->tx_bufs, vptr->tx_bufs_dma);
+	pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma);
 }
 
-static inline void velocity_give_many_rx_descs(struct velocity_info *vptr)
+static void velocity_give_many_rx_descs(struct velocity_info *vptr)
 {
 	struct mac_regs __iomem *regs = vptr->mac_regs;
 	int avail, dirty, unusable;
@@ -1190,49 +1168,52 @@ static inline void velocity_give_many_rx_descs(struct velocity_info *vptr)
 	 * RD number must be equal to 4X per hardware spec
 	 * (programming guide rev 1.20, p.13)
 	 */
-	if (vptr->rd_filled < 4)
+	if (vptr->rx.filled < 4)
 		return;
 
 	wmb();
 
-	unusable = vptr->rd_filled & 0x0003;
-	dirty = vptr->rd_dirty - unusable;
-	for (avail = vptr->rd_filled & 0xfffc; avail; avail--) {
+	unusable = vptr->rx.filled & 0x0003;
+	dirty = vptr->rx.dirty - unusable;
+	for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
 		dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
-		vptr->rd_ring[dirty].rdesc0.len |= OWNED_BY_NIC;
+		vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
 	}
 
-	writew(vptr->rd_filled & 0xfffc, &regs->RBRDU);
-	vptr->rd_filled = unusable;
+	writew(vptr->rx.filled & 0xfffc, &regs->RBRDU);
+	vptr->rx.filled = unusable;
 }
 
 static int velocity_rx_refill(struct velocity_info *vptr)
 {
-	int dirty = vptr->rd_dirty, done = 0, ret = 0;
+	int dirty = vptr->rx.dirty, done = 0;
 
 	do {
-		struct rx_desc *rd = vptr->rd_ring + dirty;
+		struct rx_desc *rd = vptr->rx.ring + dirty;
 
 		/* Fine for an all zero Rx desc at init time as well */
 		if (rd->rdesc0.len & OWNED_BY_NIC)
 			break;
 
-		if (!vptr->rd_info[dirty].skb) {
-			ret = velocity_alloc_rx_buf(vptr, dirty);
-			if (ret < 0)
+		if (!vptr->rx.info[dirty].skb) {
+			if (velocity_alloc_rx_buf(vptr, dirty) < 0)
 				break;
 		}
 		done++;
 		dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
-	} while (dirty != vptr->rd_curr);
+	} while (dirty != vptr->rx.curr);
 
 	if (done) {
-		vptr->rd_dirty = dirty;
-		vptr->rd_filled += done;
-		velocity_give_many_rx_descs(vptr);
+		vptr->rx.dirty = dirty;
+		vptr->rx.filled += done;
 	}
 
-	return ret;
+	return done;
+}
+
+static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
+{
+	vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
 }
 
 /**
@@ -1245,25 +1226,24 @@ static int velocity_rx_refill(struct velocity_info *vptr)
 
 static int velocity_init_rd_ring(struct velocity_info *vptr)
 {
-	int ret;
-	int mtu = vptr->dev->mtu;
-
-	vptr->rx_buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
+	int ret = -ENOMEM;
 
-	vptr->rd_info = kcalloc(vptr->options.numrx,
+	vptr->rx.info = kcalloc(vptr->options.numrx,
 				sizeof(struct velocity_rd_info), GFP_KERNEL);
-	if (!vptr->rd_info)
-		return -ENOMEM;
+	if (!vptr->rx.info)
+		goto out;
 
-	vptr->rd_filled = vptr->rd_dirty = vptr->rd_curr = 0;
+	velocity_init_rx_ring_indexes(vptr);
 
-	ret = velocity_rx_refill(vptr);
-	if (ret < 0) {
+	if (velocity_rx_refill(vptr) != vptr->options.numrx) {
 		VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
 			"%s: failed to allocate RX buffer.\n", vptr->dev->name);
 		velocity_free_rd_ring(vptr);
+		goto out;
 	}
 
+	ret = 0;
+out:
 	return ret;
 }
 
@@ -1279,27 +1259,27 @@ static void velocity_free_rd_ring(struct velocity_info *vptr)
 {
 	int i;
 
-	if (vptr->rd_info == NULL)
+	if (vptr->rx.info == NULL)
 		return;
 
 	for (i = 0; i < vptr->options.numrx; i++) {
-		struct velocity_rd_info *rd_info = &(vptr->rd_info[i]);
-		struct rx_desc *rd = vptr->rd_ring + i;
+		struct velocity_rd_info *rd_info = &(vptr->rx.info[i]);
+		struct rx_desc *rd = vptr->rx.ring + i;
 
 		memset(rd, 0, sizeof(*rd));
 
 		if (!rd_info->skb)
 			continue;
-		pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz,
+		pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
 				 PCI_DMA_FROMDEVICE);
-		rd_info->skb_dma = (dma_addr_t) NULL;
+		rd_info->skb_dma = 0;
 
 		dev_kfree_skb(rd_info->skb);
 		rd_info->skb = NULL;
 	}
 
-	kfree(vptr->rd_info);
-	vptr->rd_info = NULL;
+	kfree(vptr->rx.info);
+	vptr->rx.info = NULL;
 }
 
 /**
@@ -1313,33 +1293,23 @@ static void velocity_free_rd_ring(struct velocity_info *vptr)
 
 static int velocity_init_td_ring(struct velocity_info *vptr)
 {
-	int i, j;
 	dma_addr_t curr;
-	struct tx_desc *td;
-	struct velocity_td_info *td_info;
+	unsigned int j;
 
 	/* Init the TD ring entries */
-	for (j = 0; j < vptr->num_txq; j++) {
-		curr = vptr->td_pool_dma[j];
+	for (j = 0; j < vptr->tx.numq; j++) {
+		curr = vptr->tx.pool_dma[j];
 
-		vptr->td_infos[j] = kcalloc(vptr->options.numtx,
+		vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
 					    sizeof(struct velocity_td_info),
 					    GFP_KERNEL);
-		if (!vptr->td_infos[j]) {
+		if (!vptr->tx.infos[j]) {
 			while(--j >= 0)
-				kfree(vptr->td_infos[j]);
+				kfree(vptr->tx.infos[j]);
 			return -ENOMEM;
 		}
 
-		for (i = 0; i < vptr->options.numtx; i++, curr += sizeof(struct tx_desc)) {
-			td = &(vptr->td_rings[j][i]);
-			td_info = &(vptr->td_infos[j][i]);
-			td_info->buf = vptr->tx_bufs +
-				(j * vptr->options.numtx + i) * PKT_BUF_SZ;
-			td_info->buf_dma = vptr->tx_bufs_dma +
-				(j * vptr->options.numtx + i) * PKT_BUF_SZ;
-		}
-		vptr->td_tail[j] = vptr->td_curr[j] = vptr->td_used[j] = 0;
+		vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0;
 	}
 	return 0;
 }
@@ -1351,7 +1321,7 @@ static int velocity_init_td_ring(struct velocity_info *vptr)
 static void velocity_free_td_ring_entry(struct velocity_info *vptr,
 		int q, int n)
 {
-	struct velocity_td_info * td_info = &(vptr->td_infos[q][n]);
+	struct velocity_td_info * td_info = &(vptr->tx.infos[q][n]);
 	int i;
 
 	if (td_info == NULL)
@@ -1363,7 +1333,7 @@ static void velocity_free_td_ring_entry(struct velocity_info *vptr,
 		if (td_info->skb_dma[i]) {
 			pci_unmap_single(vptr->pdev, td_info->skb_dma[i],
 				td_info->skb->len, PCI_DMA_TODEVICE);
-			td_info->skb_dma[i] = (dma_addr_t) NULL;
+			td_info->skb_dma[i] = 0;
 		}
 	}
 	dev_kfree_skb(td_info->skb);
@@ -1383,15 +1353,15 @@ static void velocity_free_td_ring(struct velocity_info *vptr)
 {
 	int i, j;
 
-	for (j = 0; j < vptr->num_txq; j++) {
-		if (vptr->td_infos[j] == NULL)
+	for (j = 0; j < vptr->tx.numq; j++) {
+		if (vptr->tx.infos[j] == NULL)
 			continue;
 		for (i = 0; i < vptr->options.numtx; i++) {
 			velocity_free_td_ring_entry(vptr, j, i);
 
 		}
-		kfree(vptr->td_infos[j]);
-		vptr->td_infos[j] = NULL;
+		kfree(vptr->tx.infos[j]);
+		vptr->tx.infos[j] = NULL;
 	}
 }
 
@@ -1408,13 +1378,13 @@ static void velocity_free_td_ring(struct velocity_info *vptr)
 static int velocity_rx_srv(struct velocity_info *vptr, int status)
 {
 	struct net_device_stats *stats = &vptr->stats;
-	int rd_curr = vptr->rd_curr;
+	int rd_curr = vptr->rx.curr;
 	int works = 0;
 
 	do {
-		struct rx_desc *rd = vptr->rd_ring + rd_curr;
+		struct rx_desc *rd = vptr->rx.ring + rd_curr;
 
-		if (!vptr->rd_info[rd_curr].skb)
+		if (!vptr->rx.info[rd_curr].skb)
 			break;
 
 		if (rd->rdesc0.len & OWNED_BY_NIC)
@@ -1446,12 +1416,10 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status)
 			rd_curr = 0;
 	} while (++works <= 15);
 
-	vptr->rd_curr = rd_curr;
+	vptr->rx.curr = rd_curr;
 
-	if (works > 0 && velocity_rx_refill(vptr) < 0) {
-		VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
-			"%s: rx buf allocation failure\n", vptr->dev->name);
-	}
+	if ((works > 0) && (velocity_rx_refill(vptr) > 0))
+		velocity_give_many_rx_descs(vptr);
 
 	VAR_USED(stats);
 	return works;
@@ -1495,24 +1463,18 @@ static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
  * enough. This function returns a negative value if the received
  * packet is too big or if memory is exhausted.
  */
-static inline int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
+static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
 		struct velocity_info *vptr)
 {
 	int ret = -1;
-
 	if (pkt_size < rx_copybreak) {
 		struct sk_buff *new_skb;
 
-		new_skb = dev_alloc_skb(pkt_size + 2);
+		new_skb = netdev_alloc_skb(vptr->dev, pkt_size + 2);
 		if (new_skb) {
-			new_skb->dev = vptr->dev;
 			new_skb->ip_summed = rx_skb[0]->ip_summed;
-
-			if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN)
-				skb_reserve(new_skb, 2);
-
-			skb_copy_from_linear_data(rx_skb[0], new_skb->data,
-						  pkt_size);
+			skb_reserve(new_skb, 2);
+			skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
 			*rx_skb = new_skb;
 			ret = 0;
 		}
@@ -1533,12 +1495,8 @@ static inline int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
 static inline void velocity_iph_realign(struct velocity_info *vptr,
 		struct sk_buff *skb, int pkt_size)
 {
-	/* FIXME - memmove ? */
 	if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
-		int i;
-
-		for (i = pkt_size; i >= 0; i--)
-			*(skb->data + i + 2) = *(skb->data + i);
+		memmove(skb->data + 2, skb->data, pkt_size);
 		skb_reserve(skb, 2);
 	}
 }
@@ -1556,8 +1514,8 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
 {
 	void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
 	struct net_device_stats *stats = &vptr->stats;
-	struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);
-	struct rx_desc *rd = &(vptr->rd_ring[idx]);
+	struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
+	struct rx_desc *rd = &(vptr->rx.ring[idx]);
 	int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
 	struct sk_buff *skb;
 
@@ -1573,7 +1531,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
 	skb = rd_info->skb;
 
 	pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,
-				    vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);
+				    vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
 
 	/*
 	 * Drop frame not meeting IEEE 802.3
@@ -1596,7 +1554,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
 		rd_info->skb = NULL;
 	}
 
-	pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz,
+	pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
 		   PCI_DMA_FROMDEVICE);
 
 	skb_put(skb, pkt_len - 4);
@@ -1626,10 +1584,10 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
 
 static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
 {
-	struct rx_desc *rd = &(vptr->rd_ring[idx]);
-	struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);
+	struct rx_desc *rd = &(vptr->rx.ring[idx]);
+	struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
 
-	rd_info->skb = dev_alloc_skb(vptr->rx_buf_sz + 64);
+	rd_info->skb = dev_alloc_skb(vptr->rx.buf_sz + 64);
 	if (rd_info->skb == NULL)
 		return -ENOMEM;
 
@@ -1638,15 +1596,15 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
 	 * 64byte alignment.
 	 */
 	skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63);
-	rd_info->skb->dev = vptr->dev;
-	rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);
+	rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data,
+					 vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
 
 	/*
 	 * Fill in the descriptor to match
 	 */
 
 	*((u32 *) & (rd->rdesc0)) = 0;
-	rd->size = cpu_to_le16(vptr->rx_buf_sz) | RX_INTEN;
+	rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN;
 	rd->pa_low = cpu_to_le32(rd_info->skb_dma);
 	rd->pa_high = 0;
 	return 0;
@@ -1672,15 +1630,15 @@ static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
 	struct velocity_td_info *tdinfo;
 	struct net_device_stats *stats = &vptr->stats;
 
-	for (qnum = 0; qnum < vptr->num_txq; qnum++) {
-		for (idx = vptr->td_tail[qnum]; vptr->td_used[qnum] > 0;
+	for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
+		for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
 			idx = (idx + 1) % vptr->options.numtx) {
 
 			/*
 			 * Get Tx Descriptor
 			 */
-			td = &(vptr->td_rings[qnum][idx]);
-			tdinfo = &(vptr->td_infos[qnum][idx]);
+			td = &(vptr->tx.rings[qnum][idx]);
+			tdinfo = &(vptr->tx.infos[qnum][idx]);
 
 			if (td->tdesc0.len & OWNED_BY_NIC)
 				break;
@@ -1704,9 +1662,9 @@ static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
 				stats->tx_bytes += tdinfo->skb->len;
 			}
 			velocity_free_tx_buf(vptr, tdinfo);
-			vptr->td_used[qnum]--;
+			vptr->tx.used[qnum]--;
 		}
-		vptr->td_tail[qnum] = idx;
+		vptr->tx.tail[qnum] = idx;
 
 		if (AVAIL_TD(vptr, qnum) < 1) {
 			full = 1;
@@ -1878,7 +1836,7 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_
 	/*
 	 * Don't unmap the pre-allocated tx_bufs
 	 */
-	if (tdinfo->skb_dma && (tdinfo->skb_dma[0] != tdinfo->buf_dma)) {
+	if (tdinfo->skb_dma) {
 
 		for (i = 0; i < tdinfo->nskb_dma; i++) {
 #ifdef VELOCITY_ZERO_COPY_SUPPORT
@@ -1893,6 +1851,40 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_
 	tdinfo->skb = NULL;
 }
 
+static int velocity_init_rings(struct velocity_info *vptr, int mtu)
+{
+	int ret;
+
+	velocity_set_rxbufsize(vptr, mtu);
+
+	ret = velocity_init_dma_rings(vptr);
+	if (ret < 0)
+		goto out;
+
+	ret = velocity_init_rd_ring(vptr);
+	if (ret < 0)
+		goto err_free_dma_rings_0;
+
+	ret = velocity_init_td_ring(vptr);
+	if (ret < 0)
+		goto err_free_rd_ring_1;
+out:
+	return ret;
+
+err_free_rd_ring_1:
+	velocity_free_rd_ring(vptr);
+err_free_dma_rings_0:
+	velocity_free_dma_rings(vptr);
+	goto out;
+}
+
+static void velocity_free_rings(struct velocity_info *vptr)
+{
+	velocity_free_td_ring(vptr);
+	velocity_free_rd_ring(vptr);
+	velocity_free_dma_rings(vptr);
+}
+
 /**
  * velocity_open - interface activation callback
  * @dev: network layer device to open
@@ -1909,21 +1901,15 @@ static int velocity_open(struct net_device *dev)
 	struct velocity_info *vptr = netdev_priv(dev);
 	int ret;
 
-	ret = velocity_init_rings(vptr);
+	ret = velocity_init_rings(vptr, dev->mtu);
 	if (ret < 0)
 		goto out;
 
-	ret = velocity_init_rd_ring(vptr);
-	if (ret < 0)
-		goto err_free_desc_rings;
-
-	ret = velocity_init_td_ring(vptr);
-	if (ret < 0)
-		goto err_free_rd_ring;
-
 	/* Ensure chip is running */
 	pci_set_power_state(vptr->pdev, PCI_D0);
 
+	velocity_give_many_rx_descs(vptr);
+
 	velocity_init_registers(vptr, VELOCITY_INIT_COLD);
 
 	ret = request_irq(vptr->pdev->irq, &velocity_intr, IRQF_SHARED,
@@ -1931,7 +1917,8 @@ static int velocity_open(struct net_device *dev)
 	if (ret < 0) {
 		/* Power down the chip */
 		pci_set_power_state(vptr->pdev, PCI_D3hot);
-		goto err_free_td_ring;
+		velocity_free_rings(vptr);
+		goto out;
 	}
 
 	mac_enable_int(vptr->mac_regs);
@@ -1939,14 +1926,6 @@ static int velocity_open(struct net_device *dev)
 	vptr->flags |= VELOCITY_FLAGS_OPENED;
 out:
 	return ret;
-
-err_free_td_ring:
-	velocity_free_td_ring(vptr);
-err_free_rd_ring:
-	velocity_free_rd_ring(vptr);
-err_free_desc_rings:
-	velocity_free_rings(vptr);
-	goto out;
 }
 
 /**
@@ -1962,48 +1941,72 @@ err_free_desc_rings:
 static int velocity_change_mtu(struct net_device *dev, int new_mtu)
 {
 	struct velocity_info *vptr = netdev_priv(dev);
-	unsigned long flags;
-	int oldmtu = dev->mtu;
 	int ret = 0;
 
 	if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) {
 		VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n",
 				vptr->dev->name);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out_0;
 	}
 
 	if (!netif_running(dev)) {
 		dev->mtu = new_mtu;
-		return 0;
+		goto out_0;
 	}
 
-	if (new_mtu != oldmtu) {
+	if (dev->mtu != new_mtu) {
+		struct velocity_info *tmp_vptr;
+		unsigned long flags;
+		struct rx_info rx;
+		struct tx_info tx;
+
+		tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL);
+		if (!tmp_vptr) {
+			ret = -ENOMEM;
+			goto out_0;
+		}
+
+		tmp_vptr->dev = dev;
+		tmp_vptr->pdev = vptr->pdev;
+		tmp_vptr->options = vptr->options;
+		tmp_vptr->tx.numq = vptr->tx.numq;
+
+		ret = velocity_init_rings(tmp_vptr, new_mtu);
+		if (ret < 0)
+			goto out_free_tmp_vptr_1;
+
 		spin_lock_irqsave(&vptr->lock, flags);
 
 		netif_stop_queue(dev);
 		velocity_shutdown(vptr);
 
-		velocity_free_td_ring(vptr);
-		velocity_free_rd_ring(vptr);
+		rx = vptr->rx;
+		tx = vptr->tx;
 
-		dev->mtu = new_mtu;
+		vptr->rx = tmp_vptr->rx;
+		vptr->tx = tmp_vptr->tx;
 
-		ret = velocity_init_rd_ring(vptr);
-		if (ret < 0)
-			goto out_unlock;
+		tmp_vptr->rx = rx;
+		tmp_vptr->tx = tx;
 
-		ret = velocity_init_td_ring(vptr);
-		if (ret < 0)
-			goto out_unlock;
+		dev->mtu = new_mtu;
+
+		velocity_give_many_rx_descs(vptr);
 
 		velocity_init_registers(vptr, VELOCITY_INIT_COLD);
 
 		mac_enable_int(vptr->mac_regs);
 		netif_start_queue(dev);
-out_unlock:
+
 		spin_unlock_irqrestore(&vptr->lock, flags);
-	}
 
+		velocity_free_rings(tmp_vptr);
+
+out_free_tmp_vptr_1:
+		kfree(tmp_vptr);
+	}
+out_0:
 	return ret;
 }
 
@@ -2049,9 +2052,6 @@ static int velocity_close(struct net_device *dev)
 	/* Power down the chip */
 	pci_set_power_state(vptr->pdev, PCI_D3hot);
 
-	/* Free the resources */
-	velocity_free_td_ring(vptr);
-	velocity_free_rd_ring(vptr);
 	velocity_free_rings(vptr);
 
 	vptr->flags &= (~VELOCITY_FLAGS_OPENED);
@@ -2074,9 +2074,19 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct tx_desc *td_ptr;
 	struct velocity_td_info *tdinfo;
 	unsigned long flags;
-	int index;
 	int pktlen = skb->len;
-	__le16 len = cpu_to_le16(pktlen);
+	__le16 len;
+	int index;
+
+
+
+	if (skb->len < ETH_ZLEN) {
+		if (skb_padto(skb, ETH_ZLEN))
+			goto out;
+		pktlen = ETH_ZLEN;
+	}
+
+	len = cpu_to_le16(pktlen);
 
 #ifdef VELOCITY_ZERO_COPY_SUPPORT
 	if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
@@ -2087,30 +2097,13 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	spin_lock_irqsave(&vptr->lock, flags);
 
-	index = vptr->td_curr[qnum];
-	td_ptr = &(vptr->td_rings[qnum][index]);
-	tdinfo = &(vptr->td_infos[qnum][index]);
+	index = vptr->tx.curr[qnum];
+	td_ptr = &(vptr->tx.rings[qnum][index]);
+	tdinfo = &(vptr->tx.infos[qnum][index]);
 
 	td_ptr->tdesc1.TCR = TCR0_TIC;
 	td_ptr->td_buf[0].size &= ~TD_QUEUE;
 
-	/*
-	 * Pad short frames.
-	 */
-	if (pktlen < ETH_ZLEN) {
-		/* Cannot occur until ZC support */
-		pktlen = ETH_ZLEN;
-		len = cpu_to_le16(ETH_ZLEN);
-		skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
-		memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len);
-		tdinfo->skb = skb;
-		tdinfo->skb_dma[0] = tdinfo->buf_dma;
-		td_ptr->tdesc0.len = len;
-		td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
-		td_ptr->td_buf[0].pa_high = 0;
-		td_ptr->td_buf[0].size = len;	/* queue is 0 anyway */
-		tdinfo->nskb_dma = 1;
-	} else
 #ifdef VELOCITY_ZERO_COPY_SUPPORT
 	if (skb_shinfo(skb)->nr_frags > 0) {
 		int nfrags = skb_shinfo(skb)->nr_frags;
@@ -2119,9 +2112,9 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
 			skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
 			tdinfo->skb_dma[0] = tdinfo->buf_dma;
 			td_ptr->tdesc0.len = len;
-			td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
-			td_ptr->td_buf[0].pa_high = 0;
-			td_ptr->td_buf[0].size = len;	/* queue is 0 anyway */
+			td_ptr->tx.buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
+			td_ptr->tx.buf[0].pa_high = 0;
+			td_ptr->tx.buf[0].size = len;	/* queue is 0 anyway */
 			tdinfo->nskb_dma = 1;
 		} else {
 			int i = 0;
@@ -2132,9 +2125,9 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
 			td_ptr->tdesc0.len = len;
 
 			/* FIXME: support 48bit DMA later */
-			td_ptr->td_buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma);
-			td_ptr->td_buf[i].pa_high = 0;
-			td_ptr->td_buf[i].size = cpu_to_le16(skb_headlen(skb));
+			td_ptr->tx.buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma);
+			td_ptr->tx.buf[i].pa_high = 0;
+			td_ptr->tx.buf[i].size = cpu_to_le16(skb_headlen(skb));
 
 			for (i = 0; i < nfrags; i++) {
 				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -2142,9 +2135,9 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
 
 				tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE);
 
-				td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
-				td_ptr->td_buf[i + 1].pa_high = 0;
-				td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size);
+				td_ptr->tx.buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
+				td_ptr->tx.buf[i + 1].pa_high = 0;
+				td_ptr->tx.buf[i + 1].size = cpu_to_le16(frag->size);
 			}
 			tdinfo->nskb_dma = i - 1;
 		}
@@ -2190,19 +2183,20 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (prev < 0)
 			prev = vptr->options.numtx - 1;
 		td_ptr->tdesc0.len |= OWNED_BY_NIC;
-		vptr->td_used[qnum]++;
-		vptr->td_curr[qnum] = (index + 1) % vptr->options.numtx;
+		vptr->tx.used[qnum]++;
+		vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
 
 		if (AVAIL_TD(vptr, qnum) < 1)
 			netif_stop_queue(dev);
 
-		td_ptr = &(vptr->td_rings[qnum][prev]);
+		td_ptr = &(vptr->tx.rings[qnum][prev]);
 		td_ptr->td_buf[0].size |= TD_QUEUE;
 		mac_tx_queue_wake(vptr->mac_regs, qnum);
 	}
 	dev->trans_start = jiffies;
 	spin_unlock_irqrestore(&vptr->lock, flags);
-	return 0;
+out:
+	return NETDEV_TX_OK;
 }
 
 /**
@@ -3452,8 +3446,8 @@ static int velocity_resume(struct pci_dev *pdev)
 
 	velocity_tx_srv(vptr, 0);
 
-	for (i = 0; i < vptr->num_txq; i++) {
-		if (vptr->td_used[i]) {
+	for (i = 0; i < vptr->tx.numq; i++) {
+		if (vptr->tx.used[i]) {
 			mac_tx_queue_wake(vptr->mac_regs, i);
 		}
 	}