 drivers/net/via-velocity.c | 173 +++++++++++++++++++++++--------------------------
 drivers/net/via-velocity.h |  50 ++++++++++++++---------
 2 files changed, 114 insertions(+), 109 deletions(-)
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 370ce30f2f45..ad3c6733bde7 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -677,16 +677,16 @@ static void velocity_rx_reset(struct velocity_info *vptr)
 	struct mac_regs __iomem * regs = vptr->mac_regs;
 	int i;
 
-	vptr->rd_dirty = vptr->rd_filled = vptr->rd_curr = 0;
+	vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0;
 
 	/*
 	 * Init state, all RD entries belong to the NIC
 	 */
 	for (i = 0; i < vptr->options.numrx; ++i)
-		vptr->rd_ring[i].rdesc0.len |= OWNED_BY_NIC;
+		vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC;
 
 	writew(vptr->options.numrx, &regs->RBRDU);
-	writel(vptr->rd_pool_dma, &regs->RDBaseLo);
+	writel(vptr->rx.pool_dma, &regs->RDBaseLo);
 	writew(0, &regs->RDIdx);
 	writew(vptr->options.numrx - 1, &regs->RDCSize);
 }
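
The reset path above only renames fields; the ownership protocol itself is untouched: the driver marks a descriptor as NIC-owned by setting a flag bit in rdesc0.len, and the NIC clears it when it has written a frame. A standalone model of that handshake (toy struct and toy bit value, not the driver's real rx_desc layout or constant):

#include <stdint.h>
#include <stdio.h>

#define OWNED_BY_NIC 0x8000u	/* illustrative value; the real one is in via-velocity.h */

struct toy_rx_desc { uint16_t len; };

int main(void)
{
	struct toy_rx_desc ring[4] = { { 0 } };
	int i;

	/* reset path: hand every descriptor to the NIC, as in velocity_rx_reset() */
	for (i = 0; i < 4; i++)
		ring[i].len |= OWNED_BY_NIC;

	/* the NIC clears the bit on completion; only then may the driver touch it */
	ring[0].len &= (uint16_t)~OWNED_BY_NIC;
	for (i = 0; i < 4; i++)
		printf("desc %d owned by %s\n", i,
		       (ring[i].len & OWNED_BY_NIC) ? "NIC" : "driver");
	return 0;
}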
@@ -779,15 +779,15 @@ static void velocity_init_registers(struct velocity_info *vptr,
 
 	vptr->int_mask = INT_MASK_DEF;
 
-	writel(vptr->rd_pool_dma, &regs->RDBaseLo);
+	writel(vptr->rx.pool_dma, &regs->RDBaseLo);
 	writew(vptr->options.numrx - 1, &regs->RDCSize);
 	mac_rx_queue_run(regs);
 	mac_rx_queue_wake(regs);
 
 	writew(vptr->options.numtx - 1, &regs->TDCSize);
 
-	for (i = 0; i < vptr->num_txq; i++) {
-		writel(vptr->td_pool_dma[i], &regs->TDBaseLo[i]);
+	for (i = 0; i < vptr->tx.numq; i++) {
+		writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]);
 		mac_tx_queue_run(regs, i);
 	}
 
@@ -1047,7 +1047,7 @@ static void __devinit velocity_init_info(struct pci_dev *pdev,
 
 	vptr->pdev = pdev;
 	vptr->chip_id = info->chip_id;
-	vptr->num_txq = info->txqueue;
+	vptr->tx.numq = info->txqueue;
 	vptr->multicast_limit = MCAM_SIZE;
 	spin_lock_init(&vptr->lock);
 	INIT_LIST_HEAD(&vptr->list);
@@ -1116,7 +1116,7 @@ static int velocity_init_rings(struct velocity_info *vptr)
 	 * pci_alloc_consistent() fulfills the requirement for 64 bytes
 	 * alignment
 	 */
-	pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->num_txq +
+	pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq +
 			rx_ring_size, &pool_dma);
 	if (!pool) {
 		dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n",
@@ -1124,15 +1124,15 @@ static int velocity_init_rings(struct velocity_info *vptr)
 		return -ENOMEM;
 	}
 
-	vptr->rd_ring = pool;
-	vptr->rd_pool_dma = pool_dma;
+	vptr->rx.ring = pool;
+	vptr->rx.pool_dma = pool_dma;
 
 	pool += rx_ring_size;
 	pool_dma += rx_ring_size;
 
-	for (i = 0; i < vptr->num_txq; i++) {
-		vptr->td_rings[i] = pool;
-		vptr->td_pool_dma[i] = pool_dma;
+	for (i = 0; i < vptr->tx.numq; i++) {
+		vptr->tx.rings[i] = pool;
+		vptr->tx.pool_dma[i] = pool_dma;
 		pool += tx_ring_size;
 		pool_dma += tx_ring_size;
 	}
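
For context: velocity_init_rings() carves the rx ring and every tx ring out of one coherent DMA allocation, leaning on pci_alloc_consistent()'s 64-byte alignment guarantee. A userspace sketch of the same offset arithmetic (descriptor sizes and counts here are placeholders, not the driver's real sizeof(struct rx_desc)/sizeof(struct tx_desc) or defaults):

#include <stdio.h>
#include <stddef.h>

enum { RX_DESC_SZ = 16, TX_DESC_SZ = 64 };	/* toy stand-ins */

int main(void)
{
	int numrx = 64, numtx = 64, numq = 1;	/* invented values */
	size_t rx_ring_size = numrx * RX_DESC_SZ;
	size_t tx_ring_size = numtx * TX_DESC_SZ;
	size_t total = rx_ring_size + tx_ring_size * numq;
	size_t off = 0;
	int q;

	/* rx ring first, then one tx ring per queue, all back to back */
	printf("rx ring   @ offset %zu\n", off);
	off += rx_ring_size;
	for (q = 0; q < numq; q++, off += tx_ring_size)
		printf("tx ring %d @ offset %zu\n", q, off);
	printf("one allocation of %zu bytes total\n", total);
	return 0;
}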
@@ -1150,9 +1150,9 @@ static int velocity_init_rings(struct velocity_info *vptr)
 static void velocity_free_rings(struct velocity_info *vptr)
 {
 	const int size = vptr->options.numrx * sizeof(struct rx_desc) +
-		vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq;
+		vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
 
-	pci_free_consistent(vptr->pdev, size, vptr->rd_ring, vptr->rd_pool_dma);
+	pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma);
 }
 
 static void velocity_give_many_rx_descs(struct velocity_info *vptr)
@@ -1164,44 +1164,44 @@ static void velocity_give_many_rx_descs(struct velocity_info *vptr)
 	 * RD number must be equal to 4X per hardware spec
 	 * (programming guide rev 1.20, p.13)
 	 */
-	if (vptr->rd_filled < 4)
+	if (vptr->rx.filled < 4)
 		return;
 
 	wmb();
 
-	unusable = vptr->rd_filled & 0x0003;
-	dirty = vptr->rd_dirty - unusable;
-	for (avail = vptr->rd_filled & 0xfffc; avail; avail--) {
+	unusable = vptr->rx.filled & 0x0003;
+	dirty = vptr->rx.dirty - unusable;
+	for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
 		dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
-		vptr->rd_ring[dirty].rdesc0.len |= OWNED_BY_NIC;
+		vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
 	}
 
-	writew(vptr->rd_filled & 0xfffc, &regs->RBRDU);
-	vptr->rd_filled = unusable;
+	writew(vptr->rx.filled & 0xfffc, &regs->RBRDU);
+	vptr->rx.filled = unusable;
 }
 
 static int velocity_rx_refill(struct velocity_info *vptr)
 {
-	int dirty = vptr->rd_dirty, done = 0;
+	int dirty = vptr->rx.dirty, done = 0;
 
 	do {
-		struct rx_desc *rd = vptr->rd_ring + dirty;
+		struct rx_desc *rd = vptr->rx.ring + dirty;
 
 		/* Fine for an all zero Rx desc at init time as well */
 		if (rd->rdesc0.len & OWNED_BY_NIC)
 			break;
 
-		if (!vptr->rd_info[dirty].skb) {
+		if (!vptr->rx.info[dirty].skb) {
 			if (velocity_alloc_rx_buf(vptr, dirty) < 0)
 				break;
 		}
 		done++;
 		dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
-	} while (dirty != vptr->rd_curr);
+	} while (dirty != vptr->rx.curr);
 
 	if (done) {
-		vptr->rd_dirty = dirty;
-		vptr->rd_filled += done;
+		vptr->rx.dirty = dirty;
+		vptr->rx.filled += done;
 	}
 
 	return done;
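
The masking in velocity_give_many_rx_descs() implements the hardware rule quoted in its comment: descriptors go back to the NIC four at a time, and the remainder (filled & 3) is withheld until the next refill. A quick standalone model of the arithmetic (values invented):

#include <stdio.h>

int main(void)
{
	unsigned filled = 11;			/* descriptors refilled so far */
	unsigned unusable = filled & 0x0003;	/* remainder kept back: 3 */
	unsigned avail = filled & 0xfffc;	/* handed to the NIC: 8 */

	printf("refilled=%u -> give NIC %u, carry over %u\n",
	       filled, avail, unusable);
	/* after writew(avail, &regs->RBRDU), filled becomes the remainder */
	filled = unusable;
	printf("filled is now %u\n", filled);
	return 0;
}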
@@ -1209,7 +1209,7 @@ static int velocity_rx_refill(struct velocity_info *vptr)
 
 static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
 {
-	vptr->rx_buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
+	vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
 }
 
1215/** 1215/**
@@ -1224,12 +1224,12 @@ static int velocity_init_rd_ring(struct velocity_info *vptr)
 {
 	int ret = -ENOMEM;
 
-	vptr->rd_info = kcalloc(vptr->options.numrx,
+	vptr->rx.info = kcalloc(vptr->options.numrx,
 			sizeof(struct velocity_rd_info), GFP_KERNEL);
-	if (!vptr->rd_info)
+	if (!vptr->rx.info)
 		goto out;
 
-	vptr->rd_filled = vptr->rd_dirty = vptr->rd_curr = 0;
+	vptr->rx.filled = vptr->rx.dirty = vptr->rx.curr = 0;
 
 	if (velocity_rx_refill(vptr) != vptr->options.numrx) {
 		VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
@@ -1255,18 +1255,18 @@ static void velocity_free_rd_ring(struct velocity_info *vptr)
 {
 	int i;
 
-	if (vptr->rd_info == NULL)
+	if (vptr->rx.info == NULL)
 		return;
 
 	for (i = 0; i < vptr->options.numrx; i++) {
-		struct velocity_rd_info *rd_info = &(vptr->rd_info[i]);
-		struct rx_desc *rd = vptr->rd_ring + i;
+		struct velocity_rd_info *rd_info = &(vptr->rx.info[i]);
+		struct rx_desc *rd = vptr->rx.ring + i;
 
 		memset(rd, 0, sizeof(*rd));
 
 		if (!rd_info->skb)
 			continue;
-		pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz,
+		pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
 				 PCI_DMA_FROMDEVICE);
 		rd_info->skb_dma = (dma_addr_t) NULL;
 
@@ -1274,8 +1274,8 @@ static void velocity_free_rd_ring(struct velocity_info *vptr)
 		rd_info->skb = NULL;
 	}
 
-	kfree(vptr->rd_info);
-	vptr->rd_info = NULL;
+	kfree(vptr->rx.info);
+	vptr->rx.info = NULL;
 }
 
 /**
@@ -1293,19 +1293,19 @@ static int velocity_init_td_ring(struct velocity_info *vptr)
 	unsigned int j;
 
 	/* Init the TD ring entries */
-	for (j = 0; j < vptr->num_txq; j++) {
-		curr = vptr->td_pool_dma[j];
+	for (j = 0; j < vptr->tx.numq; j++) {
+		curr = vptr->tx.pool_dma[j];
 
-		vptr->td_infos[j] = kcalloc(vptr->options.numtx,
+		vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
 			sizeof(struct velocity_td_info),
 			GFP_KERNEL);
-		if (!vptr->td_infos[j]) {
+		if (!vptr->tx.infos[j]) {
 			while(--j >= 0)
-				kfree(vptr->td_infos[j]);
+				kfree(vptr->tx.infos[j]);
 			return -ENOMEM;
 		}
 
-		vptr->td_tail[j] = vptr->td_curr[j] = vptr->td_used[j] = 0;
+		vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0;
 	}
 	return 0;
 }
@@ -1317,7 +1317,7 @@ static int velocity_init_td_ring(struct velocity_info *vptr)
 static void velocity_free_td_ring_entry(struct velocity_info *vptr,
 							 int q, int n)
 {
-	struct velocity_td_info * td_info = &(vptr->td_infos[q][n]);
+	struct velocity_td_info * td_info = &(vptr->tx.infos[q][n]);
 	int i;
 
 	if (td_info == NULL)
@@ -1349,15 +1349,15 @@ static void velocity_free_td_ring(struct velocity_info *vptr)
 {
 	int i, j;
 
-	for (j = 0; j < vptr->num_txq; j++) {
-		if (vptr->td_infos[j] == NULL)
+	for (j = 0; j < vptr->tx.numq; j++) {
+		if (vptr->tx.infos[j] == NULL)
 			continue;
 		for (i = 0; i < vptr->options.numtx; i++) {
 			velocity_free_td_ring_entry(vptr, j, i);
 
 		}
-		kfree(vptr->td_infos[j]);
-		vptr->td_infos[j] = NULL;
+		kfree(vptr->tx.infos[j]);
+		vptr->tx.infos[j] = NULL;
 	}
 }
 
@@ -1374,13 +1374,13 @@ static void velocity_free_td_ring(struct velocity_info *vptr)
 static int velocity_rx_srv(struct velocity_info *vptr, int status)
 {
 	struct net_device_stats *stats = &vptr->stats;
-	int rd_curr = vptr->rd_curr;
+	int rd_curr = vptr->rx.curr;
 	int works = 0;
 
 	do {
-		struct rx_desc *rd = vptr->rd_ring + rd_curr;
+		struct rx_desc *rd = vptr->rx.ring + rd_curr;
 
-		if (!vptr->rd_info[rd_curr].skb)
+		if (!vptr->rx.info[rd_curr].skb)
 			break;
 
 		if (rd->rdesc0.len & OWNED_BY_NIC)
@@ -1412,7 +1412,7 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status)
 			rd_curr = 0;
 	} while (++works <= 15);
 
-	vptr->rd_curr = rd_curr;
+	vptr->rx.curr = rd_curr;
 
 	if ((works > 0) && (velocity_rx_refill(vptr) > 0))
 		velocity_give_many_rx_descs(vptr);
@@ -1510,8 +1510,8 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
 {
 	void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
 	struct net_device_stats *stats = &vptr->stats;
-	struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);
-	struct rx_desc *rd = &(vptr->rd_ring[idx]);
+	struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
+	struct rx_desc *rd = &(vptr->rx.ring[idx]);
 	int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
 	struct sk_buff *skb;
 
@@ -1527,7 +1527,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
 	skb = rd_info->skb;
 
 	pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,
-				    vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);
+				    vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
 
 	/*
 	 * Drop frame not meeting IEEE 802.3
@@ -1550,7 +1550,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
 		rd_info->skb = NULL;
 	}
 
-	pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz,
+	pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
 		   PCI_DMA_FROMDEVICE);
 
 	skb_put(skb, pkt_len - 4);
@@ -1580,10 +1580,10 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
 
 static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
 {
-	struct rx_desc *rd = &(vptr->rd_ring[idx]);
-	struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);
+	struct rx_desc *rd = &(vptr->rx.ring[idx]);
+	struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
 
-	rd_info->skb = netdev_alloc_skb(vptr->dev, vptr->rx_buf_sz + 64);
+	rd_info->skb = dev_alloc_skb(vptr->rx.buf_sz + 64);
 	if (rd_info->skb == NULL)
 		return -ENOMEM;
 
@@ -1592,14 +1592,15 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
 	 * 64byte alignment.
 	 */
 	skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63);
-	rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);
+	rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data,
+					  vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
 
 	/*
 	 * Fill in the descriptor to match
 	 */
 
 	*((u32 *) & (rd->rdesc0)) = 0;
-	rd->size = cpu_to_le16(vptr->rx_buf_sz) | RX_INTEN;
+	rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN;
 	rd->pa_low = cpu_to_le32(rd_info->skb_dma);
 	rd->pa_high = 0;
 	return 0;
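
The skb_reserve() above exists for the 64-byte receive-buffer alignment the comment mentions, derived from the low bits of skb->data. As a refresher on alignment padding in general, a generic round-up sketch (this is the textbook expression, not the driver's exact one):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* (-addr) & 63 is how many bytes to skip so the buffer starts on a
	 * 64-byte boundary; the address here is invented. */
	uintptr_t addr = 0x1007;
	uintptr_t pad = (-addr) & 63;

	printf("addr=%#lx pad=%lu aligned=%#lx\n",
	       (unsigned long)addr, (unsigned long)pad,
	       (unsigned long)(addr + pad));
	return 0;
}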
@@ -1625,15 +1626,15 @@ static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
 	struct velocity_td_info *tdinfo;
 	struct net_device_stats *stats = &vptr->stats;
 
-	for (qnum = 0; qnum < vptr->num_txq; qnum++) {
-		for (idx = vptr->td_tail[qnum]; vptr->td_used[qnum] > 0;
+	for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
+		for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
 			idx = (idx + 1) % vptr->options.numtx) {
 
 			/*
 			 * Get Tx Descriptor
 			 */
-			td = &(vptr->td_rings[qnum][idx]);
-			tdinfo = &(vptr->td_infos[qnum][idx]);
+			td = &(vptr->tx.rings[qnum][idx]);
+			tdinfo = &(vptr->tx.infos[qnum][idx]);
 
 			if (td->tdesc0.len & OWNED_BY_NIC)
 				break;
@@ -1657,9 +1658,9 @@ static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
 				stats->tx_bytes += tdinfo->skb->len;
 			}
 			velocity_free_tx_buf(vptr, tdinfo);
-			vptr->td_used[qnum]--;
+			vptr->tx.used[qnum]--;
 		}
-		vptr->td_tail[qnum] = idx;
+		vptr->tx.tail[qnum] = idx;
 
 		if (AVAIL_TD(vptr, qnum) < 1) {
 			full = 1;
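
velocity_tx_srv() walks each queue from tx.tail, retiring completed descriptors until it meets one the NIC still owns, then records the new tail. A minimal standalone model of that walk (ring size, indices and ownership pattern invented):

#include <stdio.h>

int main(void)
{
	int numtx = 8, tail = 5, used = 4;	/* 4 descriptors in flight: 5,6,7,0 */
	int owned_by_nic[8] = { [7] = 1 };	/* NIC not yet done with desc 7 */
	int idx;

	for (idx = tail; used > 0; idx = (idx + 1) % numtx) {
		if (owned_by_nic[idx])
			break;		/* still in flight, stop here */
		used--;			/* reclaim: free skb, unmap buffers */
	}
	tail = idx;
	printf("tail=%d used=%d\n", tail, used);	/* tail=7 used=2 */
	return 0;
}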
@@ -2056,9 +2057,9 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	spin_lock_irqsave(&vptr->lock, flags);
 
-	index = vptr->td_curr[qnum];
-	td_ptr = &(vptr->td_rings[qnum][index]);
-	tdinfo = &(vptr->td_infos[qnum][index]);
+	index = vptr->tx.curr[qnum];
+	td_ptr = &(vptr->tx.rings[qnum][index]);
+	tdinfo = &(vptr->tx.infos[qnum][index]);
 
 	td_ptr->tdesc1.TCR = TCR0_TIC;
 	td_ptr->td_buf[0].size &= ~TD_QUEUE;
@@ -2071,9 +2072,9 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
 		skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
 		tdinfo->skb_dma[0] = tdinfo->buf_dma;
 		td_ptr->tdesc0.len = len;
-		td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
-		td_ptr->td_buf[0].pa_high = 0;
-		td_ptr->td_buf[0].size = len;	/* queue is 0 anyway */
+		td_ptr->tx.buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
+		td_ptr->tx.buf[0].pa_high = 0;
+		td_ptr->tx.buf[0].size = len;	/* queue is 0 anyway */
 		tdinfo->nskb_dma = 1;
 	} else {
 		int i = 0;
@@ -2084,9 +2085,9 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
 		td_ptr->tdesc0.len = len;
 
 		/* FIXME: support 48bit DMA later */
-		td_ptr->td_buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma);
-		td_ptr->td_buf[i].pa_high = 0;
-		td_ptr->td_buf[i].size = cpu_to_le16(skb_headlen(skb));
+		td_ptr->tx.buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma);
+		td_ptr->tx.buf[i].pa_high = 0;
+		td_ptr->tx.buf[i].size = cpu_to_le16(skb_headlen(skb));
 
 		for (i = 0; i < nfrags; i++) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -2094,9 +2095,9 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
 
 			tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE);
 
-			td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
-			td_ptr->td_buf[i + 1].pa_high = 0;
-			td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size);
+			td_ptr->tx.buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
+			td_ptr->tx.buf[i + 1].pa_high = 0;
+			td_ptr->tx.buf[i + 1].size = cpu_to_le16(frag->size);
 		}
 		tdinfo->nskb_dma = i - 1;
 	}
@@ -2142,13 +2143,13 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (prev < 0)
 			prev = vptr->options.numtx - 1;
 		td_ptr->tdesc0.len |= OWNED_BY_NIC;
-		vptr->td_used[qnum]++;
-		vptr->td_curr[qnum] = (index + 1) % vptr->options.numtx;
+		vptr->tx.used[qnum]++;
+		vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
 
 		if (AVAIL_TD(vptr, qnum) < 1)
 			netif_stop_queue(dev);
 
-		td_ptr = &(vptr->td_rings[qnum][prev]);
+		td_ptr = &(vptr->tx.rings[qnum][prev]);
 		td_ptr->td_buf[0].size |= TD_QUEUE;
 		mac_tx_queue_wake(vptr->mac_regs, qnum);
 	}
@@ -3405,8 +3406,8 @@ static int velocity_resume(struct pci_dev *pdev)
 
 	velocity_tx_srv(vptr, 0);
 
-	for (i = 0; i < vptr->num_txq; i++) {
-		if (vptr->td_used[i]) {
+	for (i = 0; i < vptr->tx.numq; i++) {
+		if (vptr->tx.used[i]) {
 			mac_tx_queue_wake(vptr->mac_regs, i);
 		}
 	}
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index 86446147284c..1b95b04c9257 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -1494,6 +1494,10 @@ struct velocity_opt {
 	u32 flags;
 };
 
+#define AVAIL_TD(p,q)	((p)->options.numtx-((p)->tx.used[(q)]))
+
+#define GET_RD_BY_IDX(vptr, idx)	(vptr->rd_ring[idx])
+
 struct velocity_info {
 	struct list_head list;
 
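
The relocated AVAIL_TD macro computes free slots from the ring size and the per-queue in-flight count, which is how velocity_xmit() decides when to stop the queue. A toy expansion (cut-down stand-in structs and invented values, just enough to exercise the macro):

#include <stdio.h>

#define TX_QUEUE_NO 4

struct velocity_opt { int numtx; };
struct velocity_info_model {
	struct velocity_opt options;
	struct { int used[TX_QUEUE_NO]; } tx;
};

#define AVAIL_TD(p,q)	((p)->options.numtx - ((p)->tx.used[(q)]))

int main(void)
{
	struct velocity_info_model v = { .options = { .numtx = 64 } };

	v.tx.used[0] = 63;
	/* velocity_xmit() calls netif_stop_queue() when this drops below 1 */
	printf("free TDs on queue 0: %d\n", AVAIL_TD(&v, 0));
	return 0;
}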
@@ -1501,9 +1505,6 @@ struct velocity_info {
 	struct net_device *dev;
 	struct net_device_stats stats;
 
-	dma_addr_t rd_pool_dma;
-	dma_addr_t td_pool_dma[TX_QUEUE_NO];
-
 	struct vlan_group *vlgrp;
 	u8 ip_addr[4];
 	enum chip_type chip_id;
@@ -1512,25 +1513,29 @@ struct velocity_info {
 	unsigned long memaddr;
 	unsigned long ioaddr;
 
-	u8 rev_id;
-
-#define AVAIL_TD(p,q)	((p)->options.numtx-((p)->td_used[(q)]))
+	struct tx_info {
+		int numq;
+
+		/* FIXME: the locality of the data seems rather poor. */
+		int used[TX_QUEUE_NO];
+		int curr[TX_QUEUE_NO];
+		int tail[TX_QUEUE_NO];
+		struct tx_desc *rings[TX_QUEUE_NO];
+		struct velocity_td_info *infos[TX_QUEUE_NO];
+		dma_addr_t pool_dma[TX_QUEUE_NO];
+	} tx;
+
+	struct rx_info {
+		int buf_sz;
+
+		int dirty;
+		int curr;
+		u32 filled;
+		struct rx_desc *ring;
+		struct velocity_rd_info *info;	/* It's an array */
+		dma_addr_t pool_dma;
+	} rx;
 
-	int num_txq;
-
-	volatile int td_used[TX_QUEUE_NO];
-	int td_curr[TX_QUEUE_NO];
-	int td_tail[TX_QUEUE_NO];
-	struct tx_desc *td_rings[TX_QUEUE_NO];
-	struct velocity_td_info *td_infos[TX_QUEUE_NO];
-
-	int rd_curr;
-	int rd_dirty;
-	u32 rd_filled;
-	struct rx_desc *rd_ring;
-	struct velocity_rd_info *rd_info;	/* It's an array */
-
-#define GET_RD_BY_IDX(vptr, idx)	(vptr->rd_ring[idx])
 	u32 mib_counter[MAX_HW_MIB_COUNTER];
 	struct velocity_opt options;
 
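
One plausible reading of the new FIXME: keeping per-queue state as parallel arrays means used[q], curr[q] and tail[q] for a given queue are strided apart rather than adjacent. A standalone way to eyeball that (cut-down model struct assuming TX_QUEUE_NO of 4; real offsets depend on the full layout):

#include <stdio.h>
#include <stddef.h>

#define TX_QUEUE_NO 4

struct tx_info_model {
	int numq;
	int used[TX_QUEUE_NO];
	int curr[TX_QUEUE_NO];
	int tail[TX_QUEUE_NO];
	void *rings[TX_QUEUE_NO];
	void *infos[TX_QUEUE_NO];
	unsigned long long pool_dma[TX_QUEUE_NO];
};

int main(void)
{
	/* queue 0's hot fields sit a full array apart from each other */
	printf("used[0] at %zu, curr[0] at %zu, tail[0] at %zu\n",
	       offsetof(struct tx_info_model, used),
	       offsetof(struct tx_info_model, curr),
	       offsetof(struct tx_info_model, tail));
	return 0;
}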
@@ -1538,7 +1543,6 @@ struct velocity_info {
 
 	u32 flags;
 
-	int rx_buf_sz;
 	u32 mii_status;
 	u32 phy_id;
 	int multicast_limit;
@@ -1554,8 +1558,8 @@ struct velocity_info {
 	struct velocity_context context;
 
 	u32 ticks;
-	u32 rx_bytes;
 
+	u8 rev_id;
 };
 
 /**