author	Jeff Garzik <jeff@garzik.org>	2008-08-07 02:05:01 -0400
committer	Jeff Garzik <jgarzik@redhat.com>	2008-08-07 02:05:01 -0400
commit	8bad4afe1257585967a1b38cd3f21324b260812b (patch)
tree	ff88d4b96d9bd47609d403537aac2931dd498774
parent	f71eb1a24a8cdde8d388c8f93e935aa7ac491047 (diff)
parent	3c4dc7115dfdb9e0450b7a3b0649948f5356d4af (diff)
Merge branch 'via-velocity' of git://git.kernel.org/pub/scm/linux/kernel/git/romieu/netdev-2.6 into tmp
-rw-r--r--	drivers/net/via-velocity.c	301
-rw-r--r--	drivers/net/via-velocity.h	50
2 files changed, 198 insertions, 153 deletions
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 370ce30f2f45..007c12970065 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -662,6 +662,10 @@ static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid
 	spin_unlock_irq(&vptr->lock);
 }
 
+static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
+{
+	vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0;
+}
 
 /**
  *	velocity_rx_reset	-	handle a receive reset
@@ -677,16 +681,16 @@ static void velocity_rx_reset(struct velocity_info *vptr)
 	struct mac_regs __iomem * regs = vptr->mac_regs;
 	int i;
 
-	vptr->rd_dirty = vptr->rd_filled = vptr->rd_curr = 0;
+	velocity_init_rx_ring_indexes(vptr);
 
 	/*
 	 *	Init state, all RD entries belong to the NIC
 	 */
 	for (i = 0; i < vptr->options.numrx; ++i)
-		vptr->rd_ring[i].rdesc0.len |= OWNED_BY_NIC;
+		vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC;
 
 	writew(vptr->options.numrx, &regs->RBRDU);
-	writel(vptr->rd_pool_dma, &regs->RDBaseLo);
+	writel(vptr->rx.pool_dma, &regs->RDBaseLo);
 	writew(0, &regs->RDIdx);
 	writew(vptr->options.numrx - 1, &regs->RDCSize);
 }
@@ -779,15 +783,15 @@ static void velocity_init_registers(struct velocity_info *vptr,
 
 	vptr->int_mask = INT_MASK_DEF;
 
-	writel(vptr->rd_pool_dma, &regs->RDBaseLo);
+	writel(vptr->rx.pool_dma, &regs->RDBaseLo);
 	writew(vptr->options.numrx - 1, &regs->RDCSize);
 	mac_rx_queue_run(regs);
 	mac_rx_queue_wake(regs);
 
 	writew(vptr->options.numtx - 1, &regs->TDCSize);
 
-	for (i = 0; i < vptr->num_txq; i++) {
-		writel(vptr->td_pool_dma[i], &regs->TDBaseLo[i]);
+	for (i = 0; i < vptr->tx.numq; i++) {
+		writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]);
 		mac_tx_queue_run(regs, i);
 	}
 
@@ -1047,7 +1051,7 @@ static void __devinit velocity_init_info(struct pci_dev *pdev,
 
 	vptr->pdev = pdev;
 	vptr->chip_id = info->chip_id;
-	vptr->num_txq = info->txqueue;
+	vptr->tx.numq = info->txqueue;
 	vptr->multicast_limit = MCAM_SIZE;
 	spin_lock_init(&vptr->lock);
 	INIT_LIST_HEAD(&vptr->list);
@@ -1093,14 +1097,14 @@ static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pc
 }
 
 /**
- *	velocity_init_rings	-	set up DMA rings
+ *	velocity_init_dma_rings	-	set up DMA rings
  *	@vptr: Velocity to set up
  *
  *	Allocate PCI mapped DMA rings for the receive and transmit layer
  *	to use.
  */
 
-static int velocity_init_rings(struct velocity_info *vptr)
+static int velocity_init_dma_rings(struct velocity_info *vptr)
 {
 	struct velocity_opt *opt = &vptr->options;
 	const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
@@ -1116,7 +1120,7 @@ static int velocity_init_rings(struct velocity_info *vptr)
 	 * pci_alloc_consistent() fulfills the requirement for 64 bytes
 	 * alignment
 	 */
-	pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->num_txq +
+	pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq +
 			rx_ring_size, &pool_dma);
 	if (!pool) {
 		dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n",
@@ -1124,15 +1128,15 @@ static int velocity_init_rings(struct velocity_info *vptr)
 		return -ENOMEM;
 	}
 
-	vptr->rd_ring = pool;
-	vptr->rd_pool_dma = pool_dma;
+	vptr->rx.ring = pool;
+	vptr->rx.pool_dma = pool_dma;
 
 	pool += rx_ring_size;
 	pool_dma += rx_ring_size;
 
-	for (i = 0; i < vptr->num_txq; i++) {
-		vptr->td_rings[i] = pool;
-		vptr->td_pool_dma[i] = pool_dma;
+	for (i = 0; i < vptr->tx.numq; i++) {
+		vptr->tx.rings[i] = pool;
+		vptr->tx.pool_dma[i] = pool_dma;
 		pool += tx_ring_size;
 		pool_dma += tx_ring_size;
 	}
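Note: velocity_init_dma_rings() makes one coherent DMA allocation and carves it up, RX ring first, then one TX ring per queue, advancing the CPU pointer and the bus address in lockstep. A standalone sketch of that carving, with hypothetical sizes and plain malloc standing in for pci_alloc_consistent():

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        const size_t rx_sz = 256 * 16, tx_sz = 256 * 16;  /* hypothetical ring sizes */
        enum { NUMQ = 2 };
        uint8_t *tx_ring[NUMQ];
        uintptr_t tx_dma[NUMQ];

        uint8_t *pool = malloc(rx_sz + NUMQ * tx_sz);
        uintptr_t pool_dma = (uintptr_t)pool;  /* stand-in for the bus address */
        if (!pool)
            return 1;

        uint8_t *rx_ring = pool;               /* RX ring sits first... */
        uintptr_t rx_dma = pool_dma;

        pool += rx_sz;                         /* ...then one TX ring per queue */
        pool_dma += rx_sz;
        for (int i = 0; i < NUMQ; i++) {
            tx_ring[i] = pool;
            tx_dma[i] = pool_dma;
            pool += tx_sz;
            pool_dma += tx_sz;
        }

        printf("rx at %p (dma 0x%lx), tx0 at %p (dma 0x%lx)\n",
               (void *)rx_ring, (unsigned long)rx_dma,
               (void *)tx_ring[0], (unsigned long)tx_dma[0]);
        free(rx_ring);                         /* rx_ring still points at the pool base */
        return 0;
    }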
@@ -1141,18 +1145,18 @@ static int velocity_init_rings(struct velocity_info *vptr)
 }
 
 /**
- *	velocity_free_rings	-	free PCI ring pointers
+ *	velocity_free_dma_rings	-	free PCI ring pointers
  *	@vptr: Velocity to free from
  *
  *	Clean up the PCI ring buffers allocated to this velocity.
  */
 
-static void velocity_free_rings(struct velocity_info *vptr)
+static void velocity_free_dma_rings(struct velocity_info *vptr)
 {
 	const int size = vptr->options.numrx * sizeof(struct rx_desc) +
-		vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq;
+		vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
 
-	pci_free_consistent(vptr->pdev, size, vptr->rd_ring, vptr->rd_pool_dma);
+	pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma);
 }
 
 static void velocity_give_many_rx_descs(struct velocity_info *vptr)
@@ -1164,44 +1168,44 @@ static void velocity_give_many_rx_descs(struct velocity_info *vptr)
 	 *	RD number must be equal to 4X per hardware spec
 	 *	(programming guide rev 1.20, p.13)
 	 */
-	if (vptr->rd_filled < 4)
+	if (vptr->rx.filled < 4)
 		return;
 
 	wmb();
 
-	unusable = vptr->rd_filled & 0x0003;
-	dirty = vptr->rd_dirty - unusable;
-	for (avail = vptr->rd_filled & 0xfffc; avail; avail--) {
+	unusable = vptr->rx.filled & 0x0003;
+	dirty = vptr->rx.dirty - unusable;
+	for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
 		dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
-		vptr->rd_ring[dirty].rdesc0.len |= OWNED_BY_NIC;
+		vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
 	}
 
-	writew(vptr->rd_filled & 0xfffc, &regs->RBRDU);
-	vptr->rd_filled = unusable;
+	writew(vptr->rx.filled & 0xfffc, &regs->RBRDU);
+	vptr->rx.filled = unusable;
 }
 
 static int velocity_rx_refill(struct velocity_info *vptr)
 {
-	int dirty = vptr->rd_dirty, done = 0;
+	int dirty = vptr->rx.dirty, done = 0;
 
 	do {
-		struct rx_desc *rd = vptr->rd_ring + dirty;
+		struct rx_desc *rd = vptr->rx.ring + dirty;
 
 		/* Fine for an all zero Rx desc at init time as well */
 		if (rd->rdesc0.len & OWNED_BY_NIC)
 			break;
 
-		if (!vptr->rd_info[dirty].skb) {
+		if (!vptr->rx.info[dirty].skb) {
 			if (velocity_alloc_rx_buf(vptr, dirty) < 0)
 				break;
 		}
 		done++;
 		dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
-	} while (dirty != vptr->rd_curr);
+	} while (dirty != vptr->rx.curr);
 
 	if (done) {
-		vptr->rd_dirty = dirty;
-		vptr->rd_filled += done;
+		vptr->rx.dirty = dirty;
+		vptr->rx.filled += done;
 	}
 
 	return done;
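Note: per the hardware constraint quoted in the comment above (descriptors go back to the NIC four at a time), velocity_give_many_rx_descs() masks rx.filled with 0xfffc for the RBRDU write and carries the remainder (rx.filled & 3) into the next pass. A minimal sketch of that arithmetic, outside the driver:

    #include <assert.h>

    /* Return the count to hand back (a multiple of 4); the caller
     * keeps 'filled & 3' as the residue for the next pass. */
    static unsigned int batch_of_four(unsigned int filled)
    {
        return filled & 0xfffc;
    }

    int main(void)
    {
        unsigned int filled = 11;
        unsigned int batch = batch_of_four(filled);   /* 8 */
        unsigned int residue = filled & 0x0003;       /* 3, carried over */

        assert(batch == 8 && residue == 3);
        assert(batch % 4 == 0);
        return 0;
    }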
@@ -1209,7 +1213,7 @@ static int velocity_rx_refill(struct velocity_info *vptr)
 
 static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
 {
-	vptr->rx_buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
+	vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
 }
 
 /**
@@ -1224,12 +1228,12 @@ static int velocity_init_rd_ring(struct velocity_info *vptr)
 {
 	int ret = -ENOMEM;
 
-	vptr->rd_info = kcalloc(vptr->options.numrx,
+	vptr->rx.info = kcalloc(vptr->options.numrx,
 				sizeof(struct velocity_rd_info), GFP_KERNEL);
-	if (!vptr->rd_info)
+	if (!vptr->rx.info)
 		goto out;
 
-	vptr->rd_filled = vptr->rd_dirty = vptr->rd_curr = 0;
+	velocity_init_rx_ring_indexes(vptr);
 
 	if (velocity_rx_refill(vptr) != vptr->options.numrx) {
 		VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
@@ -1255,18 +1259,18 @@ static void velocity_free_rd_ring(struct velocity_info *vptr)
 {
 	int i;
 
-	if (vptr->rd_info == NULL)
+	if (vptr->rx.info == NULL)
 		return;
 
 	for (i = 0; i < vptr->options.numrx; i++) {
-		struct velocity_rd_info *rd_info = &(vptr->rd_info[i]);
-		struct rx_desc *rd = vptr->rd_ring + i;
+		struct velocity_rd_info *rd_info = &(vptr->rx.info[i]);
+		struct rx_desc *rd = vptr->rx.ring + i;
 
 		memset(rd, 0, sizeof(*rd));
 
 		if (!rd_info->skb)
 			continue;
-		pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz,
+		pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
 				 PCI_DMA_FROMDEVICE);
 		rd_info->skb_dma = (dma_addr_t) NULL;
 
@@ -1274,8 +1278,8 @@ static void velocity_free_rd_ring(struct velocity_info *vptr)
 		rd_info->skb = NULL;
 	}
 
-	kfree(vptr->rd_info);
-	vptr->rd_info = NULL;
+	kfree(vptr->rx.info);
+	vptr->rx.info = NULL;
 }
 
 /**
@@ -1293,19 +1297,19 @@ static int velocity_init_td_ring(struct velocity_info *vptr)
 	unsigned int j;
 
 	/* Init the TD ring entries */
-	for (j = 0; j < vptr->num_txq; j++) {
-		curr = vptr->td_pool_dma[j];
+	for (j = 0; j < vptr->tx.numq; j++) {
+		curr = vptr->tx.pool_dma[j];
 
-		vptr->td_infos[j] = kcalloc(vptr->options.numtx,
+		vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
 					    sizeof(struct velocity_td_info),
 					    GFP_KERNEL);
-		if (!vptr->td_infos[j]) {
+		if (!vptr->tx.infos[j]) {
 			while(--j >= 0)
-				kfree(vptr->td_infos[j]);
+				kfree(vptr->tx.infos[j]);
 			return -ENOMEM;
 		}
 
-		vptr->td_tail[j] = vptr->td_curr[j] = vptr->td_used[j] = 0;
+		vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0;
 	}
 	return 0;
 }
@@ -1317,7 +1321,7 @@ static int velocity_init_td_ring(struct velocity_info *vptr)
 static void velocity_free_td_ring_entry(struct velocity_info *vptr,
 							 int q, int n)
 {
-	struct velocity_td_info * td_info = &(vptr->td_infos[q][n]);
+	struct velocity_td_info * td_info = &(vptr->tx.infos[q][n]);
 	int i;
 
 	if (td_info == NULL)
@@ -1349,15 +1353,15 @@ static void velocity_free_td_ring(struct velocity_info *vptr)
 {
 	int i, j;
 
-	for (j = 0; j < vptr->num_txq; j++) {
-		if (vptr->td_infos[j] == NULL)
+	for (j = 0; j < vptr->tx.numq; j++) {
+		if (vptr->tx.infos[j] == NULL)
 			continue;
 		for (i = 0; i < vptr->options.numtx; i++) {
 			velocity_free_td_ring_entry(vptr, j, i);
 
 		}
-		kfree(vptr->td_infos[j]);
-		vptr->td_infos[j] = NULL;
+		kfree(vptr->tx.infos[j]);
+		vptr->tx.infos[j] = NULL;
 	}
 }
 
@@ -1374,13 +1378,13 @@ static void velocity_free_td_ring(struct velocity_info *vptr)
 static int velocity_rx_srv(struct velocity_info *vptr, int status)
 {
 	struct net_device_stats *stats = &vptr->stats;
-	int rd_curr = vptr->rd_curr;
+	int rd_curr = vptr->rx.curr;
 	int works = 0;
 
 	do {
-		struct rx_desc *rd = vptr->rd_ring + rd_curr;
+		struct rx_desc *rd = vptr->rx.ring + rd_curr;
 
-		if (!vptr->rd_info[rd_curr].skb)
+		if (!vptr->rx.info[rd_curr].skb)
 			break;
 
 		if (rd->rdesc0.len & OWNED_BY_NIC)
@@ -1412,7 +1416,7 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status)
 			rd_curr = 0;
 	} while (++works <= 15);
 
-	vptr->rd_curr = rd_curr;
+	vptr->rx.curr = rd_curr;
 
 	if ((works > 0) && (velocity_rx_refill(vptr) > 0))
 		velocity_give_many_rx_descs(vptr);
@@ -1510,8 +1514,8 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
 {
 	void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
 	struct net_device_stats *stats = &vptr->stats;
-	struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);
-	struct rx_desc *rd = &(vptr->rd_ring[idx]);
+	struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
+	struct rx_desc *rd = &(vptr->rx.ring[idx]);
 	int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
 	struct sk_buff *skb;
 
@@ -1527,7 +1531,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
 	skb = rd_info->skb;
 
 	pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,
-				    vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);
+				    vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
 
 	/*
 	 *	Drop frame not meeting IEEE 802.3
@@ -1550,7 +1554,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
 		rd_info->skb = NULL;
 	}
 
-	pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz,
+	pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
 		   PCI_DMA_FROMDEVICE);
 
 	skb_put(skb, pkt_len - 4);
@@ -1580,10 +1584,10 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
 
 static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
 {
-	struct rx_desc *rd = &(vptr->rd_ring[idx]);
-	struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);
+	struct rx_desc *rd = &(vptr->rx.ring[idx]);
+	struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
 
-	rd_info->skb = netdev_alloc_skb(vptr->dev, vptr->rx_buf_sz + 64);
+	rd_info->skb = dev_alloc_skb(vptr->rx.buf_sz + 64);
 	if (rd_info->skb == NULL)
 		return -ENOMEM;
 
@@ -1592,14 +1596,15 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
 	 *	64byte alignment.
 	 */
 	skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63);
-	rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);
+	rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data,
+					  vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
 
 	/*
 	 *	Fill in the descriptor to match
 	 */
 
 	*((u32 *) & (rd->rdesc0)) = 0;
-	rd->size = cpu_to_le16(vptr->rx_buf_sz) | RX_INTEN;
+	rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN;
 	rd->pa_low = cpu_to_le32(rd_info->skb_dma);
 	rd->pa_high = 0;
 	return 0;
@@ -1625,15 +1630,15 @@ static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
 	struct velocity_td_info *tdinfo;
 	struct net_device_stats *stats = &vptr->stats;
 
-	for (qnum = 0; qnum < vptr->num_txq; qnum++) {
-		for (idx = vptr->td_tail[qnum]; vptr->td_used[qnum] > 0;
+	for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
+		for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
 			idx = (idx + 1) % vptr->options.numtx) {
 
 			/*
 			 *	Get Tx Descriptor
 			 */
-			td = &(vptr->td_rings[qnum][idx]);
-			tdinfo = &(vptr->td_infos[qnum][idx]);
+			td = &(vptr->tx.rings[qnum][idx]);
+			tdinfo = &(vptr->tx.infos[qnum][idx]);
 
 			if (td->tdesc0.len & OWNED_BY_NIC)
 				break;
@@ -1657,9 +1662,9 @@ static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
 				stats->tx_bytes += tdinfo->skb->len;
 			}
 			velocity_free_tx_buf(vptr, tdinfo);
-			vptr->td_used[qnum]--;
+			vptr->tx.used[qnum]--;
 		}
-		vptr->td_tail[qnum] = idx;
+		vptr->tx.tail[qnum] = idx;
 
 		if (AVAIL_TD(vptr, qnum) < 1) {
 			full = 1;
@@ -1846,6 +1851,40 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_
 	tdinfo->skb = NULL;
 }
 
+static int velocity_init_rings(struct velocity_info *vptr, int mtu)
+{
+	int ret;
+
+	velocity_set_rxbufsize(vptr, mtu);
+
+	ret = velocity_init_dma_rings(vptr);
+	if (ret < 0)
+		goto out;
+
+	ret = velocity_init_rd_ring(vptr);
+	if (ret < 0)
+		goto err_free_dma_rings_0;
+
+	ret = velocity_init_td_ring(vptr);
+	if (ret < 0)
+		goto err_free_rd_ring_1;
+out:
+	return ret;
+
+err_free_rd_ring_1:
+	velocity_free_rd_ring(vptr);
+err_free_dma_rings_0:
+	velocity_free_dma_rings(vptr);
+	goto out;
+}
+
+static void velocity_free_rings(struct velocity_info *vptr)
+{
+	velocity_free_td_ring(vptr);
+	velocity_free_rd_ring(vptr);
+	velocity_free_dma_rings(vptr);
+}
+
 /**
  *	velocity_open		-	interface activation callback
  *	@dev: network layer device to open
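Note: the new velocity_init_rings() composes the three init stages and unwinds them in reverse order when a later stage fails, the usual kernel goto-ladder idiom; velocity_free_rings() mirrors it for the full teardown. A reduced sketch of the idiom with hypothetical stage functions:

    #include <stdio.h>

    static int  stage_a(void) { return 0; }
    static int  stage_b(void) { return 0; }
    static int  stage_c(void) { return -1; }   /* pretend the last stage fails */
    static void undo_b(void)  { puts("undo b"); }
    static void undo_a(void)  { puts("undo a"); }

    static int init_all(void)
    {
        int ret;

        ret = stage_a();
        if (ret < 0)
            goto out;
        ret = stage_b();
        if (ret < 0)
            goto err_undo_a;
        ret = stage_c();
        if (ret < 0)
            goto err_undo_b;
    out:
        return ret;

    err_undo_b:                 /* unwind in reverse order of setup */
        undo_b();
    err_undo_a:
        undo_a();
        goto out;
    }

    int main(void)
    {
        return init_all() < 0;  /* prints "undo b" then "undo a" */
    }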
@@ -1862,20 +1901,10 @@ static int velocity_open(struct net_device *dev)
 	struct velocity_info *vptr = netdev_priv(dev);
 	int ret;
 
-	velocity_set_rxbufsize(vptr, dev->mtu);
-
-	ret = velocity_init_rings(vptr);
+	ret = velocity_init_rings(vptr, dev->mtu);
 	if (ret < 0)
 		goto out;
 
-	ret = velocity_init_rd_ring(vptr);
-	if (ret < 0)
-		goto err_free_desc_rings;
-
-	ret = velocity_init_td_ring(vptr);
-	if (ret < 0)
-		goto err_free_rd_ring;
-
 	/* Ensure chip is running */
 	pci_set_power_state(vptr->pdev, PCI_D0);
 
@@ -1888,7 +1917,8 @@ static int velocity_open(struct net_device *dev)
 	if (ret < 0) {
 		/* Power down the chip */
 		pci_set_power_state(vptr->pdev, PCI_D3hot);
-		goto err_free_td_ring;
+		velocity_free_rings(vptr);
+		goto out;
 	}
 
 	mac_enable_int(vptr->mac_regs);
@@ -1896,14 +1926,6 @@ static int velocity_open(struct net_device *dev)
 	vptr->flags |= VELOCITY_FLAGS_OPENED;
 out:
 	return ret;
-
-err_free_td_ring:
-	velocity_free_td_ring(vptr);
-err_free_rd_ring:
-	velocity_free_rd_ring(vptr);
-err_free_desc_rings:
-	velocity_free_rings(vptr);
-	goto out;
 }
 
 /**
@@ -1919,50 +1941,72 @@ err_free_desc_rings:
 static int velocity_change_mtu(struct net_device *dev, int new_mtu)
 {
 	struct velocity_info *vptr = netdev_priv(dev);
-	unsigned long flags;
-	int oldmtu = dev->mtu;
 	int ret = 0;
 
 	if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) {
 		VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n",
 				vptr->dev->name);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out_0;
 	}
 
 	if (!netif_running(dev)) {
 		dev->mtu = new_mtu;
-		return 0;
+		goto out_0;
 	}
 
-	if (new_mtu != oldmtu) {
+	if (dev->mtu != new_mtu) {
+		struct velocity_info *tmp_vptr;
+		unsigned long flags;
+		struct rx_info rx;
+		struct tx_info tx;
+
+		tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL);
+		if (!tmp_vptr) {
+			ret = -ENOMEM;
+			goto out_0;
+		}
+
+		tmp_vptr->dev = dev;
+		tmp_vptr->pdev = vptr->pdev;
+		tmp_vptr->options = vptr->options;
+		tmp_vptr->tx.numq = vptr->tx.numq;
+
+		ret = velocity_init_rings(tmp_vptr, new_mtu);
+		if (ret < 0)
+			goto out_free_tmp_vptr_1;
+
 		spin_lock_irqsave(&vptr->lock, flags);
 
 		netif_stop_queue(dev);
 		velocity_shutdown(vptr);
 
-		velocity_free_td_ring(vptr);
-		velocity_free_rd_ring(vptr);
+		rx = vptr->rx;
+		tx = vptr->tx;
 
-		dev->mtu = new_mtu;
+		vptr->rx = tmp_vptr->rx;
+		vptr->tx = tmp_vptr->tx;
 
-		velocity_set_rxbufsize(vptr, new_mtu);
+		tmp_vptr->rx = rx;
+		tmp_vptr->tx = tx;
 
-		ret = velocity_init_rd_ring(vptr);
-		if (ret < 0)
-			goto out_unlock;
+		dev->mtu = new_mtu;
 
-		ret = velocity_init_td_ring(vptr);
-		if (ret < 0)
-			goto out_unlock;
+		velocity_give_many_rx_descs(vptr);
 
 		velocity_init_registers(vptr, VELOCITY_INIT_COLD);
 
 		mac_enable_int(vptr->mac_regs);
 		netif_start_queue(dev);
-out_unlock:
+
 		spin_unlock_irqrestore(&vptr->lock, flags);
-	}
 
+		velocity_free_rings(tmp_vptr);
+
+out_free_tmp_vptr_1:
+		kfree(tmp_vptr);
+	}
+out_0:
 	return ret;
 }
 
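Note: velocity_change_mtu() now preallocates a complete replacement set of rings in a scratch velocity_info before taking vptr->lock, swaps the rx/tx structs by value inside the critical section, and tears the old rings down after unlocking, so an allocation failure leaves the running device untouched. A reduced sketch of the swap with hypothetical trimmed-down types:

    /* Struct assignment copies the members wholesale, which is what
     * keeps the window under the lock short. */
    struct rx_side { int dirty, curr, filled; void *ring; };
    struct tx_side { int numq; void *rings[2]; };

    struct dev_state { struct rx_side rx; struct tx_side tx; };

    static void swap_rings(struct dev_state *live, struct dev_state *tmp)
    {
        struct rx_side rx = live->rx;   /* save the live rings... */
        struct tx_side tx = live->tx;

        live->rx = tmp->rx;             /* ...install the fresh ones... */
        live->tx = tmp->tx;

        tmp->rx = rx;                   /* ...and let the old ones ride out */
        tmp->tx = tx;                   /* in tmp, to be freed after unlock */
    }

    int main(void)
    {
        struct dev_state live = { { 1, 2, 3, 0 }, { 1, { 0, 0 } } };
        struct dev_state tmp  = { { 0 }, { 0 } };

        swap_rings(&live, &tmp);        /* tmp now holds the old rings */
        return tmp.rx.dirty == 1 ? 0 : 1;
    }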
@@ -2008,9 +2052,6 @@ static int velocity_close(struct net_device *dev)
 	/* Power down the chip */
 	pci_set_power_state(vptr->pdev, PCI_D3hot);
 
-	/* Free the resources */
-	velocity_free_td_ring(vptr);
-	velocity_free_rd_ring(vptr);
 	velocity_free_rings(vptr);
 
 	vptr->flags &= (~VELOCITY_FLAGS_OPENED);
@@ -2056,9 +2097,9 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	spin_lock_irqsave(&vptr->lock, flags);
 
-	index = vptr->td_curr[qnum];
-	td_ptr = &(vptr->td_rings[qnum][index]);
-	tdinfo = &(vptr->td_infos[qnum][index]);
+	index = vptr->tx.curr[qnum];
+	td_ptr = &(vptr->tx.rings[qnum][index]);
+	tdinfo = &(vptr->tx.infos[qnum][index]);
 
 	td_ptr->tdesc1.TCR = TCR0_TIC;
 	td_ptr->td_buf[0].size &= ~TD_QUEUE;
@@ -2071,9 +2112,9 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
 		skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
 		tdinfo->skb_dma[0] = tdinfo->buf_dma;
 		td_ptr->tdesc0.len = len;
-		td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
-		td_ptr->td_buf[0].pa_high = 0;
-		td_ptr->td_buf[0].size = len;	/* queue is 0 anyway */
+		td_ptr->tx.buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
+		td_ptr->tx.buf[0].pa_high = 0;
+		td_ptr->tx.buf[0].size = len;	/* queue is 0 anyway */
 		tdinfo->nskb_dma = 1;
 	} else {
 		int i = 0;
@@ -2084,9 +2125,9 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
 		td_ptr->tdesc0.len = len;
 
 		/* FIXME: support 48bit DMA later */
-		td_ptr->td_buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma);
-		td_ptr->td_buf[i].pa_high = 0;
-		td_ptr->td_buf[i].size = cpu_to_le16(skb_headlen(skb));
+		td_ptr->tx.buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma);
+		td_ptr->tx.buf[i].pa_high = 0;
+		td_ptr->tx.buf[i].size = cpu_to_le16(skb_headlen(skb));
 
 		for (i = 0; i < nfrags; i++) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -2094,9 +2135,9 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
 
 			tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE);
 
-			td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
-			td_ptr->td_buf[i + 1].pa_high = 0;
-			td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size);
+			td_ptr->tx.buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
+			td_ptr->tx.buf[i + 1].pa_high = 0;
+			td_ptr->tx.buf[i + 1].size = cpu_to_le16(frag->size);
 		}
 		tdinfo->nskb_dma = i - 1;
 	}
@@ -2142,13 +2183,13 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (prev < 0)
 			prev = vptr->options.numtx - 1;
 		td_ptr->tdesc0.len |= OWNED_BY_NIC;
-		vptr->td_used[qnum]++;
-		vptr->td_curr[qnum] = (index + 1) % vptr->options.numtx;
+		vptr->tx.used[qnum]++;
+		vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
 
 		if (AVAIL_TD(vptr, qnum) < 1)
 			netif_stop_queue(dev);
 
-		td_ptr = &(vptr->td_rings[qnum][prev]);
+		td_ptr = &(vptr->tx.rings[qnum][prev]);
 		td_ptr->td_buf[0].size |= TD_QUEUE;
 		mac_tx_queue_wake(vptr->mac_regs, qnum);
 	}
@@ -3405,8 +3446,8 @@ static int velocity_resume(struct pci_dev *pdev)
 
 	velocity_tx_srv(vptr, 0);
 
-	for (i = 0; i < vptr->num_txq; i++) {
-		if (vptr->td_used[i]) {
+	for (i = 0; i < vptr->tx.numq; i++) {
+		if (vptr->tx.used[i]) {
 			mac_tx_queue_wake(vptr->mac_regs, i);
 		}
 	}
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index 86446147284c..1b95b04c9257 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -1494,6 +1494,10 @@ struct velocity_opt {
 	u32 flags;
 };
 
+#define AVAIL_TD(p,q)	((p)->options.numtx-((p)->tx.used[(q)]))
+
+#define GET_RD_BY_IDX(vptr, idx)   (vptr->rd_ring[idx])
+
 struct velocity_info {
 	struct list_head list;
 
@@ -1501,9 +1505,6 @@ struct velocity_info {
 	struct net_device *dev;
 	struct net_device_stats stats;
 
-	dma_addr_t rd_pool_dma;
-	dma_addr_t td_pool_dma[TX_QUEUE_NO];
-
 	struct vlan_group    *vlgrp;
 	u8 ip_addr[4];
 	enum chip_type chip_id;
@@ -1512,25 +1513,29 @@ struct velocity_info {
 	unsigned long memaddr;
 	unsigned long ioaddr;
 
-	u8 rev_id;
-
-#define AVAIL_TD(p,q)	((p)->options.numtx-((p)->td_used[(q)]))
+	struct tx_info {
+		int numq;
+
+		/* FIXME: the locality of the data seems rather poor. */
+		int used[TX_QUEUE_NO];
+		int curr[TX_QUEUE_NO];
+		int tail[TX_QUEUE_NO];
+		struct tx_desc *rings[TX_QUEUE_NO];
+		struct velocity_td_info *infos[TX_QUEUE_NO];
+		dma_addr_t pool_dma[TX_QUEUE_NO];
+	} tx;
+
+	struct rx_info {
+		int buf_sz;
+
+		int dirty;
+		int curr;
+		u32 filled;
+		struct rx_desc *ring;
+		struct velocity_rd_info *info;	/* It's an array */
+		dma_addr_t pool_dma;
+	} rx;
 
-	int num_txq;
-
-	volatile int td_used[TX_QUEUE_NO];
-	int td_curr[TX_QUEUE_NO];
-	int td_tail[TX_QUEUE_NO];
-	struct tx_desc *td_rings[TX_QUEUE_NO];
-	struct velocity_td_info *td_infos[TX_QUEUE_NO];
-
-	int rd_curr;
-	int rd_dirty;
-	u32 rd_filled;
-	struct rx_desc *rd_ring;
-	struct velocity_rd_info *rd_info;	/* It's an array */
-
-#define GET_RD_BY_IDX(vptr, idx)   (vptr->rd_ring[idx])
 	u32 mib_counter[MAX_HW_MIB_COUNTER];
 	struct velocity_opt options;
 
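Note: grouping the per-direction state into embedded tx_info/rx_info structs (instead of parallel rd_*/td_* members) is what lets a whole direction be saved, swapped, or handed to a helper as a single value, as the new velocity_change_mtu() path relies on; helpers can also take just one side rather than the whole device-private struct. A reduced sketch with hypothetical fields:

    #include <stddef.h>

    struct rx_info_s {                 /* stand-in for the new embedded rx struct */
        int dirty, curr;
        unsigned int filled;
        void *ring;
    };

    /* A helper can now take only the RX side: */
    static void rx_reset_indexes(struct rx_info_s *rx)
    {
        rx->dirty = rx->curr = 0;
        rx->filled = 0;
    }

    struct velocity_like {
        struct rx_info_s rx;           /* embedded by value, not a pointer */
    };

    int main(void)
    {
        struct velocity_like v = { { 3, 4, 5u, NULL } };

        rx_reset_indexes(&v.rx);
        return v.rx.curr;              /* 0 */
    }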
@@ -1538,7 +1543,6 @@ struct velocity_info {
 
 	u32 flags;
 
-	int rx_buf_sz;
 	u32 mii_status;
 	u32 phy_id;
 	int multicast_limit;
@@ -1554,8 +1558,8 @@ struct velocity_info {
 	struct velocity_context context;
 
 	u32 ticks;
-	u32 rx_bytes;
 
+	u8 rev_id;
 };
 
 /**