author		Andy King <acking@vmware.com>	2013-08-23 12:33:49 -0400
committer	David S. Miller <davem@davemloft.net>	2013-08-27 16:02:02 -0400
commit		b0eb57cb97e7837ebb746404c2c58c6f536f23fa (patch)
tree		d6be118d336bc74951340e440211c847d95e08f2 /drivers/net/vmxnet3
parent		68d7bdcb4c00ec799ad949f4b4c50985539710e2 (diff)
VMXNET3: Add support for virtual IOMMU
This patch adds support for virtual IOMMU to the vmxnet3 module. We
switch to DMA consistent mappings for anything we pass to the device.
There were a few places where we already did this, but using pci_blah();
these have been fixed to use dma_blah(), along with all new occurrences
where we've replaced kmalloc() and friends.

Also fix two small bugs:
1) use after free of rq->buf_info in vmxnet3_rq_destroy()
2) a cpu_to_le32() that should have been a cpu_to_le64()

Acked-by: George Zhang <georgezhang@vmware.com>
Acked-by: Aditya Sarwade <asarwade@vmware.com>
Signed-off-by: Andy King <acking@vmware.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
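The conversion is mechanical but has a point worth noting: the generic
dma_*() calls go through the device's dma_ops, which is what lets a
virtual IOMMU translate the addresses, and virt_to_phys() is no longer a
valid way to produce a device-visible address (hence the new *_pa fields
carried alongside each CPU pointer). A minimal sketch of the pattern,
with made-up helper names for illustration only (not code from this
patch):

	#include <linux/pci.h>
	#include <linux/dma-mapping.h>

	/* Coherent allocations: the old PCI wrappers implied GFP_ATOMIC;
	 * the generic API takes the struct device plus an explicit gfp_t.
	 */
	static void *example_ring_alloc(struct pci_dev *pdev, size_t sz,
					dma_addr_t *pa)
	{
		/* was: pci_alloc_consistent(pdev, sz, pa); */
		return dma_alloc_coherent(&pdev->dev, sz, pa, GFP_KERNEL);
	}

	static void example_ring_free(struct pci_dev *pdev, size_t sz,
				      void *va, dma_addr_t pa)
	{
		/* was: pci_free_consistent(pdev, sz, va, pa); */
		dma_free_coherent(&pdev->dev, sz, va, pa);
	}

	/* Streaming mappings convert the same way.  The PCI_DMA_*
	 * direction values match the generic DMA_* ones numerically,
	 * which is why the patch below can keep passing PCI_DMA_TODEVICE
	 * to dma_*() calls.
	 */
	static dma_addr_t example_buf_map(struct pci_dev *pdev, void *buf,
					  size_t len)
	{
		/* was: pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE); */
		return dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	}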
Diffstat (limited to 'drivers/net/vmxnet3')
-rw-r--r--	drivers/net/vmxnet3/vmxnet3_drv.c	211
-rw-r--r--	drivers/net/vmxnet3/vmxnet3_int.h	10
2 files changed, 138 insertions(+), 83 deletions(-)
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 55a62cae2cb4..7e2788c488ed 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -313,10 +313,10 @@ vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
 		     struct pci_dev *pdev)
 {
 	if (tbi->map_type == VMXNET3_MAP_SINGLE)
-		pci_unmap_single(pdev, tbi->dma_addr, tbi->len,
+		dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len,
 				 PCI_DMA_TODEVICE);
 	else if (tbi->map_type == VMXNET3_MAP_PAGE)
-		pci_unmap_page(pdev, tbi->dma_addr, tbi->len,
+		dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len,
 			       PCI_DMA_TODEVICE);
 	else
 		BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);
@@ -429,25 +429,29 @@ vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
 		   struct vmxnet3_adapter *adapter)
 {
 	if (tq->tx_ring.base) {
-		pci_free_consistent(adapter->pdev, tq->tx_ring.size *
+		dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size *
 				    sizeof(struct Vmxnet3_TxDesc),
 				    tq->tx_ring.base, tq->tx_ring.basePA);
 		tq->tx_ring.base = NULL;
 	}
 	if (tq->data_ring.base) {
-		pci_free_consistent(adapter->pdev, tq->data_ring.size *
+		dma_free_coherent(&adapter->pdev->dev, tq->data_ring.size *
 				    sizeof(struct Vmxnet3_TxDataDesc),
 				    tq->data_ring.base, tq->data_ring.basePA);
 		tq->data_ring.base = NULL;
 	}
 	if (tq->comp_ring.base) {
-		pci_free_consistent(adapter->pdev, tq->comp_ring.size *
+		dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
 				    sizeof(struct Vmxnet3_TxCompDesc),
 				    tq->comp_ring.base, tq->comp_ring.basePA);
 		tq->comp_ring.base = NULL;
 	}
-	kfree(tq->buf_info);
-	tq->buf_info = NULL;
+	if (tq->buf_info) {
+		dma_free_coherent(&adapter->pdev->dev,
+				  tq->tx_ring.size * sizeof(tq->buf_info[0]),
+				  tq->buf_info, tq->buf_info_pa);
+		tq->buf_info = NULL;
+	}
 }
 
 
@@ -496,37 +500,38 @@ static int
 vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
 		  struct vmxnet3_adapter *adapter)
 {
+	size_t sz;
+
 	BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
 	       tq->comp_ring.base || tq->buf_info);
 
-	tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size
-			   * sizeof(struct Vmxnet3_TxDesc),
-			   &tq->tx_ring.basePA);
+	tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
+			tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc),
+			&tq->tx_ring.basePA, GFP_KERNEL);
 	if (!tq->tx_ring.base) {
 		netdev_err(adapter->netdev, "failed to allocate tx ring\n");
 		goto err;
 	}
 
-	tq->data_ring.base = pci_alloc_consistent(adapter->pdev,
-			     tq->data_ring.size *
-			     sizeof(struct Vmxnet3_TxDataDesc),
-			     &tq->data_ring.basePA);
+	tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
+			tq->data_ring.size * sizeof(struct Vmxnet3_TxDataDesc),
+			&tq->data_ring.basePA, GFP_KERNEL);
 	if (!tq->data_ring.base) {
 		netdev_err(adapter->netdev, "failed to allocate data ring\n");
 		goto err;
 	}
 
-	tq->comp_ring.base = pci_alloc_consistent(adapter->pdev,
-			     tq->comp_ring.size *
-			     sizeof(struct Vmxnet3_TxCompDesc),
-			     &tq->comp_ring.basePA);
+	tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
+			tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
+			&tq->comp_ring.basePA, GFP_KERNEL);
 	if (!tq->comp_ring.base) {
 		netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
 		goto err;
 	}
 
-	tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]),
-			       GFP_KERNEL);
+	sz = tq->tx_ring.size * sizeof(tq->buf_info[0]);
+	tq->buf_info = dma_zalloc_coherent(&adapter->pdev->dev, sz,
+					   &tq->buf_info_pa, GFP_KERNEL);
 	if (!tq->buf_info)
 		goto err;
 
@@ -578,7 +583,8 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
 					break;
 				}
 
-				rbi->dma_addr = pci_map_single(adapter->pdev,
+				rbi->dma_addr = dma_map_single(
+						&adapter->pdev->dev,
 						rbi->skb->data, rbi->len,
 						PCI_DMA_FROMDEVICE);
 			} else {
@@ -595,7 +601,8 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
 					rq->stats.rx_buf_alloc_failure++;
 					break;
 				}
-				rbi->dma_addr = pci_map_page(adapter->pdev,
+				rbi->dma_addr = dma_map_page(
+						&adapter->pdev->dev,
 						rbi->page, 0, PAGE_SIZE,
 						PCI_DMA_FROMDEVICE);
 			} else {
@@ -705,7 +712,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 
 		tbi = tq->buf_info + tq->tx_ring.next2fill;
 		tbi->map_type = VMXNET3_MAP_SINGLE;
-		tbi->dma_addr = pci_map_single(adapter->pdev,
+		tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
 				skb->data + buf_offset, buf_size,
 				PCI_DMA_TODEVICE);
 
@@ -1221,7 +1228,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 				goto rcd_done;
 			}
 
-			pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
+			dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr,
+					 rbi->len,
 					 PCI_DMA_FROMDEVICE);
 
 #ifdef VMXNET3_RSS
@@ -1233,7 +1241,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 
 			/* Immediate refill */
 			rbi->skb = new_skb;
-			rbi->dma_addr = pci_map_single(adapter->pdev,
+			rbi->dma_addr = dma_map_single(&adapter->pdev->dev,
 						       rbi->skb->data, rbi->len,
 						       PCI_DMA_FROMDEVICE);
 			rxd->addr = cpu_to_le64(rbi->dma_addr);
@@ -1267,7 +1275,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 			}
 
 			if (rcd->len) {
-				pci_unmap_page(adapter->pdev,
+				dma_unmap_page(&adapter->pdev->dev,
 					       rbi->dma_addr, rbi->len,
 					       PCI_DMA_FROMDEVICE);
 
@@ -1276,7 +1284,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 
 			/* Immediate refill */
 			rbi->page = new_page;
-			rbi->dma_addr = pci_map_page(adapter->pdev, rbi->page,
+			rbi->dma_addr = dma_map_page(&adapter->pdev->dev,
+						     rbi->page,
 						     0, PAGE_SIZE,
 						     PCI_DMA_FROMDEVICE);
 			rxd->addr = cpu_to_le64(rbi->dma_addr);
@@ -1352,13 +1361,13 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
 
 		if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
 				rq->buf_info[ring_idx][i].skb) {
-			pci_unmap_single(adapter->pdev, rxd->addr,
+			dma_unmap_single(&adapter->pdev->dev, rxd->addr,
 					 rxd->len, PCI_DMA_FROMDEVICE);
 			dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
 			rq->buf_info[ring_idx][i].skb = NULL;
 		} else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
 				rq->buf_info[ring_idx][i].page) {
-			pci_unmap_page(adapter->pdev, rxd->addr,
+			dma_unmap_page(&adapter->pdev->dev, rxd->addr,
 				       rxd->len, PCI_DMA_FROMDEVICE);
 			put_page(rq->buf_info[ring_idx][i].page);
 			rq->buf_info[ring_idx][i].page = NULL;
@@ -1400,25 +1409,31 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
 	}
 
 
-	kfree(rq->buf_info[0]);
-
 	for (i = 0; i < 2; i++) {
 		if (rq->rx_ring[i].base) {
-			pci_free_consistent(adapter->pdev, rq->rx_ring[i].size
-					    * sizeof(struct Vmxnet3_RxDesc),
-					    rq->rx_ring[i].base,
-					    rq->rx_ring[i].basePA);
+			dma_free_coherent(&adapter->pdev->dev,
+					  rq->rx_ring[i].size
+					  * sizeof(struct Vmxnet3_RxDesc),
+					  rq->rx_ring[i].base,
+					  rq->rx_ring[i].basePA);
 			rq->rx_ring[i].base = NULL;
 		}
 		rq->buf_info[i] = NULL;
 	}
 
 	if (rq->comp_ring.base) {
-		pci_free_consistent(adapter->pdev, rq->comp_ring.size *
-				    sizeof(struct Vmxnet3_RxCompDesc),
+		dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
+				  * sizeof(struct Vmxnet3_RxCompDesc),
 				  rq->comp_ring.base, rq->comp_ring.basePA);
 		rq->comp_ring.base = NULL;
 	}
+
+	if (rq->buf_info[0]) {
+		size_t sz = sizeof(struct vmxnet3_rx_buf_info) *
+			(rq->rx_ring[0].size + rq->rx_ring[1].size);
+		dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
+				  rq->buf_info_pa);
+	}
 }
 
 
@@ -1503,8 +1518,10 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
 	for (i = 0; i < 2; i++) {
 
 		sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
-		rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz,
-							&rq->rx_ring[i].basePA);
+		rq->rx_ring[i].base = dma_alloc_coherent(
+						&adapter->pdev->dev, sz,
+						&rq->rx_ring[i].basePA,
+						GFP_KERNEL);
 		if (!rq->rx_ring[i].base) {
 			netdev_err(adapter->netdev,
 				   "failed to allocate rx ring %d\n", i);
@@ -1513,8 +1530,9 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
 	}
 
 	sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
-	rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz,
-						  &rq->comp_ring.basePA);
+	rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
+						&rq->comp_ring.basePA,
+						GFP_KERNEL);
 	if (!rq->comp_ring.base) {
 		netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
 		goto err;
@@ -1522,7 +1540,8 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
 
 	sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
 						   rq->rx_ring[1].size);
-	bi = kzalloc(sz, GFP_KERNEL);
+	bi = dma_zalloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa,
+				 GFP_KERNEL);
 	if (!bi)
 		goto err;
 
@@ -2005,6 +2024,7 @@ vmxnet3_set_mc(struct net_device *netdev)
 	struct Vmxnet3_RxFilterConf *rxConf =
 		&adapter->shared->devRead.rxFilterConf;
 	u8 *new_table = NULL;
+	dma_addr_t new_table_pa = 0;
 	u32 new_mode = VMXNET3_RXM_UCAST;
 
 	if (netdev->flags & IFF_PROMISC) {
@@ -2028,8 +2048,12 @@ vmxnet3_set_mc(struct net_device *netdev)
 			new_mode |= VMXNET3_RXM_MCAST;
 			rxConf->mfTableLen = cpu_to_le16(
 				netdev_mc_count(netdev) * ETH_ALEN);
-			rxConf->mfTablePA = cpu_to_le64(virt_to_phys(
-					new_table));
+			new_table_pa = dma_map_single(
+						&adapter->pdev->dev,
+						new_table,
+						rxConf->mfTableLen,
+						PCI_DMA_TODEVICE);
+			rxConf->mfTablePA = cpu_to_le64(new_table_pa);
 		} else {
 			netdev_info(netdev, "failed to copy mcast list"
 				    ", setting ALL_MULTI\n");
@@ -2056,7 +2080,11 @@ vmxnet3_set_mc(struct net_device *netdev)
 				       VMXNET3_CMD_UPDATE_MAC_FILTERS);
 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
-	kfree(new_table);
+	if (new_table) {
+		dma_unmap_single(&adapter->pdev->dev, new_table_pa,
+				 rxConf->mfTableLen, PCI_DMA_TODEVICE);
+		kfree(new_table);
+	}
 }
 
 void
@@ -2096,7 +2124,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
 	devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
 	devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
 
-	devRead->misc.ddPA = cpu_to_le64(virt_to_phys(adapter));
+	devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa);
 	devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
 
 	/* set up feature flags */
@@ -2125,7 +2153,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
 		tqc->txRingBasePA   = cpu_to_le64(tq->tx_ring.basePA);
 		tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
 		tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
-		tqc->ddPA           = cpu_to_le64(virt_to_phys(tq->buf_info));
+		tqc->ddPA           = cpu_to_le64(tq->buf_info_pa);
 		tqc->txRingSize     = cpu_to_le32(tq->tx_ring.size);
 		tqc->dataRingSize   = cpu_to_le32(tq->data_ring.size);
 		tqc->compRingSize   = cpu_to_le32(tq->comp_ring.size);
@@ -2143,8 +2171,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
 		rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
 		rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
 		rqc->compRingBasePA  = cpu_to_le64(rq->comp_ring.basePA);
-		rqc->ddPA            = cpu_to_le64(virt_to_phys(
-							rq->buf_info));
+		rqc->ddPA            = cpu_to_le64(rq->buf_info_pa);
 		rqc->rxRingSize[0]   = cpu_to_le32(rq->rx_ring[0].size);
 		rqc->rxRingSize[1]   = cpu_to_le32(rq->rx_ring[1].size);
 		rqc->compRingSize    = cpu_to_le32(rq->comp_ring.size);
@@ -2184,8 +2211,9 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
 				i, adapter->num_rx_queues);
 
 		devRead->rssConfDesc.confVer = 1;
-		devRead->rssConfDesc.confLen = sizeof(*rssConf);
-		devRead->rssConfDesc.confPA = virt_to_phys(rssConf);
+		devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf));
+		devRead->rssConfDesc.confPA =
+			cpu_to_le64(adapter->rss_conf_pa);
 	}
 
 #endif /* VMXNET3_RSS */
@@ -2948,9 +2976,13 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 	adapter->pdev = pdev;
 
 	spin_lock_init(&adapter->cmd_lock);
-	adapter->shared = pci_alloc_consistent(adapter->pdev,
-			  sizeof(struct Vmxnet3_DriverShared),
-			  &adapter->shared_pa);
+	adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
+					     sizeof(struct vmxnet3_adapter),
+					     PCI_DMA_TODEVICE);
+	adapter->shared = dma_alloc_coherent(
+				&adapter->pdev->dev,
+				sizeof(struct Vmxnet3_DriverShared),
+				&adapter->shared_pa, GFP_KERNEL);
 	if (!adapter->shared) {
 		dev_err(&pdev->dev, "Failed to allocate memory\n");
 		err = -ENOMEM;
@@ -2963,8 +2995,9 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 
 	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
 	size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
-	adapter->tqd_start = pci_alloc_consistent(adapter->pdev, size,
-			     &adapter->queue_desc_pa);
+	adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size,
+						&adapter->queue_desc_pa,
+						GFP_KERNEL);
 
 	if (!adapter->tqd_start) {
 		dev_err(&pdev->dev, "Failed to allocate memory\n");
@@ -2974,7 +3007,10 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 	adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
 							    adapter->num_tx_queues);
 
-	adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
+	adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
+					      sizeof(struct Vmxnet3_PMConf),
+					      &adapter->pm_conf_pa,
+					      GFP_KERNEL);
 	if (adapter->pm_conf == NULL) {
 		err = -ENOMEM;
 		goto err_alloc_pm;
@@ -2982,7 +3018,10 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 
 #ifdef VMXNET3_RSS
 
-	adapter->rss_conf = kmalloc(sizeof(struct UPT1_RSSConf), GFP_KERNEL);
+	adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev,
+					       sizeof(struct UPT1_RSSConf),
+					       &adapter->rss_conf_pa,
+					       GFP_KERNEL);
 	if (adapter->rss_conf == NULL) {
 		err = -ENOMEM;
 		goto err_alloc_rss;
@@ -3077,17 +3116,22 @@ err_ver:
 	vmxnet3_free_pci_resources(adapter);
 err_alloc_pci:
 #ifdef VMXNET3_RSS
-	kfree(adapter->rss_conf);
+	dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
+			  adapter->rss_conf, adapter->rss_conf_pa);
 err_alloc_rss:
 #endif
-	kfree(adapter->pm_conf);
+	dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
+			  adapter->pm_conf, adapter->pm_conf_pa);
 err_alloc_pm:
-	pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
+	dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
 			  adapter->queue_desc_pa);
 err_alloc_queue_desc:
-	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
-			    adapter->shared, adapter->shared_pa);
+	dma_free_coherent(&adapter->pdev->dev,
+			  sizeof(struct Vmxnet3_DriverShared),
+			  adapter->shared, adapter->shared_pa);
 err_alloc_shared:
+	dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
+			 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
 	pci_set_drvdata(pdev, NULL);
 	free_netdev(netdev);
 	return err;
@@ -3118,16 +3162,21 @@ vmxnet3_remove_device(struct pci_dev *pdev)
 	vmxnet3_free_intr_resources(adapter);
 	vmxnet3_free_pci_resources(adapter);
 #ifdef VMXNET3_RSS
-	kfree(adapter->rss_conf);
+	dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
+			  adapter->rss_conf, adapter->rss_conf_pa);
 #endif
-	kfree(adapter->pm_conf);
+	dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
+			  adapter->pm_conf, adapter->pm_conf_pa);
 
 	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
 	size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
-	pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
+	dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
 			  adapter->queue_desc_pa);
-	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
-			    adapter->shared, adapter->shared_pa);
+	dma_free_coherent(&adapter->pdev->dev,
+			  sizeof(struct Vmxnet3_DriverShared),
+			  adapter->shared, adapter->shared_pa);
+	dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
+			 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
 	free_netdev(netdev);
 }
 
@@ -3227,8 +3276,8 @@ skip_arp:
 	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
 	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
 								  *pmConf));
-	adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
-								 pmConf));
+	adapter->shared->devRead.pmConfDesc.confPA =
+		cpu_to_le64(adapter->pm_conf_pa);
 
 	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
@@ -3265,8 +3314,8 @@ vmxnet3_resume(struct device *device)
 	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
 	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
 								  *pmConf));
-	adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
-								 pmConf));
+	adapter->shared->devRead.pmConfDesc.confPA =
+		cpu_to_le64(adapter->pm_conf_pa);
 
 	netif_device_attach(netdev);
 	pci_set_power_state(pdev, PCI_D0);
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 35418146fa17..a03f358fd58b 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -70,10 +70,10 @@
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.1.30.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.2.0.0-k"
 
 /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01011E00
+#define VMXNET3_DRIVER_VERSION_NUM      0x01020000
 
 #if defined(CONFIG_PCI_MSI)
 	/* RSS only makes sense if MSI-X is supported. */
@@ -229,6 +229,7 @@ struct vmxnet3_tx_queue {
 	spinlock_t                      tx_lock;
 	struct vmxnet3_cmd_ring         tx_ring;
 	struct vmxnet3_tx_buf_info      *buf_info;
+	dma_addr_t                      buf_info_pa;
 	struct vmxnet3_tx_data_ring     data_ring;
 	struct vmxnet3_comp_ring        comp_ring;
 	struct Vmxnet3_TxQueueCtrl      *shared;
@@ -277,6 +278,7 @@ struct vmxnet3_rx_queue {
 	u32 qid;            /* rqID in RCD for buffer from 1st ring */
 	u32 qid2;           /* rqID in RCD for buffer from 2nd ring */
 	struct vmxnet3_rx_buf_info      *buf_info[2];
+	dma_addr_t                      buf_info_pa;
 	struct Vmxnet3_RxQueueCtrl      *shared;
 	struct vmxnet3_rq_driver_stats  stats;
 } __attribute__((__aligned__(SMP_CACHE_BYTES)));
@@ -353,6 +355,10 @@ struct vmxnet3_adapter {
 	unsigned long  state;    /* VMXNET3_STATE_BIT_xxx */
 
 	int share_intr;
+
+	dma_addr_t adapter_pa;
+	dma_addr_t pm_conf_pa;
+	dma_addr_t rss_conf_pa;
 };
 
 #define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \