author:    Shrikrishna Khare <skhare@vmware.com>  2016-06-16 13:51:56 -0400
committer: David S. Miller <davem@davemloft.net>  2016-06-17 01:37:04 -0400
commit:    50a5ce3e7116a70edb7a1d1d209e3bc537752427
tree:      cd7fae14e989039d2582da4ad7bf1e4006cf6bef /drivers/net/vmxnet3
parent:    3c8b3efc061a745d888869dc3462ac4f7dd582d9
vmxnet3: add receive data ring support
vmxnet3 driver preallocates buffers for receiving packets and posts the
buffers to the emulation. In order to deliver a received packet to the
guest, the emulation must map buffer(s) and copy the packet into it.

To avoid this memory mapping overhead, this patch introduces the receive
data ring - a set of small sized buffers that are always mapped by the
emulation. If a packet fits into the receive data ring buffer, the
emulation delivers the packet via the receive data ring (which must be
copied by the guest driver), or else the usual receive path is used.

Receive Data Ring buffer length is configurable via ethtool -G ethX rx-mini

Signed-off-by: Shrikrishna Khare <skhare@vmware.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
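The fast path this patch adds to vmxnet3_rq_rx_complete() boils down to:
if the completion came from the data ring and the packet fits in one small
buffer, copy it out of the always-mapped ring; otherwise fall back to the
mapped-skb path. A minimal standalone C sketch of that decision, using
plain stand-in types (rx_data_ring and rx_from_data_ring are illustrative
names, not driver symbols):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Illustrative stand-in for the driver's data ring state. */
struct rx_data_ring {
	uint8_t *base;		/* always-mapped buffers, one per rx descriptor */
	uint16_t desc_size;	/* bytes per buffer, a multiple of 64 */
};

/* Copy a received packet out of the data ring when it fits.
 * Returns 0 on success, -1 when the packet must take the usual
 * mapped-buffer receive path instead.
 */
static int rx_from_data_ring(const struct rx_data_ring *ring,
			     uint32_t rxd_idx, uint16_t pkt_len,
			     uint8_t *skb_data)
{
	if (pkt_len > ring->desc_size)
		return -1;	/* too large: usual receive path */

	/* buffer i sits at offset i * desc_size from the ring base */
	memcpy(skb_data, ring->base + (size_t)rxd_idx * ring->desc_size,
	       pkt_len);
	return 0;
}

The guest pays one memcpy on this path, but the emulation avoids mapping
guest buffers for every small packet, which is the trade the commit
message describes.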
Diffstat (limited to 'drivers/net/vmxnet3')
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_defs.h     |  14
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c      | 153
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethtool.c  |  48
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h      |  23
4 files changed, 193 insertions, 45 deletions
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
index 701d98944c58..f3b31c2d8abc 100644
--- a/drivers/net/vmxnet3/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -174,6 +174,8 @@ struct Vmxnet3_TxDataDesc {
 	u8	data[VMXNET3_HDR_COPY_SIZE];
 };
 
+typedef u8 Vmxnet3_RxDataDesc;
+
 #define VMXNET3_TCD_GEN_SHIFT	31
 #define VMXNET3_TCD_GEN_SIZE	1
 #define VMXNET3_TCD_TXIDX_SHIFT	0
@@ -382,6 +384,10 @@ union Vmxnet3_GenericDesc {
 #define VMXNET3_TXDATA_DESC_SIZE_ALIGN	64
 #define VMXNET3_TXDATA_DESC_SIZE_MASK	(VMXNET3_TXDATA_DESC_SIZE_ALIGN - 1)
 
+/* Rx Data Ring buffer size must be a multiple of 64 */
+#define VMXNET3_RXDATA_DESC_SIZE_ALIGN	64
+#define VMXNET3_RXDATA_DESC_SIZE_MASK	(VMXNET3_RXDATA_DESC_SIZE_ALIGN - 1)
+
 /* Max ring size */
 #define VMXNET3_TX_RING_MAX_SIZE	4096
 #define VMXNET3_TC_RING_MAX_SIZE	4096
@@ -392,6 +398,8 @@ union Vmxnet3_GenericDesc {
 #define VMXNET3_TXDATA_DESC_MIN_SIZE	128
 #define VMXNET3_TXDATA_DESC_MAX_SIZE	2048
 
+#define VMXNET3_RXDATA_DESC_MAX_SIZE	2048
+
 /* a list of reasons for queue stop */
 
 enum {
@@ -488,12 +496,14 @@ struct Vmxnet3_RxQueueConf {
 	__le64		rxRingBasePA[2];
 	__le64		compRingBasePA;
 	__le64		ddPA;			/* driver data */
-	__le64		reserved;
+	__le64		rxDataRingBasePA;
 	__le32		rxRingSize[2];		/* # of rx desc */
 	__le32		compRingSize;		/* # of rx comp desc */
 	__le32		ddLen;			/* size of driver data */
 	u8		intrIdx;
-	u8		_pad[7];
+	u8		_pad1[1];
+	__le16		rxDataRingDescSize;	/* size of rx data ring buffer */
+	u8		_pad2[4];
 };
 
 
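The new VMXNET3_RXDATA_DESC_SIZE_ALIGN/VMXNET3_RXDATA_DESC_SIZE_MASK pair
follows the existing TXDATA idiom: a requested size is rounded up by
adding (align - 1) and clearing the low bits, which is exactly what the
ethtool path further down does with it. A small self-contained check of
that arithmetic (round_up_desc_size() is an illustrative helper, not
driver code):

#include <assert.h>
#include <stdint.h>

#define VMXNET3_RXDATA_DESC_SIZE_ALIGN	64
#define VMXNET3_RXDATA_DESC_SIZE_MASK	(VMXNET3_RXDATA_DESC_SIZE_ALIGN - 1)

static uint16_t round_up_desc_size(uint16_t requested)
{
	/* adding (align - 1), then clearing the low bits, rounds up */
	return (requested + VMXNET3_RXDATA_DESC_SIZE_MASK) &
	       ~VMXNET3_RXDATA_DESC_SIZE_MASK;
}

int main(void)
{
	assert(round_up_desc_size(1)   == 64);
	assert(round_up_desc_size(64)  == 64);
	assert(round_up_desc_size(65)  == 128);
	assert(round_up_desc_size(100) == 128);
	return 0;
}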
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 4e42eb04a198..6449d2e6d94f 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1284,9 +1284,10 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 		 */
 			break;
 		}
-		BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
+		BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
+		       rcd->rqID != rq->dataRingQid);
 		idx = rcd->rxdIdx;
-		ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
+		ring_idx = VMXNET3_GET_RING_IDX(adapter, rcd->rqID);
 		ring = rq->rx_ring + ring_idx;
 		vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
 				  &rxCmdDesc);
@@ -1301,8 +1302,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 		}
 
 		if (rcd->sop) { /* first buf of the pkt */
+			bool rxDataRingUsed;
+			u16 len;
+
 			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
-			       rcd->rqID != rq->qid);
+			       (rcd->rqID != rq->qid &&
+				rcd->rqID != rq->dataRingQid));
 
 			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
 			BUG_ON(ctx->skb != NULL || rbi->skb == NULL);
@@ -1318,8 +1323,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 
 			skip_page_frags = false;
 			ctx->skb = rbi->skb;
+
+			rxDataRingUsed =
+				VMXNET3_RX_DATA_RING(adapter, rcd->rqID);
+			len = rxDataRingUsed ? rcd->len : rbi->len;
 			new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
-							    rbi->len);
+							    len);
 			if (new_skb == NULL) {
 				/* Skb allocation failed, do not handover this
 				 * skb to stack. Reuse it. Drop the existing pkt
@@ -1330,25 +1339,48 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 				skip_page_frags = true;
 				goto rcd_done;
 			}
-			new_dma_addr = dma_map_single(&adapter->pdev->dev,
-						      new_skb->data, rbi->len,
-						      PCI_DMA_FROMDEVICE);
-			if (dma_mapping_error(&adapter->pdev->dev,
-					      new_dma_addr)) {
-				dev_kfree_skb(new_skb);
-				/* Skb allocation failed, do not handover this
-				 * skb to stack. Reuse it. Drop the existing pkt
-				 */
-				rq->stats.rx_buf_alloc_failure++;
-				ctx->skb = NULL;
-				rq->stats.drop_total++;
-				skip_page_frags = true;
-				goto rcd_done;
-			}
 
-			dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr,
-					 rbi->len,
-					 PCI_DMA_FROMDEVICE);
+			if (rxDataRingUsed) {
+				size_t sz;
+
+				BUG_ON(rcd->len > rq->data_ring.desc_size);
+
+				ctx->skb = new_skb;
+				sz = rcd->rxdIdx * rq->data_ring.desc_size;
+				memcpy(new_skb->data,
+				       &rq->data_ring.base[sz], rcd->len);
+			} else {
+				ctx->skb = rbi->skb;
+
+				new_dma_addr =
+					dma_map_single(&adapter->pdev->dev,
+						       new_skb->data, rbi->len,
+						       PCI_DMA_FROMDEVICE);
+				if (dma_mapping_error(&adapter->pdev->dev,
+						      new_dma_addr)) {
+					dev_kfree_skb(new_skb);
+					/* Skb allocation failed, do not
+					 * handover this skb to stack. Reuse
+					 * it. Drop the existing pkt.
+					 */
+					rq->stats.rx_buf_alloc_failure++;
+					ctx->skb = NULL;
+					rq->stats.drop_total++;
+					skip_page_frags = true;
+					goto rcd_done;
+				}
+
+				dma_unmap_single(&adapter->pdev->dev,
+						 rbi->dma_addr,
+						 rbi->len,
+						 PCI_DMA_FROMDEVICE);
+
+				/* Immediate refill */
+				rbi->skb = new_skb;
+				rbi->dma_addr = new_dma_addr;
+				rxd->addr = cpu_to_le64(rbi->dma_addr);
+				rxd->len = rbi->len;
+			}
 
 #ifdef VMXNET3_RSS
 			if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
@@ -1359,11 +1391,6 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 #endif
 			skb_put(ctx->skb, rcd->len);
 
-			/* Immediate refill */
-			rbi->skb = new_skb;
-			rbi->dma_addr = new_dma_addr;
-			rxd->addr = cpu_to_le64(rbi->dma_addr);
-			rxd->len = rbi->len;
 			if (VMXNET3_VERSION_GE_2(adapter) &&
 			    rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
 				struct Vmxnet3_RxCompDescExt *rcdlro;
@@ -1590,6 +1617,13 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
 		rq->buf_info[i] = NULL;
 	}
 
+	if (rq->data_ring.base) {
+		dma_free_coherent(&adapter->pdev->dev,
+				  rq->rx_ring[0].size * rq->data_ring.desc_size,
+				  rq->data_ring.base, rq->data_ring.basePA);
+		rq->data_ring.base = NULL;
+	}
+
 	if (rq->comp_ring.base) {
 		dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
 				  * sizeof(struct Vmxnet3_RxCompDesc),
@@ -1605,6 +1639,25 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
 	}
 }
 
+void
+vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
+
+		if (rq->data_ring.base) {
+			dma_free_coherent(&adapter->pdev->dev,
+					  (rq->rx_ring[0].size *
+					  rq->data_ring.desc_size),
+					  rq->data_ring.base,
+					  rq->data_ring.basePA);
+			rq->data_ring.base = NULL;
+			rq->data_ring.desc_size = 0;
+		}
+	}
+}
 
 static int
 vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
@@ -1698,6 +1751,22 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
 		}
 	}
 
+	if ((adapter->rxdataring_enabled) && (rq->data_ring.desc_size != 0)) {
+		sz = rq->rx_ring[0].size * rq->data_ring.desc_size;
+		rq->data_ring.base =
+			dma_alloc_coherent(&adapter->pdev->dev, sz,
+					   &rq->data_ring.basePA,
+					   GFP_KERNEL);
+		if (!rq->data_ring.base) {
+			netdev_err(adapter->netdev,
+				   "rx data ring will be disabled\n");
+			adapter->rxdataring_enabled = false;
+		}
+	} else {
+		rq->data_ring.base = NULL;
+		rq->data_ring.desc_size = 0;
+	}
+
 	sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
 	rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
 						&rq->comp_ring.basePA,
@@ -1730,6 +1799,8 @@ vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
 {
 	int i, err = 0;
 
+	adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
+
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
 		if (unlikely(err)) {
@@ -1739,6 +1810,10 @@ vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
 			goto err_out;
 		}
 	}
+
+	if (!adapter->rxdataring_enabled)
+		vmxnet3_rq_destroy_all_rxdataring(adapter);
+
 	return err;
 err_out:
 	vmxnet3_rq_destroy_all(adapter);
@@ -2046,10 +2121,9 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
 		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
 		rq->qid = i;
 		rq->qid2 = i + adapter->num_rx_queues;
+		rq->dataRingQid = i + 2 * adapter->num_rx_queues;
 	}
 
-
-
 	/* init our intr settings */
 	for (i = 0; i < intr->num_intrs; i++)
 		intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
@@ -2362,6 +2436,12 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
 					    (rqc->rxRingSize[0] +
 					     rqc->rxRingSize[1]));
 		rqc->intrIdx = rq->comp_ring.intr_idx;
+		if (VMXNET3_VERSION_GE_3(adapter)) {
+			rqc->rxDataRingBasePA =
+				cpu_to_le64(rq->data_ring.basePA);
+			rqc->rxDataRingDescSize =
+				cpu_to_le16(rq->data_ring.desc_size);
+		}
 	}
 
 #ifdef VMXNET3_RSS
@@ -2692,7 +2772,7 @@ vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
 int
 vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
 		      u32 rx_ring_size, u32 rx_ring2_size,
-		      u16 txdata_desc_size)
+		      u16 txdata_desc_size, u16 rxdata_desc_size)
 {
 	int err = 0, i;
 
@@ -2718,12 +2798,15 @@ vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
 	adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
 	adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
 	vmxnet3_adjust_rx_ring_size(adapter);
+
+	adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
 		/* qid and qid2 for rx queues will be assigned later when num
 		 * of rx queues is finalized after allocating intrs */
 		rq->shared = &adapter->rqd_start[i].ctrl;
 		rq->adapter = adapter;
+		rq->data_ring.desc_size = rxdata_desc_size;
 		err = vmxnet3_rq_create(rq, adapter);
 		if (err) {
 			if (i == 0) {
@@ -2741,6 +2824,10 @@ vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
 			}
 		}
 	}
+
+	if (!adapter->rxdataring_enabled)
+		vmxnet3_rq_destroy_all_rxdataring(adapter);
+
 	return err;
 queue_err:
 	vmxnet3_tq_destroy_all(adapter);
@@ -2785,7 +2872,8 @@ vmxnet3_open(struct net_device *netdev)
 				      adapter->tx_ring_size,
 				      adapter->rx_ring_size,
 				      adapter->rx_ring2_size,
-				      adapter->txdata_desc_size);
+				      adapter->txdata_desc_size,
+				      adapter->rxdata_desc_size);
 	if (err)
 		goto queue_err;
 
@@ -3260,6 +3348,9 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 	SET_NETDEV_DEV(netdev, &pdev->dev);
 	vmxnet3_declare_features(adapter, dma64);
 
+	adapter->rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
+		VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
+
 	if (adapter->num_tx_queues == adapter->num_rx_queues)
 		adapter->share_intr = VMXNET3_INTR_BUDDYSHARE;
 	else
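Two bookkeeping rules from the vmxnet3_drv.c changes above are worth
restating: with N rx queues the rqID space is split into thirds (first
ring, second ring, data ring), and each queue's data ring is sized as one
desc_size buffer per descriptor of ring 0. A standalone sketch of both
calculations, simplified from the driver (function names here are
illustrative):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static size_t data_ring_bytes(uint32_t ring0_size, uint16_t desc_size)
{
	return (size_t)ring0_size * desc_size;	/* e.g. 256 * 128 = 32 KiB */
}

static void assign_qids(uint32_t i, uint32_t num_rx_queues,
			uint32_t *qid, uint32_t *qid2, uint32_t *data_qid)
{
	*qid      = i;				/* 1st ring */
	*qid2     = i + num_rx_queues;		/* 2nd ring */
	*data_qid = i + 2 * num_rx_queues;	/* data ring */
}

int main(void)
{
	uint32_t qid, qid2, dqid;

	assign_qids(1, 4, &qid, &qid2, &dqid);
	printf("queue 1 of 4: qid=%u qid2=%u dataRingQid=%u\n",
	       qid, qid2, dqid);		/* 1, 5, 9 */
	printf("data ring bytes: %zu\n", data_ring_bytes(256, 128));
	return 0;
}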
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 3b70cfef9748..38f7c7975e1f 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -430,11 +430,10 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
 		buf[j++] = rq->rx_ring[1].next2comp;
 		buf[j++] = rq->rx_ring[1].gen;
 
-		/* receive data ring */
-		buf[j++] = 0;
-		buf[j++] = 0;
-		buf[j++] = 0;
-		buf[j++] = 0;
+		buf[j++] = VMXNET3_GET_ADDR_LO(rq->data_ring.basePA);
+		buf[j++] = VMXNET3_GET_ADDR_HI(rq->data_ring.basePA);
+		buf[j++] = rq->rx_ring[0].size;
+		buf[j++] = rq->data_ring.desc_size;
 
 		buf[j++] = VMXNET3_GET_ADDR_LO(rq->comp_ring.basePA);
 		buf[j++] = VMXNET3_GET_ADDR_HI(rq->comp_ring.basePA);
@@ -503,12 +502,14 @@ vmxnet3_get_ringparam(struct net_device *netdev,
 
 	param->rx_max_pending = VMXNET3_RX_RING_MAX_SIZE;
 	param->tx_max_pending = VMXNET3_TX_RING_MAX_SIZE;
-	param->rx_mini_max_pending = 0;
+	param->rx_mini_max_pending = VMXNET3_VERSION_GE_3(adapter) ?
+		VMXNET3_RXDATA_DESC_MAX_SIZE : 0;
 	param->rx_jumbo_max_pending = VMXNET3_RX_RING2_MAX_SIZE;
 
 	param->rx_pending = adapter->rx_ring_size;
 	param->tx_pending = adapter->tx_ring_size;
-	param->rx_mini_pending = 0;
+	param->rx_mini_pending = VMXNET3_VERSION_GE_3(adapter) ?
+		adapter->rxdata_desc_size : 0;
 	param->rx_jumbo_pending = adapter->rx_ring2_size;
 }
 
@@ -519,6 +520,7 @@ vmxnet3_set_ringparam(struct net_device *netdev,
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 	u32 new_tx_ring_size, new_rx_ring_size, new_rx_ring2_size;
+	u16 new_rxdata_desc_size;
 	u32 sz;
 	int err = 0;
 
@@ -541,6 +543,15 @@ vmxnet3_set_ringparam(struct net_device *netdev,
 		return -EOPNOTSUPP;
 	}
 
+	if (VMXNET3_VERSION_GE_3(adapter)) {
+		if (param->rx_mini_pending < 0 ||
+		    param->rx_mini_pending > VMXNET3_RXDATA_DESC_MAX_SIZE) {
+			return -EINVAL;
+		}
+	} else if (param->rx_mini_pending != 0) {
+		return -EINVAL;
+	}
+
 	/* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */
 	new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) &
 			   ~VMXNET3_RING_SIZE_MASK;
@@ -567,9 +578,19 @@ vmxnet3_set_ringparam(struct net_device *netdev,
 	new_rx_ring2_size = min_t(u32, new_rx_ring2_size,
 				  VMXNET3_RX_RING2_MAX_SIZE);
 
+	/* rx data ring buffer size has to be a multiple of
+	 * VMXNET3_RXDATA_DESC_SIZE_ALIGN
+	 */
+	new_rxdata_desc_size =
+		(param->rx_mini_pending + VMXNET3_RXDATA_DESC_SIZE_MASK) &
+		~VMXNET3_RXDATA_DESC_SIZE_MASK;
+	new_rxdata_desc_size = min_t(u16, new_rxdata_desc_size,
+				     VMXNET3_RXDATA_DESC_MAX_SIZE);
+
 	if (new_tx_ring_size == adapter->tx_ring_size &&
 	    new_rx_ring_size == adapter->rx_ring_size &&
-	    new_rx_ring2_size == adapter->rx_ring2_size) {
+	    new_rx_ring2_size == adapter->rx_ring2_size &&
+	    new_rxdata_desc_size == adapter->rxdata_desc_size) {
 		return 0;
 	}
 
@@ -591,8 +612,8 @@ vmxnet3_set_ringparam(struct net_device *netdev,
 
 	err = vmxnet3_create_queues(adapter, new_tx_ring_size,
 				    new_rx_ring_size, new_rx_ring2_size,
-				    adapter->txdata_desc_size);
-
+				    adapter->txdata_desc_size,
+				    new_rxdata_desc_size);
 	if (err) {
 		/* failed, most likely because of OOM, try default
 		 * size */
@@ -601,11 +622,15 @@ vmxnet3_set_ringparam(struct net_device *netdev,
 		new_rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
 		new_rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
 		new_tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
+		new_rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
+			VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
+
 		err = vmxnet3_create_queues(adapter,
 					    new_tx_ring_size,
 					    new_rx_ring_size,
 					    new_rx_ring2_size,
-					    adapter->txdata_desc_size);
+					    adapter->txdata_desc_size,
+					    new_rxdata_desc_size);
 		if (err) {
 			netdev_err(netdev, "failed to create queues "
 				   "with default sizes. Closing it\n");
@@ -621,6 +646,7 @@ vmxnet3_set_ringparam(struct net_device *netdev,
 	adapter->tx_ring_size = new_tx_ring_size;
 	adapter->rx_ring_size = new_rx_ring_size;
 	adapter->rx_ring2_size = new_rx_ring2_size;
+	adapter->rxdata_desc_size = new_rxdata_desc_size;
 
 out:
 	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
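Taken together, the set_ringparam changes treat ethtool's rx-mini value as
the data ring buffer size: pre-v3 devices reject any nonzero request,
while on v3 the request is bounded by VMXNET3_RXDATA_DESC_MAX_SIZE and
rounded up to a 64-byte multiple, so ethtool -G eth0 rx-mini 100 yields
128. A compact restatement of that policy (normalize_rx_mini() is an
assumed-equivalent sketch, not driver code):

#include <errno.h>
#include <stdint.h>

#define RXDATA_ALIGN	64
#define RXDATA_MAX	2048

/* Returns the normalized desc size, or -EINVAL for an unacceptable
 * request. version_ge_3 mirrors VMXNET3_VERSION_GE_3(adapter).
 */
static int normalize_rx_mini(uint32_t requested, int version_ge_3)
{
	if (!version_ge_3)
		return requested ? -EINVAL : 0;
	if (requested > RXDATA_MAX)
		return -EINVAL;
	/* round up to a multiple of RXDATA_ALIGN, then clamp */
	requested = (requested + RXDATA_ALIGN - 1) & ~(RXDATA_ALIGN - 1);
	return requested > RXDATA_MAX ? RXDATA_MAX : (int)requested;
}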
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 94010de67e3d..c46bf09ade5a 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -272,15 +272,23 @@ struct vmxnet3_rq_driver_stats {
 	u64 rx_buf_alloc_failure;
 };
 
+struct vmxnet3_rx_data_ring {
+	Vmxnet3_RxDataDesc *base;
+	dma_addr_t basePA;
+	u16 desc_size;
+};
+
 struct vmxnet3_rx_queue {
 	char name[IFNAMSIZ + 8]; /* To identify interrupt */
 	struct vmxnet3_adapter *adapter;
 	struct napi_struct napi;
 	struct vmxnet3_cmd_ring rx_ring[2];
+	struct vmxnet3_rx_data_ring data_ring;
 	struct vmxnet3_comp_ring comp_ring;
 	struct vmxnet3_rx_ctx rx_ctx;
 	u32 qid;	/* rqID in RCD for buffer from 1st ring */
 	u32 qid2;	/* rqID in RCD for buffer from 2nd ring */
+	u32 dataRingQid; /* rqID in RCD for buffer from data ring */
 	struct vmxnet3_rx_buf_info *buf_info[2];
 	dma_addr_t buf_info_pa;
 	struct Vmxnet3_RxQueueCtrl *shared;
@@ -366,6 +374,9 @@ struct vmxnet3_adapter {
 
 	/* Size of buffer in the data ring */
 	u16 txdata_desc_size;
+	u16 rxdata_desc_size;
+
+	bool rxdataring_enabled;
 
 	struct work_struct work;
 
@@ -405,9 +416,19 @@ struct vmxnet3_adapter {
 #define VMXNET3_DEF_RX_RING_SIZE	256
 #define VMXNET3_DEF_RX_RING2_SIZE	128
 
+#define VMXNET3_DEF_RXDATA_DESC_SIZE	128
+
 #define VMXNET3_MAX_ETH_HDR_SIZE	22
 #define VMXNET3_MAX_SKB_BUF_SIZE	(3*1024)
 
+#define VMXNET3_GET_RING_IDX(adapter, rqID)	\
+	((rqID >= adapter->num_rx_queues &&	\
+	 rqID < 2 * adapter->num_rx_queues) ? 1 : 0)	\
+
+#define VMXNET3_RX_DATA_RING(adapter, rqID)	\
+	(rqID >= 2 * adapter->num_rx_queues &&	\
+	 rqID < 3 * adapter->num_rx_queues)	\
+
 int
 vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter);
 
@@ -432,7 +453,7 @@ vmxnet3_set_features(struct net_device *netdev, netdev_features_t features);
 int
 vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
 		      u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size,
-		      u16 txdata_desc_size);
+		      u16 txdata_desc_size, u16 rxdata_desc_size);
 
 void vmxnet3_set_ethtool_ops(struct net_device *netdev);
 
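The two macros added to vmxnet3_int.h classify a completion's rqID by
which third of the ID space it occupies; note that a data-ring rqID still
maps to ring index 0, since data-ring packets reuse the first ring's
descriptors. A standalone restatement with parentheses added around the
macro arguments (struct adapter here is a stand-in for the kernel's
struct vmxnet3_adapter):

#include <assert.h>
#include <stdint.h>

struct adapter { uint32_t num_rx_queues; };

#define VMXNET3_GET_RING_IDX(adapter, rqID)		\
	(((rqID) >= (adapter)->num_rx_queues &&		\
	  (rqID) < 2 * (adapter)->num_rx_queues) ? 1 : 0)

#define VMXNET3_RX_DATA_RING(adapter, rqID)		\
	((rqID) >= 2 * (adapter)->num_rx_queues &&	\
	 (rqID) < 3 * (adapter)->num_rx_queues)

int main(void)
{
	struct adapter a = { .num_rx_queues = 4 };

	assert(VMXNET3_GET_RING_IDX(&a, 2) == 0);	/* 1st ring */
	assert(VMXNET3_GET_RING_IDX(&a, 6) == 1);	/* 2nd ring */
	assert(VMXNET3_GET_RING_IDX(&a, 10) == 0);	/* data ring uses ring 0 */
	assert(VMXNET3_RX_DATA_RING(&a, 10));		/* rqIDs [8,12) are data ring */
	assert(!VMXNET3_RX_DATA_RING(&a, 6));
	return 0;
}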