author    Shrikrishna Khare <skhare@vmware.com>    2016-06-16 13:51:55 -0400
committer David S. Miller <davem@davemloft.net>   2016-06-17 01:37:04 -0400
commit    3c8b3efc061a745d888869dc3462ac4f7dd582d9
tree      1149b4276b76ca224ee99ab78146e643d563bcfb /drivers/net/vmxnet3
parent    f35c7480f81b70f9c3030d96a3807e8faba34cf7
vmxnet3: allow variable length transmit data ring buffer
The vmxnet3 driver supports a transmit data ring: a set of fixed-size buffers used by the driver to copy packet headers. Small packets that fit these buffers are copied into them entirely.

Currently this buffer size is fixed at 128 bytes. This patch extends the transmit data ring implementation to allow variable-length transmit data ring buffers. The length of the buffer is read from the emulation during initialization.

Signed-off-by: Sriram Rangarajan <rangarajans@vmware.com>
Signed-off-by: Shrikrishna Khare <skhare@vmware.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
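In short, on device open a version-3 device is asked for its preferred transmit data ring buffer size, and the value is validated before use. A condensed sketch of the logic added to vmxnet3_open() below (cmd_lock handling omitted; the local variable name is shortened here):

	if (VMXNET3_VERSION_GE_3(adapter)) {
		u16 size;

		/* ask the emulation for its preferred tx data ring buffer size */
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
		size = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);

		/* accept only [128, 2048] bytes, 64-byte aligned;
		 * otherwise fall back to the legacy 128-byte descriptor
		 */
		if (size < VMXNET3_TXDATA_DESC_MIN_SIZE ||
		    size > VMXNET3_TXDATA_DESC_MAX_SIZE ||
		    (size & VMXNET3_TXDATA_DESC_SIZE_MASK))
			adapter->txdata_desc_size = sizeof(struct Vmxnet3_TxDataDesc);
		else
			adapter->txdata_desc_size = size;
	} else {
		adapter->txdata_desc_size = sizeof(struct Vmxnet3_TxDataDesc);
	}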
Diffstat (limited to 'drivers/net/vmxnet3')
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_defs.h     | 12
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c      | 55
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethtool.c  |  9
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h      |  7
4 files changed, 64 insertions, 19 deletions
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
index a26a69df0e3d..701d98944c58 100644
--- a/drivers/net/vmxnet3/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -92,6 +92,7 @@ enum {
 	VMXNET3_CMD_GET_DEV_EXTRA_INFO,
 	VMXNET3_CMD_GET_CONF_INTR,
 	VMXNET3_CMD_GET_RESERVED1,
+	VMXNET3_CMD_GET_TXDATA_DESC_SIZE
 };
 
 /*
@@ -377,6 +378,10 @@ union Vmxnet3_GenericDesc {
 #define VMXNET3_RING_SIZE_ALIGN 32
 #define VMXNET3_RING_SIZE_MASK  (VMXNET3_RING_SIZE_ALIGN - 1)
 
+/* Tx Data Ring buffer size must be a multiple of 64 */
+#define VMXNET3_TXDATA_DESC_SIZE_ALIGN 64
+#define VMXNET3_TXDATA_DESC_SIZE_MASK  (VMXNET3_TXDATA_DESC_SIZE_ALIGN - 1)
+
 /* Max ring size */
 #define VMXNET3_TX_RING_MAX_SIZE   4096
 #define VMXNET3_TC_RING_MAX_SIZE   4096
@@ -384,6 +389,9 @@ union Vmxnet3_GenericDesc {
 #define VMXNET3_RX_RING2_MAX_SIZE  4096
 #define VMXNET3_RC_RING_MAX_SIZE   8192
 
+#define VMXNET3_TXDATA_DESC_MIN_SIZE 128
+#define VMXNET3_TXDATA_DESC_MAX_SIZE 2048
+
 /* a list of reasons for queue stop */
 
 enum {
@@ -470,7 +478,9 @@ struct Vmxnet3_TxQueueConf {
 	__le32		compRingSize; /* # of comp desc */
 	__le32		ddLen;        /* size of driver data */
 	u8		intrIdx;
-	u8		_pad[7];
+	u8		_pad1[1];
+	__le16		txDataRingDescSize;
+	u8		_pad2[4];
 };
 
 
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 507c53d4e09c..4e42eb04a198 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -435,8 +435,8 @@ vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
 		tq->tx_ring.base = NULL;
 	}
 	if (tq->data_ring.base) {
-		dma_free_coherent(&adapter->pdev->dev, tq->data_ring.size *
-				  sizeof(struct Vmxnet3_TxDataDesc),
+		dma_free_coherent(&adapter->pdev->dev,
+				  tq->data_ring.size * tq->txdata_desc_size,
 				  tq->data_ring.base, tq->data_ring.basePA);
 		tq->data_ring.base = NULL;
 	}
@@ -478,8 +478,8 @@ vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
 	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
 	tq->tx_ring.gen = VMXNET3_INIT_GEN;
 
-	memset(tq->data_ring.base, 0, tq->data_ring.size *
-	       sizeof(struct Vmxnet3_TxDataDesc));
+	memset(tq->data_ring.base, 0,
+	       tq->data_ring.size * tq->txdata_desc_size);
 
 	/* reset the tx comp ring contents to 0 and reset comp ring states */
 	memset(tq->comp_ring.base, 0, tq->comp_ring.size *
@@ -514,10 +514,10 @@ vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
 	}
 
 	tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
-			tq->data_ring.size * sizeof(struct Vmxnet3_TxDataDesc),
+			tq->data_ring.size * tq->txdata_desc_size,
 			&tq->data_ring.basePA, GFP_KERNEL);
 	if (!tq->data_ring.base) {
-		netdev_err(adapter->netdev, "failed to allocate data ring\n");
+		netdev_err(adapter->netdev, "failed to allocate tx data ring\n");
 		goto err;
 	}
 
@@ -689,7 +689,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 	if (ctx->copy_size) {
 		ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
 					tq->tx_ring.next2fill *
-					sizeof(struct Vmxnet3_TxDataDesc));
+					tq->txdata_desc_size);
 		ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
 		ctx->sop_txd->dword[3] = 0;
 
@@ -873,8 +873,9 @@ vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 		ctx->eth_ip_hdr_size = 0;
 		ctx->l4_hdr_size = 0;
 		/* copy as much as allowed */
-		ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE
-				     , skb_headlen(skb));
+		ctx->copy_size = min_t(unsigned int,
+				       tq->txdata_desc_size,
+				       skb_headlen(skb));
 	}
 
 	if (skb->len <= VMXNET3_HDR_COPY_SIZE)
@@ -885,7 +886,7 @@ vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 		goto err;
 	}
 
-	if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) {
+	if (unlikely(ctx->copy_size > tq->txdata_desc_size)) {
 		tq->stats.oversized_hdr++;
 		ctx->copy_size = 0;
 		return 0;
@@ -2336,6 +2337,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
 		tqc->ddPA           = cpu_to_le64(tq->buf_info_pa);
 		tqc->txRingSize     = cpu_to_le32(tq->tx_ring.size);
 		tqc->dataRingSize   = cpu_to_le32(tq->data_ring.size);
+		tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size);
 		tqc->compRingSize   = cpu_to_le32(tq->comp_ring.size);
 		tqc->ddLen          = cpu_to_le32(
 					sizeof(struct vmxnet3_tx_buf_info) *
@@ -2689,7 +2691,8 @@ vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
 
 int
 vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
-		      u32 rx_ring_size, u32 rx_ring2_size)
+		      u32 rx_ring_size, u32 rx_ring2_size,
+		      u16 txdata_desc_size)
 {
 	int err = 0, i;
 
@@ -2698,6 +2701,7 @@ vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
 		tq->tx_ring.size   = tx_ring_size;
 		tq->data_ring.size = tx_ring_size;
 		tq->comp_ring.size = tx_ring_size;
+		tq->txdata_desc_size = txdata_desc_size;
 		tq->shared = &adapter->tqd_start[i].ctrl;
 		tq->stopped = true;
 		tq->adapter = adapter;
@@ -2754,9 +2758,34 @@ vmxnet3_open(struct net_device *netdev)
 	for (i = 0; i < adapter->num_tx_queues; i++)
 		spin_lock_init(&adapter->tx_queue[i].tx_lock);
 
-	err = vmxnet3_create_queues(adapter, adapter->tx_ring_size,
+	if (VMXNET3_VERSION_GE_3(adapter)) {
+		unsigned long flags;
+		u16 txdata_desc_size;
+
+		spin_lock_irqsave(&adapter->cmd_lock, flags);
+		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+				       VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
+		txdata_desc_size = VMXNET3_READ_BAR1_REG(adapter,
+							 VMXNET3_REG_CMD);
+		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+
+		if ((txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE) ||
+		    (txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE) ||
+		    (txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK)) {
+			adapter->txdata_desc_size =
+				sizeof(struct Vmxnet3_TxDataDesc);
+		} else {
+			adapter->txdata_desc_size = txdata_desc_size;
+		}
+	} else {
+		adapter->txdata_desc_size = sizeof(struct Vmxnet3_TxDataDesc);
+	}
+
+	err = vmxnet3_create_queues(adapter,
+				    adapter->tx_ring_size,
 				    adapter->rx_ring_size,
-				    adapter->rx_ring2_size);
+				    adapter->rx_ring2_size,
+				    adapter->txdata_desc_size);
 	if (err)
 		goto queue_err;
 
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 163e99c91023..3b70cfef9748 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -396,8 +396,7 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
 		buf[j++] = VMXNET3_GET_ADDR_LO(tq->data_ring.basePA);
 		buf[j++] = VMXNET3_GET_ADDR_HI(tq->data_ring.basePA);
 		buf[j++] = tq->data_ring.size;
-		/* transmit data ring buffer size */
-		buf[j++] = VMXNET3_HDR_COPY_SIZE;
+		buf[j++] = tq->txdata_desc_size;
 
 		buf[j++] = VMXNET3_GET_ADDR_LO(tq->comp_ring.basePA);
 		buf[j++] = VMXNET3_GET_ADDR_HI(tq->comp_ring.basePA);
@@ -591,7 +590,8 @@ vmxnet3_set_ringparam(struct net_device *netdev,
 	vmxnet3_rq_destroy_all(adapter);
 
 	err = vmxnet3_create_queues(adapter, new_tx_ring_size,
-				    new_rx_ring_size, new_rx_ring2_size);
+				    new_rx_ring_size, new_rx_ring2_size,
+				    adapter->txdata_desc_size);
 
 	if (err) {
 		/* failed, most likely because of OOM, try default
@@ -604,7 +604,8 @@ vmxnet3_set_ringparam(struct net_device *netdev,
 		err = vmxnet3_create_queues(adapter,
 					    new_tx_ring_size,
 					    new_rx_ring_size,
-					    new_rx_ring2_size);
+					    new_rx_ring2_size,
+					    adapter->txdata_desc_size);
 		if (err) {
 			netdev_err(netdev, "failed to create queues "
 				   "with default sizes. Closing it\n");
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index de068e92cedb..94010de67e3d 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -241,6 +241,7 @@ struct vmxnet3_tx_queue {
 	int				num_stop;  /* # of times the queue is
 						    * stopped */
 	int				qid;
+	u16				txdata_desc_size;
 } __attribute__((__aligned__(SMP_CACHE_BYTES)));
 
 enum vmxnet3_rx_buf_type {
@@ -363,6 +364,9 @@ struct vmxnet3_adapter {
 	u32 rx_ring_size;
 	u32 rx_ring2_size;
 
+	/* Size of buffer in the data ring */
+	u16 txdata_desc_size;
+
 	struct work_struct work;
 
 	unsigned long state;    /* VMXNET3_STATE_BIT_xxx */
@@ -427,7 +431,8 @@ vmxnet3_set_features(struct net_device *netdev, netdev_features_t features);
 
 int
 vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
-		      u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size);
+		      u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size,
+		      u16 txdata_desc_size);
 
 void vmxnet3_set_ethtool_ops(struct net_device *netdev);
 