author	govindarajulu.v <govindarajulu90@gmail.com>	2013-09-04 01:47:14 -0400
committer	David S. Miller <davem@davemloft.net>	2013-09-05 12:39:32 -0400
commit	822473b6c4e207a8af08518afce5dd2f2e13d765 (patch)
tree	5688fbdb663fabc369ebb2b806d33c6d1d32a78b /drivers/net/ethernet/cisco
parent	3c3769e63301fd92fcaf51870c371583dd0282ce (diff)
driver/net: enic: Add multi tx support for enic
The following patch adds multi tx support for enic.

Signed-off-by: Nishank Trivedi <nistrive@cisco.com>
Signed-off-by: Christian Benvenuti <benve@cisco.com>
Signed-off-by: Govindarajulu Varadarajan <govindarajulu90@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/cisco')
-rw-r--r--	drivers/net/ethernet/cisco/enic/enic.h      |  2
-rw-r--r--	drivers/net/ethernet/cisco/enic/enic_main.c | 36
2 files changed, 25 insertions(+), 13 deletions(-)
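Before the diff itself, a minimal sketch of the transmit-side pattern the patch adopts: pick the TX ring from the skb's queue mapping and stop only that subqueue when it fills. This is illustration, not enic code; my_post_skb(), my_ring_space(), and MY_STOP_THRESH are hypothetical stand-ins for the driver's ring accounting (enic's actual threshold logic is in the hunks below).

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/* Hypothetical helpers standing in for driver-specific ring code. */
	extern void my_post_skb(struct net_device *netdev, u16 txq,
				struct sk_buff *skb);
	extern unsigned int my_ring_space(struct net_device *netdev, u16 txq);
	#define MY_STOP_THRESH	(MAX_SKB_FRAGS + 2)

	/* Hypothetical ndo_start_xmit: honor the queue the stack chose. */
	static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *netdev)
	{
		u16 txq = skb_get_queue_mapping(skb);
		struct netdev_queue *nq = netdev_get_tx_queue(netdev, txq);

		my_post_skb(netdev, txq, skb);	/* post to hardware ring 'txq' */

		/* Stop just this subqueue; the completion path wakes it. */
		if (my_ring_space(netdev, txq) < MY_STOP_THRESH)
			netif_tx_stop_queue(nq);

		return NETDEV_TX_OK;
	}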
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index be167318015a..34b637a797b7 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -37,7 +37,7 @@
 
 #define ENIC_BARS_MAX		6
 
-#define ENIC_WQ_MAX		1
+#define ENIC_WQ_MAX		8
 #define ENIC_RQ_MAX		8
 #define ENIC_CQ_MAX		(ENIC_WQ_MAX + ENIC_RQ_MAX)
 #define ENIC_INTR_MAX		(ENIC_CQ_MAX + 2)
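The enic_main.c hunks that follow are the other half of the pattern: the completion path wakes only the subqueue whose ring has drained, instead of netif_wake_queue() on the whole device. A minimal generic sketch, with my_ring_space() and MY_WAKE_THRESH again as hypothetical stand-ins:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/* Hypothetical stand-ins for the driver's descriptor accounting. */
	extern unsigned int my_ring_space(struct net_device *netdev,
					  unsigned int q);
	#define MY_WAKE_THRESH	(MAX_SKB_FRAGS + 2)

	/* Hypothetical TX completion handler for ring 'q'. */
	static void my_tx_complete(struct net_device *netdev, unsigned int q)
	{
		struct netdev_queue *nq = netdev_get_tx_queue(netdev, q);

		/* ... reclaim descriptors for ring 'q' here ... */

		/* Wake only this subqueue once it has room again. */
		if (netif_tx_queue_stopped(nq) &&
		    my_ring_space(netdev, q) >= MY_WAKE_THRESH)
			netif_wake_subqueue(netdev, q);
	}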
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index bcf15b176f41..1ab3f1809c14 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -128,10 +128,10 @@ static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
 					 completed_index, enic_wq_free_buf,
 					 opaque);
 
-	if (netif_queue_stopped(enic->netdev) &&
+	if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
 	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
 	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
-		netif_wake_queue(enic->netdev);
+		netif_wake_subqueue(enic->netdev, q_number);
 
 	spin_unlock(&enic->wq_lock[q_number]);
 
@@ -292,10 +292,15 @@ static irqreturn_t enic_isr_msix_rq(int irq, void *data)
 static irqreturn_t enic_isr_msix_wq(int irq, void *data)
 {
 	struct enic *enic = data;
-	unsigned int cq = enic_cq_wq(enic, 0);
-	unsigned int intr = enic_msix_wq_intr(enic, 0);
+	unsigned int cq;
+	unsigned int intr;
 	unsigned int wq_work_to_do = -1; /* no limit */
 	unsigned int wq_work_done;
+	unsigned int wq_irq;
+
+	wq_irq = (u32)irq - enic->msix_entry[enic_msix_wq_intr(enic, 0)].vector;
+	cq = enic_cq_wq(enic, wq_irq);
+	intr = enic_msix_wq_intr(enic, wq_irq);
 
 	wq_work_done = vnic_cq_service(&enic->cq[cq],
 		wq_work_to_do, enic_wq_service, NULL);
@@ -511,14 +516,18 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 					      struct net_device *netdev)
 {
 	struct enic *enic = netdev_priv(netdev);
-	struct vnic_wq *wq = &enic->wq[0];
+	struct vnic_wq *wq;
 	unsigned long flags;
+	unsigned int txq_map;
 
 	if (skb->len <= 0) {
 		dev_kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
 
+	txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
+	wq = &enic->wq[txq_map];
+
 	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
 	 * which is very likely.  In the off chance it's going to take
 	 * more than * ENIC_NON_TSO_MAX_DESC, linearize the skb.
@@ -531,23 +540,23 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 		return NETDEV_TX_OK;
 	}
 
-	spin_lock_irqsave(&enic->wq_lock[0], flags);
+	spin_lock_irqsave(&enic->wq_lock[txq_map], flags);
 
 	if (vnic_wq_desc_avail(wq) <
 	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
-		netif_stop_queue(netdev);
+		netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));
 		/* This is a hard error, log it */
 		netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
-		spin_unlock_irqrestore(&enic->wq_lock[0], flags);
+		spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);
 		return NETDEV_TX_BUSY;
 	}
 
 	enic_queue_wq_skb(enic, wq, skb);
 
 	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
-		netif_stop_queue(netdev);
+		netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));
 
-	spin_unlock_irqrestore(&enic->wq_lock[0], flags);
+	spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);
 
 	return NETDEV_TX_OK;
 }
@@ -1369,7 +1378,7 @@ static int enic_open(struct net_device *netdev)
 
 	enic_set_rx_mode(netdev);
 
-	netif_wake_queue(netdev);
+	netif_tx_wake_all_queues(netdev);
 
 	for (i = 0; i < enic->rq_count; i++)
 		napi_enable(&enic->napi[i]);
@@ -2032,7 +2041,8 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 * instance data is initialized to zero.
 	 */
 
-	netdev = alloc_etherdev(sizeof(struct enic));
+	netdev = alloc_etherdev_mqs(sizeof(struct enic),
+				    ENIC_RQ_MAX, ENIC_WQ_MAX);
 	if (!netdev)
 		return -ENOMEM;
 
@@ -2198,6 +2208,8 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_out_dev_close;
 	}
 
+	netif_set_real_num_tx_queues(netdev, enic->wq_count);
+
 	/* Setup notification timer, HW reset task, and wq locks
 	 */
 
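For context, the last two hunks pair up at probe time: allocate the netdev with the compile-time maximum queue counts, then tell the stack how many TX queues the hardware actually provides. A condensed sketch of that sequence, using only the names from the diff above (error paths and the intervening resource discovery trimmed):

	/* Condensed from the enic_probe() hunks above, not a full function. */
	netdev = alloc_etherdev_mqs(sizeof(struct enic),
				    ENIC_RQ_MAX, ENIC_WQ_MAX);
	if (!netdev)
		return -ENOMEM;

	/* ... resource discovery sets enic->wq_count (<= ENIC_WQ_MAX) ... */

	/* Expose only the TX queues the hardware really has. */
	netif_set_real_num_tx_queues(netdev, enic->wq_count);

Sizing the netdev for the maxima up front lets the per-device queue arrays stay fixed, while netif_set_real_num_tx_queues() keeps the stack from mapping skbs onto queues the hardware never configured.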