aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSowmini Varadhan <sowmini.varadhan@oracle.com>2014-10-30 12:46:09 -0400
committerDavid S. Miller <davem@davemloft.net>2014-10-30 19:56:23 -0400
commitd51bffd16d39d7fe8b374a5fdebf16210ca41892 (patch)
treec761942e2ba52f6892521ab8b9292628a2850c02
parent7bd68bfd51f611ba61b3afdbd23ea9580c312bf5 (diff)
sunvnet: Use one Tx queue per vnet_port
Use multiple Tx netdev queues for sunvnet by supporting a one-to-one mapping between vnet_port and Tx queue. Provide a ndo_select_queue indirection (vnet_select_queue()) which selects the queue based on the peer that would be selected in vnet_start_xmit(). Signed-off-by: Sowmini Varadhan <sowmini.varadhan@oracle.com> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c94
-rw-r--r--drivers/net/ethernet/sun/sunvnet.h2
2 files changed, 65 insertions, 31 deletions
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 7ada479f9604..e7bb63b2d525 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -40,6 +40,8 @@ MODULE_DESCRIPTION("Sun LDOM virtual network driver");
40MODULE_LICENSE("GPL"); 40MODULE_LICENSE("GPL");
41MODULE_VERSION(DRV_MODULE_VERSION); 41MODULE_VERSION(DRV_MODULE_VERSION);
42 42
43#define VNET_MAX_TXQS 16
44
43/* Heuristic for the number of times to exponentially backoff and 45/* Heuristic for the number of times to exponentially backoff and
44 * retry sending an LDC trigger when EAGAIN is encountered 46 * retry sending an LDC trigger when EAGAIN is encountered
45 */ 47 */
@@ -551,6 +553,8 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf)
551 struct vnet *vp; 553 struct vnet *vp;
552 u32 end; 554 u32 end;
553 struct vio_net_desc *desc; 555 struct vio_net_desc *desc;
556 struct netdev_queue *txq;
557
554 if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA)) 558 if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
555 return 0; 559 return 0;
556 560
@@ -580,7 +584,8 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf)
580 } 584 }
581 netif_tx_unlock(dev); 585 netif_tx_unlock(dev);
582 586
583 if (unlikely(netif_queue_stopped(dev) && 587 txq = netdev_get_tx_queue(dev, port->q_index);
588 if (unlikely(netif_tx_queue_stopped(txq) &&
584 vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr))) 589 vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr)))
585 return 1; 590 return 1;
586 591
@@ -608,31 +613,23 @@ static int handle_mcast(struct vnet_port *port, void *msgbuf)
608 return 0; 613 return 0;
609} 614}
610 615
611static void maybe_tx_wakeup(struct vnet *vp) 616/* Got back a STOPPED LDC message on port. If the queue is stopped,
617 * wake it up so that we'll send out another START message at the
618 * next TX.
619 */
620static void maybe_tx_wakeup(struct vnet_port *port)
612{ 621{
613 struct net_device *dev = vp->dev; 622 struct netdev_queue *txq;
614 623
615 netif_tx_lock(dev); 624 txq = netdev_get_tx_queue(port->vp->dev, port->q_index);
616 if (likely(netif_queue_stopped(dev))) { 625 __netif_tx_lock(txq, smp_processor_id());
617 struct vnet_port *port; 626 if (likely(netif_tx_queue_stopped(txq))) {
618 int wake = 1; 627 struct vio_dring_state *dr;
619 628
620 rcu_read_lock(); 629 dr = &port->vio.drings[VIO_DRIVER_TX_RING];
621 list_for_each_entry_rcu(port, &vp->port_list, list) { 630 netif_tx_wake_queue(txq);
622 struct vio_dring_state *dr;
623
624 dr = &port->vio.drings[VIO_DRIVER_TX_RING];
625 if (vnet_tx_dring_avail(dr) <
626 VNET_TX_WAKEUP_THRESH(dr)) {
627 wake = 0;
628 break;
629 }
630 }
631 rcu_read_unlock();
632 if (wake)
633 netif_wake_queue(dev);
634 } 631 }
635 netif_tx_unlock(dev); 632 __netif_tx_unlock(txq);
636} 633}
637 634
638static inline bool port_is_up(struct vnet_port *vnet) 635static inline bool port_is_up(struct vnet_port *vnet)
@@ -748,7 +745,7 @@ napi_resume:
748 break; 745 break;
749 } 746 }
750 if (unlikely(tx_wakeup && err != -ECONNRESET)) 747 if (unlikely(tx_wakeup && err != -ECONNRESET))
751 maybe_tx_wakeup(port->vp); 748 maybe_tx_wakeup(port);
752 return npkts; 749 return npkts;
753} 750}
754 751
@@ -953,6 +950,16 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, void **pstart,
953 return skb; 950 return skb;
954} 951}
955 952
953static u16
954vnet_select_queue(struct net_device *dev, struct sk_buff *skb,
955 void *accel_priv, select_queue_fallback_t fallback)
956{
957 struct vnet *vp = netdev_priv(dev);
958 struct vnet_port *port = __tx_port_find(vp, skb);
959
960 return port->q_index;
961}
962
956static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) 963static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
957{ 964{
958 struct vnet *vp = netdev_priv(dev); 965 struct vnet *vp = netdev_priv(dev);
@@ -965,6 +972,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
965 void *start = NULL; 972 void *start = NULL;
966 int nlen = 0; 973 int nlen = 0;
967 unsigned pending = 0; 974 unsigned pending = 0;
975 struct netdev_queue *txq;
968 976
969 skb = vnet_skb_shape(skb, &start, &nlen); 977 skb = vnet_skb_shape(skb, &start, &nlen);
970 if (unlikely(!skb)) 978 if (unlikely(!skb))
@@ -1008,9 +1016,11 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
1008 } 1016 }
1009 1017
1010 dr = &port->vio.drings[VIO_DRIVER_TX_RING]; 1018 dr = &port->vio.drings[VIO_DRIVER_TX_RING];
1019 i = skb_get_queue_mapping(skb);
1020 txq = netdev_get_tx_queue(dev, i);
1011 if (unlikely(vnet_tx_dring_avail(dr) < 1)) { 1021 if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
1012 if (!netif_queue_stopped(dev)) { 1022 if (!netif_tx_queue_stopped(txq)) {
1013 netif_stop_queue(dev); 1023 netif_tx_stop_queue(txq);
1014 1024
1015 /* This is a hard error, log it. */ 1025 /* This is a hard error, log it. */
1016 netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); 1026 netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
@@ -1104,9 +1114,9 @@ ldc_start_done:
1104 1114
1105 dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1); 1115 dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
1106 if (unlikely(vnet_tx_dring_avail(dr) < 1)) { 1116 if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
1107 netif_stop_queue(dev); 1117 netif_tx_stop_queue(txq);
1108 if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr)) 1118 if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
1109 netif_wake_queue(dev); 1119 netif_tx_wake_queue(txq);
1110 } 1120 }
1111 1121
1112 (void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT); 1122 (void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT);
@@ -1139,14 +1149,14 @@ static void vnet_tx_timeout(struct net_device *dev)
1139static int vnet_open(struct net_device *dev) 1149static int vnet_open(struct net_device *dev)
1140{ 1150{
1141 netif_carrier_on(dev); 1151 netif_carrier_on(dev);
1142 netif_start_queue(dev); 1152 netif_tx_start_all_queues(dev);
1143 1153
1144 return 0; 1154 return 0;
1145} 1155}
1146 1156
1147static int vnet_close(struct net_device *dev) 1157static int vnet_close(struct net_device *dev)
1148{ 1158{
1149 netif_stop_queue(dev); 1159 netif_tx_stop_all_queues(dev);
1150 netif_carrier_off(dev); 1160 netif_carrier_off(dev);
1151 1161
1152 return 0; 1162 return 0;
@@ -1420,6 +1430,7 @@ static const struct net_device_ops vnet_ops = {
1420 .ndo_tx_timeout = vnet_tx_timeout, 1430 .ndo_tx_timeout = vnet_tx_timeout,
1421 .ndo_change_mtu = vnet_change_mtu, 1431 .ndo_change_mtu = vnet_change_mtu,
1422 .ndo_start_xmit = vnet_start_xmit, 1432 .ndo_start_xmit = vnet_start_xmit,
1433 .ndo_select_queue = vnet_select_queue,
1423#ifdef CONFIG_NET_POLL_CONTROLLER 1434#ifdef CONFIG_NET_POLL_CONTROLLER
1424 .ndo_poll_controller = vnet_poll_controller, 1435 .ndo_poll_controller = vnet_poll_controller,
1425#endif 1436#endif
@@ -1431,7 +1442,7 @@ static struct vnet *vnet_new(const u64 *local_mac)
1431 struct vnet *vp; 1442 struct vnet *vp;
1432 int err, i; 1443 int err, i;
1433 1444
1434 dev = alloc_etherdev(sizeof(*vp)); 1445 dev = alloc_etherdev_mqs(sizeof(*vp), VNET_MAX_TXQS, 1);
1435 if (!dev) 1446 if (!dev)
1436 return ERR_PTR(-ENOMEM); 1447 return ERR_PTR(-ENOMEM);
1437 dev->needed_headroom = VNET_PACKET_SKIP + 8; 1448 dev->needed_headroom = VNET_PACKET_SKIP + 8;
@@ -1556,6 +1567,25 @@ static void print_version(void)
1556 1567
1557const char *remote_macaddr_prop = "remote-mac-address"; 1568const char *remote_macaddr_prop = "remote-mac-address";
1558 1569
1570static void
1571vnet_port_add_txq(struct vnet_port *port)
1572{
1573 struct vnet *vp = port->vp;
1574 int n;
1575
1576 n = vp->nports++;
1577 n = n & (VNET_MAX_TXQS - 1);
1578 port->q_index = n;
1579 netif_tx_wake_queue(netdev_get_tx_queue(vp->dev, port->q_index));
1580}
1581
1582static void
1583vnet_port_rm_txq(struct vnet_port *port)
1584{
1585 port->vp->nports--;
1586 netif_tx_stop_queue(netdev_get_tx_queue(port->vp->dev, port->q_index));
1587}
1588
1559static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) 1589static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1560{ 1590{
1561 struct mdesc_handle *hp; 1591 struct mdesc_handle *hp;
@@ -1624,6 +1654,7 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1624 list_add_tail_rcu(&port->list, &vp->port_list); 1654 list_add_tail_rcu(&port->list, &vp->port_list);
1625 hlist_add_head_rcu(&port->hash, 1655 hlist_add_head_rcu(&port->hash,
1626 &vp->port_hash[vnet_hashfn(port->raddr)]); 1656 &vp->port_hash[vnet_hashfn(port->raddr)]);
1657 vnet_port_add_txq(port);
1627 spin_unlock_irqrestore(&vp->lock, flags); 1658 spin_unlock_irqrestore(&vp->lock, flags);
1628 1659
1629 dev_set_drvdata(&vdev->dev, port); 1660 dev_set_drvdata(&vdev->dev, port);
@@ -1668,6 +1699,7 @@ static int vnet_port_remove(struct vio_dev *vdev)
1668 1699
1669 synchronize_rcu(); 1700 synchronize_rcu();
1670 del_timer_sync(&port->clean_timer); 1701 del_timer_sync(&port->clean_timer);
1702 vnet_port_rm_txq(port);
1671 netif_napi_del(&port->napi); 1703 netif_napi_del(&port->napi);
1672 vnet_port_free_tx_bufs(port); 1704 vnet_port_free_tx_bufs(port);
1673 vio_ldc_free(&port->vio); 1705 vio_ldc_free(&port->vio);
diff --git a/drivers/net/ethernet/sun/sunvnet.h b/drivers/net/ethernet/sun/sunvnet.h
index c8a862e471dd..cd5d343ea232 100644
--- a/drivers/net/ethernet/sun/sunvnet.h
+++ b/drivers/net/ethernet/sun/sunvnet.h
@@ -61,6 +61,7 @@ struct vnet_port {
61 u32 napi_stop_idx; 61 u32 napi_stop_idx;
62 bool napi_resume; 62 bool napi_resume;
63 int rx_event; 63 int rx_event;
64 u16 q_index;
64}; 65};
65 66
66static inline struct vnet_port *to_vnet_port(struct vio_driver_state *vio) 67static inline struct vnet_port *to_vnet_port(struct vio_driver_state *vio)
@@ -102,6 +103,7 @@ struct vnet {
102 struct list_head list; 103 struct list_head list;
103 u64 local_mac; 104 u64 local_mac;
104 105
106 int nports;
105}; 107};
106 108
107#endif /* _SUNVNET_H */ 109#endif /* _SUNVNET_H */