Diffstat (limited to 'drivers/net/ethernet/sun/sunvnet.c')
 drivers/net/ethernet/sun/sunvnet.c | 38 +++++++++++++++++++++++++++++++++++---
 1 file changed, 35 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index d813bfb1a847..23c89ab5a6ad 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -32,6 +32,11 @@ MODULE_DESCRIPTION("Sun LDOM virtual network driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_MODULE_VERSION);
 
+/* Heuristic for the number of times to exponentially backoff and
+ * retry sending an LDC trigger when EAGAIN is encountered
+ */
+#define VNET_MAX_RETRIES 10
+
 /* Ordered from largest major to lowest */
 static struct vio_version vnet_versions[] = {
         { .major = 1, .minor = 0 },
@@ -260,6 +265,7 @@ static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
                 .state = vio_dring_state,
         };
         int err, delay;
+        int retries = 0;
 
         hdr.seq = dr->snd_nxt;
         delay = 1;
@@ -272,6 +278,13 @@ static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
                 udelay(delay);
                 if ((delay <<= 1) > 128)
                         delay = 128;
+                if (retries++ > VNET_MAX_RETRIES) {
+                        pr_info("ECONNRESET %x:%x:%x:%x:%x:%x\n",
+                                port->raddr[0], port->raddr[1],
+                                port->raddr[2], port->raddr[3],
+                                port->raddr[4], port->raddr[5]);
+                        err = -ECONNRESET;
+                }
         } while (err == -EAGAIN);
 
         return err;
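
Both retry loops touched by this patch (here and in __vnet_tx_trigger() further down) follow the same pattern: exponential backoff on -EAGAIN, now capped at VNET_MAX_RETRIES instead of looping indefinitely. A minimal sketch of that pattern, with a hypothetical ldc_send_attempt() standing in for the driver's real LDC send call; only the backoff/retry structure is taken from the patch:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

#define VNET_MAX_RETRIES 10

/* Hypothetical send primitive: returns 0 on success, -EAGAIN when the
 * channel is temporarily busy.
 */
int ldc_send_attempt(void *buf, size_t len);

static int send_with_backoff(void *buf, size_t len)
{
        int err, delay = 1;
        int retries = 0;

        do {
                err = ldc_send_attempt(buf, len);
                if (err == -EAGAIN) {
                        udelay(delay);
                        /* double the delay each time, capped at 128us */
                        if ((delay <<= 1) > 128)
                                delay = 128;
                        /* give up after VNET_MAX_RETRIES rather than spin forever */
                        if (retries++ > VNET_MAX_RETRIES)
                                break;
                }
        } while (err == -EAGAIN);

        return err;
}
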
@@ -475,8 +488,9 @@ static int handle_mcast(struct vnet_port *port, void *msgbuf)
         return 0;
 }
 
-static void maybe_tx_wakeup(struct vnet *vp)
+static void maybe_tx_wakeup(unsigned long param)
 {
+        struct vnet *vp = (struct vnet *)param;
         struct net_device *dev = vp->dev;
 
         netif_tx_lock(dev);
@@ -573,8 +587,13 @@ static void vnet_event(void *arg, int event)
                         break;
         }
         spin_unlock(&vio->lock);
+        /* Kick off a tasklet to wake the queue. We cannot call
+         * maybe_tx_wakeup directly here because we could deadlock on
+         * netif_tx_lock() with dev_watchdog()
+         */
         if (unlikely(tx_wakeup && err != -ECONNRESET))
-                maybe_tx_wakeup(port->vp);
+                tasklet_schedule(&port->vp->vnet_tx_wakeup);
+
         local_irq_restore(flags);
 }
 
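
The comment added above is the heart of this hunk: vnet_event() runs from the LDC interrupt path, and taking netif_tx_lock() there can deadlock against dev_watchdog(), so the wakeup is deferred to a tasklet that runs later in softirq context. A hedged sketch of that deferral, with the vnet structure reduced to the two members this example needs (the real struct vnet lives in sunvnet.h, which is outside this per-file diff):

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct vnet_sketch {
        struct net_device *dev;
        struct tasklet_struct vnet_tx_wakeup;
};

/* Tasklet callback: runs in softirq context, where taking
 * netif_tx_lock() cannot deadlock with the interrupt path above.
 */
static void sketch_tx_wakeup(unsigned long param)
{
        struct vnet_sketch *vp = (struct vnet_sketch *)param;

        netif_tx_lock(vp->dev);
        if (netif_queue_stopped(vp->dev))
                netif_wake_queue(vp->dev);
        netif_tx_unlock(vp->dev);
}

/* Device setup (cf. vnet_new() below): bind the tasklet to the callback. */
static void sketch_setup(struct vnet_sketch *vp)
{
        tasklet_init(&vp->vnet_tx_wakeup, sketch_tx_wakeup, (unsigned long)vp);
}

/* Event/interrupt path (cf. vnet_event()): schedule instead of locking here. */
static void sketch_event(struct vnet_sketch *vp)
{
        tasklet_schedule(&vp->vnet_tx_wakeup);
}

/* Teardown (cf. vnet_cleanup()): make sure the tasklet has finished. */
static void sketch_teardown(struct vnet_sketch *vp)
{
        tasklet_kill(&vp->vnet_tx_wakeup);
}
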
@@ -593,6 +612,7 @@ static int __vnet_tx_trigger(struct vnet_port *port)
                 .end_idx = (u32) -1,
         };
         int err, delay;
+        int retries = 0;
 
         hdr.seq = dr->snd_nxt;
         delay = 1;
@@ -605,6 +625,8 @@ static int __vnet_tx_trigger(struct vnet_port *port)
                 udelay(delay);
                 if ((delay <<= 1) > 128)
                         delay = 128;
+                if (retries++ > VNET_MAX_RETRIES)
+                        break;
         } while (err == -EAGAIN);
 
         return err;
@@ -691,7 +713,15 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
                 memset(tx_buf+VNET_PACKET_SKIP+skb->len, 0, len - skb->len);
         }
 
-        d->hdr.ack = VIO_ACK_ENABLE;
+        /* We don't rely on the ACKs to free the skb in vnet_start_xmit(),
+         * thus it is safe to not set VIO_ACK_ENABLE for each transmission:
+         * the protocol itself does not require it as long as the peer
+         * sends a VIO_SUBTYPE_ACK for VIO_DRING_STOPPED.
+         *
+         * An ACK for every packet in the ring is expensive as the
+         * sending of LDC messages is slow and affects performance.
+         */
+        d->hdr.ack = VIO_ACK_DISABLE;
         d->size = len;
         d->ncookies = port->tx_bufs[dr->prod].ncookies;
         for (i = 0; i < d->ncookies; i++)
@@ -1046,6 +1076,7 @@ static struct vnet *vnet_new(const u64 *local_mac)
         vp = netdev_priv(dev);
 
         spin_lock_init(&vp->lock);
+        tasklet_init(&vp->vnet_tx_wakeup, maybe_tx_wakeup, (unsigned long)vp);
         vp->dev = dev;
 
         INIT_LIST_HEAD(&vp->port_list);
@@ -1105,6 +1136,7 @@ static void vnet_cleanup(void)
                 vp = list_first_entry(&vnet_list, struct vnet, list);
                 list_del(&vp->list);
                 dev = vp->dev;
+                tasklet_kill(&vp->vnet_tx_wakeup);
                 /* vio_unregister_driver() should have cleaned up port_list */
                 BUG_ON(!list_empty(&vp->port_list));
                 unregister_netdev(dev);
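
One piece is not visible here because the diffstat is limited to sunvnet.c: the tasklet_init()/tasklet_kill() calls above operate on vp->vnet_tx_wakeup, which implies a matching tasklet_struct member added to struct vnet in sunvnet.h. A hedged sketch of what that header-side addition would look like; the surrounding members are assumptions, only the new field name is taken from the code above:

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct vnet {
        spinlock_t lock;                        /* existing */
        struct net_device *dev;                 /* existing */
        /* ... other existing members (port_list, port hash, ...) ... */
        struct tasklet_struct vnet_tx_wakeup;   /* new: scheduled from vnet_event() */
};
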