Diffstat (limited to 'drivers/net/ethernet/sun/sunvnet.c')
-rw-r--r--  drivers/net/ethernet/sun/sunvnet.c  |  90
 1 file changed, 51 insertions(+), 39 deletions(-)
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 3699b98d5b2c..2b10b85d8a08 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -50,6 +50,7 @@ MODULE_VERSION(DRV_MODULE_VERSION);
 #define VNET_MAX_RETRIES	10
 
 static int __vnet_tx_trigger(struct vnet_port *port, u32 start);
+static void vnet_port_reset(struct vnet_port *port);
 
 /* Ordered from largest major to lowest */
 static struct vio_version vnet_versions[] = {
@@ -351,10 +352,15 @@ static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc)
 	unsigned int len = desc->size;
 	unsigned int copy_len;
 	struct sk_buff *skb;
+	int maxlen;
 	int err;
 
 	err = -EMSGSIZE;
-	if (unlikely(len < ETH_ZLEN || len > port->rmtu)) {
+	if (port->tso && port->tsolen > port->rmtu)
+		maxlen = port->tsolen;
+	else
+		maxlen = port->rmtu;
+	if (unlikely(len < ETH_ZLEN || len > maxlen)) {
 		dev->stats.rx_length_errors++;
 		goto out_dropped;
 	}
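Side note on the hunk above: it widens the receive-side length check so that frames up to the negotiated TSO length are accepted when TSO is on, instead of always capping at the remote MTU. A minimal stand-alone sketch of that selection, using a hypothetical struct and made-up rmtu/tsolen values purely for illustration (this is not the driver's vnet_port):

#include <stdbool.h>
#include <stdio.h>

/* Illustration only: models the maxlen selection added to vnet_rx_one().
 * The struct and sample values are hypothetical, not driver state.
 */
struct sample_port {
	bool tso;		/* TSO negotiated with the peer */
	unsigned int rmtu;	/* remote MTU */
	unsigned int tsolen;	/* negotiated TSO ceiling */
};

static unsigned int rx_maxlen(const struct sample_port *p)
{
	/* Same condition as the patch: prefer tsolen when TSO is on
	 * and tsolen exceeds the remote MTU.
	 */
	if (p->tso && p->tsolen > p->rmtu)
		return p->tsolen;
	return p->rmtu;
}

int main(void)
{
	struct sample_port p = { .tso = true, .rmtu = 1514, .tsolen = 65535 };

	printf("maxlen with TSO: %u\n", rx_maxlen(&p));		/* 65535 */
	p.tso = false;
	printf("maxlen without TSO: %u\n", rx_maxlen(&p));	/* 1514 */
	return 0;
}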
@@ -731,9 +737,7 @@ ldc_ctrl:
 		vio_link_state_change(vio, event);
 
 		if (event == LDC_EVENT_RESET) {
-			port->rmtu = 0;
-			port->tso = true;
-			port->tsolen = 0;
+			vnet_port_reset(port);
 			vio_port_up(vio);
 		}
 		port->rx_event = 0;
@@ -929,36 +933,36 @@ static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port,
 
 	*pending = 0;
 
-	txi = dr->prod-1;
-	if (txi < 0)
-		txi = VNET_TX_RING_SIZE-1;
-
+	txi = dr->prod;
 	for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
 		struct vio_net_desc *d;
 
-		d = vio_dring_entry(dr, txi);
-
-		if (d->hdr.state == VIO_DESC_DONE) {
-			if (port->tx_bufs[txi].skb) {
-				BUG_ON(port->tx_bufs[txi].skb->next);
-
-				port->tx_bufs[txi].skb->next = skb;
-				skb = port->tx_bufs[txi].skb;
-				port->tx_bufs[txi].skb = NULL;
-
-				ldc_unmap(port->vio.lp,
-					  port->tx_bufs[txi].cookies,
-					  port->tx_bufs[txi].ncookies);
-			}
-			d->hdr.state = VIO_DESC_FREE;
-		} else if (d->hdr.state == VIO_DESC_READY) {
+		--txi;
+		if (txi < 0)
+			txi = VNET_TX_RING_SIZE-1;
+
+		d = vio_dring_entry(dr, txi);
+
+		if (d->hdr.state == VIO_DESC_READY) {
 			(*pending)++;
-		} else if (d->hdr.state == VIO_DESC_FREE) {
-			break;
+			continue;
 		}
-		--txi;
-		if (txi < 0)
-			txi = VNET_TX_RING_SIZE-1;
+		if (port->tx_bufs[txi].skb) {
+			if (d->hdr.state != VIO_DESC_DONE)
+				pr_notice("invalid ring buffer state %d\n",
+					  d->hdr.state);
+			BUG_ON(port->tx_bufs[txi].skb->next);
+
+			port->tx_bufs[txi].skb->next = skb;
+			skb = port->tx_bufs[txi].skb;
+			port->tx_bufs[txi].skb = NULL;
+
+			ldc_unmap(port->vio.lp,
+				  port->tx_bufs[txi].cookies,
+				  port->tx_bufs[txi].ncookies);
+		} else if (d->hdr.state == VIO_DESC_FREE)
+			break;
+		d->hdr.state = VIO_DESC_FREE;
 	}
 	return skb;
 }
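For readers following the vnet_clean_tx_ring() rewrite above: the index arithmetic moves inside the loop, so the walk starts at dr->prod and visits every slot once, newest first, wrapping past zero; READY descriptors are only counted as pending, and a slot that still holds an skb but is not DONE now triggers a pr_notice. The wrap-around visiting order is easy to misread in diff form, so here is a tiny self-contained sketch of just that walk, with a hypothetical ring size and producer index standing in for VNET_TX_RING_SIZE and dr->prod:

#include <stdio.h>

/* Illustration only: mimics the index walk in the rewritten
 * vnet_clean_tx_ring(). RING_SIZE and prod are made-up values.
 */
#define RING_SIZE 8

int main(void)
{
	int prod = 3;	/* hypothetical producer index */
	int txi = prod;
	int i;

	/* Walk every slot once, newest first, wrapping below zero. */
	for (i = 0; i < RING_SIZE; ++i) {
		--txi;
		if (txi < 0)
			txi = RING_SIZE - 1;
		printf("visit slot %d\n", txi);	/* 2, 1, 0, 7, 6, 5, 4, 3 */
	}
	return 0;
}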
@@ -1633,16 +1637,9 @@ static void vnet_port_free_tx_bufs(struct vnet_port *port)
 	int i;
 
 	dr = &port->vio.drings[VIO_DRIVER_TX_RING];
-	if (dr->base) {
-		ldc_free_exp_dring(port->vio.lp, dr->base,
-				   (dr->entry_size * dr->num_entries),
-				   dr->cookies, dr->ncookies);
-		dr->base = NULL;
-		dr->entry_size = 0;
-		dr->num_entries = 0;
-		dr->pending = 0;
-		dr->ncookies = 0;
-	}
+
+	if (dr->base == NULL)
+		return;
 
 	for (i = 0; i < VNET_TX_RING_SIZE; i++) {
 		struct vio_net_desc *d;
@@ -1652,8 +1649,6 @@ static void vnet_port_free_tx_bufs(struct vnet_port *port)
 			continue;
 
 		d = vio_dring_entry(dr, i);
-		if (d->hdr.state == VIO_DESC_READY)
-			pr_warn("active transmit buffers freed\n");
 
 		ldc_unmap(port->vio.lp,
 			  port->tx_bufs[i].cookies,
@@ -1662,6 +1657,23 @@ static void vnet_port_free_tx_bufs(struct vnet_port *port)
 		port->tx_bufs[i].skb = NULL;
 		d->hdr.state = VIO_DESC_FREE;
 	}
+	ldc_free_exp_dring(port->vio.lp, dr->base,
+			   (dr->entry_size * dr->num_entries),
+			   dr->cookies, dr->ncookies);
+	dr->base = NULL;
+	dr->entry_size = 0;
+	dr->num_entries = 0;
+	dr->pending = 0;
+	dr->ncookies = 0;
+}
+
+static void vnet_port_reset(struct vnet_port *port)
+{
+	del_timer(&port->clean_timer);
+	vnet_port_free_tx_bufs(port);
+	port->rmtu = 0;
+	port->tso = true;
+	port->tsolen = 0;
+}
 
 static int vnet_port_alloc_tx_ring(struct vnet_port *port)
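As a closing note on the vnet_port_free_tx_bufs() hunks: the function now bails out early when no dring was ever allocated, unmaps each slot first, and only then releases the exported dring, which makes it safe for the new vnet_port_reset() helper to call it from the LDC reset path even if the ring was never set up. A stand-alone sketch of that control flow, with stub types and helpers that only stand in for the driver's real ones:

#include <stdio.h>

/* Hypothetical model of the restructured vnet_port_free_tx_bufs():
 * early-return guard, per-slot cleanup, ring freed last. Types and
 * helpers below are illustrative, not driver API.
 */
#define RING_SIZE 4

struct fake_ring {
	void *base;			/* NULL when never allocated */
	void *slot_skb[RING_SIZE];	/* per-slot buffer, may be NULL */
};

static void unmap_slot(struct fake_ring *r, int i)
{
	printf("unmap slot %d\n", i);
	r->slot_skb[i] = NULL;
}

static void free_ring(struct fake_ring *r)
{
	printf("free ring %p\n", r->base);
	r->base = NULL;
}

static void free_tx_bufs(struct fake_ring *r)
{
	int i;

	if (r->base == NULL)		/* early-return guard, as in the patch */
		return;

	for (i = 0; i < RING_SIZE; i++)	/* per-slot cleanup comes first */
		if (r->slot_skb[i])
			unmap_slot(r, i);

	free_ring(r);			/* the ring itself is released last */
}

int main(void)
{
	struct fake_ring r = { .base = &r, .slot_skb = { &r, NULL, &r, NULL } };

	free_tx_bufs(&r);	/* unmaps slots 0 and 2, then frees the ring */
	free_tx_bufs(&r);	/* second call is a no-op: base is now NULL */
	return 0;
}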