diff options
author | Michael Ellerman <michael@ellerman.id.au> | 2005-08-31 21:29:19 -0400 |
---|---|---|
committer | Jeff Garzik <jgarzik@pobox.com> | 2005-08-31 22:42:45 -0400 |
commit | db5e8718eac0b8166d6fd05b1ed7f8114c243988 (patch) | |
tree | e0adc928ffa6735e228c6fd2867381f8442875e7 /drivers/net | |
parent | e0808494ff44d5cedcaf286bb8a93d08e8d9af49 (diff) |
[PATCH] iseries_veth: Fix bogus counting of TX errors
There are a number of problems with the way iseries_veth counts TX errors.
Firstly, it counts conditions that aren't really errors as TX errors. This
includes if we don't have a connection struct for the other LPAR, or if the
other LPAR is currently down (or just doesn't want to talk to us). Neither
of these should count as TX errors.
Secondly, it counts one TX error for each LPAR that fails to accept the packet.
This can lead to TX error counts higher than the total number of packets sent
through the interface. This is confusing for users.
This patch fixes that behaviour. The non-error conditions are no longer
counted, and we introduce a new and I think saner meaning to the TX counts.
If a packet is successfully transmitted to any LPAR then it is counted as
transmitted and tx_packets is incremented by 1.
If there is an error transmitting a packet to any LPAR then that is counted
as one error, ie. tx_errors is incremented by 1.
Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Jeff Garzik <jgarzik@pobox.com>
Diffstat (limited to 'drivers/net')
-rw-r--r-- | drivers/net/iseries_veth.c | 47 |
1 files changed, 19 insertions, 28 deletions
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c index eaff17cc9fb8..b945bf02d257 100644 --- a/drivers/net/iseries_veth.c +++ b/drivers/net/iseries_veth.c | |||
@@ -938,31 +938,25 @@ static int veth_transmit_to_one(struct sk_buff *skb, HvLpIndex rlp, | |||
938 | struct veth_port *port = (struct veth_port *) dev->priv; | 938 | struct veth_port *port = (struct veth_port *) dev->priv; |
939 | HvLpEvent_Rc rc; | 939 | HvLpEvent_Rc rc; |
940 | struct veth_msg *msg = NULL; | 940 | struct veth_msg *msg = NULL; |
941 | int err = 0; | ||
942 | unsigned long flags; | 941 | unsigned long flags; |
943 | 942 | ||
944 | if (! cnx) { | 943 | if (! cnx) |
945 | port->stats.tx_errors++; | ||
946 | dev_kfree_skb(skb); | ||
947 | return 0; | 944 | return 0; |
948 | } | ||
949 | 945 | ||
950 | spin_lock_irqsave(&cnx->lock, flags); | 946 | spin_lock_irqsave(&cnx->lock, flags); |
951 | 947 | ||
952 | if (! (cnx->state & VETH_STATE_READY)) | 948 | if (! (cnx->state & VETH_STATE_READY)) |
953 | goto drop; | 949 | goto no_error; |
954 | 950 | ||
955 | if ((skb->len - 14) > VETH_MAX_MTU) | 951 | if ((skb->len - ETH_HLEN) > VETH_MAX_MTU) |
956 | goto drop; | 952 | goto drop; |
957 | 953 | ||
958 | msg = veth_stack_pop(cnx); | 954 | msg = veth_stack_pop(cnx); |
959 | 955 | if (! msg) | |
960 | if (! msg) { | ||
961 | err = 1; | ||
962 | goto drop; | 956 | goto drop; |
963 | } | ||
964 | 957 | ||
965 | msg->in_use = 1; | 958 | msg->in_use = 1; |
959 | msg->skb = skb_get(skb); | ||
966 | 960 | ||
967 | msg->data.addr[0] = dma_map_single(port->dev, skb->data, | 961 | msg->data.addr[0] = dma_map_single(port->dev, skb->data, |
968 | skb->len, DMA_TO_DEVICE); | 962 | skb->len, DMA_TO_DEVICE); |
@@ -970,9 +964,6 @@ static int veth_transmit_to_one(struct sk_buff *skb, HvLpIndex rlp, | |||
970 | if (dma_mapping_error(msg->data.addr[0])) | 964 | if (dma_mapping_error(msg->data.addr[0])) |
971 | goto recycle_and_drop; | 965 | goto recycle_and_drop; |
972 | 966 | ||
973 | /* Is it really necessary to check the length and address | ||
974 | * fields of the first entry here? */ | ||
975 | msg->skb = skb; | ||
976 | msg->dev = port->dev; | 967 | msg->dev = port->dev; |
977 | msg->data.len[0] = skb->len; | 968 | msg->data.len[0] = skb->len; |
978 | msg->data.eofmask = 1 << VETH_EOF_SHIFT; | 969 | msg->data.eofmask = 1 << VETH_EOF_SHIFT; |
@@ -992,43 +983,43 @@ static int veth_transmit_to_one(struct sk_buff *skb, HvLpIndex rlp, | |||
992 | if (veth_stack_is_empty(cnx)) | 983 | if (veth_stack_is_empty(cnx)) |
993 | veth_stop_queues(cnx); | 984 | veth_stop_queues(cnx); |
994 | 985 | ||
986 | no_error: | ||
995 | spin_unlock_irqrestore(&cnx->lock, flags); | 987 | spin_unlock_irqrestore(&cnx->lock, flags); |
996 | return 0; | 988 | return 0; |
997 | 989 | ||
998 | recycle_and_drop: | 990 | recycle_and_drop: |
999 | /* we free the skb below, so tell veth_recycle_msg() not to. */ | ||
1000 | msg->skb = NULL; | ||
1001 | veth_recycle_msg(cnx, msg); | 991 | veth_recycle_msg(cnx, msg); |
1002 | drop: | 992 | drop: |
1003 | port->stats.tx_errors++; | ||
1004 | dev_kfree_skb(skb); | ||
1005 | spin_unlock_irqrestore(&cnx->lock, flags); | 993 | spin_unlock_irqrestore(&cnx->lock, flags); |
1006 | return err; | 994 | return 1; |
1007 | } | 995 | } |
1008 | 996 | ||
1009 | static HvLpIndexMap veth_transmit_to_many(struct sk_buff *skb, | 997 | static void veth_transmit_to_many(struct sk_buff *skb, |
1010 | HvLpIndexMap lpmask, | 998 | HvLpIndexMap lpmask, |
1011 | struct net_device *dev) | 999 | struct net_device *dev) |
1012 | { | 1000 | { |
1013 | struct veth_port *port = (struct veth_port *) dev->priv; | 1001 | struct veth_port *port = (struct veth_port *) dev->priv; |
1014 | int i; | 1002 | int i, success, error; |
1015 | int rc; | 1003 | |
1004 | success = error = 0; | ||
1016 | 1005 | ||
1017 | for (i = 0; i < HVMAXARCHITECTEDLPS; i++) { | 1006 | for (i = 0; i < HVMAXARCHITECTEDLPS; i++) { |
1018 | if ((lpmask & (1 << i)) == 0) | 1007 | if ((lpmask & (1 << i)) == 0) |
1019 | continue; | 1008 | continue; |
1020 | 1009 | ||
1021 | rc = veth_transmit_to_one(skb_get(skb), i, dev); | 1010 | if (veth_transmit_to_one(skb, i, dev)) |
1022 | if (! rc) | 1011 | error = 1; |
1023 | lpmask &= ~(1<<i); | 1012 | else |
1013 | success = 1; | ||
1024 | } | 1014 | } |
1025 | 1015 | ||
1026 | if (! lpmask) { | 1016 | if (error) |
1017 | port->stats.tx_errors++; | ||
1018 | |||
1019 | if (success) { | ||
1027 | port->stats.tx_packets++; | 1020 | port->stats.tx_packets++; |
1028 | port->stats.tx_bytes += skb->len; | 1021 | port->stats.tx_bytes += skb->len; |
1029 | } | 1022 | } |
1030 | |||
1031 | return lpmask; | ||
1032 | } | 1023 | } |
1033 | 1024 | ||
1034 | static int veth_start_xmit(struct sk_buff *skb, struct net_device *dev) | 1025 | static int veth_start_xmit(struct sk_buff *skb, struct net_device *dev) |