author		Ian Campbell <Ian.Campbell@citrix.com>		2011-01-26 23:14:03 -0500
committer	David S. Miller <davem@davemloft.net>		2011-01-27 17:17:35 -0500
commit		e0ce4af920eb028f38bfd680b1d733f4c7a0b7cf (patch)
tree		62d1e199745c9746780af51ef33c686892095a20 /drivers/net/xen-netfront.c
parent		389f2a18c6a2a5531ac5a155c3ba25784065b1cb (diff)
xen: netfront: handle incoming GSO SKBs which are not CHECKSUM_PARTIAL
The Linux network stack expects all GSO SKBs to have ip_summed ==
CHECKSUM_PARTIAL (which implies that the frame contains a partial
checksum), and the Xen network ring protocol similarly expects an SKB
which has GSO set to also have NETRXF_csum_blank (which also implies a
partial checksum).

However, there have been cases of buggy guests which mark a frame as
GSO but do not set csum_blank. If we detect that we are receiving such
a frame (which manifests as ip_summed != PARTIAL && skb_is_gso) then
force the SKB to partial and recalculate the checksum, since we cannot
rely on the peer having done so if they have not set csum_blank.

Add an ethtool stat to track occurrences of this event.
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: David Miller <davem@davemloft.net>
Cc: xen-devel@lists.xensource.com
Cc: netdev@vger.kernel.org
Signed-off-by: David S. Miller <davem@davemloft.net>
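
[Editor's note, not part of the patch] The new counter is exposed through the standard ethtool statistics hooks added below (get_sset_count, get_strings, get_ethtool_stats), so inside the guest it can be read with `ethtool -S <dev>` or directly via the SIOCETHTOOL ioctl. The following standalone userspace sketch is illustrative only: it assumes a hypothetical interface name of "eth0" (or one passed on the command line) and simply dumps every statistic the driver exports, which would include rx_gso_checksum_fixup once this change is applied.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	/* Hypothetical default interface name; pass the real vif name instead. */
	const char *ifname = argc > 1 ? argv[1] : "eth0";
	struct ethtool_drvinfo drvinfo = { .cmd = ETHTOOL_GDRVINFO };
	struct ethtool_gstrings *strings;
	struct ethtool_stats *stats;
	struct ifreq ifr;
	unsigned int i, n;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	/* Ask the driver how many stats it exports (served by get_sset_count). */
	ifr.ifr_data = (char *)&drvinfo;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GDRVINFO");
		return 1;
	}
	n = drvinfo.n_stats;

	strings = calloc(1, sizeof(*strings) + (size_t)n * ETH_GSTRING_LEN);
	stats = calloc(1, sizeof(*stats) + (size_t)n * sizeof(__u64));
	if (!strings || !stats)
		return 1;

	/* Stat names come from get_strings(ETH_SS_STATS)... */
	strings->cmd = ETHTOOL_GSTRINGS;
	strings->string_set = ETH_SS_STATS;
	strings->len = n;
	ifr.ifr_data = (char *)strings;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GSTRINGS");
		return 1;
	}

	/* ...and the matching counter values from get_ethtool_stats(). */
	stats->cmd = ETHTOOL_GSTATS;
	stats->n_stats = n;
	ifr.ifr_data = (char *)stats;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GSTATS");
		return 1;
	}

	for (i = 0; i < n; i++)
		printf("%.*s: %llu\n", ETH_GSTRING_LEN,
		       (const char *)strings->data + (size_t)i * ETH_GSTRING_LEN,
		       (unsigned long long)stats->data[i]);

	free(strings);
	free(stats);
	close(fd);
	return 0;
}

In practice `ethtool -S` on the guest's network interface reports the same information; the sketch only spells out the ETHTOOL_GSTRINGS/ETHTOOL_GSTATS round trip that the new callbacks serve.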
Diffstat (limited to 'drivers/net/xen-netfront.c')
-rw-r--r--	drivers/net/xen-netfront.c	96
1 file changed, 88 insertions, 8 deletions
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 546de5749824..da1f12120346 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -120,6 +120,9 @@ struct netfront_info {
 	unsigned long rx_pfn_array[NET_RX_RING_SIZE];
 	struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
 	struct mmu_update rx_mmu[NET_RX_RING_SIZE];
+
+	/* Statistics */
+	int rx_gso_checksum_fixup;
 };
 
 struct netfront_rx_info {
@@ -770,11 +773,29 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
 	return cons;
 }
 
-static int skb_checksum_setup(struct sk_buff *skb)
+static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
 {
 	struct iphdr *iph;
 	unsigned char *th;
 	int err = -EPROTO;
+	int recalculate_partial_csum = 0;
+
+	/*
+	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
+	 * peers can fail to set NETRXF_csum_blank when sending a GSO
+	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
+	 * recalculate the partial checksum.
+	 */
+	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
+		struct netfront_info *np = netdev_priv(dev);
+		np->rx_gso_checksum_fixup++;
+		skb->ip_summed = CHECKSUM_PARTIAL;
+		recalculate_partial_csum = 1;
+	}
+
+	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return 0;
 
 	if (skb->protocol != htons(ETH_P_IP))
 		goto out;
@@ -788,9 +809,23 @@ static int skb_checksum_setup(struct sk_buff *skb)
 	switch (iph->protocol) {
 	case IPPROTO_TCP:
 		skb->csum_offset = offsetof(struct tcphdr, check);
+
+		if (recalculate_partial_csum) {
+			struct tcphdr *tcph = (struct tcphdr *)th;
+			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+							 skb->len - iph->ihl*4,
+							 IPPROTO_TCP, 0);
+		}
 		break;
 	case IPPROTO_UDP:
 		skb->csum_offset = offsetof(struct udphdr, check);
+
+		if (recalculate_partial_csum) {
+			struct udphdr *udph = (struct udphdr *)th;
+			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+							 skb->len - iph->ihl*4,
+							 IPPROTO_UDP, 0);
+		}
 		break;
 	default:
 		if (net_ratelimit())
@@ -829,13 +864,11 @@ static int handle_incoming_queue(struct net_device *dev,
 		/* Ethernet work: Delayed to here as it peeks the header. */
 		skb->protocol = eth_type_trans(skb, dev);
 
-		if (skb->ip_summed == CHECKSUM_PARTIAL) {
-			if (skb_checksum_setup(skb)) {
-				kfree_skb(skb);
-				packets_dropped++;
-				dev->stats.rx_errors++;
-				continue;
-			}
+		if (checksum_setup(dev, skb)) {
+			kfree_skb(skb);
+			packets_dropped++;
+			dev->stats.rx_errors++;
+			continue;
 		}
 
 		dev->stats.rx_packets++;
@@ -1632,12 +1665,59 @@ static void netback_changed(struct xenbus_device *dev,
 	}
 }
 
+static const struct xennet_stat {
+	char name[ETH_GSTRING_LEN];
+	u16 offset;
+} xennet_stats[] = {
+	{
+		"rx_gso_checksum_fixup",
+		offsetof(struct netfront_info, rx_gso_checksum_fixup)
+	},
+};
+
+static int xennet_get_sset_count(struct net_device *dev, int string_set)
+{
+	switch (string_set) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(xennet_stats);
+	default:
+		return -EINVAL;
+	}
+}
+
+static void xennet_get_ethtool_stats(struct net_device *dev,
+				     struct ethtool_stats *stats, u64 * data)
+{
+	void *np = netdev_priv(dev);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
+		data[i] = *(int *)(np + xennet_stats[i].offset);
+}
+
+static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
+{
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
+			memcpy(data + i * ETH_GSTRING_LEN,
+			       xennet_stats[i].name, ETH_GSTRING_LEN);
+		break;
+	}
+}
+
 static const struct ethtool_ops xennet_ethtool_ops =
 {
 	.set_tx_csum = ethtool_op_set_tx_csum,
 	.set_sg = xennet_set_sg,
 	.set_tso = xennet_set_tso,
 	.get_link = ethtool_op_get_link,
+
+	.get_sset_count = xennet_get_sset_count,
+	.get_ethtool_stats = xennet_get_ethtool_stats,
+	.get_strings = xennet_get_strings,
 };
 
 #ifdef CONFIG_SYSFS