author	Santiago Leon <santil@us.ibm.com>	2008-08-20 15:09:19 -0400
committer	Jeff Garzik <jgarzik@redhat.com>	2008-08-27 05:36:57 -0400
commit	45e15bb734e4e559ab32f26196e44d5cf1417f10 (patch)
tree	e71322bc295822ad97af55b0298668eab093cfec /drivers/net
parent	c213f286f2cf6590f83f541f66a625ee8d20c6f4 (diff)
ibmveth: fix bad UDP checksums
This patch fixes an ibmveth bug where bad UDP checksums are transmitted
when checksum offloading is enabled.

The hypervisor does checksum offloading only on TCP packets, so ibmveth calls
skb_checksum_help() for any other protocol. The bug happens because
the packet is modified after the DMA map, so a memory barrier would be
needed before making the hypervisor call. Reordering the code so that the
DMA map happens after skb_checksum_help() has the additional advantage of
fixing a DMA map leak if skb_checksum_help() were to fail.
Signed-off-by: Santiago Leon <santil@us.ibm.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
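
[Editor's note: for context, a minimal sketch of the corrected ordering in
ibmveth_start_xmit(); this is a simplified illustration under assumptions,
not the driver's literal code. The out label and the surrounding error
handling are hypothetical; the helper calls shown (skb_checksum_help(),
dma_map_single(), dma_mapping_error(), skb_copy_from_linear_data(), wmb())
are real kernel APIs used by the patched function.]

	/*
	 * Compute the checksum in software for non-TCP packets first:
	 * skb_checksum_help() writes into skb->data, so it must run
	 * before the buffer is handed to the device.
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb))
		goto out;	/* nothing mapped yet, so nothing can leak */

	/* Map the packet for DMA only once its contents are final. */
	data_dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				       skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&adapter->vdev->dev, data_dma_addr)) {
		/* Fall back to the permanently mapped bounce buffer... */
		skb_copy_from_linear_data(skb, adapter->bounce_buffer,
					  skb->len);
		desc.fields.address = adapter->bounce_buffer_dma;
		/*
		 * ...and order the CPU stores to that buffer before the
		 * hypervisor call that will read it.
		 */
		wmb();
	} else
		desc.fields.address = data_dma_addr;

The wmb() in the bounce-buffer path is the memory barrier the message refers
to: the bounce buffer stays DMA-mapped, so stores into it must be ordered
before the hypervisor reads the buffer.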
Diffstat (limited to 'drivers/net')
 drivers/net/ibmveth.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index a03fe1fb61ca..c2d57f836088 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -904,8 +904,6 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	unsigned long data_dma_addr;
 
 	desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len;
-	data_dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
-				       skb->len, DMA_TO_DEVICE);
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 	    ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) {
@@ -924,6 +922,8 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 		buf[1] = 0;
 	}
 
+	data_dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
+				       skb->len, DMA_TO_DEVICE);
 	if (dma_mapping_error(&adapter->vdev->dev, data_dma_addr)) {
 		if (!firmware_has_feature(FW_FEATURE_CMO))
 			ibmveth_error_printk("tx: unable to map xmit buffer\n");
@@ -932,6 +932,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 		desc.fields.address = adapter->bounce_buffer_dma;
 		tx_map_failed++;
 		used_bounce = 1;
+		wmb();
 	} else
 		desc.fields.address = data_dma_addr;
 