path: root/drivers/net/mv643xx_eth.c
author     Lennert Buytenhek <buytenh@wantstofly.org>   2008-09-18 22:05:00 -0400
committer  Lennert Buytenhek <buytenh@marvell.com>      2008-09-18 23:13:31 -0400
commit     4df89bd5a5fc33860f15f5f001a78f2b3f150725 (patch)
tree       b250546aa4e801a207b1df7acfcf730fa9f426ee /drivers/net/mv643xx_eth.c
parent     170e7108a368c52df1ec466966fd1db6e45a7ad2 (diff)
mv643xx_eth: deal with unexpected ethernet header sizes
When the IP header doesn't start 14, 18, 22 or 26 bytes into the packet
(which are the only four cases that the hardware can deal with if asked
to do IP checksumming on transmit), invoke the software checksum helper
instead of letting the packet go out with a corrupt checksum inserted
into the packet in the wrong place.

Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
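For readers unfamiliar with the hardware constraint, the sketch below is a minimal, standalone illustration of the check this patch introduces; it is not driver code. tag_bytes counts the VLAN/tag bytes between the plain 14-byte ethernet header and the IP header, the hardware only copes with 0, 4, 8 or 12 such bytes, and any other value falls back to skb_checksum_help(). The two MAC_HDR_EXTRA_* flag values are made-up placeholders, not the driver's real definitions.

	/*
	 * Standalone illustration of the header-size check added in this
	 * patch; not driver code.  ETH_HLEN matches the kernel's value,
	 * the two flag values stand in for MAC_HDR_EXTRA_{4,8}_BYTES.
	 */
	#include <stdio.h>

	#define ETH_HLEN		14
	#define MAC_HDR_EXTRA_4_BYTES	0x01	/* placeholder value */
	#define MAC_HDR_EXTRA_8_BYTES	0x02	/* placeholder value */

	int main(void)
	{
		int ip_hdr_offset;

		for (ip_hdr_offset = 14; ip_hdr_offset <= 30; ip_hdr_offset += 2) {
			int tag_bytes = ip_hdr_offset - ETH_HLEN;
			int flags = 0;

			if (tag_bytes & ~12) {
				/* not 0/4/8/12 tag bytes: hardware can't checksum,
				 * so the driver calls skb_checksum_help() instead */
				printf("IP header at %2d bytes: software checksum\n",
				       ip_hdr_offset);
				continue;
			}

			/* bits 2 and 3 of tag_bytes map directly onto the flags */
			if (tag_bytes & 4)
				flags |= MAC_HDR_EXTRA_4_BYTES;
			if (tag_bytes & 8)
				flags |= MAC_HDR_EXTRA_8_BYTES;

			printf("IP header at %2d bytes: hardware checksum, flags 0x%02x\n",
			       ip_hdr_offset, flags);
		}

		return 0;
	}

Only offsets of 14, 18, 22 and 26 bytes survive the tag_bytes & ~12 test, which is exactly the set named in the commit message above.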
Diffstat (limited to 'drivers/net/mv643xx_eth.c')
-rw-r--r--   drivers/net/mv643xx_eth.c   95
1 file changed, 47 insertions(+), 48 deletions(-)
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 94c13be292a3..9522c449ccea 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -699,79 +699,74 @@ static inline __be16 sum16_as_be(__sum16 sum)
 	return (__force __be16)sum;
 }
 
-static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
+static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
 {
 	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 	int nr_frags = skb_shinfo(skb)->nr_frags;
 	int tx_index;
 	struct tx_desc *desc;
 	u32 cmd_sts;
+	u16 l4i_chk;
 	int length;
 
 	cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
-
-	tx_index = txq_alloc_desc_index(txq);
-	desc = &txq->tx_desc_area[tx_index];
-
-	if (nr_frags) {
-		txq_submit_frag_skb(txq, skb);
-		length = skb_headlen(skb);
-	} else {
-		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
-		length = skb->len;
-	}
-
-	desc->byte_cnt = length;
-	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
+	l4i_chk = 0;
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		int mac_hdr_len;
+		int tag_bytes;
 
 		BUG_ON(skb->protocol != htons(ETH_P_IP) &&
 		       skb->protocol != htons(ETH_P_8021Q));
 
-		cmd_sts |= GEN_TCP_UDP_CHECKSUM |
-			   GEN_IP_V4_CHECKSUM   |
-			   ip_hdr(skb)->ihl << TX_IHL_SHIFT;
+		tag_bytes = (void *)ip_hdr(skb) - (void *)skb->data - ETH_HLEN;
+		if (unlikely(tag_bytes & ~12)) {
+			if (skb_checksum_help(skb) == 0)
+				goto no_csum;
+			kfree_skb(skb);
+			return 1;
+		}
 
-		mac_hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
-		switch (mac_hdr_len - ETH_HLEN) {
-		case 0:
-			break;
-		case 4:
-			cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
-			break;
-		case 8:
-			cmd_sts |= MAC_HDR_EXTRA_8_BYTES;
-			break;
-		case 12:
+		if (tag_bytes & 4)
 			cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
+		if (tag_bytes & 8)
 			cmd_sts |= MAC_HDR_EXTRA_8_BYTES;
-			break;
-		default:
-			if (net_ratelimit())
-				dev_printk(KERN_ERR, &txq_to_mp(txq)->dev->dev,
-					   "mac header length is %d?!\n", mac_hdr_len);
-			break;
-		}
+
+		cmd_sts |= GEN_TCP_UDP_CHECKSUM |
+			   GEN_IP_V4_CHECKSUM   |
+			   ip_hdr(skb)->ihl << TX_IHL_SHIFT;
 
 		switch (ip_hdr(skb)->protocol) {
 		case IPPROTO_UDP:
 			cmd_sts |= UDP_FRAME;
-			desc->l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
+			l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
 			break;
 		case IPPROTO_TCP:
-			desc->l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
+			l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
 			break;
 		default:
 			BUG();
 		}
 	} else {
+no_csum:
 		/* Errata BTS #50, IHL must be 5 if no HW checksum */
 		cmd_sts |= 5 << TX_IHL_SHIFT;
-		desc->l4i_chk = 0;
 	}
 
+	tx_index = txq_alloc_desc_index(txq);
+	desc = &txq->tx_desc_area[tx_index];
+
+	if (nr_frags) {
+		txq_submit_frag_skb(txq, skb);
+		length = skb_headlen(skb);
+	} else {
+		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
+		length = skb->len;
+	}
+
+	desc->l4i_chk = l4i_chk;
+	desc->byte_cnt = length;
+	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
+
 	__skb_queue_tail(&txq->tx_skb, skb);
 
 	/* ensure all other descriptors are written before first cmd_sts */
@@ -786,6 +781,8 @@ static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
 	txq_enable(txq);
 
 	txq->tx_desc_count += nr_frags + 1;
+
+	return 0;
 }
 
 static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -794,7 +791,6 @@ static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	int queue;
 	struct tx_queue *txq;
 	struct netdev_queue *nq;
-	int entries_left;
 
 	queue = skb_get_queue_mapping(skb);
 	txq = mp->txq + queue;
@@ -815,14 +811,17 @@ static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_OK;
 	}
 
-	txq_submit_skb(txq, skb);
-	txq->tx_bytes += skb->len;
-	txq->tx_packets++;
-	dev->trans_start = jiffies;
+	if (!txq_submit_skb(txq, skb)) {
+		int entries_left;
+
+		txq->tx_bytes += skb->len;
+		txq->tx_packets++;
+		dev->trans_start = jiffies;
 
-	entries_left = txq->tx_ring_size - txq->tx_desc_count;
-	if (entries_left < MAX_SKB_FRAGS + 1)
-		netif_tx_stop_queue(nq);
+		entries_left = txq->tx_ring_size - txq->tx_desc_count;
+		if (entries_left < MAX_SKB_FRAGS + 1)
+			netif_tx_stop_queue(nq);
+	}
 
 	return NETDEV_TX_OK;
 }