author	David S. Miller <davem@davemloft.net>	2011-01-27 17:59:08 -0500
committer	David S. Miller <davem@davemloft.net>	2011-01-27 17:59:08 -0500
commit	1397e171f143878dd16ad5f8c99f7b9440cc8911 (patch)
tree	8aadcac291a2550028950bcaa5d8d0c0d6bedc45 /drivers
parent	144001bddcb4db62c2261f1d703d835851031577 (diff)
parent	8f2771f2b85aea4d0f9a0137ad3b63d1173c0962 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/bnx2.c	17
-rw-r--r--	drivers/net/bnx2.h	1
-rw-r--r--	drivers/net/cnic.c	12
-rw-r--r--	drivers/net/dl2k.c	4
-rw-r--r--	drivers/net/xen-netfront.c	96
5 files changed, 106 insertions, 24 deletions
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 231aa97745e..3dbaf58f681 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -7966,11 +7966,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 
 		/* AER (Advanced Error Reporting) hooks */
 		err = pci_enable_pcie_error_reporting(pdev);
-		if (err) {
-			dev_err(&pdev->dev, "pci_enable_pcie_error_reporting "
-				"failed 0x%x\n", err);
-			/* non-fatal, continue */
-		}
+		if (!err)
+			bp->flags |= BNX2_FLAG_AER_ENABLED;
 
 	} else {
 		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
@@ -8233,8 +8230,10 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	return 0;
 
 err_out_unmap:
-	if (bp->flags & BNX2_FLAG_PCIE)
+	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
 		pci_disable_pcie_error_reporting(pdev);
+		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
+	}
 
 	if (bp->regview) {
 		iounmap(bp->regview);
@@ -8422,8 +8421,10 @@ bnx2_remove_one(struct pci_dev *pdev)
 
 	kfree(bp->temp_stats_blk);
 
-	if (bp->flags & BNX2_FLAG_PCIE)
+	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
 		pci_disable_pcie_error_reporting(pdev);
+		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
+	}
 
 	free_netdev(dev);
 
@@ -8539,7 +8540,7 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
 	}
 	rtnl_unlock();
 
-	if (!(bp->flags & BNX2_FLAG_PCIE))
+	if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
 		return result;
 
 	err = pci_cleanup_aer_uncorrect_error_status(pdev);
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 5488a2e82fe..f459fb2f9ad 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6741,6 +6741,7 @@ struct bnx2 {
 #define BNX2_FLAG_JUMBO_BROKEN		0x00000800
 #define BNX2_FLAG_CAN_KEEP_VLAN		0x00001000
 #define BNX2_FLAG_BROKEN_STATS		0x00002000
+#define BNX2_FLAG_AER_ENABLED		0x00004000
 
 	struct bnx2_napi	bnx2_napi[BNX2_MAX_MSIX_VEC];
 
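The bnx2 change above replaces the BNX2_FLAG_PCIE test with a dedicated BNX2_FLAG_AER_ENABLED flag that is set only when pci_enable_pcie_error_reporting() actually succeeds, so the error-unwind and remove paths never disable AER on a device where it was never enabled. A minimal sketch of that enable/track/disable pattern, assuming a hypothetical driver (the foo_* names and FOO_FLAG_AER_ENABLED are illustrative; only the pci_*_pcie_error_reporting() calls come from the patch itself):

#include <linux/pci.h>
#include <linux/aer.h>

#define FOO_FLAG_AER_ENABLED	0x00000001	/* illustrative flag bit */

/* Remember that AER was enabled only if the core call succeeded. */
static void foo_enable_aer(struct pci_dev *pdev, u32 *flags)
{
	if (!pci_enable_pcie_error_reporting(pdev))
		*flags |= FOO_FLAG_AER_ENABLED;
}

/* Disable AER exactly once, and only if we enabled it earlier. */
static void foo_disable_aer(struct pci_dev *pdev, u32 *flags)
{
	if (*flags & FOO_FLAG_AER_ENABLED) {
		pci_disable_pcie_error_reporting(pdev);
		*flags &= ~FOO_FLAG_AER_ENABLED;
	}
}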
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 263a2944566..7ff170cbc7d 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -699,13 +699,13 @@ static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
 static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
 {
 	int i;
-	u32 *page_table = dma->pgtbl;
+	__le32 *page_table = (__le32 *) dma->pgtbl;
 
 	for (i = 0; i < dma->num_pages; i++) {
 		/* Each entry needs to be in big endian format. */
-		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
+		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
 		page_table++;
-		*page_table = (u32) dma->pg_map_arr[i];
+		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
 		page_table++;
 	}
 }
@@ -713,13 +713,13 @@ static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
 static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
 {
 	int i;
-	u32 *page_table = dma->pgtbl;
+	__le32 *page_table = (__le32 *) dma->pgtbl;
 
 	for (i = 0; i < dma->num_pages; i++) {
 		/* Each entry needs to be in little endian format. */
-		*page_table = dma->pg_map_arr[i] & 0xffffffff;
+		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
 		page_table++;
-		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
+		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
 		page_table++;
 	}
 }
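In both cnic helpers the destination is now typed __le32 and each 32-bit half of the 64-bit DMA address goes through cpu_to_le32(), so the bytes written to the page table no longer depend on the host's byte order. A small sketch of the layout used by cnic_setup_page_tbl_le() above (the fill_pgtbl_le() helper name and the plain u64 address array are illustrative; a real driver would use dma_addr_t):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Store each 64-bit DMA address as two little-endian 32-bit words,
 * low word first, independent of the CPU's native byte order.
 */
static void fill_pgtbl_le(__le32 *page_table, const u64 *pg_map_arr,
			  int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++) {
		*page_table++ = cpu_to_le32(pg_map_arr[i] & 0xffffffff);
		*page_table++ = cpu_to_le32(pg_map_arr[i] >> 32);
	}
}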
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index e1a8216ff69..c05db604605 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -1753,8 +1753,6 @@ rio_close (struct net_device *dev)
 
 	/* Free all the skbuffs in the queue. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
-		np->rx_ring[i].status = 0;
-		np->rx_ring[i].fraginfo = 0;
 		skb = np->rx_skbuff[i];
 		if (skb) {
 			pci_unmap_single(np->pdev,
@@ -1763,6 +1761,8 @@ rio_close (struct net_device *dev)
 			dev_kfree_skb (skb);
 			np->rx_skbuff[i] = NULL;
 		}
+		np->rx_ring[i].status = 0;
+		np->rx_ring[i].fraginfo = 0;
 	}
 	for (i = 0; i < TX_RING_SIZE; i++) {
 		skb = np->tx_skbuff[i];
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 546de574982..da1f1212034 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -120,6 +120,9 @@ struct netfront_info {
 	unsigned long rx_pfn_array[NET_RX_RING_SIZE];
 	struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
 	struct mmu_update rx_mmu[NET_RX_RING_SIZE];
+
+	/* Statistics */
+	int rx_gso_checksum_fixup;
 };
 
 struct netfront_rx_info {
@@ -770,11 +773,29 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
 	return cons;
 }
 
-static int skb_checksum_setup(struct sk_buff *skb)
+static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
 {
 	struct iphdr *iph;
 	unsigned char *th;
 	int err = -EPROTO;
+	int recalculate_partial_csum = 0;
+
+	/*
+	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
+	 * peers can fail to set NETRXF_csum_blank when sending a GSO
+	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
+	 * recalculate the partial checksum.
+	 */
+	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
+		struct netfront_info *np = netdev_priv(dev);
+		np->rx_gso_checksum_fixup++;
+		skb->ip_summed = CHECKSUM_PARTIAL;
+		recalculate_partial_csum = 1;
+	}
+
+	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return 0;
 
 	if (skb->protocol != htons(ETH_P_IP))
 		goto out;
@@ -788,9 +809,23 @@ static int skb_checksum_setup(struct sk_buff *skb)
 	switch (iph->protocol) {
 	case IPPROTO_TCP:
 		skb->csum_offset = offsetof(struct tcphdr, check);
+
+		if (recalculate_partial_csum) {
+			struct tcphdr *tcph = (struct tcphdr *)th;
+			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+							 skb->len - iph->ihl*4,
+							 IPPROTO_TCP, 0);
+		}
 		break;
 	case IPPROTO_UDP:
 		skb->csum_offset = offsetof(struct udphdr, check);
+
+		if (recalculate_partial_csum) {
+			struct udphdr *udph = (struct udphdr *)th;
+			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+							 skb->len - iph->ihl*4,
+							 IPPROTO_UDP, 0);
+		}
 		break;
 	default:
 		if (net_ratelimit())
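For GSO frames that arrive from buggy peers without NETRXF_csum_blank, checksum_setup() above forces CHECKSUM_PARTIAL and re-seeds the transport checksum field with the bitwise-NOT of the IPv4 pseudo-header checksum returned by csum_tcpudp_magic(); the remaining bytes are folded in later when the checksum is completed. A minimal sketch of that seeding step for TCP, assuming a linear skb whose IP/TCP headers have already been validated (the helper name is illustrative; the calls mirror the hunk above):

#include <linux/stddef.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <net/checksum.h>

/* Seed a TCP partial checksum: store the negated pseudo-header sum in
 * tcph->check and point csum_start/csum_offset at it so the rest of
 * the checksum can be completed later.
 */
static void seed_tcp_partial_csum(struct sk_buff *skb, struct iphdr *iph,
				  struct tcphdr *tcph)
{
	tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
					 skb->len - iph->ihl * 4,
					 IPPROTO_TCP, 0);
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = (unsigned char *)tcph - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
}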
@@ -829,13 +864,11 @@ static int handle_incoming_queue(struct net_device *dev,
 		/* Ethernet work: Delayed to here as it peeks the header. */
 		skb->protocol = eth_type_trans(skb, dev);
 
-		if (skb->ip_summed == CHECKSUM_PARTIAL) {
-			if (skb_checksum_setup(skb)) {
-				kfree_skb(skb);
-				packets_dropped++;
-				dev->stats.rx_errors++;
-				continue;
-			}
+		if (checksum_setup(dev, skb)) {
+			kfree_skb(skb);
+			packets_dropped++;
+			dev->stats.rx_errors++;
+			continue;
 		}
 
 		dev->stats.rx_packets++;
@@ -1632,12 +1665,59 @@ static void netback_changed(struct xenbus_device *dev,
 	}
 }
 
+static const struct xennet_stat {
+	char name[ETH_GSTRING_LEN];
+	u16 offset;
+} xennet_stats[] = {
+	{
+		"rx_gso_checksum_fixup",
+		offsetof(struct netfront_info, rx_gso_checksum_fixup)
+	},
+};
+
+static int xennet_get_sset_count(struct net_device *dev, int string_set)
+{
+	switch (string_set) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(xennet_stats);
+	default:
+		return -EINVAL;
+	}
+}
+
+static void xennet_get_ethtool_stats(struct net_device *dev,
+				     struct ethtool_stats *stats, u64 * data)
+{
+	void *np = netdev_priv(dev);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
+		data[i] = *(int *)(np + xennet_stats[i].offset);
+}
+
+static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
+{
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
+			memcpy(data + i * ETH_GSTRING_LEN,
+			       xennet_stats[i].name, ETH_GSTRING_LEN);
+		break;
+	}
+}
+
 static const struct ethtool_ops xennet_ethtool_ops =
 {
 	.set_tx_csum = ethtool_op_set_tx_csum,
 	.set_sg = xennet_set_sg,
 	.set_tso = xennet_set_tso,
 	.get_link = ethtool_op_get_link,
+
+	.get_sset_count = xennet_get_sset_count,
+	.get_ethtool_stats = xennet_get_ethtool_stats,
+	.get_strings = xennet_get_strings,
 };
 
 #ifdef CONFIG_SYSFS
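The new xen-netfront ethtool hooks follow the usual name-table pattern: a static array maps each stat string to the counter's offset inside struct netfront_info, .get_sset_count reports how many ETH_SS_STATS entries exist, .get_strings copies the names, and .get_ethtool_stats reads each counter through its recorded offset. A reduced, self-contained sketch of the same pattern for a hypothetical driver (the foo_* names and the rx_fixups counter are illustrative; the ethtool_ops callbacks and their signatures are the real kernel interface):

#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>

struct foo_priv {
	unsigned long rx_fixups;	/* counter exposed via ethtool -S */
};

static const struct {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} foo_stats[] = {
	{ "rx_fixups", offsetof(struct foo_priv, rx_fixups) },
};

static int foo_get_sset_count(struct net_device *dev, int sset)
{
	/* Only the statistics string set is supported. */
	if (sset != ETH_SS_STATS)
		return -EINVAL;
	return ARRAY_SIZE(foo_stats);
}

static void foo_get_strings(struct net_device *dev, u32 sset, u8 *data)
{
	int i;

	if (sset != ETH_SS_STATS)
		return;
	for (i = 0; i < ARRAY_SIZE(foo_stats); i++)
		memcpy(data + i * ETH_GSTRING_LEN, foo_stats[i].name,
		       ETH_GSTRING_LEN);
}

static void foo_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct foo_priv *priv = netdev_priv(dev);
	int i;

	/* Read each counter through its offset in the private struct. */
	for (i = 0; i < ARRAY_SIZE(foo_stats); i++)
		data[i] = *(unsigned long *)((char *)priv + foo_stats[i].offset);
}

static const struct ethtool_ops foo_ethtool_ops = {
	.get_sset_count		= foo_get_sset_count,
	.get_strings		= foo_get_strings,
	.get_ethtool_stats	= foo_get_ethtool_stats,
};

Once wired into the device's ethtool_ops, `ethtool -S <iface>` lists the counter by name with its current value.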