author		Al Viro <viro@ftp.linux.org.uk>		2007-12-09 11:06:41 -0500
committer	David S. Miller <davem@davemloft.net>	2008-01-28 18:07:13 -0500
commit		5bb7ea26148369315492c3dfc43c3b6366a9f279 (patch)
tree		3250e30a8d7c3aec8bd4d49e63329bb9a7b8de7f /drivers/net/forcedeth.c
parent		79ea13ce07c951bb4d95471e7300baa0f1be9e78 (diff)
forcedeth endianness bugs
* misannotation: struct register_test members are actually host-endian

* bug: cpu_to_le64(n) >> 32 instead of cpu_to_le32(n >> 32) in setting
  ->bufhigh and similar for ->buflow (take low bits, _then_ convert to
  little-endian, not the other way round).

* bug: setup_hw_rings() should not convert to little-endian at all (we feed
  the result to writel(), not store in shared data structure), let alone try
  to play with shifting and masking little-endian values.  Introduced when
  setup_hw_rings() went in, screwed both 64bit case and the old code for
  32bit rings it had replaced.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
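For readers less familiar with the endianness helpers, here is a small standalone userspace sketch of the second bullet (not driver code: the byte-swap stand-ins model a big-endian CPU and the example address is invented), showing why shifting after cpu_to_le64() picks up the wrong half of the address, while shifting first and converting each 32-bit half does the right thing.

#include <stdint.h>
#include <stdio.h>

/* stand-ins modelling cpu_to_le32()/cpu_to_le64() on a big-endian host */
static uint32_t cpu_to_le32(uint32_t x) { return __builtin_bswap32(x); }
static uint64_t cpu_to_le64(uint64_t x) { return __builtin_bswap64(x); }

int main(void)
{
        uint64_t dma = 0x00000001a2b3c4d5ULL;   /* invented 64-bit DMA address */

        /* buggy pattern: convert the whole 64-bit value, then shift/mask it */
        uint32_t high_buggy = cpu_to_le64(dma) >> 32;            /* 0xd5c4b3a2: byte-swapped LOW half */
        uint32_t low_buggy  = cpu_to_le64(dma) & 0x0FFFFFFFFULL; /* 0x01000000: byte-swapped HIGH half */

        /* fixed pattern: split in host order, then convert each 32-bit half */
        uint32_t high_fixed = cpu_to_le32(dma >> 32);   /* LE form of 0x00000001 */
        uint32_t low_fixed  = cpu_to_le32(dma);         /* LE form of 0xa2b3c4d5 */

        printf("buggy: high=%08x low=%08x\n", high_buggy, low_buggy);
        printf("fixed: high=%08x low=%08x\n", high_fixed, low_fixed);
        return 0;
}

On a little-endian CPU cpu_to_le64() is the identity, so both variants produce the same descriptor contents there; the breakage only shows up on big-endian hosts, which is how the pattern survived in-tree.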
Diffstat (limited to 'drivers/net/forcedeth.c')
-rw-r--r--	drivers/net/forcedeth.c	| 46
1 file changed, 28 insertions(+), 18 deletions(-)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index f84c752997a..7667a62ac31 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -712,8 +712,8 @@ static const struct nv_ethtool_str nv_etests_str[] = {
 };
 
 struct register_test {
-        __le32 reg;
-        __le32 mask;
+        __u32 reg;
+        __u32 mask;
 };
 
 static const struct register_test nv_registers_test[] = {
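The first changelog bullet is about sparse annotations rather than runtime behaviour: these fields are compared against values coming back through readl(), which are already host-endian, so marking them __le32 was a misannotation. A trimmed-down userspace sketch of the rule follows (the typedefs are modelled on the kernel's, and fake_readl() is invented for the example; sparse flags the bad assignment, a plain gcc build compiles it silently).

#include <stdint.h>

#ifdef __CHECKER__                      /* defined when sparse parses the file */
#define __bitwise __attribute__((bitwise))
#else
#define __bitwise
#endif

typedef uint32_t u32;
typedef uint32_t __bitwise le32;        /* marks data little-endian *in memory* */

static u32 fake_readl(void) { return 0x12345678; }  /* readl() returns host-endian */

int main(void)
{
        u32  reg  = fake_readl();       /* fine: host-endian value in a u32 */
        le32 desc = fake_readl();       /* sparse: incorrect type in initializer */
        (void)reg; (void)desc;
        return 0;
}

__le32 stays reserved for data that really is little-endian in memory, such as the DMA descriptors touched later in this patch.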
@@ -929,6 +929,16 @@ static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
 #define NV_SETUP_RX_RING 0x01
 #define NV_SETUP_TX_RING 0x02
 
+static inline u32 dma_low(dma_addr_t addr)
+{
+        return addr;
+}
+
+static inline u32 dma_high(dma_addr_t addr)
+{
+        return addr>>31>>1;     /* 0 if 32bit, shift down by 32 if 64bit */
+}
+
 static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
 {
         struct fe_priv *np = get_nvpriv(dev);
@@ -936,19 +946,19 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
 
         if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
                 if (rxtx_flags & NV_SETUP_RX_RING) {
-                        writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
+                        writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
                 }
                 if (rxtx_flags & NV_SETUP_TX_RING) {
-                        writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
+                        writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
                 }
         } else {
                 if (rxtx_flags & NV_SETUP_RX_RING) {
-                        writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
-                        writel((u32) (cpu_to_le64(np->ring_addr) >> 32), base + NvRegRxRingPhysAddrHigh);
+                        writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
+                        writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
                 }
                 if (rxtx_flags & NV_SETUP_TX_RING) {
-                        writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
-                        writel((u32) (cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh);
+                        writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
+                        writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
                 }
         }
 }
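Two details of the new helpers are easy to miss. First, writel() takes a CPU-order u32 and the MMIO accessor handles any byte-swapping the bus needs, which is why dma_low()/dma_high() return plain host-endian values and this path has no cpu_to_le32() at all. Second, dma_high() shifts by 31 and then by 1 because dma_addr_t can be a 32-bit type, in which case a single ">> 32" would be an undefined shift (and a gcc warning). A standalone sketch of that second point, with a 32-bit typedef and an invented address standing in for dma_addr_t:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t dma_addr_t;    /* pretend we are on a 32-bit-DMA configuration */

static inline uint32_t dma_high(dma_addr_t addr)
{
        return addr >> 31 >> 1; /* 0 here; the real high half when dma_addr_t is 64-bit */
}

int main(void)
{
        dma_addr_t addr = 0x8badf00d;

        /* "addr >> 32" here would be undefined behaviour and a gcc warning */
        printf("high=%08x low=%08x\n", dma_high(addr), (uint32_t)addr);
        return 0;
}

With a 64-bit dma_addr_t the same expression shifts the address down by a full 32 bits, so one helper serves both configurations.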
@@ -1571,8 +1581,8 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
                                                              skb_tailroom(skb),
                                                              PCI_DMA_FROMDEVICE);
                         np->put_rx_ctx->dma_len = skb_tailroom(skb);
-                        np->put_rx.ex->bufhigh = cpu_to_le64(np->put_rx_ctx->dma) >> 32;
-                        np->put_rx.ex->buflow = cpu_to_le64(np->put_rx_ctx->dma) & 0x0FFFFFFFF;
+                        np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
+                        np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
                         wmb();
                         np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
                         if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
@@ -1937,8 +1947,8 @@ static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
                 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
                                                 PCI_DMA_TODEVICE);
                 np->put_tx_ctx->dma_len = bcnt;
-                put_tx->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
-                put_tx->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
+                put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
+                put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
                 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
 
                 tx_flags = NV_TX2_VALID;
@@ -1963,8 +1973,8 @@ static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
                         np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
                                                            PCI_DMA_TODEVICE);
                         np->put_tx_ctx->dma_len = bcnt;
-                        put_tx->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
-                        put_tx->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
+                        put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
+                        put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
                         put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
 
                         offset += bcnt;
@@ -2680,8 +2690,8 @@ static void nv_set_multicast(struct net_device *dev)
                         walk = dev->mc_list;
                         while (walk != NULL) {
                                 u32 a, b;
-                                a = le32_to_cpu(*(u32 *) walk->dmi_addr);
-                                b = le16_to_cpu(*(u16 *) (&walk->dmi_addr[4]));
+                                a = le32_to_cpu(*(__le32 *) walk->dmi_addr);
+                                b = le16_to_cpu(*(__le16 *) (&walk->dmi_addr[4]));
                                 alwaysOn[0] &= a;
                                 alwaysOff[0] &= ~a;
                                 alwaysOn[1] &= b;
@@ -4539,8 +4549,8 @@ static int nv_loopback_test(struct net_device *dev)
                 np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
                 np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
         } else {
-                np->tx_ring.ex[0].bufhigh = cpu_to_le64(test_dma_addr) >> 32;
-                np->tx_ring.ex[0].buflow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF;
+                np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr));
+                np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr));
                 np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
         }
         writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);