Diffstat (limited to 'drivers/net/tokenring')
 -rw-r--r--  drivers/net/tokenring/3c359.c | 90
 -rw-r--r--  drivers/net/tokenring/3c359.h | 38
 2 files changed, 62 insertions, 66 deletions
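
The patch below annotates the 3c359 driver's DMA descriptors for endianness: descriptor fields become __le16/__le32, values are wrapped in cpu_to_le16()/cpu_to_le32()/le32_to_cpu(), the flag macros in 3c359.h are pre-converted with cpu_to_le32(), and ntohs() on MMIO word reads is replaced by the unconditional swab16(). A minimal sketch (not part of the patch) of the core pattern, using hypothetical demo_* names rather than the driver's own types:

/*
 * Sketch only: descriptor fields the NIC accesses via DMA are typed __le32,
 * filled with cpu_to_le32() and read back with le32_to_cpu(), so the
 * in-memory layout is little-endian on any host and sparse can flag
 * missed conversions.
 */
#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_desc {		/* hypothetical descriptor, for illustration only */
	__le32 addr;
	__le32 len;
};

static inline void demo_fill(struct demo_desc *d, u32 dma_addr, u32 len)
{
	d->addr = cpu_to_le32(dma_addr);	/* stored little-endian for the card */
	d->len = cpu_to_le32(len);
}

static inline u32 demo_frame_length(const struct demo_desc *d)
{
	return le32_to_cpu(d->len) & 0x7FFF;	/* same masking style as xl_rx() below */
}
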
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index 5d31519a6c67..44a06f8b588f 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -570,7 +570,7 @@ static int xl_open(struct net_device *dev)
 	struct xl_private *xl_priv=netdev_priv(dev);
 	u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
 	u8 i ;
-	u16 hwaddr[3] ; /* Should be u8[6] but we get word return values */
+	__le16 hwaddr[3] ; /* Should be u8[6] but we get word return values */
 	int open_err ;
 
 	u16 switchsettings, switchsettings_eeprom ;
@@ -580,15 +580,12 @@ static int xl_open(struct net_device *dev)
 	}
 
 	/*
-	 * Read the information from the EEPROM that we need. I know we
-	 * should use ntohs, but the word gets stored reversed in the 16
-	 * bit field anyway and it all works its self out when we memcpy
-	 * it into dev->dev_addr.
+	 * Read the information from the EEPROM that we need.
 	 */
 
-	hwaddr[0] = xl_ee_read(dev,0x10) ;
-	hwaddr[1] = xl_ee_read(dev,0x11) ;
-	hwaddr[2] = xl_ee_read(dev,0x12) ;
+	hwaddr[0] = cpu_to_le16(xl_ee_read(dev,0x10));
+	hwaddr[1] = cpu_to_le16(xl_ee_read(dev,0x11));
+	hwaddr[2] = cpu_to_le16(xl_ee_read(dev,0x12));
 
 	/* Ring speed */
 
@@ -665,8 +662,8 @@ static int xl_open(struct net_device *dev)
 			break ;
 
 		skb->dev = dev ;
-		xl_priv->xl_rx_ring[i].upfragaddr = pci_map_single(xl_priv->pdev, skb->data,xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE) ;
-		xl_priv->xl_rx_ring[i].upfraglen = xl_priv->pkt_buf_sz | RXUPLASTFRAG;
+		xl_priv->xl_rx_ring[i].upfragaddr = cpu_to_le32(pci_map_single(xl_priv->pdev, skb->data,xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
+		xl_priv->xl_rx_ring[i].upfraglen = cpu_to_le32(xl_priv->pkt_buf_sz) | RXUPLASTFRAG;
 		xl_priv->rx_ring_skb[i] = skb ;
 	}
 
@@ -680,7 +677,7 @@ static int xl_open(struct net_device *dev)
 		xl_priv->rx_ring_tail = 0 ;
 		xl_priv->rx_ring_dma_addr = pci_map_single(xl_priv->pdev,xl_priv->xl_rx_ring, sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE, PCI_DMA_TODEVICE) ;
 		for (i=0;i<(xl_priv->rx_ring_no-1);i++) {
-			xl_priv->xl_rx_ring[i].upnextptr = xl_priv->rx_ring_dma_addr + (sizeof (struct xl_rx_desc) * (i+1)) ;
+			xl_priv->xl_rx_ring[i].upnextptr = cpu_to_le32(xl_priv->rx_ring_dma_addr + (sizeof (struct xl_rx_desc) * (i+1)));
 		}
 		xl_priv->xl_rx_ring[i].upnextptr = 0 ;
 
@@ -698,7 +695,7 @@ static int xl_open(struct net_device *dev)
 	 * Setup the first dummy DPD entry for polling to start working.
 	 */
 
-	xl_priv->xl_tx_ring[0].framestartheader = TXDPDEMPTY ;
+	xl_priv->xl_tx_ring[0].framestartheader = TXDPDEMPTY;
 	xl_priv->xl_tx_ring[0].buffer = 0 ;
 	xl_priv->xl_tx_ring[0].buffer_length = 0 ;
 	xl_priv->xl_tx_ring[0].dnnextptr = 0 ;
@@ -811,17 +808,17 @@ static int xl_open_hw(struct net_device *dev)
 		return open_err ;
 	} else {
 		writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 8, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
-		xl_priv->asb = ntohs(readw(xl_mmio + MMIO_MACDATA)) ;
+		xl_priv->asb = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
 		printk(KERN_INFO "%s: Adapter Opened Details: ",dev->name) ;
 		printk("ASB: %04x",xl_priv->asb ) ;
 		writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 10, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
-		printk(", SRB: %04x",ntohs(readw(xl_mmio + MMIO_MACDATA)) ) ;
+		printk(", SRB: %04x",swab16(readw(xl_mmio + MMIO_MACDATA)) ) ;
 
 		writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 12, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
-		xl_priv->arb = ntohs(readw(xl_mmio + MMIO_MACDATA)) ;
+		xl_priv->arb = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
 		printk(", ARB: %04x \n",xl_priv->arb ) ;
 		writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 14, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
-		vsoff = ntohs(readw(xl_mmio + MMIO_MACDATA)) ;
+		vsoff = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
 
 		/*
 		 * Interesting, sending the individual characters directly to printk was causing klogd to use
@@ -873,16 +870,15 @@ static int xl_open_hw(struct net_device *dev)
 static void adv_rx_ring(struct net_device *dev) /* Advance rx_ring, cut down on bloat in xl_rx */
 {
 	struct xl_private *xl_priv=netdev_priv(dev);
-	int prev_ring_loc ;
-
-	prev_ring_loc = (xl_priv->rx_ring_tail + XL_RX_RING_SIZE - 1) & (XL_RX_RING_SIZE - 1);
-	xl_priv->xl_rx_ring[prev_ring_loc].upnextptr = xl_priv->rx_ring_dma_addr + (sizeof (struct xl_rx_desc) * xl_priv->rx_ring_tail) ;
-	xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].framestatus = 0 ;
-	xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upnextptr = 0 ;
-	xl_priv->rx_ring_tail++ ;
-	xl_priv->rx_ring_tail &= (XL_RX_RING_SIZE-1) ;
-
-	return ;
+	int n = xl_priv->rx_ring_tail;
+	int prev_ring_loc;
+
+	prev_ring_loc = (n + XL_RX_RING_SIZE - 1) & (XL_RX_RING_SIZE - 1);
+	xl_priv->xl_rx_ring[prev_ring_loc].upnextptr = cpu_to_le32(xl_priv->rx_ring_dma_addr + (sizeof (struct xl_rx_desc) * n));
+	xl_priv->xl_rx_ring[n].framestatus = 0;
+	xl_priv->xl_rx_ring[n].upnextptr = 0;
+	xl_priv->rx_ring_tail++;
+	xl_priv->rx_ring_tail &= (XL_RX_RING_SIZE-1);
 }
 
 static void xl_rx(struct net_device *dev)
@@ -914,7 +910,7 @@ static void xl_rx(struct net_device *dev)
 				temp_ring_loc &= (XL_RX_RING_SIZE-1) ;
 			}
 
-			frame_length = xl_priv->xl_rx_ring[temp_ring_loc].framestatus & 0x7FFF ;
+			frame_length = le32_to_cpu(xl_priv->xl_rx_ring[temp_ring_loc].framestatus) & 0x7FFF;
 
 			skb = dev_alloc_skb(frame_length) ;
 
@@ -931,29 +927,29 @@ static void xl_rx(struct net_device *dev)
 			}
 
 			while (xl_priv->rx_ring_tail != temp_ring_loc) {
-				copy_len = xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfraglen & 0x7FFF ;
+				copy_len = le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfraglen) & 0x7FFF;
 				frame_length -= copy_len ;
-				pci_dma_sync_single_for_cpu(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
+				pci_dma_sync_single_for_cpu(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE);
 				skb_copy_from_linear_data(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail],
 							  skb_put(skb, copy_len),
 							  copy_len);
-				pci_dma_sync_single_for_device(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
+				pci_dma_sync_single_for_device(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE);
 				adv_rx_ring(dev) ;
 			}
 
 			/* Now we have found the last fragment */
-			pci_dma_sync_single_for_cpu(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
+			pci_dma_sync_single_for_cpu(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE);
 			skb_copy_from_linear_data(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail],
 				      skb_put(skb,copy_len), frame_length);
 /* memcpy(skb_put(skb,frame_length), bus_to_virt(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr), frame_length) ; */
-			pci_dma_sync_single_for_device(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
+			pci_dma_sync_single_for_device(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE);
 			adv_rx_ring(dev) ;
 			skb->protocol = tr_type_trans(skb,dev) ;
 			netif_rx(skb) ;
 
 		} else { /* Single Descriptor Used, simply swap buffers over, fast path */
 
-			frame_length = xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].framestatus & 0x7FFF ;
+			frame_length = le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].framestatus) & 0x7FFF;
 
 			skb = dev_alloc_skb(xl_priv->pkt_buf_sz) ;
 
@@ -966,13 +962,13 @@ static void xl_rx(struct net_device *dev)
 			}
 
 			skb2 = xl_priv->rx_ring_skb[xl_priv->rx_ring_tail] ;
-			pci_unmap_single(xl_priv->pdev, xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr, xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
+			pci_unmap_single(xl_priv->pdev, le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr), xl_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
 			skb_put(skb2, frame_length) ;
 			skb2->protocol = tr_type_trans(skb2,dev) ;
 
 			xl_priv->rx_ring_skb[xl_priv->rx_ring_tail] = skb ;
-			xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr = pci_map_single(xl_priv->pdev,skb->data,xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE) ;
-			xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfraglen = xl_priv->pkt_buf_sz | RXUPLASTFRAG ;
+			xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr = cpu_to_le32(pci_map_single(xl_priv->pdev,skb->data,xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
+			xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfraglen = cpu_to_le32(xl_priv->pkt_buf_sz) | RXUPLASTFRAG;
 			adv_rx_ring(dev) ;
 			xl_priv->xl_stats.rx_packets++ ;
 			xl_priv->xl_stats.rx_bytes += frame_length ;
@@ -1022,7 +1018,7 @@ static void xl_freemem(struct net_device *dev)
 
 	for (i=0;i<XL_RX_RING_SIZE;i++) {
 		dev_kfree_skb_irq(xl_priv->rx_ring_skb[xl_priv->rx_ring_tail]) ;
-		pci_unmap_single(xl_priv->pdev,xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr,xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE) ;
+		pci_unmap_single(xl_priv->pdev,le32_to_cpu(xl_priv->xl_rx_ring[xl_priv->rx_ring_tail].upfragaddr),xl_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE);
 		xl_priv->rx_ring_tail++ ;
 		xl_priv->rx_ring_tail &= XL_RX_RING_SIZE-1;
 	}
@@ -1181,9 +1177,9 @@ static int xl_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		txd = &(xl_priv->xl_tx_ring[tx_head]) ;
 		txd->dnnextptr = 0 ;
-		txd->framestartheader = skb->len | TXDNINDICATE ;
-		txd->buffer = pci_map_single(xl_priv->pdev, skb->data, skb->len, PCI_DMA_TODEVICE) ;
-		txd->buffer_length = skb->len | TXDNFRAGLAST ;
+		txd->framestartheader = cpu_to_le32(skb->len) | TXDNINDICATE;
+		txd->buffer = cpu_to_le32(pci_map_single(xl_priv->pdev, skb->data, skb->len, PCI_DMA_TODEVICE));
+		txd->buffer_length = cpu_to_le32(skb->len) | TXDNFRAGLAST;
 		xl_priv->tx_ring_skb[tx_head] = skb ;
 		xl_priv->xl_stats.tx_packets++ ;
 		xl_priv->xl_stats.tx_bytes += skb->len ;
@@ -1199,7 +1195,7 @@ static int xl_xmit(struct sk_buff *skb, struct net_device *dev)
 		xl_priv->tx_ring_head &= (XL_TX_RING_SIZE - 1) ;
 		xl_priv->free_ring_entries-- ;
 
-		xl_priv->xl_tx_ring[tx_prev].dnnextptr = xl_priv->tx_ring_dma_addr + (sizeof (struct xl_tx_desc) * tx_head) ;
+		xl_priv->xl_tx_ring[tx_prev].dnnextptr = cpu_to_le32(xl_priv->tx_ring_dma_addr + (sizeof (struct xl_tx_desc) * tx_head));
 
 		/* Sneaky, by doing a read on DnListPtr we can force the card to poll on the DnNextPtr */
 		/* readl(xl_mmio + MMIO_DNLISTPTR) ; */
@@ -1237,9 +1233,9 @@ static void xl_dn_comp(struct net_device *dev)
 
 	while (xl_priv->xl_tx_ring[xl_priv->tx_ring_tail].framestartheader & TXDNCOMPLETE ) {
 		txd = &(xl_priv->xl_tx_ring[xl_priv->tx_ring_tail]) ;
-		pci_unmap_single(xl_priv->pdev,txd->buffer, xl_priv->tx_ring_skb[xl_priv->tx_ring_tail]->len, PCI_DMA_TODEVICE) ;
+		pci_unmap_single(xl_priv->pdev, le32_to_cpu(txd->buffer), xl_priv->tx_ring_skb[xl_priv->tx_ring_tail]->len, PCI_DMA_TODEVICE);
 		txd->framestartheader = 0 ;
-		txd->buffer = 0xdeadbeef ;
+		txd->buffer = cpu_to_le32(0xdeadbeef);
 		txd->buffer_length = 0 ;
 		dev_kfree_skb_irq(xl_priv->tx_ring_skb[xl_priv->tx_ring_tail]) ;
 		xl_priv->tx_ring_tail++ ;
@@ -1507,9 +1503,9 @@ static void xl_arb_cmd(struct net_device *dev)
 	if (arb_cmd == RING_STATUS_CHANGE) { /* Ring.Status.Change */
 		writel( ( (MEM_WORD_READ | 0xD0000 | xl_priv->arb) + 6), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
 
-		printk(KERN_INFO "%s: Ring Status Change: New Status = %04x\n", dev->name, ntohs(readw(xl_mmio + MMIO_MACDATA) )) ;
+		printk(KERN_INFO "%s: Ring Status Change: New Status = %04x\n", dev->name, swab16(readw(xl_mmio + MMIO_MACDATA) )) ;
 
-		lan_status = ntohs(readw(xl_mmio + MMIO_MACDATA));
+		lan_status = swab16(readw(xl_mmio + MMIO_MACDATA));
 
 		/* Acknowledge interrupt, this tells nic we are done with the arb */
 		writel(ACK_INTERRUPT | ARBCACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
@@ -1573,7 +1569,7 @@ static void xl_arb_cmd(struct net_device *dev)
 		printk(KERN_INFO "Received.Data \n") ;
 #endif
 		writel( ((MEM_WORD_READ | 0xD0000 | xl_priv->arb) + 6), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
-		xl_priv->mac_buffer = ntohs(readw(xl_mmio + MMIO_MACDATA)) ;
+		xl_priv->mac_buffer = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
 
 		/* Now we are going to be really basic here and not do anything
 		 * with the data at all. The tech docs do not give me enough
@@ -1634,7 +1630,7 @@ static void xl_asb_cmd(struct net_device *dev)
 	writeb(0x81, xl_mmio + MMIO_MACDATA) ;
 
 	writel(MEM_WORD_WRITE | 0xd0000 | xl_priv->asb | 6, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
-	writew(ntohs(xl_priv->mac_buffer), xl_mmio + MMIO_MACDATA) ;
+	writew(swab16(xl_priv->mac_buffer), xl_mmio + MMIO_MACDATA) ;
 
 	xl_wait_misr_flags(dev) ;
 
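
The header change below retypes the descriptor fields as __le32 and pre-converts the descriptor flag bits with cpu_to_le32(), so they can be OR'ed straight into descriptor words without a per-use conversion, as xl_xmit() and xl_open() do above. A small sketch of that idiom, again with hypothetical demo_* names rather than the driver's own code:

#include <linux/types.h>
#include <asm/byteorder.h>

#define DEMO_LASTFRAG	cpu_to_le32(1 << 31)	/* hypothetical flag, mirrors RXUPLASTFRAG */

struct demo_rx_desc {		/* hypothetical, for illustration only */
	__le32 fragaddr;
	__le32 fraglen;
};

static inline void demo_post_buffer(struct demo_rx_desc *d, u32 dma_addr, u32 buf_sz)
{
	d->fragaddr = cpu_to_le32(dma_addr);
	/* cpu_to_le32() of a constant folds at compile time, so a pre-converted
	 * flag costs nothing and the OR stays entirely in __le32 space. */
	d->fraglen = cpu_to_le32(buf_sz) | DEMO_LASTFRAG;
}
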
diff --git a/drivers/net/tokenring/3c359.h b/drivers/net/tokenring/3c359.h
index 05c860368852..b880cba0f6fd 100644
--- a/drivers/net/tokenring/3c359.h
+++ b/drivers/net/tokenring/3c359.h
@@ -156,19 +156,19 @@
 #define HOSTERRINT (1<<1)
 
 /* Receive descriptor bits */
-#define RXOVERRUN (1<<19)
-#define RXFC (1<<21)
-#define RXAR (1<<22)
-#define RXUPDCOMPLETE (1<<23)
-#define RXUPDFULL (1<<24)
-#define RXUPLASTFRAG (1<<31)
+#define RXOVERRUN cpu_to_le32(1<<19)
+#define RXFC cpu_to_le32(1<<21)
+#define RXAR cpu_to_le32(1<<22)
+#define RXUPDCOMPLETE cpu_to_le32(1<<23)
+#define RXUPDFULL cpu_to_le32(1<<24)
+#define RXUPLASTFRAG cpu_to_le32(1<<31)
 
 /* Transmit descriptor bits */
-#define TXDNCOMPLETE (1<<16)
-#define TXTXINDICATE (1<<27)
-#define TXDPDEMPTY (1<<29)
-#define TXDNINDICATE (1<<31)
-#define TXDNFRAGLAST (1<<31)
+#define TXDNCOMPLETE cpu_to_le32(1<<16)
+#define TXTXINDICATE cpu_to_le32(1<<27)
+#define TXDPDEMPTY cpu_to_le32(1<<29)
+#define TXDNINDICATE cpu_to_le32(1<<31)
+#define TXDNFRAGLAST cpu_to_le32(1<<31)
 
 /* Interrupts to Acknowledge */
 #define LATCH_ACK 1
@@ -232,17 +232,17 @@
 /* 3c359 data structures */
 
 struct xl_tx_desc {
-	u32 dnnextptr ;
-	u32 framestartheader ;
-	u32 buffer ;
-	u32 buffer_length ;
+	__le32 dnnextptr;
+	__le32 framestartheader;
+	__le32 buffer;
+	__le32 buffer_length;
 };
 
 struct xl_rx_desc {
-	u32 upnextptr ;
-	u32 framestatus ;
-	u32 upfragaddr ;
-	u32 upfraglen ;
+	__le32 upnextptr;
+	__le32 framestatus;
+	__le32 upfragaddr;
+	__le32 upfraglen;
 };
 
 struct xl_private {