 drivers/net/dl2k.c | 51
 drivers/net/dl2k.h |  6
 2 files changed, 30 insertions(+), 27 deletions(-)
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 5066beb2e7bc..47cce9cad30f 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -332,7 +332,7 @@ parse_eeprom (struct net_device *dev)
 #endif
 	/* Read eeprom */
 	for (i = 0; i < 128; i++) {
-		((u16 *) sromdata)[i] = le16_to_cpu (read_eeprom (ioaddr, i));
+		((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom (ioaddr, i));
 	}
 #ifdef MEM_MAPPING
 	ioaddr = dev->base_addr;
@@ -516,7 +516,7 @@ rio_timer (unsigned long data)
 						     PCI_DMA_FROMDEVICE));
 			}
 			np->rx_ring[entry].fraginfo |=
-			    cpu_to_le64 (np->rx_buf_sz) << 48;
+			    cpu_to_le64((u64)np->rx_buf_sz << 48);
 			np->rx_ring[entry].status = 0;
 		} /* end for */
 	} /* end if */
@@ -584,11 +584,11 @@ alloc_list (struct net_device *dev)
 			cpu_to_le64 ( pci_map_single (
 				      np->pdev, skb->data, np->rx_buf_sz,
 				      PCI_DMA_FROMDEVICE));
-		np->rx_ring[i].fraginfo |= cpu_to_le64 (np->rx_buf_sz) << 48;
+		np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
 	}
 
 	/* Set RFDListPtr */
-	writel (cpu_to_le32 (np->rx_ring_dma), dev->base_addr + RFDListPtr0);
+	writel (np->rx_ring_dma, dev->base_addr + RFDListPtr0);
 	writel (0, dev->base_addr + RFDListPtr1);
 
 	return;
@@ -620,15 +620,14 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
 	}
 #endif
 	if (np->vlan) {
-		tfc_vlan_tag =
-		    cpu_to_le64 (VLANTagInsert) |
-		    (cpu_to_le64 (np->vlan) << 32) |
-		    (cpu_to_le64 (skb->priority) << 45);
+		tfc_vlan_tag = VLANTagInsert |
+		    ((u64)np->vlan << 32) |
+		    ((u64)skb->priority << 45);
 	}
 	txdesc->fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data,
 							skb->len,
 							PCI_DMA_TODEVICE));
-	txdesc->fraginfo |= cpu_to_le64 (skb->len) << 48;
+	txdesc->fraginfo |= cpu_to_le64((u64)skb->len << 48);
 
 	/* DL2K bug: DMA fails to get next descriptor ptr in 10Mbps mode
 	 * Work around: Always use 1 descriptor in 10Mbps mode */
@@ -708,6 +707,11 @@ rio_interrupt (int irq, void *dev_instance)
 	return IRQ_RETVAL(handled);
 }
 
+static inline dma_addr_t desc_to_dma(struct netdev_desc *desc)
+{
+	return le64_to_cpu(desc->fraginfo) & DMA_48BIT_MASK;
+}
+
 static void
 rio_free_tx (struct net_device *dev, int irq)
 {
@@ -725,11 +729,11 @@ rio_free_tx (struct net_device *dev, int irq)
 	while (entry != np->cur_tx) {
 		struct sk_buff *skb;
 
-		if (!(np->tx_ring[entry].status & TFDDone))
+		if (!(np->tx_ring[entry].status & cpu_to_le64(TFDDone)))
 			break;
 		skb = np->tx_skbuff[entry];
 		pci_unmap_single (np->pdev,
-				  np->tx_ring[entry].fraginfo & DMA_48BIT_MASK,
+				  desc_to_dma(&np->tx_ring[entry]),
 				  skb->len, PCI_DMA_TODEVICE);
 		if (irq)
 			dev_kfree_skb_irq (skb);
@@ -831,13 +835,14 @@ receive_packet (struct net_device *dev)
 		int pkt_len;
 		u64 frame_status;
 
-		if (!(desc->status & RFDDone) ||
-		    !(desc->status & FrameStart) || !(desc->status & FrameEnd))
+		if (!(desc->status & cpu_to_le64(RFDDone)) ||
+		    !(desc->status & cpu_to_le64(FrameStart)) ||
+		    !(desc->status & cpu_to_le64(FrameEnd)))
 			break;
 
 		/* Chip omits the CRC. */
-		pkt_len = le64_to_cpu (desc->status & 0xffff);
-		frame_status = le64_to_cpu (desc->status);
+		frame_status = le64_to_cpu(desc->status);
+		pkt_len = frame_status & 0xffff;
 		if (--cnt < 0)
 			break;
 		/* Update rx error statistics, drop packet. */
@@ -857,15 +862,14 @@ receive_packet (struct net_device *dev)
 		/* Small skbuffs for short packets */
 		if (pkt_len > copy_thresh) {
 			pci_unmap_single (np->pdev,
-					  desc->fraginfo & DMA_48BIT_MASK,
+					  desc_to_dma(desc),
 					  np->rx_buf_sz,
 					  PCI_DMA_FROMDEVICE);
 			skb_put (skb = np->rx_skbuff[entry], pkt_len);
 			np->rx_skbuff[entry] = NULL;
 		} else if ((skb = dev_alloc_skb (pkt_len + 2)) != NULL) {
 			pci_dma_sync_single_for_cpu(np->pdev,
-						    desc->fraginfo &
-						    DMA_48BIT_MASK,
+						    desc_to_dma(desc),
 						    np->rx_buf_sz,
 						    PCI_DMA_FROMDEVICE);
 			/* 16 byte align the IP header */
@@ -875,8 +879,7 @@ receive_packet (struct net_device *dev)
 						  pkt_len);
 			skb_put (skb, pkt_len);
 			pci_dma_sync_single_for_device(np->pdev,
-						       desc->fraginfo &
-						       DMA_48BIT_MASK,
+						       desc_to_dma(desc),
 						       np->rx_buf_sz,
 						       PCI_DMA_FROMDEVICE);
 		}
@@ -919,7 +922,7 @@ receive_packet (struct net_device *dev)
 							  PCI_DMA_FROMDEVICE));
 		}
 		np->rx_ring[entry].fraginfo |=
-		    cpu_to_le64 (np->rx_buf_sz) << 48;
+		    cpu_to_le64((u64)np->rx_buf_sz << 48);
 		np->rx_ring[entry].status = 0;
 		entry = (entry + 1) % RX_RING_SIZE;
 	}
@@ -1121,7 +1124,7 @@ set_multicast (struct net_device *dev)
 
 	hash_table[0] = hash_table[1] = 0;
 	/* RxFlowcontrol DA: 01-80-C2-00-00-01. Hash index=0x39 */
-	hash_table[1] |= cpu_to_le32(0x02000000);
+	hash_table[1] |= 0x02000000;
 	if (dev->flags & IFF_PROMISC) {
 		/* Receive all frames promiscuously. */
 		rx_mode = ReceiveAllFrames;
@@ -1762,7 +1765,7 @@ rio_close (struct net_device *dev)
 		skb = np->rx_skbuff[i];
 		if (skb) {
 			pci_unmap_single(np->pdev,
-					 np->rx_ring[i].fraginfo & DMA_48BIT_MASK,
+					 desc_to_dma(&np->rx_ring[i]),
 					 skb->len, PCI_DMA_FROMDEVICE);
 			dev_kfree_skb (skb);
 			np->rx_skbuff[i] = NULL;
@@ -1772,7 +1775,7 @@ rio_close (struct net_device *dev)
 		skb = np->tx_skbuff[i];
 		if (skb) {
 			pci_unmap_single(np->pdev,
-					 np->tx_ring[i].fraginfo & DMA_48BIT_MASK,
+					 desc_to_dma(&np->tx_ring[i]),
 					 skb->len, PCI_DMA_TODEVICE);
 			dev_kfree_skb (skb);
 			np->tx_skbuff[i] = NULL;
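
The dl2k.c changes above all follow one rule: do the arithmetic (shifting the buffer length into bits 63:48, masking out the DMA address) on plain CPU-order u64 values, and apply cpu_to_le64()/le64_to_cpu() exactly once at the descriptor boundary; the new desc_to_dma() helper wraps the convert-then-mask half of that for readers of fraginfo. For reference, a minimal userspace sketch of the same rule, using glibc's htole64()/le64toh() from <endian.h> as stand-ins for the kernel helpers; desc_sketch, desc_to_dma_sketch and DMA_48BIT_MASK_SKETCH are illustrative names, not part of the driver.

/*
 * Standalone sketch (not kernel code): shift/mask in CPU byte order
 * first, convert to little-endian once at the boundary.
 */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define DMA_48BIT_MASK_SKETCH 0x0000ffffffffffffULL

struct desc_sketch {
	uint64_t fraginfo;	/* stored little-endian: addr[47:0] | len << 48 */
};

/* Mirrors the idea of desc_to_dma(): convert to CPU order, then mask. */
static uint64_t desc_to_dma_sketch(const struct desc_sketch *d)
{
	return le64toh(d->fraginfo) & DMA_48BIT_MASK_SKETCH;
}

int main(void)
{
	struct desc_sketch d;
	uint64_t dma = 0x123456789abcULL;	/* example bus address */
	uint64_t len = 1536;			/* example buffer length */

	/* Right: build the whole field in CPU order, convert once. */
	d.fraginfo = htole64(dma | (len << 48));

	/* Wrong on big-endian hosts: htole64(len) << 48 would shift
	 * already byte-swapped data, corrupting both length and address. */

	printf("addr back out: %#llx\n",
	       (unsigned long long)desc_to_dma_sketch(&d));
	printf("len back out:  %llu\n",
	       (unsigned long long)(le64toh(d.fraginfo) >> 48));
	return 0;
}

On little-endian machines the old and new forms happen to produce the same bytes, which is why the driver worked there; the reordered version is the one that is also correct on big-endian hosts.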
diff --git a/drivers/net/dl2k.h b/drivers/net/dl2k.h
index 5b801775f42d..014b77ce96df 100644
--- a/drivers/net/dl2k.h
+++ b/drivers/net/dl2k.h
@@ -633,9 +633,9 @@ struct mii_data {
 
 /* The Rx and Tx buffer descriptors. */
 struct netdev_desc {
-	u64 next_desc;
-	u64 status;
-	u64 fraginfo;
+	__le64 next_desc;
+	__le64 status;
+	__le64 fraginfo;
 };
 
 #define PRIV_ALIGN	15	/* Required alignment mask */
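
Switching the descriptor fields from u64 to __le64 is what lets sparse (`make C=1` with endianness checking enabled) catch the kind of mix-up fixed above, such as testing TFDDone against a little-endian status word without cpu_to_le64(). Below is a minimal, self-contained sketch of how that annotation works; the __bitwise/__force definitions mirror the kernel's, but cpu_to_le64_sketch() omits the actual byte swap and the other names are illustrative only, not the driver's.

/*
 * Standalone sketch: a __bitwise typedef makes the checker treat
 * little-endian values as a distinct type, so mixing them with plain
 * CPU-order integers is reported instead of silently compiling.
 */
#ifdef __CHECKER__
#define __bitwise __attribute__((bitwise))
#define __force   __attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned long long u64;
typedef u64 __bitwise __le64;

#define TFDDone 0x80000000ULL		/* example status bit */

struct netdev_desc_sketch {
	__le64 status;			/* device writes this little-endian */
};

static inline __le64 cpu_to_le64_sketch(u64 v)
{
	return (__force __le64)v;	/* real byte swap omitted in this sketch */
}

static int tx_done(const struct netdev_desc_sketch *d)
{
	/* OK: both operands are __le64, the checker stays quiet. */
	return (d->status & cpu_to_le64_sketch(TFDDone)) != 0;

	/* Would be flagged by sparse (restricted __le64 mixed with a
	 * plain integer):  return (d->status & TFDDone) != 0;
	 */
}

int main(void)
{
	struct netdev_desc_sketch d = { .status = cpu_to_le64_sketch(TFDDone) };
	return !tx_done(&d);
}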