Diffstat (limited to 'drivers/net/dl2k.c')
-rw-r--r--	drivers/net/dl2k.c	266
1 files changed, 134 insertions(+), 132 deletions(-)
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 5066beb2e7bc..e233d04a2132 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -332,7 +332,7 @@ parse_eeprom (struct net_device *dev)
 #endif
 	/* Read eeprom */
 	for (i = 0; i < 128; i++) {
-		((u16 *) sromdata)[i] = le16_to_cpu (read_eeprom (ioaddr, i));
+		((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom (ioaddr, i));
 	}
 #ifdef	MEM_MAPPING
 	ioaddr = dev->base_addr;
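
Note on this hunk: read_eeprom() returns each word in CPU byte order, and the rest of parse_eeprom() treats sromdata as a little-endian byte image of the SROM, so the store is now cpu_to_le16() into an __le16 view and consumers convert back with le16_to_cpu() at the point of use. A minimal sketch of that access pattern (struct and field names invented for illustration, not the driver's real PSROM_t layout):

	/* sketch only: a stand-in for the driver's little-endian SROM view */
	struct srom_sketch {
		__le16 vendor_id;		/* stored little-endian in sromdata */
	};

	static u16 sketch_vendor_id(const void *sromdata)
	{
		const struct srom_sketch *s = sromdata;

		return le16_to_cpu(s->vendor_id);	/* back to CPU order at use */
	}
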
@@ -516,7 +516,7 @@ rio_timer (unsigned long data)
 					  PCI_DMA_FROMDEVICE));
 			}
 			np->rx_ring[entry].fraginfo |=
-			    cpu_to_le64 (np->rx_buf_sz) << 48;
+			    cpu_to_le64((u64)np->rx_buf_sz << 48);
 			np->rx_ring[entry].status = 0;
 		} /* end for */
 	} /* end if */
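
The fraginfo change above recurs throughout the patch: the 16-bit buffer length must be widened and shifted into bits 63:48 while the value is still in CPU order, and only then byte-swapped; shifting an already-swapped __le64, as the old code did, lands the length in the wrong bytes on big-endian hosts. A stand-alone illustration, not driver code (the 48/16 split follows the descriptor format used here):

	/* bits 47:0 carry the DMA address, bits 63:48 the buffer length */
	static __le64 sketch_pack_fraginfo(dma_addr_t addr, u16 len)
	{
		u64 v = ((u64)addr & DMA_48BIT_MASK) | ((u64)len << 48);

		return cpu_to_le64(v);	/* assemble in CPU order, swap once */
	}
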
@@ -584,11 +584,11 @@ alloc_list (struct net_device *dev)
 		    cpu_to_le64 ( pci_map_single (
 				  np->pdev, skb->data, np->rx_buf_sz,
 				  PCI_DMA_FROMDEVICE));
-		np->rx_ring[i].fraginfo |= cpu_to_le64 (np->rx_buf_sz) << 48;
+		np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
 	}
 
 	/* Set RFDListPtr */
-	writel (cpu_to_le32 (np->rx_ring_dma), dev->base_addr + RFDListPtr0);
+	writel (np->rx_ring_dma, dev->base_addr + RFDListPtr0);
 	writel (0, dev->base_addr + RFDListPtr1);
 
 	return;
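
The RFDListPtr0 write loses its cpu_to_le32() for the same underlying reason: writel() already stores its argument little-endian on big-endian machines, so pre-swapping the value swaps it twice. Roughly (the sketch uses the modern void __iomem * idiom; this driver still passes dev->base_addr around as a long):

	static void sketch_set_rfd_list(void __iomem *ioaddr, dma_addr_t ring_dma)
	{
		/* writel() performs the only CPU-to-LE conversion needed */
		writel(ring_dma, ioaddr + RFDListPtr0);
		writel(0, ioaddr + RFDListPtr1);
	}
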
@@ -620,15 +620,14 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
 	}
 #endif
 	if (np->vlan) {
-		tfc_vlan_tag =
-		    cpu_to_le64 (VLANTagInsert) |
-		    (cpu_to_le64 (np->vlan) << 32) |
-		    (cpu_to_le64 (skb->priority) << 45);
+		tfc_vlan_tag = VLANTagInsert |
+		    ((u64)np->vlan << 32) |
+		    ((u64)skb->priority << 45);
 	}
 	txdesc->fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data,
 							skb->len,
 							PCI_DMA_TODEVICE));
-	txdesc->fraginfo |= cpu_to_le64 (skb->len) << 48;
+	txdesc->fraginfo |= cpu_to_le64((u64)skb->len << 48);
 
 	/* DL2K bug: DMA fails to get next descriptor ptr in 10Mbps mode
 	 * Work around: Always use 1 descriptor in 10Mbps mode */
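
tfc_vlan_tag is now assembled as a plain u64 in CPU order, VLANTagInsert | vid << 32 | priority << 45, instead of OR-ing together pre-swapped pieces; the single cpu_to_le64() happens later, where the tag is folded into the descriptor control word (outside this hunk). Same shift-before-swap rule as fraginfo. A compact sketch (helper name invented; the driver builds this inline in start_xmit()):

	static u64 sketch_tfc_vlan_tag(u16 vid, u32 prio)
	{
		/* assembled entirely in CPU byte order */
		return VLANTagInsert | ((u64)vid << 32) | ((u64)prio << 45);
	}
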
@@ -708,6 +707,11 @@ rio_interrupt (int irq, void *dev_instance)
 	return IRQ_RETVAL(handled);
 }
 
+static inline dma_addr_t desc_to_dma(struct netdev_desc *desc)
+{
+	return le64_to_cpu(desc->fraginfo) & DMA_48BIT_MASK;
+}
+
 static void
 rio_free_tx (struct net_device *dev, int irq)
 {
@@ -725,11 +729,11 @@ rio_free_tx (struct net_device *dev, int irq)
 	while (entry != np->cur_tx) {
 		struct sk_buff *skb;
 
-		if (!(np->tx_ring[entry].status & TFDDone))
+		if (!(np->tx_ring[entry].status & cpu_to_le64(TFDDone)))
 			break;
 		skb = np->tx_skbuff[entry];
 		pci_unmap_single (np->pdev,
-				  np->tx_ring[entry].fraginfo & DMA_48BIT_MASK,
+				  desc_to_dma(&np->tx_ring[entry]),
 				  skb->len, PCI_DMA_TODEVICE);
 		if (irq)
 			dev_kfree_skb_irq (skb);
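
Two idioms introduced here carry through the rest of the patch. desc_to_dma(), added above, centralizes the le64_to_cpu() conversion plus the 48-bit address mask, so unmap and sync calls receive a real dma_addr_t instead of a masked raw __le64. And completion bits are tested as desc->status & cpu_to_le64(TFDDone): swapping the constant (usually folded at compile time) is cheaper than swapping the descriptor word on every poll, and desc->status keeps its __le64 type for sparse. Sketch of the polling test (TFDDone and struct netdev_desc come from dl2k.h):

	static bool sketch_tx_done(const struct netdev_desc *desc)
	{
		/* the constant is converted once; the descriptor stays __le64 */
		return (desc->status & cpu_to_le64(TFDDone)) != 0;
	}
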
@@ -831,13 +835,14 @@ receive_packet (struct net_device *dev)
 		int pkt_len;
 		u64 frame_status;
 
-		if (!(desc->status & RFDDone) ||
-		    !(desc->status & FrameStart) || !(desc->status & FrameEnd))
+		if (!(desc->status & cpu_to_le64(RFDDone)) ||
+		    !(desc->status & cpu_to_le64(FrameStart)) ||
+		    !(desc->status & cpu_to_le64(FrameEnd)))
 			break;
 
 		/* Chip omits the CRC. */
-		pkt_len = le64_to_cpu (desc->status & 0xffff);
-		frame_status = le64_to_cpu (desc->status);
+		frame_status = le64_to_cpu(desc->status);
+		pkt_len = frame_status & 0xffff;
 		if (--cnt < 0)
 			break;
 		/* Update rx error statistics, drop packet. */
@@ -857,15 +862,14 @@ receive_packet (struct net_device *dev)
 		/* Small skbuffs for short packets */
 		if (pkt_len > copy_thresh) {
 			pci_unmap_single (np->pdev,
-					  desc->fraginfo & DMA_48BIT_MASK,
+					  desc_to_dma(desc),
 					  np->rx_buf_sz,
 					  PCI_DMA_FROMDEVICE);
 			skb_put (skb = np->rx_skbuff[entry], pkt_len);
 			np->rx_skbuff[entry] = NULL;
 		} else if ((skb = dev_alloc_skb (pkt_len + 2)) != NULL) {
 			pci_dma_sync_single_for_cpu(np->pdev,
-						    desc->fraginfo &
-						    DMA_48BIT_MASK,
+						    desc_to_dma(desc),
 						    np->rx_buf_sz,
 						    PCI_DMA_FROMDEVICE);
 			/* 16 byte align the IP header */
@@ -875,8 +879,7 @@ receive_packet (struct net_device *dev)
 						  pkt_len);
 			skb_put (skb, pkt_len);
 			pci_dma_sync_single_for_device(np->pdev,
-						       desc->fraginfo &
-						       DMA_48BIT_MASK,
+						       desc_to_dma(desc),
 						       np->rx_buf_sz,
 						       PCI_DMA_FROMDEVICE);
 		}
@@ -919,7 +922,7 @@ receive_packet (struct net_device *dev)
 						  PCI_DMA_FROMDEVICE));
 		}
 		np->rx_ring[entry].fraginfo |=
-		    cpu_to_le64 (np->rx_buf_sz) << 48;
+		    cpu_to_le64((u64)np->rx_buf_sz << 48);
 		np->rx_ring[entry].status = 0;
 		entry = (entry + 1) % RX_RING_SIZE;
 	}
@@ -1121,7 +1124,7 @@ set_multicast (struct net_device *dev)
 
 	hash_table[0] = hash_table[1] = 0;
 	/* RxFlowcontrol DA: 01-80-C2-00-00-01. Hash index=0x39 */
-	hash_table[1] |= cpu_to_le32(0x02000000);
+	hash_table[1] |= 0x02000000;
 	if (dev->flags & IFF_PROMISC) {
 		/* Receive all frames promiscuously. */
 		rx_mode = ReceiveAllFrames;
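
The hash-table fix follows the writel() rule again: hash_table[0] and hash_table[1] are plain u32 values that are later programmed into the chip with writel(), which does the byte-swapping itself, so the bit for the flow-control DA (hash index 0x39, i.e. bit 25 of the second word) is set on the unswapped value. Roughly, assuming the HashTable0/HashTable1 register offsets from dl2k.h:

	hash_table[1] |= 0x02000000;			/* bit 0x39 - 32 = 25, CPU order */
	...
	writel (hash_table[0], ioaddr + HashTable0);	/* writel() swaps once */
	writel (hash_table[1], ioaddr + HashTable1);
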
@@ -1313,9 +1316,10 @@ rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1313 ("%02x:cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x", 1316 ("%02x:cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x",
1314 i, 1317 i,
1315 (u32) (np->tx_ring_dma + i * sizeof (*desc)), 1318 (u32) (np->tx_ring_dma + i * sizeof (*desc)),
1316 (u32) desc->next_desc, 1319 (u32)le64_to_cpu(desc->next_desc),
1317 (u32) desc->status, (u32) (desc->fraginfo >> 32), 1320 (u32)le64_to_cpu(desc->status),
1318 (u32) desc->fraginfo); 1321 (u32)(le64_to_cpu(desc->fraginfo) >> 32),
1322 (u32)le64_to_cpu(desc->fraginfo));
1319 printk ("\n"); 1323 printk ("\n");
1320 } 1324 }
1321 printk ("\n"); 1325 printk ("\n");
@@ -1432,7 +1436,7 @@ mii_write (struct net_device *dev, int phy_addr, int reg_num, u16 data)
 static int
 mii_wait_link (struct net_device *dev, int wait)
 {
-	BMSR_t bmsr;
+	__u16 bmsr;
 	int phy_addr;
 	struct netdev_private *np;
 
@@ -1440,8 +1444,8 @@ mii_wait_link (struct net_device *dev, int wait)
 	phy_addr = np->phy_addr;
 
 	do {
-		bmsr.image = mii_read (dev, phy_addr, MII_BMSR);
-		if (bmsr.bits.link_status)
+		bmsr = mii_read (dev, phy_addr, MII_BMSR);
+		if (bmsr & MII_BMSR_LINK_STATUS)
 			return 0;
 		mdelay (1);
 	} while (--wait > 0);
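
From here on the patch retires the BMSR_t/BMCR_t/ANAR_t bitfield unions in favour of plain __u16 values tested against MII_* masks from dl2k.h. The layout of C bitfields is ABI-defined and differs between little- and big-endian targets, so bmsr.bits.link_status only lined up with the hardware register on some hosts, while a numeric mask is unambiguous everywhere. A simplified illustration of the problem and the replacement (the union is a stand-in, not the old header's exact definition):

	union bmsr_sketch {			/* old style: bit position depends on the ABI */
		__u16 image;
		struct {
			__u16 link_status:1;	/* LSB on some ABIs, MSB on others */
			__u16 rest:15;
		} bits;
	};

	#define SKETCH_BMSR_LINK_STATUS	0x0004	/* new style: bit 2 of the BMSR, everywhere */
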
@@ -1450,70 +1454,72 @@ mii_wait_link (struct net_device *dev, int wait)
 static int
 mii_get_media (struct net_device *dev)
 {
-	ANAR_t negotiate;
-	BMSR_t bmsr;
-	BMCR_t bmcr;
-	MSCR_t mscr;
-	MSSR_t mssr;
+	__u16 negotiate;
+	__u16 bmsr;
+	__u16 mscr;
+	__u16 mssr;
 	int phy_addr;
 	struct netdev_private *np;
 
 	np = netdev_priv(dev);
 	phy_addr = np->phy_addr;
 
-	bmsr.image = mii_read (dev, phy_addr, MII_BMSR);
+	bmsr = mii_read (dev, phy_addr, MII_BMSR);
 	if (np->an_enable) {
-		if (!bmsr.bits.an_complete) {
+		if (!(bmsr & MII_BMSR_AN_COMPLETE)) {
 			/* Auto-Negotiation not completed */
 			return -1;
 		}
-		negotiate.image = mii_read (dev, phy_addr, MII_ANAR) &
+		negotiate = mii_read (dev, phy_addr, MII_ANAR) &
 			mii_read (dev, phy_addr, MII_ANLPAR);
-		mscr.image = mii_read (dev, phy_addr, MII_MSCR);
-		mssr.image = mii_read (dev, phy_addr, MII_MSSR);
-		if (mscr.bits.media_1000BT_FD & mssr.bits.lp_1000BT_FD) {
+		mscr = mii_read (dev, phy_addr, MII_MSCR);
+		mssr = mii_read (dev, phy_addr, MII_MSSR);
+		if (mscr & MII_MSCR_1000BT_FD && mssr & MII_MSSR_LP_1000BT_FD) {
 			np->speed = 1000;
 			np->full_duplex = 1;
 			printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
-		} else if (mscr.bits.media_1000BT_HD & mssr.bits.lp_1000BT_HD) {
+		} else if (mscr & MII_MSCR_1000BT_HD && mssr & MII_MSSR_LP_1000BT_HD) {
 			np->speed = 1000;
 			np->full_duplex = 0;
 			printk (KERN_INFO "Auto 1000 Mbps, Half duplex\n");
-		} else if (negotiate.bits.media_100BX_FD) {
+		} else if (negotiate & MII_ANAR_100BX_FD) {
 			np->speed = 100;
 			np->full_duplex = 1;
 			printk (KERN_INFO "Auto 100 Mbps, Full duplex\n");
-		} else if (negotiate.bits.media_100BX_HD) {
+		} else if (negotiate & MII_ANAR_100BX_HD) {
 			np->speed = 100;
 			np->full_duplex = 0;
 			printk (KERN_INFO "Auto 100 Mbps, Half duplex\n");
-		} else if (negotiate.bits.media_10BT_FD) {
+		} else if (negotiate & MII_ANAR_10BT_FD) {
 			np->speed = 10;
 			np->full_duplex = 1;
 			printk (KERN_INFO "Auto 10 Mbps, Full duplex\n");
-		} else if (negotiate.bits.media_10BT_HD) {
+		} else if (negotiate & MII_ANAR_10BT_HD) {
 			np->speed = 10;
 			np->full_duplex = 0;
 			printk (KERN_INFO "Auto 10 Mbps, Half duplex\n");
 		}
-		if (negotiate.bits.pause) {
+		if (negotiate & MII_ANAR_PAUSE) {
 			np->tx_flow &= 1;
 			np->rx_flow &= 1;
-		} else if (negotiate.bits.asymmetric) {
+		} else if (negotiate & MII_ANAR_ASYMMETRIC) {
 			np->tx_flow = 0;
 			np->rx_flow &= 1;
 		}
 		/* else tx_flow, rx_flow = user select */
 	} else {
-		bmcr.image = mii_read (dev, phy_addr, MII_BMCR);
-		if (bmcr.bits.speed100 == 1 && bmcr.bits.speed1000 == 0) {
+		__u16 bmcr = mii_read (dev, phy_addr, MII_BMCR);
+		switch (bmcr & (MII_BMCR_SPEED_100 | MII_BMCR_SPEED_1000)) {
+		case MII_BMCR_SPEED_1000:
+			printk (KERN_INFO "Operating at 1000 Mbps, ");
+			break;
+		case MII_BMCR_SPEED_100:
 			printk (KERN_INFO "Operating at 100 Mbps, ");
-		} else if (bmcr.bits.speed100 == 0 && bmcr.bits.speed1000 == 0) {
+			break;
+		case 0:
 			printk (KERN_INFO "Operating at 10 Mbps, ");
-		} else if (bmcr.bits.speed100 == 0 && bmcr.bits.speed1000 == 1) {
-			printk (KERN_INFO "Operating at 1000 Mbps, ");
 		}
-		if (bmcr.bits.duplex_mode) {
+		if (bmcr & MII_BMCR_DUPLEX_MODE) {
 			printk ("Full duplex\n");
 		} else {
 			printk ("Half duplex\n");
@@ -1534,10 +1540,10 @@ mii_get_media (struct net_device *dev)
 static int
 mii_set_media (struct net_device *dev)
 {
-	PHY_SCR_t pscr;
-	BMCR_t bmcr;
-	BMSR_t bmsr;
-	ANAR_t anar;
+	__u16 pscr;
+	__u16 bmcr;
+	__u16 bmsr;
+	__u16 anar;
 	int phy_addr;
 	struct netdev_private *np;
 	np = netdev_priv(dev);
@@ -1546,76 +1552,77 @@ mii_set_media (struct net_device *dev)
 	/* Does user set speed? */
 	if (np->an_enable) {
 		/* Advertise capabilities */
-		bmsr.image = mii_read (dev, phy_addr, MII_BMSR);
-		anar.image = mii_read (dev, phy_addr, MII_ANAR);
-		anar.bits.media_100BX_FD = bmsr.bits.media_100BX_FD;
-		anar.bits.media_100BX_HD = bmsr.bits.media_100BX_HD;
-		anar.bits.media_100BT4 = bmsr.bits.media_100BT4;
-		anar.bits.media_10BT_FD = bmsr.bits.media_10BT_FD;
-		anar.bits.media_10BT_HD = bmsr.bits.media_10BT_HD;
-		anar.bits.pause = 1;
-		anar.bits.asymmetric = 1;
-		mii_write (dev, phy_addr, MII_ANAR, anar.image);
+		bmsr = mii_read (dev, phy_addr, MII_BMSR);
+		anar = mii_read (dev, phy_addr, MII_ANAR) &
+			~MII_ANAR_100BX_FD &
+			~MII_ANAR_100BX_HD &
+			~MII_ANAR_100BT4 &
+			~MII_ANAR_10BT_FD &
+			~MII_ANAR_10BT_HD;
+		if (bmsr & MII_BMSR_100BX_FD)
+			anar |= MII_ANAR_100BX_FD;
+		if (bmsr & MII_BMSR_100BX_HD)
+			anar |= MII_ANAR_100BX_HD;
+		if (bmsr & MII_BMSR_100BT4)
+			anar |= MII_ANAR_100BT4;
+		if (bmsr & MII_BMSR_10BT_FD)
+			anar |= MII_ANAR_10BT_FD;
+		if (bmsr & MII_BMSR_10BT_HD)
+			anar |= MII_ANAR_10BT_HD;
+		anar |= MII_ANAR_PAUSE | MII_ANAR_ASYMMETRIC;
+		mii_write (dev, phy_addr, MII_ANAR, anar);
 
 		/* Enable Auto crossover */
-		pscr.image = mii_read (dev, phy_addr, MII_PHY_SCR);
-		pscr.bits.mdi_crossover_mode = 3;	/* 11'b */
-		mii_write (dev, phy_addr, MII_PHY_SCR, pscr.image);
+		pscr = mii_read (dev, phy_addr, MII_PHY_SCR);
+		pscr |= 3 << 5; /* 11'b */
+		mii_write (dev, phy_addr, MII_PHY_SCR, pscr);
 
 		/* Soft reset PHY */
 		mii_write (dev, phy_addr, MII_BMCR, MII_BMCR_RESET);
-		bmcr.image = 0;
-		bmcr.bits.an_enable = 1;
-		bmcr.bits.restart_an = 1;
-		bmcr.bits.reset = 1;
-		mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
+		bmcr = MII_BMCR_AN_ENABLE | MII_BMCR_RESTART_AN | MII_BMCR_RESET;
+		mii_write (dev, phy_addr, MII_BMCR, bmcr);
 		mdelay(1);
 	} else {
 		/* Force speed setting */
 		/* 1) Disable Auto crossover */
-		pscr.image = mii_read (dev, phy_addr, MII_PHY_SCR);
-		pscr.bits.mdi_crossover_mode = 0;
-		mii_write (dev, phy_addr, MII_PHY_SCR, pscr.image);
+		pscr = mii_read (dev, phy_addr, MII_PHY_SCR);
+		pscr &= ~(3 << 5);
+		mii_write (dev, phy_addr, MII_PHY_SCR, pscr);
 
 		/* 2) PHY Reset */
-		bmcr.image = mii_read (dev, phy_addr, MII_BMCR);
-		bmcr.bits.reset = 1;
-		mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
+		bmcr = mii_read (dev, phy_addr, MII_BMCR);
+		bmcr |= MII_BMCR_RESET;
+		mii_write (dev, phy_addr, MII_BMCR, bmcr);
 
 		/* 3) Power Down */
-		bmcr.image = 0x1940;	/* must be 0x1940 */
-		mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
+		bmcr = 0x1940;	/* must be 0x1940 */
+		mii_write (dev, phy_addr, MII_BMCR, bmcr);
 		mdelay (100);	/* wait a certain time */
 
 		/* 4) Advertise nothing */
 		mii_write (dev, phy_addr, MII_ANAR, 0);
 
 		/* 5) Set media and Power Up */
-		bmcr.image = 0;
-		bmcr.bits.power_down = 1;
+		bmcr = MII_BMCR_POWER_DOWN;
 		if (np->speed == 100) {
-			bmcr.bits.speed100 = 1;
-			bmcr.bits.speed1000 = 0;
+			bmcr |= MII_BMCR_SPEED_100;
 			printk (KERN_INFO "Manual 100 Mbps, ");
 		} else if (np->speed == 10) {
-			bmcr.bits.speed100 = 0;
-			bmcr.bits.speed1000 = 0;
 			printk (KERN_INFO "Manual 10 Mbps, ");
 		}
 		if (np->full_duplex) {
-			bmcr.bits.duplex_mode = 1;
+			bmcr |= MII_BMCR_DUPLEX_MODE;
 			printk ("Full duplex\n");
 		} else {
-			bmcr.bits.duplex_mode = 0;
 			printk ("Half duplex\n");
 		}
 #if 0
 		/* Set 1000BaseT Master/Slave setting */
-		mscr.image = mii_read (dev, phy_addr, MII_MSCR);
-		mscr.bits.cfg_enable = 1;
-		mscr.bits.cfg_value = 0;
+		mscr = mii_read (dev, phy_addr, MII_MSCR);
+		mscr |= MII_MSCR_CFG_ENABLE;
+		mscr &= ~MII_MSCR_CFG_VALUE = 0;
 #endif
-		mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
+		mii_write (dev, phy_addr, MII_BMCR, bmcr);
 		mdelay(10);
 	}
 	return 0;
@@ -1624,43 +1631,42 @@ mii_set_media (struct net_device *dev)
 static int
 mii_get_media_pcs (struct net_device *dev)
 {
-	ANAR_PCS_t negotiate;
-	BMSR_t bmsr;
-	BMCR_t bmcr;
+	__u16 negotiate;
+	__u16 bmsr;
 	int phy_addr;
 	struct netdev_private *np;
 
 	np = netdev_priv(dev);
 	phy_addr = np->phy_addr;
 
-	bmsr.image = mii_read (dev, phy_addr, PCS_BMSR);
+	bmsr = mii_read (dev, phy_addr, PCS_BMSR);
 	if (np->an_enable) {
-		if (!bmsr.bits.an_complete) {
+		if (!(bmsr & MII_BMSR_AN_COMPLETE)) {
 			/* Auto-Negotiation not completed */
 			return -1;
 		}
-		negotiate.image = mii_read (dev, phy_addr, PCS_ANAR) &
+		negotiate = mii_read (dev, phy_addr, PCS_ANAR) &
 			mii_read (dev, phy_addr, PCS_ANLPAR);
 		np->speed = 1000;
-		if (negotiate.bits.full_duplex) {
+		if (negotiate & PCS_ANAR_FULL_DUPLEX) {
 			printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
 			np->full_duplex = 1;
 		} else {
 			printk (KERN_INFO "Auto 1000 Mbps, half duplex\n");
 			np->full_duplex = 0;
 		}
-		if (negotiate.bits.pause) {
+		if (negotiate & PCS_ANAR_PAUSE) {
 			np->tx_flow &= 1;
 			np->rx_flow &= 1;
-		} else if (negotiate.bits.asymmetric) {
+		} else if (negotiate & PCS_ANAR_ASYMMETRIC) {
 			np->tx_flow = 0;
 			np->rx_flow &= 1;
 		}
 		/* else tx_flow, rx_flow = user select */
 	} else {
-		bmcr.image = mii_read (dev, phy_addr, PCS_BMCR);
+		__u16 bmcr = mii_read (dev, phy_addr, PCS_BMCR);
 		printk (KERN_INFO "Operating at 1000 Mbps, ");
-		if (bmcr.bits.duplex_mode) {
+		if (bmcr & MII_BMCR_DUPLEX_MODE) {
 			printk ("Full duplex\n");
 		} else {
 			printk ("Half duplex\n");
@@ -1681,9 +1687,9 @@ mii_get_media_pcs (struct net_device *dev)
 static int
 mii_set_media_pcs (struct net_device *dev)
 {
-	BMCR_t bmcr;
-	ESR_t esr;
-	ANAR_PCS_t anar;
+	__u16 bmcr;
+	__u16 esr;
+	__u16 anar;
 	int phy_addr;
 	struct netdev_private *np;
 	np = netdev_priv(dev);
@@ -1692,41 +1698,37 @@ mii_set_media_pcs (struct net_device *dev)
 	/* Auto-Negotiation? */
 	if (np->an_enable) {
 		/* Advertise capabilities */
-		esr.image = mii_read (dev, phy_addr, PCS_ESR);
-		anar.image = mii_read (dev, phy_addr, MII_ANAR);
-		anar.bits.half_duplex =
-			esr.bits.media_1000BT_HD | esr.bits.media_1000BX_HD;
-		anar.bits.full_duplex =
-			esr.bits.media_1000BT_FD | esr.bits.media_1000BX_FD;
-		anar.bits.pause = 1;
-		anar.bits.asymmetric = 1;
-		mii_write (dev, phy_addr, MII_ANAR, anar.image);
+		esr = mii_read (dev, phy_addr, PCS_ESR);
+		anar = mii_read (dev, phy_addr, MII_ANAR) &
+			~PCS_ANAR_HALF_DUPLEX &
+			~PCS_ANAR_FULL_DUPLEX;
+		if (esr & (MII_ESR_1000BT_HD | MII_ESR_1000BX_HD))
+			anar |= PCS_ANAR_HALF_DUPLEX;
+		if (esr & (MII_ESR_1000BT_FD | MII_ESR_1000BX_FD))
+			anar |= PCS_ANAR_FULL_DUPLEX;
+		anar |= PCS_ANAR_PAUSE | PCS_ANAR_ASYMMETRIC;
+		mii_write (dev, phy_addr, MII_ANAR, anar);
 
 		/* Soft reset PHY */
 		mii_write (dev, phy_addr, MII_BMCR, MII_BMCR_RESET);
-		bmcr.image = 0;
-		bmcr.bits.an_enable = 1;
-		bmcr.bits.restart_an = 1;
-		bmcr.bits.reset = 1;
-		mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
+		bmcr = MII_BMCR_AN_ENABLE | MII_BMCR_RESTART_AN |
+			MII_BMCR_RESET;
+		mii_write (dev, phy_addr, MII_BMCR, bmcr);
 		mdelay(1);
 	} else {
 		/* Force speed setting */
 		/* PHY Reset */
-		bmcr.image = 0;
-		bmcr.bits.reset = 1;
-		mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
+		bmcr = MII_BMCR_RESET;
+		mii_write (dev, phy_addr, MII_BMCR, bmcr);
 		mdelay(10);
-		bmcr.image = 0;
-		bmcr.bits.an_enable = 0;
 		if (np->full_duplex) {
-			bmcr.bits.duplex_mode = 1;
+			bmcr = MII_BMCR_DUPLEX_MODE;
 			printk (KERN_INFO "Manual full duplex\n");
 		} else {
-			bmcr.bits.duplex_mode = 0;
+			bmcr = 0;
 			printk (KERN_INFO "Manual half duplex\n");
 		}
-		mii_write (dev, phy_addr, MII_BMCR, bmcr.image);
+		mii_write (dev, phy_addr, MII_BMCR, bmcr);
 		mdelay(10);
 
 		/* Advertise nothing */
@@ -1762,7 +1764,7 @@ rio_close (struct net_device *dev)
 		skb = np->rx_skbuff[i];
 		if (skb) {
 			pci_unmap_single(np->pdev,
-					 np->rx_ring[i].fraginfo & DMA_48BIT_MASK,
+					 desc_to_dma(&np->rx_ring[i]),
 					 skb->len, PCI_DMA_FROMDEVICE);
 			dev_kfree_skb (skb);
 			np->rx_skbuff[i] = NULL;
@@ -1772,7 +1774,7 @@ rio_close (struct net_device *dev)
 		skb = np->tx_skbuff[i];
 		if (skb) {
 			pci_unmap_single(np->pdev,
-					 np->tx_ring[i].fraginfo & DMA_48BIT_MASK,
+					 desc_to_dma(&np->tx_ring[i]),
 					 skb->len, PCI_DMA_TODEVICE);
 			dev_kfree_skb (skb);
 			np->tx_skbuff[i] = NULL;