author     Jeff Garzik <jeff@garzik.org>       2008-04-29 01:45:04 -0400
committer  Jeff Garzik <jgarzik@redhat.com>    2008-04-29 01:45:04 -0400
commit     090bf62199d6079cc47c0b78ced9508391b24fa1
tree       49f473dfdd76a8d7cd1617b591e7fddb44a51fbe /drivers/net
parent     8ceee660aacb29721e26f08e336c58dc4847d1bd
parent     697c269610179051cf19e45566fee3dcebbb1e93
Merge branch 'sis190' of git://git.kernel.org/pub/scm/linux/kernel/git/romieu/netdev-2.6 into upstream
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/sis190.c | 136
1 file changed, 81 insertions, 55 deletions
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 20745fd4e973..abc63b0663be 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -212,6 +212,12 @@ enum _DescStatusBit {
 	THOL2		= 0x20000000,
 	THOL1		= 0x10000000,
 	THOL0		= 0x00000000,
+
+	WND		= 0x00080000,
+	TABRT		= 0x00040000,
+	FIFO		= 0x00020000,
+	LINK		= 0x00010000,
+	ColCountMask	= 0x0000ffff,
 	/* RxDesc.status */
 	IPON		= 0x20000000,
 	TCPON		= 0x10000000,
@@ -480,30 +486,23 @@ static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
 	desc->status = 0x0;
 }
 
-static int sis190_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
-			       struct RxDesc *desc, u32 rx_buf_sz)
+static struct sk_buff *sis190_alloc_rx_skb(struct sis190_private *tp,
+					   struct RxDesc *desc)
 {
+	u32 rx_buf_sz = tp->rx_buf_sz;
 	struct sk_buff *skb;
-	dma_addr_t mapping;
-	int ret = 0;
-
-	skb = dev_alloc_skb(rx_buf_sz);
-	if (!skb)
-		goto err_out;
-
-	*sk_buff = skb;
 
-	mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
-				 PCI_DMA_FROMDEVICE);
+	skb = netdev_alloc_skb(tp->dev, rx_buf_sz);
+	if (likely(skb)) {
+		dma_addr_t mapping;
 
-	sis190_map_to_asic(desc, mapping, rx_buf_sz);
-out:
-	return ret;
+		mapping = pci_map_single(tp->pci_dev, skb->data, tp->rx_buf_sz,
+					 PCI_DMA_FROMDEVICE);
+		sis190_map_to_asic(desc, mapping, rx_buf_sz);
+	} else
+		sis190_make_unusable_by_asic(desc);
 
-err_out:
-	ret = -ENOMEM;
-	sis190_make_unusable_by_asic(desc);
-	goto out;
+	return skb;
 }
 
 static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
@@ -512,37 +511,41 @@ static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
 	u32 cur;
 
 	for (cur = start; cur < end; cur++) {
-		int ret, i = cur % NUM_RX_DESC;
+		unsigned int i = cur % NUM_RX_DESC;
 
 		if (tp->Rx_skbuff[i])
 			continue;
 
-		ret = sis190_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
-					  tp->RxDescRing + i, tp->rx_buf_sz);
-		if (ret < 0)
+		tp->Rx_skbuff[i] = sis190_alloc_rx_skb(tp, tp->RxDescRing + i);
+
+		if (!tp->Rx_skbuff[i])
 			break;
 	}
 	return cur - start;
 }
 
-static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
-				     struct RxDesc *desc, int rx_buf_sz)
+static bool sis190_try_rx_copy(struct sis190_private *tp,
+			       struct sk_buff **sk_buff, int pkt_size,
+			       dma_addr_t addr)
 {
-	int ret = -1;
+	struct sk_buff *skb;
+	bool done = false;
 
-	if (pkt_size < rx_copybreak) {
-		struct sk_buff *skb;
+	if (pkt_size >= rx_copybreak)
+		goto out;
 
-		skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
-		if (skb) {
-			skb_reserve(skb, NET_IP_ALIGN);
-			skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
-			*sk_buff = skb;
-			sis190_give_to_asic(desc, rx_buf_sz);
-			ret = 0;
-		}
-	}
-	return ret;
+	skb = netdev_alloc_skb(tp->dev, pkt_size + 2);
+	if (!skb)
+		goto out;
+
+	pci_dma_sync_single_for_device(tp->pci_dev, addr, pkt_size,
+				       PCI_DMA_FROMDEVICE);
+	skb_reserve(skb, 2);
+	skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
+	*sk_buff = skb;
+	done = true;
+out:
+	return done;
 }
 
 static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
@@ -592,9 +595,9 @@ static int sis190_rx_interrupt(struct net_device *dev,
 			sis190_give_to_asic(desc, tp->rx_buf_sz);
 		else {
 			struct sk_buff *skb = tp->Rx_skbuff[entry];
+			dma_addr_t addr = le32_to_cpu(desc->addr);
 			int pkt_size = (status & RxSizeMask) - 4;
-			void (*pci_action)(struct pci_dev *, dma_addr_t,
-				size_t, int) = pci_dma_sync_single_for_device;
+			struct pci_dev *pdev = tp->pci_dev;
 
 			if (unlikely(pkt_size > tp->rx_buf_sz)) {
 				net_intr(tp, KERN_INFO
@@ -606,20 +609,18 @@ static int sis190_rx_interrupt(struct net_device *dev,
 				continue;
 			}
 
-			pci_dma_sync_single_for_cpu(tp->pci_dev,
-				le32_to_cpu(desc->addr), tp->rx_buf_sz,
-				PCI_DMA_FROMDEVICE);
 
-			if (sis190_try_rx_copy(&skb, pkt_size, desc,
-					       tp->rx_buf_sz)) {
-				pci_action = pci_unmap_single;
+			if (sis190_try_rx_copy(tp, &skb, pkt_size, addr)) {
+				pci_dma_sync_single_for_device(pdev, addr,
+					tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+				sis190_give_to_asic(desc, tp->rx_buf_sz);
+			} else {
+				pci_unmap_single(pdev, addr, tp->rx_buf_sz,
+						 PCI_DMA_FROMDEVICE);
 				tp->Rx_skbuff[entry] = NULL;
 				sis190_make_unusable_by_asic(desc);
 			}
 
-			pci_action(tp->pci_dev, le32_to_cpu(desc->addr),
-				   tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
-
 			skb_put(skb, pkt_size);
 			skb->protocol = eth_type_trans(skb, dev);
 
@@ -658,9 +659,31 @@ static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
 	memset(desc, 0x00, sizeof(*desc));
 }
 
+static inline int sis190_tx_pkt_err(u32 status, struct net_device_stats *stats)
+{
+#define TxErrMask	(WND | TABRT | FIFO | LINK)
+
+	if (!unlikely(status & TxErrMask))
+		return 0;
+
+	if (status & WND)
+		stats->tx_window_errors++;
+	if (status & TABRT)
+		stats->tx_aborted_errors++;
+	if (status & FIFO)
+		stats->tx_fifo_errors++;
+	if (status & LINK)
+		stats->tx_carrier_errors++;
+
+	stats->tx_errors++;
+
+	return -1;
+}
+
 static void sis190_tx_interrupt(struct net_device *dev,
 				struct sis190_private *tp, void __iomem *ioaddr)
 {
+	struct net_device_stats *stats = &dev->stats;
 	u32 pending, dirty_tx = tp->dirty_tx;
 	/*
 	 * It would not be needed if queueing was allowed to be enabled
@@ -675,15 +698,19 @@ static void sis190_tx_interrupt(struct net_device *dev,
 	for (; pending; pending--, dirty_tx++) {
 		unsigned int entry = dirty_tx % NUM_TX_DESC;
 		struct TxDesc *txd = tp->TxDescRing + entry;
+		u32 status = le32_to_cpu(txd->status);
 		struct sk_buff *skb;
 
-		if (le32_to_cpu(txd->status) & OWNbit)
+		if (status & OWNbit)
 			break;
 
 		skb = tp->Tx_skbuff[entry];
 
-		dev->stats.tx_packets++;
-		dev->stats.tx_bytes += skb->len;
+		if (likely(sis190_tx_pkt_err(status, stats) == 0)) {
+			stats->tx_packets++;
+			stats->tx_bytes += skb->len;
+			stats->collisions += ((status & ColCountMask) - 1);
+		}
 
 		sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
 		tp->Tx_skbuff[entry] = NULL;
@@ -904,10 +931,9 @@ static void sis190_phy_task(struct work_struct *work)
 		mod_timer(&tp->timer, jiffies + HZ/10);
 	} else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) &
 		     BMSR_ANEGCOMPLETE)) {
-		net_link(tp, KERN_WARNING "%s: PHY reset until link up.\n",
-			 dev->name);
 		netif_carrier_off(dev);
-		mdio_write(ioaddr, phy_id, MII_BMCR, val | BMCR_RESET);
+		net_link(tp, KERN_WARNING "%s: auto-negotiating...\n",
+			 dev->name);
 		mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
 	} else {
 		/* Rejoice ! */