Diffstat (limited to 'drivers/net/pcnet32.c')
-rw-r--r--	drivers/net/pcnet32.c	520
1 file changed, 382 insertions(+), 138 deletions(-)
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index d768f3d1ac28..4daafe303358 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -58,18 +58,15 @@ static const char *const version =
  * PCI device identifiers for "new style" Linux PCI Device Drivers
  */
 static struct pci_device_id pcnet32_pci_tbl[] = {
-	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
-	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), },
 
 	/*
 	 * Adapters that were sold with IBM's RS/6000 or pSeries hardware have
 	 * the incorrect vendor id.
 	 */
-	{ PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE,
-	  PCI_ANY_ID, PCI_ANY_ID,
-	  PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, 0},
+	{ PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE),
+	  .class = (PCI_CLASS_NETWORK_ETHERNET << 8), .class_mask = 0xffff00, },
 
 	{ }	/* terminate list */
 };
@@ -188,6 +185,23 @@ static int homepna[MAX_UNITS];
 
 #define PCNET32_TOTAL_SIZE 0x20
 
+#define CSR0		0
+#define CSR0_INIT	0x1
+#define CSR0_START	0x2
+#define CSR0_STOP	0x4
+#define CSR0_TXPOLL	0x8
+#define CSR0_INTEN	0x40
+#define CSR0_IDON	0x0100
+#define CSR0_NORMAL	(CSR0_START | CSR0_INTEN)
+#define PCNET32_INIT_LOW	1
+#define PCNET32_INIT_HIGH	2
+#define CSR3		3
+#define CSR4		4
+#define CSR5		5
+#define CSR5_SUSPEND	0x0001
+#define CSR15		15
+#define PCNET32_MC_FILTER	8
+
 /* The PCNET32 Rx and Tx ring descriptors. */
 struct pcnet32_rx_head {
 	u32	base;
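
For reference, the named CSR0 bits added above encode the same values the rest of this patch removes as magic numbers: the old pcnet32_restart(dev, 0x0042) call in set_multicast_list becomes pcnet32_restart(dev, CSR0_NORMAL), and the bare 0x0004 stop writes become CSR0_STOP. A minimal standalone sketch (ordinary user-space C, not driver code) checking that composition:

    /* Sketch only: verifies CSR0_NORMAL reproduces the old magic number
     * 0x0042 and shows the CSR0_STOP test used later in the patch. */
    #include <stdio.h>

    #define CSR0_START  0x2
    #define CSR0_STOP   0x4
    #define CSR0_INTEN  0x40
    #define CSR0_NORMAL (CSR0_START | CSR0_INTEN)

    int main(void)
    {
        unsigned int csr0 = CSR0_STOP;  /* chip reports stopped */

        printf("CSR0_NORMAL = 0x%04x\n", CSR0_NORMAL);  /* prints 0x0042 */
        printf("stopped: %s\n", (csr0 & CSR0_STOP) ? "yes" : "no");
        return 0;
    }
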
@@ -277,7 +291,6 @@ struct pcnet32_private {
 	u32	phymask;
 };
 
-static void pcnet32_probe_vlbus(void);
 static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *);
 static int pcnet32_probe1(unsigned long, int, struct pci_dev *);
 static int pcnet32_open(struct net_device *);
@@ -419,6 +432,238 @@ static struct pcnet32_access pcnet32_dwio = {
 	.reset = pcnet32_dwio_reset
 };
 
+static void pcnet32_netif_stop(struct net_device *dev)
+{
+	dev->trans_start = jiffies;
+	netif_poll_disable(dev);
+	netif_tx_disable(dev);
+}
+
+static void pcnet32_netif_start(struct net_device *dev)
+{
+	netif_wake_queue(dev);
+	netif_poll_enable(dev);
+}
+
+/*
+ * Allocate space for the new sized tx ring.
+ * Free old resources
+ * Save new resources.
+ * Any failure keeps old resources.
+ * Must be called with lp->lock held.
+ */
+static void pcnet32_realloc_tx_ring(struct net_device *dev,
+				    struct pcnet32_private *lp,
+				    unsigned int size)
+{
+	dma_addr_t new_ring_dma_addr;
+	dma_addr_t *new_dma_addr_list;
+	struct pcnet32_tx_head *new_tx_ring;
+	struct sk_buff **new_skb_list;
+
+	pcnet32_purge_tx_ring(dev);
+
+	new_tx_ring = pci_alloc_consistent(lp->pci_dev,
+					   sizeof(struct pcnet32_tx_head) *
+					   (1 << size),
+					   &new_ring_dma_addr);
+	if (new_tx_ring == NULL) {
+		if (netif_msg_drv(lp))
+			printk("\n" KERN_ERR
+			       "%s: Consistent memory allocation failed.\n",
+			       dev->name);
+		return;
+	}
+	memset(new_tx_ring, 0, sizeof(struct pcnet32_tx_head) * (1 << size));
+
+	new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
+				    GFP_ATOMIC);
+	if (!new_dma_addr_list) {
+		if (netif_msg_drv(lp))
+			printk("\n" KERN_ERR
+			       "%s: Memory allocation failed.\n", dev->name);
+		goto free_new_tx_ring;
+	}
+
+	new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
+			       GFP_ATOMIC);
+	if (!new_skb_list) {
+		if (netif_msg_drv(lp))
+			printk("\n" KERN_ERR
+			       "%s: Memory allocation failed.\n", dev->name);
+		goto free_new_lists;
+	}
+
+	kfree(lp->tx_skbuff);
+	kfree(lp->tx_dma_addr);
+	pci_free_consistent(lp->pci_dev,
+			    sizeof(struct pcnet32_tx_head) *
+			    lp->tx_ring_size, lp->tx_ring,
+			    lp->tx_ring_dma_addr);
+
+	lp->tx_ring_size = (1 << size);
+	lp->tx_mod_mask = lp->tx_ring_size - 1;
+	lp->tx_len_bits = (size << 12);
+	lp->tx_ring = new_tx_ring;
+	lp->tx_ring_dma_addr = new_ring_dma_addr;
+	lp->tx_dma_addr = new_dma_addr_list;
+	lp->tx_skbuff = new_skb_list;
+	return;
+
+      free_new_lists:
+	kfree(new_dma_addr_list);
+      free_new_tx_ring:
+	pci_free_consistent(lp->pci_dev,
+			    sizeof(struct pcnet32_tx_head) *
+			    (1 << size),
+			    new_tx_ring,
+			    new_ring_dma_addr);
+	return;
+}
+
+/*
+ * Allocate space for the new sized rx ring.
+ * Re-use old receive buffers.
+ *   alloc extra buffers
+ *   free unneeded buffers
+ *   free unneeded buffers
+ * Save new resources.
+ * Any failure keeps old resources.
+ * Must be called with lp->lock held.
+ */
+static void pcnet32_realloc_rx_ring(struct net_device *dev,
+				    struct pcnet32_private *lp,
+				    unsigned int size)
+{
+	dma_addr_t new_ring_dma_addr;
+	dma_addr_t *new_dma_addr_list;
+	struct pcnet32_rx_head *new_rx_ring;
+	struct sk_buff **new_skb_list;
+	int new, overlap;
+
+	new_rx_ring = pci_alloc_consistent(lp->pci_dev,
+					   sizeof(struct pcnet32_rx_head) *
+					   (1 << size),
+					   &new_ring_dma_addr);
+	if (new_rx_ring == NULL) {
+		if (netif_msg_drv(lp))
+			printk("\n" KERN_ERR
+			       "%s: Consistent memory allocation failed.\n",
+			       dev->name);
+		return;
+	}
+	memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size));
+
+	new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
+				    GFP_ATOMIC);
+	if (!new_dma_addr_list) {
+		if (netif_msg_drv(lp))
+			printk("\n" KERN_ERR
+			       "%s: Memory allocation failed.\n", dev->name);
+		goto free_new_rx_ring;
+	}
+
+	new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
+			       GFP_ATOMIC);
+	if (!new_skb_list) {
+		if (netif_msg_drv(lp))
+			printk("\n" KERN_ERR
+			       "%s: Memory allocation failed.\n", dev->name);
+		goto free_new_lists;
+	}
+
+	/* first copy the current receive buffers */
+	overlap = min(size, lp->rx_ring_size);
+	for (new = 0; new < overlap; new++) {
+		new_rx_ring[new] = lp->rx_ring[new];
+		new_dma_addr_list[new] = lp->rx_dma_addr[new];
+		new_skb_list[new] = lp->rx_skbuff[new];
+	}
+	/* now allocate any new buffers needed */
+	for (; new < size; new++) {
+		struct sk_buff *rx_skbuff;
+		new_skb_list[new] = dev_alloc_skb(PKT_BUF_SZ);
+		if (!(rx_skbuff = new_skb_list[new])) {
+			/* keep the original lists and buffers */
+			if (netif_msg_drv(lp))
+				printk(KERN_ERR
+				       "%s: pcnet32_realloc_rx_ring dev_alloc_skb failed.\n",
+				       dev->name);
+			goto free_all_new;
+		}
+		skb_reserve(rx_skbuff, 2);
+
+		new_dma_addr_list[new] =
+		    pci_map_single(lp->pci_dev, rx_skbuff->data,
+				   PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
+		new_rx_ring[new].base = (u32) le32_to_cpu(new_dma_addr_list[new]);
+		new_rx_ring[new].buf_length = le16_to_cpu(2 - PKT_BUF_SZ);
+		new_rx_ring[new].status = le16_to_cpu(0x8000);
+	}
+	/* and free any unneeded buffers */
+	for (; new < lp->rx_ring_size; new++) {
+		if (lp->rx_skbuff[new]) {
+			pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new],
+					 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
+			dev_kfree_skb(lp->rx_skbuff[new]);
+		}
+	}
+
+	kfree(lp->rx_skbuff);
+	kfree(lp->rx_dma_addr);
+	pci_free_consistent(lp->pci_dev,
+			    sizeof(struct pcnet32_rx_head) *
+			    lp->rx_ring_size, lp->rx_ring,
+			    lp->rx_ring_dma_addr);
+
+	lp->rx_ring_size = (1 << size);
+	lp->rx_mod_mask = lp->rx_ring_size - 1;
+	lp->rx_len_bits = (size << 4);
+	lp->rx_ring = new_rx_ring;
+	lp->rx_ring_dma_addr = new_ring_dma_addr;
+	lp->rx_dma_addr = new_dma_addr_list;
+	lp->rx_skbuff = new_skb_list;
+	return;
+
+      free_all_new:
+	for (; --new >= lp->rx_ring_size;) {
+		if (new_skb_list[new]) {
+			pci_unmap_single(lp->pci_dev, new_dma_addr_list[new],
+					 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
+			dev_kfree_skb(new_skb_list[new]);
+		}
+	}
+	kfree(new_skb_list);
+      free_new_lists:
+	kfree(new_dma_addr_list);
+      free_new_rx_ring:
+	pci_free_consistent(lp->pci_dev,
+			    sizeof(struct pcnet32_rx_head) *
+			    (1 << size),
+			    new_rx_ring,
+			    new_ring_dma_addr);
+	return;
+}
+
+static void pcnet32_purge_rx_ring(struct net_device *dev)
+{
+	struct pcnet32_private *lp = dev->priv;
+	int i;
+
+	/* free all allocated skbuffs */
+	for (i = 0; i < lp->rx_ring_size; i++) {
+		lp->rx_ring[i].status = 0;	/* CPU owns buffer */
+		wmb();		/* Make sure adapter sees owner change */
+		if (lp->rx_skbuff[i]) {
+			pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
+					 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
+			dev_kfree_skb_any(lp->rx_skbuff[i]);
+		}
+		lp->rx_skbuff[i] = NULL;
+		lp->rx_dma_addr[i] = 0;
+	}
+}
+
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void pcnet32_poll_controller(struct net_device *dev)
 {
@@ -519,10 +764,10 @@ static void pcnet32_get_ringparam(struct net_device *dev,
 {
 	struct pcnet32_private *lp = dev->priv;
 
-	ering->tx_max_pending = TX_MAX_RING_SIZE - 1;
-	ering->tx_pending = lp->tx_ring_size - 1;
-	ering->rx_max_pending = RX_MAX_RING_SIZE - 1;
-	ering->rx_pending = lp->rx_ring_size - 1;
+	ering->tx_max_pending = TX_MAX_RING_SIZE;
+	ering->tx_pending = lp->tx_ring_size;
+	ering->rx_max_pending = RX_MAX_RING_SIZE;
+	ering->rx_pending = lp->rx_ring_size;
 }
 
 static int pcnet32_set_ringparam(struct net_device *dev,
@@ -530,56 +775,53 @@ static int pcnet32_set_ringparam(struct net_device *dev,
 {
 	struct pcnet32_private *lp = dev->priv;
 	unsigned long flags;
+	unsigned int size;
+	ulong ioaddr = dev->base_addr;
 	int i;
 
 	if (ering->rx_mini_pending || ering->rx_jumbo_pending)
 		return -EINVAL;
 
 	if (netif_running(dev))
-		pcnet32_close(dev);
+		pcnet32_netif_stop(dev);
 
 	spin_lock_irqsave(&lp->lock, flags);
-	pcnet32_free_ring(dev);
-	lp->tx_ring_size =
-	    min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
-	lp->rx_ring_size =
-	    min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE);
+	lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);	/* stop the chip */
+
+	size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
 
 	/* set the minimum ring size to 4, to allow the loopback test to work
 	 * unchanged.
 	 */
 	for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) {
-		if (lp->tx_ring_size <= (1 << i))
+		if (size <= (1 << i))
 			break;
 	}
-	lp->tx_ring_size = (1 << i);
-	lp->tx_mod_mask = lp->tx_ring_size - 1;
-	lp->tx_len_bits = (i << 12);
-
+	if ((1 << i) != lp->tx_ring_size)
+		pcnet32_realloc_tx_ring(dev, lp, i);
+
+	size = min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE);
 	for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) {
-		if (lp->rx_ring_size <= (1 << i))
+		if (size <= (1 << i))
 			break;
 	}
-	lp->rx_ring_size = (1 << i);
-	lp->rx_mod_mask = lp->rx_ring_size - 1;
-	lp->rx_len_bits = (i << 4);
+	if ((1 << i) != lp->rx_ring_size)
+		pcnet32_realloc_rx_ring(dev, lp, i);
+
+	dev->weight = lp->rx_ring_size / 2;
 
-	if (pcnet32_alloc_ring(dev, dev->name)) {
-		pcnet32_free_ring(dev);
-		spin_unlock_irqrestore(&lp->lock, flags);
-		return -ENOMEM;
+	if (netif_running(dev)) {
+		pcnet32_netif_start(dev);
+		pcnet32_restart(dev, CSR0_NORMAL);
 	}
 
 	spin_unlock_irqrestore(&lp->lock, flags);
 
-	if (pcnet32_debug & NETIF_MSG_DRV)
-		printk(KERN_INFO PFX
+	if (netif_msg_drv(lp))
+		printk(KERN_INFO
 		       "%s: Ring Param Settings: RX: %d, TX: %d\n", dev->name,
 		       lp->rx_ring_size, lp->tx_ring_size);
 
-	if (netif_running(dev))
-		pcnet32_open(dev);
-
 	return 0;
 }
 
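For reference, the hunk above rounds the requested count up to a power of two (minimum 4, so the loopback test works unchanged) and reallocates only when the ring size actually changes. A standalone sketch of that rounding rule; ring_log2 is a name invented here, and it assumes the caller has already clamped the request the way the driver's min(..., TX_MAX_RING_SIZE) does:

    /* Sketch only: the power-of-two rounding pcnet32_set_ringparam applies.
     * Assumes 'requested' was pre-clamped to at most 1 << log_max. */
    #include <stdio.h>

    static unsigned int ring_log2(unsigned int requested, unsigned int log_max)
    {
        unsigned int i;

        for (i = 2; i <= log_max; i++)  /* minimum ring size is 1 << 2 = 4 */
            if (requested <= (1U << i))
                break;
        return i;   /* log2 of the ring size actually used */
    }

    int main(void)
    {
        unsigned int i = ring_log2(100, 9);

        printf("requested 100 -> ring of %u entries\n", 1U << i);  /* 128 */
        return 0;
    }
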
@@ -633,29 +875,27 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
 	unsigned long flags;
 	unsigned long ticks;
 
-	*data1 = 1;		/* status of test, default to fail */
 	rc = 1;			/* default to fail */
 
 	if (netif_running(dev))
 		pcnet32_close(dev);
 
 	spin_lock_irqsave(&lp->lock, flags);
+	lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);	/* stop the chip */
+
+	numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
 
 	/* Reset the PCNET32 */
 	lp->a.reset(ioaddr);
+	lp->a.write_csr(ioaddr, CSR4, 0x0915);
 
 	/* switch pcnet32 to 32bit mode */
 	lp->a.write_bcr(ioaddr, 20, 2);
 
-	lp->init_block.mode =
-	    le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
-	lp->init_block.filter[0] = 0;
-	lp->init_block.filter[1] = 0;
-
 	/* purge & init rings but don't actually restart */
 	pcnet32_restart(dev, 0x0000);
 
-	lp->a.write_csr(ioaddr, 0, 0x0004);	/* Set STOP bit */
+	lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);	/* Set STOP bit */
 
 	/* Initialize Transmit buffers. */
 	size = data_len + 15;
@@ -697,14 +937,15 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
 		}
 	}
 
-	x = a->read_bcr(ioaddr, 32);	/* set internal loopback in BSR32 */
-	x = x | 0x0002;
-	a->write_bcr(ioaddr, 32, x);
+	x = a->read_bcr(ioaddr, 32);	/* set internal loopback in BCR32 */
+	a->write_bcr(ioaddr, 32, x | 0x0002);
 
-	lp->a.write_csr(ioaddr, 15, 0x0044);	/* set int loopback in CSR15 */
+	/* set int loopback in CSR15 */
+	x = a->read_csr(ioaddr, CSR15) & 0xfffc;
+	lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
 
 	teststatus = le16_to_cpu(0x8000);
-	lp->a.write_csr(ioaddr, 0, 0x0002);	/* Set STRT bit */
+	lp->a.write_csr(ioaddr, CSR0, CSR0_START);	/* Set STRT bit */
 
 	/* Check status of descriptors */
 	for (x = 0; x < numbuffs; x++) {
@@ -712,7 +953,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
 		rmb();
 		while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) {
 			spin_unlock_irqrestore(&lp->lock, flags);
-			mdelay(1);
+			msleep(1);
 			spin_lock_irqsave(&lp->lock, flags);
 			rmb();
 			ticks++;
@@ -725,7 +966,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
 		}
 	}
 
-	lp->a.write_csr(ioaddr, 0, 0x0004);	/* Set STOP bit */
+	lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);	/* Set STOP bit */
 	wmb();
 	if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
 		printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
@@ -758,25 +999,24 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
 		}
 		x++;
 	}
-	if (!rc) {
-		*data1 = 0;
-	}
 
       clean_up:
+	*data1 = rc;
 	pcnet32_purge_tx_ring(dev);
-	x = a->read_csr(ioaddr, 15) & 0xFFFF;
-	a->write_csr(ioaddr, 15, (x & ~0x0044));	/* reset bits 6 and 2 */
 
-	x = a->read_bcr(ioaddr, 32);	/* reset internal loopback */
-	x = x & ~0x0002;
-	a->write_bcr(ioaddr, 32, x);
+	x = a->read_csr(ioaddr, CSR15);
+	a->write_csr(ioaddr, CSR15, (x & ~0x0044));	/* reset bits 6 and 2 */
 
-	spin_unlock_irqrestore(&lp->lock, flags);
+	x = a->read_bcr(ioaddr, 32);	/* reset internal loopback */
+	a->write_bcr(ioaddr, 32, (x & ~0x0002));
 
 	if (netif_running(dev)) {
+		spin_unlock_irqrestore(&lp->lock, flags);
 		pcnet32_open(dev);
 	} else {
+		pcnet32_purge_rx_ring(dev);
 		lp->a.write_bcr(ioaddr, 20, 4);	/* return to 16bit mode */
+		spin_unlock_irqrestore(&lp->lock, flags);
 	}
 
 	return (rc);
@@ -839,6 +1079,43 @@ static int pcnet32_phys_id(struct net_device *dev, u32 data)
 	return 0;
 }
 
+/*
+ * lp->lock must be held.
+ */
+static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
+			   int can_sleep)
+{
+	int csr5;
+	struct pcnet32_private *lp = dev->priv;
+	struct pcnet32_access *a = &lp->a;
+	ulong ioaddr = dev->base_addr;
+	int ticks;
+
+	/* set SUSPEND (SPND) - CSR5 bit 0 */
+	csr5 = a->read_csr(ioaddr, CSR5);
+	a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND);
+
+	/* poll waiting for bit to be set */
+	ticks = 0;
+	while (!(a->read_csr(ioaddr, CSR5) & CSR5_SUSPEND)) {
+		spin_unlock_irqrestore(&lp->lock, *flags);
+		if (can_sleep)
+			msleep(1);
+		else
+			mdelay(1);
+		spin_lock_irqsave(&lp->lock, *flags);
+		ticks++;
+		if (ticks > 200) {
+			if (netif_msg_hw(lp))
+				printk(KERN_DEBUG
+				       "%s: Error getting into suspend!\n",
+				       dev->name);
+			return 0;
+		}
+	}
+	return 1;
+}
+
 #define PCNET32_REGS_PER_PHY	32
 #define PCNET32_MAX_PHYS	32
 static int pcnet32_get_regs_len(struct net_device *dev)
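
For reference, pcnet32_suspend factors out the suspend-polling loop that pcnet32_get_regs open-coded (removed in the next hunk), so pcnet32_set_multicast_list can reuse it with mdelay instead of msleep in atomic context. A standalone model of the idiom, with a stub register file standing in for the chip and the names suspend_chip/read_csr/write_csr invented for the sketch (the real code drops lp->lock around each delay):

    /* Sketch only: set CSR5_SUSPEND, then poll until the chip acks,
     * giving up after ~200 polls. The fake registers ack immediately;
     * real hardware may take many iterations. */
    #include <stdio.h>

    #define CSR5         5
    #define CSR5_SUSPEND 0x0001

    static unsigned int regs[32];

    static unsigned int read_csr(int r) { return regs[r]; }
    static void write_csr(int r, unsigned int v) { regs[r] = v; }

    static int suspend_chip(void)
    {
        int ticks = 0;

        write_csr(CSR5, read_csr(CSR5) | CSR5_SUSPEND);
        while (!(read_csr(CSR5) & CSR5_SUSPEND)) {
            /* driver: unlock, msleep(1)/mdelay(1), relock */
            if (++ticks > 200)
                return 0;   /* never acked; caller falls back to a STOP */
        }
        return 1;
    }

    int main(void)
    {
        printf("suspended: %d\n", suspend_chip());
        return 0;
    }
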
@@ -857,32 +1134,13 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 	struct pcnet32_private *lp = dev->priv;
 	struct pcnet32_access *a = &lp->a;
 	ulong ioaddr = dev->base_addr;
-	int ticks;
 	unsigned long flags;
 
 	spin_lock_irqsave(&lp->lock, flags);
 
-	csr0 = a->read_csr(ioaddr, 0);
-	if (!(csr0 & 0x0004)) {	/* If not stopped */
-		/* set SUSPEND (SPND) - CSR5 bit 0 */
-		a->write_csr(ioaddr, 5, 0x0001);
-
-		/* poll waiting for bit to be set */
-		ticks = 0;
-		while (!(a->read_csr(ioaddr, 5) & 0x0001)) {
-			spin_unlock_irqrestore(&lp->lock, flags);
-			mdelay(1);
-			spin_lock_irqsave(&lp->lock, flags);
-			ticks++;
-			if (ticks > 200) {
-				if (netif_msg_hw(lp))
-					printk(KERN_DEBUG
-					       "%s: Error getting into suspend!\n",
-					       dev->name);
-				break;
-			}
-		}
-	}
+	csr0 = a->read_csr(ioaddr, CSR0);
+	if (!(csr0 & CSR0_STOP))	/* If not stopped */
+		pcnet32_suspend(dev, &flags, 1);
 
 	/* read address PROM */
 	for (i = 0; i < 16; i += 2)
@@ -919,9 +1177,12 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 		}
 	}
 
-	if (!(csr0 & 0x0004)) {	/* If not stopped */
+	if (!(csr0 & CSR0_STOP)) {	/* If not stopped */
+		int csr5;
+
 		/* clear SUSPEND (SPND) - CSR5 bit 0 */
-		a->write_csr(ioaddr, 5, 0x0000);
+		csr5 = a->read_csr(ioaddr, CSR5);
+		a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
 	}
 
 	spin_unlock_irqrestore(&lp->lock, flags);
@@ -952,7 +1213,7 @@ static struct ethtool_ops pcnet32_ethtool_ops = {
 /* only probes for non-PCI devices, the rest are handled by
  * pci_register_driver via pcnet32_probe_pci */
 
-static void __devinit pcnet32_probe_vlbus(void)
+static void __devinit pcnet32_probe_vlbus(unsigned int *pcnet32_portlist)
 {
 	unsigned int *port, ioaddr;
 
@@ -1436,7 +1697,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, char *name)
 					   lp->tx_ring_size,
 					   &lp->tx_ring_dma_addr);
 	if (lp->tx_ring == NULL) {
-		if (pcnet32_debug & NETIF_MSG_DRV)
+		if (netif_msg_drv(lp))
 			printk("\n" KERN_ERR PFX
 			       "%s: Consistent memory allocation failed.\n",
 			       name);
@@ -1448,52 +1709,48 @@ static int pcnet32_alloc_ring(struct net_device *dev, char *name)
 					   lp->rx_ring_size,
 					   &lp->rx_ring_dma_addr);
 	if (lp->rx_ring == NULL) {
-		if (pcnet32_debug & NETIF_MSG_DRV)
+		if (netif_msg_drv(lp))
 			printk("\n" KERN_ERR PFX
 			       "%s: Consistent memory allocation failed.\n",
 			       name);
 		return -ENOMEM;
 	}
 
-	lp->tx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->tx_ring_size,
+	lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t),
 				  GFP_ATOMIC);
 	if (!lp->tx_dma_addr) {
-		if (pcnet32_debug & NETIF_MSG_DRV)
+		if (netif_msg_drv(lp))
 			printk("\n" KERN_ERR PFX
 			       "%s: Memory allocation failed.\n", name);
 		return -ENOMEM;
 	}
-	memset(lp->tx_dma_addr, 0, sizeof(dma_addr_t) * lp->tx_ring_size);
 
-	lp->rx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->rx_ring_size,
+	lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t),
 				  GFP_ATOMIC);
 	if (!lp->rx_dma_addr) {
-		if (pcnet32_debug & NETIF_MSG_DRV)
+		if (netif_msg_drv(lp))
 			printk("\n" KERN_ERR PFX
 			       "%s: Memory allocation failed.\n", name);
 		return -ENOMEM;
 	}
-	memset(lp->rx_dma_addr, 0, sizeof(dma_addr_t) * lp->rx_ring_size);
 
-	lp->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->tx_ring_size,
+	lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *),
 				GFP_ATOMIC);
 	if (!lp->tx_skbuff) {
-		if (pcnet32_debug & NETIF_MSG_DRV)
+		if (netif_msg_drv(lp))
 			printk("\n" KERN_ERR PFX
 			       "%s: Memory allocation failed.\n", name);
 		return -ENOMEM;
 	}
-	memset(lp->tx_skbuff, 0, sizeof(struct sk_buff *) * lp->tx_ring_size);
 
-	lp->rx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->rx_ring_size,
+	lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *),
 				GFP_ATOMIC);
 	if (!lp->rx_skbuff) {
-		if (pcnet32_debug & NETIF_MSG_DRV)
+		if (netif_msg_drv(lp))
 			printk("\n" KERN_ERR PFX
 			       "%s: Memory allocation failed.\n", name);
 		return -ENOMEM;
 	}
-	memset(lp->rx_skbuff, 0, sizeof(struct sk_buff *) * lp->rx_ring_size);
 
 	return 0;
 }
@@ -1757,16 +2014,7 @@ static int pcnet32_open(struct net_device *dev)
 
       err_free_ring:
 	/* free any allocated skbuffs */
-	for (i = 0; i < lp->rx_ring_size; i++) {
-		lp->rx_ring[i].status = 0;
-		if (lp->rx_skbuff[i]) {
-			pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
-					 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
-			dev_kfree_skb(lp->rx_skbuff[i]);
-		}
-		lp->rx_skbuff[i] = NULL;
-		lp->rx_dma_addr[i] = 0;
-	}
+	pcnet32_purge_rx_ring(dev);
 
 	/*
 	 * Switch back to 16bit mode to avoid problems with dumb
@@ -2348,7 +2596,6 @@ static int pcnet32_close(struct net_device *dev)
 {
 	unsigned long ioaddr = dev->base_addr;
 	struct pcnet32_private *lp = dev->priv;
-	int i;
 	unsigned long flags;
 
 	del_timer_sync(&lp->watchdog_timer);
@@ -2379,31 +2626,8 @@ static int pcnet32_close(struct net_device *dev)
 
 	spin_lock_irqsave(&lp->lock, flags);
 
-	/* free all allocated skbuffs */
-	for (i = 0; i < lp->rx_ring_size; i++) {
-		lp->rx_ring[i].status = 0;
-		wmb();		/* Make sure adapter sees owner change */
-		if (lp->rx_skbuff[i]) {
-			pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
-					 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
-			dev_kfree_skb(lp->rx_skbuff[i]);
-		}
-		lp->rx_skbuff[i] = NULL;
-		lp->rx_dma_addr[i] = 0;
-	}
-
-	for (i = 0; i < lp->tx_ring_size; i++) {
-		lp->tx_ring[i].status = 0;	/* CPU owns buffer */
-		wmb();		/* Make sure adapter sees owner change */
-		if (lp->tx_skbuff[i]) {
-			pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
-					 lp->tx_skbuff[i]->len,
-					 PCI_DMA_TODEVICE);
-			dev_kfree_skb(lp->tx_skbuff[i]);
-		}
-		lp->tx_skbuff[i] = NULL;
-		lp->tx_dma_addr[i] = 0;
-	}
+	pcnet32_purge_rx_ring(dev);
+	pcnet32_purge_tx_ring(dev);
 
 	spin_unlock_irqrestore(&lp->lock, flags);
 
@@ -2433,6 +2657,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
 	volatile struct pcnet32_init_block *ib = &lp->init_block;
 	volatile u16 *mcast_table = (u16 *) & ib->filter;
 	struct dev_mc_list *dmi = dev->mc_list;
+	unsigned long ioaddr = dev->base_addr;
 	char *addrs;
 	int i;
 	u32 crc;
@@ -2441,6 +2666,10 @@ static void pcnet32_load_multicast(struct net_device *dev)
 	if (dev->flags & IFF_ALLMULTI) {
 		ib->filter[0] = 0xffffffff;
 		ib->filter[1] = 0xffffffff;
+		lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
+		lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
+		lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
+		lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
 		return;
 	}
 	/* clear the multicast filter */
@@ -2462,6 +2691,9 @@ static void pcnet32_load_multicast(struct net_device *dev)
 		    le16_to_cpu(le16_to_cpu(mcast_table[crc >> 4]) |
 				(1 << (crc & 0xf)));
 	}
+	for (i = 0; i < 4; i++)
+		lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
+				le16_to_cpu(mcast_table[i]));
 	return;
 }
 
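For reference, the loop added above mirrors the logical-address filter into the chip's filter CSRs (PCNET32_MC_FILTER names the first of four 16-bit words). Each multicast MAC selects one of 64 filter bits via the top bits of an Ethernet CRC-32; the shift that keeps only those top bits happens in driver code outside this hunk. A standalone sketch of that mapping, with ether_crc_le reimplemented inline under the standard CRC-32 polynomial:

    /* Sketch only: map a multicast MAC to a filter word and bit, using the
     * same table update shape as the hunk above. */
    #include <stdio.h>

    static unsigned int ether_crc_le(int len, const unsigned char *p)
    {
        unsigned int crc = 0xffffffff;

        while (len-- > 0) {
            unsigned char c = *p++;
            int bit;

            for (bit = 0; bit < 8; bit++, c >>= 1)
                crc = (crc >> 1) ^ (((crc ^ c) & 1) ? 0xedb88320 : 0);
        }
        return crc;
    }

    int main(void)
    {
        const unsigned char mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
        unsigned int crc = ether_crc_le(6, mac) >> 26;   /* 6-bit index */
        unsigned short table[4] = { 0 };

        table[crc >> 4] |= 1 << (crc & 0xf);   /* same update as the hunk */
        printf("filter word %u, bit %u\n", crc >> 4, crc & 0xfu);
        return 0;
    }
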
@@ -2472,8 +2704,11 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
 {
 	unsigned long ioaddr = dev->base_addr, flags;
 	struct pcnet32_private *lp = dev->priv;
+	int csr15, suspended;
 
 	spin_lock_irqsave(&lp->lock, flags);
+	suspended = pcnet32_suspend(dev, &flags, 0);
+	csr15 = lp->a.read_csr(ioaddr, CSR15);
 	if (dev->flags & IFF_PROMISC) {
 		/* Log any net taps. */
 		if (netif_msg_hw(lp))
@@ -2482,15 +2717,24 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
 		lp->init_block.mode =
 		    le16_to_cpu(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
 				7);
+		lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
 	} else {
 		lp->init_block.mode =
 		    le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
+		lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
 		pcnet32_load_multicast(dev);
 	}
 
-	lp->a.write_csr(ioaddr, 0, 0x0004);	/* Temporarily stop the lance. */
-	pcnet32_restart(dev, 0x0042);	/* Resume normal operation */
-	netif_wake_queue(dev);
+	if (suspended) {
+		int csr5;
+		/* clear SUSPEND (SPND) - CSR5 bit 0 */
+		csr5 = lp->a.read_csr(ioaddr, CSR5);
+		lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
+	} else {
+		lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
+		pcnet32_restart(dev, CSR0_NORMAL);
+		netif_wake_queue(dev);
+	}
 
 	spin_unlock_irqrestore(&lp->lock, flags);
 }
@@ -2730,7 +2974,7 @@ static int __init pcnet32_init_module(void)
 
 	/* should we find any remaining VLbus devices ? */
 	if (pcnet32vlb)
-		pcnet32_probe_vlbus();
+		pcnet32_probe_vlbus(pcnet32_portlist);
 
 	if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE))
 		printk(KERN_INFO PFX "%d cards_found.\n", cards_found);