diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2008-07-22 22:09:51 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-07-22 22:09:51 -0400 |
commit | c010b2f76c3032e48097a6eef291d8593d5d79a6 (patch) | |
tree | 16077c83703527732991a55dea1abe330c0ccdc6 | |
parent | 6069fb2ef5d4f47432359c97f350e0cfcc4d208e (diff) | |
parent | 521c4d96e0840ecce25b956e00f416ed499ef2ba (diff) |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (82 commits)
ipw2200: Call netif_*_queue() interfaces properly.
netxen: Needs to include linux/vmalloc.h
[netdrvr] atl1d: fix !CONFIG_PM build
r6040: rework init_one error handling
r6040: bump release number to 0.18
r6040: handle RX fifo full and no descriptor interrupts
r6040: change the default waiting time
r6040: use definitions for magic values in descriptor status
r6040: completely rework the RX path
r6040: call napi_disable when puting down the interface and set lp->dev accordingly.
mv643xx_eth: fix NETPOLL build
r6040: rework the RX buffers allocation routine
r6040: fix scheduling while atomic in r6040_tx_timeout
r6040: fix null pointer access and tx timeouts
r6040: prefix all functions with r6040
rndis_host: support WM6 devices as modems
at91_ether: use netstats in net_device structure
sfc: Create one RX queue and interrupt per CPU package by default
sfc: Use a separate workqueue for resets
sfc: I2C adapter initialisation fixes
...
99 files changed, 13448 insertions, 7133 deletions
diff --git a/Documentation/networking/e1000.txt b/Documentation/networking/e1000.txt index 61b171cf5313..2df71861e578 100644 --- a/Documentation/networking/e1000.txt +++ b/Documentation/networking/e1000.txt | |||
@@ -513,21 +513,11 @@ Additional Configurations | |||
513 | Intel(R) PRO/1000 PT Dual Port Server Connection | 513 | Intel(R) PRO/1000 PT Dual Port Server Connection |
514 | Intel(R) PRO/1000 PT Dual Port Server Adapter | 514 | Intel(R) PRO/1000 PT Dual Port Server Adapter |
515 | Intel(R) PRO/1000 PF Dual Port Server Adapter | 515 | Intel(R) PRO/1000 PF Dual Port Server Adapter |
516 | Intel(R) PRO/1000 PT Quad Port Server Adapter | 516 | Intel(R) PRO/1000 PT Quad Port Server Adapter |
517 | 517 | ||
518 | NAPI | 518 | NAPI |
519 | ---- | 519 | ---- |
520 | NAPI (Rx polling mode) is supported in the e1000 driver. NAPI is enabled | 520 | NAPI (Rx polling mode) is enabled in the e1000 driver. |
521 | or disabled based on the configuration of the kernel. To override | ||
522 | the default, use the following compile-time flags. | ||
523 | |||
524 | To enable NAPI, compile the driver module, passing in a configuration option: | ||
525 | |||
526 | make CFLAGS_EXTRA=-DE1000_NAPI install | ||
527 | |||
528 | To disable NAPI, compile the driver module, passing in a configuration option: | ||
529 | |||
530 | make CFLAGS_EXTRA=-DE1000_NO_NAPI install | ||
531 | 521 | ||
532 | See www.cyberus.ca/~hadi/usenix-paper.tgz for more information on NAPI. | 522 | See www.cyberus.ca/~hadi/usenix-paper.tgz for more information on NAPI. |
533 | 523 | ||
diff --git a/Documentation/networking/udplite.txt b/Documentation/networking/udplite.txt index 3870f280280b..855d8da57a23 100644 --- a/Documentation/networking/udplite.txt +++ b/Documentation/networking/udplite.txt | |||
@@ -148,7 +148,7 @@ | |||
148 | getsockopt(sockfd, SOL_SOCKET, SO_NO_CHECK, &value, ...); | 148 | getsockopt(sockfd, SOL_SOCKET, SO_NO_CHECK, &value, ...); |
149 | 149 | ||
150 | is meaningless (as in TCP). Packets with a zero checksum field are | 150 | is meaningless (as in TCP). Packets with a zero checksum field are |
151 | illegal (cf. RFC 3828, sec. 3.1) will be silently discarded. | 151 | illegal (cf. RFC 3828, sec. 3.1) and will be silently discarded. |
152 | 152 | ||
153 | 4) Fragmentation | 153 | 4) Fragmentation |
154 | 154 | ||
diff --git a/MAINTAINERS b/MAINTAINERS index 11944b44c2ff..0652ab384d51 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -3533,7 +3533,7 @@ S: Supported | |||
3533 | 3533 | ||
3534 | S390 NETWORK DRIVERS | 3534 | S390 NETWORK DRIVERS |
3535 | P: Ursula Braun | 3535 | P: Ursula Braun |
3536 | M: ubraun@linux.vnet.ibm.com | 3536 | M: ursula.braun@de.ibm.com |
3537 | P: Frank Blaschka | 3537 | P: Frank Blaschka |
3538 | M: blaschka@linux.vnet.ibm.com | 3538 | M: blaschka@linux.vnet.ibm.com |
3539 | M: linux390@de.ibm.com | 3539 | M: linux390@de.ibm.com |
@@ -3553,7 +3553,7 @@ S: Supported | |||
3553 | 3553 | ||
3554 | S390 IUCV NETWORK LAYER | 3554 | S390 IUCV NETWORK LAYER |
3555 | P: Ursula Braun | 3555 | P: Ursula Braun |
3556 | M: ubraun@linux.vnet.ibm.com | 3556 | M: ursula.braun@de.ibm.com |
3557 | M: linux390@de.ibm.com | 3557 | M: linux390@de.ibm.com |
3558 | L: linux-s390@vger.kernel.org | 3558 | L: linux-s390@vger.kernel.org |
3559 | W: http://www.ibm.com/developerworks/linux/linux390/ | 3559 | W: http://www.ibm.com/developerworks/linux/linux390/ |
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c index 75317a14ad1c..8a5b0d293f75 100644 --- a/drivers/net/8139too.c +++ b/drivers/net/8139too.c | |||
@@ -98,7 +98,6 @@ | |||
98 | #include <linux/compiler.h> | 98 | #include <linux/compiler.h> |
99 | #include <linux/pci.h> | 99 | #include <linux/pci.h> |
100 | #include <linux/init.h> | 100 | #include <linux/init.h> |
101 | #include <linux/ioport.h> | ||
102 | #include <linux/netdevice.h> | 101 | #include <linux/netdevice.h> |
103 | #include <linux/etherdevice.h> | 102 | #include <linux/etherdevice.h> |
104 | #include <linux/rtnetlink.h> | 103 | #include <linux/rtnetlink.h> |
@@ -120,11 +119,6 @@ | |||
120 | NETIF_MSG_LINK) | 119 | NETIF_MSG_LINK) |
121 | 120 | ||
122 | 121 | ||
123 | /* enable PIO instead of MMIO, if CONFIG_8139TOO_PIO is selected */ | ||
124 | #ifdef CONFIG_8139TOO_PIO | ||
125 | #define USE_IO_OPS 1 | ||
126 | #endif | ||
127 | |||
128 | /* define to 1, 2 or 3 to enable copious debugging info */ | 122 | /* define to 1, 2 or 3 to enable copious debugging info */ |
129 | #define RTL8139_DEBUG 0 | 123 | #define RTL8139_DEBUG 0 |
130 | 124 | ||
@@ -156,6 +150,13 @@ | |||
156 | static int media[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; | 150 | static int media[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; |
157 | static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; | 151 | static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; |
158 | 152 | ||
153 | /* Whether to use MMIO or PIO. Default to MMIO. */ | ||
154 | #ifdef CONFIG_8139TOO_PIO | ||
155 | static int use_io = 1; | ||
156 | #else | ||
157 | static int use_io = 0; | ||
158 | #endif | ||
159 | |||
159 | /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). | 160 | /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). |
160 | The RTL chips use a 64 element hash table based on the Ethernet CRC. */ | 161 | The RTL chips use a 64 element hash table based on the Ethernet CRC. */ |
161 | static int multicast_filter_limit = 32; | 162 | static int multicast_filter_limit = 32; |
@@ -614,6 +615,8 @@ MODULE_DESCRIPTION ("RealTek RTL-8139 Fast Ethernet driver"); | |||
614 | MODULE_LICENSE("GPL"); | 615 | MODULE_LICENSE("GPL"); |
615 | MODULE_VERSION(DRV_VERSION); | 616 | MODULE_VERSION(DRV_VERSION); |
616 | 617 | ||
618 | module_param(use_io, int, 0); | ||
619 | MODULE_PARM_DESC(use_io, "Force use of I/O access mode. 0=MMIO 1=PIO"); | ||
617 | module_param(multicast_filter_limit, int, 0); | 620 | module_param(multicast_filter_limit, int, 0); |
618 | module_param_array(media, int, NULL, 0); | 621 | module_param_array(media, int, NULL, 0); |
619 | module_param_array(full_duplex, int, NULL, 0); | 622 | module_param_array(full_duplex, int, NULL, 0); |
@@ -709,13 +712,8 @@ static void __rtl8139_cleanup_dev (struct net_device *dev) | |||
709 | assert (tp->pci_dev != NULL); | 712 | assert (tp->pci_dev != NULL); |
710 | pdev = tp->pci_dev; | 713 | pdev = tp->pci_dev; |
711 | 714 | ||
712 | #ifdef USE_IO_OPS | ||
713 | if (tp->mmio_addr) | ||
714 | ioport_unmap (tp->mmio_addr); | ||
715 | #else | ||
716 | if (tp->mmio_addr) | 715 | if (tp->mmio_addr) |
717 | pci_iounmap (pdev, tp->mmio_addr); | 716 | pci_iounmap (pdev, tp->mmio_addr); |
718 | #endif /* USE_IO_OPS */ | ||
719 | 717 | ||
720 | /* it's ok to call this even if we have no regions to free */ | 718 | /* it's ok to call this even if we have no regions to free */ |
721 | pci_release_regions (pdev); | 719 | pci_release_regions (pdev); |
@@ -790,32 +788,33 @@ static int __devinit rtl8139_init_board (struct pci_dev *pdev, | |||
790 | DPRINTK("PIO region size == 0x%02X\n", pio_len); | 788 | DPRINTK("PIO region size == 0x%02X\n", pio_len); |
791 | DPRINTK("MMIO region size == 0x%02lX\n", mmio_len); | 789 | DPRINTK("MMIO region size == 0x%02lX\n", mmio_len); |
792 | 790 | ||
793 | #ifdef USE_IO_OPS | 791 | retry: |
794 | /* make sure PCI base addr 0 is PIO */ | 792 | if (use_io) { |
795 | if (!(pio_flags & IORESOURCE_IO)) { | 793 | /* make sure PCI base addr 0 is PIO */ |
796 | dev_err(&pdev->dev, "region #0 not a PIO resource, aborting\n"); | 794 | if (!(pio_flags & IORESOURCE_IO)) { |
797 | rc = -ENODEV; | 795 | dev_err(&pdev->dev, "region #0 not a PIO resource, aborting\n"); |
798 | goto err_out; | 796 | rc = -ENODEV; |
799 | } | 797 | goto err_out; |
800 | /* check for weird/broken PCI region reporting */ | 798 | } |
801 | if (pio_len < RTL_MIN_IO_SIZE) { | 799 | /* check for weird/broken PCI region reporting */ |
802 | dev_err(&pdev->dev, "Invalid PCI I/O region size(s), aborting\n"); | 800 | if (pio_len < RTL_MIN_IO_SIZE) { |
803 | rc = -ENODEV; | 801 | dev_err(&pdev->dev, "Invalid PCI I/O region size(s), aborting\n"); |
804 | goto err_out; | 802 | rc = -ENODEV; |
805 | } | 803 | goto err_out; |
806 | #else | 804 | } |
807 | /* make sure PCI base addr 1 is MMIO */ | 805 | } else { |
808 | if (!(mmio_flags & IORESOURCE_MEM)) { | 806 | /* make sure PCI base addr 1 is MMIO */ |
809 | dev_err(&pdev->dev, "region #1 not an MMIO resource, aborting\n"); | 807 | if (!(mmio_flags & IORESOURCE_MEM)) { |
810 | rc = -ENODEV; | 808 | dev_err(&pdev->dev, "region #1 not an MMIO resource, aborting\n"); |
811 | goto err_out; | 809 | rc = -ENODEV; |
812 | } | 810 | goto err_out; |
813 | if (mmio_len < RTL_MIN_IO_SIZE) { | 811 | } |
814 | dev_err(&pdev->dev, "Invalid PCI mem region size(s), aborting\n"); | 812 | if (mmio_len < RTL_MIN_IO_SIZE) { |
815 | rc = -ENODEV; | 813 | dev_err(&pdev->dev, "Invalid PCI mem region size(s), aborting\n"); |
816 | goto err_out; | 814 | rc = -ENODEV; |
815 | goto err_out; | ||
816 | } | ||
817 | } | 817 | } |
818 | #endif | ||
819 | 818 | ||
820 | rc = pci_request_regions (pdev, DRV_NAME); | 819 | rc = pci_request_regions (pdev, DRV_NAME); |
821 | if (rc) | 820 | if (rc) |
@@ -825,28 +824,28 @@ static int __devinit rtl8139_init_board (struct pci_dev *pdev, | |||
825 | /* enable PCI bus-mastering */ | 824 | /* enable PCI bus-mastering */ |
826 | pci_set_master (pdev); | 825 | pci_set_master (pdev); |
827 | 826 | ||
828 | #ifdef USE_IO_OPS | 827 | if (use_io) { |
829 | ioaddr = ioport_map(pio_start, pio_len); | 828 | ioaddr = pci_iomap(pdev, 0, 0); |
830 | if (!ioaddr) { | 829 | if (!ioaddr) { |
831 | dev_err(&pdev->dev, "cannot map PIO, aborting\n"); | 830 | dev_err(&pdev->dev, "cannot map PIO, aborting\n"); |
832 | rc = -EIO; | 831 | rc = -EIO; |
833 | goto err_out; | 832 | goto err_out; |
834 | } | 833 | } |
835 | dev->base_addr = pio_start; | 834 | dev->base_addr = pio_start; |
836 | tp->mmio_addr = ioaddr; | 835 | tp->regs_len = pio_len; |
837 | tp->regs_len = pio_len; | 836 | } else { |
838 | #else | 837 | /* ioremap MMIO region */ |
839 | /* ioremap MMIO region */ | 838 | ioaddr = pci_iomap(pdev, 1, 0); |
840 | ioaddr = pci_iomap(pdev, 1, 0); | 839 | if (ioaddr == NULL) { |
841 | if (ioaddr == NULL) { | 840 | dev_err(&pdev->dev, "cannot remap MMIO, trying PIO\n"); |
842 | dev_err(&pdev->dev, "cannot remap MMIO, aborting\n"); | 841 | pci_release_regions(pdev); |
843 | rc = -EIO; | 842 | use_io = 1; |
844 | goto err_out; | 843 | goto retry; |
844 | } | ||
845 | dev->base_addr = (long) ioaddr; | ||
846 | tp->regs_len = mmio_len; | ||
845 | } | 847 | } |
846 | dev->base_addr = (long) ioaddr; | ||
847 | tp->mmio_addr = ioaddr; | 848 | tp->mmio_addr = ioaddr; |
848 | tp->regs_len = mmio_len; | ||
849 | #endif /* USE_IO_OPS */ | ||
850 | 849 | ||
851 | /* Bring old chips out of low-power mode. */ | 850 | /* Bring old chips out of low-power mode. */ |
852 | RTL_W8 (HltClk, 'R'); | 851 | RTL_W8 (HltClk, 'R'); |
@@ -952,6 +951,14 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev, | |||
952 | "Use the \"8139cp\" driver for improved performance and stability.\n"); | 951 | "Use the \"8139cp\" driver for improved performance and stability.\n"); |
953 | } | 952 | } |
954 | 953 | ||
954 | if (pdev->vendor == PCI_VENDOR_ID_REALTEK && | ||
955 | pdev->device == PCI_DEVICE_ID_REALTEK_8139 && | ||
956 | pdev->subsystem_vendor == PCI_VENDOR_ID_ATHEROS && | ||
957 | pdev->subsystem_device == PCI_DEVICE_ID_REALTEK_8139) { | ||
958 | printk(KERN_INFO "8139too: OQO Model 2 detected. Forcing PIO\n"); | ||
959 | use_io = 1; | ||
960 | } | ||
961 | |||
955 | i = rtl8139_init_board (pdev, &dev); | 962 | i = rtl8139_init_board (pdev, &dev); |
956 | if (i < 0) | 963 | if (i < 0) |
957 | return i; | 964 | return i; |
@@ -2381,20 +2388,24 @@ static void rtl8139_set_msglevel(struct net_device *dev, u32 datum) | |||
2381 | np->msg_enable = datum; | 2388 | np->msg_enable = datum; |
2382 | } | 2389 | } |
2383 | 2390 | ||
2384 | /* TODO: we are too slack to do reg dumping for pio, for now */ | ||
2385 | #ifdef CONFIG_8139TOO_PIO | ||
2386 | #define rtl8139_get_regs_len NULL | ||
2387 | #define rtl8139_get_regs NULL | ||
2388 | #else | ||
2389 | static int rtl8139_get_regs_len(struct net_device *dev) | 2391 | static int rtl8139_get_regs_len(struct net_device *dev) |
2390 | { | 2392 | { |
2391 | struct rtl8139_private *np = netdev_priv(dev); | 2393 | struct rtl8139_private *np; |
2394 | /* TODO: we are too slack to do reg dumping for pio, for now */ | ||
2395 | if (use_io) | ||
2396 | return 0; | ||
2397 | np = netdev_priv(dev); | ||
2392 | return np->regs_len; | 2398 | return np->regs_len; |
2393 | } | 2399 | } |
2394 | 2400 | ||
2395 | static void rtl8139_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf) | 2401 | static void rtl8139_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf) |
2396 | { | 2402 | { |
2397 | struct rtl8139_private *np = netdev_priv(dev); | 2403 | struct rtl8139_private *np; |
2404 | |||
2405 | /* TODO: we are too slack to do reg dumping for pio, for now */ | ||
2406 | if (use_io) | ||
2407 | return; | ||
2408 | np = netdev_priv(dev); | ||
2398 | 2409 | ||
2399 | regs->version = RTL_REGS_VER; | 2410 | regs->version = RTL_REGS_VER; |
2400 | 2411 | ||
@@ -2402,7 +2413,6 @@ static void rtl8139_get_regs(struct net_device *dev, struct ethtool_regs *regs, | |||
2402 | memcpy_fromio(regbuf, np->mmio_addr, regs->len); | 2413 | memcpy_fromio(regbuf, np->mmio_addr, regs->len); |
2403 | spin_unlock_irq(&np->lock); | 2414 | spin_unlock_irq(&np->lock); |
2404 | } | 2415 | } |
2405 | #endif /* CONFIG_8139TOO_MMIO */ | ||
2406 | 2416 | ||
2407 | static int rtl8139_get_sset_count(struct net_device *dev, int sset) | 2417 | static int rtl8139_get_sset_count(struct net_device *dev, int sset) |
2408 | { | 2418 | { |
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 3e5e64c33e18..fa533c27052a 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -1926,20 +1926,6 @@ config E1000 | |||
1926 | To compile this driver as a module, choose M here. The module | 1926 | To compile this driver as a module, choose M here. The module |
1927 | will be called e1000. | 1927 | will be called e1000. |
1928 | 1928 | ||
1929 | config E1000_NAPI | ||
1930 | bool "Use Rx Polling (NAPI)" | ||
1931 | depends on E1000 | ||
1932 | help | ||
1933 | NAPI is a new driver API designed to reduce CPU and interrupt load | ||
1934 | when the driver is receiving lots of packets from the card. It is | ||
1935 | still somewhat experimental and thus not yet enabled by default. | ||
1936 | |||
1937 | If your estimated Rx load is 10kpps or more, or if the card will be | ||
1938 | deployed on potentially unfriendly networks (e.g. in a firewall), | ||
1939 | then say Y here. | ||
1940 | |||
1941 | If in doubt, say N. | ||
1942 | |||
1943 | config E1000_DISABLE_PACKET_SPLIT | 1929 | config E1000_DISABLE_PACKET_SPLIT |
1944 | bool "Disable Packet Split for PCI express adapters" | 1930 | bool "Disable Packet Split for PCI express adapters" |
1945 | depends on E1000 | 1931 | depends on E1000 |
@@ -2304,6 +2290,17 @@ config ATL1 | |||
2304 | To compile this driver as a module, choose M here. The module | 2290 | To compile this driver as a module, choose M here. The module |
2305 | will be called atl1. | 2291 | will be called atl1. |
2306 | 2292 | ||
2293 | config ATL1E | ||
2294 | tristate "Atheros L1E Gigabit Ethernet support (EXPERIMENTAL)" | ||
2295 | depends on PCI && EXPERIMENTAL | ||
2296 | select CRC32 | ||
2297 | select MII | ||
2298 | help | ||
2299 | This driver supports the Atheros L1E gigabit ethernet adapter. | ||
2300 | |||
2301 | To compile this driver as a module, choose M here. The module | ||
2302 | will be called atl1e. | ||
2303 | |||
2307 | endif # NETDEV_1000 | 2304 | endif # NETDEV_1000 |
2308 | 2305 | ||
2309 | # | 2306 | # |
diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 4b17a9ab7861..7629c9017215 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile | |||
@@ -15,6 +15,7 @@ obj-$(CONFIG_EHEA) += ehea/ | |||
15 | obj-$(CONFIG_CAN) += can/ | 15 | obj-$(CONFIG_CAN) += can/ |
16 | obj-$(CONFIG_BONDING) += bonding/ | 16 | obj-$(CONFIG_BONDING) += bonding/ |
17 | obj-$(CONFIG_ATL1) += atlx/ | 17 | obj-$(CONFIG_ATL1) += atlx/ |
18 | obj-$(CONFIG_ATL1E) += atl1e/ | ||
18 | obj-$(CONFIG_GIANFAR) += gianfar_driver.o | 19 | obj-$(CONFIG_GIANFAR) += gianfar_driver.o |
19 | obj-$(CONFIG_TEHUTI) += tehuti.o | 20 | obj-$(CONFIG_TEHUTI) += tehuti.o |
20 | 21 | ||
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c index 71f7cec30911..ffae266e2d7f 100644 --- a/drivers/net/arm/at91_ether.c +++ b/drivers/net/arm/at91_ether.c | |||
@@ -820,7 +820,7 @@ static int at91ether_tx(struct sk_buff *skb, struct net_device *dev) | |||
820 | lp->skb = skb; | 820 | lp->skb = skb; |
821 | lp->skb_length = skb->len; | 821 | lp->skb_length = skb->len; |
822 | lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE); | 822 | lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE); |
823 | lp->stats.tx_bytes += skb->len; | 823 | dev->stats.tx_bytes += skb->len; |
824 | 824 | ||
825 | /* Set address of the data in the Transmit Address register */ | 825 | /* Set address of the data in the Transmit Address register */ |
826 | at91_emac_write(AT91_EMAC_TAR, lp->skb_physaddr); | 826 | at91_emac_write(AT91_EMAC_TAR, lp->skb_physaddr); |
@@ -843,34 +843,33 @@ static int at91ether_tx(struct sk_buff *skb, struct net_device *dev) | |||
843 | */ | 843 | */ |
844 | static struct net_device_stats *at91ether_stats(struct net_device *dev) | 844 | static struct net_device_stats *at91ether_stats(struct net_device *dev) |
845 | { | 845 | { |
846 | struct at91_private *lp = netdev_priv(dev); | ||
847 | int ale, lenerr, seqe, lcol, ecol; | 846 | int ale, lenerr, seqe, lcol, ecol; |
848 | 847 | ||
849 | if (netif_running(dev)) { | 848 | if (netif_running(dev)) { |
850 | lp->stats.rx_packets += at91_emac_read(AT91_EMAC_OK); /* Good frames received */ | 849 | dev->stats.rx_packets += at91_emac_read(AT91_EMAC_OK); /* Good frames received */ |
851 | ale = at91_emac_read(AT91_EMAC_ALE); | 850 | ale = at91_emac_read(AT91_EMAC_ALE); |
852 | lp->stats.rx_frame_errors += ale; /* Alignment errors */ | 851 | dev->stats.rx_frame_errors += ale; /* Alignment errors */ |
853 | lenerr = at91_emac_read(AT91_EMAC_ELR) + at91_emac_read(AT91_EMAC_USF); | 852 | lenerr = at91_emac_read(AT91_EMAC_ELR) + at91_emac_read(AT91_EMAC_USF); |
854 | lp->stats.rx_length_errors += lenerr; /* Excessive Length or Undersize Frame error */ | 853 | dev->stats.rx_length_errors += lenerr; /* Excessive Length or Undersize Frame error */ |
855 | seqe = at91_emac_read(AT91_EMAC_SEQE); | 854 | seqe = at91_emac_read(AT91_EMAC_SEQE); |
856 | lp->stats.rx_crc_errors += seqe; /* CRC error */ | 855 | dev->stats.rx_crc_errors += seqe; /* CRC error */ |
857 | lp->stats.rx_fifo_errors += at91_emac_read(AT91_EMAC_DRFC); /* Receive buffer not available */ | 856 | dev->stats.rx_fifo_errors += at91_emac_read(AT91_EMAC_DRFC); /* Receive buffer not available */ |
858 | lp->stats.rx_errors += (ale + lenerr + seqe | 857 | dev->stats.rx_errors += (ale + lenerr + seqe |
859 | + at91_emac_read(AT91_EMAC_CDE) + at91_emac_read(AT91_EMAC_RJB)); | 858 | + at91_emac_read(AT91_EMAC_CDE) + at91_emac_read(AT91_EMAC_RJB)); |
860 | 859 | ||
861 | lp->stats.tx_packets += at91_emac_read(AT91_EMAC_FRA); /* Frames successfully transmitted */ | 860 | dev->stats.tx_packets += at91_emac_read(AT91_EMAC_FRA); /* Frames successfully transmitted */ |
862 | lp->stats.tx_fifo_errors += at91_emac_read(AT91_EMAC_TUE); /* Transmit FIFO underruns */ | 861 | dev->stats.tx_fifo_errors += at91_emac_read(AT91_EMAC_TUE); /* Transmit FIFO underruns */ |
863 | lp->stats.tx_carrier_errors += at91_emac_read(AT91_EMAC_CSE); /* Carrier Sense errors */ | 862 | dev->stats.tx_carrier_errors += at91_emac_read(AT91_EMAC_CSE); /* Carrier Sense errors */ |
864 | lp->stats.tx_heartbeat_errors += at91_emac_read(AT91_EMAC_SQEE);/* Heartbeat error */ | 863 | dev->stats.tx_heartbeat_errors += at91_emac_read(AT91_EMAC_SQEE);/* Heartbeat error */ |
865 | 864 | ||
866 | lcol = at91_emac_read(AT91_EMAC_LCOL); | 865 | lcol = at91_emac_read(AT91_EMAC_LCOL); |
867 | ecol = at91_emac_read(AT91_EMAC_ECOL); | 866 | ecol = at91_emac_read(AT91_EMAC_ECOL); |
868 | lp->stats.tx_window_errors += lcol; /* Late collisions */ | 867 | dev->stats.tx_window_errors += lcol; /* Late collisions */ |
869 | lp->stats.tx_aborted_errors += ecol; /* 16 collisions */ | 868 | dev->stats.tx_aborted_errors += ecol; /* 16 collisions */ |
870 | 869 | ||
871 | lp->stats.collisions += (at91_emac_read(AT91_EMAC_SCOL) + at91_emac_read(AT91_EMAC_MCOL) + lcol + ecol); | 870 | dev->stats.collisions += (at91_emac_read(AT91_EMAC_SCOL) + at91_emac_read(AT91_EMAC_MCOL) + lcol + ecol); |
872 | } | 871 | } |
873 | return &lp->stats; | 872 | return &dev->stats; |
874 | } | 873 | } |
875 | 874 | ||
876 | /* | 875 | /* |
@@ -896,16 +895,16 @@ static void at91ether_rx(struct net_device *dev) | |||
896 | 895 | ||
897 | skb->protocol = eth_type_trans(skb, dev); | 896 | skb->protocol = eth_type_trans(skb, dev); |
898 | dev->last_rx = jiffies; | 897 | dev->last_rx = jiffies; |
899 | lp->stats.rx_bytes += pktlen; | 898 | dev->stats.rx_bytes += pktlen; |
900 | netif_rx(skb); | 899 | netif_rx(skb); |
901 | } | 900 | } |
902 | else { | 901 | else { |
903 | lp->stats.rx_dropped += 1; | 902 | dev->stats.rx_dropped += 1; |
904 | printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name); | 903 | printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name); |
905 | } | 904 | } |
906 | 905 | ||
907 | if (dlist->descriptors[lp->rxBuffIndex].size & EMAC_MULTICAST) | 906 | if (dlist->descriptors[lp->rxBuffIndex].size & EMAC_MULTICAST) |
908 | lp->stats.multicast++; | 907 | dev->stats.multicast++; |
909 | 908 | ||
910 | dlist->descriptors[lp->rxBuffIndex].addr &= ~EMAC_DESC_DONE; /* reset ownership bit */ | 909 | dlist->descriptors[lp->rxBuffIndex].addr &= ~EMAC_DESC_DONE; /* reset ownership bit */ |
911 | if (lp->rxBuffIndex == MAX_RX_DESCR-1) /* wrap after last buffer */ | 910 | if (lp->rxBuffIndex == MAX_RX_DESCR-1) /* wrap after last buffer */ |
@@ -934,7 +933,7 @@ static irqreturn_t at91ether_interrupt(int irq, void *dev_id) | |||
934 | if (intstatus & AT91_EMAC_TCOM) { /* Transmit complete */ | 933 | if (intstatus & AT91_EMAC_TCOM) { /* Transmit complete */ |
935 | /* The TCOM bit is set even if the transmission failed. */ | 934 | /* The TCOM bit is set even if the transmission failed. */ |
936 | if (intstatus & (AT91_EMAC_TUND | AT91_EMAC_RTRY)) | 935 | if (intstatus & (AT91_EMAC_TUND | AT91_EMAC_RTRY)) |
937 | lp->stats.tx_errors += 1; | 936 | dev->stats.tx_errors += 1; |
938 | 937 | ||
939 | if (lp->skb) { | 938 | if (lp->skb) { |
940 | dev_kfree_skb_irq(lp->skb); | 939 | dev_kfree_skb_irq(lp->skb); |
diff --git a/drivers/net/arm/at91_ether.h b/drivers/net/arm/at91_ether.h index a38fd2d053a6..353f4dab62be 100644 --- a/drivers/net/arm/at91_ether.h +++ b/drivers/net/arm/at91_ether.h | |||
@@ -84,7 +84,6 @@ struct recv_desc_bufs | |||
84 | 84 | ||
85 | struct at91_private | 85 | struct at91_private |
86 | { | 86 | { |
87 | struct net_device_stats stats; | ||
88 | struct mii_if_info mii; /* ethtool support */ | 87 | struct mii_if_info mii; /* ethtool support */ |
89 | struct at91_eth_data board_data; /* board-specific configuration */ | 88 | struct at91_eth_data board_data; /* board-specific configuration */ |
90 | struct clk *ether_clk; /* clock */ | 89 | struct clk *ether_clk; /* clock */ |
diff --git a/drivers/net/atl1e/Makefile b/drivers/net/atl1e/Makefile new file mode 100644 index 000000000000..bc11be824e76 --- /dev/null +++ b/drivers/net/atl1e/Makefile | |||
@@ -0,0 +1,2 @@ | |||
1 | obj-$(CONFIG_ATL1E) += atl1e.o | ||
2 | atl1e-objs += atl1e_main.o atl1e_hw.o atl1e_ethtool.o atl1e_param.o | ||
diff --git a/drivers/net/atl1e/atl1e.h b/drivers/net/atl1e/atl1e.h new file mode 100644 index 000000000000..b645fa0f3f64 --- /dev/null +++ b/drivers/net/atl1e/atl1e.h | |||
@@ -0,0 +1,503 @@ | |||
1 | /* | ||
2 | * Copyright(c) 2007 Atheros Corporation. All rights reserved. | ||
3 | * Copyright(c) 2007 xiong huang <xiong.huang@atheros.com> | ||
4 | * | ||
5 | * Derived from Intel e1000 driver | ||
6 | * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the Free | ||
10 | * Software Foundation; either version 2 of the License, or (at your option) | ||
11 | * any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
16 | * more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License along with | ||
19 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
20 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
21 | */ | ||
22 | |||
23 | #ifndef _ATL1E_H_ | ||
24 | #define _ATL1E_H_ | ||
25 | |||
26 | #include <linux/version.h> | ||
27 | #include <linux/init.h> | ||
28 | #include <linux/types.h> | ||
29 | #include <linux/errno.h> | ||
30 | #include <linux/module.h> | ||
31 | #include <linux/pci.h> | ||
32 | #include <linux/netdevice.h> | ||
33 | #include <linux/etherdevice.h> | ||
34 | #include <linux/skbuff.h> | ||
35 | #include <linux/ioport.h> | ||
36 | #include <linux/slab.h> | ||
37 | #include <linux/list.h> | ||
38 | #include <linux/delay.h> | ||
39 | #include <linux/sched.h> | ||
40 | #include <linux/in.h> | ||
41 | #include <linux/ip.h> | ||
42 | #include <linux/ipv6.h> | ||
43 | #include <linux/udp.h> | ||
44 | #include <linux/mii.h> | ||
45 | #include <linux/io.h> | ||
46 | #include <linux/vmalloc.h> | ||
47 | #include <linux/pagemap.h> | ||
48 | #include <linux/tcp.h> | ||
49 | #include <linux/mii.h> | ||
50 | #include <linux/ethtool.h> | ||
51 | #include <linux/if_vlan.h> | ||
52 | #include <linux/workqueue.h> | ||
53 | #include <net/checksum.h> | ||
54 | #include <net/ip6_checksum.h> | ||
55 | |||
56 | #include "atl1e_hw.h" | ||
57 | |||
58 | #define PCI_REG_COMMAND 0x04 /* PCI Command Register */ | ||
59 | #define CMD_IO_SPACE 0x0001 | ||
60 | #define CMD_MEMORY_SPACE 0x0002 | ||
61 | #define CMD_BUS_MASTER 0x0004 | ||
62 | |||
63 | #define BAR_0 0 | ||
64 | #define BAR_1 1 | ||
65 | #define BAR_5 5 | ||
66 | |||
67 | /* Wake Up Filter Control */ | ||
68 | #define AT_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ | ||
69 | #define AT_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ | ||
70 | #define AT_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ | ||
71 | #define AT_WUFC_MC 0x00000008 /* Multicast Wakeup Enable */ | ||
72 | #define AT_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ | ||
73 | |||
74 | #define SPEED_0 0xffff | ||
75 | #define HALF_DUPLEX 1 | ||
76 | #define FULL_DUPLEX 2 | ||
77 | |||
78 | /* Error Codes */ | ||
79 | #define AT_ERR_EEPROM 1 | ||
80 | #define AT_ERR_PHY 2 | ||
81 | #define AT_ERR_CONFIG 3 | ||
82 | #define AT_ERR_PARAM 4 | ||
83 | #define AT_ERR_MAC_TYPE 5 | ||
84 | #define AT_ERR_PHY_TYPE 6 | ||
85 | #define AT_ERR_PHY_SPEED 7 | ||
86 | #define AT_ERR_PHY_RES 8 | ||
87 | #define AT_ERR_TIMEOUT 9 | ||
88 | |||
89 | #define MAX_JUMBO_FRAME_SIZE 0x2000 | ||
90 | |||
91 | #define AT_VLAN_TAG_TO_TPD_TAG(_vlan, _tpd) \ | ||
92 | _tpd = (((_vlan) << (4)) | (((_vlan) >> 13) & 7) |\ | ||
93 | (((_vlan) >> 9) & 8)) | ||
94 | |||
95 | #define AT_TPD_TAG_TO_VLAN_TAG(_tpd, _vlan) \ | ||
96 | _vlan = (((_tpd) >> 8) | (((_tpd) & 0x77) << 9) |\ | ||
97 | (((_tdp) & 0x88) << 5)) | ||
98 | |||
99 | #define AT_MAX_RECEIVE_QUEUE 4 | ||
100 | #define AT_PAGE_NUM_PER_QUEUE 2 | ||
101 | |||
102 | #define AT_DMA_HI_ADDR_MASK 0xffffffff00000000ULL | ||
103 | #define AT_DMA_LO_ADDR_MASK 0x00000000ffffffffULL | ||
104 | |||
105 | #define AT_TX_WATCHDOG (5 * HZ) | ||
106 | #define AT_MAX_INT_WORK 10 | ||
107 | #define AT_TWSI_EEPROM_TIMEOUT 100 | ||
108 | #define AT_HW_MAX_IDLE_DELAY 10 | ||
109 | #define AT_SUSPEND_LINK_TIMEOUT 28 | ||
110 | |||
111 | #define AT_REGS_LEN 75 | ||
112 | #define AT_EEPROM_LEN 512 | ||
113 | #define AT_ADV_MASK (ADVERTISE_10_HALF |\ | ||
114 | ADVERTISE_10_FULL |\ | ||
115 | ADVERTISE_100_HALF |\ | ||
116 | ADVERTISE_100_FULL |\ | ||
117 | ADVERTISE_1000_FULL) | ||
118 | |||
119 | /* tpd word 2 */ | ||
120 | #define TPD_BUFLEN_MASK 0x3FFF | ||
121 | #define TPD_BUFLEN_SHIFT 0 | ||
122 | #define TPD_DMAINT_MASK 0x0001 | ||
123 | #define TPD_DMAINT_SHIFT 14 | ||
124 | #define TPD_PKTNT_MASK 0x0001 | ||
125 | #define TPD_PKTINT_SHIFT 15 | ||
126 | #define TPD_VLANTAG_MASK 0xFFFF | ||
127 | #define TPD_VLAN_SHIFT 16 | ||
128 | |||
129 | /* tpd word 3 bits 0:4 */ | ||
130 | #define TPD_EOP_MASK 0x0001 | ||
131 | #define TPD_EOP_SHIFT 0 | ||
132 | #define TPD_IP_VERSION_MASK 0x0001 | ||
133 | #define TPD_IP_VERSION_SHIFT 1 /* 0 : IPV4, 1 : IPV6 */ | ||
134 | #define TPD_INS_VL_TAG_MASK 0x0001 | ||
135 | #define TPD_INS_VL_TAG_SHIFT 2 | ||
136 | #define TPD_CC_SEGMENT_EN_MASK 0x0001 | ||
137 | #define TPD_CC_SEGMENT_EN_SHIFT 3 | ||
138 | #define TPD_SEGMENT_EN_MASK 0x0001 | ||
139 | #define TPD_SEGMENT_EN_SHIFT 4 | ||
140 | |||
141 | /* tdp word 3 bits 5:7 if ip version is 0 */ | ||
142 | #define TPD_IP_CSUM_MASK 0x0001 | ||
143 | #define TPD_IP_CSUM_SHIFT 5 | ||
144 | #define TPD_TCP_CSUM_MASK 0x0001 | ||
145 | #define TPD_TCP_CSUM_SHIFT 6 | ||
146 | #define TPD_UDP_CSUM_MASK 0x0001 | ||
147 | #define TPD_UDP_CSUM_SHIFT 7 | ||
148 | |||
149 | /* tdp word 3 bits 5:7 if ip version is 1 */ | ||
150 | #define TPD_V6_IPHLLO_MASK 0x0007 | ||
151 | #define TPD_V6_IPHLLO_SHIFT 7 | ||
152 | |||
153 | /* tpd word 3 bits 8:9 bit */ | ||
154 | #define TPD_VL_TAGGED_MASK 0x0001 | ||
155 | #define TPD_VL_TAGGED_SHIFT 8 | ||
156 | #define TPD_ETHTYPE_MASK 0x0001 | ||
157 | #define TPD_ETHTYPE_SHIFT 9 | ||
158 | |||
159 | /* tdp word 3 bits 10:13 if ip version is 0 */ | ||
160 | #define TDP_V4_IPHL_MASK 0x000F | ||
161 | #define TPD_V4_IPHL_SHIFT 10 | ||
162 | |||
163 | /* tpd word 3 bits 10:13 if ip version is 1 */ | ||
164 | #define TPD_V6_IPHLHI_MASK 0x000F | ||
165 | #define TPD_V6_IPHLHI_SHIFT 10 | ||
166 | |||
167 | /* tpd word 3 bit 14:31 if segment enabled */ | ||
168 | #define TPD_TCPHDRLEN_MASK 0x000F | ||
169 | #define TPD_TCPHDRLEN_SHIFT 14 | ||
170 | #define TPD_HDRFLAG_MASK 0x0001 | ||
171 | #define TPD_HDRFLAG_SHIFT 18 | ||
172 | #define TPD_MSS_MASK 0x1FFF | ||
173 | #define TPD_MSS_SHIFT 19 | ||
174 | |||
175 | /* tpd word 3 bit 16:31 if custom csum enabled */ | ||
176 | #define TPD_PLOADOFFSET_MASK 0x00FF | ||
177 | #define TPD_PLOADOFFSET_SHIFT 16 | ||
178 | #define TPD_CCSUMOFFSET_MASK 0x00FF | ||
179 | #define TPD_CCSUMOFFSET_SHIFT 24 | ||
180 | |||
/*
 * Transmit Packet Descriptor (TPD): one DMA buffer segment plus the two
 * little-endian control words whose bit fields are defined by the
 * TPD_*_MASK / TPD_*_SHIFT macros above.
 */
struct atl1e_tpd_desc {
	__le64 buffer_addr; /* physical address of the TX buffer segment */
	__le32 word2;       /* buffer length and DMA/packet interrupt bits */
	__le32 word3;       /* offload control: csum, segmentation, VLAN bits */
};
186 | |||
187 | /* maximum length of a single TX DMA buffer segment (8 KiB) */ | ||
188 | #define MAX_TX_BUF_LEN 0x2000 | ||
189 | #define MAX_TX_BUF_SHIFT 13 | ||
190 | /*#define MAX_TX_BUF_LEN 0x3000 */ | ||
191 | |||
192 | /* rrs word 1 bit 0:31 */ | ||
193 | #define RRS_RX_CSUM_MASK 0xFFFF | ||
194 | #define RRS_RX_CSUM_SHIFT 0 | ||
195 | #define RRS_PKT_SIZE_MASK 0x3FFF | ||
196 | #define RRS_PKT_SIZE_SHIFT 16 | ||
197 | #define RRS_CPU_NUM_MASK 0x0003 | ||
198 | #define RRS_CPU_NUM_SHIFT 30 | ||
199 | |||
200 | #define RRS_IS_RSS_IPV4 0x0001 | ||
201 | #define RRS_IS_RSS_IPV4_TCP 0x0002 | ||
202 | #define RRS_IS_RSS_IPV6 0x0004 | ||
203 | #define RRS_IS_RSS_IPV6_TCP 0x0008 | ||
204 | #define RRS_IS_IPV6 0x0010 | ||
205 | #define RRS_IS_IP_FRAG 0x0020 | ||
206 | #define RRS_IS_IP_DF 0x0040 | ||
207 | #define RRS_IS_802_3 0x0080 | ||
208 | #define RRS_IS_VLAN_TAG 0x0100 | ||
209 | #define RRS_IS_ERR_FRAME 0x0200 | ||
210 | #define RRS_IS_IPV4 0x0400 | ||
211 | #define RRS_IS_UDP 0x0800 | ||
212 | #define RRS_IS_TCP 0x1000 | ||
213 | #define RRS_IS_BCAST 0x2000 | ||
214 | #define RRS_IS_MCAST 0x4000 | ||
215 | #define RRS_IS_PAUSE 0x8000 | ||
216 | |||
217 | #define RRS_ERR_BAD_CRC 0x0001 | ||
218 | #define RRS_ERR_CODE 0x0002 | ||
219 | #define RRS_ERR_DRIBBLE 0x0004 | ||
220 | #define RRS_ERR_RUNT 0x0008 | ||
221 | #define RRS_ERR_RX_OVERFLOW 0x0010 | ||
222 | #define RRS_ERR_TRUNC 0x0020 | ||
223 | #define RRS_ERR_IP_CSUM 0x0040 | ||
224 | #define RRS_ERR_L4_CSUM 0x0080 | ||
225 | #define RRS_ERR_LENGTH 0x0100 | ||
226 | #define RRS_ERR_DES_ADDR 0x0200 | ||
227 | |||
/*
 * Receive Return Status (RRS) descriptor written back by the NIC for each
 * received frame.  pkt_flag uses the RRS_IS_* bits; err_flag uses the
 * RRS_ERR_* bits (presumably valid only when RRS_IS_ERR_FRAME is set in
 * pkt_flag — confirm against the RX path); word1 carries the
 * checksum / packet-size / CPU-number fields masked above.
 */
struct atl1e_recv_ret_status {
	u16 seq_num;   /* RRS sequence number */
	u16 hash_lo;   /* low half of the RSS hash */
	__le32	word1; /* RRS_RX_CSUM / RRS_PKT_SIZE / RRS_CPU_NUM fields */
	u16 pkt_flag;  /* RRS_IS_* flags */
	u16 err_flag;  /* RRS_ERR_* flags */
	u16 hash_hi;   /* high half of the RSS hash */
	u16 vtag;      /* VLAN tag; see RRS_IS_VLAN_TAG */
};
237 | |||
/* DMA request burst-length encodings programmed into the DMA control
 * registers; the numeric value selects the burst size in bytes. */
enum atl1e_dma_req_block {
	atl1e_dma_req_128 = 0,   /* 128-byte bursts */
	atl1e_dma_req_256 = 1,   /* 256-byte bursts */
	atl1e_dma_req_512 = 2,   /* 512-byte bursts */
	atl1e_dma_req_1024 = 3,  /* 1 KiB bursts */
	atl1e_dma_req_2048 = 4,  /* 2 KiB bursts */
	atl1e_dma_req_4096 = 5   /* 4 KiB bursts */
};
246 | |||
/* RSS hash-type selection.  Values are OR-able bit flags (1, 2, 4, 8),
 * mirroring the RRS_IS_RSS_* result bits reported in the RRS descriptor. */
enum atl1e_rrs_type {
	atl1e_rrs_disable = 0,
	atl1e_rrs_ipv4 = 1,
	atl1e_rrs_ipv4_tcp = 2,
	atl1e_rrs_ipv6 = 4,
	atl1e_rrs_ipv6_tcp = 8
};
254 | |||
/* Supported MAC flavours.  Only athr_l1e is gigabit-capable (see the
 * SUPPORTED_1000baseT_Full check in atl1e_get_settings); the L2e
 * revisions are fast-ethernet parts. */
enum atl1e_nic_type {
	athr_l1e = 0,      /* L1e: 10/100/1000 */
	athr_l2e_revA = 1, /* L2e rev A: 10/100 */
	athr_l2e_revB = 2  /* L2e rev B: 10/100 */
};
260 | |||
/*
 * Software accumulators for the hardware MIB counters, split into RX and
 * TX groups.  ("Selene" below is the MAC's codename from the vendor
 * documentation.)  All counters are monotonically increasing.
 */
struct atl1e_hw_stats {
	/* rx */
	unsigned long rx_ok;	      /* The number of good packet received. */
	unsigned long rx_bcast;       /* The number of good broadcast packet received. */
	unsigned long rx_mcast;       /* The number of good multicast packet received. */
	unsigned long rx_pause;       /* The number of Pause packet received. */
	unsigned long rx_ctrl;        /* The number of Control packet received other than Pause frame. */
	unsigned long rx_fcs_err;     /* The number of packets with bad FCS. */
	unsigned long rx_len_err;     /* The number of packets with mismatch of length field and actual size. */
	unsigned long rx_byte_cnt;    /* The number of bytes of good packet received. FCS is NOT included. */
	unsigned long rx_runt;        /* The number of packets received that are less than 64 byte long and with good FCS. */
	unsigned long rx_frag;        /* The number of packets received that are less than 64 byte long and with bad FCS. */
	unsigned long rx_sz_64;       /* The number of good and bad packets received that are 64 byte long. */
	unsigned long rx_sz_65_127;   /* The number of good and bad packets received that are between 65 and 127-byte long. */
	unsigned long rx_sz_128_255;  /* The number of good and bad packets received that are between 128 and 255-byte long. */
	unsigned long rx_sz_256_511;  /* The number of good and bad packets received that are between 256 and 511-byte long. */
	unsigned long rx_sz_512_1023; /* The number of good and bad packets received that are between 512 and 1023-byte long. */
	unsigned long rx_sz_1024_1518; /* The number of good and bad packets received that are between 1024 and 1518-byte long. */
	unsigned long rx_sz_1519_max; /* The number of good and bad packets received that are between 1519-byte and MTU. */
	unsigned long rx_sz_ov;       /* The number of good and bad packets received that are more than MTU size truncated by Selene. */
	unsigned long rx_rxf_ov;      /* The number of frame dropped due to occurrence of RX FIFO overflow. */
	unsigned long rx_rrd_ov;      /* The number of frame dropped due to occurrence of RRD overflow. */
	unsigned long rx_align_err;   /* Alignment Error */
	unsigned long rx_bcast_byte_cnt; /* The byte count of broadcast packet received, excluding FCS. */
	unsigned long rx_mcast_byte_cnt; /* The byte count of multicast packet received, excluding FCS. */
	unsigned long rx_err_addr;    /* The number of packets dropped due to address filtering. */

	/* tx */
	unsigned long tx_ok;          /* The number of good packet transmitted. */
	unsigned long tx_bcast;       /* The number of good broadcast packet transmitted. */
	unsigned long tx_mcast;       /* The number of good multicast packet transmitted. */
	unsigned long tx_pause;       /* The number of Pause packet transmitted. */
	unsigned long tx_exc_defer;   /* The number of packets transmitted with excessive deferral. */
	unsigned long tx_ctrl;        /* The number of packets transmitted is a control frame, excluding Pause frame. */
	unsigned long tx_defer;       /* The number of packets transmitted that is deferred. */
	unsigned long tx_byte_cnt;    /* The number of bytes of data transmitted. FCS is NOT included. */
	unsigned long tx_sz_64;       /* The number of good and bad packets transmitted that are 64 byte long. */
	unsigned long tx_sz_65_127;   /* The number of good and bad packets transmitted that are between 65 and 127-byte long. */
	unsigned long tx_sz_128_255;  /* The number of good and bad packets transmitted that are between 128 and 255-byte long. */
	unsigned long tx_sz_256_511;  /* The number of good and bad packets transmitted that are between 256 and 511-byte long. */
	unsigned long tx_sz_512_1023; /* The number of good and bad packets transmitted that are between 512 and 1023-byte long. */
	unsigned long tx_sz_1024_1518; /* The number of good and bad packets transmitted that are between 1024 and 1518-byte long. */
	unsigned long tx_sz_1519_max; /* The number of good and bad packets transmitted that are between 1519-byte and MTU. */
	unsigned long tx_1_col;       /* The number of packets subsequently transmitted successfully with a single prior collision. */
	unsigned long tx_2_col;       /* The number of packets subsequently transmitted successfully with multiple prior collisions. */
	unsigned long tx_late_col;    /* The number of packets transmitted with late collisions. */
	unsigned long tx_abort_col;   /* The number of transmit packets aborted due to excessive collisions. */
	unsigned long tx_underrun;    /* The number of transmit packets aborted due to transmit FIFO underrun, or TRD FIFO underrun */
	unsigned long tx_rd_eop;      /* The number of times that read beyond the EOP into the next frame area when TRD was not written timely */
	unsigned long tx_len_err;     /* The number of transmit packets with length field does NOT match the actual frame size. */
	unsigned long tx_trunc;       /* The number of transmit packets truncated due to size exceeding MTU. */
	unsigned long tx_bcast_byte;  /* The byte count of broadcast packet transmitted, excluding FCS. */
	unsigned long tx_mcast_byte;  /* The byte count of multicast packet transmitted, excluding FCS. */
};
315 | |||
/*
 * Hardware abstraction for the L1e MAC/PHY.  Identification fields are
 * filled in from PCI config space at probe time; the tuning fields feed
 * the register setup code.
 */
struct atl1e_hw {
	u8 __iomem      *hw_addr;            /* inner register address (mapped BAR) */
	resource_size_t mem_rang;            /* BAR length (name kept for ABI; sic) */
	struct atl1e_adapter *adapter;       /* back-pointer to owning adapter */
	enum atl1e_nic_type  nic_type;       /* athr_l1e vs athr_l2e_rev* */
	u16 device_id;
	u16 vendor_id;
	u16 subsystem_id;
	u16 subsystem_vendor_id;
	u8  revision_id;
	u16 pci_cmd_word;
	u8 mac_addr[ETH_ALEN];               /* currently programmed MAC address */
	u8 perm_mac_addr[ETH_ALEN];          /* permanent (factory) MAC address */
	u8  preamble_len;
	u16 max_frame_size;                  /* assumed MTU-derived — TODO confirm */
	u16 rx_jumbo_th;
	u16 tx_jumbo_th;

	u16 media_type;                      /* one of MEDIA_TYPE_* below */
#define MEDIA_TYPE_AUTO_SENSOR  0
#define MEDIA_TYPE_100M_FULL    1
#define MEDIA_TYPE_100M_HALF    2
#define MEDIA_TYPE_10M_FULL     3
#define MEDIA_TYPE_10M_HALF     4

	u16 autoneg_advertised;              /* ADVERTISE_* bits below */
#define ADVERTISE_10_HALF               0x0001
#define ADVERTISE_10_FULL               0x0002
#define ADVERTISE_100_HALF              0x0004
#define ADVERTISE_100_FULL              0x0008
#define ADVERTISE_1000_HALF             0x0010 /* Not used, just FYI */
#define ADVERTISE_1000_FULL             0x0020
	u16 mii_autoneg_adv_reg;             /* shadow of PHY advert register (reg 4) */
	u16 mii_1000t_ctrl_reg;              /* shadow of PHY 1000T control (reg 9) */

	u16 imt;        /* Interrupt Moderator timer ( 2us resolution) */
	u16 ict;        /* Interrupt Clear timer (2us resolution) */
	u32 smb_timer;
	u16 rrd_thresh; /* Threshold of number of RRD produced to trigger
			   interrupt request */
	u16 tpd_thresh;
	u16 rx_count_down; /* 2us resolution */
	u16 tx_count_down;

	u8  tpd_burst;  /* Number of TPD to prefetch in cache-aligned burst. */
	enum atl1e_rrs_type rrs_type;        /* RSS hash type selection */
	u32 base_cpu;
	u32 indirect_tab;                    /* RSS indirection table */

	enum atl1e_dma_req_block dmar_block; /* DMA read burst length */
	enum atl1e_dma_req_block dmaw_block; /* DMA write burst length */
	u8 dmaw_dly_cnt;
	u8 dmar_dly_cnt;

	bool phy_configured;                 /* PHY setup already done */
	bool re_autoneg;                     /* renegotiate on next link reset */
	bool emi_ca;
};
374 | |||
375 | /* | ||
376 | * wrapper around a pointer to a socket buffer, | ||
377 | * so a DMA handle can be stored along with the buffer | ||
378 | */ | ||
struct atl1e_tx_buffer {
	struct sk_buff *skb; /* socket buffer associated with this descriptor */
	u16 length;          /* mapped length of this segment in bytes */
	dma_addr_t dma;      /* DMA handle for unmapping on completion */
};
384 | |||
/*
 * One receive page: the NIC DMAs frames into a large page buffer and
 * reports its current write position through a separate DMA-updated
 * offset word.
 */
struct atl1e_rx_page {
	dma_addr_t	dma;    /* receive page DMA address */
	u8		*addr;   /* receive page virtual address */
	dma_addr_t	write_offset_dma;  /* the DMA address which contain the
					    * receive data offset in the page */
	u32		*write_offset_addr; /* the virtual address which contain
					     * the receive data offset in the page */
	u32		read_offset;       /* the offset where we have read */
};
394 | |||
/* Per-RX-queue set of receive pages, used alternately */
struct atl1e_rx_page_desc {
	struct atl1e_rx_page   rx_page[AT_PAGE_NUM_PER_QUEUE];
	u8  rx_using;  /* index of the rx_page currently in use */
	u16 rx_nxseq;  /* next RX sequence number — presumably matched
			* against the RRS seq_num; verify in the RX path */
};
400 | |||
/* transmit packet descriptor (tpd) ring */
struct atl1e_tx_ring {
	struct atl1e_tpd_desc *desc;  /* descriptor ring virtual address */
	dma_addr_t	   dma;       /* descriptor ring physical address */
	u16		   count;     /* the count of transmit rings */
	rwlock_t	   tx_lock;
	u16		   next_to_use;   /* producer index */
	atomic_t	   next_to_clean; /* consumer index (completion side) */
	struct atl1e_tx_buffer *tx_buffer; /* per-descriptor skb/DMA bookkeeping */
	dma_addr_t	   cmb_dma;   /* coalescing message block, DMA address */
	u32		   *cmb;      /* CMB virtual address — presumably the HW
				       * writes TX progress here; confirm in main.c */
};
413 | |||
/* receive packet descriptor ring */
struct atl1e_rx_ring {
	void		*desc;           /* ring virtual address */
	dma_addr_t	dma;             /* ring physical address */
	int		size;            /* bytes allocated for the ring */
	u32		page_size;       /* bytes length of rxf page */
	u32		real_page_size;  /* page_size plus jumbo and alignment slack */
	struct atl1e_rx_page_desc	rx_page_desc[AT_MAX_RECEIVE_QUEUE];
};
423 | |||
/* board specific private data structure */
struct atl1e_adapter {
	struct net_device   *netdev;        /* owning net device */
	struct pci_dev      *pdev;          /* underlying PCI function */
	struct vlan_group   *vlgrp;         /* registered VLAN group, if any */
	struct napi_struct  napi;           /* NAPI context for RX polling */
	struct mii_if_info  mii;            /* MII interface info */
	struct atl1e_hw     hw;             /* hardware state / register mapping */
	struct atl1e_hw_stats hw_stats;     /* accumulated MIB counters */
	struct net_device_stats net_stats;  /* stats reported to the stack */

	bool have_msi;                      /* MSI successfully enabled */
	u32 wol;                            /* active AT_WUFC_* wake-up flags */
	u16 link_speed;
	u16 link_duplex;

	spinlock_t mdio_lock;               /* serializes PHY (MDIO) access */
	spinlock_t tx_lock;
	atomic_t irq_sem;

	struct work_struct reset_task;
	struct work_struct link_chg_task;
	struct timer_list watchdog_timer;
	struct timer_list phy_config_timer;

	/* All Descriptor memory */
	dma_addr_t	ring_dma;           /* one DMA area shared by the rings */
	void		*ring_vir_addr;
	int		ring_size;

	struct atl1e_tx_ring tx_ring;
	struct atl1e_rx_ring rx_ring;
	int num_rx_queues;
	unsigned long flags;  /* bit numbers for {test_and_,}set_bit()/clear_bit()
			       * (see __AT_RESETTING usage in the ethtool code) */
#define __AT_TESTING        0x0001
#define __AT_RESETTING      0x0002
#define __AT_DOWN           0x0003

	u32 bd_number;     /* board number;*/
	u32 pci_state[16]; /* saved PCI config space for suspend/resume */
	u32 *config_space;
};
466 | |||
/*
 * MMIO register accessors: every device register is reached through the
 * mapped BAR at (a)->hw_addr.  AT_WRITE_FLUSH posts preceding writes by
 * forcing a read of register 0.  The *_ARRAY variants index a block of
 * consecutive 32-bit registers ("offset" is in dwords, hence << 2).
 */
#define AT_WRITE_REG(a, reg, value) ( \
		writel((value), ((a)->hw_addr + reg)))

#define AT_WRITE_FLUSH(a) (\
		readl((a)->hw_addr))

#define AT_READ_REG(a, reg) ( \
		readl((a)->hw_addr + reg))

#define AT_WRITE_REGB(a, reg, value) (\
		writeb((value), ((a)->hw_addr + reg)))

#define AT_READ_REGB(a, reg) (\
		readb((a)->hw_addr + reg))

#define AT_WRITE_REGW(a, reg, value) (\
		writew((value), ((a)->hw_addr + reg)))

#define AT_READ_REGW(a, reg) (\
		readw((a)->hw_addr + reg))

#define AT_WRITE_REG_ARRAY(a, reg, offset, value) ( \
		writel((value), (((a)->hw_addr + reg) + ((offset) << 2))))

#define AT_READ_REG_ARRAY(a, reg, offset) ( \
		readl(((a)->hw_addr + reg) + ((offset) << 2)))
493 | |||
494 | extern char atl1e_driver_name[]; | ||
495 | extern char atl1e_driver_version[]; | ||
496 | |||
497 | extern void atl1e_check_options(struct atl1e_adapter *adapter); | ||
498 | extern int atl1e_up(struct atl1e_adapter *adapter); | ||
499 | extern void atl1e_down(struct atl1e_adapter *adapter); | ||
500 | extern void atl1e_reinit_locked(struct atl1e_adapter *adapter); | ||
501 | extern s32 atl1e_reset_hw(struct atl1e_hw *hw); | ||
502 | extern void atl1e_set_ethtool_ops(struct net_device *netdev); | ||
503 | #endif /* _ATL1_E_H_ */ | ||
diff --git a/drivers/net/atl1e/atl1e_ethtool.c b/drivers/net/atl1e/atl1e_ethtool.c new file mode 100644 index 000000000000..cdc3b85b10b9 --- /dev/null +++ b/drivers/net/atl1e/atl1e_ethtool.c | |||
@@ -0,0 +1,405 @@ | |||
1 | /* | ||
2 | * Copyright(c) 2007 Atheros Corporation. All rights reserved. | ||
3 | * | ||
4 | * Derived from Intel e1000 driver | ||
5 | * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License as published by the Free | ||
9 | * Software Foundation; either version 2 of the License, or (at your option) | ||
10 | * any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along with | ||
18 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
19 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #include <linux/netdevice.h> | ||
24 | #include <linux/ethtool.h> | ||
25 | |||
26 | #include "atl1e.h" | ||
27 | |||
28 | static int atl1e_get_settings(struct net_device *netdev, | ||
29 | struct ethtool_cmd *ecmd) | ||
30 | { | ||
31 | struct atl1e_adapter *adapter = netdev_priv(netdev); | ||
32 | struct atl1e_hw *hw = &adapter->hw; | ||
33 | |||
34 | ecmd->supported = (SUPPORTED_10baseT_Half | | ||
35 | SUPPORTED_10baseT_Full | | ||
36 | SUPPORTED_100baseT_Half | | ||
37 | SUPPORTED_100baseT_Full | | ||
38 | SUPPORTED_Autoneg | | ||
39 | SUPPORTED_TP); | ||
40 | if (hw->nic_type == athr_l1e) | ||
41 | ecmd->supported |= SUPPORTED_1000baseT_Full; | ||
42 | |||
43 | ecmd->advertising = ADVERTISED_TP; | ||
44 | |||
45 | ecmd->advertising |= ADVERTISED_Autoneg; | ||
46 | ecmd->advertising |= hw->autoneg_advertised; | ||
47 | |||
48 | ecmd->port = PORT_TP; | ||
49 | ecmd->phy_address = 0; | ||
50 | ecmd->transceiver = XCVR_INTERNAL; | ||
51 | |||
52 | if (adapter->link_speed != SPEED_0) { | ||
53 | ecmd->speed = adapter->link_speed; | ||
54 | if (adapter->link_duplex == FULL_DUPLEX) | ||
55 | ecmd->duplex = DUPLEX_FULL; | ||
56 | else | ||
57 | ecmd->duplex = DUPLEX_HALF; | ||
58 | } else { | ||
59 | ecmd->speed = -1; | ||
60 | ecmd->duplex = -1; | ||
61 | } | ||
62 | |||
63 | ecmd->autoneg = AUTONEG_ENABLE; | ||
64 | return 0; | ||
65 | } | ||
66 | |||
/*
 * atl1e_set_settings - ethtool hook: change link settings
 *
 * Only autonegotiation is supported: forcing a fixed speed/duplex
 * returns -EINVAL, and 1000BASE-T full duplex may only be advertised on
 * the gigabit-capable athr_l1e part (half duplex gigabit never).  On
 * success the shadow PHY advertisement registers are recomputed and the
 * link is brought down/up (or the MAC reset) to renegotiate.
 */
static int atl1e_set_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	struct atl1e_hw *hw = &adapter->hw;

	/* serialize against concurrent resets of this adapter */
	while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
		msleep(1);

	if (ecmd->autoneg == AUTONEG_ENABLE) {
		u16 adv4, adv9; /* images of PHY reg 4 (advert) and reg 9 (1000T ctrl) */

		if ((ecmd->advertising&ADVERTISE_1000_FULL)) {
			if (hw->nic_type == athr_l1e) {
				hw->autoneg_advertised =
					ecmd->advertising & AT_ADV_MASK;
			} else {
				/* fast-ethernet-only parts cannot do 1000FD */
				clear_bit(__AT_RESETTING, &adapter->flags);
				return -EINVAL;
			}
		} else if (ecmd->advertising&ADVERTISE_1000_HALF) {
			/* gigabit half duplex is never supported */
			clear_bit(__AT_RESETTING, &adapter->flags);
			return -EINVAL;
		} else {
			hw->autoneg_advertised =
				ecmd->advertising & AT_ADV_MASK;
		}
		/* echo the sanitized advertisement back to the caller */
		ecmd->advertising = hw->autoneg_advertised |
				    ADVERTISED_TP | ADVERTISED_Autoneg;

		/* rebuild the PHY advertisement register images from scratch */
		adv4 = hw->mii_autoneg_adv_reg & ~MII_AR_SPEED_MASK;
		adv9 = hw->mii_1000t_ctrl_reg & ~MII_AT001_CR_1000T_SPEED_MASK;
		if (hw->autoneg_advertised & ADVERTISE_10_HALF)
			adv4 |= MII_AR_10T_HD_CAPS;
		if (hw->autoneg_advertised & ADVERTISE_10_FULL)
			adv4 |= MII_AR_10T_FD_CAPS;
		if (hw->autoneg_advertised & ADVERTISE_100_HALF)
			adv4 |= MII_AR_100TX_HD_CAPS;
		if (hw->autoneg_advertised & ADVERTISE_100_FULL)
			adv4 |= MII_AR_100TX_FD_CAPS;
		if (hw->autoneg_advertised & ADVERTISE_1000_FULL)
			adv9 |= MII_AT001_CR_1000T_FD_CAPS;

		if (adv4 != hw->mii_autoneg_adv_reg ||
				adv9 != hw->mii_1000t_ctrl_reg) {
			hw->mii_autoneg_adv_reg = adv4;
			hw->mii_1000t_ctrl_reg = adv9;
			hw->re_autoneg = true; /* renegotiate with new adverts */
		}

	} else {
		/* forced speed/duplex is not supported */
		clear_bit(__AT_RESETTING, &adapter->flags);
		return -EINVAL;
	}

	/* reset the link */

	if (netif_running(adapter->netdev)) {
		atl1e_down(adapter);
		atl1e_up(adapter);
	} else
		atl1e_reset_hw(&adapter->hw);

	clear_bit(__AT_RESETTING, &adapter->flags);
	return 0;
}
133 | |||
134 | static u32 atl1e_get_tx_csum(struct net_device *netdev) | ||
135 | { | ||
136 | return (netdev->features & NETIF_F_HW_CSUM) != 0; | ||
137 | } | ||
138 | |||
/*
 * ethtool hook: report the driver message level.  It is fixed at build
 * time — non-zero only when the driver is compiled with DBG defined;
 * there is no runtime knob.
 */
static u32 atl1e_get_msglevel(struct net_device *netdev)
{
#ifdef DBG
	return 1;
#else
	return 0;
#endif
}
147 | |||
/* Intentionally a no-op: the message level is compile-time only (see
 * atl1e_get_msglevel), but ethtool expects the hook to be present. */
static void atl1e_set_msglevel(struct net_device *netdev, u32 data)
{
}
151 | |||
152 | static int atl1e_get_regs_len(struct net_device *netdev) | ||
153 | { | ||
154 | return AT_REGS_LEN * sizeof(u32); | ||
155 | } | ||
156 | |||
/*
 * atl1e_get_regs - ethtool hook: dump device registers
 *
 * Fills the first 30 slots of the AT_REGS_LEN-dword buffer with MAC /
 * configuration registers and slots 73-74 with the basic PHY control
 * and status registers; the slots in between are left zeroed.
 */
static void atl1e_get_regs(struct net_device *netdev,
			   struct ethtool_regs *regs, void *p)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	struct atl1e_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u16 phy_data;

	memset(p, 0, AT_REGS_LEN * sizeof(u32));

	/* encode dump-format version, chip revision and device id */
	regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;

	regs_buff[0]  = AT_READ_REG(hw, REG_VPD_CAP);
	regs_buff[1]  = AT_READ_REG(hw, REG_SPI_FLASH_CTRL);
	regs_buff[2]  = AT_READ_REG(hw, REG_SPI_FLASH_CONFIG);
	regs_buff[3]  = AT_READ_REG(hw, REG_TWSI_CTRL);
	regs_buff[4]  = AT_READ_REG(hw, REG_PCIE_DEV_MISC_CTRL);
	regs_buff[5]  = AT_READ_REG(hw, REG_MASTER_CTRL);
	regs_buff[6]  = AT_READ_REG(hw, REG_MANUAL_TIMER_INIT);
	regs_buff[7]  = AT_READ_REG(hw, REG_IRQ_MODU_TIMER_INIT);
	regs_buff[8]  = AT_READ_REG(hw, REG_GPHY_CTRL);
	regs_buff[9]  = AT_READ_REG(hw, REG_CMBDISDMA_TIMER);
	regs_buff[10] = AT_READ_REG(hw, REG_IDLE_STATUS);
	regs_buff[11] = AT_READ_REG(hw, REG_MDIO_CTRL);
	regs_buff[12] = AT_READ_REG(hw, REG_SERDES_LOCK);
	regs_buff[13] = AT_READ_REG(hw, REG_MAC_CTRL);
	regs_buff[14] = AT_READ_REG(hw, REG_MAC_IPG_IFG);
	regs_buff[15] = AT_READ_REG(hw, REG_MAC_STA_ADDR);
	regs_buff[16] = AT_READ_REG(hw, REG_MAC_STA_ADDR+4);
	regs_buff[17] = AT_READ_REG(hw, REG_RX_HASH_TABLE);
	regs_buff[18] = AT_READ_REG(hw, REG_RX_HASH_TABLE+4);
	regs_buff[19] = AT_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL);
	regs_buff[20] = AT_READ_REG(hw, REG_MTU);
	regs_buff[21] = AT_READ_REG(hw, REG_WOL_CTRL);
	regs_buff[22] = AT_READ_REG(hw, REG_SRAM_TRD_ADDR);
	regs_buff[23] = AT_READ_REG(hw, REG_SRAM_TRD_LEN);
	regs_buff[24] = AT_READ_REG(hw, REG_SRAM_RXF_ADDR);
	regs_buff[25] = AT_READ_REG(hw, REG_SRAM_RXF_LEN);
	regs_buff[26] = AT_READ_REG(hw, REG_SRAM_TXF_ADDR);
	regs_buff[27] = AT_READ_REG(hw, REG_SRAM_TXF_LEN);
	regs_buff[28] = AT_READ_REG(hw, REG_SRAM_TCPH_ADDR);
	regs_buff[29] = AT_READ_REG(hw, REG_SRAM_PKTH_ADDR);

	/* append the basic MII registers at the tail of the dump */
	atl1e_read_phy_reg(hw, MII_BMCR, &phy_data);
	regs_buff[73] = (u32)phy_data;
	atl1e_read_phy_reg(hw, MII_BMSR, &phy_data);
	regs_buff[74] = (u32)phy_data;
}
205 | |||
206 | static int atl1e_get_eeprom_len(struct net_device *netdev) | ||
207 | { | ||
208 | struct atl1e_adapter *adapter = netdev_priv(netdev); | ||
209 | |||
210 | if (!atl1e_check_eeprom_exist(&adapter->hw)) | ||
211 | return AT_EEPROM_LEN; | ||
212 | else | ||
213 | return 0; | ||
214 | } | ||
215 | |||
216 | static int atl1e_get_eeprom(struct net_device *netdev, | ||
217 | struct ethtool_eeprom *eeprom, u8 *bytes) | ||
218 | { | ||
219 | struct atl1e_adapter *adapter = netdev_priv(netdev); | ||
220 | struct atl1e_hw *hw = &adapter->hw; | ||
221 | u32 *eeprom_buff; | ||
222 | int first_dword, last_dword; | ||
223 | int ret_val = 0; | ||
224 | int i; | ||
225 | |||
226 | if (eeprom->len == 0) | ||
227 | return -EINVAL; | ||
228 | |||
229 | if (atl1e_check_eeprom_exist(hw)) /* not exist */ | ||
230 | return -EINVAL; | ||
231 | |||
232 | eeprom->magic = hw->vendor_id | (hw->device_id << 16); | ||
233 | |||
234 | first_dword = eeprom->offset >> 2; | ||
235 | last_dword = (eeprom->offset + eeprom->len - 1) >> 2; | ||
236 | |||
237 | eeprom_buff = kmalloc(sizeof(u32) * | ||
238 | (last_dword - first_dword + 1), GFP_KERNEL); | ||
239 | if (eeprom_buff == NULL) | ||
240 | return -ENOMEM; | ||
241 | |||
242 | for (i = first_dword; i < last_dword; i++) { | ||
243 | if (!atl1e_read_eeprom(hw, i * 4, &(eeprom_buff[i-first_dword]))) { | ||
244 | kfree(eeprom_buff); | ||
245 | return -EIO; | ||
246 | } | ||
247 | } | ||
248 | |||
249 | memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3), | ||
250 | eeprom->len); | ||
251 | kfree(eeprom_buff); | ||
252 | |||
253 | return ret_val; | ||
254 | } | ||
255 | |||
/*
 * atl1e_set_eeprom - ethtool hook: write a byte range to the EEPROM
 *
 * The EEPROM is 32-bit word addressable, so words only partially covered
 * at either end of the range are handled read-modify-write: the old word
 * is read into the buffer first, the new bytes are overlaid, and the
 * whole word range is written back.
 *
 * NOTE(review): when eeprom->offset is not dword-aligned, the code
 * advances ptr by a whole word before the memcpy(), which appears to
 * place the user bytes up to three bytes later than intended — compare
 * the byte-granular offset handling in atl1e_get_eeprom.  Verify
 * against hardware before relying on unaligned writes.
 */
static int atl1e_set_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	struct atl1e_hw *hw = &adapter->hw;
	u32 *eeprom_buff;
	u32 *ptr;
	int first_dword, last_dword;
	int ret_val = 0;
	int i;

	if (eeprom->len == 0)
		return -EOPNOTSUPP;

	/* reject writes unless the caller echoes the id from get_eeprom */
	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
		return -EINVAL;

	first_dword = eeprom->offset >> 2;
	last_dword = (eeprom->offset + eeprom->len - 1) >> 2;
	eeprom_buff = kmalloc(AT_EEPROM_LEN, GFP_KERNEL);
	if (eeprom_buff == NULL)
		return -ENOMEM;

	ptr = (u32 *)eeprom_buff;

	if (eeprom->offset & 3) {
		/* need read/modify/write of first changed EEPROM word */
		/* only the second byte of the word is being modified */
		if (!atl1e_read_eeprom(hw, first_dword * 4, &(eeprom_buff[0]))) {
			ret_val = -EIO;
			goto out;
		}
		ptr++;
	}
	if (((eeprom->offset + eeprom->len) & 3)) {
		/* need read/modify/write of last changed EEPROM word */
		/* only the first byte of the word is being modified */

		if (!atl1e_read_eeprom(hw, last_dword * 4,
				&(eeprom_buff[last_dword - first_dword]))) {
			ret_val = -EIO;
			goto out;
		}
	}

	/* Device's eeprom is always little-endian, word addressable */
	memcpy(ptr, bytes, eeprom->len);

	/* write the whole covered word range back */
	for (i = 0; i < last_dword - first_dword + 1; i++) {
		if (!atl1e_write_eeprom(hw, ((first_dword + i) * 4),
				  eeprom_buff[i])) {
			ret_val = -EIO;
			goto out;
		}
	}
out:
	kfree(eeprom_buff);
	return ret_val;
}
315 | |||
316 | static void atl1e_get_drvinfo(struct net_device *netdev, | ||
317 | struct ethtool_drvinfo *drvinfo) | ||
318 | { | ||
319 | struct atl1e_adapter *adapter = netdev_priv(netdev); | ||
320 | |||
321 | strncpy(drvinfo->driver, atl1e_driver_name, 32); | ||
322 | strncpy(drvinfo->version, atl1e_driver_version, 32); | ||
323 | strncpy(drvinfo->fw_version, "L1e", 32); | ||
324 | strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); | ||
325 | drvinfo->n_stats = 0; | ||
326 | drvinfo->testinfo_len = 0; | ||
327 | drvinfo->regdump_len = atl1e_get_regs_len(netdev); | ||
328 | drvinfo->eedump_len = atl1e_get_eeprom_len(netdev); | ||
329 | } | ||
330 | |||
331 | static void atl1e_get_wol(struct net_device *netdev, | ||
332 | struct ethtool_wolinfo *wol) | ||
333 | { | ||
334 | struct atl1e_adapter *adapter = netdev_priv(netdev); | ||
335 | |||
336 | wol->supported = WAKE_MAGIC | WAKE_PHY; | ||
337 | wol->wolopts = 0; | ||
338 | |||
339 | if (adapter->wol & AT_WUFC_EX) | ||
340 | wol->wolopts |= WAKE_UCAST; | ||
341 | if (adapter->wol & AT_WUFC_MC) | ||
342 | wol->wolopts |= WAKE_MCAST; | ||
343 | if (adapter->wol & AT_WUFC_BC) | ||
344 | wol->wolopts |= WAKE_BCAST; | ||
345 | if (adapter->wol & AT_WUFC_MAG) | ||
346 | wol->wolopts |= WAKE_MAGIC; | ||
347 | if (adapter->wol & AT_WUFC_LNKC) | ||
348 | wol->wolopts |= WAKE_PHY; | ||
349 | |||
350 | return; | ||
351 | } | ||
352 | |||
353 | static int atl1e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | ||
354 | { | ||
355 | struct atl1e_adapter *adapter = netdev_priv(netdev); | ||
356 | |||
357 | if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | | ||
358 | WAKE_MCAST | WAKE_BCAST | WAKE_MCAST)) | ||
359 | return -EOPNOTSUPP; | ||
360 | /* these settings will always override what we currently have */ | ||
361 | adapter->wol = 0; | ||
362 | |||
363 | if (wol->wolopts & WAKE_MAGIC) | ||
364 | adapter->wol |= AT_WUFC_MAG; | ||
365 | if (wol->wolopts & WAKE_PHY) | ||
366 | adapter->wol |= AT_WUFC_LNKC; | ||
367 | |||
368 | return 0; | ||
369 | } | ||
370 | |||
/* ethtool hook: restart autonegotiation by reinitializing the interface
 * — only meaningful (and only attempted) when the device is running. */
static int atl1e_nway_reset(struct net_device *netdev)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		atl1e_reinit_locked(adapter);

	return 0;
}
378 | |||
/*
 * ethtool operation table for the atl1e driver, installed on the netdev
 * by atl1e_set_ethtool_ops().  SG and TSO queries use the generic
 * ethtool_op_* helpers; TSO reporting is compiled in only when the
 * kernel provides NETIF_F_TSO.
 */
static struct ethtool_ops atl1e_ethtool_ops = {
	.get_settings   = atl1e_get_settings,
	.set_settings   = atl1e_set_settings,
	.get_drvinfo    = atl1e_get_drvinfo,
	.get_regs_len   = atl1e_get_regs_len,
	.get_regs       = atl1e_get_regs,
	.get_wol        = atl1e_get_wol,
	.set_wol        = atl1e_set_wol,
	.get_msglevel   = atl1e_get_msglevel,
	.set_msglevel   = atl1e_set_msglevel,
	.nway_reset     = atl1e_nway_reset,
	.get_link       = ethtool_op_get_link,
	.get_eeprom_len = atl1e_get_eeprom_len,
	.get_eeprom     = atl1e_get_eeprom,
	.set_eeprom     = atl1e_set_eeprom,
	.get_tx_csum    = atl1e_get_tx_csum,
	.get_sg         = ethtool_op_get_sg,
	.set_sg         = ethtool_op_set_sg,
#ifdef NETIF_F_TSO
	.get_tso        = ethtool_op_get_tso,
#endif
};
401 | |||
/* Attach the atl1e ethtool operation table to a newly created netdev. */
void atl1e_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &atl1e_ethtool_ops);
}
diff --git a/drivers/net/atl1e/atl1e_hw.c b/drivers/net/atl1e/atl1e_hw.c new file mode 100644 index 000000000000..949e75358bf0 --- /dev/null +++ b/drivers/net/atl1e/atl1e_hw.c | |||
@@ -0,0 +1,664 @@ | |||
1 | /* | ||
2 | * Copyright(c) 2007 Atheros Corporation. All rights reserved. | ||
3 | * | ||
4 | * Derived from Intel e1000 driver | ||
5 | * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License as published by the Free | ||
9 | * Software Foundation; either version 2 of the License, or (at your option) | ||
10 | * any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along with | ||
18 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
19 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
20 | */ | ||
21 | #include <linux/pci.h> | ||
22 | #include <linux/delay.h> | ||
23 | #include <linux/mii.h> | ||
24 | #include <linux/crc32.h> | ||
25 | |||
26 | #include "atl1e.h" | ||
27 | |||
28 | /* | ||
29 | * check_eeprom_exist | ||
30 | * return 0 if eeprom exist | ||
31 | */ | ||
32 | int atl1e_check_eeprom_exist(struct atl1e_hw *hw) | ||
33 | { | ||
34 | u32 value; | ||
35 | |||
36 | value = AT_READ_REG(hw, REG_SPI_FLASH_CTRL); | ||
37 | if (value & SPI_FLASH_CTRL_EN_VPD) { | ||
38 | value &= ~SPI_FLASH_CTRL_EN_VPD; | ||
39 | AT_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value); | ||
40 | } | ||
41 | value = AT_READ_REGW(hw, REG_PCIE_CAP_LIST); | ||
42 | return ((value & 0xFF00) == 0x6C00) ? 0 : 1; | ||
43 | } | ||
44 | |||
45 | void atl1e_hw_set_mac_addr(struct atl1e_hw *hw) | ||
46 | { | ||
47 | u32 value; | ||
48 | /* | ||
49 | * 00-0B-6A-F6-00-DC | ||
50 | * 0: 6AF600DC 1: 000B | ||
51 | * low dword | ||
52 | */ | ||
53 | value = (((u32)hw->mac_addr[2]) << 24) | | ||
54 | (((u32)hw->mac_addr[3]) << 16) | | ||
55 | (((u32)hw->mac_addr[4]) << 8) | | ||
56 | (((u32)hw->mac_addr[5])) ; | ||
57 | AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value); | ||
58 | /* hight dword */ | ||
59 | value = (((u32)hw->mac_addr[0]) << 8) | | ||
60 | (((u32)hw->mac_addr[1])) ; | ||
61 | AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value); | ||
62 | } | ||
63 | |||
64 | /* | ||
65 | * atl1e_get_permanent_address | ||
66 | * return 0 if get valid mac address, | ||
67 | */ | ||
68 | static int atl1e_get_permanent_address(struct atl1e_hw *hw) | ||
69 | { | ||
70 | u32 addr[2]; | ||
71 | u32 i; | ||
72 | u32 twsi_ctrl_data; | ||
73 | u8 eth_addr[ETH_ALEN]; | ||
74 | |||
75 | if (is_valid_ether_addr(hw->perm_mac_addr)) | ||
76 | return 0; | ||
77 | |||
78 | /* init */ | ||
79 | addr[0] = addr[1] = 0; | ||
80 | |||
81 | if (!atl1e_check_eeprom_exist(hw)) { | ||
82 | /* eeprom exist */ | ||
83 | twsi_ctrl_data = AT_READ_REG(hw, REG_TWSI_CTRL); | ||
84 | twsi_ctrl_data |= TWSI_CTRL_SW_LDSTART; | ||
85 | AT_WRITE_REG(hw, REG_TWSI_CTRL, twsi_ctrl_data); | ||
86 | for (i = 0; i < AT_TWSI_EEPROM_TIMEOUT; i++) { | ||
87 | msleep(10); | ||
88 | twsi_ctrl_data = AT_READ_REG(hw, REG_TWSI_CTRL); | ||
89 | if ((twsi_ctrl_data & TWSI_CTRL_SW_LDSTART) == 0) | ||
90 | break; | ||
91 | } | ||
92 | if (i >= AT_TWSI_EEPROM_TIMEOUT) | ||
93 | return AT_ERR_TIMEOUT; | ||
94 | } | ||
95 | |||
96 | /* maybe MAC-address is from BIOS */ | ||
97 | addr[0] = AT_READ_REG(hw, REG_MAC_STA_ADDR); | ||
98 | addr[1] = AT_READ_REG(hw, REG_MAC_STA_ADDR + 4); | ||
99 | *(u32 *) ð_addr[2] = swab32(addr[0]); | ||
100 | *(u16 *) ð_addr[0] = swab16(*(u16 *)&addr[1]); | ||
101 | |||
102 | if (is_valid_ether_addr(eth_addr)) { | ||
103 | memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); | ||
104 | return 0; | ||
105 | } | ||
106 | |||
107 | return AT_ERR_EEPROM; | ||
108 | } | ||
109 | |||
/*
 * atl1e_write_eeprom
 * Stub: eeprom writes are not implemented; the request is accepted
 * and unconditionally reported as successful.  All parameters are
 * ignored.
 */
bool atl1e_write_eeprom(struct atl1e_hw *hw, u32 offset, u32 value)
{
	return true;
}
114 | |||
115 | bool atl1e_read_eeprom(struct atl1e_hw *hw, u32 offset, u32 *p_value) | ||
116 | { | ||
117 | int i; | ||
118 | u32 control; | ||
119 | |||
120 | if (offset & 3) | ||
121 | return false; /* address do not align */ | ||
122 | |||
123 | AT_WRITE_REG(hw, REG_VPD_DATA, 0); | ||
124 | control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT; | ||
125 | AT_WRITE_REG(hw, REG_VPD_CAP, control); | ||
126 | |||
127 | for (i = 0; i < 10; i++) { | ||
128 | msleep(2); | ||
129 | control = AT_READ_REG(hw, REG_VPD_CAP); | ||
130 | if (control & VPD_CAP_VPD_FLAG) | ||
131 | break; | ||
132 | } | ||
133 | if (control & VPD_CAP_VPD_FLAG) { | ||
134 | *p_value = AT_READ_REG(hw, REG_VPD_DATA); | ||
135 | return true; | ||
136 | } | ||
137 | return false; /* timeout */ | ||
138 | } | ||
139 | |||
/*
 * atl1e_force_ps
 * Write the GPHY control register with the power-down/WOL-disable
 * bit set (GPHY_CTRL_PW_WOL_DIS) while keeping external reset
 * asserted.  NOTE(review): per the GPHY_CTRL_PW_WOL_DIS definition
 * this powers down the PHY core with wake-on-LAN disabled -- confirm
 * against the data sheet.
 */
void atl1e_force_ps(struct atl1e_hw *hw)
{
	AT_WRITE_REGW(hw, REG_GPHY_CTRL,
			GPHY_CTRL_PW_WOL_DIS | GPHY_CTRL_EXT_RESET);
}
145 | |||
146 | /* | ||
147 | * Reads the adapter's MAC address from the EEPROM | ||
148 | * | ||
149 | * hw - Struct containing variables accessed by shared code | ||
150 | */ | ||
151 | int atl1e_read_mac_addr(struct atl1e_hw *hw) | ||
152 | { | ||
153 | int err = 0; | ||
154 | |||
155 | err = atl1e_get_permanent_address(hw); | ||
156 | if (err) | ||
157 | return AT_ERR_EEPROM; | ||
158 | memcpy(hw->mac_addr, hw->perm_mac_addr, sizeof(hw->perm_mac_addr)); | ||
159 | return 0; | ||
160 | } | ||
161 | |||
162 | /* | ||
163 | * atl1e_hash_mc_addr | ||
164 | * purpose | ||
165 | * set hash value for a multicast address | ||
166 | * hash calcu processing : | ||
167 | * 1. calcu 32bit CRC for multicast address | ||
168 | * 2. reverse crc with MSB to LSB | ||
169 | */ | ||
170 | u32 atl1e_hash_mc_addr(struct atl1e_hw *hw, u8 *mc_addr) | ||
171 | { | ||
172 | u32 crc32; | ||
173 | u32 value = 0; | ||
174 | int i; | ||
175 | |||
176 | crc32 = ether_crc_le(6, mc_addr); | ||
177 | crc32 = ~crc32; | ||
178 | for (i = 0; i < 32; i++) | ||
179 | value |= (((crc32 >> i) & 1) << (31 - i)); | ||
180 | |||
181 | return value; | ||
182 | } | ||
183 | |||
/*
 * Sets the bit in the multicast table corresponding to the hash value.
 * hw - Struct containing variables accessed by shared code
 * hash_value - Multicast address hash value
 */
void atl1e_hash_set(struct atl1e_hw *hw, u32 hash_value)
{
	u32 hash_bit, hash_reg;
	u32 mta;

	/*
	 * The HASH Table is a register array of 2 32-bit registers,
	 * treated like an array of 64 bits; we want to set
	 * BitArray[hash_value].  Bit 31 of the hash value selects
	 * which of the two registers to use, and bits 30:26 select
	 * the bit within that register.
	 */
	hash_reg = (hash_value >> 31) & 0x1;
	hash_bit = (hash_value >> 26) & 0x1F;

	/* read-modify-write: preserve previously set bits */
	mta = AT_READ_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg);

	mta |= (1 << hash_bit);

	AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg, mta);
}
212 | /* | ||
213 | * Reads the value from a PHY register | ||
214 | * hw - Struct containing variables accessed by shared code | ||
215 | * reg_addr - address of the PHY register to read | ||
216 | */ | ||
217 | int atl1e_read_phy_reg(struct atl1e_hw *hw, u16 reg_addr, u16 *phy_data) | ||
218 | { | ||
219 | u32 val; | ||
220 | int i; | ||
221 | |||
222 | val = ((u32)(reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT | | ||
223 | MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | | ||
224 | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; | ||
225 | |||
226 | AT_WRITE_REG(hw, REG_MDIO_CTRL, val); | ||
227 | |||
228 | wmb(); | ||
229 | |||
230 | for (i = 0; i < MDIO_WAIT_TIMES; i++) { | ||
231 | udelay(2); | ||
232 | val = AT_READ_REG(hw, REG_MDIO_CTRL); | ||
233 | if (!(val & (MDIO_START | MDIO_BUSY))) | ||
234 | break; | ||
235 | wmb(); | ||
236 | } | ||
237 | if (!(val & (MDIO_START | MDIO_BUSY))) { | ||
238 | *phy_data = (u16)val; | ||
239 | return 0; | ||
240 | } | ||
241 | |||
242 | return AT_ERR_PHY; | ||
243 | } | ||
244 | |||
245 | /* | ||
246 | * Writes a value to a PHY register | ||
247 | * hw - Struct containing variables accessed by shared code | ||
248 | * reg_addr - address of the PHY register to write | ||
249 | * data - data to write to the PHY | ||
250 | */ | ||
251 | int atl1e_write_phy_reg(struct atl1e_hw *hw, u32 reg_addr, u16 phy_data) | ||
252 | { | ||
253 | int i; | ||
254 | u32 val; | ||
255 | |||
256 | val = ((u32)(phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT | | ||
257 | (reg_addr&MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT | | ||
258 | MDIO_SUP_PREAMBLE | | ||
259 | MDIO_START | | ||
260 | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; | ||
261 | |||
262 | AT_WRITE_REG(hw, REG_MDIO_CTRL, val); | ||
263 | wmb(); | ||
264 | |||
265 | for (i = 0; i < MDIO_WAIT_TIMES; i++) { | ||
266 | udelay(2); | ||
267 | val = AT_READ_REG(hw, REG_MDIO_CTRL); | ||
268 | if (!(val & (MDIO_START | MDIO_BUSY))) | ||
269 | break; | ||
270 | wmb(); | ||
271 | } | ||
272 | |||
273 | if (!(val & (MDIO_START | MDIO_BUSY))) | ||
274 | return 0; | ||
275 | |||
276 | return AT_ERR_PHY; | ||
277 | } | ||
278 | |||
/*
 * atl1e_init_pcie - init PCIE module
 */
static void atl1e_init_pcie(struct atl1e_hw *hw)
{
	u32 value;
	/*
	 * The two lines below are intentionally left disabled to save
	 * more power during suspend:
	 * value = LTSSM_TEST_MODE_DEF;
	 * AT_WRITE_REG(hw, REG_LTSSM_TEST_MODE, value);
	 */

	/* pcie flow control mode change: set bit 15 of register 0x1008.
	 * NOTE(review): 0x1008 has no named definition in atl1e_hw.h
	 * (REG_PCIE_PHYMISC is 0x1000) -- confirm which register this
	 * is and give it a symbolic name. */
	value = AT_READ_REG(hw, 0x1008);
	value |= 0x8000;
	AT_WRITE_REG(hw, 0x1008, value);
}
/*
 * atl1e_phy_setup_autoneg_adv - configure PHY autoneg advertisement
 *
 * hw - Struct containing variables accessed by shared code
 *
 * Builds the MII advertisement (register 4) and 1000Base-T control
 * (register 9) values from hw->media_type, caches both in @hw, and
 * writes them to the PHY.  Does nothing (returns 0) when
 * hw->mii_autoneg_adv_reg is already non-zero, i.e. a previous call
 * already configured the advertisement.
 * Returns 0 on success or the PHY write error code.
 */
static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
{
	s32 ret_val;
	u16 mii_autoneg_adv_reg;
	u16 mii_1000t_ctrl_reg;

	/* already configured once: keep the cached values */
	if (0 != hw->mii_autoneg_adv_reg)
		return 0;
	/* Start from the default capability masks for registers 4/9. */
	mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;
	mii_1000t_ctrl_reg = MII_AT001_CR_1000T_DEFAULT_CAP_MASK;

	/*
	 * Need to parse autoneg_advertised and set up
	 * the appropriate PHY registers. First we will parse for
	 * autoneg_advertised software override. Since we can advertise
	 * a plethora of combinations, we need to check each bit
	 * individually.
	 */

	/*
	 * First we clear all the 10/100 mb speed bits in the Auto-Neg
	 * Advertisement Register (Address 4) and the 1000 mb speed bits in
	 * the 1000Base-T control Register (Address 9).
	 */
	mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
	mii_1000t_ctrl_reg &= ~MII_AT001_CR_1000T_SPEED_MASK;

	/*
	 * Translate the configured media type into advertisement bits;
	 * only the gigabit-capable part (athr_l1e) advertises 1000FD.
	 */
	switch (hw->media_type) {
	case MEDIA_TYPE_AUTO_SENSOR:
		mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS |
					MII_AR_10T_FD_CAPS |
					MII_AR_100TX_HD_CAPS |
					MII_AR_100TX_FD_CAPS);
		hw->autoneg_advertised = ADVERTISE_10_HALF |
					 ADVERTISE_10_FULL |
					 ADVERTISE_100_HALF |
					 ADVERTISE_100_FULL;
		if (hw->nic_type == athr_l1e) {
			mii_1000t_ctrl_reg |=
				MII_AT001_CR_1000T_FD_CAPS;
			hw->autoneg_advertised |= ADVERTISE_1000_FULL;
		}
		break;

	case MEDIA_TYPE_100M_FULL:
		mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
		hw->autoneg_advertised = ADVERTISE_100_FULL;
		break;

	case MEDIA_TYPE_100M_HALF:
		mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
		hw->autoneg_advertised = ADVERTISE_100_HALF;
		break;

	case MEDIA_TYPE_10M_FULL:
		mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
		hw->autoneg_advertised = ADVERTISE_10_FULL;
		break;

	default:
		/* unknown media type: fall back to 10M half duplex */
		mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
		hw->autoneg_advertised = ADVERTISE_10_HALF;
		break;
	}

	/* flow control fixed to enable all */
	mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);

	/* cache what we are about to program into the PHY */
	hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
	hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;

	ret_val = atl1e_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
	if (ret_val)
		return ret_val;

	if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
		ret_val = atl1e_write_phy_reg(hw, MII_AT001_CR,
					mii_1000t_ctrl_reg);
		if (ret_val)
			return ret_val;
	}

	return 0;
}
389 | |||
390 | |||
/*
 * atl1e_phy_commit - reset the PHY and make all config validate
 *
 * hw - Struct containing variables accessed by shared code
 *
 * Sets bit 15 and 12 of the MII control register (for F001 bug):
 * reset the PHY, enable auto-negotiation and restart it.
 * Returns 0 on success, the PHY write error code when the MDIO bus
 * stays busy for more than ~25ms (PCIe serdes link presumed down).
 */
int atl1e_phy_commit(struct atl1e_hw *hw)
{
	struct atl1e_adapter *adapter = (struct atl1e_adapter *)hw->adapter;
	struct pci_dev *pdev = adapter->pdev;
	int ret_val;
	u16 phy_data;

	phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;

	ret_val = atl1e_write_phy_reg(hw, MII_BMCR, phy_data);
	if (ret_val) {
		u32 val;
		int i;
		/**************************************
		 * pcie serdes link may be down !
		 * wait up to 25ms for the MDIO unit to go idle before
		 * giving up on this write
		 **************************************/
		for (i = 0; i < 25; i++) {
			msleep(1);
			val = AT_READ_REG(hw, REG_MDIO_CTRL);
			if (!(val & (MDIO_START | MDIO_BUSY)))
				break;
		}

		if (0 != (val & (MDIO_START | MDIO_BUSY))) {
			dev_err(&pdev->dev,
				"pcie linkdown at least for 25ms\n");
			return ret_val;
		}

		/* the bus recovered after i milliseconds; the original
		 * write is still reported as successful below */
		dev_err(&pdev->dev, "pcie linkup after %d ms\n", i);
	}
	return 0;
}
431 | |||
/*
 * atl1e_phy_init - full PHY bring-up
 *
 * hw - Struct containing variables accessed by shared code
 *
 * On an already-configured PHY only (optionally) restarts
 * auto-negotiation.  Otherwise: resets the GPHY core, applies a
 * sequence of vendor debug-register patches, enables the link-change
 * interrupt, programs the advertisement registers and commits the
 * configuration.  Returns 0 on success or the first PHY error code.
 */
int atl1e_phy_init(struct atl1e_hw *hw)
{
	struct atl1e_adapter *adapter = (struct atl1e_adapter *)hw->adapter;
	struct pci_dev *pdev = adapter->pdev;
	s32 ret_val;
	u16 phy_val;

	if (hw->phy_configured) {
		if (hw->re_autoneg) {
			hw->re_autoneg = false;
			return atl1e_restart_autoneg(hw);
		}
		return 0;
	}

	/* RESET GPHY Core */
	AT_WRITE_REGW(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT);
	msleep(2);
	AT_WRITE_REGW(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT |
		      GPHY_CTRL_EXT_RESET);
	msleep(2);

	/* vendor patches via the PHY debug-port registers
	 * (MII_DBG_ADDR selects, MII_DBG_DATA writes); the magic
	 * values below are taken from the vendor driver and are not
	 * otherwise documented here */
	/* p1. enable hibernation mode */
	ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0xB);
	if (ret_val)
		return ret_val;
	ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0xBC00);
	if (ret_val)
		return ret_val;
	/* p2. set Class A/B for all modes */
	ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0);
	if (ret_val)
		return ret_val;
	phy_val = 0x02ef;
	/* remove Class AB */
	/* phy_val = hw->emi_ca ? 0x02ef : 0x02df; */
	ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, phy_val);
	if (ret_val)
		return ret_val;
	/* p3. 10B ??? */
	ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x12);
	if (ret_val)
		return ret_val;
	ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x4C04);
	if (ret_val)
		return ret_val;
	/* p4. 1000T power */
	ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x4);
	if (ret_val)
		return ret_val;
	ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x8BBB);
	if (ret_val)
		return ret_val;

	ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x5);
	if (ret_val)
		return ret_val;
	ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x2C46);
	if (ret_val)
		return ret_val;

	msleep(1);

	/*Enable PHY LinkChange Interrupt */
	ret_val = atl1e_write_phy_reg(hw, MII_INT_CTRL, 0xC00);
	if (ret_val) {
		dev_err(&pdev->dev, "Error enable PHY linkChange Interrupt\n");
		return ret_val;
	}
	/* setup AutoNeg parameters */
	ret_val = atl1e_phy_setup_autoneg_adv(hw);
	if (ret_val) {
		dev_err(&pdev->dev, "Error Setting up Auto-Negotiation\n");
		return ret_val;
	}
	/* SW.Reset & En-Auto-Neg to restart Auto-Neg*/
	dev_dbg(&pdev->dev, "Restarting Auto-Neg");
	ret_val = atl1e_phy_commit(hw);
	if (ret_val) {
		dev_err(&pdev->dev, "Error Resetting the phy");
		return ret_val;
	}

	hw->phy_configured = true;

	return 0;
}
520 | |||
521 | /* | ||
522 | * Reset the transmit and receive units; mask and clear all interrupts. | ||
523 | * hw - Struct containing variables accessed by shared code | ||
524 | * return : 0 or idle status (if error) | ||
525 | */ | ||
526 | int atl1e_reset_hw(struct atl1e_hw *hw) | ||
527 | { | ||
528 | struct atl1e_adapter *adapter = (struct atl1e_adapter *)hw->adapter; | ||
529 | struct pci_dev *pdev = adapter->pdev; | ||
530 | |||
531 | u32 idle_status_data = 0; | ||
532 | u16 pci_cfg_cmd_word = 0; | ||
533 | int timeout = 0; | ||
534 | |||
535 | /* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */ | ||
536 | pci_read_config_word(pdev, PCI_REG_COMMAND, &pci_cfg_cmd_word); | ||
537 | if ((pci_cfg_cmd_word & (CMD_IO_SPACE | | ||
538 | CMD_MEMORY_SPACE | CMD_BUS_MASTER)) | ||
539 | != (CMD_IO_SPACE | CMD_MEMORY_SPACE | CMD_BUS_MASTER)) { | ||
540 | pci_cfg_cmd_word |= (CMD_IO_SPACE | | ||
541 | CMD_MEMORY_SPACE | CMD_BUS_MASTER); | ||
542 | pci_write_config_word(pdev, PCI_REG_COMMAND, pci_cfg_cmd_word); | ||
543 | } | ||
544 | |||
545 | /* | ||
546 | * Issue Soft Reset to the MAC. This will reset the chip's | ||
547 | * transmit, receive, DMA. It will not effect | ||
548 | * the current PCI configuration. The global reset bit is self- | ||
549 | * clearing, and should clear within a microsecond. | ||
550 | */ | ||
551 | AT_WRITE_REG(hw, REG_MASTER_CTRL, | ||
552 | MASTER_CTRL_LED_MODE | MASTER_CTRL_SOFT_RST); | ||
553 | wmb(); | ||
554 | msleep(1); | ||
555 | |||
556 | /* Wait at least 10ms for All module to be Idle */ | ||
557 | for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) { | ||
558 | idle_status_data = AT_READ_REG(hw, REG_IDLE_STATUS); | ||
559 | if (idle_status_data == 0) | ||
560 | break; | ||
561 | msleep(1); | ||
562 | cpu_relax(); | ||
563 | } | ||
564 | |||
565 | if (timeout >= AT_HW_MAX_IDLE_DELAY) { | ||
566 | dev_err(&pdev->dev, | ||
567 | "MAC state machine cann't be idle since" | ||
568 | " disabled for 10ms second\n"); | ||
569 | return AT_ERR_TIMEOUT; | ||
570 | } | ||
571 | |||
572 | return 0; | ||
573 | } | ||
574 | |||
575 | |||
576 | /* | ||
577 | * Performs basic configuration of the adapter. | ||
578 | * | ||
579 | * hw - Struct containing variables accessed by shared code | ||
580 | * Assumes that the controller has previously been reset and is in a | ||
581 | * post-reset uninitialized state. Initializes multicast table, | ||
582 | * and Calls routines to setup link | ||
583 | * Leaves the transmit and receive units disabled and uninitialized. | ||
584 | */ | ||
585 | int atl1e_init_hw(struct atl1e_hw *hw) | ||
586 | { | ||
587 | s32 ret_val = 0; | ||
588 | |||
589 | atl1e_init_pcie(hw); | ||
590 | |||
591 | /* Zero out the Multicast HASH table */ | ||
592 | /* clear the old settings from the multicast hash table */ | ||
593 | AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0); | ||
594 | AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0); | ||
595 | |||
596 | ret_val = atl1e_phy_init(hw); | ||
597 | |||
598 | return ret_val; | ||
599 | } | ||
600 | |||
601 | /* | ||
602 | * Detects the current speed and duplex settings of the hardware. | ||
603 | * | ||
604 | * hw - Struct containing variables accessed by shared code | ||
605 | * speed - Speed of the connection | ||
606 | * duplex - Duplex setting of the connection | ||
607 | */ | ||
608 | int atl1e_get_speed_and_duplex(struct atl1e_hw *hw, u16 *speed, u16 *duplex) | ||
609 | { | ||
610 | int err; | ||
611 | u16 phy_data; | ||
612 | |||
613 | /* Read PHY Specific Status Register (17) */ | ||
614 | err = atl1e_read_phy_reg(hw, MII_AT001_PSSR, &phy_data); | ||
615 | if (err) | ||
616 | return err; | ||
617 | |||
618 | if (!(phy_data & MII_AT001_PSSR_SPD_DPLX_RESOLVED)) | ||
619 | return AT_ERR_PHY_RES; | ||
620 | |||
621 | switch (phy_data & MII_AT001_PSSR_SPEED) { | ||
622 | case MII_AT001_PSSR_1000MBS: | ||
623 | *speed = SPEED_1000; | ||
624 | break; | ||
625 | case MII_AT001_PSSR_100MBS: | ||
626 | *speed = SPEED_100; | ||
627 | break; | ||
628 | case MII_AT001_PSSR_10MBS: | ||
629 | *speed = SPEED_10; | ||
630 | break; | ||
631 | default: | ||
632 | return AT_ERR_PHY_SPEED; | ||
633 | break; | ||
634 | } | ||
635 | |||
636 | if (phy_data & MII_AT001_PSSR_DPLX) | ||
637 | *duplex = FULL_DUPLEX; | ||
638 | else | ||
639 | *duplex = HALF_DUPLEX; | ||
640 | |||
641 | return 0; | ||
642 | } | ||
643 | |||
644 | int atl1e_restart_autoneg(struct atl1e_hw *hw) | ||
645 | { | ||
646 | int err = 0; | ||
647 | |||
648 | err = atl1e_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg); | ||
649 | if (err) | ||
650 | return err; | ||
651 | |||
652 | if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) { | ||
653 | err = atl1e_write_phy_reg(hw, MII_AT001_CR, | ||
654 | hw->mii_1000t_ctrl_reg); | ||
655 | if (err) | ||
656 | return err; | ||
657 | } | ||
658 | |||
659 | err = atl1e_write_phy_reg(hw, MII_BMCR, | ||
660 | MII_CR_RESET | MII_CR_AUTO_NEG_EN | | ||
661 | MII_CR_RESTART_AUTO_NEG); | ||
662 | return err; | ||
663 | } | ||
664 | |||
diff --git a/drivers/net/atl1e/atl1e_hw.h b/drivers/net/atl1e/atl1e_hw.h new file mode 100644 index 000000000000..5ea2f4d86cfa --- /dev/null +++ b/drivers/net/atl1e/atl1e_hw.h | |||
@@ -0,0 +1,793 @@ | |||
1 | /* | ||
2 | * Copyright(c) 2007 Atheros Corporation. All rights reserved. | ||
3 | * | ||
4 | * Derived from Intel e1000 driver | ||
5 | * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License as published by the Free | ||
9 | * Software Foundation; either version 2 of the License, or (at your option) | ||
10 | * any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along with | ||
18 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
19 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
20 | */ | ||
21 | |||
#ifndef _ATHL1E_HW_H_
#define _ATHL1E_HW_H_

#include <linux/types.h>
#include <linux/mii.h>

struct atl1e_adapter;
struct atl1e_hw;

/* function prototype */
/* NOTE(review): several routines are declared here with s32/u32
 * return types while the .c definitions use plain int -- compatible
 * because the kernel typedefs s32 as int, but worth unifying. */
s32 atl1e_reset_hw(struct atl1e_hw *hw);
s32 atl1e_read_mac_addr(struct atl1e_hw *hw);
s32 atl1e_init_hw(struct atl1e_hw *hw);
s32 atl1e_phy_commit(struct atl1e_hw *hw);
s32 atl1e_get_speed_and_duplex(struct atl1e_hw *hw, u16 *speed, u16 *duplex);
u32 atl1e_auto_get_fc(struct atl1e_adapter *adapter, u16 duplex);
u32 atl1e_hash_mc_addr(struct atl1e_hw *hw, u8 *mc_addr);
void atl1e_hash_set(struct atl1e_hw *hw, u32 hash_value);
s32 atl1e_read_phy_reg(struct atl1e_hw *hw, u16 reg_addr, u16 *phy_data);
s32 atl1e_write_phy_reg(struct atl1e_hw *hw, u32 reg_addr, u16 phy_data);
s32 atl1e_validate_mdi_setting(struct atl1e_hw *hw);
void atl1e_hw_set_mac_addr(struct atl1e_hw *hw);
bool atl1e_read_eeprom(struct atl1e_hw *hw, u32 offset, u32 *p_value);
bool atl1e_write_eeprom(struct atl1e_hw *hw, u32 offset, u32 value);
s32 atl1e_phy_enter_power_saving(struct atl1e_hw *hw);
s32 atl1e_phy_leave_power_saving(struct atl1e_hw *hw);
s32 atl1e_phy_init(struct atl1e_hw *hw);
int atl1e_check_eeprom_exist(struct atl1e_hw *hw);
void atl1e_force_ps(struct atl1e_hw *hw);
s32 atl1e_restart_autoneg(struct atl1e_hw *hw);
52 | |||
53 | /* register definition */ | ||
54 | #define REG_PM_CTRLSTAT 0x44 | ||
55 | |||
56 | #define REG_PCIE_CAP_LIST 0x58 | ||
57 | |||
58 | #define REG_DEVICE_CAP 0x5C | ||
59 | #define DEVICE_CAP_MAX_PAYLOAD_MASK 0x7 | ||
60 | #define DEVICE_CAP_MAX_PAYLOAD_SHIFT 0 | ||
61 | |||
62 | #define REG_DEVICE_CTRL 0x60 | ||
63 | #define DEVICE_CTRL_MAX_PAYLOAD_MASK 0x7 | ||
64 | #define DEVICE_CTRL_MAX_PAYLOAD_SHIFT 5 | ||
65 | #define DEVICE_CTRL_MAX_RREQ_SZ_MASK 0x7 | ||
66 | #define DEVICE_CTRL_MAX_RREQ_SZ_SHIFT 12 | ||
67 | |||
68 | #define REG_VPD_CAP 0x6C | ||
69 | #define VPD_CAP_ID_MASK 0xff | ||
70 | #define VPD_CAP_ID_SHIFT 0 | ||
71 | #define VPD_CAP_NEXT_PTR_MASK 0xFF | ||
72 | #define VPD_CAP_NEXT_PTR_SHIFT 8 | ||
73 | #define VPD_CAP_VPD_ADDR_MASK 0x7FFF | ||
74 | #define VPD_CAP_VPD_ADDR_SHIFT 16 | ||
75 | #define VPD_CAP_VPD_FLAG 0x80000000 | ||
76 | |||
77 | #define REG_VPD_DATA 0x70 | ||
78 | |||
79 | #define REG_SPI_FLASH_CTRL 0x200 | ||
80 | #define SPI_FLASH_CTRL_STS_NON_RDY 0x1 | ||
81 | #define SPI_FLASH_CTRL_STS_WEN 0x2 | ||
82 | #define SPI_FLASH_CTRL_STS_WPEN 0x80 | ||
83 | #define SPI_FLASH_CTRL_DEV_STS_MASK 0xFF | ||
84 | #define SPI_FLASH_CTRL_DEV_STS_SHIFT 0 | ||
85 | #define SPI_FLASH_CTRL_INS_MASK 0x7 | ||
86 | #define SPI_FLASH_CTRL_INS_SHIFT 8 | ||
87 | #define SPI_FLASH_CTRL_START 0x800 | ||
88 | #define SPI_FLASH_CTRL_EN_VPD 0x2000 | ||
89 | #define SPI_FLASH_CTRL_LDSTART 0x8000 | ||
90 | #define SPI_FLASH_CTRL_CS_HI_MASK 0x3 | ||
91 | #define SPI_FLASH_CTRL_CS_HI_SHIFT 16 | ||
92 | #define SPI_FLASH_CTRL_CS_HOLD_MASK 0x3 | ||
93 | #define SPI_FLASH_CTRL_CS_HOLD_SHIFT 18 | ||
94 | #define SPI_FLASH_CTRL_CLK_LO_MASK 0x3 | ||
95 | #define SPI_FLASH_CTRL_CLK_LO_SHIFT 20 | ||
96 | #define SPI_FLASH_CTRL_CLK_HI_MASK 0x3 | ||
97 | #define SPI_FLASH_CTRL_CLK_HI_SHIFT 22 | ||
98 | #define SPI_FLASH_CTRL_CS_SETUP_MASK 0x3 | ||
99 | #define SPI_FLASH_CTRL_CS_SETUP_SHIFT 24 | ||
100 | #define SPI_FLASH_CTRL_EROM_PGSZ_MASK 0x3 | ||
101 | #define SPI_FLASH_CTRL_EROM_PGSZ_SHIFT 26 | ||
102 | #define SPI_FLASH_CTRL_WAIT_READY 0x10000000 | ||
103 | |||
104 | #define REG_SPI_ADDR 0x204 | ||
105 | |||
106 | #define REG_SPI_DATA 0x208 | ||
107 | |||
108 | #define REG_SPI_FLASH_CONFIG 0x20C | ||
109 | #define SPI_FLASH_CONFIG_LD_ADDR_MASK 0xFFFFFF | ||
110 | #define SPI_FLASH_CONFIG_LD_ADDR_SHIFT 0 | ||
111 | #define SPI_FLASH_CONFIG_VPD_ADDR_MASK 0x3 | ||
112 | #define SPI_FLASH_CONFIG_VPD_ADDR_SHIFT 24 | ||
113 | #define SPI_FLASH_CONFIG_LD_EXIST 0x4000000 | ||
114 | |||
115 | |||
116 | #define REG_SPI_FLASH_OP_PROGRAM 0x210 | ||
117 | #define REG_SPI_FLASH_OP_SC_ERASE 0x211 | ||
118 | #define REG_SPI_FLASH_OP_CHIP_ERASE 0x212 | ||
119 | #define REG_SPI_FLASH_OP_RDID 0x213 | ||
120 | #define REG_SPI_FLASH_OP_WREN 0x214 | ||
121 | #define REG_SPI_FLASH_OP_RDSR 0x215 | ||
122 | #define REG_SPI_FLASH_OP_WRSR 0x216 | ||
123 | #define REG_SPI_FLASH_OP_READ 0x217 | ||
124 | |||
#define REG_TWSI_CTRL                   0x218
#define TWSI_CTRL_LD_OFFSET_MASK        0xFF
#define TWSI_CTRL_LD_OFFSET_SHIFT       0
#define TWSI_CTRL_LD_SLV_ADDR_MASK      0x7
#define TWSI_CTRL_LD_SLV_ADDR_SHIFT     8
#define TWSI_CTRL_SW_LDSTART            0x800
#define TWSI_CTRL_HW_LDSTART            0x1000
/* fixed: was the invalid token "0x0x7F", which only compiled because
 * this macro is never expanded anywhere */
#define TWSI_CTRL_SMB_SLV_ADDR_MASK     0x7F
#define TWSI_CTRL_SMB_SLV_ADDR_SHIFT    15
#define TWSI_CTRL_LD_EXIST              0x400000
#define TWSI_CTRL_READ_FREQ_SEL_MASK    0x3
#define TWSI_CTRL_READ_FREQ_SEL_SHIFT   23
#define TWSI_CTRL_FREQ_SEL_100K         0
#define TWSI_CTRL_FREQ_SEL_200K         1
#define TWSI_CTRL_FREQ_SEL_300K         2
#define TWSI_CTRL_FREQ_SEL_400K         3
/* NOTE(review): dangling value-less define kept for compatibility */
#define TWSI_CTRL_SMB_SLV_ADDR
#define TWSI_CTRL_WRITE_FREQ_SEL_MASK   0x3
#define TWSI_CTRL_WRITE_FREQ_SEL_SHIFT  24
144 | |||
145 | |||
146 | #define REG_PCIE_DEV_MISC_CTRL 0x21C | ||
147 | #define PCIE_DEV_MISC_CTRL_EXT_PIPE 0x2 | ||
148 | #define PCIE_DEV_MISC_CTRL_RETRY_BUFDIS 0x1 | ||
149 | #define PCIE_DEV_MISC_CTRL_SPIROM_EXIST 0x4 | ||
150 | #define PCIE_DEV_MISC_CTRL_SERDES_ENDIAN 0x8 | ||
151 | #define PCIE_DEV_MISC_CTRL_SERDES_SEL_DIN 0x10 | ||
152 | |||
153 | #define REG_PCIE_PHYMISC 0x1000 | ||
154 | #define PCIE_PHYMISC_FORCE_RCV_DET 0x4 | ||
155 | |||
156 | #define REG_LTSSM_TEST_MODE 0x12FC | ||
157 | #define LTSSM_TEST_MODE_DEF 0xE000 | ||
158 | |||
159 | /* Selene Master Control Register */ | ||
160 | #define REG_MASTER_CTRL 0x1400 | ||
161 | #define MASTER_CTRL_SOFT_RST 0x1 | ||
162 | #define MASTER_CTRL_MTIMER_EN 0x2 | ||
163 | #define MASTER_CTRL_ITIMER_EN 0x4 | ||
164 | #define MASTER_CTRL_MANUAL_INT 0x8 | ||
165 | #define MASTER_CTRL_ITIMER2_EN 0x20 | ||
166 | #define MASTER_CTRL_INT_RDCLR 0x40 | ||
167 | #define MASTER_CTRL_LED_MODE 0x200 | ||
168 | #define MASTER_CTRL_REV_NUM_SHIFT 16 | ||
169 | #define MASTER_CTRL_REV_NUM_MASK 0xff | ||
170 | #define MASTER_CTRL_DEV_ID_SHIFT 24 | ||
171 | #define MASTER_CTRL_DEV_ID_MASK 0xff | ||
172 | |||
173 | /* Timer Initial Value Register */ | ||
174 | #define REG_MANUAL_TIMER_INIT 0x1404 | ||
175 | |||
176 | |||
177 | /* IRQ ModeratorTimer Initial Value Register */ | ||
178 | #define REG_IRQ_MODU_TIMER_INIT 0x1408 /* w */ | ||
179 | #define REG_IRQ_MODU_TIMER2_INIT 0x140A /* w */ | ||
180 | |||
181 | |||
182 | #define REG_GPHY_CTRL 0x140C | ||
183 | #define GPHY_CTRL_EXT_RESET 1 | ||
184 | #define GPHY_CTRL_PIPE_MOD 2 | ||
185 | #define GPHY_CTRL_TEST_MODE_MASK 3 | ||
186 | #define GPHY_CTRL_TEST_MODE_SHIFT 2 | ||
187 | #define GPHY_CTRL_BERT_START 0x10 | ||
188 | #define GPHY_CTRL_GATE_25M_EN 0x20 | ||
189 | #define GPHY_CTRL_LPW_EXIT 0x40 | ||
190 | #define GPHY_CTRL_PHY_IDDQ 0x80 | ||
191 | #define GPHY_CTRL_PHY_IDDQ_DIS 0x100 | ||
192 | #define GPHY_CTRL_PCLK_SEL_DIS 0x200 | ||
193 | #define GPHY_CTRL_HIB_EN 0x400 | ||
194 | #define GPHY_CTRL_HIB_PULSE 0x800 | ||
195 | #define GPHY_CTRL_SEL_ANA_RST 0x1000 | ||
196 | #define GPHY_CTRL_PHY_PLL_ON 0x2000 | ||
197 | #define GPHY_CTRL_PWDOWN_HW 0x4000 | ||
198 | #define GPHY_CTRL_DEFAULT (\ | ||
199 | GPHY_CTRL_PHY_PLL_ON |\ | ||
200 | GPHY_CTRL_SEL_ANA_RST |\ | ||
201 | GPHY_CTRL_HIB_PULSE |\ | ||
202 | GPHY_CTRL_HIB_EN) | ||
203 | |||
204 | #define GPHY_CTRL_PW_WOL_DIS (\ | ||
205 | GPHY_CTRL_PHY_PLL_ON |\ | ||
206 | GPHY_CTRL_SEL_ANA_RST |\ | ||
207 | GPHY_CTRL_HIB_PULSE |\ | ||
208 | GPHY_CTRL_HIB_EN |\ | ||
209 | GPHY_CTRL_PWDOWN_HW |\ | ||
210 | GPHY_CTRL_PCLK_SEL_DIS |\ | ||
211 | GPHY_CTRL_PHY_IDDQ) | ||
212 | |||
213 | /* IRQ Anti-Lost Timer Initial Value Register */ | ||
214 | #define REG_CMBDISDMA_TIMER 0x140E | ||
215 | |||
216 | |||
217 | /* Block IDLE Status Register */ | ||
218 | #define REG_IDLE_STATUS 0x1410 | ||
219 | #define IDLE_STATUS_RXMAC 1 /* 1: RXMAC state machine is in non-IDLE state. 0: RXMAC is idling */ | ||
220 | #define IDLE_STATUS_TXMAC 2 /* 1: TXMAC state machine is in non-IDLE state. 0: TXMAC is idling */ | ||
221 | #define IDLE_STATUS_RXQ 4 /* 1: RXQ state machine is in non-IDLE state. 0: RXQ is idling */ | ||
222 | #define IDLE_STATUS_TXQ 8 /* 1: TXQ state machine is in non-IDLE state. 0: TXQ is idling */ | ||
223 | #define IDLE_STATUS_DMAR 0x10 /* 1: DMAR state machine is in non-IDLE state. 0: DMAR is idling */ | ||
224 | #define IDLE_STATUS_DMAW 0x20 /* 1: DMAW state machine is in non-IDLE state. 0: DMAW is idling */ | ||
225 | #define IDLE_STATUS_SMB 0x40 /* 1: SMB state machine is in non-IDLE state. 0: SMB is idling */ | ||
226 | #define IDLE_STATUS_CMB 0x80 /* 1: CMB state machine is in non-IDLE state. 0: CMB is idling */ | ||
227 | |||
228 | /* MDIO Control Register */ | ||
229 | #define REG_MDIO_CTRL 0x1414 | ||
230 | #define MDIO_DATA_MASK 0xffff /* On MDIO write, the 16-bit control data to write to PHY MII management register */ | ||
231 | #define MDIO_DATA_SHIFT 0 /* On MDIO read, the 16-bit status data that was read from the PHY MII management register*/ | ||
232 | #define MDIO_REG_ADDR_MASK 0x1f /* MDIO register address */ | ||
233 | #define MDIO_REG_ADDR_SHIFT 16 | ||
234 | #define MDIO_RW 0x200000 /* 1: read, 0: write */ | ||
235 | #define MDIO_SUP_PREAMBLE 0x400000 /* Suppress preamble */ | ||
236 | #define MDIO_START 0x800000 /* Write 1 to initiate the MDIO master. And this bit is self cleared after one cycle*/ | ||
237 | #define MDIO_CLK_SEL_SHIFT 24 | ||
238 | #define MDIO_CLK_25_4 0 | ||
239 | #define MDIO_CLK_25_6 2 | ||
240 | #define MDIO_CLK_25_8 3 | ||
241 | #define MDIO_CLK_25_10 4 | ||
242 | #define MDIO_CLK_25_14 5 | ||
243 | #define MDIO_CLK_25_20 6 | ||
244 | #define MDIO_CLK_25_28 7 | ||
245 | #define MDIO_BUSY 0x8000000 | ||
246 | #define MDIO_AP_EN 0x10000000 | ||
247 | #define MDIO_WAIT_TIMES 10 | ||
248 | |||
249 | /* MII PHY Status Register */ | ||
250 | #define REG_PHY_STATUS 0x1418 | ||
251 | #define PHY_STATUS_100M 0x20000 | ||
252 | #define PHY_STATUS_EMI_CA 0x40000 | ||
253 | |||
254 | /* BIST Control and Status Register0 (for the Packet Memory) */ | ||
255 | #define REG_BIST0_CTRL 0x141c | ||
256 | #define BIST0_NOW 0x1 /* 1: To trigger BIST0 logic. This bit stays high during the */ | ||
257 | /* BIST process and reset to zero when BIST is done */ | ||
258 | #define BIST0_SRAM_FAIL 0x2 /* 1: The SRAM failure is un-repairable because it has address */ | ||
259 | /* decoder failure or more than 1 cell stuck-to-x failure */ | ||
260 | #define BIST0_FUSE_FLAG 0x4 /* 1: Indicating one cell has been fixed */ | ||
261 | |||
262 | /* BIST Control and Status Register1(for the retry buffer of PCI Express) */ | ||
263 | #define REG_BIST1_CTRL 0x1420 | ||
264 | #define BIST1_NOW 0x1 /* 1: To trigger BIST1 logic. This bit stays high during the */ | ||
265 | /* BIST process and reset to zero when BIST is done */ | ||
266 | #define BIST1_SRAM_FAIL 0x2 /* 1: The SRAM failure is un-repairable because it has address */ | ||
267 | /* decoder failure or more than 1 cell stuck-to-x failure.*/ | ||
268 | #define BIST1_FUSE_FLAG 0x4 | ||
269 | |||
270 | /* SerDes Lock Detect Control and Status Register */ | ||
271 | #define REG_SERDES_LOCK 0x1424 | ||
272 | #define SERDES_LOCK_DETECT 1 /* 1: SerDes lock detected . This signal comes from Analog SerDes */ | ||
273 | #define SERDES_LOCK_DETECT_EN 2 /* 1: Enable SerDes Lock detect function */ | ||
274 | |||
275 | /* MAC Control Register */ | ||
276 | #define REG_MAC_CTRL 0x1480 | ||
277 | #define MAC_CTRL_TX_EN 1 /* 1: Transmit Enable */ | ||
278 | #define MAC_CTRL_RX_EN 2 /* 1: Receive Enable */ | ||
279 | #define MAC_CTRL_TX_FLOW 4 /* 1: Transmit Flow Control Enable */ | ||
280 | #define MAC_CTRL_RX_FLOW 8 /* 1: Receive Flow Control Enable */ | ||
281 | #define MAC_CTRL_LOOPBACK 0x10 /* 1: Loop back at G/MII Interface */ | ||
282 | #define MAC_CTRL_DUPLX 0x20 /* 1: Full-duplex mode 0: Half-duplex mode */ | ||
283 | #define MAC_CTRL_ADD_CRC 0x40 /* 1: Instruct MAC to attach CRC on all egress Ethernet frames */ | ||
284 | #define MAC_CTRL_PAD 0x80 /* 1: Instruct MAC to pad short frames to 60-bytes, and then attach CRC. This bit has higher priority over CRC_EN */ | ||
285 | #define MAC_CTRL_LENCHK 0x100 /* 1: Instruct MAC to check if length field matches the real packet length */ | ||
286 | #define MAC_CTRL_HUGE_EN 0x200 /* 1: receive Jumbo frame enable */ | ||
287 | #define MAC_CTRL_PRMLEN_SHIFT 10 /* Preamble length */ | ||
288 | #define MAC_CTRL_PRMLEN_MASK 0xf | ||
289 | #define MAC_CTRL_RMV_VLAN 0x4000 /* 1: to remove VLAN Tag automatically from all receive packets */ | ||
290 | #define MAC_CTRL_PROMIS_EN 0x8000 /* 1: Promiscuous Mode Enable */ | ||
291 | #define MAC_CTRL_TX_PAUSE 0x10000 /* 1: transmit test pause */ | ||
292 | #define MAC_CTRL_SCNT 0x20000 /* 1: shortcut slot time counter */ | ||
293 | #define MAC_CTRL_SRST_TX 0x40000 /* 1: synchronized reset Transmit MAC module */ | ||
294 | #define MAC_CTRL_TX_SIMURST 0x80000 /* 1: transmit simulation reset */ | ||
295 | #define MAC_CTRL_SPEED_SHIFT 20 /* 10: gigabit 01:10M/100M */ | ||
296 | #define MAC_CTRL_SPEED_MASK 0x300000 | ||
297 | #define MAC_CTRL_SPEED_1000 2 | ||
298 | #define MAC_CTRL_SPEED_10_100 1 | ||
299 | #define MAC_CTRL_DBG_TX_BKPRESURE 0x400000 /* 1: transmit maximum backoff (half-duplex test bit) */ | ||
300 | #define MAC_CTRL_TX_HUGE 0x800000 /* 1: transmit huge enable */ | ||
301 | #define MAC_CTRL_RX_CHKSUM_EN 0x1000000 /* 1: RX checksum enable */ | ||
302 | #define MAC_CTRL_MC_ALL_EN 0x2000000 /* 1: upload all multicast frame without error to system */ | ||
303 | #define MAC_CTRL_BC_EN 0x4000000 /* 1: upload all broadcast frame without error to system */ | ||
304 | #define MAC_CTRL_DBG 0x8000000 /* 1: upload all received frame to system (Debug Mode) */ | ||
305 | |||
306 | /* MAC IPG/IFG Control Register */ | ||
307 | #define REG_MAC_IPG_IFG 0x1484 | ||
308 | #define MAC_IPG_IFG_IPGT_SHIFT 0 /* Desired back to back inter-packet gap. The default is 96-bit time */ | ||
309 | #define MAC_IPG_IFG_IPGT_MASK 0x7f | ||
310 | #define MAC_IPG_IFG_MIFG_SHIFT 8 /* Minimum number of IFG to enforce in between RX frames */ | ||
311 | #define MAC_IPG_IFG_MIFG_MASK 0xff /* Frame gap below such IFG is dropped */ | ||
312 | #define MAC_IPG_IFG_IPGR1_SHIFT 16 /* 64bit Carrier-Sense window */ | ||
313 | #define MAC_IPG_IFG_IPGR1_MASK 0x7f | ||
314 | #define MAC_IPG_IFG_IPGR2_SHIFT 24 /* 96-bit IPG window */ | ||
315 | #define MAC_IPG_IFG_IPGR2_MASK 0x7f | ||
316 | |||
317 | /* MAC STATION ADDRESS */ | ||
318 | #define REG_MAC_STA_ADDR 0x1488 | ||
319 | |||
320 | /* Hash table for multicast address */ | ||
321 | #define REG_RX_HASH_TABLE 0x1490 | ||
322 | |||
323 | |||
324 | /* MAC Half-Duplex Control Register */ | ||
325 | #define REG_MAC_HALF_DUPLX_CTRL 0x1498 | ||
326 | #define MAC_HALF_DUPLX_CTRL_LCOL_SHIFT 0 /* Collision Window */ | ||
327 | #define MAC_HALF_DUPLX_CTRL_LCOL_MASK 0x3ff | ||
328 | #define MAC_HALF_DUPLX_CTRL_RETRY_SHIFT 12 /* Retransmission maximum, afterwards the packet will be discarded */ | ||
329 | #define MAC_HALF_DUPLX_CTRL_RETRY_MASK 0xf | ||
330 | #define MAC_HALF_DUPLX_CTRL_EXC_DEF_EN 0x10000 /* 1: Allow the transmission of a packet which has been excessively deferred */ | ||
331 | #define MAC_HALF_DUPLX_CTRL_NO_BACK_C 0x20000 /* 1: No back-off on collision, immediately start the retransmission */ | ||
332 | #define MAC_HALF_DUPLX_CTRL_NO_BACK_P 0x40000 /* 1: No back-off on backpressure, immediately start the transmission after back pressure */ | ||
333 | #define MAC_HALF_DUPLX_CTRL_ABEBE 0x80000 /* 1: Alternative Binary Exponential Back-off Enabled */ | ||
334 | #define MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT 20 /* Maximum binary exponential number */ | ||
335 | #define MAC_HALF_DUPLX_CTRL_ABEBT_MASK 0xf | ||
336 | #define MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT 24 /* IPG to start JAM for collision based flow control in half-duplex */ | ||
337 | #define MAC_HALF_DUPLX_CTRL_JAMIPG_MASK 0xf /* mode. In unit of 8-bit time */ | ||
338 | |||
339 | /* Maximum Frame Length Control Register */ | ||
340 | #define REG_MTU 0x149c | ||
341 | |||
342 | /* Wake-On-Lan control register */ | ||
343 | #define REG_WOL_CTRL 0x14a0 | ||
344 | #define WOL_PATTERN_EN 0x00000001 | ||
345 | #define WOL_PATTERN_PME_EN 0x00000002 | ||
346 | #define WOL_MAGIC_EN 0x00000004 | ||
347 | #define WOL_MAGIC_PME_EN 0x00000008 | ||
348 | #define WOL_LINK_CHG_EN 0x00000010 | ||
349 | #define WOL_LINK_CHG_PME_EN 0x00000020 | ||
350 | #define WOL_PATTERN_ST 0x00000100 | ||
351 | #define WOL_MAGIC_ST 0x00000200 | ||
352 | #define WOL_LINKCHG_ST 0x00000400 | ||
353 | #define WOL_CLK_SWITCH_EN 0x00008000 | ||
354 | #define WOL_PT0_EN 0x00010000 | ||
355 | #define WOL_PT1_EN 0x00020000 | ||
356 | #define WOL_PT2_EN 0x00040000 | ||
357 | #define WOL_PT3_EN 0x00080000 | ||
358 | #define WOL_PT4_EN 0x00100000 | ||
359 | #define WOL_PT5_EN 0x00200000 | ||
360 | #define WOL_PT6_EN 0x00400000 | ||
361 | /* WOL Length ( 2 DWORD ) */ | ||
362 | #define REG_WOL_PATTERN_LEN 0x14a4 | ||
363 | #define WOL_PT_LEN_MASK 0x7f | ||
364 | #define WOL_PT0_LEN_SHIFT 0 | ||
365 | #define WOL_PT1_LEN_SHIFT 8 | ||
366 | #define WOL_PT2_LEN_SHIFT 16 | ||
367 | #define WOL_PT3_LEN_SHIFT 24 | ||
368 | #define WOL_PT4_LEN_SHIFT 0 | ||
369 | #define WOL_PT5_LEN_SHIFT 8 | ||
370 | #define WOL_PT6_LEN_SHIFT 16 | ||
371 | |||
372 | /* Internal SRAM Partition Register */ | ||
373 | #define REG_SRAM_TRD_ADDR 0x1518 | ||
374 | #define REG_SRAM_TRD_LEN 0x151C | ||
375 | #define REG_SRAM_RXF_ADDR 0x1520 | ||
376 | #define REG_SRAM_RXF_LEN 0x1524 | ||
377 | #define REG_SRAM_TXF_ADDR 0x1528 | ||
378 | #define REG_SRAM_TXF_LEN 0x152C | ||
379 | #define REG_SRAM_TCPH_ADDR 0x1530 | ||
380 | #define REG_SRAM_PKTH_ADDR 0x1532 | ||
381 | |||
382 | /* Load Ptr Register */ | ||
383 | #define REG_LOAD_PTR 0x1534 | ||
384 | |||
385 | /* | ||
386 | * Software sets this bit after the initialization of the head and tail | ||
387 | * addresses of all descriptors, as well as the following descriptor | ||
388 | * control register, which triggers each function block to load the head | ||
389 | * pointer to prepare for the operation. This bit is then self-cleared | ||
390 | * after one cycle. | ||
391 | */ | ||
391 | |||
392 | /* Descriptor Control register */ | ||
393 | #define REG_RXF3_BASE_ADDR_HI 0x153C | ||
394 | #define REG_DESC_BASE_ADDR_HI 0x1540 | ||
395 | #define REG_RXF0_BASE_ADDR_HI 0x1540 /* share with DESC BASE ADDR HI */ | ||
396 | #define REG_HOST_RXF0_PAGE0_LO 0x1544 | ||
397 | #define REG_HOST_RXF0_PAGE1_LO 0x1548 | ||
398 | #define REG_TPD_BASE_ADDR_LO 0x154C | ||
399 | #define REG_RXF1_BASE_ADDR_HI 0x1550 | ||
400 | #define REG_RXF2_BASE_ADDR_HI 0x1554 | ||
401 | #define REG_HOST_RXFPAGE_SIZE 0x1558 | ||
402 | #define REG_TPD_RING_SIZE 0x155C | ||
403 | /* RSS about */ | ||
404 | #define REG_RSS_KEY0 0x14B0 | ||
405 | #define REG_RSS_KEY1 0x14B4 | ||
406 | #define REG_RSS_KEY2 0x14B8 | ||
407 | #define REG_RSS_KEY3 0x14BC | ||
408 | #define REG_RSS_KEY4 0x14C0 | ||
409 | #define REG_RSS_KEY5 0x14C4 | ||
410 | #define REG_RSS_KEY6 0x14C8 | ||
411 | #define REG_RSS_KEY7 0x14CC | ||
412 | #define REG_RSS_KEY8 0x14D0 | ||
413 | #define REG_RSS_KEY9 0x14D4 | ||
414 | #define REG_IDT_TABLE4 0x14E0 | ||
415 | #define REG_IDT_TABLE5 0x14E4 | ||
416 | #define REG_IDT_TABLE6 0x14E8 | ||
417 | #define REG_IDT_TABLE7 0x14EC | ||
418 | #define REG_IDT_TABLE0 0x1560 | ||
419 | #define REG_IDT_TABLE1 0x1564 | ||
420 | #define REG_IDT_TABLE2 0x1568 | ||
421 | #define REG_IDT_TABLE3 0x156C | ||
422 | #define REG_IDT_TABLE REG_IDT_TABLE0 | ||
423 | #define REG_RSS_HASH_VALUE 0x1570 | ||
424 | #define REG_RSS_HASH_FLAG 0x1574 | ||
425 | #define REG_BASE_CPU_NUMBER 0x157C | ||
426 | |||
427 | |||
428 | /* TXQ Control Register */ | ||
429 | #define REG_TXQ_CTRL 0x1580 | ||
430 | #define TXQ_CTRL_NUM_TPD_BURST_MASK 0xF | ||
431 | #define TXQ_CTRL_NUM_TPD_BURST_SHIFT 0 | ||
432 | #define TXQ_CTRL_EN 0x20 /* 1: Enable TXQ */ | ||
433 | #define TXQ_CTRL_ENH_MODE 0x40 /* Performance enhancement mode, in which up to two back-to-back DMA read commands might be dispatched. */ | ||
434 | #define TXQ_CTRL_TXF_BURST_NUM_SHIFT 16 /* Number of data byte to read in a cache-aligned burst. Each SRAM entry is 8-byte in length. */ | ||
435 | #define TXQ_CTRL_TXF_BURST_NUM_MASK 0xffff | ||
436 | |||
437 | /* Jumbo packet Threshold for task offload */ | ||
438 | #define REG_TX_EARLY_TH 0x1584 /* Jumbo frame threshold in QWORD unit. Packet greater than */ | ||
439 | /* JUMBO_TASK_OFFLOAD_THRESHOLD will not be task offloaded. */ | ||
440 | #define TX_TX_EARLY_TH_MASK 0x7ff | ||
441 | #define TX_TX_EARLY_TH_SHIFT 0 | ||
442 | |||
443 | |||
444 | /* RXQ Control Register */ | ||
445 | #define REG_RXQ_CTRL 0x15A0 | ||
446 | #define RXQ_CTRL_PBA_ALIGN_32 0 /* rx-packet alignment */ | ||
447 | #define RXQ_CTRL_PBA_ALIGN_64 1 | ||
448 | #define RXQ_CTRL_PBA_ALIGN_128 2 | ||
449 | #define RXQ_CTRL_PBA_ALIGN_256 3 | ||
450 | #define RXQ_CTRL_Q1_EN 0x10 | ||
451 | #define RXQ_CTRL_Q2_EN 0x20 | ||
452 | #define RXQ_CTRL_Q3_EN 0x40 | ||
453 | #define RXQ_CTRL_IPV6_XSUM_VERIFY_EN 0x80 | ||
454 | #define RXQ_CTRL_HASH_TLEN_SHIFT 8 | ||
455 | #define RXQ_CTRL_HASH_TLEN_MASK 0xFF | ||
456 | #define RXQ_CTRL_HASH_TYPE_IPV4 0x10000 | ||
457 | #define RXQ_CTRL_HASH_TYPE_IPV4_TCP 0x20000 | ||
458 | #define RXQ_CTRL_HASH_TYPE_IPV6 0x40000 | ||
459 | #define RXQ_CTRL_HASH_TYPE_IPV6_TCP 0x80000 | ||
460 | #define RXQ_CTRL_RSS_MODE_DISABLE 0 | ||
461 | #define RXQ_CTRL_RSS_MODE_SQSINT 0x4000000 | ||
462 | #define RXQ_CTRL_RSS_MODE_MQUESINT 0x8000000 | ||
463 | #define RXQ_CTRL_RSS_MODE_MQUEMINT 0xC000000 | ||
464 | #define RXQ_CTRL_NIP_QUEUE_SEL_TBL 0x10000000 | ||
465 | #define RXQ_CTRL_HASH_ENABLE 0x20000000 | ||
466 | #define RXQ_CTRL_CUT_THRU_EN 0x40000000 | ||
467 | #define RXQ_CTRL_EN 0x80000000 | ||
468 | |||
469 | /* Rx jumbo packet threshold and rrd retirement timer */ | ||
470 | #define REG_RXQ_JMBOSZ_RRDTIM 0x15A4 | ||
471 | /* | ||
472 | * Jumbo packet threshold for non-VLAN packet, in QWORD (64-bit) unit. | ||
473 | * When the packet length greater than or equal to this value, RXQ | ||
474 | * shall start cut-through forwarding of the received packet. | ||
475 | */ | ||
476 | #define RXQ_JMBOSZ_TH_MASK 0x7ff | ||
477 | #define RXQ_JMBOSZ_TH_SHIFT 0 /* RRD retirement timer. Decrement by 1 after every 512ns passes*/ | ||
478 | #define RXQ_JMBO_LKAH_MASK 0xf | ||
479 | #define RXQ_JMBO_LKAH_SHIFT 11 | ||
480 | |||
481 | /* RXF flow control register */ | ||
482 | #define REG_RXQ_RXF_PAUSE_THRESH 0x15A8 | ||
483 | #define RXQ_RXF_PAUSE_TH_HI_SHIFT 0 | ||
484 | #define RXQ_RXF_PAUSE_TH_HI_MASK 0xfff | ||
485 | #define RXQ_RXF_PAUSE_TH_LO_SHIFT 16 | ||
486 | #define RXQ_RXF_PAUSE_TH_LO_MASK 0xfff | ||
487 | |||
488 | |||
489 | /* DMA Engine Control Register */ | ||
490 | #define REG_DMA_CTRL 0x15C0 | ||
491 | #define DMA_CTRL_DMAR_IN_ORDER 0x1 | ||
492 | #define DMA_CTRL_DMAR_ENH_ORDER 0x2 | ||
493 | #define DMA_CTRL_DMAR_OUT_ORDER 0x4 | ||
494 | #define DMA_CTRL_RCB_VALUE 0x8 | ||
495 | #define DMA_CTRL_DMAR_BURST_LEN_SHIFT 4 | ||
496 | #define DMA_CTRL_DMAR_BURST_LEN_MASK 7 | ||
497 | #define DMA_CTRL_DMAW_BURST_LEN_SHIFT 7 | ||
498 | #define DMA_CTRL_DMAW_BURST_LEN_MASK 7 | ||
499 | #define DMA_CTRL_DMAR_REQ_PRI 0x400 | ||
500 | #define DMA_CTRL_DMAR_DLY_CNT_MASK 0x1F | ||
501 | #define DMA_CTRL_DMAR_DLY_CNT_SHIFT 11 | ||
502 | #define DMA_CTRL_DMAW_DLY_CNT_MASK 0xF | ||
503 | #define DMA_CTRL_DMAW_DLY_CNT_SHIFT 16 | ||
504 | #define DMA_CTRL_TXCMB_EN 0x100000 | ||
505 | #define DMA_CTRL_RXCMB_EN 0x200000 | ||
506 | |||
507 | |||
508 | /* CMB/SMB Control Register */ | ||
509 | #define REG_SMB_STAT_TIMER 0x15C4 | ||
510 | #define REG_TRIG_RRD_THRESH 0x15CA | ||
511 | #define REG_TRIG_TPD_THRESH 0x15C8 | ||
512 | #define REG_TRIG_TXTIMER 0x15CC | ||
513 | #define REG_TRIG_RXTIMER 0x15CE | ||
514 | |||
515 | /* HOST RXF Page 1,2,3 address */ | ||
516 | #define REG_HOST_RXF1_PAGE0_LO 0x15D0 | ||
517 | #define REG_HOST_RXF1_PAGE1_LO 0x15D4 | ||
518 | #define REG_HOST_RXF2_PAGE0_LO 0x15D8 | ||
519 | #define REG_HOST_RXF2_PAGE1_LO 0x15DC | ||
520 | #define REG_HOST_RXF3_PAGE0_LO 0x15E0 | ||
521 | #define REG_HOST_RXF3_PAGE1_LO 0x15E4 | ||
522 | |||
523 | /* Mail box */ | ||
524 | #define REG_MB_RXF1_RADDR 0x15B4 | ||
525 | #define REG_MB_RXF2_RADDR 0x15B8 | ||
526 | #define REG_MB_RXF3_RADDR 0x15BC | ||
527 | #define REG_MB_TPD_PROD_IDX 0x15F0 | ||
528 | |||
529 | /* RXF-Page 0-3 PageNo & Valid bit */ | ||
530 | #define REG_HOST_RXF0_PAGE0_VLD 0x15F4 | ||
531 | #define HOST_RXF_VALID 1 | ||
532 | #define HOST_RXF_PAGENO_SHIFT 1 | ||
533 | #define HOST_RXF_PAGENO_MASK 0x7F | ||
534 | #define REG_HOST_RXF0_PAGE1_VLD 0x15F5 | ||
535 | #define REG_HOST_RXF1_PAGE0_VLD 0x15F6 | ||
536 | #define REG_HOST_RXF1_PAGE1_VLD 0x15F7 | ||
537 | #define REG_HOST_RXF2_PAGE0_VLD 0x15F8 | ||
538 | #define REG_HOST_RXF2_PAGE1_VLD 0x15F9 | ||
539 | #define REG_HOST_RXF3_PAGE0_VLD 0x15FA | ||
540 | #define REG_HOST_RXF3_PAGE1_VLD 0x15FB | ||
541 | |||
542 | /* Interrupt Status Register */ | ||
543 | #define REG_ISR 0x1600 | ||
544 | #define ISR_SMB 1 | ||
545 | #define ISR_TIMER 2 /* Interrupt when Timer is counted down to zero */ | ||
546 | /* | ||
547 | * Software manual interrupt, for debug. Set when SW_MAN_INT_EN is set | ||
548 | * in Table 51 Selene Master Control Register (Offset 0x1400). | ||
549 | */ | ||
550 | #define ISR_MANUAL 4 | ||
551 | #define ISR_HW_RXF_OV 8 /* RXF overflow interrupt */ | ||
552 | #define ISR_HOST_RXF0_OV 0x10 | ||
553 | #define ISR_HOST_RXF1_OV 0x20 | ||
554 | #define ISR_HOST_RXF2_OV 0x40 | ||
555 | #define ISR_HOST_RXF3_OV 0x80 | ||
556 | #define ISR_TXF_UN 0x100 | ||
557 | #define ISR_RX0_PAGE_FULL 0x200 | ||
558 | #define ISR_DMAR_TO_RST 0x400 | ||
559 | #define ISR_DMAW_TO_RST 0x800 | ||
560 | #define ISR_GPHY 0x1000 | ||
561 | #define ISR_TX_CREDIT 0x2000 | ||
562 | #define ISR_GPHY_LPW 0x4000 /* GPHY low power state interrupt */ | ||
563 | #define ISR_RX_PKT 0x10000 /* One packet received, triggered by RFD */ | ||
564 | #define ISR_TX_PKT 0x20000 /* One packet transmitted, triggered by TPD */ | ||
565 | #define ISR_TX_DMA 0x40000 | ||
566 | #define ISR_RX_PKT_1 0x80000 | ||
567 | #define ISR_RX_PKT_2 0x100000 | ||
568 | #define ISR_RX_PKT_3 0x200000 | ||
569 | #define ISR_MAC_RX 0x400000 | ||
570 | #define ISR_MAC_TX 0x800000 | ||
571 | #define ISR_UR_DETECTED 0x1000000 | ||
572 | #define ISR_FERR_DETECTED 0x2000000 | ||
573 | #define ISR_NFERR_DETECTED 0x4000000 | ||
574 | #define ISR_CERR_DETECTED 0x8000000 | ||
575 | #define ISR_PHY_LINKDOWN 0x10000000 | ||
576 | #define ISR_DIS_INT 0x80000000 | ||
577 | |||
578 | |||
579 | /* Interrupt Mask Register */ | ||
580 | #define REG_IMR 0x1604 | ||
581 | |||
582 | |||
583 | #define IMR_NORMAL_MASK (\ | ||
584 | ISR_SMB |\ | ||
585 | ISR_TXF_UN |\ | ||
586 | ISR_HW_RXF_OV |\ | ||
587 | ISR_HOST_RXF0_OV|\ | ||
588 | ISR_MANUAL |\ | ||
589 | ISR_GPHY |\ | ||
590 | ISR_GPHY_LPW |\ | ||
591 | ISR_DMAR_TO_RST |\ | ||
592 | ISR_DMAW_TO_RST |\ | ||
593 | ISR_PHY_LINKDOWN|\ | ||
594 | ISR_RX_PKT |\ | ||
595 | ISR_TX_PKT) | ||
596 | |||
597 | #define ISR_TX_EVENT (ISR_TXF_UN | ISR_TX_PKT) | ||
598 | #define ISR_RX_EVENT (ISR_HOST_RXF0_OV | ISR_HW_RXF_OV | ISR_RX_PKT) | ||
599 | |||
600 | #define REG_MAC_RX_STATUS_BIN 0x1700 | ||
601 | #define REG_MAC_RX_STATUS_END 0x175c | ||
602 | #define REG_MAC_TX_STATUS_BIN 0x1760 | ||
603 | #define REG_MAC_TX_STATUS_END 0x17c0 | ||
604 | |||
605 | /* Hardware Offset Register */ | ||
606 | #define REG_HOST_RXF0_PAGEOFF 0x1800 | ||
607 | #define REG_TPD_CONS_IDX 0x1804 | ||
608 | #define REG_HOST_RXF1_PAGEOFF 0x1808 | ||
609 | #define REG_HOST_RXF2_PAGEOFF 0x180C | ||
610 | #define REG_HOST_RXF3_PAGEOFF 0x1810 | ||
611 | |||
612 | /* RXF-Page 0-3 Offset DMA Address */ | ||
613 | #define REG_HOST_RXF0_MB0_LO 0x1820 | ||
614 | #define REG_HOST_RXF0_MB1_LO 0x1824 | ||
615 | #define REG_HOST_RXF1_MB0_LO 0x1828 | ||
616 | #define REG_HOST_RXF1_MB1_LO 0x182C | ||
617 | #define REG_HOST_RXF2_MB0_LO 0x1830 | ||
618 | #define REG_HOST_RXF2_MB1_LO 0x1834 | ||
619 | #define REG_HOST_RXF3_MB0_LO 0x1838 | ||
620 | #define REG_HOST_RXF3_MB1_LO 0x183C | ||
621 | |||
622 | /* Tpd CMB DMA Address */ | ||
623 | #define REG_HOST_TX_CMB_LO 0x1840 | ||
624 | #define REG_HOST_SMB_ADDR_LO 0x1844 | ||
625 | |||
626 | /* DEBUG ADDR */ | ||
627 | #define REG_DEBUG_DATA0 0x1900 | ||
628 | #define REG_DEBUG_DATA1 0x1904 | ||
629 | |||
630 | /***************************** MII definition ***************************************/ | ||
631 | /* PHY Common Register */ | ||
632 | #define MII_BMCR 0x00 | ||
633 | #define MII_BMSR 0x01 | ||
634 | #define MII_PHYSID1 0x02 | ||
635 | #define MII_PHYSID2 0x03 | ||
636 | #define MII_ADVERTISE 0x04 | ||
637 | #define MII_LPA 0x05 | ||
638 | #define MII_EXPANSION 0x06 | ||
639 | #define MII_AT001_CR 0x09 | ||
640 | #define MII_AT001_SR 0x0A | ||
641 | #define MII_AT001_ESR 0x0F | ||
642 | #define MII_AT001_PSCR 0x10 | ||
643 | #define MII_AT001_PSSR 0x11 | ||
644 | #define MII_INT_CTRL 0x12 | ||
645 | #define MII_INT_STATUS 0x13 | ||
646 | #define MII_SMARTSPEED 0x14 | ||
647 | #define MII_RERRCOUNTER 0x15 | ||
648 | #define MII_SREVISION 0x16 | ||
649 | #define MII_RESV1 0x17 | ||
650 | #define MII_LBRERROR 0x18 | ||
651 | #define MII_PHYADDR 0x19 | ||
652 | #define MII_RESV2 0x1a | ||
653 | #define MII_TPISTATUS 0x1b | ||
654 | #define MII_NCONFIG 0x1c | ||
655 | |||
656 | #define MII_DBG_ADDR 0x1D | ||
657 | #define MII_DBG_DATA 0x1E | ||
658 | |||
659 | |||
660 | /* PHY Control Register */ | ||
661 | #define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ | ||
662 | #define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ | ||
663 | #define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ | ||
664 | #define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ | ||
665 | #define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ | ||
666 | #define MII_CR_POWER_DOWN 0x0800 /* Power down */ | ||
667 | #define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ | ||
668 | #define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ | ||
669 | #define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ | ||
670 | #define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ | ||
671 | #define MII_CR_SPEED_MASK 0x2040 | ||
672 | #define MII_CR_SPEED_1000 0x0040 | ||
673 | #define MII_CR_SPEED_100 0x2000 | ||
674 | #define MII_CR_SPEED_10 0x0000 | ||
675 | |||
676 | |||
677 | /* PHY Status Register */ | ||
678 | #define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ | ||
679 | #define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ | ||
680 | #define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ | ||
681 | #define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ | ||
682 | #define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ | ||
683 | #define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ | ||
684 | #define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ | ||
685 | #define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */ | ||
686 | #define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ | ||
687 | #define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ | ||
688 | #define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ | ||
689 | #define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ | ||
690 | #define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ | ||
691 | #define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ | ||
692 | #define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ | ||
693 | |||
694 | /* Link partner ability register. */ | ||
695 | #define MII_LPA_SLCT 0x001f /* Same as advertise selector */ | ||
696 | #define MII_LPA_10HALF 0x0020 /* Can do 10mbps half-duplex */ | ||
697 | #define MII_LPA_10FULL 0x0040 /* Can do 10mbps full-duplex */ | ||
698 | #define MII_LPA_100HALF 0x0080 /* Can do 100mbps half-duplex */ | ||
699 | #define MII_LPA_100FULL 0x0100 /* Can do 100mbps full-duplex */ | ||
700 | #define MII_LPA_100BASE4 0x0200 /* 100BASE-T4 */ | ||
701 | #define MII_LPA_PAUSE 0x0400 /* PAUSE */ | ||
702 | #define MII_LPA_ASYPAUSE 0x0800 /* Asymmetrical PAUSE */ | ||
703 | #define MII_LPA_RFAULT 0x2000 /* Link partner faulted */ | ||
704 | #define MII_LPA_LPACK 0x4000 /* Link partner acked us */ | ||
705 | #define MII_LPA_NPAGE 0x8000 /* Next page bit */ | ||
706 | |||
707 | /* Autoneg Advertisement Register */ | ||
708 | #define MII_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */ | ||
709 | #define MII_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ | ||
710 | #define MII_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ | ||
711 | #define MII_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ | ||
712 | #define MII_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ | ||
713 | #define MII_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ | ||
714 | #define MII_AR_PAUSE 0x0400 /* Pause operation desired */ | ||
715 | #define MII_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ | ||
716 | #define MII_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ | ||
717 | #define MII_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */ | ||
718 | #define MII_AR_SPEED_MASK 0x01E0 | ||
719 | #define MII_AR_DEFAULT_CAP_MASK 0x0DE0 | ||
720 | |||
721 | /* 1000BASE-T Control Register */ | ||
722 | #define MII_AT001_CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ | ||
723 | #define MII_AT001_CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ | ||
724 | #define MII_AT001_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */ | ||
725 | /* 0=DTE device */ | ||
726 | #define MII_AT001_CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ | ||
727 | /* 0=Configure PHY as Slave */ | ||
728 | #define MII_AT001_CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ | ||
729 | /* 0=Automatic Master/Slave config */ | ||
730 | #define MII_AT001_CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ | ||
731 | #define MII_AT001_CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ | ||
732 | #define MII_AT001_CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ | ||
733 | #define MII_AT001_CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ | ||
734 | #define MII_AT001_CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ | ||
735 | #define MII_AT001_CR_1000T_SPEED_MASK 0x0300 | ||
736 | #define MII_AT001_CR_1000T_DEFAULT_CAP_MASK 0x0300 | ||
737 | |||
738 | /* 1000BASE-T Status Register */ | ||
739 | #define MII_AT001_SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ | ||
740 | #define MII_AT001_SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ | ||
741 | #define MII_AT001_SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ | ||
742 | #define MII_AT001_SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ | ||
743 | #define MII_AT001_SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */ | ||
744 | #define MII_AT001_SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */ | ||
745 | #define MII_AT001_SR_1000T_REMOTE_RX_STATUS_SHIFT 12 | ||
746 | #define MII_AT001_SR_1000T_LOCAL_RX_STATUS_SHIFT 13 | ||
747 | |||
748 | /* Extended Status Register */ | ||
749 | #define MII_AT001_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */ | ||
750 | #define MII_AT001_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */ | ||
751 | #define MII_AT001_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */ | ||
752 | #define MII_AT001_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */ | ||
753 | |||
754 | /* AT001 PHY Specific Control Register */ | ||
755 | #define MII_AT001_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */ | ||
756 | #define MII_AT001_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ | ||
757 | #define MII_AT001_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */ | ||
758 | #define MII_AT001_PSCR_MAC_POWERDOWN 0x0008 | ||
759 | #define MII_AT001_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low, | ||
760 | * 0=CLK125 toggling | ||
761 | */ | ||
762 | #define MII_AT001_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ | ||
763 | /* Manual MDI configuration */ | ||
764 | #define MII_AT001_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ | ||
765 | #define MII_AT001_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover, | ||
766 | * 100BASE-TX/10BASE-T: | ||
767 | * MDI Mode | ||
768 | */ | ||
769 | #define MII_AT001_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled | ||
770 | * all speeds. | ||
771 | */ | ||
772 | #define MII_AT001_PSCR_10BT_EXT_DIST_ENABLE 0x0080 | ||
773 | /* 1=Enable Extended 10BASE-T distance | ||
774 | * (Lower 10BASE-T RX Threshold) | ||
775 | * 0=Normal 10BASE-T RX Threshold */ | ||
776 | #define MII_AT001_PSCR_MII_5BIT_ENABLE 0x0100 | ||
777 | /* 1=5-Bit interface in 100BASE-TX | ||
778 | * 0=MII interface in 100BASE-TX */ | ||
779 | #define MII_AT001_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */ | ||
780 | #define MII_AT001_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */ | ||
781 | #define MII_AT001_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ | ||
782 | #define MII_AT001_PSCR_POLARITY_REVERSAL_SHIFT 1 | ||
783 | #define MII_AT001_PSCR_AUTO_X_MODE_SHIFT 5 | ||
784 | #define MII_AT001_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7 | ||
785 | /* AT001 PHY Specific Status Register */ | ||
786 | #define MII_AT001_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ | ||
787 | #define MII_AT001_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ | ||
788 | #define MII_AT001_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ | ||
789 | #define MII_AT001_PSSR_10MBS 0x0000 /* 00=10Mbs */ | ||
790 | #define MII_AT001_PSSR_100MBS 0x4000 /* 01=100Mbs */ | ||
791 | #define MII_AT001_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ | ||
792 | |||
793 | #endif /*_ATHL1E_HW_H_*/ | ||
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c new file mode 100644 index 000000000000..35264c244cfd --- /dev/null +++ b/drivers/net/atl1e/atl1e_main.c | |||
@@ -0,0 +1,2599 @@ | |||
1 | /* | ||
2 | * Copyright(c) 2007 Atheros Corporation. All rights reserved. | ||
3 | * | ||
4 | * Derived from Intel e1000 driver | ||
5 | * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License as published by the Free | ||
9 | * Software Foundation; either version 2 of the License, or (at your option) | ||
10 | * any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along with | ||
18 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
19 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
20 | */ | ||
21 | |||
22 | #include "atl1e.h" | ||
23 | |||
24 | #define DRV_VERSION "1.0.0.7-NAPI" | ||
25 | |||
/* Driver identification strings, referenced from log messages and
 * ethtool output elsewhere in the driver. */
char atl1e_driver_name[] = "ATL1E";
char atl1e_driver_version[] = DRV_VERSION;
#define PCI_DEVICE_ID_ATTANSIC_L1E      0x1026
/*
 * atl1e_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id atl1e_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1E)},
	/* required last entry */
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, atl1e_pci_tbl);

MODULE_AUTHOR("Atheros Corporation, <xiong.huang@atheros.com>, Jie Yang <jie.yang@atheros.com>");
MODULE_DESCRIPTION("Atheros 1000M Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
49 | |||
50 | static inline void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter); | ||
51 | |||
/* Register-address lookup tables, indexed by RX queue and by page within
 * the queue (the hardware uses AT_PAGE_NUM_PER_QUEUE alternating pages
 * per queue).  "page valid" registers are written with 1 when a page is
 * handed to the hardware (see atl1e_configure_des_ring). */
static const u16
atl1e_rx_page_vld_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] =
{
	{REG_HOST_RXF0_PAGE0_VLD, REG_HOST_RXF0_PAGE1_VLD},
	{REG_HOST_RXF1_PAGE0_VLD, REG_HOST_RXF1_PAGE1_VLD},
	{REG_HOST_RXF2_PAGE0_VLD, REG_HOST_RXF2_PAGE1_VLD},
	{REG_HOST_RXF3_PAGE0_VLD, REG_HOST_RXF3_PAGE1_VLD}
};

/* high 32 bits of each RX queue's page base DMA address */
static const u16 atl1e_rx_page_hi_addr_regs[AT_MAX_RECEIVE_QUEUE] =
{
	REG_RXF0_BASE_ADDR_HI,
	REG_RXF1_BASE_ADDR_HI,
	REG_RXF2_BASE_ADDR_HI,
	REG_RXF3_BASE_ADDR_HI
};

/* low 32 bits of each page's base DMA address */
static const u16
atl1e_rx_page_lo_addr_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] =
{
	{REG_HOST_RXF0_PAGE0_LO, REG_HOST_RXF0_PAGE1_LO},
	{REG_HOST_RXF1_PAGE0_LO, REG_HOST_RXF1_PAGE1_LO},
	{REG_HOST_RXF2_PAGE0_LO, REG_HOST_RXF2_PAGE1_LO},
	{REG_HOST_RXF3_PAGE0_LO, REG_HOST_RXF3_PAGE1_LO}
};

/* mailbox registers holding each page's write-offset DMA address */
static const u16
atl1e_rx_page_write_offset_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] =
{
	{REG_HOST_RXF0_MB0_LO, REG_HOST_RXF0_MB1_LO},
	{REG_HOST_RXF1_MB0_LO, REG_HOST_RXF1_MB1_LO},
	{REG_HOST_RXF2_MB0_LO, REG_HOST_RXF2_MB1_LO},
	{REG_HOST_RXF3_MB0_LO, REG_HOST_RXF3_MB1_LO}
};

/* payload-size steps in bytes -- presumably indexed by the DMA
 * request-block enum (atl1e_dma_req_*); TODO confirm against atl1e.h */
static const u16 atl1e_pay_load_size[] = {
	128, 256, 512, 1024, 2048, 4096,
};
90 | |||
91 | /* | ||
92 | * atl1e_irq_enable - Enable default interrupt generation settings | ||
93 | * @adapter: board private structure | ||
94 | */ | ||
95 | static inline void atl1e_irq_enable(struct atl1e_adapter *adapter) | ||
96 | { | ||
97 | if (likely(atomic_dec_and_test(&adapter->irq_sem))) { | ||
98 | AT_WRITE_REG(&adapter->hw, REG_ISR, 0); | ||
99 | AT_WRITE_REG(&adapter->hw, REG_IMR, IMR_NORMAL_MASK); | ||
100 | AT_WRITE_FLUSH(&adapter->hw); | ||
101 | } | ||
102 | } | ||
103 | |||
/*
 * atl1e_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 *
 * Increments the nesting count first so a concurrent atl1e_irq_enable()
 * cannot prematurely re-enable interrupts; the flush pushes the IMR
 * write to the device before waiting for in-flight handlers to finish.
 */
static inline void atl1e_irq_disable(struct atl1e_adapter *adapter)
{
	atomic_inc(&adapter->irq_sem);
	AT_WRITE_REG(&adapter->hw, REG_IMR, 0);
	AT_WRITE_FLUSH(&adapter->hw);
	synchronize_irq(adapter->pdev->irq);
}
115 | |||
/*
 * atl1e_irq_reset - reset interrupt configuration on the NIC
 * @adapter: board private structure
 *
 * Zeroes the nesting count and clears both the status and mask
 * registers, so the next atl1e_irq_enable() call unmasks interrupts.
 */
static inline void atl1e_irq_reset(struct atl1e_adapter *adapter)
{
	atomic_set(&adapter->irq_sem, 0);
	AT_WRITE_REG(&adapter->hw, REG_ISR, 0);
	AT_WRITE_REG(&adapter->hw, REG_IMR, 0);
	AT_WRITE_FLUSH(&adapter->hw);
}
127 | |||
128 | /* | ||
129 | * atl1e_phy_config - Timer Call-back | ||
130 | * @data: pointer to netdev cast into an unsigned long | ||
131 | */ | ||
132 | static void atl1e_phy_config(unsigned long data) | ||
133 | { | ||
134 | struct atl1e_adapter *adapter = (struct atl1e_adapter *) data; | ||
135 | struct atl1e_hw *hw = &adapter->hw; | ||
136 | unsigned long flags; | ||
137 | |||
138 | spin_lock_irqsave(&adapter->mdio_lock, flags); | ||
139 | atl1e_restart_autoneg(hw); | ||
140 | spin_unlock_irqrestore(&adapter->mdio_lock, flags); | ||
141 | } | ||
142 | |||
/*
 * atl1e_reinit_locked - bring the interface down and back up
 * @adapter: board private structure
 *
 * Serializes against concurrent resets via the __AT_RESETTING flag.
 * Sleeps, so it must not be called from interrupt context (enforced
 * by the WARN_ON below).
 */
void atl1e_reinit_locked(struct atl1e_adapter *adapter)
{

	WARN_ON(in_interrupt());
	while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
		msleep(1);
	atl1e_down(adapter);
	atl1e_up(adapter);
	clear_bit(__AT_RESETTING, &adapter->flags);
}
153 | |||
154 | static void atl1e_reset_task(struct work_struct *work) | ||
155 | { | ||
156 | struct atl1e_adapter *adapter; | ||
157 | adapter = container_of(work, struct atl1e_adapter, reset_task); | ||
158 | |||
159 | atl1e_reinit_locked(adapter); | ||
160 | } | ||
161 | |||
/*
 * atl1e_check_link - poll PHY link state and sync MAC/netdev to it
 * @adapter: board private structure
 *
 * Caller is expected to hold mdio_lock (see atl1e_link_chg_task).
 * On link loss the MAC receiver is disabled and the TX queue stopped;
 * when the link comes up the MAC is reprogrammed for the negotiated
 * speed/duplex and the queue is woken.  Returns 0 or a negative error
 * from atl1e_get_speed_and_duplex().
 */
static int atl1e_check_link(struct atl1e_adapter *adapter)
{
	struct atl1e_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;
	u16 speed, duplex, phy_data;

	/* MII_BMSR must be read twice: link status is latched, so the
	 * first read returns stale state and the second the current one */
	atl1e_read_phy_reg(hw, MII_BMSR, &phy_data);
	atl1e_read_phy_reg(hw, MII_BMSR, &phy_data);
	if ((phy_data & BMSR_LSTATUS) == 0) {
		/* link down */
		if (netif_carrier_ok(netdev)) { /* old link state: Up */
			u32 value;
			/* disable rx */
			value = AT_READ_REG(hw, REG_MAC_CTRL);
			value &= ~MAC_CTRL_RX_EN;
			AT_WRITE_REG(hw, REG_MAC_CTRL, value);
			adapter->link_speed = SPEED_0;
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
	} else {
		/* Link Up */
		err = atl1e_get_speed_and_duplex(hw, &speed, &duplex);
		if (unlikely(err))
			return err;

		/* link result is our setting */
		if (adapter->link_speed != speed ||
		    adapter->link_duplex != duplex) {
			adapter->link_speed = speed;
			adapter->link_duplex = duplex;
			atl1e_setup_mac_ctrl(adapter);
			dev_info(&pdev->dev,
				"%s: %s NIC Link is Up<%d Mbps %s>\n",
				atl1e_driver_name, netdev->name,
				adapter->link_speed,
				adapter->link_duplex == FULL_DUPLEX ?
				"Full Duplex" : "Half Duplex");
		}

		if (!netif_carrier_ok(netdev)) {
			/* Link down -> Up */
			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
	}
	return 0;
}
213 | |||
214 | /* | ||
215 | * atl1e_link_chg_task - deal with link change event Out of interrupt context | ||
216 | * @netdev: network interface device structure | ||
217 | */ | ||
218 | static void atl1e_link_chg_task(struct work_struct *work) | ||
219 | { | ||
220 | struct atl1e_adapter *adapter; | ||
221 | unsigned long flags; | ||
222 | |||
223 | adapter = container_of(work, struct atl1e_adapter, link_chg_task); | ||
224 | spin_lock_irqsave(&adapter->mdio_lock, flags); | ||
225 | atl1e_check_link(adapter); | ||
226 | spin_unlock_irqrestore(&adapter->mdio_lock, flags); | ||
227 | } | ||
228 | |||
/*
 * atl1e_link_chg_event - interrupt-context part of link-change handling
 * @adapter: board private structure
 *
 * Reads the (latched) PHY link status under the mdio spinlock, reports
 * a link loss to the stack immediately, and defers the full re-check
 * and MAC reprogramming to atl1e_link_chg_task in process context.
 */
static void atl1e_link_chg_event(struct atl1e_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	u16 phy_data = 0;
	u16 link_up = 0;

	spin_lock(&adapter->mdio_lock);
	/* read BMSR twice: first read clears the latched status */
	atl1e_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
	atl1e_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
	spin_unlock(&adapter->mdio_lock);
	link_up = phy_data & BMSR_LSTATUS;
	/* notify upper layer link down ASAP */
	if (!link_up) {
		if (netif_carrier_ok(netdev)) {
			/* old link state: Up */
			dev_info(&pdev->dev, "%s: %s NIC Link is Down\n",
					atl1e_driver_name, netdev->name);
			adapter->link_speed = SPEED_0;
			netif_stop_queue(netdev);
		}
	}
	schedule_work(&adapter->link_chg_task);
}
253 | |||
/* Stop the PHY reconfiguration timer, waiting for a running callback. */
static void atl1e_del_timer(struct atl1e_adapter *adapter)
{
	del_timer_sync(&adapter->phy_config_timer);
}
258 | |||
/* Cancel (and wait for) the deferred reset and link-change work items. */
static void atl1e_cancel_work(struct atl1e_adapter *adapter)
{
	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->link_chg_task);
}
264 | |||
/*
 * atl1e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * Called by the networking core when a transmit queue stalls.  The
 * recovery reset sleeps, so it is deferred to process context.
 */
static void atl1e_tx_timeout(struct net_device *netdev)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->reset_task);
}
276 | |||
/*
 * atl1e_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated. This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 */
static void atl1e_set_multi(struct net_device *netdev)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	struct atl1e_hw *hw = &adapter->hw;
	struct dev_mc_list *mc_ptr;
	u32 mac_ctrl_data = 0;
	u32 hash_value;

	/* Check for Promiscuous and All Multicast modes */
	mac_ctrl_data = AT_READ_REG(hw, REG_MAC_CTRL);

	if (netdev->flags & IFF_PROMISC) {
		mac_ctrl_data |= MAC_CTRL_PROMIS_EN;
	} else if (netdev->flags & IFF_ALLMULTI) {
		mac_ctrl_data |= MAC_CTRL_MC_ALL_EN;
		mac_ctrl_data &= ~MAC_CTRL_PROMIS_EN;
	} else {
		mac_ctrl_data &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);
	}

	AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);

	/* clear the old settings from the multicast hash table */
	AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
	AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);

	/* compute mc addresses' hash value, and put it into hash table */
	for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
		hash_value = atl1e_hash_mc_addr(hw, mc_ptr->dmi_addr);
		atl1e_hash_set(hw, hash_value);
	}
}
318 | |||
319 | static void atl1e_vlan_rx_register(struct net_device *netdev, | ||
320 | struct vlan_group *grp) | ||
321 | { | ||
322 | struct atl1e_adapter *adapter = netdev_priv(netdev); | ||
323 | struct pci_dev *pdev = adapter->pdev; | ||
324 | u32 mac_ctrl_data = 0; | ||
325 | |||
326 | dev_dbg(&pdev->dev, "atl1e_vlan_rx_register\n"); | ||
327 | |||
328 | atl1e_irq_disable(adapter); | ||
329 | |||
330 | adapter->vlgrp = grp; | ||
331 | mac_ctrl_data = AT_READ_REG(&adapter->hw, REG_MAC_CTRL); | ||
332 | |||
333 | if (grp) { | ||
334 | /* enable VLAN tag insert/strip */ | ||
335 | mac_ctrl_data |= MAC_CTRL_RMV_VLAN; | ||
336 | } else { | ||
337 | /* disable VLAN tag insert/strip */ | ||
338 | mac_ctrl_data &= ~MAC_CTRL_RMV_VLAN; | ||
339 | } | ||
340 | |||
341 | AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data); | ||
342 | atl1e_irq_enable(adapter); | ||
343 | } | ||
344 | |||
345 | static void atl1e_restore_vlan(struct atl1e_adapter *adapter) | ||
346 | { | ||
347 | struct pci_dev *pdev = adapter->pdev; | ||
348 | |||
349 | dev_dbg(&pdev->dev, "atl1e_restore_vlan !"); | ||
350 | atl1e_vlan_rx_register(adapter->netdev, adapter->vlgrp); | ||
351 | } | ||
352 | /* | ||
353 | * atl1e_set_mac - Change the Ethernet Address of the NIC | ||
354 | * @netdev: network interface device structure | ||
355 | * @p: pointer to an address structure | ||
356 | * | ||
357 | * Returns 0 on success, negative on failure | ||
358 | */ | ||
359 | static int atl1e_set_mac_addr(struct net_device *netdev, void *p) | ||
360 | { | ||
361 | struct atl1e_adapter *adapter = netdev_priv(netdev); | ||
362 | struct sockaddr *addr = p; | ||
363 | |||
364 | if (!is_valid_ether_addr(addr->sa_data)) | ||
365 | return -EADDRNOTAVAIL; | ||
366 | |||
367 | if (netif_running(netdev)) | ||
368 | return -EBUSY; | ||
369 | |||
370 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | ||
371 | memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); | ||
372 | |||
373 | atl1e_hw_set_mac_addr(&adapter->hw); | ||
374 | |||
375 | return 0; | ||
376 | } | ||
377 | |||
/*
 * atl1e_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 *
 * NOTE(review): when the interface is down, the new MTU is silently
 * ignored (netdev->mtu is only updated inside the netif_running()
 * branch) -- confirm this matches the intended ndo_change_mtu contract.
 */
static int atl1e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	int old_mtu = netdev->mtu;
	/* full on-wire frame: MTU + header + FCS + VLAN tag */
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
			(max_frame > MAX_JUMBO_FRAME_SIZE)) {
		dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
		return -EINVAL;
	}
	/* set MTU */
	if (old_mtu != new_mtu && netif_running(netdev)) {
		while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
			msleep(1);
		netdev->mtu = new_mtu;
		adapter->hw.max_frame_size = new_mtu;
		/* jumbo threshold is kept in units of 8 bytes */
		adapter->hw.rx_jumbo_th = (max_frame + 7) >> 3;
		atl1e_down(adapter);
		atl1e_up(adapter);
		clear_bit(__AT_RESETTING, &adapter->flags);
	}
	return 0;
}
409 | |||
410 | /* | ||
411 | * caller should hold mdio_lock | ||
412 | */ | ||
413 | static int atl1e_mdio_read(struct net_device *netdev, int phy_id, int reg_num) | ||
414 | { | ||
415 | struct atl1e_adapter *adapter = netdev_priv(netdev); | ||
416 | u16 result; | ||
417 | |||
418 | atl1e_read_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, &result); | ||
419 | return result; | ||
420 | } | ||
421 | |||
422 | static void atl1e_mdio_write(struct net_device *netdev, int phy_id, | ||
423 | int reg_num, int val) | ||
424 | { | ||
425 | struct atl1e_adapter *adapter = netdev_priv(netdev); | ||
426 | |||
427 | atl1e_write_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, val); | ||
428 | } | ||
429 | |||
/*
 * atl1e_mii_ioctl - handle the MII SIOCxMIIxxx ioctls
 * @netdev: network interface device structure
 * @ifr: ioctl request carrying a struct mii_ioctl_data
 * @cmd: SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG
 *
 * PHY accesses are performed under mdio_lock with interrupts disabled.
 * NOTE(review): an out-of-range register number on SIOCSMIIREG returns
 * -EFAULT; -EINVAL would be the conventional errno -- verify callers.
 */
static int atl1e_mii_ioctl(struct net_device *netdev,
			   struct ifreq *ifr, int cmd)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	struct mii_ioctl_data *data = if_mii(ifr);
	unsigned long flags;
	int retval = 0;

	if (!netif_running(netdev))
		return -EINVAL;

	spin_lock_irqsave(&adapter->mdio_lock, flags);
	switch (cmd) {
	case SIOCGMIIPHY:
		/* single internal PHY at address 0 */
		data->phy_id = 0;
		break;

	case SIOCGMIIREG:
		if (!capable(CAP_NET_ADMIN)) {
			retval = -EPERM;
			goto out;
		}
		if (atl1e_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
				       &data->val_out)) {
			retval = -EIO;
			goto out;
		}
		break;

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN)) {
			retval = -EPERM;
			goto out;
		}
		/* only registers 0-31 exist */
		if (data->reg_num & ~(0x1F)) {
			retval = -EFAULT;
			goto out;
		}

		dev_dbg(&pdev->dev, "<atl1e_mii_ioctl> write %x %x",
			data->reg_num, data->val_in);
		if (atl1e_write_phy_reg(&adapter->hw,
				data->reg_num, data->val_in)) {
			retval = -EIO;
			goto out;
		}
		break;

	default:
		retval = -EOPNOTSUPP;
		break;
	}
out:
	spin_unlock_irqrestore(&adapter->mdio_lock, flags);
	return retval;

}
494 | |||
495 | /* | ||
496 | * atl1e_ioctl - | ||
497 | * @netdev: | ||
498 | * @ifreq: | ||
499 | * @cmd: | ||
500 | */ | ||
501 | static int atl1e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | ||
502 | { | ||
503 | switch (cmd) { | ||
504 | case SIOCGMIIPHY: | ||
505 | case SIOCGMIIREG: | ||
506 | case SIOCSMIIREG: | ||
507 | return atl1e_mii_ioctl(netdev, ifr, cmd); | ||
508 | default: | ||
509 | return -EOPNOTSUPP; | ||
510 | } | ||
511 | } | ||
512 | |||
/*
 * atl1e_setup_pcicmd - put the PCI command register in a known state
 * @pdev: PCI device
 *
 * Enables memory space and bus mastering, disables I/O space and the
 * INTx-disable bit, then clears PME state left behind by the BIOS.
 */
static void atl1e_setup_pcicmd(struct pci_dev *pdev)
{
	u16 cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	cmd &= ~(PCI_COMMAND_INTX_DISABLE | PCI_COMMAND_IO);
	cmd |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
	pci_write_config_word(pdev, PCI_COMMAND, cmd);

	/*
	 * some motherboards BIOS(PXE/EFI) driver may set PME
	 * while they transfer control to OS (Windows/Linux)
	 * so we should clear this bit before NIC work normally
	 */
	pci_write_config_dword(pdev, REG_PM_CTRLSTAT, 0);
	msleep(1);
}
530 | |||
/*
 * atl1e_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * Placeholder: this single-queue driver keeps its rings embedded in
 * struct atl1e_adapter, so there is currently nothing to allocate.
 */
static int __devinit atl1e_alloc_queues(struct atl1e_adapter *adapter)
{
	return 0;
}
540 | |||
/*
 * atl1e_sw_init - Initialize general software structures (struct atl1e_adapter)
 * @adapter: board private structure to initialize
 *
 * atl1e_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 *
 * Returns 0 on success, -ENOMEM if queue allocation fails.
 */
static int __devinit atl1e_sw_init(struct atl1e_adapter *adapter)
{
	struct atl1e_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	u32 phy_status_data = 0;

	adapter->wol = 0;
	adapter->link_speed = SPEED_0;   /* hardware init */
	adapter->link_duplex = FULL_DUPLEX;
	adapter->num_rx_queues = 1;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

	phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS);
	/* nic type: revision >= 0xF0 is L2E rev B; otherwise the
	 * 100M-capability bit distinguishes L1E from L2E rev A */
	if (hw->revision_id >= 0xF0) {
		hw->nic_type = athr_l2e_revB;
	} else {
		if (phy_status_data & PHY_STATUS_100M)
			hw->nic_type = athr_l1e;
		else
			hw->nic_type = athr_l2e_revA;
	}

	phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS);

	if (phy_status_data & PHY_STATUS_EMI_CA)
		hw->emi_ca = true;
	else
		hw->emi_ca = false;

	hw->phy_configured = false;
	hw->preamble_len = 7;
	hw->max_frame_size = adapter->netdev->mtu;
	/* jumbo threshold is kept in units of 8 bytes */
	hw->rx_jumbo_th = (hw->max_frame_size + ETH_HLEN +
				VLAN_HLEN + ETH_FCS_LEN + 7) >> 3;

	hw->rrs_type = atl1e_rrs_disable;
	hw->indirect_tab = 0;
	hw->base_cpu = 0;

	/* need confirm */

	hw->ict = 50000;                 /* 100ms */
	hw->smb_timer = 200000;          /* 200ms */
	hw->tpd_burst = 5;
	hw->rrd_thresh = 1;
	/* NOTE(review): relies on adapter->tx_ring.count and hw->imt
	 * having been set before this function runs (presumably by the
	 * option-parsing code) -- verify the probe ordering */
	hw->tpd_thresh = adapter->tx_ring.count / 2;
	hw->rx_count_down = 4;  /* 2us resolution */
	hw->tx_count_down = hw->imt * 4 / 3;
	hw->dmar_block = atl1e_dma_req_1024;
	hw->dmaw_block = atl1e_dma_req_1024;
	hw->dmar_dly_cnt = 15;
	hw->dmaw_dly_cnt = 4;

	if (atl1e_alloc_queues(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* irq_sem starts at 1 so the first atl1e_irq_enable() unmasks */
	atomic_set(&adapter->irq_sem, 1);
	spin_lock_init(&adapter->mdio_lock);
	spin_lock_init(&adapter->tx_lock);

	set_bit(__AT_DOWN, &adapter->flags);

	return 0;
}
624 | |||
/*
 * atl1e_clean_tx_ring - Free Tx-skb
 * @adapter: board private structure
 *
 * Unmaps every outstanding DMA mapping, frees every pending skb, then
 * zeroes both the descriptor ring and the buffer-info array.  Safe to
 * call before the ring is set up (returns early on NULL pointers).
 */
static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter)
{
	struct atl1e_tx_ring *tx_ring = (struct atl1e_tx_ring *)
				&adapter->tx_ring;
	struct atl1e_tx_buffer *tx_buffer = NULL;
	struct pci_dev *pdev = adapter->pdev;
	u16 index, ring_count;

	if (tx_ring->desc == NULL || tx_ring->tx_buffer == NULL)
		return;

	ring_count = tx_ring->count;
	/* first unmap dma */
	for (index = 0; index < ring_count; index++) {
		tx_buffer = &tx_ring->tx_buffer[index];
		if (tx_buffer->dma) {
			pci_unmap_page(pdev, tx_buffer->dma,
					tx_buffer->length, PCI_DMA_TODEVICE);
			tx_buffer->dma = 0;
		}
	}
	/* second free skb */
	for (index = 0; index < ring_count; index++) {
		tx_buffer = &tx_ring->tx_buffer[index];
		if (tx_buffer->skb) {
			dev_kfree_skb_any(tx_buffer->skb);
			tx_buffer->skb = NULL;
		}
	}
	/* Zero out Tx-buffers */
	memset(tx_ring->desc, 0, sizeof(struct atl1e_tpd_desc) *
			ring_count);
	memset(tx_ring->tx_buffer, 0, sizeof(struct atl1e_tx_buffer) *
			ring_count);
}
664 | |||
/*
 * atl1e_clean_rx_ring - Free rx-reservation skbs
 * @adapter: board private structure
 *
 * The RX side uses pre-allocated pages inside the single coherent ring
 * allocation, so nothing is freed here -- the pages are only zeroed.
 * Returns early if the ring memory was never allocated.
 */
static void atl1e_clean_rx_ring(struct atl1e_adapter *adapter)
{
	struct atl1e_rx_ring *rx_ring =
		(struct atl1e_rx_ring *)&adapter->rx_ring;
	struct atl1e_rx_page_desc *rx_page_desc = rx_ring->rx_page_desc;
	u16 i, j;


	if (adapter->ring_vir_addr == NULL)
		return;
	/* Zero out the descriptor ring */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
			if (rx_page_desc[i].rx_page[j].addr != NULL) {
				memset(rx_page_desc[i].rx_page[j].addr, 0,
						rx_ring->real_page_size);
			}
		}
	}
}
689 | |||
690 | static void atl1e_cal_ring_size(struct atl1e_adapter *adapter, u32 *ring_size) | ||
691 | { | ||
692 | *ring_size = ((u32)(adapter->tx_ring.count * | ||
693 | sizeof(struct atl1e_tpd_desc) + 7 | ||
694 | /* tx ring, qword align */ | ||
695 | + adapter->rx_ring.real_page_size * AT_PAGE_NUM_PER_QUEUE * | ||
696 | adapter->num_rx_queues + 31 | ||
697 | /* rx ring, 32 bytes align */ | ||
698 | + (1 + AT_PAGE_NUM_PER_QUEUE * adapter->num_rx_queues) * | ||
699 | sizeof(u32) + 3)); | ||
700 | /* tx, rx cmd, dword align */ | ||
701 | } | ||
702 | |||
703 | static void atl1e_init_ring_resources(struct atl1e_adapter *adapter) | ||
704 | { | ||
705 | struct atl1e_tx_ring *tx_ring = NULL; | ||
706 | struct atl1e_rx_ring *rx_ring = NULL; | ||
707 | |||
708 | tx_ring = &adapter->tx_ring; | ||
709 | rx_ring = &adapter->rx_ring; | ||
710 | |||
711 | rx_ring->real_page_size = adapter->rx_ring.page_size | ||
712 | + adapter->hw.max_frame_size | ||
713 | + ETH_HLEN + VLAN_HLEN | ||
714 | + ETH_FCS_LEN; | ||
715 | rx_ring->real_page_size = roundup(rx_ring->real_page_size, 32); | ||
716 | atl1e_cal_ring_size(adapter, &adapter->ring_size); | ||
717 | |||
718 | adapter->ring_vir_addr = NULL; | ||
719 | adapter->rx_ring.desc = NULL; | ||
720 | rwlock_init(&adapter->tx_ring.tx_lock); | ||
721 | |||
722 | return; | ||
723 | } | ||
724 | |||
/*
 * Read / Write Ptr Initialize:
 *
 * Resets the software ring pointers: TPD producer/consumer indices to
 * zero and, for each RX queue, the active page, expected sequence
 * number, and each page's write/read offsets.  Assumes
 * atl1e_setup_ring_resources() has populated write_offset_addr
 * (it is dereferenced unconditionally below).
 */
static void atl1e_init_ring_ptrs(struct atl1e_adapter *adapter)
{
	struct atl1e_tx_ring *tx_ring = NULL;
	struct atl1e_rx_ring *rx_ring = NULL;
	struct atl1e_rx_page_desc *rx_page_desc = NULL;
	int i, j;

	tx_ring = &adapter->tx_ring;
	rx_ring = &adapter->rx_ring;
	rx_page_desc = rx_ring->rx_page_desc;

	tx_ring->next_to_use = 0;
	atomic_set(&tx_ring->next_to_clean, 0);

	for (i = 0; i < adapter->num_rx_queues; i++) {
		rx_page_desc[i].rx_using  = 0;
		rx_page_desc[i].rx_nxseq = 0;
		for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
			*rx_page_desc[i].rx_page[j].write_offset_addr = 0;
			rx_page_desc[i].rx_page[j].read_offset = 0;
		}
	}
}
751 | |||
752 | /* | ||
753 | * atl1e_free_ring_resources - Free Tx / RX descriptor Resources | ||
754 | * @adapter: board private structure | ||
755 | * | ||
756 | * Free all transmit software resources | ||
757 | */ | ||
758 | static void atl1e_free_ring_resources(struct atl1e_adapter *adapter) | ||
759 | { | ||
760 | struct pci_dev *pdev = adapter->pdev; | ||
761 | |||
762 | atl1e_clean_tx_ring(adapter); | ||
763 | atl1e_clean_rx_ring(adapter); | ||
764 | |||
765 | if (adapter->ring_vir_addr) { | ||
766 | pci_free_consistent(pdev, adapter->ring_size, | ||
767 | adapter->ring_vir_addr, adapter->ring_dma); | ||
768 | adapter->ring_vir_addr = NULL; | ||
769 | } | ||
770 | |||
771 | if (adapter->tx_ring.tx_buffer) { | ||
772 | kfree(adapter->tx_ring.tx_buffer); | ||
773 | adapter->tx_ring.tx_buffer = NULL; | ||
774 | } | ||
775 | } | ||
776 | |||
777 | /* | ||
778 | * atl1e_setup_mem_resources - allocate Tx / RX descriptor resources | ||
779 | * @adapter: board private structure | ||
780 | * | ||
781 | * Return 0 on success, negative on failure | ||
782 | */ | ||
783 | static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter) | ||
784 | { | ||
785 | struct pci_dev *pdev = adapter->pdev; | ||
786 | struct atl1e_tx_ring *tx_ring; | ||
787 | struct atl1e_rx_ring *rx_ring; | ||
788 | struct atl1e_rx_page_desc *rx_page_desc; | ||
789 | int size, i, j; | ||
790 | u32 offset = 0; | ||
791 | int err = 0; | ||
792 | |||
793 | if (adapter->ring_vir_addr != NULL) | ||
794 | return 0; /* alloced already */ | ||
795 | |||
796 | tx_ring = &adapter->tx_ring; | ||
797 | rx_ring = &adapter->rx_ring; | ||
798 | |||
799 | /* real ring DMA buffer */ | ||
800 | |||
801 | size = adapter->ring_size; | ||
802 | adapter->ring_vir_addr = pci_alloc_consistent(pdev, | ||
803 | adapter->ring_size, &adapter->ring_dma); | ||
804 | |||
805 | if (adapter->ring_vir_addr == NULL) { | ||
806 | dev_err(&pdev->dev, "pci_alloc_consistent failed, " | ||
807 | "size = D%d", size); | ||
808 | return -ENOMEM; | ||
809 | } | ||
810 | |||
811 | memset(adapter->ring_vir_addr, 0, adapter->ring_size); | ||
812 | |||
813 | rx_page_desc = rx_ring->rx_page_desc; | ||
814 | |||
815 | /* Init TPD Ring */ | ||
816 | tx_ring->dma = roundup(adapter->ring_dma, 8); | ||
817 | offset = tx_ring->dma - adapter->ring_dma; | ||
818 | tx_ring->desc = (struct atl1e_tpd_desc *) | ||
819 | (adapter->ring_vir_addr + offset); | ||
820 | size = sizeof(struct atl1e_tx_buffer) * (tx_ring->count); | ||
821 | tx_ring->tx_buffer = kzalloc(size, GFP_KERNEL); | ||
822 | if (tx_ring->tx_buffer == NULL) { | ||
823 | dev_err(&pdev->dev, "kzalloc failed , size = D%d", size); | ||
824 | err = -ENOMEM; | ||
825 | goto failed; | ||
826 | } | ||
827 | |||
828 | /* Init RXF-Pages */ | ||
829 | offset += (sizeof(struct atl1e_tpd_desc) * tx_ring->count); | ||
830 | offset = roundup(offset, 32); | ||
831 | |||
832 | for (i = 0; i < adapter->num_rx_queues; i++) { | ||
833 | for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) { | ||
834 | rx_page_desc[i].rx_page[j].dma = | ||
835 | adapter->ring_dma + offset; | ||
836 | rx_page_desc[i].rx_page[j].addr = | ||
837 | adapter->ring_vir_addr + offset; | ||
838 | offset += rx_ring->real_page_size; | ||
839 | } | ||
840 | } | ||
841 | |||
842 | /* Init CMB dma address */ | ||
843 | tx_ring->cmb_dma = adapter->ring_dma + offset; | ||
844 | tx_ring->cmb = (u32 *)(adapter->ring_vir_addr + offset); | ||
845 | offset += sizeof(u32); | ||
846 | |||
847 | for (i = 0; i < adapter->num_rx_queues; i++) { | ||
848 | for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) { | ||
849 | rx_page_desc[i].rx_page[j].write_offset_dma = | ||
850 | adapter->ring_dma + offset; | ||
851 | rx_page_desc[i].rx_page[j].write_offset_addr = | ||
852 | adapter->ring_vir_addr + offset; | ||
853 | offset += sizeof(u32); | ||
854 | } | ||
855 | } | ||
856 | |||
857 | if (unlikely(offset > adapter->ring_size)) { | ||
858 | dev_err(&pdev->dev, "offset(%d) > ring size(%d) !!\n", | ||
859 | offset, adapter->ring_size); | ||
860 | err = -1; | ||
861 | goto failed; | ||
862 | } | ||
863 | |||
864 | return 0; | ||
865 | failed: | ||
866 | if (adapter->ring_vir_addr != NULL) { | ||
867 | pci_free_consistent(pdev, adapter->ring_size, | ||
868 | adapter->ring_vir_addr, adapter->ring_dma); | ||
869 | adapter->ring_vir_addr = NULL; | ||
870 | } | ||
871 | return err; | ||
872 | } | ||
873 | |||
/*
 * atl1e_configure_des_ring - program the ring addresses into the NIC
 * @adapter: board private structure (rings must already be allocated)
 *
 * Writes the shared high DMA address, the TPD ring base/size and CMB
 * address, every RXF page base and write-offset mailbox, marks each
 * page valid, and finally writes REG_LOAD_PTR so the hardware latches
 * all of the base addresses programmed above.
 */
static inline void atl1e_configure_des_ring(const struct atl1e_adapter *adapter)
{

	struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw;
	struct atl1e_rx_ring *rx_ring =
			(struct atl1e_rx_ring *)&adapter->rx_ring;
	struct atl1e_tx_ring *tx_ring =
			(struct atl1e_tx_ring *)&adapter->tx_ring;
	struct atl1e_rx_page_desc *rx_page_desc = NULL;
	int i, j;

	AT_WRITE_REG(hw, REG_DESC_BASE_ADDR_HI,
			(u32)((adapter->ring_dma & AT_DMA_HI_ADDR_MASK) >> 32));
	AT_WRITE_REG(hw, REG_TPD_BASE_ADDR_LO,
			(u32)((tx_ring->dma) & AT_DMA_LO_ADDR_MASK));
	AT_WRITE_REG(hw, REG_TPD_RING_SIZE, (u16)(tx_ring->count));
	AT_WRITE_REG(hw, REG_HOST_TX_CMB_LO,
			(u32)((tx_ring->cmb_dma) & AT_DMA_LO_ADDR_MASK));

	rx_page_desc = rx_ring->rx_page_desc;
	/* RXF Page Physical address / Page Length */
	for (i = 0; i < AT_MAX_RECEIVE_QUEUE; i++) {
		AT_WRITE_REG(hw, atl1e_rx_page_hi_addr_regs[i],
				 (u32)((adapter->ring_dma &
				 AT_DMA_HI_ADDR_MASK) >> 32));
		for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
			u32 page_phy_addr;
			u32 offset_phy_addr;

			page_phy_addr = rx_page_desc[i].rx_page[j].dma;
			offset_phy_addr =
				rx_page_desc[i].rx_page[j].write_offset_dma;

			AT_WRITE_REG(hw, atl1e_rx_page_lo_addr_regs[i][j],
					page_phy_addr & AT_DMA_LO_ADDR_MASK);
			AT_WRITE_REG(hw, atl1e_rx_page_write_offset_regs[i][j],
					offset_phy_addr & AT_DMA_LO_ADDR_MASK);
			AT_WRITE_REGB(hw, atl1e_rx_page_vld_regs[i][j], 1);
		}
	}
	/* Page Length */
	AT_WRITE_REG(hw, REG_HOST_RXFPAGE_SIZE, rx_ring->page_size);
	/* Load all of base address above */
	AT_WRITE_REG(hw, REG_LOAD_PTR, 1);

	return;
}
921 | |||
922 | static inline void atl1e_configure_tx(struct atl1e_adapter *adapter) | ||
923 | { | ||
924 | struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw; | ||
925 | u32 dev_ctrl_data = 0; | ||
926 | u32 max_pay_load = 0; | ||
927 | u32 jumbo_thresh = 0; | ||
928 | u32 extra_size = 0; /* Jumbo frame threshold in QWORD unit */ | ||
929 | |||
930 | /* configure TXQ param */ | ||
931 | if (hw->nic_type != athr_l2e_revB) { | ||
932 | extra_size = ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN; | ||
933 | if (hw->max_frame_size <= 1500) { | ||
934 | jumbo_thresh = hw->max_frame_size + extra_size; | ||
935 | } else if (hw->max_frame_size < 6*1024) { | ||
936 | jumbo_thresh = | ||
937 | (hw->max_frame_size + extra_size) * 2 / 3; | ||
938 | } else { | ||
939 | jumbo_thresh = (hw->max_frame_size + extra_size) / 2; | ||
940 | } | ||
941 | AT_WRITE_REG(hw, REG_TX_EARLY_TH, (jumbo_thresh + 7) >> 3); | ||
942 | } | ||
943 | |||
944 | dev_ctrl_data = AT_READ_REG(hw, REG_DEVICE_CTRL); | ||
945 | |||
946 | max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT)) & | ||
947 | DEVICE_CTRL_MAX_PAYLOAD_MASK; | ||
948 | |||
949 | hw->dmaw_block = min(max_pay_load, hw->dmaw_block); | ||
950 | |||
951 | max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT)) & | ||
952 | DEVICE_CTRL_MAX_RREQ_SZ_MASK; | ||
953 | hw->dmar_block = min(max_pay_load, hw->dmar_block); | ||
954 | |||
955 | if (hw->nic_type != athr_l2e_revB) | ||
956 | AT_WRITE_REGW(hw, REG_TXQ_CTRL + 2, | ||
957 | atl1e_pay_load_size[hw->dmar_block]); | ||
958 | /* enable TXQ */ | ||
959 | AT_WRITE_REGW(hw, REG_TXQ_CTRL, | ||
960 | (((u16)hw->tpd_burst & TXQ_CTRL_NUM_TPD_BURST_MASK) | ||
961 | << TXQ_CTRL_NUM_TPD_BURST_SHIFT) | ||
962 | | TXQ_CTRL_ENH_MODE | TXQ_CTRL_EN); | ||
963 | return; | ||
964 | } | ||
965 | |||
966 | static inline void atl1e_configure_rx(struct atl1e_adapter *adapter) | ||
967 | { | ||
968 | struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw; | ||
969 | u32 rxf_len = 0; | ||
970 | u32 rxf_low = 0; | ||
971 | u32 rxf_high = 0; | ||
972 | u32 rxf_thresh_data = 0; | ||
973 | u32 rxq_ctrl_data = 0; | ||
974 | |||
975 | if (hw->nic_type != athr_l2e_revB) { | ||
976 | AT_WRITE_REGW(hw, REG_RXQ_JMBOSZ_RRDTIM, | ||
977 | (u16)((hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK) << | ||
978 | RXQ_JMBOSZ_TH_SHIFT | | ||
979 | (1 & RXQ_JMBO_LKAH_MASK) << | ||
980 | RXQ_JMBO_LKAH_SHIFT)); | ||
981 | |||
982 | rxf_len = AT_READ_REG(hw, REG_SRAM_RXF_LEN); | ||
983 | rxf_high = rxf_len * 4 / 5; | ||
984 | rxf_low = rxf_len / 5; | ||
985 | rxf_thresh_data = ((rxf_high & RXQ_RXF_PAUSE_TH_HI_MASK) | ||
986 | << RXQ_RXF_PAUSE_TH_HI_SHIFT) | | ||
987 | ((rxf_low & RXQ_RXF_PAUSE_TH_LO_MASK) | ||
988 | << RXQ_RXF_PAUSE_TH_LO_SHIFT); | ||
989 | |||
990 | AT_WRITE_REG(hw, REG_RXQ_RXF_PAUSE_THRESH, rxf_thresh_data); | ||
991 | } | ||
992 | |||
993 | /* RRS */ | ||
994 | AT_WRITE_REG(hw, REG_IDT_TABLE, hw->indirect_tab); | ||
995 | AT_WRITE_REG(hw, REG_BASE_CPU_NUMBER, hw->base_cpu); | ||
996 | |||
997 | if (hw->rrs_type & atl1e_rrs_ipv4) | ||
998 | rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV4; | ||
999 | |||
1000 | if (hw->rrs_type & atl1e_rrs_ipv4_tcp) | ||
1001 | rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV4_TCP; | ||
1002 | |||
1003 | if (hw->rrs_type & atl1e_rrs_ipv6) | ||
1004 | rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV6; | ||
1005 | |||
1006 | if (hw->rrs_type & atl1e_rrs_ipv6_tcp) | ||
1007 | rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV6_TCP; | ||
1008 | |||
1009 | if (hw->rrs_type != atl1e_rrs_disable) | ||
1010 | rxq_ctrl_data |= | ||
1011 | (RXQ_CTRL_HASH_ENABLE | RXQ_CTRL_RSS_MODE_MQUESINT); | ||
1012 | |||
1013 | rxq_ctrl_data |= RXQ_CTRL_IPV6_XSUM_VERIFY_EN | RXQ_CTRL_PBA_ALIGN_32 | | ||
1014 | RXQ_CTRL_CUT_THRU_EN | RXQ_CTRL_EN; | ||
1015 | |||
1016 | AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data); | ||
1017 | return; | ||
1018 | } | ||
1019 | |||
1020 | static inline void atl1e_configure_dma(struct atl1e_adapter *adapter) | ||
1021 | { | ||
1022 | struct atl1e_hw *hw = &adapter->hw; | ||
1023 | u32 dma_ctrl_data = 0; | ||
1024 | |||
1025 | dma_ctrl_data = DMA_CTRL_RXCMB_EN; | ||
1026 | dma_ctrl_data |= (((u32)hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK) | ||
1027 | << DMA_CTRL_DMAR_BURST_LEN_SHIFT; | ||
1028 | dma_ctrl_data |= (((u32)hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK) | ||
1029 | << DMA_CTRL_DMAW_BURST_LEN_SHIFT; | ||
1030 | dma_ctrl_data |= DMA_CTRL_DMAR_REQ_PRI | DMA_CTRL_DMAR_OUT_ORDER; | ||
1031 | dma_ctrl_data |= (((u32)hw->dmar_dly_cnt) & DMA_CTRL_DMAR_DLY_CNT_MASK) | ||
1032 | << DMA_CTRL_DMAR_DLY_CNT_SHIFT; | ||
1033 | dma_ctrl_data |= (((u32)hw->dmaw_dly_cnt) & DMA_CTRL_DMAW_DLY_CNT_MASK) | ||
1034 | << DMA_CTRL_DMAW_DLY_CNT_SHIFT; | ||
1035 | |||
1036 | AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data); | ||
1037 | return; | ||
1038 | } | ||
1039 | |||
1040 | static inline void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter) | ||
1041 | { | ||
1042 | u32 value; | ||
1043 | struct atl1e_hw *hw = &adapter->hw; | ||
1044 | struct net_device *netdev = adapter->netdev; | ||
1045 | |||
1046 | /* Config MAC CTRL Register */ | ||
1047 | value = MAC_CTRL_TX_EN | | ||
1048 | MAC_CTRL_RX_EN ; | ||
1049 | |||
1050 | if (FULL_DUPLEX == adapter->link_duplex) | ||
1051 | value |= MAC_CTRL_DUPLX; | ||
1052 | |||
1053 | value |= ((u32)((SPEED_1000 == adapter->link_speed) ? | ||
1054 | MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) << | ||
1055 | MAC_CTRL_SPEED_SHIFT); | ||
1056 | value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW); | ||
1057 | |||
1058 | value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD); | ||
1059 | value |= (((u32)adapter->hw.preamble_len & | ||
1060 | MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT); | ||
1061 | |||
1062 | if (adapter->vlgrp) | ||
1063 | value |= MAC_CTRL_RMV_VLAN; | ||
1064 | |||
1065 | value |= MAC_CTRL_BC_EN; | ||
1066 | if (netdev->flags & IFF_PROMISC) | ||
1067 | value |= MAC_CTRL_PROMIS_EN; | ||
1068 | if (netdev->flags & IFF_ALLMULTI) | ||
1069 | value |= MAC_CTRL_MC_ALL_EN; | ||
1070 | |||
1071 | AT_WRITE_REG(hw, REG_MAC_CTRL, value); | ||
1072 | } | ||
1073 | |||
/*
 * atl1e_configure - Configure Transmit & Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx/Rx unit of the MAC after a reset.
 */
1080 | static int atl1e_configure(struct atl1e_adapter *adapter) | ||
1081 | { | ||
1082 | struct atl1e_hw *hw = &adapter->hw; | ||
1083 | struct pci_dev *pdev = adapter->pdev; | ||
1084 | |||
1085 | u32 intr_status_data = 0; | ||
1086 | |||
1087 | /* clear interrupt status */ | ||
1088 | AT_WRITE_REG(hw, REG_ISR, ~0); | ||
1089 | |||
1090 | /* 1. set MAC Address */ | ||
1091 | atl1e_hw_set_mac_addr(hw); | ||
1092 | |||
1093 | /* 2. Init the Multicast HASH table done by set_muti */ | ||
1094 | |||
1095 | /* 3. Clear any WOL status */ | ||
1096 | AT_WRITE_REG(hw, REG_WOL_CTRL, 0); | ||
1097 | |||
1098 | /* 4. Descripter Ring BaseMem/Length/Read ptr/Write ptr | ||
1099 | * TPD Ring/SMB/RXF0 Page CMBs, they use the same | ||
1100 | * High 32bits memory */ | ||
1101 | atl1e_configure_des_ring(adapter); | ||
1102 | |||
1103 | /* 5. set Interrupt Moderator Timer */ | ||
1104 | AT_WRITE_REGW(hw, REG_IRQ_MODU_TIMER_INIT, hw->imt); | ||
1105 | AT_WRITE_REGW(hw, REG_IRQ_MODU_TIMER2_INIT, hw->imt); | ||
1106 | AT_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_LED_MODE | | ||
1107 | MASTER_CTRL_ITIMER_EN | MASTER_CTRL_ITIMER2_EN); | ||
1108 | |||
1109 | /* 6. rx/tx threshold to trig interrupt */ | ||
1110 | AT_WRITE_REGW(hw, REG_TRIG_RRD_THRESH, hw->rrd_thresh); | ||
1111 | AT_WRITE_REGW(hw, REG_TRIG_TPD_THRESH, hw->tpd_thresh); | ||
1112 | AT_WRITE_REGW(hw, REG_TRIG_RXTIMER, hw->rx_count_down); | ||
1113 | AT_WRITE_REGW(hw, REG_TRIG_TXTIMER, hw->tx_count_down); | ||
1114 | |||
1115 | /* 7. set Interrupt Clear Timer */ | ||
1116 | AT_WRITE_REGW(hw, REG_CMBDISDMA_TIMER, hw->ict); | ||
1117 | |||
1118 | /* 8. set MTU */ | ||
1119 | AT_WRITE_REG(hw, REG_MTU, hw->max_frame_size + ETH_HLEN + | ||
1120 | VLAN_HLEN + ETH_FCS_LEN); | ||
1121 | |||
1122 | /* 9. config TXQ early tx threshold */ | ||
1123 | atl1e_configure_tx(adapter); | ||
1124 | |||
1125 | /* 10. config RXQ */ | ||
1126 | atl1e_configure_rx(adapter); | ||
1127 | |||
1128 | /* 11. config DMA Engine */ | ||
1129 | atl1e_configure_dma(adapter); | ||
1130 | |||
1131 | /* 12. smb timer to trig interrupt */ | ||
1132 | AT_WRITE_REG(hw, REG_SMB_STAT_TIMER, hw->smb_timer); | ||
1133 | |||
1134 | intr_status_data = AT_READ_REG(hw, REG_ISR); | ||
1135 | if (unlikely((intr_status_data & ISR_PHY_LINKDOWN) != 0)) { | ||
1136 | dev_err(&pdev->dev, "atl1e_configure failed," | ||
1137 | "PCIE phy link down\n"); | ||
1138 | return -1; | ||
1139 | } | ||
1140 | |||
1141 | AT_WRITE_REG(hw, REG_ISR, 0x7fffffff); | ||
1142 | return 0; | ||
1143 | } | ||
1144 | |||
1145 | /* | ||
1146 | * atl1e_get_stats - Get System Network Statistics | ||
1147 | * @netdev: network interface device structure | ||
1148 | * | ||
1149 | * Returns the address of the device statistics structure. | ||
1150 | * The statistics are actually updated from the timer callback. | ||
1151 | */ | ||
1152 | static struct net_device_stats *atl1e_get_stats(struct net_device *netdev) | ||
1153 | { | ||
1154 | struct atl1e_adapter *adapter = netdev_priv(netdev); | ||
1155 | struct atl1e_hw_stats *hw_stats = &adapter->hw_stats; | ||
1156 | struct net_device_stats *net_stats = &adapter->net_stats; | ||
1157 | |||
1158 | net_stats->rx_packets = hw_stats->rx_ok; | ||
1159 | net_stats->tx_packets = hw_stats->tx_ok; | ||
1160 | net_stats->rx_bytes = hw_stats->rx_byte_cnt; | ||
1161 | net_stats->tx_bytes = hw_stats->tx_byte_cnt; | ||
1162 | net_stats->multicast = hw_stats->rx_mcast; | ||
1163 | net_stats->collisions = hw_stats->tx_1_col + | ||
1164 | hw_stats->tx_2_col * 2 + | ||
1165 | hw_stats->tx_late_col + hw_stats->tx_abort_col; | ||
1166 | |||
1167 | net_stats->rx_errors = hw_stats->rx_frag + hw_stats->rx_fcs_err + | ||
1168 | hw_stats->rx_len_err + hw_stats->rx_sz_ov + | ||
1169 | hw_stats->rx_rrd_ov + hw_stats->rx_align_err; | ||
1170 | net_stats->rx_fifo_errors = hw_stats->rx_rxf_ov; | ||
1171 | net_stats->rx_length_errors = hw_stats->rx_len_err; | ||
1172 | net_stats->rx_crc_errors = hw_stats->rx_fcs_err; | ||
1173 | net_stats->rx_frame_errors = hw_stats->rx_align_err; | ||
1174 | net_stats->rx_over_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov; | ||
1175 | |||
1176 | net_stats->rx_missed_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov; | ||
1177 | |||
1178 | net_stats->tx_errors = hw_stats->tx_late_col + hw_stats->tx_abort_col + | ||
1179 | hw_stats->tx_underrun + hw_stats->tx_trunc; | ||
1180 | net_stats->tx_fifo_errors = hw_stats->tx_underrun; | ||
1181 | net_stats->tx_aborted_errors = hw_stats->tx_abort_col; | ||
1182 | net_stats->tx_window_errors = hw_stats->tx_late_col; | ||
1183 | |||
1184 | return &adapter->net_stats; | ||
1185 | } | ||
1186 | |||
1187 | static void atl1e_update_hw_stats(struct atl1e_adapter *adapter) | ||
1188 | { | ||
1189 | u16 hw_reg_addr = 0; | ||
1190 | unsigned long *stats_item = NULL; | ||
1191 | |||
1192 | /* update rx status */ | ||
1193 | hw_reg_addr = REG_MAC_RX_STATUS_BIN; | ||
1194 | stats_item = &adapter->hw_stats.rx_ok; | ||
1195 | while (hw_reg_addr <= REG_MAC_RX_STATUS_END) { | ||
1196 | *stats_item += AT_READ_REG(&adapter->hw, hw_reg_addr); | ||
1197 | stats_item++; | ||
1198 | hw_reg_addr += 4; | ||
1199 | } | ||
1200 | /* update tx status */ | ||
1201 | hw_reg_addr = REG_MAC_TX_STATUS_BIN; | ||
1202 | stats_item = &adapter->hw_stats.tx_ok; | ||
1203 | while (hw_reg_addr <= REG_MAC_TX_STATUS_END) { | ||
1204 | *stats_item += AT_READ_REG(&adapter->hw, hw_reg_addr); | ||
1205 | stats_item++; | ||
1206 | hw_reg_addr += 4; | ||
1207 | } | ||
1208 | } | ||
1209 | |||
1210 | static inline void atl1e_clear_phy_int(struct atl1e_adapter *adapter) | ||
1211 | { | ||
1212 | u16 phy_data; | ||
1213 | |||
1214 | spin_lock(&adapter->mdio_lock); | ||
1215 | atl1e_read_phy_reg(&adapter->hw, MII_INT_STATUS, &phy_data); | ||
1216 | spin_unlock(&adapter->mdio_lock); | ||
1217 | } | ||
1218 | |||
static bool atl1e_clean_tx_irq(struct atl1e_adapter *adapter)
{
	/* Reclaim completed TX descriptors: walk from the software
	 * next_to_clean index up to the hardware's consumer index,
	 * unmapping DMA buffers and freeing skbs, then wake a stalled
	 * queue if the link is still up.  Always returns true. */
	struct atl1e_tx_ring *tx_ring = (struct atl1e_tx_ring *)
					&adapter->tx_ring;
	struct atl1e_tx_buffer *tx_buffer = NULL;
	/* TPD consumer index as reported by the MAC */
	u16 hw_next_to_clean = AT_READ_REGW(&adapter->hw, REG_TPD_CONS_IDX);
	u16 next_to_clean = atomic_read(&tx_ring->next_to_clean);

	while (next_to_clean != hw_next_to_clean) {
		tx_buffer = &tx_ring->tx_buffer[next_to_clean];
		if (tx_buffer->dma) {
			pci_unmap_page(adapter->pdev, tx_buffer->dma,
					tx_buffer->length, PCI_DMA_TODEVICE);
			tx_buffer->dma = 0;
		}

		if (tx_buffer->skb) {
			/* interrupt context: use the _irq variant */
			dev_kfree_skb_irq(tx_buffer->skb);
			tx_buffer->skb = NULL;
		}

		/* advance with ring wrap-around */
		if (++next_to_clean == tx_ring->count)
			next_to_clean = 0;
	}

	atomic_set(&tx_ring->next_to_clean, next_to_clean);

	/* descriptors were freed: restart a queue that stopped on a
	 * full ring, but only while the carrier is up */
	if (netif_queue_stopped(adapter->netdev) &&
			netif_carrier_ok(adapter->netdev)) {
		netif_wake_queue(adapter->netdev);
	}

	return true;
}
1253 | |||
/*
 * atl1e_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 */
static irqreturn_t atl1e_intr(int irq, void *data)
{
	/* Top-half handler: loop up to AT_MAX_INT_WORK times, reading
	 * and acking interrupt causes.  Fatal conditions (PCIe PHY
	 * link down, DMA errors) schedule a reset; RX work is handed
	 * to NAPI with the RX interrupt masked. */
	struct net_device *netdev  = data;
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	struct atl1e_hw *hw = &adapter->hw;
	int max_ints = AT_MAX_INT_WORK;
	int handled = IRQ_NONE;
	u32 status;

	do {
		status = AT_READ_REG(hw, REG_ISR);
		/* nothing (more) to service, or interrupts disabled:
		 * report IRQ_HANDLED only if an earlier pass did work */
		if ((status & IMR_NORMAL_MASK) == 0 ||
				(status & ISR_DIS_INT) != 0) {
			if (max_ints != AT_MAX_INT_WORK)
				handled = IRQ_HANDLED;
			break;
		}
		/* link event */
		if (status & ISR_GPHY)
			atl1e_clear_phy_int(adapter);
		/* Ack ISR (keeps further interrupts disabled via
		 * ISR_DIS_INT until the final write below) */
		AT_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT);

		handled = IRQ_HANDLED;
		/* check if PCIE PHY Link down */
		if (status & ISR_PHY_LINKDOWN) {
			dev_err(&pdev->dev,
					"pcie phy linkdown %x\n", status);
			if (netif_running(adapter->netdev)) {
				/* reset MAC */
				atl1e_irq_reset(adapter);
				schedule_work(&adapter->reset_task);
				break;
			}
		}

		/* check if DMA read/write error */
		if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
			dev_err(&pdev->dev,
					"PCIE DMA RW error (status = 0x%x)\n",
					status);
			atl1e_irq_reset(adapter);
			schedule_work(&adapter->reset_task);
			break;
		}

		/* statistics (SMB) interrupt */
		if (status & ISR_SMB)
			atl1e_update_hw_stats(adapter);

		/* link event */
		if (status & (ISR_GPHY | ISR_MANUAL)) {
			adapter->net_stats.tx_carrier_errors++;
			atl1e_link_chg_event(adapter);
			break;
		}

		/* transmit event */
		if (status & ISR_TX_EVENT)
			atl1e_clean_tx_irq(adapter);

		if (status & ISR_RX_EVENT) {
			/*
			 * disable rx interrupts, without
			 * the synchronize_irq bit
			 */
			AT_WRITE_REG(hw, REG_IMR,
					IMR_NORMAL_MASK & ~ISR_RX_EVENT);
			AT_WRITE_FLUSH(hw);
			/* hand RX work to NAPI; atl1e_clean() re-enables
			 * the RX interrupt when it finishes */
			if (likely(netif_rx_schedule_prep(netdev,
						   &adapter->napi)))
				__netif_rx_schedule(netdev, &adapter->napi);
		}
	} while (--max_ints > 0);
	/* re-enable Interrupt*/
	AT_WRITE_REG(&adapter->hw, REG_ISR, 0);

	return handled;
}
1339 | |||
static inline void atl1e_rx_checksum(struct atl1e_adapter *adapter,
		  struct sk_buff *skb, struct atl1e_recv_ret_status *prrs)
{
	/* Decide whether the hardware's checksum verdict can be
	 * trusted for this packet.  The packet data immediately
	 * follows the RRS descriptor in the RX page. */
	u8 *packet = (u8 *)(prrs + 1);
	struct iphdr *iph;
	u16 head_len = ETH_HLEN;
	u16 pkt_flags;
	u16 err_flags;

	/* default: let the stack verify in software */
	skb->ip_summed = CHECKSUM_NONE;
	pkt_flags = prrs->pkt_flag;
	err_flags = prrs->err_flag;
	if (((pkt_flags & RRS_IS_IPV4) || (pkt_flags & RRS_IS_IPV6)) &&
		((pkt_flags & RRS_IS_TCP) || (pkt_flags & RRS_IS_UDP))) {
		if (pkt_flags & RRS_IS_IPV4) {
			if (pkt_flags & RRS_IS_802_3)
				head_len += 8; /* extra 802.3 encapsulation — assumed SNAP header, TODO confirm */
			iph = (struct iphdr *) (packet + head_len);
			/* NOTE(review): fragmented IPv4 frames fall back to
			 * software checksumming; the interaction with the
			 * hardware DF flag (RRS_IS_IP_DF) looks device-
			 * specific — confirm against the vendor RRS spec */
			if (iph->frag_off != 0 && !(pkt_flags & RRS_IS_IP_DF))
				goto hw_xsum;
		}
		if (!(err_flags & (RRS_ERR_IP_CSUM | RRS_ERR_L4_CSUM))) {
			/* hardware verified both IP and L4 checksums */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return;
		}
	}

hw_xsum :
	/* leave ip_summed as CHECKSUM_NONE */
	return;
}
1370 | |||
1371 | static struct atl1e_rx_page *atl1e_get_rx_page(struct atl1e_adapter *adapter, | ||
1372 | u8 que) | ||
1373 | { | ||
1374 | struct atl1e_rx_page_desc *rx_page_desc = | ||
1375 | (struct atl1e_rx_page_desc *) adapter->rx_ring.rx_page_desc; | ||
1376 | u8 rx_using = rx_page_desc[que].rx_using; | ||
1377 | |||
1378 | return (struct atl1e_rx_page *)&(rx_page_desc[que].rx_page[rx_using]); | ||
1379 | } | ||
1380 | |||
static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
		   int *work_done, int work_to_do)
{
	/* NAPI RX processing for one queue: walk the current RX page
	 * from read_offset up to the hardware write offset, copying
	 * each good packet into a fresh skb and handing it to the
	 * stack.  When a page is exhausted it is handed back to the
	 * hardware and the queue flips to its other page.  A sequence-
	 * number mismatch is treated as fatal and schedules a reset. */
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	struct atl1e_rx_ring *rx_ring = (struct atl1e_rx_ring *)
					 &adapter->rx_ring;
	struct atl1e_rx_page_desc *rx_page_desc =
		(struct atl1e_rx_page_desc *) rx_ring->rx_page_desc;
	struct sk_buff *skb = NULL;
	struct atl1e_rx_page *rx_page = atl1e_get_rx_page(adapter, que);
	u32 packet_size, write_offset;
	struct atl1e_recv_ret_status *prrs;

	/* the MAC DMAs its current write offset into host memory */
	write_offset = *(rx_page->write_offset_addr);
	if (likely(rx_page->read_offset < write_offset)) {
		do {
			if (*work_done >= work_to_do)
				break;
			(*work_done)++;
			/* get new packet's rrs */
			prrs = (struct atl1e_recv_ret_status *) (rx_page->addr +
						rx_page->read_offset);
			/* check sequence number */
			if (prrs->seq_num != rx_page_desc[que].rx_nxseq) {
				dev_err(&pdev->dev,
					"rx sequence number"
					" error (rx=%d) (expect=%d)\n",
					prrs->seq_num,
					rx_page_desc[que].rx_nxseq);
				rx_page_desc[que].rx_nxseq++;
				/* just for debug use */
				AT_WRITE_REG(&adapter->hw, REG_DEBUG_DATA0,
					     (((u32)prrs->seq_num) << 16) |
					     rx_page_desc[que].rx_nxseq);
				goto fatal_err;
			}
			rx_page_desc[que].rx_nxseq++;

			/* error packet */
			if (prrs->pkt_flag & RRS_IS_ERR_FRAME) {
				if (prrs->err_flag & (RRS_ERR_BAD_CRC |
					RRS_ERR_DRIBBLE | RRS_ERR_CODE |
					RRS_ERR_TRUNC)) {
					/* hardware error, discard this packet*/
					dev_err(&pdev->dev,
						"rx packet desc error %x\n",
						*((u32 *)prrs + 1));
					goto skip_pkt;
				}
			}

			/* payload length, excluding the trailing CRC */
			packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
					RRS_PKT_SIZE_MASK) - 4; /* CRC */
			skb = netdev_alloc_skb(netdev,
					       packet_size + NET_IP_ALIGN);
			if (skb == NULL) {
				dev_warn(&pdev->dev, "%s: Memory squeeze,"
					"deferring packet.\n", netdev->name);
				goto skip_pkt;
			}
			skb_reserve(skb, NET_IP_ALIGN);
			skb->dev = netdev;
			/* copy out of the DMA page into the fresh skb;
			 * the data follows the RRS descriptor */
			memcpy(skb->data, (u8 *)(prrs + 1), packet_size);
			skb_put(skb, packet_size);
			skb->protocol = eth_type_trans(skb, netdev);
			atl1e_rx_checksum(adapter, skb, prrs);

			if (unlikely(adapter->vlgrp &&
				(prrs->pkt_flag & RRS_IS_VLAN_TAG))) {
				/* reassemble the VLAN tag from the
				 * hardware's permuted bit layout */
				u16 vlan_tag = (prrs->vtag >> 4) |
					       ((prrs->vtag & 7) << 13) |
					       ((prrs->vtag & 8) << 9);
				dev_dbg(&pdev->dev,
					"RXD VLAN TAG<RRD>=0x%04x\n",
					prrs->vtag);
				vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
							 vlan_tag);
			} else {
				netif_receive_skb(skb);
			}

			netdev->last_rx = jiffies;
skip_pkt:
	/* skip current packet whether it's ok or not. */
			/* advance past RRS + payload, rounded up to a
			 * 32-byte boundary */
			rx_page->read_offset +=
				(((u32)((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
				RRS_PKT_SIZE_MASK) +
				sizeof(struct atl1e_recv_ret_status) + 31) &
					0xFFFFFFE0);

			if (rx_page->read_offset >= rx_ring->page_size) {
				/* mark this page clean */
				u16 reg_addr;
				u8  rx_using;

				rx_page->read_offset =
					*(rx_page->write_offset_addr) = 0;
				rx_using = rx_page_desc[que].rx_using;
				reg_addr =
					atl1e_rx_page_vld_regs[que][rx_using];
				/* hand the page back to the hardware
				 * and flip to the other page */
				AT_WRITE_REGB(&adapter->hw, reg_addr, 1);
				rx_page_desc[que].rx_using ^= 1;
				rx_page = atl1e_get_rx_page(adapter, que);
			}
			write_offset = *(rx_page->write_offset_addr);
		} while (rx_page->read_offset < write_offset);
	}

	return;

fatal_err:
	/* ring state is unrecoverable: schedule a full reset unless
	 * the interface is already going down */
	if (!test_bit(__AT_DOWN, &adapter->flags))
		schedule_work(&adapter->reset_task);
}
1496 | |||
/*
 * atl1e_clean - NAPI Rx polling callback
 * @napi: napi structure embedded in the board private structure
 * @budget: maximum number of RX packets to process
 */
static int atl1e_clean(struct napi_struct *napi, int budget)
{
	/* NAPI poll routine: process up to @budget RX packets, and
	 * leave polling mode (re-enabling the RX interrupt masked off
	 * by atl1e_intr) when the work is done or the link is down. */
	struct atl1e_adapter *adapter =
			container_of(napi, struct atl1e_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	u32 imr_data;
	int work_done = 0;

	/* Keep link state information with original netdev */
	if (!netif_carrier_ok(adapter->netdev))
		goto quit_polling; /* no carrier: stop polling right away */

	atl1e_clean_rx_irq(adapter, 0, &work_done, budget);

	/* If no Tx and not enough Rx work done, exit the polling mode */
	if (work_done < budget) {
quit_polling:
		netif_rx_complete(netdev, napi);
		/* unmask the RX interrupt that atl1e_intr disabled */
		imr_data = AT_READ_REG(&adapter->hw, REG_IMR);
		AT_WRITE_REG(&adapter->hw, REG_IMR, imr_data | ISR_RX_EVENT);
		/* test debug */
		if (test_bit(__AT_DOWN, &adapter->flags)) {
			atomic_dec(&adapter->irq_sem);
			dev_err(&pdev->dev,
					"atl1e_clean is called when AT_DOWN\n");
		}
		/* reenable RX intr */
		/*atl1e_irq_enable(adapter); */

	}
	return work_done;
}
1534 | |||
1535 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1536 | |||
1537 | /* | ||
1538 | * Polling 'interrupt' - used by things like netconsole to send skbs | ||
1539 | * without having to re-enable interrupts. It's not called while | ||
1540 | * the interrupt routine is executing. | ||
1541 | */ | ||
1542 | static void atl1e_netpoll(struct net_device *netdev) | ||
1543 | { | ||
1544 | struct atl1e_adapter *adapter = netdev_priv(netdev); | ||
1545 | |||
1546 | disable_irq(adapter->pdev->irq); | ||
1547 | atl1e_intr(adapter->pdev->irq, netdev); | ||
1548 | enable_irq(adapter->pdev->irq); | ||
1549 | } | ||
1550 | #endif | ||
1551 | |||
1552 | static inline u16 atl1e_tpd_avail(struct atl1e_adapter *adapter) | ||
1553 | { | ||
1554 | struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; | ||
1555 | u16 next_to_use = 0; | ||
1556 | u16 next_to_clean = 0; | ||
1557 | |||
1558 | next_to_clean = atomic_read(&tx_ring->next_to_clean); | ||
1559 | next_to_use = tx_ring->next_to_use; | ||
1560 | |||
1561 | return (u16)(next_to_clean > next_to_use) ? | ||
1562 | (next_to_clean - next_to_use - 1) : | ||
1563 | (tx_ring->count + next_to_clean - next_to_use - 1); | ||
1564 | } | ||
1565 | |||
/*
 * get next usable tpd
 * Note: should call atl1e_tpd_avail to make sure
 * there is enough tpd to use
 */
1571 | static struct atl1e_tpd_desc *atl1e_get_tpd(struct atl1e_adapter *adapter) | ||
1572 | { | ||
1573 | struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; | ||
1574 | u16 next_to_use = 0; | ||
1575 | |||
1576 | next_to_use = tx_ring->next_to_use; | ||
1577 | if (++tx_ring->next_to_use == tx_ring->count) | ||
1578 | tx_ring->next_to_use = 0; | ||
1579 | |||
1580 | memset(&tx_ring->desc[next_to_use], 0, sizeof(struct atl1e_tpd_desc)); | ||
1581 | return (struct atl1e_tpd_desc *)&tx_ring->desc[next_to_use]; | ||
1582 | } | ||
1583 | |||
1584 | static struct atl1e_tx_buffer * | ||
1585 | atl1e_get_tx_buffer(struct atl1e_adapter *adapter, struct atl1e_tpd_desc *tpd) | ||
1586 | { | ||
1587 | struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; | ||
1588 | |||
1589 | return &tx_ring->tx_buffer[tpd - tx_ring->desc]; | ||
1590 | } | ||
1591 | |||
1592 | /* Calculate the transmit packet descript needed*/ | ||
1593 | static u16 atl1e_cal_tdp_req(const struct sk_buff *skb) | ||
1594 | { | ||
1595 | int i = 0; | ||
1596 | u16 tpd_req = 1; | ||
1597 | u16 fg_size = 0; | ||
1598 | u16 proto_hdr_len = 0; | ||
1599 | |||
1600 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | ||
1601 | fg_size = skb_shinfo(skb)->frags[i].size; | ||
1602 | tpd_req += ((fg_size + MAX_TX_BUF_LEN - 1) >> MAX_TX_BUF_SHIFT); | ||
1603 | } | ||
1604 | |||
1605 | if (skb_is_gso(skb)) { | ||
1606 | if (skb->protocol == ntohs(ETH_P_IP) || | ||
1607 | (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6)) { | ||
1608 | proto_hdr_len = skb_transport_offset(skb) + | ||
1609 | tcp_hdrlen(skb); | ||
1610 | if (proto_hdr_len < skb_headlen(skb)) { | ||
1611 | tpd_req += ((skb_headlen(skb) - proto_hdr_len + | ||
1612 | MAX_TX_BUF_LEN - 1) >> | ||
1613 | MAX_TX_BUF_SHIFT); | ||
1614 | } | ||
1615 | } | ||
1616 | |||
1617 | } | ||
1618 | return tpd_req; | ||
1619 | } | ||
1620 | |||
1621 | static int atl1e_tso_csum(struct atl1e_adapter *adapter, | ||
1622 | struct sk_buff *skb, struct atl1e_tpd_desc *tpd) | ||
1623 | { | ||
1624 | struct pci_dev *pdev = adapter->pdev; | ||
1625 | u8 hdr_len; | ||
1626 | u32 real_len; | ||
1627 | unsigned short offload_type; | ||
1628 | int err; | ||
1629 | |||
1630 | if (skb_is_gso(skb)) { | ||
1631 | if (skb_header_cloned(skb)) { | ||
1632 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); | ||
1633 | if (unlikely(err)) | ||
1634 | return -1; | ||
1635 | } | ||
1636 | offload_type = skb_shinfo(skb)->gso_type; | ||
1637 | |||
1638 | if (offload_type & SKB_GSO_TCPV4) { | ||
1639 | real_len = (((unsigned char *)ip_hdr(skb) - skb->data) | ||
1640 | + ntohs(ip_hdr(skb)->tot_len)); | ||
1641 | |||
1642 | if (real_len < skb->len) | ||
1643 | pskb_trim(skb, real_len); | ||
1644 | |||
1645 | hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb)); | ||
1646 | if (unlikely(skb->len == hdr_len)) { | ||
1647 | /* only xsum need */ | ||
1648 | dev_warn(&pdev->dev, | ||
1649 | "IPV4 tso with zero data??\n"); | ||
1650 | goto check_sum; | ||
1651 | } else { | ||
1652 | ip_hdr(skb)->check = 0; | ||
1653 | ip_hdr(skb)->tot_len = 0; | ||
1654 | tcp_hdr(skb)->check = ~csum_tcpudp_magic( | ||
1655 | ip_hdr(skb)->saddr, | ||
1656 | ip_hdr(skb)->daddr, | ||
1657 | 0, IPPROTO_TCP, 0); | ||
1658 | tpd->word3 |= (ip_hdr(skb)->ihl & | ||
1659 | TDP_V4_IPHL_MASK) << | ||
1660 | TPD_V4_IPHL_SHIFT; | ||
1661 | tpd->word3 |= ((tcp_hdrlen(skb) >> 2) & | ||
1662 | TPD_TCPHDRLEN_MASK) << | ||
1663 | TPD_TCPHDRLEN_SHIFT; | ||
1664 | tpd->word3 |= ((skb_shinfo(skb)->gso_size) & | ||
1665 | TPD_MSS_MASK) << TPD_MSS_SHIFT; | ||
1666 | tpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT; | ||
1667 | } | ||
1668 | return 0; | ||
1669 | } | ||
1670 | |||
1671 | if (offload_type & SKB_GSO_TCPV6) { | ||
1672 | real_len = (((unsigned char *)ipv6_hdr(skb) - skb->data) | ||
1673 | + ntohs(ipv6_hdr(skb)->payload_len)); | ||
1674 | if (real_len < skb->len) | ||
1675 | pskb_trim(skb, real_len); | ||
1676 | |||
1677 | /* check payload == 0 byte ? */ | ||
1678 | hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb)); | ||
1679 | if (unlikely(skb->len == hdr_len)) { | ||
1680 | /* only xsum need */ | ||
1681 | dev_warn(&pdev->dev, | ||
1682 | "IPV6 tso with zero data??\n"); | ||
1683 | goto check_sum; | ||
1684 | } else { | ||
1685 | tcp_hdr(skb)->check = ~csum_ipv6_magic( | ||
1686 | &ipv6_hdr(skb)->saddr, | ||
1687 | &ipv6_hdr(skb)->daddr, | ||
1688 | 0, IPPROTO_TCP, 0); | ||
1689 | tpd->word3 |= 1 << TPD_IP_VERSION_SHIFT; | ||
1690 | hdr_len >>= 1; | ||
1691 | tpd->word3 |= (hdr_len & TPD_V6_IPHLLO_MASK) << | ||
1692 | TPD_V6_IPHLLO_SHIFT; | ||
1693 | tpd->word3 |= ((hdr_len >> 3) & | ||
1694 | TPD_V6_IPHLHI_MASK) << | ||
1695 | TPD_V6_IPHLHI_SHIFT; | ||
1696 | tpd->word3 |= (tcp_hdrlen(skb) >> 2 & | ||
1697 | TPD_TCPHDRLEN_MASK) << | ||
1698 | TPD_TCPHDRLEN_SHIFT; | ||
1699 | tpd->word3 |= ((skb_shinfo(skb)->gso_size) & | ||
1700 | TPD_MSS_MASK) << TPD_MSS_SHIFT; | ||
1701 | tpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT; | ||
1702 | } | ||
1703 | } | ||
1704 | return 0; | ||
1705 | } | ||
1706 | |||
1707 | check_sum: | ||
1708 | if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { | ||
1709 | u8 css, cso; | ||
1710 | |||
1711 | cso = skb_transport_offset(skb); | ||
1712 | if (unlikely(cso & 0x1)) { | ||
1713 | dev_err(&adapter->pdev->dev, | ||
1714 | "pay load offset should not ant event number\n"); | ||
1715 | return -1; | ||
1716 | } else { | ||
1717 | css = cso + skb->csum_offset; | ||
1718 | tpd->word3 |= (cso & TPD_PLOADOFFSET_MASK) << | ||
1719 | TPD_PLOADOFFSET_SHIFT; | ||
1720 | tpd->word3 |= (css & TPD_CCSUMOFFSET_MASK) << | ||
1721 | TPD_CCSUMOFFSET_SHIFT; | ||
1722 | tpd->word3 |= 1 << TPD_CC_SEGMENT_EN_SHIFT; | ||
1723 | } | ||
1724 | } | ||
1725 | |||
1726 | return 0; | ||
1727 | } | ||
1728 | |||
1729 | static void atl1e_tx_map(struct atl1e_adapter *adapter, | ||
1730 | struct sk_buff *skb, struct atl1e_tpd_desc *tpd) | ||
1731 | { | ||
1732 | struct atl1e_tpd_desc *use_tpd = NULL; | ||
1733 | struct atl1e_tx_buffer *tx_buffer = NULL; | ||
1734 | u16 buf_len = skb->len - skb->data_len; | ||
1735 | u16 map_len = 0; | ||
1736 | u16 mapped_len = 0; | ||
1737 | u16 hdr_len = 0; | ||
1738 | u16 nr_frags; | ||
1739 | u16 f; | ||
1740 | int segment; | ||
1741 | |||
1742 | nr_frags = skb_shinfo(skb)->nr_frags; | ||
1743 | segment = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK; | ||
1744 | if (segment) { | ||
1745 | /* TSO */ | ||
1746 | map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | ||
1747 | use_tpd = tpd; | ||
1748 | |||
1749 | tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd); | ||
1750 | tx_buffer->length = map_len; | ||
1751 | tx_buffer->dma = pci_map_single(adapter->pdev, | ||
1752 | skb->data, hdr_len, PCI_DMA_TODEVICE); | ||
1753 | mapped_len += map_len; | ||
1754 | use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma); | ||
1755 | use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) | | ||
1756 | ((cpu_to_le32(tx_buffer->length) & | ||
1757 | TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT); | ||
1758 | } | ||
1759 | |||
1760 | while (mapped_len < buf_len) { | ||
1761 | /* mapped_len == 0, means we should use the first tpd, | ||
1762 | which is given by caller */ | ||
1763 | if (mapped_len == 0) { | ||
1764 | use_tpd = tpd; | ||
1765 | } else { | ||
1766 | use_tpd = atl1e_get_tpd(adapter); | ||
1767 | memcpy(use_tpd, tpd, sizeof(struct atl1e_tpd_desc)); | ||
1768 | } | ||
1769 | tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd); | ||
1770 | tx_buffer->skb = NULL; | ||
1771 | |||
1772 | tx_buffer->length = map_len = | ||
1773 | ((buf_len - mapped_len) >= MAX_TX_BUF_LEN) ? | ||
1774 | MAX_TX_BUF_LEN : (buf_len - mapped_len); | ||
1775 | tx_buffer->dma = | ||
1776 | pci_map_single(adapter->pdev, skb->data + mapped_len, | ||
1777 | map_len, PCI_DMA_TODEVICE); | ||
1778 | mapped_len += map_len; | ||
1779 | use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma); | ||
1780 | use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) | | ||
1781 | ((cpu_to_le32(tx_buffer->length) & | ||
1782 | TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT); | ||
1783 | } | ||
1784 | |||
1785 | for (f = 0; f < nr_frags; f++) { | ||
1786 | struct skb_frag_struct *frag; | ||
1787 | u16 i; | ||
1788 | u16 seg_num; | ||
1789 | |||
1790 | frag = &skb_shinfo(skb)->frags[f]; | ||
1791 | buf_len = frag->size; | ||
1792 | |||
1793 | seg_num = (buf_len + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN; | ||
1794 | for (i = 0; i < seg_num; i++) { | ||
1795 | use_tpd = atl1e_get_tpd(adapter); | ||
1796 | memcpy(use_tpd, tpd, sizeof(struct atl1e_tpd_desc)); | ||
1797 | |||
1798 | tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd); | ||
1799 | if (tx_buffer->skb) | ||
1800 | BUG(); | ||
1801 | |||
1802 | tx_buffer->skb = NULL; | ||
1803 | tx_buffer->length = | ||
1804 | (buf_len > MAX_TX_BUF_LEN) ? | ||
1805 | MAX_TX_BUF_LEN : buf_len; | ||
1806 | buf_len -= tx_buffer->length; | ||
1807 | |||
1808 | tx_buffer->dma = | ||
1809 | pci_map_page(adapter->pdev, frag->page, | ||
1810 | frag->page_offset + | ||
1811 | (i * MAX_TX_BUF_LEN), | ||
1812 | tx_buffer->length, | ||
1813 | PCI_DMA_TODEVICE); | ||
1814 | use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma); | ||
1815 | use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) | | ||
1816 | ((cpu_to_le32(tx_buffer->length) & | ||
1817 | TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT); | ||
1818 | } | ||
1819 | } | ||
1820 | |||
1821 | if ((tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK) | ||
1822 | /* note this one is a tcp header */ | ||
1823 | tpd->word3 |= 1 << TPD_HDRFLAG_SHIFT; | ||
1824 | /* The last tpd */ | ||
1825 | |||
1826 | use_tpd->word3 |= 1 << TPD_EOP_SHIFT; | ||
1827 | /* The last buffer info contain the skb address, | ||
1828 | so it will be free after unmap */ | ||
1829 | tx_buffer->skb = skb; | ||
1830 | } | ||
1831 | |||
/*
 * atl1e_tx_queue - hand prepared descriptors to the hardware
 * @adapter: board private structure
 * @count:   number of descriptors queued (accepted for symmetry with
 *           the caller's API but not used in this implementation)
 * @tpd:     first descriptor of the frame (unused here; the producer
 *           index in the ring is what the hardware reads)
 *
 * Publishes the new producer index via the mailbox register so the
 * NIC starts fetching the descriptors written by atl1e_tx_map().
 */
static void atl1e_tx_queue(struct atl1e_adapter *adapter, u16 count,
			   struct atl1e_tpd_desc *tpd)
{
	struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();
	AT_WRITE_REG(&adapter->hw, REG_MB_TPD_PROD_IDX, tx_ring->next_to_use);
}
1843 | |||
1844 | static int atl1e_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | ||
1845 | { | ||
1846 | struct atl1e_adapter *adapter = netdev_priv(netdev); | ||
1847 | unsigned long flags; | ||
1848 | u16 tpd_req = 1; | ||
1849 | struct atl1e_tpd_desc *tpd; | ||
1850 | |||
1851 | if (test_bit(__AT_DOWN, &adapter->flags)) { | ||
1852 | dev_kfree_skb_any(skb); | ||
1853 | return NETDEV_TX_OK; | ||
1854 | } | ||
1855 | |||
1856 | if (unlikely(skb->len <= 0)) { | ||
1857 | dev_kfree_skb_any(skb); | ||
1858 | return NETDEV_TX_OK; | ||
1859 | } | ||
1860 | tpd_req = atl1e_cal_tdp_req(skb); | ||
1861 | if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) | ||
1862 | return NETDEV_TX_LOCKED; | ||
1863 | |||
1864 | if (atl1e_tpd_avail(adapter) < tpd_req) { | ||
1865 | /* no enough descriptor, just stop queue */ | ||
1866 | netif_stop_queue(netdev); | ||
1867 | spin_unlock_irqrestore(&adapter->tx_lock, flags); | ||
1868 | return NETDEV_TX_BUSY; | ||
1869 | } | ||
1870 | |||
1871 | tpd = atl1e_get_tpd(adapter); | ||
1872 | |||
1873 | if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) { | ||
1874 | u16 vlan_tag = vlan_tx_tag_get(skb); | ||
1875 | u16 atl1e_vlan_tag; | ||
1876 | |||
1877 | tpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT; | ||
1878 | AT_VLAN_TAG_TO_TPD_TAG(vlan_tag, atl1e_vlan_tag); | ||
1879 | tpd->word2 |= (atl1e_vlan_tag & TPD_VLANTAG_MASK) << | ||
1880 | TPD_VLAN_SHIFT; | ||
1881 | } | ||
1882 | |||
1883 | if (skb->protocol == ntohs(ETH_P_8021Q)) | ||
1884 | tpd->word3 |= 1 << TPD_VL_TAGGED_SHIFT; | ||
1885 | |||
1886 | if (skb_network_offset(skb) != ETH_HLEN) | ||
1887 | tpd->word3 |= 1 << TPD_ETHTYPE_SHIFT; /* 802.3 frame */ | ||
1888 | |||
1889 | /* do TSO and check sum */ | ||
1890 | if (atl1e_tso_csum(adapter, skb, tpd) != 0) { | ||
1891 | spin_unlock_irqrestore(&adapter->tx_lock, flags); | ||
1892 | dev_kfree_skb_any(skb); | ||
1893 | return NETDEV_TX_OK; | ||
1894 | } | ||
1895 | |||
1896 | atl1e_tx_map(adapter, skb, tpd); | ||
1897 | atl1e_tx_queue(adapter, tpd_req, tpd); | ||
1898 | |||
1899 | netdev->trans_start = jiffies; | ||
1900 | spin_unlock_irqrestore(&adapter->tx_lock, flags); | ||
1901 | return NETDEV_TX_OK; | ||
1902 | } | ||
1903 | |||
/*
 * atl1e_free_irq - release the device IRQ line
 * @adapter: board private structure
 *
 * Counterpart of atl1e_request_irq(); MSI is disabled only after the
 * handler has been freed.
 */
static void atl1e_free_irq(struct atl1e_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->pdev->irq, netdev);

	if (adapter->have_msi)
		pci_disable_msi(adapter->pdev);
}
1913 | |||
/*
 * atl1e_request_irq - allocate the device interrupt
 * @adapter: board private structure
 *
 * Tries MSI first and falls back to a (possibly shared) legacy IRQ
 * line when MSI cannot be enabled.
 * Returns 0 on success, negative errno on failure.
 */
static int atl1e_request_irq(struct atl1e_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	int flags = 0;
	int err = 0;

	adapter->have_msi = true;
	err = pci_enable_msi(adapter->pdev);
	if (err) {
		dev_dbg(&pdev->dev,
			"Unable to allocate MSI interrupt Error: %d\n", err);
		adapter->have_msi = false;
	} else
		netdev->irq = pdev->irq;


	/* legacy interrupt lines may be shared with other devices */
	if (!adapter->have_msi)
		flags |= IRQF_SHARED;
	err = request_irq(adapter->pdev->irq, &atl1e_intr, flags,
			netdev->name, netdev);
	if (err) {
		dev_dbg(&pdev->dev,
			"Unable to allocate interrupt Error: %d\n", err);
		/* undo the MSI enable so the device is left untouched */
		if (adapter->have_msi)
			pci_disable_msi(adapter->pdev);
		return err;
	}
	dev_dbg(&pdev->dev, "atl1e_request_irq OK\n");
	return err;
}
1945 | |||
/*
 * atl1e_up - bring the interface up after open or reset
 * @adapter: board private structure
 *
 * Re-initializes the hardware, ring pointers, multicast and VLAN
 * state, then enables NAPI and interrupts.  A manual interrupt is
 * requested at the end so pending events are processed immediately.
 * Returns 0 on success, -EIO on failure.
 */
int atl1e_up(struct atl1e_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0;
	u32 val;

	/* hardware has been reset, we need to reload some things */
	err = atl1e_init_hw(&adapter->hw);
	if (err) {
		err = -EIO;
		return err;
	}
	atl1e_init_ring_ptrs(adapter);
	atl1e_set_multi(netdev);
	atl1e_restore_vlan(adapter);

	if (atl1e_configure(adapter)) {
		err = -EIO;
		goto err_up;
	}

	clear_bit(__AT_DOWN, &adapter->flags);
	napi_enable(&adapter->napi);
	atl1e_irq_enable(adapter);
	/* trigger a manual interrupt to kick off initial processing */
	val = AT_READ_REG(&adapter->hw, REG_MASTER_CTRL);
	AT_WRITE_REG(&adapter->hw, REG_MASTER_CTRL,
		      val | MASTER_CTRL_MANUAL_INT);

err_up:	/* reached with err == 0 on success as well */
	return err;
}
1977 | |||
/*
 * atl1e_down - take the interface down
 * @adapter: board private structure
 *
 * Counterpart of atl1e_up(): stops the tx queue, resets the MAC to
 * halt all RX/TX, then disables NAPI, timers and interrupts before
 * draining both descriptor rings.
 */
void atl1e_down(struct atl1e_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__AT_DOWN, &adapter->flags);

#ifdef NETIF_F_LLTX
	netif_stop_queue(netdev);
#else
	netif_tx_disable(netdev);
#endif

	/* reset MAC to disable all RX/TX */
	atl1e_reset_hw(&adapter->hw);
	msleep(1);

	napi_disable(&adapter->napi);
	atl1e_del_timer(adapter);
	atl1e_irq_disable(adapter);

	netif_carrier_off(netdev);
	adapter->link_speed = SPEED_0;
	adapter->link_duplex = -1;
	atl1e_clean_tx_ring(adapter);
	atl1e_clean_rx_ring(adapter);
}
2006 | |||
/*
 * atl1e_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 */
static int atl1e_open(struct net_device *netdev)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	int err;

	/* disallow open during test */
	if (test_bit(__AT_TESTING, &adapter->flags))
		return -EBUSY;

	/* allocate rx/tx dma buffer & descriptors */
	atl1e_init_ring_resources(adapter);
	err = atl1e_setup_ring_resources(adapter);
	if (unlikely(err))
		return err;

	err = atl1e_request_irq(adapter);
	if (unlikely(err))
		goto err_req_irq;

	err = atl1e_up(adapter);
	if (unlikely(err))
		goto err_up;

	return 0;

	/* unwind in reverse acquisition order */
err_up:
	atl1e_free_irq(adapter);
err_req_irq:
	atl1e_free_ring_resources(adapter);
	atl1e_reset_hw(&adapter->hw);

	return err;
}
2052 | |||
/*
 * atl1e_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 */
static int atl1e_close(struct net_device *netdev)
{
	struct atl1e_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
	atl1e_down(adapter);
	atl1e_free_irq(adapter);
	atl1e_free_ring_resources(adapter);

	return 0;
}
2075 | |||
2076 | static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state) | ||
2077 | { | ||
2078 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
2079 | struct atl1e_adapter *adapter = netdev_priv(netdev); | ||
2080 | struct atl1e_hw *hw = &adapter->hw; | ||
2081 | u32 ctrl = 0; | ||
2082 | u32 mac_ctrl_data = 0; | ||
2083 | u32 wol_ctrl_data = 0; | ||
2084 | u16 mii_advertise_data = 0; | ||
2085 | u16 mii_bmsr_data = 0; | ||
2086 | u16 mii_intr_status_data = 0; | ||
2087 | u32 wufc = adapter->wol; | ||
2088 | u32 i; | ||
2089 | #ifdef CONFIG_PM | ||
2090 | int retval = 0; | ||
2091 | #endif | ||
2092 | |||
2093 | if (netif_running(netdev)) { | ||
2094 | WARN_ON(test_bit(__AT_RESETTING, &adapter->flags)); | ||
2095 | atl1e_down(adapter); | ||
2096 | } | ||
2097 | netif_device_detach(netdev); | ||
2098 | |||
2099 | #ifdef CONFIG_PM | ||
2100 | retval = pci_save_state(pdev); | ||
2101 | if (retval) | ||
2102 | return retval; | ||
2103 | #endif | ||
2104 | |||
2105 | if (wufc) { | ||
2106 | /* get link status */ | ||
2107 | atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data); | ||
2108 | atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data); | ||
2109 | |||
2110 | mii_advertise_data = MII_AR_10T_HD_CAPS; | ||
2111 | |||
2112 | if ((atl1e_write_phy_reg(hw, MII_AT001_CR, 0) != 0) || | ||
2113 | (atl1e_write_phy_reg(hw, | ||
2114 | MII_ADVERTISE, mii_advertise_data) != 0) || | ||
2115 | (atl1e_phy_commit(hw)) != 0) { | ||
2116 | dev_dbg(&pdev->dev, "set phy register failed\n"); | ||
2117 | goto wol_dis; | ||
2118 | } | ||
2119 | |||
2120 | hw->phy_configured = false; /* re-init PHY when resume */ | ||
2121 | |||
2122 | /* turn on magic packet wol */ | ||
2123 | if (wufc & AT_WUFC_MAG) | ||
2124 | wol_ctrl_data |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN; | ||
2125 | |||
2126 | if (wufc & AT_WUFC_LNKC) { | ||
2127 | /* if orignal link status is link, just wait for retrive link */ | ||
2128 | if (mii_bmsr_data & BMSR_LSTATUS) { | ||
2129 | for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) { | ||
2130 | msleep(100); | ||
2131 | atl1e_read_phy_reg(hw, MII_BMSR, | ||
2132 | (u16 *)&mii_bmsr_data); | ||
2133 | if (mii_bmsr_data & BMSR_LSTATUS) | ||
2134 | break; | ||
2135 | } | ||
2136 | |||
2137 | if ((mii_bmsr_data & BMSR_LSTATUS) == 0) | ||
2138 | dev_dbg(&pdev->dev, | ||
2139 | "%s: Link may change" | ||
2140 | "when suspend\n", | ||
2141 | atl1e_driver_name); | ||
2142 | } | ||
2143 | wol_ctrl_data |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN; | ||
2144 | /* only link up can wake up */ | ||
2145 | if (atl1e_write_phy_reg(hw, MII_INT_CTRL, 0x400) != 0) { | ||
2146 | dev_dbg(&pdev->dev, "%s: read write phy " | ||
2147 | "register failed.\n", | ||
2148 | atl1e_driver_name); | ||
2149 | goto wol_dis; | ||
2150 | } | ||
2151 | } | ||
2152 | /* clear phy interrupt */ | ||
2153 | atl1e_read_phy_reg(hw, MII_INT_STATUS, &mii_intr_status_data); | ||
2154 | /* Config MAC Ctrl register */ | ||
2155 | mac_ctrl_data = MAC_CTRL_RX_EN; | ||
2156 | /* set to 10/100M halt duplex */ | ||
2157 | mac_ctrl_data |= MAC_CTRL_SPEED_10_100 << MAC_CTRL_SPEED_SHIFT; | ||
2158 | mac_ctrl_data |= (((u32)adapter->hw.preamble_len & | ||
2159 | MAC_CTRL_PRMLEN_MASK) << | ||
2160 | MAC_CTRL_PRMLEN_SHIFT); | ||
2161 | |||
2162 | if (adapter->vlgrp) | ||
2163 | mac_ctrl_data |= MAC_CTRL_RMV_VLAN; | ||
2164 | |||
2165 | /* magic packet maybe Broadcast&multicast&Unicast frame */ | ||
2166 | if (wufc & AT_WUFC_MAG) | ||
2167 | mac_ctrl_data |= MAC_CTRL_BC_EN; | ||
2168 | |||
2169 | dev_dbg(&pdev->dev, | ||
2170 | "%s: suspend MAC=0x%x\n", | ||
2171 | atl1e_driver_name, mac_ctrl_data); | ||
2172 | |||
2173 | AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data); | ||
2174 | AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data); | ||
2175 | /* pcie patch */ | ||
2176 | ctrl = AT_READ_REG(hw, REG_PCIE_PHYMISC); | ||
2177 | ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; | ||
2178 | AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl); | ||
2179 | pci_enable_wake(pdev, pci_choose_state(pdev, state), 1); | ||
2180 | goto suspend_exit; | ||
2181 | } | ||
2182 | wol_dis: | ||
2183 | |||
2184 | /* WOL disabled */ | ||
2185 | AT_WRITE_REG(hw, REG_WOL_CTRL, 0); | ||
2186 | |||
2187 | /* pcie patch */ | ||
2188 | ctrl = AT_READ_REG(hw, REG_PCIE_PHYMISC); | ||
2189 | ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; | ||
2190 | AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl); | ||
2191 | |||
2192 | atl1e_force_ps(hw); | ||
2193 | hw->phy_configured = false; /* re-init PHY when resume */ | ||
2194 | |||
2195 | pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); | ||
2196 | |||
2197 | suspend_exit: | ||
2198 | |||
2199 | if (netif_running(netdev)) | ||
2200 | atl1e_free_irq(adapter); | ||
2201 | |||
2202 | pci_disable_device(pdev); | ||
2203 | |||
2204 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); | ||
2205 | |||
2206 | return 0; | ||
2207 | } | ||
2208 | |||
#ifdef CONFIG_PM
/*
 * atl1e_resume - restore the device after a system sleep
 * @pdev: PCI device being resumed
 *
 * Powers the device back to D0, clears any pending WOL status and,
 * if the interface was running, re-requests the IRQ and brings the
 * hardware back up.  Returns 0 on success, negative errno otherwise.
 */
static int atl1e_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct atl1e_adapter *adapter = netdev_priv(netdev);
	/* NOTE(review): err holds negative errnos; plain int would be
	 * the natural type here — confirm before changing */
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "ATL1e: Cannot enable PCI"
				" device from suspend\n");
		return err;
	}

	pci_set_master(pdev);

	AT_READ_REG(&adapter->hw, REG_WOL_CTRL); /* clear WOL status */

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);

	/* err is 0 here when the interface is not running (set by the
	 * successful pci_enable_device above), so the unconditional
	 * check below only trips on a request_irq failure */
	if (netif_running(netdev))
		err = atl1e_request_irq(adapter);
	if (err)
		return err;

	atl1e_reset_hw(&adapter->hw);

	if (netif_running(netdev))
		atl1e_up(adapter);

	netif_device_attach(netdev);

	return 0;
}
#endif
2250 | |||
/* atl1e_shutdown - PCI shutdown hook; reuse the suspend path so WOL
 * is armed (if configured) and the device is quiesced at power-off.
 */
static void atl1e_shutdown(struct pci_dev *pdev)
{
	atl1e_suspend(pdev, PMSG_SUSPEND);
}
2255 | |||
2256 | static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev) | ||
2257 | { | ||
2258 | SET_NETDEV_DEV(netdev, &pdev->dev); | ||
2259 | pci_set_drvdata(pdev, netdev); | ||
2260 | |||
2261 | netdev->irq = pdev->irq; | ||
2262 | netdev->open = &atl1e_open; | ||
2263 | netdev->stop = &atl1e_close; | ||
2264 | netdev->hard_start_xmit = &atl1e_xmit_frame; | ||
2265 | netdev->get_stats = &atl1e_get_stats; | ||
2266 | netdev->set_multicast_list = &atl1e_set_multi; | ||
2267 | netdev->set_mac_address = &atl1e_set_mac_addr; | ||
2268 | netdev->change_mtu = &atl1e_change_mtu; | ||
2269 | netdev->do_ioctl = &atl1e_ioctl; | ||
2270 | netdev->tx_timeout = &atl1e_tx_timeout; | ||
2271 | netdev->watchdog_timeo = AT_TX_WATCHDOG; | ||
2272 | netdev->vlan_rx_register = atl1e_vlan_rx_register; | ||
2273 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
2274 | netdev->poll_controller = atl1e_netpoll; | ||
2275 | #endif | ||
2276 | atl1e_set_ethtool_ops(netdev); | ||
2277 | |||
2278 | netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM | | ||
2279 | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; | ||
2280 | netdev->features |= NETIF_F_LLTX; | ||
2281 | netdev->features |= NETIF_F_TSO; | ||
2282 | netdev->features |= NETIF_F_TSO6; | ||
2283 | |||
2284 | return 0; | ||
2285 | } | ||
2286 | |||
/*
 * atl1e_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in atl1e_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * atl1e_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 */
static int __devinit atl1e_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct atl1e_adapter *adapter = NULL;
	static int cards_found;	/* board index, shared across probes */

	int err = 0;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		return err;
	}

	/*
	 * The atl1e chip can DMA to 64-bit addresses, but it uses a single
	 * shared register for the high 32 bits, so only a single, aligned,
	 * 4 GB physical address range can be used at a time.
	 *
	 * Supporting 64-bit DMA on this hardware is more trouble than it's
	 * worth.  It is far easier to limit to 32-bit DMA than update
	 * various kernel subsystems to support the mechanics required by a
	 * fixed-high-32-bit system.
	 */
	if ((pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) ||
	    (pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK) != 0)) {
		dev_err(&pdev->dev, "No usable DMA configuration,aborting\n");
		goto err_dma;
	}

	err = pci_request_regions(pdev, atl1e_driver_name);
	if (err) {
		dev_err(&pdev->dev, "cannot obtain PCI resources\n");
		goto err_pci_reg;
	}

	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct atl1e_adapter));
	if (netdev == NULL) {
		err = -ENOMEM;
		dev_err(&pdev->dev, "etherdev alloc failed\n");
		goto err_alloc_etherdev;
	}

	err = atl1e_init_netdev(netdev, pdev);
	if (err) {
		dev_err(&pdev->dev, "init netdevice failed\n");
		goto err_init_netdev;
	}
	/* wire up the private structure */
	adapter = netdev_priv(netdev);
	adapter->bd_number = cards_found;
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->hw.adapter = adapter;
	adapter->hw.hw_addr = pci_iomap(pdev, BAR_0, 0);
	if (!adapter->hw.hw_addr) {
		err = -EIO;
		dev_err(&pdev->dev, "cannot map device registers\n");
		goto err_ioremap;
	}
	netdev->base_addr = (unsigned long)adapter->hw.hw_addr;

	/* init mii data */
	adapter->mii.dev = netdev;
	adapter->mii.mdio_read  = atl1e_mdio_read;
	adapter->mii.mdio_write = atl1e_mdio_write;
	adapter->mii.phy_id_mask = 0x1f;
	adapter->mii.reg_num_mask = MDIO_REG_ADDR_MASK;

	netif_napi_add(netdev, &adapter->napi, atl1e_clean, 64);

	init_timer(&adapter->phy_config_timer);
	adapter->phy_config_timer.function = &atl1e_phy_config;
	adapter->phy_config_timer.data = (unsigned long) adapter;

	/* get user settings */
	atl1e_check_options(adapter);
	/*
	 * Mark all PCI regions associated with PCI device
	 * pdev as being reserved by owner atl1e_driver_name
	 * Enables bus-mastering on the device and calls
	 * pcibios_set_master to do the needed arch specific settings
	 */
	atl1e_setup_pcicmd(pdev);
	/* setup the private structure */
	err = atl1e_sw_init(adapter);
	if (err) {
		dev_err(&pdev->dev, "net device private data init failed\n");
		goto err_sw_init;
	}

	/* Init GPHY as early as possible due to power saving issue  */
	spin_lock(&adapter->mdio_lock);
	atl1e_phy_init(&adapter->hw);
	spin_unlock(&adapter->mdio_lock);
	/* reset the controller to
	 * put the device in a known good starting state */
	err = atl1e_reset_hw(&adapter->hw);
	if (err) {
		err = -EIO;
		goto err_reset;
	}

	if (atl1e_read_mac_addr(&adapter->hw) != 0) {
		err = -EIO;
		dev_err(&pdev->dev, "get mac address failed\n");
		goto err_eeprom;
	}

	memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
	dev_dbg(&pdev->dev, "mac address : %02x-%02x-%02x-%02x-%02x-%02x\n",
			adapter->hw.mac_addr[0], adapter->hw.mac_addr[1],
			adapter->hw.mac_addr[2], adapter->hw.mac_addr[3],
			adapter->hw.mac_addr[4], adapter->hw.mac_addr[5]);

	INIT_WORK(&adapter->reset_task, atl1e_reset_task);
	INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task);
	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "register netdevice failed\n");
		goto err_register;
	}

	/* assume we have no link for now */
	netif_stop_queue(netdev);
	netif_carrier_off(netdev);

	cards_found++;

	return 0;

	/* unwind in reverse acquisition order; the upper four labels
	 * share the iounmap because the mapping exists at all of them */
err_reset:
err_register:
err_sw_init:
err_eeprom:
	iounmap(adapter->hw.hw_addr);
err_init_netdev:
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
2447 | |||
/*
 * atl1e_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * atl1e_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 */
static void __devexit atl1e_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct atl1e_adapter *adapter = netdev_priv(netdev);

	/*
	 * flush_scheduled work may reschedule our watchdog task, so
	 * explicitly disable watchdog tasks from being rescheduled
	 */
	set_bit(__AT_DOWN, &adapter->flags);

	atl1e_del_timer(adapter);
	atl1e_cancel_work(adapter);

	/* release resources in reverse order of atl1e_probe() */
	unregister_netdev(netdev);
	atl1e_free_ring_resources(adapter);
	atl1e_force_ps(&adapter->hw);
	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);
	free_netdev(netdev);
	pci_disable_device(pdev);
}
2479 | |||
2480 | /* | ||
2481 | * atl1e_io_error_detected - called when PCI error is detected | ||
2482 | * @pdev: Pointer to PCI device | ||
2483 | * @state: The current pci connection state | ||
2484 | * | ||
2485 | * This function is called after a PCI bus error affecting | ||
2486 | * this device has been detected. | ||
2487 | */ | ||
2488 | static pci_ers_result_t | ||
2489 | atl1e_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) | ||
2490 | { | ||
2491 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
2492 | struct atl1e_adapter *adapter = netdev->priv; | ||
2493 | |||
2494 | netif_device_detach(netdev); | ||
2495 | |||
2496 | if (netif_running(netdev)) | ||
2497 | atl1e_down(adapter); | ||
2498 | |||
2499 | pci_disable_device(pdev); | ||
2500 | |||
2501 | /* Request a slot slot reset. */ | ||
2502 | return PCI_ERS_RESULT_NEED_RESET; | ||
2503 | } | ||
2504 | |||
2505 | /* | ||
2506 | * atl1e_io_slot_reset - called after the pci bus has been reset. | ||
2507 | * @pdev: Pointer to PCI device | ||
2508 | * | ||
2509 | * Restart the card from scratch, as if from a cold-boot. Implementation | ||
2510 | * resembles the first-half of the e1000_resume routine. | ||
2511 | */ | ||
2512 | static pci_ers_result_t atl1e_io_slot_reset(struct pci_dev *pdev) | ||
2513 | { | ||
2514 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
2515 | struct atl1e_adapter *adapter = netdev->priv; | ||
2516 | |||
2517 | if (pci_enable_device(pdev)) { | ||
2518 | dev_err(&pdev->dev, | ||
2519 | "ATL1e: Cannot re-enable PCI device after reset.\n"); | ||
2520 | return PCI_ERS_RESULT_DISCONNECT; | ||
2521 | } | ||
2522 | pci_set_master(pdev); | ||
2523 | |||
2524 | pci_enable_wake(pdev, PCI_D3hot, 0); | ||
2525 | pci_enable_wake(pdev, PCI_D3cold, 0); | ||
2526 | |||
2527 | atl1e_reset_hw(&adapter->hw); | ||
2528 | |||
2529 | return PCI_ERS_RESULT_RECOVERED; | ||
2530 | } | ||
2531 | |||
2532 | /* | ||
2533 | * atl1e_io_resume - called when traffic can start flowing again. | ||
2534 | * @pdev: Pointer to PCI device | ||
2535 | * | ||
2536 | * This callback is called when the error recovery driver tells us that | ||
2537 | * its OK to resume normal operation. Implementation resembles the | ||
2538 | * second-half of the atl1e_resume routine. | ||
2539 | */ | ||
2540 | static void atl1e_io_resume(struct pci_dev *pdev) | ||
2541 | { | ||
2542 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
2543 | struct atl1e_adapter *adapter = netdev->priv; | ||
2544 | |||
2545 | if (netif_running(netdev)) { | ||
2546 | if (atl1e_up(adapter)) { | ||
2547 | dev_err(&pdev->dev, | ||
2548 | "ATL1e: can't bring device back up after reset\n"); | ||
2549 | return; | ||
2550 | } | ||
2551 | } | ||
2552 | |||
2553 | netif_device_attach(netdev); | ||
2554 | } | ||
2555 | |||
/* PCI error recovery callbacks (AER) */
static struct pci_error_handlers atl1e_err_handler = {
	.error_detected = atl1e_io_error_detected,
	.slot_reset = atl1e_io_slot_reset,
	.resume = atl1e_io_resume,
};
2561 | |||
/* PCI driver registration table */
static struct pci_driver atl1e_driver = {
	.name     = atl1e_driver_name,
	.id_table = atl1e_pci_tbl,
	.probe    = atl1e_probe,
	.remove   = __devexit_p(atl1e_remove),
	/* Power Management Hooks */
#ifdef CONFIG_PM
	.suspend  = atl1e_suspend,
	.resume   = atl1e_resume,
#endif
	.shutdown = atl1e_shutdown,
	.err_handler = &atl1e_err_handler
};
2575 | |||
/*
 * atl1e_init_module - Driver Registration Routine
 *
 * atl1e_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 */
static int __init atl1e_init_module(void)
{
	return pci_register_driver(&atl1e_driver);
}
2586 | |||
/*
 * atl1e_exit_module - Driver Exit Cleanup Routine
 *
 * atl1e_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit atl1e_exit_module(void)
{
	pci_unregister_driver(&atl1e_driver);
}
2597 | |||
2598 | module_init(atl1e_init_module); | ||
2599 | module_exit(atl1e_exit_module); | ||
diff --git a/drivers/net/atl1e/atl1e_param.c b/drivers/net/atl1e/atl1e_param.c new file mode 100644 index 000000000000..f72abb34b0cd --- /dev/null +++ b/drivers/net/atl1e/atl1e_param.c | |||
@@ -0,0 +1,263 @@ | |||
1 | /* | ||
2 | * Copyright(c) 2007 Atheros Corporation. All rights reserved. | ||
3 | * | ||
4 | * Derived from Intel e1000 driver | ||
5 | * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License as published by the Free | ||
9 | * Software Foundation; either version 2 of the License, or (at your option) | ||
10 | * any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along with | ||
18 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
19 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
20 | */ | ||
21 | |||
22 | #include <linux/netdevice.h> | ||
23 | |||
24 | #include "atl1e.h" | ||
25 | |||
26 | /* This is the only thing that needs to be changed to adjust the | ||
27 | * maximum number of ports that the driver can manage. | ||
28 | */ | ||
29 | |||
30 | #define ATL1E_MAX_NIC 32 | ||
31 | |||
32 | #define OPTION_UNSET -1 | ||
33 | #define OPTION_DISABLED 0 | ||
34 | #define OPTION_ENABLED 1 | ||
35 | |||
36 | /* All parameters are treated the same, as an integer array of values. | ||
37 | * This macro just reduces the need to repeat the same declaration code | ||
38 | * over and over (plus this helps to avoid typo bugs). | ||
39 | */ | ||
40 | #define ATL1E_PARAM_INIT { [0 ... ATL1E_MAX_NIC] = OPTION_UNSET } | ||
41 | |||
42 | #define ATL1E_PARAM(x, desc) \ | ||
43 | static int __devinitdata x[ATL1E_MAX_NIC + 1] = ATL1E_PARAM_INIT; \ | ||
44 | static int num_##x; \ | ||
45 | module_param_array_named(x, x, int, &num_##x, 0); \ | ||
46 | MODULE_PARM_DESC(x, desc); | ||
47 | |||
48 | /* Transmit Memory count | ||
49 | * | ||
50 | * Valid Range: 64-2048 | ||
51 | * | ||
52 | * Default Value: 128 | ||
53 | */ | ||
54 | #define ATL1E_MIN_TX_DESC_CNT 32 | ||
55 | #define ATL1E_MAX_TX_DESC_CNT 1020 | ||
56 | #define ATL1E_DEFAULT_TX_DESC_CNT 128 | ||
57 | ATL1E_PARAM(tx_desc_cnt, "Transmit description count"); | ||
58 | |||
59 | /* Receive Memory Block Count | ||
60 | * | ||
61 | * Valid Range: 16-512 | ||
62 | * | ||
63 | * Default Value: 128 | ||
64 | */ | ||
65 | #define ATL1E_MIN_RX_MEM_SIZE 8 /* 8KB */ | ||
66 | #define ATL1E_MAX_RX_MEM_SIZE 1024 /* 1MB */ | ||
67 | #define ATL1E_DEFAULT_RX_MEM_SIZE 256 /* 128KB */ | ||
68 | ATL1E_PARAM(rx_mem_size, "memory size of rx buffer(KB)"); | ||
69 | |||
70 | /* User Specified MediaType Override | ||
71 | * | ||
72 | * Valid Range: 0-5 | ||
73 | * - 0 - auto-negotiate at all supported speeds | ||
74 | * - 1 - only link at 100Mbps Full Duplex | ||
75 | * - 2 - only link at 100Mbps Half Duplex | ||
76 | * - 3 - only link at 10Mbps Full Duplex | ||
77 | * - 4 - only link at 10Mbps Half Duplex | ||
78 | * Default Value: 0 | ||
79 | */ | ||
80 | |||
81 | ATL1E_PARAM(media_type, "MediaType Select"); | ||
82 | |||
83 | /* Interrupt Moderate Timer in units of 2 us | ||
84 | * | ||
85 | * Valid Range: 10-65535 | ||
86 | * | ||
87 | * Default Value: 45000(90ms) | ||
88 | */ | ||
89 | #define INT_MOD_DEFAULT_CNT 100 /* 200us */ | ||
90 | #define INT_MOD_MAX_CNT 65000 | ||
91 | #define INT_MOD_MIN_CNT 50 | ||
92 | ATL1E_PARAM(int_mod_timer, "Interrupt Moderator Timer"); | ||
93 | |||
94 | #define AUTONEG_ADV_DEFAULT 0x2F | ||
95 | #define AUTONEG_ADV_MASK 0x2F | ||
96 | #define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL | ||
97 | |||
98 | #define FLASH_VENDOR_DEFAULT 0 | ||
99 | #define FLASH_VENDOR_MIN 0 | ||
100 | #define FLASH_VENDOR_MAX 2 | ||
101 | |||
102 | struct atl1e_option { | ||
103 | enum { enable_option, range_option, list_option } type; | ||
104 | char *name; | ||
105 | char *err; | ||
106 | int def; | ||
107 | union { | ||
108 | struct { /* range_option info */ | ||
109 | int min; | ||
110 | int max; | ||
111 | } r; | ||
112 | struct { /* list_option info */ | ||
113 | int nr; | ||
114 | struct atl1e_opt_list { int i; char *str; } *p; | ||
115 | } l; | ||
116 | } arg; | ||
117 | }; | ||
118 | |||
119 | static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt, struct pci_dev *pdev) | ||
120 | { | ||
121 | if (*value == OPTION_UNSET) { | ||
122 | *value = opt->def; | ||
123 | return 0; | ||
124 | } | ||
125 | |||
126 | switch (opt->type) { | ||
127 | case enable_option: | ||
128 | switch (*value) { | ||
129 | case OPTION_ENABLED: | ||
130 | dev_info(&pdev->dev, "%s Enabled\n", opt->name); | ||
131 | return 0; | ||
132 | case OPTION_DISABLED: | ||
133 | dev_info(&pdev->dev, "%s Disabled\n", opt->name); | ||
134 | return 0; | ||
135 | } | ||
136 | break; | ||
137 | case range_option: | ||
138 | if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { | ||
139 | dev_info(&pdev->dev, "%s set to %i\n", opt->name, *value); | ||
140 | return 0; | ||
141 | } | ||
142 | break; | ||
143 | case list_option:{ | ||
144 | int i; | ||
145 | struct atl1e_opt_list *ent; | ||
146 | |||
147 | for (i = 0; i < opt->arg.l.nr; i++) { | ||
148 | ent = &opt->arg.l.p[i]; | ||
149 | if (*value == ent->i) { | ||
150 | if (ent->str[0] != '\0') | ||
151 | dev_info(&pdev->dev, "%s\n", | ||
152 | ent->str); | ||
153 | return 0; | ||
154 | } | ||
155 | } | ||
156 | break; | ||
157 | } | ||
158 | default: | ||
159 | BUG(); | ||
160 | } | ||
161 | |||
162 | dev_info(&pdev->dev, "Invalid %s specified (%i) %s\n", | ||
163 | opt->name, *value, opt->err); | ||
164 | *value = opt->def; | ||
165 | return -1; | ||
166 | } | ||
167 | |||
168 | /* | ||
169 | * atl1e_check_options - Range Checking for Command Line Parameters | ||
170 | * @adapter: board private structure | ||
171 | * | ||
172 | * This routine checks all command line parameters for valid user | ||
173 | * input. If an invalid value is given, or if no user specified | ||
174 | * value exists, a default value is used. The final value is stored | ||
175 | * in a variable in the adapter structure. | ||
176 | */ | ||
177 | void __devinit atl1e_check_options(struct atl1e_adapter *adapter) | ||
178 | { | ||
179 | struct pci_dev *pdev = adapter->pdev; | ||
180 | int bd = adapter->bd_number; | ||
181 | if (bd >= ATL1E_MAX_NIC) { | ||
182 | dev_notice(&pdev->dev, "no configuration for board #%i\n", bd); | ||
183 | dev_notice(&pdev->dev, "Using defaults for all values\n"); | ||
184 | } | ||
185 | |||
186 | { /* Transmit Ring Size */ | ||
187 | struct atl1e_option opt = { | ||
188 | .type = range_option, | ||
189 | .name = "Transmit Ddescription Count", | ||
190 | .err = "using default of " | ||
191 | __MODULE_STRING(ATL1E_DEFAULT_TX_DESC_CNT), | ||
192 | .def = ATL1E_DEFAULT_TX_DESC_CNT, | ||
193 | .arg = { .r = { .min = ATL1E_MIN_TX_DESC_CNT, | ||
194 | .max = ATL1E_MAX_TX_DESC_CNT} } | ||
195 | }; | ||
196 | int val; | ||
197 | if (num_tx_desc_cnt > bd) { | ||
198 | val = tx_desc_cnt[bd]; | ||
199 | atl1e_validate_option(&val, &opt, pdev); | ||
200 | adapter->tx_ring.count = (u16) val & 0xFFFC; | ||
201 | } else | ||
202 | adapter->tx_ring.count = (u16)opt.def; | ||
203 | } | ||
204 | |||
205 | { /* Receive Memory Block Count */ | ||
206 | struct atl1e_option opt = { | ||
207 | .type = range_option, | ||
208 | .name = "Memory size of rx buffer(KB)", | ||
209 | .err = "using default of " | ||
210 | __MODULE_STRING(ATL1E_DEFAULT_RX_MEM_SIZE), | ||
211 | .def = ATL1E_DEFAULT_RX_MEM_SIZE, | ||
212 | .arg = { .r = { .min = ATL1E_MIN_RX_MEM_SIZE, | ||
213 | .max = ATL1E_MAX_RX_MEM_SIZE} } | ||
214 | }; | ||
215 | int val; | ||
216 | if (num_rx_mem_size > bd) { | ||
217 | val = rx_mem_size[bd]; | ||
218 | atl1e_validate_option(&val, &opt, pdev); | ||
219 | adapter->rx_ring.page_size = (u32)val * 1024; | ||
220 | } else { | ||
221 | adapter->rx_ring.page_size = (u32)opt.def * 1024; | ||
222 | } | ||
223 | } | ||
224 | |||
225 | { /* Interrupt Moderate Timer */ | ||
226 | struct atl1e_option opt = { | ||
227 | .type = range_option, | ||
228 | .name = "Interrupt Moderate Timer", | ||
229 | .err = "using default of " | ||
230 | __MODULE_STRING(INT_MOD_DEFAULT_CNT), | ||
231 | .def = INT_MOD_DEFAULT_CNT, | ||
232 | .arg = { .r = { .min = INT_MOD_MIN_CNT, | ||
233 | .max = INT_MOD_MAX_CNT} } | ||
234 | } ; | ||
235 | int val; | ||
236 | if (num_int_mod_timer > bd) { | ||
237 | val = int_mod_timer[bd]; | ||
238 | atl1e_validate_option(&val, &opt, pdev); | ||
239 | adapter->hw.imt = (u16) val; | ||
240 | } else | ||
241 | adapter->hw.imt = (u16)(opt.def); | ||
242 | } | ||
243 | |||
244 | { /* MediaType */ | ||
245 | struct atl1e_option opt = { | ||
246 | .type = range_option, | ||
247 | .name = "Speed/Duplex Selection", | ||
248 | .err = "using default of " | ||
249 | __MODULE_STRING(MEDIA_TYPE_AUTO_SENSOR), | ||
250 | .def = MEDIA_TYPE_AUTO_SENSOR, | ||
251 | .arg = { .r = { .min = MEDIA_TYPE_AUTO_SENSOR, | ||
252 | .max = MEDIA_TYPE_10M_HALF} } | ||
253 | } ; | ||
254 | int val; | ||
255 | if (num_media_type > bd) { | ||
256 | val = media_type[bd]; | ||
257 | atl1e_validate_option(&val, &opt, pdev); | ||
258 | adapter->hw.media_type = (u16) val; | ||
259 | } else | ||
260 | adapter->hw.media_type = (u16)(opt.def); | ||
261 | |||
262 | } | ||
263 | } | ||
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c index 3ab61e40e86a..cb8be490e5ae 100644 --- a/drivers/net/au1000_eth.c +++ b/drivers/net/au1000_eth.c | |||
@@ -911,9 +911,8 @@ au1000_adjust_link(struct net_device *dev) | |||
911 | if(phydev->link != aup->old_link) { | 911 | if(phydev->link != aup->old_link) { |
912 | // link state changed | 912 | // link state changed |
913 | 913 | ||
914 | if (phydev->link) // link went up | 914 | if (!phydev->link) { |
915 | netif_tx_schedule_all(dev); | 915 | /* link went down */ |
916 | else { // link went down | ||
917 | aup->old_speed = 0; | 916 | aup->old_speed = 0; |
918 | aup->old_duplex = -1; | 917 | aup->old_duplex = -1; |
919 | } | 918 | } |
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c index a6a3da89f590..a8ec60e1ed75 100644 --- a/drivers/net/bfin_mac.c +++ b/drivers/net/bfin_mac.c | |||
@@ -357,7 +357,6 @@ static void bfin_mac_adjust_link(struct net_device *dev) | |||
357 | if (!lp->old_link) { | 357 | if (!lp->old_link) { |
358 | new_state = 1; | 358 | new_state = 1; |
359 | lp->old_link = 1; | 359 | lp->old_link = 1; |
360 | netif_tx_schedule_all(dev); | ||
361 | } | 360 | } |
362 | } else if (lp->old_link) { | 361 | } else if (lp->old_link) { |
363 | new_state = 1; | 362 | new_state = 1; |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 9737c06045d6..a641eeaa2a2f 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -5041,6 +5041,7 @@ static int bond_check_params(struct bond_params *params) | |||
5041 | } | 5041 | } |
5042 | 5042 | ||
5043 | static struct lock_class_key bonding_netdev_xmit_lock_key; | 5043 | static struct lock_class_key bonding_netdev_xmit_lock_key; |
5044 | static struct lock_class_key bonding_netdev_addr_lock_key; | ||
5044 | 5045 | ||
5045 | static void bond_set_lockdep_class_one(struct net_device *dev, | 5046 | static void bond_set_lockdep_class_one(struct net_device *dev, |
5046 | struct netdev_queue *txq, | 5047 | struct netdev_queue *txq, |
@@ -5052,6 +5053,8 @@ static void bond_set_lockdep_class_one(struct net_device *dev, | |||
5052 | 5053 | ||
5053 | static void bond_set_lockdep_class(struct net_device *dev) | 5054 | static void bond_set_lockdep_class(struct net_device *dev) |
5054 | { | 5055 | { |
5056 | lockdep_set_class(&dev->addr_list_lock, | ||
5057 | &bonding_netdev_addr_lock_key); | ||
5055 | netdev_for_each_tx_queue(dev, bond_set_lockdep_class_one, NULL); | 5058 | netdev_for_each_tx_queue(dev, bond_set_lockdep_class_one, NULL); |
5056 | } | 5059 | } |
5057 | 5060 | ||
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c index fbd4280c102c..a7800e559090 100644 --- a/drivers/net/cpmac.c +++ b/drivers/net/cpmac.c | |||
@@ -945,10 +945,8 @@ static void cpmac_adjust_link(struct net_device *dev) | |||
945 | if (!priv->oldlink) { | 945 | if (!priv->oldlink) { |
946 | new_state = 1; | 946 | new_state = 1; |
947 | priv->oldlink = 1; | 947 | priv->oldlink = 1; |
948 | netif_tx_schedule_all(dev); | ||
949 | } | 948 | } |
950 | } else if (priv->oldlink) { | 949 | } else if (priv->oldlink) { |
951 | netif_tx_stop_all_queues(dev); | ||
952 | new_state = 1; | 950 | new_state = 1; |
953 | priv->oldlink = 0; | 951 | priv->oldlink = 0; |
954 | priv->oldspeed = 0; | 952 | priv->oldspeed = 0; |
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c index 952e10d686ec..0b0f1c407a7e 100644 --- a/drivers/net/dm9000.c +++ b/drivers/net/dm9000.c | |||
@@ -888,19 +888,22 @@ dm9000_rx(struct net_device *dev) | |||
888 | dev_dbg(db->dev, "RST: RX Len:%x\n", RxLen); | 888 | dev_dbg(db->dev, "RST: RX Len:%x\n", RxLen); |
889 | } | 889 | } |
890 | 890 | ||
891 | if (rxhdr.RxStatus & 0xbf) { | 891 | /* rxhdr.RxStatus is identical to RSR register. */ |
892 | if (rxhdr.RxStatus & (RSR_FOE | RSR_CE | RSR_AE | | ||
893 | RSR_PLE | RSR_RWTO | | ||
894 | RSR_LCS | RSR_RF)) { | ||
892 | GoodPacket = false; | 895 | GoodPacket = false; |
893 | if (rxhdr.RxStatus & 0x01) { | 896 | if (rxhdr.RxStatus & RSR_FOE) { |
894 | if (netif_msg_rx_err(db)) | 897 | if (netif_msg_rx_err(db)) |
895 | dev_dbg(db->dev, "fifo error\n"); | 898 | dev_dbg(db->dev, "fifo error\n"); |
896 | dev->stats.rx_fifo_errors++; | 899 | dev->stats.rx_fifo_errors++; |
897 | } | 900 | } |
898 | if (rxhdr.RxStatus & 0x02) { | 901 | if (rxhdr.RxStatus & RSR_CE) { |
899 | if (netif_msg_rx_err(db)) | 902 | if (netif_msg_rx_err(db)) |
900 | dev_dbg(db->dev, "crc error\n"); | 903 | dev_dbg(db->dev, "crc error\n"); |
901 | dev->stats.rx_crc_errors++; | 904 | dev->stats.rx_crc_errors++; |
902 | } | 905 | } |
903 | if (rxhdr.RxStatus & 0x80) { | 906 | if (rxhdr.RxStatus & RSR_RF) { |
904 | if (netif_msg_rx_err(db)) | 907 | if (netif_msg_rx_err(db)) |
905 | dev_dbg(db->dev, "length error\n"); | 908 | dev_dbg(db->dev, "length error\n"); |
906 | dev->stats.rx_length_errors++; | 909 | dev->stats.rx_length_errors++; |
@@ -1067,7 +1070,7 @@ dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg) | |||
1067 | /* Fill the phyxcer register into REG_0C */ | 1070 | /* Fill the phyxcer register into REG_0C */ |
1068 | iow(db, DM9000_EPAR, DM9000_PHY | reg); | 1071 | iow(db, DM9000_EPAR, DM9000_PHY | reg); |
1069 | 1072 | ||
1070 | iow(db, DM9000_EPCR, 0xc); /* Issue phyxcer read command */ | 1073 | iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS); /* Issue phyxcer read command */ |
1071 | 1074 | ||
1072 | writeb(reg_save, db->io_addr); | 1075 | writeb(reg_save, db->io_addr); |
1073 | spin_unlock_irqrestore(&db->lock,flags); | 1076 | spin_unlock_irqrestore(&db->lock,flags); |
@@ -1118,7 +1121,7 @@ dm9000_phy_write(struct net_device *dev, | |||
1118 | iow(db, DM9000_EPDRL, value); | 1121 | iow(db, DM9000_EPDRL, value); |
1119 | iow(db, DM9000_EPDRH, value >> 8); | 1122 | iow(db, DM9000_EPDRH, value >> 8); |
1120 | 1123 | ||
1121 | iow(db, DM9000_EPCR, 0xa); /* Issue phyxcer write command */ | 1124 | iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW); /* Issue phyxcer write command */ |
1122 | 1125 | ||
1123 | writeb(reg_save, db->io_addr); | 1126 | writeb(reg_save, db->io_addr); |
1124 | spin_unlock_irqrestore(&db->lock, flags); | 1127 | spin_unlock_irqrestore(&db->lock, flags); |
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h index 31feae1ea390..19e317eaf5bc 100644 --- a/drivers/net/e1000/e1000.h +++ b/drivers/net/e1000/e1000.h | |||
@@ -90,10 +90,13 @@ struct e1000_adapter; | |||
90 | #define E1000_ERR(args...) printk(KERN_ERR "e1000: " args) | 90 | #define E1000_ERR(args...) printk(KERN_ERR "e1000: " args) |
91 | 91 | ||
92 | #define PFX "e1000: " | 92 | #define PFX "e1000: " |
93 | #define DPRINTK(nlevel, klevel, fmt, args...) \ | 93 | |
94 | (void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \ | 94 | #define DPRINTK(nlevel, klevel, fmt, args...) \ |
95 | printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \ | 95 | do { \ |
96 | __FUNCTION__ , ## args)) | 96 | if (NETIF_MSG_##nlevel & adapter->msg_enable) \ |
97 | printk(KERN_##klevel PFX "%s: %s: " fmt, \ | ||
98 | adapter->netdev->name, __func__, ##args); \ | ||
99 | } while (0) | ||
97 | 100 | ||
98 | #define E1000_MAX_INTR 10 | 101 | #define E1000_MAX_INTR 10 |
99 | 102 | ||
@@ -151,9 +154,9 @@ struct e1000_adapter; | |||
151 | #define E1000_MASTER_SLAVE e1000_ms_hw_default | 154 | #define E1000_MASTER_SLAVE e1000_ms_hw_default |
152 | #endif | 155 | #endif |
153 | 156 | ||
154 | #define E1000_MNG_VLAN_NONE -1 | 157 | #define E1000_MNG_VLAN_NONE (-1) |
155 | /* Number of packet split data buffers (not including the header buffer) */ | 158 | /* Number of packet split data buffers (not including the header buffer) */ |
156 | #define PS_PAGE_BUFFERS MAX_PS_BUFFERS-1 | 159 | #define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) |
157 | 160 | ||
158 | /* wrapper around a pointer to a socket buffer, | 161 | /* wrapper around a pointer to a socket buffer, |
159 | * so a DMA handle can be stored along with the buffer */ | 162 | * so a DMA handle can be stored along with the buffer */ |
@@ -165,9 +168,13 @@ struct e1000_buffer { | |||
165 | u16 next_to_watch; | 168 | u16 next_to_watch; |
166 | }; | 169 | }; |
167 | 170 | ||
171 | struct e1000_ps_page { | ||
172 | struct page *ps_page[PS_PAGE_BUFFERS]; | ||
173 | }; | ||
168 | 174 | ||
169 | struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; }; | 175 | struct e1000_ps_page_dma { |
170 | struct e1000_ps_page_dma { u64 ps_page_dma[PS_PAGE_BUFFERS]; }; | 176 | u64 ps_page_dma[PS_PAGE_BUFFERS]; |
177 | }; | ||
171 | 178 | ||
172 | struct e1000_tx_ring { | 179 | struct e1000_tx_ring { |
173 | /* pointer to the descriptor ring memory */ | 180 | /* pointer to the descriptor ring memory */ |
@@ -217,13 +224,13 @@ struct e1000_rx_ring { | |||
217 | u16 rdt; | 224 | u16 rdt; |
218 | }; | 225 | }; |
219 | 226 | ||
220 | #define E1000_DESC_UNUSED(R) \ | 227 | #define E1000_DESC_UNUSED(R) \ |
221 | ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ | 228 | ((((R)->next_to_clean > (R)->next_to_use) \ |
222 | (R)->next_to_clean - (R)->next_to_use - 1) | 229 | ? 0 : (R)->count) + (R)->next_to_clean - (R)->next_to_use - 1) |
223 | 230 | ||
224 | #define E1000_RX_DESC_PS(R, i) \ | 231 | #define E1000_RX_DESC_PS(R, i) \ |
225 | (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) | 232 | (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) |
226 | #define E1000_RX_DESC_EXT(R, i) \ | 233 | #define E1000_RX_DESC_EXT(R, i) \ |
227 | (&(((union e1000_rx_desc_extended *)((R).desc))[i])) | 234 | (&(((union e1000_rx_desc_extended *)((R).desc))[i])) |
228 | #define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) | 235 | #define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) |
229 | #define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc) | 236 | #define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc) |
@@ -246,9 +253,7 @@ struct e1000_adapter { | |||
246 | u16 link_speed; | 253 | u16 link_speed; |
247 | u16 link_duplex; | 254 | u16 link_duplex; |
248 | spinlock_t stats_lock; | 255 | spinlock_t stats_lock; |
249 | #ifdef CONFIG_E1000_NAPI | ||
250 | spinlock_t tx_queue_lock; | 256 | spinlock_t tx_queue_lock; |
251 | #endif | ||
252 | unsigned int total_tx_bytes; | 257 | unsigned int total_tx_bytes; |
253 | unsigned int total_tx_packets; | 258 | unsigned int total_tx_packets; |
254 | unsigned int total_rx_bytes; | 259 | unsigned int total_rx_bytes; |
@@ -286,22 +291,16 @@ struct e1000_adapter { | |||
286 | bool detect_tx_hung; | 291 | bool detect_tx_hung; |
287 | 292 | ||
288 | /* RX */ | 293 | /* RX */ |
289 | #ifdef CONFIG_E1000_NAPI | 294 | bool (*clean_rx)(struct e1000_adapter *adapter, |
290 | bool (*clean_rx) (struct e1000_adapter *adapter, | 295 | struct e1000_rx_ring *rx_ring, |
291 | struct e1000_rx_ring *rx_ring, | 296 | int *work_done, int work_to_do); |
292 | int *work_done, int work_to_do); | 297 | void (*alloc_rx_buf)(struct e1000_adapter *adapter, |
293 | #else | 298 | struct e1000_rx_ring *rx_ring, |
294 | bool (*clean_rx) (struct e1000_adapter *adapter, | 299 | int cleaned_count); |
295 | struct e1000_rx_ring *rx_ring); | ||
296 | #endif | ||
297 | void (*alloc_rx_buf) (struct e1000_adapter *adapter, | ||
298 | struct e1000_rx_ring *rx_ring, | ||
299 | int cleaned_count); | ||
300 | struct e1000_rx_ring *rx_ring; /* One per active queue */ | 300 | struct e1000_rx_ring *rx_ring; /* One per active queue */ |
301 | #ifdef CONFIG_E1000_NAPI | ||
302 | struct napi_struct napi; | 301 | struct napi_struct napi; |
303 | struct net_device *polling_netdev; /* One per active queue */ | 302 | struct net_device *polling_netdev; /* One per active queue */ |
304 | #endif | 303 | |
305 | int num_tx_queues; | 304 | int num_tx_queues; |
306 | int num_rx_queues; | 305 | int num_rx_queues; |
307 | 306 | ||
@@ -317,7 +316,6 @@ struct e1000_adapter { | |||
317 | u64 gorcl_old; | 316 | u64 gorcl_old; |
318 | u16 rx_ps_bsize0; | 317 | u16 rx_ps_bsize0; |
319 | 318 | ||
320 | |||
321 | /* OS defined structs */ | 319 | /* OS defined structs */ |
322 | struct net_device *netdev; | 320 | struct net_device *netdev; |
323 | struct pci_dev *pdev; | 321 | struct pci_dev *pdev; |
@@ -342,6 +340,10 @@ struct e1000_adapter { | |||
342 | bool quad_port_a; | 340 | bool quad_port_a; |
343 | unsigned long flags; | 341 | unsigned long flags; |
344 | u32 eeprom_wol; | 342 | u32 eeprom_wol; |
343 | |||
344 | /* for ioport free */ | ||
345 | int bars; | ||
346 | int need_ioport; | ||
345 | }; | 347 | }; |
346 | 348 | ||
347 | enum e1000_state_t { | 349 | enum e1000_state_t { |
@@ -353,9 +355,18 @@ enum e1000_state_t { | |||
353 | extern char e1000_driver_name[]; | 355 | extern char e1000_driver_name[]; |
354 | extern const char e1000_driver_version[]; | 356 | extern const char e1000_driver_version[]; |
355 | 357 | ||
358 | extern int e1000_up(struct e1000_adapter *adapter); | ||
359 | extern void e1000_down(struct e1000_adapter *adapter); | ||
360 | extern void e1000_reinit_locked(struct e1000_adapter *adapter); | ||
361 | extern void e1000_reset(struct e1000_adapter *adapter); | ||
362 | extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx); | ||
363 | extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); | ||
364 | extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); | ||
365 | extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter); | ||
366 | extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter); | ||
367 | extern void e1000_update_stats(struct e1000_adapter *adapter); | ||
356 | extern void e1000_power_up_phy(struct e1000_adapter *); | 368 | extern void e1000_power_up_phy(struct e1000_adapter *); |
357 | extern void e1000_set_ethtool_ops(struct net_device *netdev); | 369 | extern void e1000_set_ethtool_ops(struct net_device *netdev); |
358 | extern void e1000_check_options(struct e1000_adapter *adapter); | 370 | extern void e1000_check_options(struct e1000_adapter *adapter); |
359 | 371 | ||
360 | |||
361 | #endif /* _E1000_H_ */ | 372 | #endif /* _E1000_H_ */ |
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c index a3f6a9c72ec8..6a3893acfe04 100644 --- a/drivers/net/e1000/e1000_ethtool.c +++ b/drivers/net/e1000/e1000_ethtool.c | |||
@@ -29,21 +29,8 @@ | |||
29 | /* ethtool support for e1000 */ | 29 | /* ethtool support for e1000 */ |
30 | 30 | ||
31 | #include "e1000.h" | 31 | #include "e1000.h" |
32 | |||
33 | #include <asm/uaccess.h> | 32 | #include <asm/uaccess.h> |
34 | 33 | ||
35 | extern int e1000_up(struct e1000_adapter *adapter); | ||
36 | extern void e1000_down(struct e1000_adapter *adapter); | ||
37 | extern void e1000_reinit_locked(struct e1000_adapter *adapter); | ||
38 | extern void e1000_reset(struct e1000_adapter *adapter); | ||
39 | extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx); | ||
40 | extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); | ||
41 | extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); | ||
42 | extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter); | ||
43 | extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter); | ||
44 | extern void e1000_update_stats(struct e1000_adapter *adapter); | ||
45 | |||
46 | |||
47 | struct e1000_stats { | 34 | struct e1000_stats { |
48 | char stat_string[ETH_GSTRING_LEN]; | 35 | char stat_string[ETH_GSTRING_LEN]; |
49 | int sizeof_stat; | 36 | int sizeof_stat; |
@@ -112,8 +99,8 @@ static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = { | |||
112 | }; | 99 | }; |
113 | #define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test) | 100 | #define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test) |
114 | 101 | ||
115 | static int | 102 | static int e1000_get_settings(struct net_device *netdev, |
116 | e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) | 103 | struct ethtool_cmd *ecmd) |
117 | { | 104 | { |
118 | struct e1000_adapter *adapter = netdev_priv(netdev); | 105 | struct e1000_adapter *adapter = netdev_priv(netdev); |
119 | struct e1000_hw *hw = &adapter->hw; | 106 | struct e1000_hw *hw = &adapter->hw; |
@@ -162,7 +149,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) | |||
162 | ecmd->transceiver = XCVR_EXTERNAL; | 149 | ecmd->transceiver = XCVR_EXTERNAL; |
163 | } | 150 | } |
164 | 151 | ||
165 | if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) { | 152 | if (er32(STATUS) & E1000_STATUS_LU) { |
166 | 153 | ||
167 | e1000_get_speed_and_duplex(hw, &adapter->link_speed, | 154 | e1000_get_speed_and_duplex(hw, &adapter->link_speed, |
168 | &adapter->link_duplex); | 155 | &adapter->link_duplex); |
@@ -185,8 +172,8 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) | |||
185 | return 0; | 172 | return 0; |
186 | } | 173 | } |
187 | 174 | ||
188 | static int | 175 | static int e1000_set_settings(struct net_device *netdev, |
189 | e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) | 176 | struct ethtool_cmd *ecmd) |
190 | { | 177 | { |
191 | struct e1000_adapter *adapter = netdev_priv(netdev); | 178 | struct e1000_adapter *adapter = netdev_priv(netdev); |
192 | struct e1000_hw *hw = &adapter->hw; | 179 | struct e1000_hw *hw = &adapter->hw; |
@@ -231,9 +218,8 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) | |||
231 | return 0; | 218 | return 0; |
232 | } | 219 | } |
233 | 220 | ||
234 | static void | 221 | static void e1000_get_pauseparam(struct net_device *netdev, |
235 | e1000_get_pauseparam(struct net_device *netdev, | 222 | struct ethtool_pauseparam *pause) |
236 | struct ethtool_pauseparam *pause) | ||
237 | { | 223 | { |
238 | struct e1000_adapter *adapter = netdev_priv(netdev); | 224 | struct e1000_adapter *adapter = netdev_priv(netdev); |
239 | struct e1000_hw *hw = &adapter->hw; | 225 | struct e1000_hw *hw = &adapter->hw; |
@@ -251,9 +237,8 @@ e1000_get_pauseparam(struct net_device *netdev, | |||
251 | } | 237 | } |
252 | } | 238 | } |
253 | 239 | ||
254 | static int | 240 | static int e1000_set_pauseparam(struct net_device *netdev, |
255 | e1000_set_pauseparam(struct net_device *netdev, | 241 | struct ethtool_pauseparam *pause) |
256 | struct ethtool_pauseparam *pause) | ||
257 | { | 242 | { |
258 | struct e1000_adapter *adapter = netdev_priv(netdev); | 243 | struct e1000_adapter *adapter = netdev_priv(netdev); |
259 | struct e1000_hw *hw = &adapter->hw; | 244 | struct e1000_hw *hw = &adapter->hw; |
@@ -289,15 +274,13 @@ e1000_set_pauseparam(struct net_device *netdev, | |||
289 | return retval; | 274 | return retval; |
290 | } | 275 | } |
291 | 276 | ||
292 | static u32 | 277 | static u32 e1000_get_rx_csum(struct net_device *netdev) |
293 | e1000_get_rx_csum(struct net_device *netdev) | ||
294 | { | 278 | { |
295 | struct e1000_adapter *adapter = netdev_priv(netdev); | 279 | struct e1000_adapter *adapter = netdev_priv(netdev); |
296 | return adapter->rx_csum; | 280 | return adapter->rx_csum; |
297 | } | 281 | } |
298 | 282 | ||
299 | static int | 283 | static int e1000_set_rx_csum(struct net_device *netdev, u32 data) |
300 | e1000_set_rx_csum(struct net_device *netdev, u32 data) | ||
301 | { | 284 | { |
302 | struct e1000_adapter *adapter = netdev_priv(netdev); | 285 | struct e1000_adapter *adapter = netdev_priv(netdev); |
303 | adapter->rx_csum = data; | 286 | adapter->rx_csum = data; |
@@ -309,18 +292,17 @@ e1000_set_rx_csum(struct net_device *netdev, u32 data) | |||
309 | return 0; | 292 | return 0; |
310 | } | 293 | } |
311 | 294 | ||
312 | static u32 | 295 | static u32 e1000_get_tx_csum(struct net_device *netdev) |
313 | e1000_get_tx_csum(struct net_device *netdev) | ||
314 | { | 296 | { |
315 | return (netdev->features & NETIF_F_HW_CSUM) != 0; | 297 | return (netdev->features & NETIF_F_HW_CSUM) != 0; |
316 | } | 298 | } |
317 | 299 | ||
318 | static int | 300 | static int e1000_set_tx_csum(struct net_device *netdev, u32 data) |
319 | e1000_set_tx_csum(struct net_device *netdev, u32 data) | ||
320 | { | 301 | { |
321 | struct e1000_adapter *adapter = netdev_priv(netdev); | 302 | struct e1000_adapter *adapter = netdev_priv(netdev); |
303 | struct e1000_hw *hw = &adapter->hw; | ||
322 | 304 | ||
323 | if (adapter->hw.mac_type < e1000_82543) { | 305 | if (hw->mac_type < e1000_82543) { |
324 | if (!data) | 306 | if (!data) |
325 | return -EINVAL; | 307 | return -EINVAL; |
326 | return 0; | 308 | return 0; |
@@ -334,12 +316,13 @@ e1000_set_tx_csum(struct net_device *netdev, u32 data) | |||
334 | return 0; | 316 | return 0; |
335 | } | 317 | } |
336 | 318 | ||
337 | static int | 319 | static int e1000_set_tso(struct net_device *netdev, u32 data) |
338 | e1000_set_tso(struct net_device *netdev, u32 data) | ||
339 | { | 320 | { |
340 | struct e1000_adapter *adapter = netdev_priv(netdev); | 321 | struct e1000_adapter *adapter = netdev_priv(netdev); |
341 | if ((adapter->hw.mac_type < e1000_82544) || | 322 | struct e1000_hw *hw = &adapter->hw; |
342 | (adapter->hw.mac_type == e1000_82547)) | 323 | |
324 | if ((hw->mac_type < e1000_82544) || | ||
325 | (hw->mac_type == e1000_82547)) | ||
343 | return data ? -EINVAL : 0; | 326 | return data ? -EINVAL : 0; |
344 | 327 | ||
345 | if (data) | 328 | if (data) |
@@ -357,30 +340,26 @@ e1000_set_tso(struct net_device *netdev, u32 data) | |||
357 | return 0; | 340 | return 0; |
358 | } | 341 | } |
359 | 342 | ||
360 | static u32 | 343 | static u32 e1000_get_msglevel(struct net_device *netdev) |
361 | e1000_get_msglevel(struct net_device *netdev) | ||
362 | { | 344 | { |
363 | struct e1000_adapter *adapter = netdev_priv(netdev); | 345 | struct e1000_adapter *adapter = netdev_priv(netdev); |
364 | return adapter->msg_enable; | 346 | return adapter->msg_enable; |
365 | } | 347 | } |
366 | 348 | ||
367 | static void | 349 | static void e1000_set_msglevel(struct net_device *netdev, u32 data) |
368 | e1000_set_msglevel(struct net_device *netdev, u32 data) | ||
369 | { | 350 | { |
370 | struct e1000_adapter *adapter = netdev_priv(netdev); | 351 | struct e1000_adapter *adapter = netdev_priv(netdev); |
371 | adapter->msg_enable = data; | 352 | adapter->msg_enable = data; |
372 | } | 353 | } |
373 | 354 | ||
374 | static int | 355 | static int e1000_get_regs_len(struct net_device *netdev) |
375 | e1000_get_regs_len(struct net_device *netdev) | ||
376 | { | 356 | { |
377 | #define E1000_REGS_LEN 32 | 357 | #define E1000_REGS_LEN 32 |
378 | return E1000_REGS_LEN * sizeof(u32); | 358 | return E1000_REGS_LEN * sizeof(u32); |
379 | } | 359 | } |
380 | 360 | ||
381 | static void | 361 | static void e1000_get_regs(struct net_device *netdev, struct ethtool_regs *regs, |
382 | e1000_get_regs(struct net_device *netdev, | 362 | void *p) |
383 | struct ethtool_regs *regs, void *p) | ||
384 | { | 363 | { |
385 | struct e1000_adapter *adapter = netdev_priv(netdev); | 364 | struct e1000_adapter *adapter = netdev_priv(netdev); |
386 | struct e1000_hw *hw = &adapter->hw; | 365 | struct e1000_hw *hw = &adapter->hw; |
@@ -391,22 +370,22 @@ e1000_get_regs(struct net_device *netdev, | |||
391 | 370 | ||
392 | regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; | 371 | regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; |
393 | 372 | ||
394 | regs_buff[0] = E1000_READ_REG(hw, CTRL); | 373 | regs_buff[0] = er32(CTRL); |
395 | regs_buff[1] = E1000_READ_REG(hw, STATUS); | 374 | regs_buff[1] = er32(STATUS); |
396 | 375 | ||
397 | regs_buff[2] = E1000_READ_REG(hw, RCTL); | 376 | regs_buff[2] = er32(RCTL); |
398 | regs_buff[3] = E1000_READ_REG(hw, RDLEN); | 377 | regs_buff[3] = er32(RDLEN); |
399 | regs_buff[4] = E1000_READ_REG(hw, RDH); | 378 | regs_buff[4] = er32(RDH); |
400 | regs_buff[5] = E1000_READ_REG(hw, RDT); | 379 | regs_buff[5] = er32(RDT); |
401 | regs_buff[6] = E1000_READ_REG(hw, RDTR); | 380 | regs_buff[6] = er32(RDTR); |
402 | 381 | ||
403 | regs_buff[7] = E1000_READ_REG(hw, TCTL); | 382 | regs_buff[7] = er32(TCTL); |
404 | regs_buff[8] = E1000_READ_REG(hw, TDLEN); | 383 | regs_buff[8] = er32(TDLEN); |
405 | regs_buff[9] = E1000_READ_REG(hw, TDH); | 384 | regs_buff[9] = er32(TDH); |
406 | regs_buff[10] = E1000_READ_REG(hw, TDT); | 385 | regs_buff[10] = er32(TDT); |
407 | regs_buff[11] = E1000_READ_REG(hw, TIDV); | 386 | regs_buff[11] = er32(TIDV); |
408 | 387 | ||
409 | regs_buff[12] = adapter->hw.phy_type; /* PHY type (IGP=1, M88=0) */ | 388 | regs_buff[12] = hw->phy_type; /* PHY type (IGP=1, M88=0) */ |
410 | if (hw->phy_type == e1000_phy_igp) { | 389 | if (hw->phy_type == e1000_phy_igp) { |
411 | e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, | 390 | e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, |
412 | IGP01E1000_PHY_AGC_A); | 391 | IGP01E1000_PHY_AGC_A); |
@@ -464,20 +443,20 @@ e1000_get_regs(struct net_device *netdev, | |||
464 | if (hw->mac_type >= e1000_82540 && | 443 | if (hw->mac_type >= e1000_82540 && |
465 | hw->mac_type < e1000_82571 && | 444 | hw->mac_type < e1000_82571 && |
466 | hw->media_type == e1000_media_type_copper) { | 445 | hw->media_type == e1000_media_type_copper) { |
467 | regs_buff[26] = E1000_READ_REG(hw, MANC); | 446 | regs_buff[26] = er32(MANC); |
468 | } | 447 | } |
469 | } | 448 | } |
470 | 449 | ||
471 | static int | 450 | static int e1000_get_eeprom_len(struct net_device *netdev) |
472 | e1000_get_eeprom_len(struct net_device *netdev) | ||
473 | { | 451 | { |
474 | struct e1000_adapter *adapter = netdev_priv(netdev); | 452 | struct e1000_adapter *adapter = netdev_priv(netdev); |
475 | return adapter->hw.eeprom.word_size * 2; | 453 | struct e1000_hw *hw = &adapter->hw; |
454 | |||
455 | return hw->eeprom.word_size * 2; | ||
476 | } | 456 | } |
477 | 457 | ||
478 | static int | 458 | static int e1000_get_eeprom(struct net_device *netdev, |
479 | e1000_get_eeprom(struct net_device *netdev, | 459 | struct ethtool_eeprom *eeprom, u8 *bytes) |
480 | struct ethtool_eeprom *eeprom, u8 *bytes) | ||
481 | { | 460 | { |
482 | struct e1000_adapter *adapter = netdev_priv(netdev); | 461 | struct e1000_adapter *adapter = netdev_priv(netdev); |
483 | struct e1000_hw *hw = &adapter->hw; | 462 | struct e1000_hw *hw = &adapter->hw; |
@@ -504,10 +483,12 @@ e1000_get_eeprom(struct net_device *netdev, | |||
504 | last_word - first_word + 1, | 483 | last_word - first_word + 1, |
505 | eeprom_buff); | 484 | eeprom_buff); |
506 | else { | 485 | else { |
507 | for (i = 0; i < last_word - first_word + 1; i++) | 486 | for (i = 0; i < last_word - first_word + 1; i++) { |
508 | if ((ret_val = e1000_read_eeprom(hw, first_word + i, 1, | 487 | ret_val = e1000_read_eeprom(hw, first_word + i, 1, |
509 | &eeprom_buff[i]))) | 488 | &eeprom_buff[i]); |
489 | if (ret_val) | ||
510 | break; | 490 | break; |
491 | } | ||
511 | } | 492 | } |
512 | 493 | ||
513 | /* Device's eeprom is always little-endian, word addressable */ | 494 | /* Device's eeprom is always little-endian, word addressable */ |
@@ -521,9 +502,8 @@ e1000_get_eeprom(struct net_device *netdev, | |||
521 | return ret_val; | 502 | return ret_val; |
522 | } | 503 | } |
523 | 504 | ||
524 | static int | 505 | static int e1000_set_eeprom(struct net_device *netdev, |
525 | e1000_set_eeprom(struct net_device *netdev, | 506 | struct ethtool_eeprom *eeprom, u8 *bytes) |
526 | struct ethtool_eeprom *eeprom, u8 *bytes) | ||
527 | { | 507 | { |
528 | struct e1000_adapter *adapter = netdev_priv(netdev); | 508 | struct e1000_adapter *adapter = netdev_priv(netdev); |
529 | struct e1000_hw *hw = &adapter->hw; | 509 | struct e1000_hw *hw = &adapter->hw; |
@@ -584,11 +564,11 @@ e1000_set_eeprom(struct net_device *netdev, | |||
584 | return ret_val; | 564 | return ret_val; |
585 | } | 565 | } |
586 | 566 | ||
587 | static void | 567 | static void e1000_get_drvinfo(struct net_device *netdev, |
588 | e1000_get_drvinfo(struct net_device *netdev, | 568 | struct ethtool_drvinfo *drvinfo) |
589 | struct ethtool_drvinfo *drvinfo) | ||
590 | { | 569 | { |
591 | struct e1000_adapter *adapter = netdev_priv(netdev); | 570 | struct e1000_adapter *adapter = netdev_priv(netdev); |
571 | struct e1000_hw *hw = &adapter->hw; | ||
592 | char firmware_version[32]; | 572 | char firmware_version[32]; |
593 | u16 eeprom_data; | 573 | u16 eeprom_data; |
594 | 574 | ||
@@ -597,8 +577,8 @@ e1000_get_drvinfo(struct net_device *netdev, | |||
597 | 577 | ||
598 | /* EEPROM image version # is reported as firmware version # for | 578 | /* EEPROM image version # is reported as firmware version # for |
599 | * 8257{1|2|3} controllers */ | 579 | * 8257{1|2|3} controllers */ |
600 | e1000_read_eeprom(&adapter->hw, 5, 1, &eeprom_data); | 580 | e1000_read_eeprom(hw, 5, 1, &eeprom_data); |
601 | switch (adapter->hw.mac_type) { | 581 | switch (hw->mac_type) { |
602 | case e1000_82571: | 582 | case e1000_82571: |
603 | case e1000_82572: | 583 | case e1000_82572: |
604 | case e1000_82573: | 584 | case e1000_82573: |
@@ -619,12 +599,12 @@ e1000_get_drvinfo(struct net_device *netdev, | |||
619 | drvinfo->eedump_len = e1000_get_eeprom_len(netdev); | 599 | drvinfo->eedump_len = e1000_get_eeprom_len(netdev); |
620 | } | 600 | } |
621 | 601 | ||
622 | static void | 602 | static void e1000_get_ringparam(struct net_device *netdev, |
623 | e1000_get_ringparam(struct net_device *netdev, | 603 | struct ethtool_ringparam *ring) |
624 | struct ethtool_ringparam *ring) | ||
625 | { | 604 | { |
626 | struct e1000_adapter *adapter = netdev_priv(netdev); | 605 | struct e1000_adapter *adapter = netdev_priv(netdev); |
627 | e1000_mac_type mac_type = adapter->hw.mac_type; | 606 | struct e1000_hw *hw = &adapter->hw; |
607 | e1000_mac_type mac_type = hw->mac_type; | ||
628 | struct e1000_tx_ring *txdr = adapter->tx_ring; | 608 | struct e1000_tx_ring *txdr = adapter->tx_ring; |
629 | struct e1000_rx_ring *rxdr = adapter->rx_ring; | 609 | struct e1000_rx_ring *rxdr = adapter->rx_ring; |
630 | 610 | ||
@@ -640,12 +620,12 @@ e1000_get_ringparam(struct net_device *netdev, | |||
640 | ring->rx_jumbo_pending = 0; | 620 | ring->rx_jumbo_pending = 0; |
641 | } | 621 | } |
642 | 622 | ||
643 | static int | 623 | static int e1000_set_ringparam(struct net_device *netdev, |
644 | e1000_set_ringparam(struct net_device *netdev, | 624 | struct ethtool_ringparam *ring) |
645 | struct ethtool_ringparam *ring) | ||
646 | { | 625 | { |
647 | struct e1000_adapter *adapter = netdev_priv(netdev); | 626 | struct e1000_adapter *adapter = netdev_priv(netdev); |
648 | e1000_mac_type mac_type = adapter->hw.mac_type; | 627 | struct e1000_hw *hw = &adapter->hw; |
628 | e1000_mac_type mac_type = hw->mac_type; | ||
649 | struct e1000_tx_ring *txdr, *tx_old; | 629 | struct e1000_tx_ring *txdr, *tx_old; |
650 | struct e1000_rx_ring *rxdr, *rx_old; | 630 | struct e1000_rx_ring *rxdr, *rx_old; |
651 | int i, err; | 631 | int i, err; |
@@ -691,9 +671,11 @@ e1000_set_ringparam(struct net_device *netdev, | |||
691 | 671 | ||
692 | if (netif_running(adapter->netdev)) { | 672 | if (netif_running(adapter->netdev)) { |
693 | /* Try to get new resources before deleting old */ | 673 | /* Try to get new resources before deleting old */ |
694 | if ((err = e1000_setup_all_rx_resources(adapter))) | 674 | err = e1000_setup_all_rx_resources(adapter); |
675 | if (err) | ||
695 | goto err_setup_rx; | 676 | goto err_setup_rx; |
696 | if ((err = e1000_setup_all_tx_resources(adapter))) | 677 | err = e1000_setup_all_tx_resources(adapter); |
678 | if (err) | ||
697 | goto err_setup_tx; | 679 | goto err_setup_tx; |
698 | 680 | ||
699 | /* save the new, restore the old in order to free it, | 681 | /* save the new, restore the old in order to free it, |
@@ -707,7 +689,8 @@ e1000_set_ringparam(struct net_device *netdev, | |||
707 | kfree(rx_old); | 689 | kfree(rx_old); |
708 | adapter->rx_ring = rxdr; | 690 | adapter->rx_ring = rxdr; |
709 | adapter->tx_ring = txdr; | 691 | adapter->tx_ring = txdr; |
710 | if ((err = e1000_up(adapter))) | 692 | err = e1000_up(adapter); |
693 | if (err) | ||
711 | goto err_setup; | 694 | goto err_setup; |
712 | } | 695 | } |
713 | 696 | ||
@@ -728,12 +711,13 @@ err_setup: | |||
728 | return err; | 711 | return err; |
729 | } | 712 | } |
730 | 713 | ||
731 | static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, | 714 | static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg, |
732 | int reg, u32 mask, u32 write) | 715 | u32 mask, u32 write) |
733 | { | 716 | { |
717 | struct e1000_hw *hw = &adapter->hw; | ||
734 | static const u32 test[] = | 718 | static const u32 test[] = |
735 | {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; | 719 | {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; |
736 | u8 __iomem *address = adapter->hw.hw_addr + reg; | 720 | u8 __iomem *address = hw->hw_addr + reg; |
737 | u32 read; | 721 | u32 read; |
738 | int i; | 722 | int i; |
739 | 723 | ||
@@ -751,10 +735,11 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, | |||
751 | return false; | 735 | return false; |
752 | } | 736 | } |
753 | 737 | ||
754 | static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, | 738 | static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, int reg, |
755 | int reg, u32 mask, u32 write) | 739 | u32 mask, u32 write) |
756 | { | 740 | { |
757 | u8 __iomem *address = adapter->hw.hw_addr + reg; | 741 | struct e1000_hw *hw = &adapter->hw; |
742 | u8 __iomem *address = hw->hw_addr + reg; | ||
758 | u32 read; | 743 | u32 read; |
759 | 744 | ||
760 | writel(write & mask, address); | 745 | writel(write & mask, address); |
@@ -772,7 +757,7 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, | |||
772 | #define REG_PATTERN_TEST(reg, mask, write) \ | 757 | #define REG_PATTERN_TEST(reg, mask, write) \ |
773 | do { \ | 758 | do { \ |
774 | if (reg_pattern_test(adapter, data, \ | 759 | if (reg_pattern_test(adapter, data, \ |
775 | (adapter->hw.mac_type >= e1000_82543) \ | 760 | (hw->mac_type >= e1000_82543) \ |
776 | ? E1000_##reg : E1000_82542_##reg, \ | 761 | ? E1000_##reg : E1000_82542_##reg, \ |
777 | mask, write)) \ | 762 | mask, write)) \ |
778 | return 1; \ | 763 | return 1; \ |
@@ -781,22 +766,22 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, | |||
781 | #define REG_SET_AND_CHECK(reg, mask, write) \ | 766 | #define REG_SET_AND_CHECK(reg, mask, write) \ |
782 | do { \ | 767 | do { \ |
783 | if (reg_set_and_check(adapter, data, \ | 768 | if (reg_set_and_check(adapter, data, \ |
784 | (adapter->hw.mac_type >= e1000_82543) \ | 769 | (hw->mac_type >= e1000_82543) \ |
785 | ? E1000_##reg : E1000_82542_##reg, \ | 770 | ? E1000_##reg : E1000_82542_##reg, \ |
786 | mask, write)) \ | 771 | mask, write)) \ |
787 | return 1; \ | 772 | return 1; \ |
788 | } while (0) | 773 | } while (0) |
789 | 774 | ||
790 | static int | 775 | static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) |
791 | e1000_reg_test(struct e1000_adapter *adapter, u64 *data) | ||
792 | { | 776 | { |
793 | u32 value, before, after; | 777 | u32 value, before, after; |
794 | u32 i, toggle; | 778 | u32 i, toggle; |
779 | struct e1000_hw *hw = &adapter->hw; | ||
795 | 780 | ||
796 | /* The status register is Read Only, so a write should fail. | 781 | /* The status register is Read Only, so a write should fail. |
797 | * Some bits that get toggled are ignored. | 782 | * Some bits that get toggled are ignored. |
798 | */ | 783 | */ |
799 | switch (adapter->hw.mac_type) { | 784 | switch (hw->mac_type) { |
800 | /* there are several bits on newer hardware that are r/w */ | 785 | /* there are several bits on newer hardware that are r/w */ |
801 | case e1000_82571: | 786 | case e1000_82571: |
802 | case e1000_82572: | 787 | case e1000_82572: |
@@ -812,10 +797,10 @@ e1000_reg_test(struct e1000_adapter *adapter, u64 *data) | |||
812 | break; | 797 | break; |
813 | } | 798 | } |
814 | 799 | ||
815 | before = E1000_READ_REG(&adapter->hw, STATUS); | 800 | before = er32(STATUS); |
816 | value = (E1000_READ_REG(&adapter->hw, STATUS) & toggle); | 801 | value = (er32(STATUS) & toggle); |
817 | E1000_WRITE_REG(&adapter->hw, STATUS, toggle); | 802 | ew32(STATUS, toggle); |
818 | after = E1000_READ_REG(&adapter->hw, STATUS) & toggle; | 803 | after = er32(STATUS) & toggle; |
819 | if (value != after) { | 804 | if (value != after) { |
820 | DPRINTK(DRV, ERR, "failed STATUS register test got: " | 805 | DPRINTK(DRV, ERR, "failed STATUS register test got: " |
821 | "0x%08X expected: 0x%08X\n", after, value); | 806 | "0x%08X expected: 0x%08X\n", after, value); |
@@ -823,9 +808,9 @@ e1000_reg_test(struct e1000_adapter *adapter, u64 *data) | |||
823 | return 1; | 808 | return 1; |
824 | } | 809 | } |
825 | /* restore previous status */ | 810 | /* restore previous status */ |
826 | E1000_WRITE_REG(&adapter->hw, STATUS, before); | 811 | ew32(STATUS, before); |
827 | 812 | ||
828 | if (adapter->hw.mac_type != e1000_ich8lan) { | 813 | if (hw->mac_type != e1000_ich8lan) { |
829 | REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF); | 814 | REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF); |
830 | REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF); | 815 | REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF); |
831 | REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF); | 816 | REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF); |
@@ -845,20 +830,20 @@ e1000_reg_test(struct e1000_adapter *adapter, u64 *data) | |||
845 | 830 | ||
846 | REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000); | 831 | REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000); |
847 | 832 | ||
848 | before = (adapter->hw.mac_type == e1000_ich8lan ? | 833 | before = (hw->mac_type == e1000_ich8lan ? |
849 | 0x06C3B33E : 0x06DFB3FE); | 834 | 0x06C3B33E : 0x06DFB3FE); |
850 | REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB); | 835 | REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB); |
851 | REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000); | 836 | REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000); |
852 | 837 | ||
853 | if (adapter->hw.mac_type >= e1000_82543) { | 838 | if (hw->mac_type >= e1000_82543) { |
854 | 839 | ||
855 | REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF); | 840 | REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF); |
856 | REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); | 841 | REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); |
857 | if (adapter->hw.mac_type != e1000_ich8lan) | 842 | if (hw->mac_type != e1000_ich8lan) |
858 | REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF); | 843 | REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF); |
859 | REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); | 844 | REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); |
860 | REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF); | 845 | REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF); |
861 | value = (adapter->hw.mac_type == e1000_ich8lan ? | 846 | value = (hw->mac_type == e1000_ich8lan ? |
862 | E1000_RAR_ENTRIES_ICH8LAN : E1000_RAR_ENTRIES); | 847 | E1000_RAR_ENTRIES_ICH8LAN : E1000_RAR_ENTRIES); |
863 | for (i = 0; i < value; i++) { | 848 | for (i = 0; i < value; i++) { |
864 | REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF, | 849 | REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF, |
@@ -874,7 +859,7 @@ e1000_reg_test(struct e1000_adapter *adapter, u64 *data) | |||
874 | 859 | ||
875 | } | 860 | } |
876 | 861 | ||
877 | value = (adapter->hw.mac_type == e1000_ich8lan ? | 862 | value = (hw->mac_type == e1000_ich8lan ? |
878 | E1000_MC_TBL_SIZE_ICH8LAN : E1000_MC_TBL_SIZE); | 863 | E1000_MC_TBL_SIZE_ICH8LAN : E1000_MC_TBL_SIZE); |
879 | for (i = 0; i < value; i++) | 864 | for (i = 0; i < value; i++) |
880 | REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF); | 865 | REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF); |
@@ -883,9 +868,9 @@ e1000_reg_test(struct e1000_adapter *adapter, u64 *data) | |||
883 | return 0; | 868 | return 0; |
884 | } | 869 | } |
885 | 870 | ||
886 | static int | 871 | static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data) |
887 | e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data) | ||
888 | { | 872 | { |
873 | struct e1000_hw *hw = &adapter->hw; | ||
889 | u16 temp; | 874 | u16 temp; |
890 | u16 checksum = 0; | 875 | u16 checksum = 0; |
891 | u16 i; | 876 | u16 i; |
@@ -893,7 +878,7 @@ e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data) | |||
893 | *data = 0; | 878 | *data = 0; |
894 | /* Read and add up the contents of the EEPROM */ | 879 | /* Read and add up the contents of the EEPROM */ |
895 | for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { | 880 | for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { |
896 | if ((e1000_read_eeprom(&adapter->hw, i, 1, &temp)) < 0) { | 881 | if ((e1000_read_eeprom(hw, i, 1, &temp)) < 0) { |
897 | *data = 1; | 882 | *data = 1; |
898 | break; | 883 | break; |
899 | } | 884 | } |
@@ -901,30 +886,30 @@ e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data) | |||
901 | } | 886 | } |
902 | 887 | ||
903 | /* If Checksum is not Correct return error else test passed */ | 888 | /* If Checksum is not Correct return error else test passed */ |
904 | if ((checksum != (u16) EEPROM_SUM) && !(*data)) | 889 | if ((checksum != (u16)EEPROM_SUM) && !(*data)) |
905 | *data = 2; | 890 | *data = 2; |
906 | 891 | ||
907 | return *data; | 892 | return *data; |
908 | } | 893 | } |
909 | 894 | ||
910 | static irqreturn_t | 895 | static irqreturn_t e1000_test_intr(int irq, void *data) |
911 | e1000_test_intr(int irq, void *data) | ||
912 | { | 896 | { |
913 | struct net_device *netdev = (struct net_device *) data; | 897 | struct net_device *netdev = (struct net_device *)data; |
914 | struct e1000_adapter *adapter = netdev_priv(netdev); | 898 | struct e1000_adapter *adapter = netdev_priv(netdev); |
899 | struct e1000_hw *hw = &adapter->hw; | ||
915 | 900 | ||
916 | adapter->test_icr |= E1000_READ_REG(&adapter->hw, ICR); | 901 | adapter->test_icr |= er32(ICR); |
917 | 902 | ||
918 | return IRQ_HANDLED; | 903 | return IRQ_HANDLED; |
919 | } | 904 | } |
920 | 905 | ||
921 | static int | 906 | static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) |
922 | e1000_intr_test(struct e1000_adapter *adapter, u64 *data) | ||
923 | { | 907 | { |
924 | struct net_device *netdev = adapter->netdev; | 908 | struct net_device *netdev = adapter->netdev; |
925 | u32 mask, i = 0; | 909 | u32 mask, i = 0; |
926 | bool shared_int = true; | 910 | bool shared_int = true; |
927 | u32 irq = adapter->pdev->irq; | 911 | u32 irq = adapter->pdev->irq; |
912 | struct e1000_hw *hw = &adapter->hw; | ||
928 | 913 | ||
929 | *data = 0; | 914 | *data = 0; |
930 | 915 | ||
@@ -942,13 +927,13 @@ e1000_intr_test(struct e1000_adapter *adapter, u64 *data) | |||
942 | (shared_int ? "shared" : "unshared")); | 927 | (shared_int ? "shared" : "unshared")); |
943 | 928 | ||
944 | /* Disable all the interrupts */ | 929 | /* Disable all the interrupts */ |
945 | E1000_WRITE_REG(&adapter->hw, IMC, 0xFFFFFFFF); | 930 | ew32(IMC, 0xFFFFFFFF); |
946 | msleep(10); | 931 | msleep(10); |
947 | 932 | ||
948 | /* Test each interrupt */ | 933 | /* Test each interrupt */ |
949 | for (; i < 10; i++) { | 934 | for (; i < 10; i++) { |
950 | 935 | ||
951 | if (adapter->hw.mac_type == e1000_ich8lan && i == 8) | 936 | if (hw->mac_type == e1000_ich8lan && i == 8) |
952 | continue; | 937 | continue; |
953 | 938 | ||
954 | /* Interrupt to test */ | 939 | /* Interrupt to test */ |
@@ -962,8 +947,8 @@ e1000_intr_test(struct e1000_adapter *adapter, u64 *data) | |||
962 | * test failed. | 947 | * test failed. |
963 | */ | 948 | */ |
964 | adapter->test_icr = 0; | 949 | adapter->test_icr = 0; |
965 | E1000_WRITE_REG(&adapter->hw, IMC, mask); | 950 | ew32(IMC, mask); |
966 | E1000_WRITE_REG(&adapter->hw, ICS, mask); | 951 | ew32(ICS, mask); |
967 | msleep(10); | 952 | msleep(10); |
968 | 953 | ||
969 | if (adapter->test_icr & mask) { | 954 | if (adapter->test_icr & mask) { |
@@ -979,8 +964,8 @@ e1000_intr_test(struct e1000_adapter *adapter, u64 *data) | |||
979 | * test failed. | 964 | * test failed. |
980 | */ | 965 | */ |
981 | adapter->test_icr = 0; | 966 | adapter->test_icr = 0; |
982 | E1000_WRITE_REG(&adapter->hw, IMS, mask); | 967 | ew32(IMS, mask); |
983 | E1000_WRITE_REG(&adapter->hw, ICS, mask); | 968 | ew32(ICS, mask); |
984 | msleep(10); | 969 | msleep(10); |
985 | 970 | ||
986 | if (!(adapter->test_icr & mask)) { | 971 | if (!(adapter->test_icr & mask)) { |
@@ -996,8 +981,8 @@ e1000_intr_test(struct e1000_adapter *adapter, u64 *data) | |||
996 | * test failed. | 981 | * test failed. |
997 | */ | 982 | */ |
998 | adapter->test_icr = 0; | 983 | adapter->test_icr = 0; |
999 | E1000_WRITE_REG(&adapter->hw, IMC, ~mask & 0x00007FFF); | 984 | ew32(IMC, ~mask & 0x00007FFF); |
1000 | E1000_WRITE_REG(&adapter->hw, ICS, ~mask & 0x00007FFF); | 985 | ew32(ICS, ~mask & 0x00007FFF); |
1001 | msleep(10); | 986 | msleep(10); |
1002 | 987 | ||
1003 | if (adapter->test_icr) { | 988 | if (adapter->test_icr) { |
@@ -1008,7 +993,7 @@ e1000_intr_test(struct e1000_adapter *adapter, u64 *data) | |||
1008 | } | 993 | } |
1009 | 994 | ||
1010 | /* Disable all the interrupts */ | 995 | /* Disable all the interrupts */ |
1011 | E1000_WRITE_REG(&adapter->hw, IMC, 0xFFFFFFFF); | 996 | ew32(IMC, 0xFFFFFFFF); |
1012 | msleep(10); | 997 | msleep(10); |
1013 | 998 | ||
1014 | /* Unhook test interrupt handler */ | 999 | /* Unhook test interrupt handler */ |
@@ -1017,8 +1002,7 @@ e1000_intr_test(struct e1000_adapter *adapter, u64 *data) | |||
1017 | return *data; | 1002 | return *data; |
1018 | } | 1003 | } |
1019 | 1004 | ||
1020 | static void | 1005 | static void e1000_free_desc_rings(struct e1000_adapter *adapter) |
1021 | e1000_free_desc_rings(struct e1000_adapter *adapter) | ||
1022 | { | 1006 | { |
1023 | struct e1000_tx_ring *txdr = &adapter->test_tx_ring; | 1007 | struct e1000_tx_ring *txdr = &adapter->test_tx_ring; |
1024 | struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; | 1008 | struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; |
@@ -1064,9 +1048,9 @@ e1000_free_desc_rings(struct e1000_adapter *adapter) | |||
1064 | return; | 1048 | return; |
1065 | } | 1049 | } |
1066 | 1050 | ||
1067 | static int | 1051 | static int e1000_setup_desc_rings(struct e1000_adapter *adapter) |
1068 | e1000_setup_desc_rings(struct e1000_adapter *adapter) | ||
1069 | { | 1052 | { |
1053 | struct e1000_hw *hw = &adapter->hw; | ||
1070 | struct e1000_tx_ring *txdr = &adapter->test_tx_ring; | 1054 | struct e1000_tx_ring *txdr = &adapter->test_tx_ring; |
1071 | struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; | 1055 | struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; |
1072 | struct pci_dev *pdev = adapter->pdev; | 1056 | struct pci_dev *pdev = adapter->pdev; |
@@ -1078,41 +1062,39 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter) | |||
1078 | if (!txdr->count) | 1062 | if (!txdr->count) |
1079 | txdr->count = E1000_DEFAULT_TXD; | 1063 | txdr->count = E1000_DEFAULT_TXD; |
1080 | 1064 | ||
1081 | if (!(txdr->buffer_info = kcalloc(txdr->count, | 1065 | txdr->buffer_info = kcalloc(txdr->count, sizeof(struct e1000_buffer), |
1082 | sizeof(struct e1000_buffer), | 1066 | GFP_KERNEL); |
1083 | GFP_KERNEL))) { | 1067 | if (!txdr->buffer_info) { |
1084 | ret_val = 1; | 1068 | ret_val = 1; |
1085 | goto err_nomem; | 1069 | goto err_nomem; |
1086 | } | 1070 | } |
1087 | 1071 | ||
1088 | txdr->size = txdr->count * sizeof(struct e1000_tx_desc); | 1072 | txdr->size = txdr->count * sizeof(struct e1000_tx_desc); |
1089 | txdr->size = ALIGN(txdr->size, 4096); | 1073 | txdr->size = ALIGN(txdr->size, 4096); |
1090 | if (!(txdr->desc = pci_alloc_consistent(pdev, txdr->size, | 1074 | txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); |
1091 | &txdr->dma))) { | 1075 | if (!txdr->desc) { |
1092 | ret_val = 2; | 1076 | ret_val = 2; |
1093 | goto err_nomem; | 1077 | goto err_nomem; |
1094 | } | 1078 | } |
1095 | memset(txdr->desc, 0, txdr->size); | 1079 | memset(txdr->desc, 0, txdr->size); |
1096 | txdr->next_to_use = txdr->next_to_clean = 0; | 1080 | txdr->next_to_use = txdr->next_to_clean = 0; |
1097 | 1081 | ||
1098 | E1000_WRITE_REG(&adapter->hw, TDBAL, | 1082 | ew32(TDBAL, ((u64)txdr->dma & 0x00000000FFFFFFFF)); |
1099 | ((u64) txdr->dma & 0x00000000FFFFFFFF)); | 1083 | ew32(TDBAH, ((u64)txdr->dma >> 32)); |
1100 | E1000_WRITE_REG(&adapter->hw, TDBAH, ((u64) txdr->dma >> 32)); | 1084 | ew32(TDLEN, txdr->count * sizeof(struct e1000_tx_desc)); |
1101 | E1000_WRITE_REG(&adapter->hw, TDLEN, | 1085 | ew32(TDH, 0); |
1102 | txdr->count * sizeof(struct e1000_tx_desc)); | 1086 | ew32(TDT, 0); |
1103 | E1000_WRITE_REG(&adapter->hw, TDH, 0); | 1087 | ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | |
1104 | E1000_WRITE_REG(&adapter->hw, TDT, 0); | 1088 | E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | |
1105 | E1000_WRITE_REG(&adapter->hw, TCTL, | 1089 | E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT); |
1106 | E1000_TCTL_PSP | E1000_TCTL_EN | | ||
1107 | E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | | ||
1108 | E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT); | ||
1109 | 1090 | ||
1110 | for (i = 0; i < txdr->count; i++) { | 1091 | for (i = 0; i < txdr->count; i++) { |
1111 | struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i); | 1092 | struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i); |
1112 | struct sk_buff *skb; | 1093 | struct sk_buff *skb; |
1113 | unsigned int size = 1024; | 1094 | unsigned int size = 1024; |
1114 | 1095 | ||
1115 | if (!(skb = alloc_skb(size, GFP_KERNEL))) { | 1096 | skb = alloc_skb(size, GFP_KERNEL); |
1097 | if (!skb) { | ||
1116 | ret_val = 3; | 1098 | ret_val = 3; |
1117 | goto err_nomem; | 1099 | goto err_nomem; |
1118 | } | 1100 | } |
@@ -1135,40 +1117,40 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter) | |||
1135 | if (!rxdr->count) | 1117 | if (!rxdr->count) |
1136 | rxdr->count = E1000_DEFAULT_RXD; | 1118 | rxdr->count = E1000_DEFAULT_RXD; |
1137 | 1119 | ||
1138 | if (!(rxdr->buffer_info = kcalloc(rxdr->count, | 1120 | rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_buffer), |
1139 | sizeof(struct e1000_buffer), | 1121 | GFP_KERNEL); |
1140 | GFP_KERNEL))) { | 1122 | if (!rxdr->buffer_info) { |
1141 | ret_val = 4; | 1123 | ret_val = 4; |
1142 | goto err_nomem; | 1124 | goto err_nomem; |
1143 | } | 1125 | } |
1144 | 1126 | ||
1145 | rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); | 1127 | rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); |
1146 | if (!(rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma))) { | 1128 | rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); |
1129 | if (!rxdr->desc) { | ||
1147 | ret_val = 5; | 1130 | ret_val = 5; |
1148 | goto err_nomem; | 1131 | goto err_nomem; |
1149 | } | 1132 | } |
1150 | memset(rxdr->desc, 0, rxdr->size); | 1133 | memset(rxdr->desc, 0, rxdr->size); |
1151 | rxdr->next_to_use = rxdr->next_to_clean = 0; | 1134 | rxdr->next_to_use = rxdr->next_to_clean = 0; |
1152 | 1135 | ||
1153 | rctl = E1000_READ_REG(&adapter->hw, RCTL); | 1136 | rctl = er32(RCTL); |
1154 | E1000_WRITE_REG(&adapter->hw, RCTL, rctl & ~E1000_RCTL_EN); | 1137 | ew32(RCTL, rctl & ~E1000_RCTL_EN); |
1155 | E1000_WRITE_REG(&adapter->hw, RDBAL, | 1138 | ew32(RDBAL, ((u64)rxdr->dma & 0xFFFFFFFF)); |
1156 | ((u64) rxdr->dma & 0xFFFFFFFF)); | 1139 | ew32(RDBAH, ((u64)rxdr->dma >> 32)); |
1157 | E1000_WRITE_REG(&adapter->hw, RDBAH, ((u64) rxdr->dma >> 32)); | 1140 | ew32(RDLEN, rxdr->size); |
1158 | E1000_WRITE_REG(&adapter->hw, RDLEN, rxdr->size); | 1141 | ew32(RDH, 0); |
1159 | E1000_WRITE_REG(&adapter->hw, RDH, 0); | 1142 | ew32(RDT, 0); |
1160 | E1000_WRITE_REG(&adapter->hw, RDT, 0); | ||
1161 | rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | | 1143 | rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | |
1162 | E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | | 1144 | E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | |
1163 | (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT); | 1145 | (hw->mc_filter_type << E1000_RCTL_MO_SHIFT); |
1164 | E1000_WRITE_REG(&adapter->hw, RCTL, rctl); | 1146 | ew32(RCTL, rctl); |
1165 | 1147 | ||
1166 | for (i = 0; i < rxdr->count; i++) { | 1148 | for (i = 0; i < rxdr->count; i++) { |
1167 | struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i); | 1149 | struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i); |
1168 | struct sk_buff *skb; | 1150 | struct sk_buff *skb; |
1169 | 1151 | ||
1170 | if (!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, | 1152 | skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL); |
1171 | GFP_KERNEL))) { | 1153 | if (!skb) { |
1172 | ret_val = 6; | 1154 | ret_val = 6; |
1173 | goto err_nomem; | 1155 | goto err_nomem; |
1174 | } | 1156 | } |
@@ -1189,73 +1171,74 @@ err_nomem: | |||
1189 | return ret_val; | 1171 | return ret_val; |
1190 | } | 1172 | } |
1191 | 1173 | ||
1192 | static void | 1174 | static void e1000_phy_disable_receiver(struct e1000_adapter *adapter) |
1193 | e1000_phy_disable_receiver(struct e1000_adapter *adapter) | ||
1194 | { | 1175 | { |
1176 | struct e1000_hw *hw = &adapter->hw; | ||
1177 | |||
1195 | /* Write out to PHY registers 29 and 30 to disable the Receiver. */ | 1178 | /* Write out to PHY registers 29 and 30 to disable the Receiver. */ |
1196 | e1000_write_phy_reg(&adapter->hw, 29, 0x001F); | 1179 | e1000_write_phy_reg(hw, 29, 0x001F); |
1197 | e1000_write_phy_reg(&adapter->hw, 30, 0x8FFC); | 1180 | e1000_write_phy_reg(hw, 30, 0x8FFC); |
1198 | e1000_write_phy_reg(&adapter->hw, 29, 0x001A); | 1181 | e1000_write_phy_reg(hw, 29, 0x001A); |
1199 | e1000_write_phy_reg(&adapter->hw, 30, 0x8FF0); | 1182 | e1000_write_phy_reg(hw, 30, 0x8FF0); |
1200 | } | 1183 | } |
1201 | 1184 | ||
1202 | static void | 1185 | static void e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter) |
1203 | e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter) | ||
1204 | { | 1186 | { |
1187 | struct e1000_hw *hw = &adapter->hw; | ||
1205 | u16 phy_reg; | 1188 | u16 phy_reg; |
1206 | 1189 | ||
1207 | /* Because we reset the PHY above, we need to re-force TX_CLK in the | 1190 | /* Because we reset the PHY above, we need to re-force TX_CLK in the |
1208 | * Extended PHY Specific Control Register to 25MHz clock. This | 1191 | * Extended PHY Specific Control Register to 25MHz clock. This |
1209 | * value defaults back to a 2.5MHz clock when the PHY is reset. | 1192 | * value defaults back to a 2.5MHz clock when the PHY is reset. |
1210 | */ | 1193 | */ |
1211 | e1000_read_phy_reg(&adapter->hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); | 1194 | e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); |
1212 | phy_reg |= M88E1000_EPSCR_TX_CLK_25; | 1195 | phy_reg |= M88E1000_EPSCR_TX_CLK_25; |
1213 | e1000_write_phy_reg(&adapter->hw, | 1196 | e1000_write_phy_reg(hw, |
1214 | M88E1000_EXT_PHY_SPEC_CTRL, phy_reg); | 1197 | M88E1000_EXT_PHY_SPEC_CTRL, phy_reg); |
1215 | 1198 | ||
1216 | /* In addition, because of the s/w reset above, we need to enable | 1199 | /* In addition, because of the s/w reset above, we need to enable |
1217 | * CRS on TX. This must be set for both full and half duplex | 1200 | * CRS on TX. This must be set for both full and half duplex |
1218 | * operation. | 1201 | * operation. |
1219 | */ | 1202 | */ |
1220 | e1000_read_phy_reg(&adapter->hw, M88E1000_PHY_SPEC_CTRL, &phy_reg); | 1203 | e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg); |
1221 | phy_reg |= M88E1000_PSCR_ASSERT_CRS_ON_TX; | 1204 | phy_reg |= M88E1000_PSCR_ASSERT_CRS_ON_TX; |
1222 | e1000_write_phy_reg(&adapter->hw, | 1205 | e1000_write_phy_reg(hw, |
1223 | M88E1000_PHY_SPEC_CTRL, phy_reg); | 1206 | M88E1000_PHY_SPEC_CTRL, phy_reg); |
1224 | } | 1207 | } |
1225 | 1208 | ||
1226 | static int | 1209 | static int e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter) |
1227 | e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter) | ||
1228 | { | 1210 | { |
1211 | struct e1000_hw *hw = &adapter->hw; | ||
1229 | u32 ctrl_reg; | 1212 | u32 ctrl_reg; |
1230 | u16 phy_reg; | 1213 | u16 phy_reg; |
1231 | 1214 | ||
1232 | /* Setup the Device Control Register for PHY loopback test. */ | 1215 | /* Setup the Device Control Register for PHY loopback test. */ |
1233 | 1216 | ||
1234 | ctrl_reg = E1000_READ_REG(&adapter->hw, CTRL); | 1217 | ctrl_reg = er32(CTRL); |
1235 | ctrl_reg |= (E1000_CTRL_ILOS | /* Invert Loss-Of-Signal */ | 1218 | ctrl_reg |= (E1000_CTRL_ILOS | /* Invert Loss-Of-Signal */ |
1236 | E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ | 1219 | E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ |
1237 | E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ | 1220 | E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ |
1238 | E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */ | 1221 | E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */ |
1239 | E1000_CTRL_FD); /* Force Duplex to FULL */ | 1222 | E1000_CTRL_FD); /* Force Duplex to FULL */ |
1240 | 1223 | ||
1241 | E1000_WRITE_REG(&adapter->hw, CTRL, ctrl_reg); | 1224 | ew32(CTRL, ctrl_reg); |
1242 | 1225 | ||
1243 | /* Read the PHY Specific Control Register (0x10) */ | 1226 | /* Read the PHY Specific Control Register (0x10) */ |
1244 | e1000_read_phy_reg(&adapter->hw, M88E1000_PHY_SPEC_CTRL, &phy_reg); | 1227 | e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg); |
1245 | 1228 | ||
1246 | /* Clear Auto-Crossover bits in PHY Specific Control Register | 1229 | /* Clear Auto-Crossover bits in PHY Specific Control Register |
1247 | * (bits 6:5). | 1230 | * (bits 6:5). |
1248 | */ | 1231 | */ |
1249 | phy_reg &= ~M88E1000_PSCR_AUTO_X_MODE; | 1232 | phy_reg &= ~M88E1000_PSCR_AUTO_X_MODE; |
1250 | e1000_write_phy_reg(&adapter->hw, M88E1000_PHY_SPEC_CTRL, phy_reg); | 1233 | e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg); |
1251 | 1234 | ||
1252 | /* Perform software reset on the PHY */ | 1235 | /* Perform software reset on the PHY */ |
1253 | e1000_phy_reset(&adapter->hw); | 1236 | e1000_phy_reset(hw); |
1254 | 1237 | ||
1255 | /* Have to setup TX_CLK and TX_CRS after software reset */ | 1238 | /* Have to setup TX_CLK and TX_CRS after software reset */ |
1256 | e1000_phy_reset_clk_and_crs(adapter); | 1239 | e1000_phy_reset_clk_and_crs(adapter); |
1257 | 1240 | ||
1258 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x8100); | 1241 | e1000_write_phy_reg(hw, PHY_CTRL, 0x8100); |
1259 | 1242 | ||
1260 | /* Wait for reset to complete. */ | 1243 | /* Wait for reset to complete. */ |
1261 | udelay(500); | 1244 | udelay(500); |
@@ -1267,55 +1250,55 @@ e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter) | |||
1267 | e1000_phy_disable_receiver(adapter); | 1250 | e1000_phy_disable_receiver(adapter); |
1268 | 1251 | ||
1269 | /* Set the loopback bit in the PHY control register. */ | 1252 | /* Set the loopback bit in the PHY control register. */ |
1270 | e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg); | 1253 | e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); |
1271 | phy_reg |= MII_CR_LOOPBACK; | 1254 | phy_reg |= MII_CR_LOOPBACK; |
1272 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_reg); | 1255 | e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); |
1273 | 1256 | ||
1274 | /* Setup TX_CLK and TX_CRS one more time. */ | 1257 | /* Setup TX_CLK and TX_CRS one more time. */ |
1275 | e1000_phy_reset_clk_and_crs(adapter); | 1258 | e1000_phy_reset_clk_and_crs(adapter); |
1276 | 1259 | ||
1277 | /* Check Phy Configuration */ | 1260 | /* Check Phy Configuration */ |
1278 | e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg); | 1261 | e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); |
1279 | if (phy_reg != 0x4100) | 1262 | if (phy_reg != 0x4100) |
1280 | return 9; | 1263 | return 9; |
1281 | 1264 | ||
1282 | e1000_read_phy_reg(&adapter->hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); | 1265 | e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); |
1283 | if (phy_reg != 0x0070) | 1266 | if (phy_reg != 0x0070) |
1284 | return 10; | 1267 | return 10; |
1285 | 1268 | ||
1286 | e1000_read_phy_reg(&adapter->hw, 29, &phy_reg); | 1269 | e1000_read_phy_reg(hw, 29, &phy_reg); |
1287 | if (phy_reg != 0x001A) | 1270 | if (phy_reg != 0x001A) |
1288 | return 11; | 1271 | return 11; |
1289 | 1272 | ||
1290 | return 0; | 1273 | return 0; |
1291 | } | 1274 | } |
1292 | 1275 | ||
1293 | static int | 1276 | static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) |
1294 | e1000_integrated_phy_loopback(struct e1000_adapter *adapter) | ||
1295 | { | 1277 | { |
1278 | struct e1000_hw *hw = &adapter->hw; | ||
1296 | u32 ctrl_reg = 0; | 1279 | u32 ctrl_reg = 0; |
1297 | u32 stat_reg = 0; | 1280 | u32 stat_reg = 0; |
1298 | 1281 | ||
1299 | adapter->hw.autoneg = false; | 1282 | hw->autoneg = false; |
1300 | 1283 | ||
1301 | if (adapter->hw.phy_type == e1000_phy_m88) { | 1284 | if (hw->phy_type == e1000_phy_m88) { |
1302 | /* Auto-MDI/MDIX Off */ | 1285 | /* Auto-MDI/MDIX Off */ |
1303 | e1000_write_phy_reg(&adapter->hw, | 1286 | e1000_write_phy_reg(hw, |
1304 | M88E1000_PHY_SPEC_CTRL, 0x0808); | 1287 | M88E1000_PHY_SPEC_CTRL, 0x0808); |
1305 | /* reset to update Auto-MDI/MDIX */ | 1288 | /* reset to update Auto-MDI/MDIX */ |
1306 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x9140); | 1289 | e1000_write_phy_reg(hw, PHY_CTRL, 0x9140); |
1307 | /* autoneg off */ | 1290 | /* autoneg off */ |
1308 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x8140); | 1291 | e1000_write_phy_reg(hw, PHY_CTRL, 0x8140); |
1309 | } else if (adapter->hw.phy_type == e1000_phy_gg82563) | 1292 | } else if (hw->phy_type == e1000_phy_gg82563) |
1310 | e1000_write_phy_reg(&adapter->hw, | 1293 | e1000_write_phy_reg(hw, |
1311 | GG82563_PHY_KMRN_MODE_CTRL, | 1294 | GG82563_PHY_KMRN_MODE_CTRL, |
1312 | 0x1CC); | 1295 | 0x1CC); |
1313 | 1296 | ||
1314 | ctrl_reg = E1000_READ_REG(&adapter->hw, CTRL); | 1297 | ctrl_reg = er32(CTRL); |
1315 | 1298 | ||
1316 | if (adapter->hw.phy_type == e1000_phy_ife) { | 1299 | if (hw->phy_type == e1000_phy_ife) { |
1317 | /* force 100, set loopback */ | 1300 | /* force 100, set loopback */ |
1318 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x6100); | 1301 | e1000_write_phy_reg(hw, PHY_CTRL, 0x6100); |
1319 | 1302 | ||
1320 | /* Now set up the MAC to the same speed/duplex as the PHY. */ | 1303 | /* Now set up the MAC to the same speed/duplex as the PHY. */ |
1321 | ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ | 1304 | ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ |
@@ -1325,10 +1308,10 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter) | |||
1325 | E1000_CTRL_FD); /* Force Duplex to FULL */ | 1308 | E1000_CTRL_FD); /* Force Duplex to FULL */ |
1326 | } else { | 1309 | } else { |
1327 | /* force 1000, set loopback */ | 1310 | /* force 1000, set loopback */ |
1328 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x4140); | 1311 | e1000_write_phy_reg(hw, PHY_CTRL, 0x4140); |
1329 | 1312 | ||
1330 | /* Now set up the MAC to the same speed/duplex as the PHY. */ | 1313 | /* Now set up the MAC to the same speed/duplex as the PHY. */ |
1331 | ctrl_reg = E1000_READ_REG(&adapter->hw, CTRL); | 1314 | ctrl_reg = er32(CTRL); |
1332 | ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ | 1315 | ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ |
1333 | ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ | 1316 | ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ |
1334 | E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ | 1317 | E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ |
@@ -1336,23 +1319,23 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter) | |||
1336 | E1000_CTRL_FD); /* Force Duplex to FULL */ | 1319 | E1000_CTRL_FD); /* Force Duplex to FULL */ |
1337 | } | 1320 | } |
1338 | 1321 | ||
1339 | if (adapter->hw.media_type == e1000_media_type_copper && | 1322 | if (hw->media_type == e1000_media_type_copper && |
1340 | adapter->hw.phy_type == e1000_phy_m88) | 1323 | hw->phy_type == e1000_phy_m88) |
1341 | ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ | 1324 | ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ |
1342 | else { | 1325 | else { |
1343 | /* Set the ILOS bit on the fiber Nic is half | 1326 | /* Set the ILOS bit on the fiber Nic is half |
1344 | * duplex link is detected. */ | 1327 | * duplex link is detected. */ |
1345 | stat_reg = E1000_READ_REG(&adapter->hw, STATUS); | 1328 | stat_reg = er32(STATUS); |
1346 | if ((stat_reg & E1000_STATUS_FD) == 0) | 1329 | if ((stat_reg & E1000_STATUS_FD) == 0) |
1347 | ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); | 1330 | ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); |
1348 | } | 1331 | } |
1349 | 1332 | ||
1350 | E1000_WRITE_REG(&adapter->hw, CTRL, ctrl_reg); | 1333 | ew32(CTRL, ctrl_reg); |
1351 | 1334 | ||
1352 | /* Disable the receiver on the PHY so when a cable is plugged in, the | 1335 | /* Disable the receiver on the PHY so when a cable is plugged in, the |
1353 | * PHY does not begin to autoneg when a cable is reconnected to the NIC. | 1336 | * PHY does not begin to autoneg when a cable is reconnected to the NIC. |
1354 | */ | 1337 | */ |
1355 | if (adapter->hw.phy_type == e1000_phy_m88) | 1338 | if (hw->phy_type == e1000_phy_m88) |
1356 | e1000_phy_disable_receiver(adapter); | 1339 | e1000_phy_disable_receiver(adapter); |
1357 | 1340 | ||
1358 | udelay(500); | 1341 | udelay(500); |
@@ -1360,15 +1343,15 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter) | |||
1360 | return 0; | 1343 | return 0; |
1361 | } | 1344 | } |
1362 | 1345 | ||
1363 | static int | 1346 | static int e1000_set_phy_loopback(struct e1000_adapter *adapter) |
1364 | e1000_set_phy_loopback(struct e1000_adapter *adapter) | ||
1365 | { | 1347 | { |
1348 | struct e1000_hw *hw = &adapter->hw; | ||
1366 | u16 phy_reg = 0; | 1349 | u16 phy_reg = 0; |
1367 | u16 count = 0; | 1350 | u16 count = 0; |
1368 | 1351 | ||
1369 | switch (adapter->hw.mac_type) { | 1352 | switch (hw->mac_type) { |
1370 | case e1000_82543: | 1353 | case e1000_82543: |
1371 | if (adapter->hw.media_type == e1000_media_type_copper) { | 1354 | if (hw->media_type == e1000_media_type_copper) { |
1372 | /* Attempt to setup Loopback mode on Non-integrated PHY. | 1355 | /* Attempt to setup Loopback mode on Non-integrated PHY. |
1373 | * Some PHY registers get corrupted at random, so | 1356 | * Some PHY registers get corrupted at random, so |
1374 | * attempt this 10 times. | 1357 | * attempt this 10 times. |
@@ -1402,9 +1385,9 @@ e1000_set_phy_loopback(struct e1000_adapter *adapter) | |||
1402 | /* Default PHY loopback work is to read the MII | 1385 | /* Default PHY loopback work is to read the MII |
1403 | * control register and assert bit 14 (loopback mode). | 1386 | * control register and assert bit 14 (loopback mode). |
1404 | */ | 1387 | */ |
1405 | e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg); | 1388 | e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); |
1406 | phy_reg |= MII_CR_LOOPBACK; | 1389 | phy_reg |= MII_CR_LOOPBACK; |
1407 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_reg); | 1390 | e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); |
1408 | return 0; | 1391 | return 0; |
1409 | break; | 1392 | break; |
1410 | } | 1393 | } |
@@ -1412,8 +1395,7 @@ e1000_set_phy_loopback(struct e1000_adapter *adapter) | |||
1412 | return 8; | 1395 | return 8; |
1413 | } | 1396 | } |
1414 | 1397 | ||
1415 | static int | 1398 | static int e1000_setup_loopback_test(struct e1000_adapter *adapter) |
1416 | e1000_setup_loopback_test(struct e1000_adapter *adapter) | ||
1417 | { | 1399 | { |
1418 | struct e1000_hw *hw = &adapter->hw; | 1400 | struct e1000_hw *hw = &adapter->hw; |
1419 | u32 rctl; | 1401 | u32 rctl; |
@@ -1431,14 +1413,14 @@ e1000_setup_loopback_test(struct e1000_adapter *adapter) | |||
1431 | case e1000_82572: | 1413 | case e1000_82572: |
1432 | #define E1000_SERDES_LB_ON 0x410 | 1414 | #define E1000_SERDES_LB_ON 0x410 |
1433 | e1000_set_phy_loopback(adapter); | 1415 | e1000_set_phy_loopback(adapter); |
1434 | E1000_WRITE_REG(hw, SCTL, E1000_SERDES_LB_ON); | 1416 | ew32(SCTL, E1000_SERDES_LB_ON); |
1435 | msleep(10); | 1417 | msleep(10); |
1436 | return 0; | 1418 | return 0; |
1437 | break; | 1419 | break; |
1438 | default: | 1420 | default: |
1439 | rctl = E1000_READ_REG(hw, RCTL); | 1421 | rctl = er32(RCTL); |
1440 | rctl |= E1000_RCTL_LBM_TCVR; | 1422 | rctl |= E1000_RCTL_LBM_TCVR; |
1441 | E1000_WRITE_REG(hw, RCTL, rctl); | 1423 | ew32(RCTL, rctl); |
1442 | return 0; | 1424 | return 0; |
1443 | } | 1425 | } |
1444 | } else if (hw->media_type == e1000_media_type_copper) | 1426 | } else if (hw->media_type == e1000_media_type_copper) |
@@ -1447,16 +1429,15 @@ e1000_setup_loopback_test(struct e1000_adapter *adapter) | |||
1447 | return 7; | 1429 | return 7; |
1448 | } | 1430 | } |
1449 | 1431 | ||
1450 | static void | 1432 | static void e1000_loopback_cleanup(struct e1000_adapter *adapter) |
1451 | e1000_loopback_cleanup(struct e1000_adapter *adapter) | ||
1452 | { | 1433 | { |
1453 | struct e1000_hw *hw = &adapter->hw; | 1434 | struct e1000_hw *hw = &adapter->hw; |
1454 | u32 rctl; | 1435 | u32 rctl; |
1455 | u16 phy_reg; | 1436 | u16 phy_reg; |
1456 | 1437 | ||
1457 | rctl = E1000_READ_REG(hw, RCTL); | 1438 | rctl = er32(RCTL); |
1458 | rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); | 1439 | rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); |
1459 | E1000_WRITE_REG(hw, RCTL, rctl); | 1440 | ew32(RCTL, rctl); |
1460 | 1441 | ||
1461 | switch (hw->mac_type) { | 1442 | switch (hw->mac_type) { |
1462 | case e1000_82571: | 1443 | case e1000_82571: |
@@ -1464,7 +1445,7 @@ e1000_loopback_cleanup(struct e1000_adapter *adapter) | |||
1464 | if (hw->media_type == e1000_media_type_fiber || | 1445 | if (hw->media_type == e1000_media_type_fiber || |
1465 | hw->media_type == e1000_media_type_internal_serdes) { | 1446 | hw->media_type == e1000_media_type_internal_serdes) { |
1466 | #define E1000_SERDES_LB_OFF 0x400 | 1447 | #define E1000_SERDES_LB_OFF 0x400 |
1467 | E1000_WRITE_REG(hw, SCTL, E1000_SERDES_LB_OFF); | 1448 | ew32(SCTL, E1000_SERDES_LB_OFF); |
1468 | msleep(10); | 1449 | msleep(10); |
1469 | break; | 1450 | break; |
1470 | } | 1451 | } |
@@ -1489,8 +1470,8 @@ e1000_loopback_cleanup(struct e1000_adapter *adapter) | |||
1489 | } | 1470 | } |
1490 | } | 1471 | } |
1491 | 1472 | ||
1492 | static void | 1473 | static void e1000_create_lbtest_frame(struct sk_buff *skb, |
1493 | e1000_create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) | 1474 | unsigned int frame_size) |
1494 | { | 1475 | { |
1495 | memset(skb->data, 0xFF, frame_size); | 1476 | memset(skb->data, 0xFF, frame_size); |
1496 | frame_size &= ~1; | 1477 | frame_size &= ~1; |
@@ -1499,8 +1480,8 @@ e1000_create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) | |||
1499 | memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); | 1480 | memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); |
1500 | } | 1481 | } |
1501 | 1482 | ||
1502 | static int | 1483 | static int e1000_check_lbtest_frame(struct sk_buff *skb, |
1503 | e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) | 1484 | unsigned int frame_size) |
1504 | { | 1485 | { |
1505 | frame_size &= ~1; | 1486 | frame_size &= ~1; |
1506 | if (*(skb->data + 3) == 0xFF) { | 1487 | if (*(skb->data + 3) == 0xFF) { |
@@ -1512,16 +1493,16 @@ e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) | |||
1512 | return 13; | 1493 | return 13; |
1513 | } | 1494 | } |
1514 | 1495 | ||
1515 | static int | 1496 | static int e1000_run_loopback_test(struct e1000_adapter *adapter) |
1516 | e1000_run_loopback_test(struct e1000_adapter *adapter) | ||
1517 | { | 1497 | { |
1498 | struct e1000_hw *hw = &adapter->hw; | ||
1518 | struct e1000_tx_ring *txdr = &adapter->test_tx_ring; | 1499 | struct e1000_tx_ring *txdr = &adapter->test_tx_ring; |
1519 | struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; | 1500 | struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; |
1520 | struct pci_dev *pdev = adapter->pdev; | 1501 | struct pci_dev *pdev = adapter->pdev; |
1521 | int i, j, k, l, lc, good_cnt, ret_val=0; | 1502 | int i, j, k, l, lc, good_cnt, ret_val=0; |
1522 | unsigned long time; | 1503 | unsigned long time; |
1523 | 1504 | ||
1524 | E1000_WRITE_REG(&adapter->hw, RDT, rxdr->count - 1); | 1505 | ew32(RDT, rxdr->count - 1); |
1525 | 1506 | ||
1526 | /* Calculate the loop count based on the largest descriptor ring | 1507 | /* Calculate the loop count based on the largest descriptor ring |
1527 | * The idea is to wrap the largest ring a number of times using 64 | 1508 | * The idea is to wrap the largest ring a number of times using 64 |
@@ -1544,7 +1525,7 @@ e1000_run_loopback_test(struct e1000_adapter *adapter) | |||
1544 | PCI_DMA_TODEVICE); | 1525 | PCI_DMA_TODEVICE); |
1545 | if (unlikely(++k == txdr->count)) k = 0; | 1526 | if (unlikely(++k == txdr->count)) k = 0; |
1546 | } | 1527 | } |
1547 | E1000_WRITE_REG(&adapter->hw, TDT, k); | 1528 | ew32(TDT, k); |
1548 | msleep(200); | 1529 | msleep(200); |
1549 | time = jiffies; /* set the start time for the receive */ | 1530 | time = jiffies; /* set the start time for the receive */ |
1550 | good_cnt = 0; | 1531 | good_cnt = 0; |
@@ -1577,21 +1558,24 @@ e1000_run_loopback_test(struct e1000_adapter *adapter) | |||
1577 | return ret_val; | 1558 | return ret_val; |
1578 | } | 1559 | } |
1579 | 1560 | ||
1580 | static int | 1561 | static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) |
1581 | e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) | ||
1582 | { | 1562 | { |
1563 | struct e1000_hw *hw = &adapter->hw; | ||
1564 | |||
1583 | /* PHY loopback cannot be performed if SoL/IDER | 1565 | /* PHY loopback cannot be performed if SoL/IDER |
1584 | * sessions are active */ | 1566 | * sessions are active */ |
1585 | if (e1000_check_phy_reset_block(&adapter->hw)) { | 1567 | if (e1000_check_phy_reset_block(hw)) { |
1586 | DPRINTK(DRV, ERR, "Cannot do PHY loopback test " | 1568 | DPRINTK(DRV, ERR, "Cannot do PHY loopback test " |
1587 | "when SoL/IDER is active.\n"); | 1569 | "when SoL/IDER is active.\n"); |
1588 | *data = 0; | 1570 | *data = 0; |
1589 | goto out; | 1571 | goto out; |
1590 | } | 1572 | } |
1591 | 1573 | ||
1592 | if ((*data = e1000_setup_desc_rings(adapter))) | 1574 | *data = e1000_setup_desc_rings(adapter); |
1575 | if (*data) | ||
1593 | goto out; | 1576 | goto out; |
1594 | if ((*data = e1000_setup_loopback_test(adapter))) | 1577 | *data = e1000_setup_loopback_test(adapter); |
1578 | if (*data) | ||
1595 | goto err_loopback; | 1579 | goto err_loopback; |
1596 | *data = e1000_run_loopback_test(adapter); | 1580 | *data = e1000_run_loopback_test(adapter); |
1597 | e1000_loopback_cleanup(adapter); | 1581 | e1000_loopback_cleanup(adapter); |
@@ -1602,38 +1586,37 @@ out: | |||
1602 | return *data; | 1586 | return *data; |
1603 | } | 1587 | } |
1604 | 1588 | ||
1605 | static int | 1589 | static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) |
1606 | e1000_link_test(struct e1000_adapter *adapter, u64 *data) | ||
1607 | { | 1590 | { |
1591 | struct e1000_hw *hw = &adapter->hw; | ||
1608 | *data = 0; | 1592 | *data = 0; |
1609 | if (adapter->hw.media_type == e1000_media_type_internal_serdes) { | 1593 | if (hw->media_type == e1000_media_type_internal_serdes) { |
1610 | int i = 0; | 1594 | int i = 0; |
1611 | adapter->hw.serdes_link_down = true; | 1595 | hw->serdes_link_down = true; |
1612 | 1596 | ||
1613 | /* On some blade server designs, link establishment | 1597 | /* On some blade server designs, link establishment |
1614 | * could take as long as 2-3 minutes */ | 1598 | * could take as long as 2-3 minutes */ |
1615 | do { | 1599 | do { |
1616 | e1000_check_for_link(&adapter->hw); | 1600 | e1000_check_for_link(hw); |
1617 | if (!adapter->hw.serdes_link_down) | 1601 | if (!hw->serdes_link_down) |
1618 | return *data; | 1602 | return *data; |
1619 | msleep(20); | 1603 | msleep(20); |
1620 | } while (i++ < 3750); | 1604 | } while (i++ < 3750); |
1621 | 1605 | ||
1622 | *data = 1; | 1606 | *data = 1; |
1623 | } else { | 1607 | } else { |
1624 | e1000_check_for_link(&adapter->hw); | 1608 | e1000_check_for_link(hw); |
1625 | if (adapter->hw.autoneg) /* if auto_neg is set wait for it */ | 1609 | if (hw->autoneg) /* if auto_neg is set wait for it */ |
1626 | msleep(4000); | 1610 | msleep(4000); |
1627 | 1611 | ||
1628 | if (!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) { | 1612 | if (!(er32(STATUS) & E1000_STATUS_LU)) { |
1629 | *data = 1; | 1613 | *data = 1; |
1630 | } | 1614 | } |
1631 | } | 1615 | } |
1632 | return *data; | 1616 | return *data; |
1633 | } | 1617 | } |
1634 | 1618 | ||
1635 | static int | 1619 | static int e1000_get_sset_count(struct net_device *netdev, int sset) |
1636 | e1000_get_sset_count(struct net_device *netdev, int sset) | ||
1637 | { | 1620 | { |
1638 | switch (sset) { | 1621 | switch (sset) { |
1639 | case ETH_SS_TEST: | 1622 | case ETH_SS_TEST: |
@@ -1645,11 +1628,11 @@ e1000_get_sset_count(struct net_device *netdev, int sset) | |||
1645 | } | 1628 | } |
1646 | } | 1629 | } |
1647 | 1630 | ||
1648 | static void | 1631 | static void e1000_diag_test(struct net_device *netdev, |
1649 | e1000_diag_test(struct net_device *netdev, | 1632 | struct ethtool_test *eth_test, u64 *data) |
1650 | struct ethtool_test *eth_test, u64 *data) | ||
1651 | { | 1633 | { |
1652 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1634 | struct e1000_adapter *adapter = netdev_priv(netdev); |
1635 | struct e1000_hw *hw = &adapter->hw; | ||
1653 | bool if_running = netif_running(netdev); | 1636 | bool if_running = netif_running(netdev); |
1654 | 1637 | ||
1655 | set_bit(__E1000_TESTING, &adapter->flags); | 1638 | set_bit(__E1000_TESTING, &adapter->flags); |
@@ -1657,9 +1640,9 @@ e1000_diag_test(struct net_device *netdev, | |||
1657 | /* Offline tests */ | 1640 | /* Offline tests */ |
1658 | 1641 | ||
1659 | /* save speed, duplex, autoneg settings */ | 1642 | /* save speed, duplex, autoneg settings */ |
1660 | u16 autoneg_advertised = adapter->hw.autoneg_advertised; | 1643 | u16 autoneg_advertised = hw->autoneg_advertised; |
1661 | u8 forced_speed_duplex = adapter->hw.forced_speed_duplex; | 1644 | u8 forced_speed_duplex = hw->forced_speed_duplex; |
1662 | u8 autoneg = adapter->hw.autoneg; | 1645 | u8 autoneg = hw->autoneg; |
1663 | 1646 | ||
1664 | DPRINTK(HW, INFO, "offline testing starting\n"); | 1647 | DPRINTK(HW, INFO, "offline testing starting\n"); |
1665 | 1648 | ||
@@ -1692,9 +1675,9 @@ e1000_diag_test(struct net_device *netdev, | |||
1692 | eth_test->flags |= ETH_TEST_FL_FAILED; | 1675 | eth_test->flags |= ETH_TEST_FL_FAILED; |
1693 | 1676 | ||
1694 | /* restore speed, duplex, autoneg settings */ | 1677 | /* restore speed, duplex, autoneg settings */ |
1695 | adapter->hw.autoneg_advertised = autoneg_advertised; | 1678 | hw->autoneg_advertised = autoneg_advertised; |
1696 | adapter->hw.forced_speed_duplex = forced_speed_duplex; | 1679 | hw->forced_speed_duplex = forced_speed_duplex; |
1697 | adapter->hw.autoneg = autoneg; | 1680 | hw->autoneg = autoneg; |
1698 | 1681 | ||
1699 | e1000_reset(adapter); | 1682 | e1000_reset(adapter); |
1700 | clear_bit(__E1000_TESTING, &adapter->flags); | 1683 | clear_bit(__E1000_TESTING, &adapter->flags); |
@@ -1717,7 +1700,8 @@ e1000_diag_test(struct net_device *netdev, | |||
1717 | msleep_interruptible(4 * 1000); | 1700 | msleep_interruptible(4 * 1000); |
1718 | } | 1701 | } |
1719 | 1702 | ||
1720 | static int e1000_wol_exclusion(struct e1000_adapter *adapter, struct ethtool_wolinfo *wol) | 1703 | static int e1000_wol_exclusion(struct e1000_adapter *adapter, |
1704 | struct ethtool_wolinfo *wol) | ||
1721 | { | 1705 | { |
1722 | struct e1000_hw *hw = &adapter->hw; | 1706 | struct e1000_hw *hw = &adapter->hw; |
1723 | int retval = 1; /* fail by default */ | 1707 | int retval = 1; /* fail by default */ |
@@ -1742,7 +1726,7 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter, struct ethtool_wol | |||
1742 | case E1000_DEV_ID_82571EB_SERDES: | 1726 | case E1000_DEV_ID_82571EB_SERDES: |
1743 | case E1000_DEV_ID_82571EB_COPPER: | 1727 | case E1000_DEV_ID_82571EB_COPPER: |
1744 | /* Wake events not supported on port B */ | 1728 | /* Wake events not supported on port B */ |
1745 | if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) { | 1729 | if (er32(STATUS) & E1000_STATUS_FUNC_1) { |
1746 | wol->supported = 0; | 1730 | wol->supported = 0; |
1747 | break; | 1731 | break; |
1748 | } | 1732 | } |
@@ -1766,7 +1750,7 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter, struct ethtool_wol | |||
1766 | /* dual port cards only support WoL on port A from now on | 1750 | /* dual port cards only support WoL on port A from now on |
1767 | * unless it was enabled in the eeprom for port B | 1751 | * unless it was enabled in the eeprom for port B |
1768 | * so exclude FUNC_1 ports from having WoL enabled */ | 1752 | * so exclude FUNC_1 ports from having WoL enabled */ |
1769 | if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1 && | 1753 | if (er32(STATUS) & E1000_STATUS_FUNC_1 && |
1770 | !adapter->eeprom_wol) { | 1754 | !adapter->eeprom_wol) { |
1771 | wol->supported = 0; | 1755 | wol->supported = 0; |
1772 | break; | 1756 | break; |
@@ -1778,10 +1762,11 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter, struct ethtool_wol | |||
1778 | return retval; | 1762 | return retval; |
1779 | } | 1763 | } |
1780 | 1764 | ||
1781 | static void | 1765 | static void e1000_get_wol(struct net_device *netdev, |
1782 | e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | 1766 | struct ethtool_wolinfo *wol) |
1783 | { | 1767 | { |
1784 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1768 | struct e1000_adapter *adapter = netdev_priv(netdev); |
1769 | struct e1000_hw *hw = &adapter->hw; | ||
1785 | 1770 | ||
1786 | wol->supported = WAKE_UCAST | WAKE_MCAST | | 1771 | wol->supported = WAKE_UCAST | WAKE_MCAST | |
1787 | WAKE_BCAST | WAKE_MAGIC; | 1772 | WAKE_BCAST | WAKE_MAGIC; |
@@ -1793,7 +1778,7 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | |||
1793 | return; | 1778 | return; |
1794 | 1779 | ||
1795 | /* apply any specific unsupported masks here */ | 1780 | /* apply any specific unsupported masks here */ |
1796 | switch (adapter->hw.device_id) { | 1781 | switch (hw->device_id) { |
1797 | case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: | 1782 | case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: |
1798 | /* KSP3 does not suppport UCAST wake-ups */ | 1783 | /* KSP3 does not suppport UCAST wake-ups */ |
1799 | wol->supported &= ~WAKE_UCAST; | 1784 | wol->supported &= ~WAKE_UCAST; |
@@ -1818,8 +1803,7 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | |||
1818 | return; | 1803 | return; |
1819 | } | 1804 | } |
1820 | 1805 | ||
1821 | static int | 1806 | static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) |
1822 | e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | ||
1823 | { | 1807 | { |
1824 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1808 | struct e1000_adapter *adapter = netdev_priv(netdev); |
1825 | struct e1000_hw *hw = &adapter->hw; | 1809 | struct e1000_hw *hw = &adapter->hw; |
@@ -1863,61 +1847,60 @@ e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) | |||
1863 | /* bit defines for adapter->led_status */ | 1847 | /* bit defines for adapter->led_status */ |
1864 | #define E1000_LED_ON 0 | 1848 | #define E1000_LED_ON 0 |
1865 | 1849 | ||
1866 | static void | 1850 | static void e1000_led_blink_callback(unsigned long data) |
1867 | e1000_led_blink_callback(unsigned long data) | ||
1868 | { | 1851 | { |
1869 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; | 1852 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; |
1853 | struct e1000_hw *hw = &adapter->hw; | ||
1870 | 1854 | ||
1871 | if (test_and_change_bit(E1000_LED_ON, &adapter->led_status)) | 1855 | if (test_and_change_bit(E1000_LED_ON, &adapter->led_status)) |
1872 | e1000_led_off(&adapter->hw); | 1856 | e1000_led_off(hw); |
1873 | else | 1857 | else |
1874 | e1000_led_on(&adapter->hw); | 1858 | e1000_led_on(hw); |
1875 | 1859 | ||
1876 | mod_timer(&adapter->blink_timer, jiffies + E1000_ID_INTERVAL); | 1860 | mod_timer(&adapter->blink_timer, jiffies + E1000_ID_INTERVAL); |
1877 | } | 1861 | } |
1878 | 1862 | ||
1879 | static int | 1863 | static int e1000_phys_id(struct net_device *netdev, u32 data) |
1880 | e1000_phys_id(struct net_device *netdev, u32 data) | ||
1881 | { | 1864 | { |
1882 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1865 | struct e1000_adapter *adapter = netdev_priv(netdev); |
1866 | struct e1000_hw *hw = &adapter->hw; | ||
1883 | 1867 | ||
1884 | if (!data) | 1868 | if (!data) |
1885 | data = INT_MAX; | 1869 | data = INT_MAX; |
1886 | 1870 | ||
1887 | if (adapter->hw.mac_type < e1000_82571) { | 1871 | if (hw->mac_type < e1000_82571) { |
1888 | if (!adapter->blink_timer.function) { | 1872 | if (!adapter->blink_timer.function) { |
1889 | init_timer(&adapter->blink_timer); | 1873 | init_timer(&adapter->blink_timer); |
1890 | adapter->blink_timer.function = e1000_led_blink_callback; | 1874 | adapter->blink_timer.function = e1000_led_blink_callback; |
1891 | adapter->blink_timer.data = (unsigned long) adapter; | 1875 | adapter->blink_timer.data = (unsigned long)adapter; |
1892 | } | 1876 | } |
1893 | e1000_setup_led(&adapter->hw); | 1877 | e1000_setup_led(hw); |
1894 | mod_timer(&adapter->blink_timer, jiffies); | 1878 | mod_timer(&adapter->blink_timer, jiffies); |
1895 | msleep_interruptible(data * 1000); | 1879 | msleep_interruptible(data * 1000); |
1896 | del_timer_sync(&adapter->blink_timer); | 1880 | del_timer_sync(&adapter->blink_timer); |
1897 | } else if (adapter->hw.phy_type == e1000_phy_ife) { | 1881 | } else if (hw->phy_type == e1000_phy_ife) { |
1898 | if (!adapter->blink_timer.function) { | 1882 | if (!adapter->blink_timer.function) { |
1899 | init_timer(&adapter->blink_timer); | 1883 | init_timer(&adapter->blink_timer); |
1900 | adapter->blink_timer.function = e1000_led_blink_callback; | 1884 | adapter->blink_timer.function = e1000_led_blink_callback; |
1901 | adapter->blink_timer.data = (unsigned long) adapter; | 1885 | adapter->blink_timer.data = (unsigned long)adapter; |
1902 | } | 1886 | } |
1903 | mod_timer(&adapter->blink_timer, jiffies); | 1887 | mod_timer(&adapter->blink_timer, jiffies); |
1904 | msleep_interruptible(data * 1000); | 1888 | msleep_interruptible(data * 1000); |
1905 | del_timer_sync(&adapter->blink_timer); | 1889 | del_timer_sync(&adapter->blink_timer); |
1906 | e1000_write_phy_reg(&(adapter->hw), IFE_PHY_SPECIAL_CONTROL_LED, 0); | 1890 | e1000_write_phy_reg(&(adapter->hw), IFE_PHY_SPECIAL_CONTROL_LED, 0); |
1907 | } else { | 1891 | } else { |
1908 | e1000_blink_led_start(&adapter->hw); | 1892 | e1000_blink_led_start(hw); |
1909 | msleep_interruptible(data * 1000); | 1893 | msleep_interruptible(data * 1000); |
1910 | } | 1894 | } |
1911 | 1895 | ||
1912 | e1000_led_off(&adapter->hw); | 1896 | e1000_led_off(hw); |
1913 | clear_bit(E1000_LED_ON, &adapter->led_status); | 1897 | clear_bit(E1000_LED_ON, &adapter->led_status); |
1914 | e1000_cleanup_led(&adapter->hw); | 1898 | e1000_cleanup_led(hw); |
1915 | 1899 | ||
1916 | return 0; | 1900 | return 0; |
1917 | } | 1901 | } |
1918 | 1902 | ||
1919 | static int | 1903 | static int e1000_nway_reset(struct net_device *netdev) |
1920 | e1000_nway_reset(struct net_device *netdev) | ||
1921 | { | 1904 | { |
1922 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1905 | struct e1000_adapter *adapter = netdev_priv(netdev); |
1923 | if (netif_running(netdev)) | 1906 | if (netif_running(netdev)) |
@@ -1925,9 +1908,8 @@ e1000_nway_reset(struct net_device *netdev) | |||
1925 | return 0; | 1908 | return 0; |
1926 | } | 1909 | } |
1927 | 1910 | ||
1928 | static void | 1911 | static void e1000_get_ethtool_stats(struct net_device *netdev, |
1929 | e1000_get_ethtool_stats(struct net_device *netdev, | 1912 | struct ethtool_stats *stats, u64 *data) |
1930 | struct ethtool_stats *stats, u64 *data) | ||
1931 | { | 1913 | { |
1932 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1914 | struct e1000_adapter *adapter = netdev_priv(netdev); |
1933 | int i; | 1915 | int i; |
@@ -1941,8 +1923,8 @@ e1000_get_ethtool_stats(struct net_device *netdev, | |||
1941 | /* BUG_ON(i != E1000_STATS_LEN); */ | 1923 | /* BUG_ON(i != E1000_STATS_LEN); */ |
1942 | } | 1924 | } |
1943 | 1925 | ||
1944 | static void | 1926 | static void e1000_get_strings(struct net_device *netdev, u32 stringset, |
1945 | e1000_get_strings(struct net_device *netdev, u32 stringset, u8 *data) | 1927 | u8 *data) |
1946 | { | 1928 | { |
1947 | u8 *p = data; | 1929 | u8 *p = data; |
1948 | int i; | 1930 | int i; |
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c index 9a4b6cbddf2c..9d6edf3e73f9 100644 --- a/drivers/net/e1000/e1000_hw.c +++ b/drivers/net/e1000/e1000_hw.c | |||
@@ -42,48 +42,65 @@ static void e1000_release_software_semaphore(struct e1000_hw *hw); | |||
42 | 42 | ||
43 | static u8 e1000_arc_subsystem_valid(struct e1000_hw *hw); | 43 | static u8 e1000_arc_subsystem_valid(struct e1000_hw *hw); |
44 | static s32 e1000_check_downshift(struct e1000_hw *hw); | 44 | static s32 e1000_check_downshift(struct e1000_hw *hw); |
45 | static s32 e1000_check_polarity(struct e1000_hw *hw, e1000_rev_polarity *polarity); | 45 | static s32 e1000_check_polarity(struct e1000_hw *hw, |
46 | e1000_rev_polarity *polarity); | ||
46 | static void e1000_clear_hw_cntrs(struct e1000_hw *hw); | 47 | static void e1000_clear_hw_cntrs(struct e1000_hw *hw); |
47 | static void e1000_clear_vfta(struct e1000_hw *hw); | 48 | static void e1000_clear_vfta(struct e1000_hw *hw); |
48 | static s32 e1000_commit_shadow_ram(struct e1000_hw *hw); | 49 | static s32 e1000_commit_shadow_ram(struct e1000_hw *hw); |
49 | static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, | 50 | static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, |
50 | bool link_up); | 51 | bool link_up); |
51 | static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw); | 52 | static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw); |
52 | static s32 e1000_detect_gig_phy(struct e1000_hw *hw); | 53 | static s32 e1000_detect_gig_phy(struct e1000_hw *hw); |
53 | static s32 e1000_erase_ich8_4k_segment(struct e1000_hw *hw, u32 bank); | 54 | static s32 e1000_erase_ich8_4k_segment(struct e1000_hw *hw, u32 bank); |
54 | static s32 e1000_get_auto_rd_done(struct e1000_hw *hw); | 55 | static s32 e1000_get_auto_rd_done(struct e1000_hw *hw); |
55 | static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, u16 *max_length); | 56 | static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, |
57 | u16 *max_length); | ||
56 | static s32 e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw); | 58 | static s32 e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw); |
57 | static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw); | 59 | static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw); |
58 | static s32 e1000_get_software_flag(struct e1000_hw *hw); | 60 | static s32 e1000_get_software_flag(struct e1000_hw *hw); |
59 | static s32 e1000_ich8_cycle_init(struct e1000_hw *hw); | 61 | static s32 e1000_ich8_cycle_init(struct e1000_hw *hw); |
60 | static s32 e1000_ich8_flash_cycle(struct e1000_hw *hw, u32 timeout); | 62 | static s32 e1000_ich8_flash_cycle(struct e1000_hw *hw, u32 timeout); |
61 | static s32 e1000_id_led_init(struct e1000_hw *hw); | 63 | static s32 e1000_id_led_init(struct e1000_hw *hw); |
62 | static s32 e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, u32 cnf_base_addr, u32 cnf_size); | 64 | static s32 e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, |
65 | u32 cnf_base_addr, | ||
66 | u32 cnf_size); | ||
63 | static s32 e1000_init_lcd_from_nvm(struct e1000_hw *hw); | 67 | static s32 e1000_init_lcd_from_nvm(struct e1000_hw *hw); |
64 | static void e1000_init_rx_addrs(struct e1000_hw *hw); | 68 | static void e1000_init_rx_addrs(struct e1000_hw *hw); |
65 | static void e1000_initialize_hardware_bits(struct e1000_hw *hw); | 69 | static void e1000_initialize_hardware_bits(struct e1000_hw *hw); |
66 | static bool e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw); | 70 | static bool e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw); |
67 | static s32 e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw); | 71 | static s32 e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw); |
68 | static s32 e1000_mng_enable_host_if(struct e1000_hw *hw); | 72 | static s32 e1000_mng_enable_host_if(struct e1000_hw *hw); |
69 | static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length, u16 offset, u8 *sum); | 73 | static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length, |
70 | static s32 e1000_mng_write_cmd_header(struct e1000_hw* hw, struct e1000_host_mng_command_header* hdr); | 74 | u16 offset, u8 *sum); |
75 | static s32 e1000_mng_write_cmd_header(struct e1000_hw* hw, | ||
76 | struct e1000_host_mng_command_header | ||
77 | *hdr); | ||
71 | static s32 e1000_mng_write_commit(struct e1000_hw *hw); | 78 | static s32 e1000_mng_write_commit(struct e1000_hw *hw); |
72 | static s32 e1000_phy_ife_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); | 79 | static s32 e1000_phy_ife_get_info(struct e1000_hw *hw, |
73 | static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); | 80 | struct e1000_phy_info *phy_info); |
74 | static s32 e1000_read_eeprom_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); | 81 | static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, |
75 | static s32 e1000_write_eeprom_eewr(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); | 82 | struct e1000_phy_info *phy_info); |
83 | static s32 e1000_read_eeprom_eerd(struct e1000_hw *hw, u16 offset, u16 words, | ||
84 | u16 *data); | ||
85 | static s32 e1000_write_eeprom_eewr(struct e1000_hw *hw, u16 offset, u16 words, | ||
86 | u16 *data); | ||
76 | static s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd); | 87 | static s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd); |
77 | static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); | 88 | static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, |
89 | struct e1000_phy_info *phy_info); | ||
78 | static void e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw); | 90 | static void e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw); |
79 | static s32 e1000_read_ich8_byte(struct e1000_hw *hw, u32 index, u8 *data); | 91 | static s32 e1000_read_ich8_byte(struct e1000_hw *hw, u32 index, u8 *data); |
80 | static s32 e1000_verify_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte); | 92 | static s32 e1000_verify_write_ich8_byte(struct e1000_hw *hw, u32 index, |
93 | u8 byte); | ||
81 | static s32 e1000_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte); | 94 | static s32 e1000_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte); |
82 | static s32 e1000_read_ich8_word(struct e1000_hw *hw, u32 index, u16 *data); | 95 | static s32 e1000_read_ich8_word(struct e1000_hw *hw, u32 index, u16 *data); |
83 | static s32 e1000_read_ich8_data(struct e1000_hw *hw, u32 index, u32 size, u16 *data); | 96 | static s32 e1000_read_ich8_data(struct e1000_hw *hw, u32 index, u32 size, |
84 | static s32 e1000_write_ich8_data(struct e1000_hw *hw, u32 index, u32 size, u16 data); | 97 | u16 *data); |
85 | static s32 e1000_read_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); | 98 | static s32 e1000_write_ich8_data(struct e1000_hw *hw, u32 index, u32 size, |
86 | static s32 e1000_write_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); | 99 | u16 data); |
100 | static s32 e1000_read_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words, | ||
101 | u16 *data); | ||
102 | static s32 e1000_write_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words, | ||
103 | u16 *data); | ||
87 | static void e1000_release_software_flag(struct e1000_hw *hw); | 104 | static void e1000_release_software_flag(struct e1000_hw *hw); |
88 | static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active); | 105 | static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active); |
89 | static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active); | 106 | static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active); |
@@ -101,23 +118,21 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw); | |||
101 | static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl); | 118 | static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl); |
102 | static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl); | 119 | static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl); |
103 | static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, | 120 | static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, |
104 | u16 count); | 121 | u16 count); |
105 | static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw); | 122 | static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw); |
106 | static s32 e1000_phy_reset_dsp(struct e1000_hw *hw); | 123 | static s32 e1000_phy_reset_dsp(struct e1000_hw *hw); |
107 | static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, | 124 | static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, |
108 | u16 words, u16 *data); | 125 | u16 words, u16 *data); |
109 | static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, | 126 | static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, |
110 | u16 offset, u16 words, | 127 | u16 words, u16 *data); |
111 | u16 *data); | ||
112 | static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw); | 128 | static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw); |
113 | static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd); | 129 | static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd); |
114 | static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd); | 130 | static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd); |
115 | static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, | 131 | static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count); |
116 | u16 count); | ||
117 | static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, | 132 | static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, |
118 | u16 phy_data); | 133 | u16 phy_data); |
119 | static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw,u32 reg_addr, | 134 | static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw,u32 reg_addr, |
120 | u16 *phy_data); | 135 | u16 *phy_data); |
121 | static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count); | 136 | static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count); |
122 | static s32 e1000_acquire_eeprom(struct e1000_hw *hw); | 137 | static s32 e1000_acquire_eeprom(struct e1000_hw *hw); |
123 | static void e1000_release_eeprom(struct e1000_hw *hw); | 138 | static void e1000_release_eeprom(struct e1000_hw *hw); |
@@ -127,8 +142,7 @@ static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw); | |||
127 | static s32 e1000_set_phy_mode(struct e1000_hw *hw); | 142 | static s32 e1000_set_phy_mode(struct e1000_hw *hw); |
128 | static s32 e1000_host_if_read_cookie(struct e1000_hw *hw, u8 *buffer); | 143 | static s32 e1000_host_if_read_cookie(struct e1000_hw *hw, u8 *buffer); |
129 | static u8 e1000_calculate_mng_checksum(char *buffer, u32 length); | 144 | static u8 e1000_calculate_mng_checksum(char *buffer, u32 length); |
130 | static s32 e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, | 145 | static s32 e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, u16 duplex); |
131 | u16 duplex); | ||
132 | static s32 e1000_configure_kmrn_for_1000(struct e1000_hw *hw); | 146 | static s32 e1000_configure_kmrn_for_1000(struct e1000_hw *hw); |
133 | 147 | ||
134 | /* IGP cable length table */ | 148 | /* IGP cable length table */ |
@@ -159,8 +173,7 @@ u16 e1000_igp_2_cable_length_table[IGP02E1000_AGC_LENGTH_TABLE_SIZE] = | |||
159 | * | 173 | * |
160 | * hw - Struct containing variables accessed by shared code | 174 | * hw - Struct containing variables accessed by shared code |
161 | *****************************************************************************/ | 175 | *****************************************************************************/ |
162 | static s32 | 176 | static s32 e1000_set_phy_type(struct e1000_hw *hw) |
163 | e1000_set_phy_type(struct e1000_hw *hw) | ||
164 | { | 177 | { |
165 | DEBUGFUNC("e1000_set_phy_type"); | 178 | DEBUGFUNC("e1000_set_phy_type"); |
166 | 179 | ||
@@ -210,8 +223,7 @@ e1000_set_phy_type(struct e1000_hw *hw) | |||
210 | * | 223 | * |
211 | * hw - Struct containing variables accessed by shared code | 224 | * hw - Struct containing variables accessed by shared code |
212 | *****************************************************************************/ | 225 | *****************************************************************************/ |
213 | static void | 226 | static void e1000_phy_init_script(struct e1000_hw *hw) |
214 | e1000_phy_init_script(struct e1000_hw *hw) | ||
215 | { | 227 | { |
216 | u32 ret_val; | 228 | u32 ret_val; |
217 | u16 phy_saved_data; | 229 | u16 phy_saved_data; |
@@ -306,8 +318,7 @@ e1000_phy_init_script(struct e1000_hw *hw) | |||
306 | * | 318 | * |
307 | * hw - Struct containing variables accessed by shared code | 319 | * hw - Struct containing variables accessed by shared code |
308 | *****************************************************************************/ | 320 | *****************************************************************************/ |
309 | s32 | 321 | s32 e1000_set_mac_type(struct e1000_hw *hw) |
310 | e1000_set_mac_type(struct e1000_hw *hw) | ||
311 | { | 322 | { |
312 | DEBUGFUNC("e1000_set_mac_type"); | 323 | DEBUGFUNC("e1000_set_mac_type"); |
313 | 324 | ||
@@ -474,8 +485,7 @@ e1000_set_mac_type(struct e1000_hw *hw) | |||
474 | * | 485 | * |
475 | * hw - Struct containing variables accessed by shared code | 486 | * hw - Struct containing variables accessed by shared code |
476 | * **************************************************************************/ | 487 | * **************************************************************************/ |
477 | void | 488 | void e1000_set_media_type(struct e1000_hw *hw) |
478 | e1000_set_media_type(struct e1000_hw *hw) | ||
479 | { | 489 | { |
480 | u32 status; | 490 | u32 status; |
481 | 491 | ||
@@ -510,7 +520,7 @@ e1000_set_media_type(struct e1000_hw *hw) | |||
510 | hw->media_type = e1000_media_type_copper; | 520 | hw->media_type = e1000_media_type_copper; |
511 | break; | 521 | break; |
512 | default: | 522 | default: |
513 | status = E1000_READ_REG(hw, STATUS); | 523 | status = er32(STATUS); |
514 | if (status & E1000_STATUS_TBIMODE) { | 524 | if (status & E1000_STATUS_TBIMODE) { |
515 | hw->media_type = e1000_media_type_fiber; | 525 | hw->media_type = e1000_media_type_fiber; |
516 | /* tbi_compatibility not valid on fiber */ | 526 | /* tbi_compatibility not valid on fiber */ |
@@ -528,8 +538,7 @@ e1000_set_media_type(struct e1000_hw *hw) | |||
528 | * | 538 | * |
529 | * hw - Struct containing variables accessed by shared code | 539 | * hw - Struct containing variables accessed by shared code |
530 | *****************************************************************************/ | 540 | *****************************************************************************/ |
531 | s32 | 541 | s32 e1000_reset_hw(struct e1000_hw *hw) |
532 | e1000_reset_hw(struct e1000_hw *hw) | ||
533 | { | 542 | { |
534 | u32 ctrl; | 543 | u32 ctrl; |
535 | u32 ctrl_ext; | 544 | u32 ctrl_ext; |
@@ -559,15 +568,15 @@ e1000_reset_hw(struct e1000_hw *hw) | |||
559 | 568 | ||
560 | /* Clear interrupt mask to stop board from generating interrupts */ | 569 | /* Clear interrupt mask to stop board from generating interrupts */ |
561 | DEBUGOUT("Masking off all interrupts\n"); | 570 | DEBUGOUT("Masking off all interrupts\n"); |
562 | E1000_WRITE_REG(hw, IMC, 0xffffffff); | 571 | ew32(IMC, 0xffffffff); |
563 | 572 | ||
564 | /* Disable the Transmit and Receive units. Then delay to allow | 573 | /* Disable the Transmit and Receive units. Then delay to allow |
565 | * any pending transactions to complete before we hit the MAC with | 574 | * any pending transactions to complete before we hit the MAC with |
566 | * the global reset. | 575 | * the global reset. |
567 | */ | 576 | */ |
568 | E1000_WRITE_REG(hw, RCTL, 0); | 577 | ew32(RCTL, 0); |
569 | E1000_WRITE_REG(hw, TCTL, E1000_TCTL_PSP); | 578 | ew32(TCTL, E1000_TCTL_PSP); |
570 | E1000_WRITE_FLUSH(hw); | 579 | E1000_WRITE_FLUSH(); |
571 | 580 | ||
572 | /* The tbi_compatibility_on Flag must be cleared when Rctl is cleared. */ | 581 | /* The tbi_compatibility_on Flag must be cleared when Rctl is cleared. */ |
573 | hw->tbi_compatibility_on = false; | 582 | hw->tbi_compatibility_on = false; |
@@ -577,11 +586,11 @@ e1000_reset_hw(struct e1000_hw *hw) | |||
577 | */ | 586 | */ |
578 | msleep(10); | 587 | msleep(10); |
579 | 588 | ||
580 | ctrl = E1000_READ_REG(hw, CTRL); | 589 | ctrl = er32(CTRL); |
581 | 590 | ||
582 | /* Must reset the PHY before resetting the MAC */ | 591 | /* Must reset the PHY before resetting the MAC */ |
583 | if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { | 592 | if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { |
584 | E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_PHY_RST)); | 593 | ew32(CTRL, (ctrl | E1000_CTRL_PHY_RST)); |
585 | msleep(5); | 594 | msleep(5); |
586 | } | 595 | } |
587 | 596 | ||
@@ -590,12 +599,12 @@ e1000_reset_hw(struct e1000_hw *hw) | |||
590 | if (hw->mac_type == e1000_82573) { | 599 | if (hw->mac_type == e1000_82573) { |
591 | timeout = 10; | 600 | timeout = 10; |
592 | 601 | ||
593 | extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL); | 602 | extcnf_ctrl = er32(EXTCNF_CTRL); |
594 | extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; | 603 | extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; |
595 | 604 | ||
596 | do { | 605 | do { |
597 | E1000_WRITE_REG(hw, EXTCNF_CTRL, extcnf_ctrl); | 606 | ew32(EXTCNF_CTRL, extcnf_ctrl); |
598 | extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL); | 607 | extcnf_ctrl = er32(EXTCNF_CTRL); |
599 | 608 | ||
600 | if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP) | 609 | if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP) |
601 | break; | 610 | break; |
@@ -610,9 +619,9 @@ e1000_reset_hw(struct e1000_hw *hw) | |||
610 | /* Workaround for ICH8 bit corruption issue in FIFO memory */ | 619 | /* Workaround for ICH8 bit corruption issue in FIFO memory */ |
611 | if (hw->mac_type == e1000_ich8lan) { | 620 | if (hw->mac_type == e1000_ich8lan) { |
612 | /* Set Tx and Rx buffer allocation to 8k apiece. */ | 621 | /* Set Tx and Rx buffer allocation to 8k apiece. */ |
613 | E1000_WRITE_REG(hw, PBA, E1000_PBA_8K); | 622 | ew32(PBA, E1000_PBA_8K); |
614 | /* Set Packet Buffer Size to 16k. */ | 623 | /* Set Packet Buffer Size to 16k. */ |
615 | E1000_WRITE_REG(hw, PBS, E1000_PBS_16K); | 624 | ew32(PBS, E1000_PBS_16K); |
616 | } | 625 | } |
617 | 626 | ||
618 | /* Issue a global reset to the MAC. This will reset the chip's | 627 | /* Issue a global reset to the MAC. This will reset the chip's |
@@ -636,7 +645,7 @@ e1000_reset_hw(struct e1000_hw *hw) | |||
636 | case e1000_82545_rev_3: | 645 | case e1000_82545_rev_3: |
637 | case e1000_82546_rev_3: | 646 | case e1000_82546_rev_3: |
638 | /* Reset is performed on a shadow of the control register */ | 647 | /* Reset is performed on a shadow of the control register */ |
639 | E1000_WRITE_REG(hw, CTRL_DUP, (ctrl | E1000_CTRL_RST)); | 648 | ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST)); |
640 | break; | 649 | break; |
641 | case e1000_ich8lan: | 650 | case e1000_ich8lan: |
642 | if (!hw->phy_reset_disable && | 651 | if (!hw->phy_reset_disable && |
@@ -649,11 +658,11 @@ e1000_reset_hw(struct e1000_hw *hw) | |||
649 | } | 658 | } |
650 | 659 | ||
651 | e1000_get_software_flag(hw); | 660 | e1000_get_software_flag(hw); |
652 | E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_RST)); | 661 | ew32(CTRL, (ctrl | E1000_CTRL_RST)); |
653 | msleep(5); | 662 | msleep(5); |
654 | break; | 663 | break; |
655 | default: | 664 | default: |
656 | E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_RST)); | 665 | ew32(CTRL, (ctrl | E1000_CTRL_RST)); |
657 | break; | 666 | break; |
658 | } | 667 | } |
659 | 668 | ||
@@ -668,10 +677,10 @@ e1000_reset_hw(struct e1000_hw *hw) | |||
668 | case e1000_82544: | 677 | case e1000_82544: |
669 | /* Wait for reset to complete */ | 678 | /* Wait for reset to complete */ |
670 | udelay(10); | 679 | udelay(10); |
671 | ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); | 680 | ctrl_ext = er32(CTRL_EXT); |
672 | ctrl_ext |= E1000_CTRL_EXT_EE_RST; | 681 | ctrl_ext |= E1000_CTRL_EXT_EE_RST; |
673 | E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); | 682 | ew32(CTRL_EXT, ctrl_ext); |
674 | E1000_WRITE_FLUSH(hw); | 683 | E1000_WRITE_FLUSH(); |
675 | /* Wait for EEPROM reload */ | 684 | /* Wait for EEPROM reload */ |
676 | msleep(2); | 685 | msleep(2); |
677 | break; | 686 | break; |
@@ -685,10 +694,10 @@ e1000_reset_hw(struct e1000_hw *hw) | |||
685 | case e1000_82573: | 694 | case e1000_82573: |
686 | if (!e1000_is_onboard_nvm_eeprom(hw)) { | 695 | if (!e1000_is_onboard_nvm_eeprom(hw)) { |
687 | udelay(10); | 696 | udelay(10); |
688 | ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); | 697 | ctrl_ext = er32(CTRL_EXT); |
689 | ctrl_ext |= E1000_CTRL_EXT_EE_RST; | 698 | ctrl_ext |= E1000_CTRL_EXT_EE_RST; |
690 | E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); | 699 | ew32(CTRL_EXT, ctrl_ext); |
691 | E1000_WRITE_FLUSH(hw); | 700 | E1000_WRITE_FLUSH(); |
692 | } | 701 | } |
693 | /* fall through */ | 702 | /* fall through */ |
694 | default: | 703 | default: |
@@ -701,27 +710,27 @@ e1000_reset_hw(struct e1000_hw *hw) | |||
701 | 710 | ||
702 | /* Disable HW ARPs on ASF enabled adapters */ | 711 | /* Disable HW ARPs on ASF enabled adapters */ |
703 | if (hw->mac_type >= e1000_82540 && hw->mac_type <= e1000_82547_rev_2) { | 712 | if (hw->mac_type >= e1000_82540 && hw->mac_type <= e1000_82547_rev_2) { |
704 | manc = E1000_READ_REG(hw, MANC); | 713 | manc = er32(MANC); |
705 | manc &= ~(E1000_MANC_ARP_EN); | 714 | manc &= ~(E1000_MANC_ARP_EN); |
706 | E1000_WRITE_REG(hw, MANC, manc); | 715 | ew32(MANC, manc); |
707 | } | 716 | } |
708 | 717 | ||
709 | if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { | 718 | if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { |
710 | e1000_phy_init_script(hw); | 719 | e1000_phy_init_script(hw); |
711 | 720 | ||
712 | /* Configure activity LED after PHY reset */ | 721 | /* Configure activity LED after PHY reset */ |
713 | led_ctrl = E1000_READ_REG(hw, LEDCTL); | 722 | led_ctrl = er32(LEDCTL); |
714 | led_ctrl &= IGP_ACTIVITY_LED_MASK; | 723 | led_ctrl &= IGP_ACTIVITY_LED_MASK; |
715 | led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); | 724 | led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); |
716 | E1000_WRITE_REG(hw, LEDCTL, led_ctrl); | 725 | ew32(LEDCTL, led_ctrl); |
717 | } | 726 | } |
718 | 727 | ||
719 | /* Clear interrupt mask to stop board from generating interrupts */ | 728 | /* Clear interrupt mask to stop board from generating interrupts */ |
720 | DEBUGOUT("Masking off all interrupts\n"); | 729 | DEBUGOUT("Masking off all interrupts\n"); |
721 | E1000_WRITE_REG(hw, IMC, 0xffffffff); | 730 | ew32(IMC, 0xffffffff); |
722 | 731 | ||
723 | /* Clear any pending interrupt events. */ | 732 | /* Clear any pending interrupt events. */ |
724 | icr = E1000_READ_REG(hw, ICR); | 733 | icr = er32(ICR); |
725 | 734 | ||
726 | /* If MWI was previously enabled, reenable it. */ | 735 | /* If MWI was previously enabled, reenable it. */ |
727 | if (hw->mac_type == e1000_82542_rev2_0) { | 736 | if (hw->mac_type == e1000_82542_rev2_0) { |
@@ -730,9 +739,9 @@ e1000_reset_hw(struct e1000_hw *hw) | |||
730 | } | 739 | } |
731 | 740 | ||
732 | if (hw->mac_type == e1000_ich8lan) { | 741 | if (hw->mac_type == e1000_ich8lan) { |
733 | u32 kab = E1000_READ_REG(hw, KABGTXD); | 742 | u32 kab = er32(KABGTXD); |
734 | kab |= E1000_KABGTXD_BGSQLBIAS; | 743 | kab |= E1000_KABGTXD_BGSQLBIAS; |
735 | E1000_WRITE_REG(hw, KABGTXD, kab); | 744 | ew32(KABGTXD, kab); |
736 | } | 745 | } |
737 | 746 | ||
738 | return E1000_SUCCESS; | 747 | return E1000_SUCCESS; |
@@ -747,8 +756,7 @@ e1000_reset_hw(struct e1000_hw *hw) | |||
747 | * This function contains hardware limitation workarounds for PCI-E adapters | 756 | * This function contains hardware limitation workarounds for PCI-E adapters |
748 | * | 757 | * |
749 | *****************************************************************************/ | 758 | *****************************************************************************/ |
750 | static void | 759 | static void e1000_initialize_hardware_bits(struct e1000_hw *hw) |
751 | e1000_initialize_hardware_bits(struct e1000_hw *hw) | ||
752 | { | 760 | { |
753 | if ((hw->mac_type >= e1000_82571) && (!hw->initialize_hw_bits_disable)) { | 761 | if ((hw->mac_type >= e1000_82571) && (!hw->initialize_hw_bits_disable)) { |
754 | /* Settings common to all PCI-express silicon */ | 762 | /* Settings common to all PCI-express silicon */ |
@@ -758,22 +766,22 @@ e1000_initialize_hardware_bits(struct e1000_hw *hw) | |||
758 | u32 reg_txdctl, reg_txdctl1; | 766 | u32 reg_txdctl, reg_txdctl1; |
759 | 767 | ||
760 | /* link autonegotiation/sync workarounds */ | 768 | /* link autonegotiation/sync workarounds */ |
761 | reg_tarc0 = E1000_READ_REG(hw, TARC0); | 769 | reg_tarc0 = er32(TARC0); |
762 | reg_tarc0 &= ~((1 << 30)|(1 << 29)|(1 << 28)|(1 << 27)); | 770 | reg_tarc0 &= ~((1 << 30)|(1 << 29)|(1 << 28)|(1 << 27)); |
763 | 771 | ||
764 | /* Enable not-done TX descriptor counting */ | 772 | /* Enable not-done TX descriptor counting */ |
765 | reg_txdctl = E1000_READ_REG(hw, TXDCTL); | 773 | reg_txdctl = er32(TXDCTL); |
766 | reg_txdctl |= E1000_TXDCTL_COUNT_DESC; | 774 | reg_txdctl |= E1000_TXDCTL_COUNT_DESC; |
767 | E1000_WRITE_REG(hw, TXDCTL, reg_txdctl); | 775 | ew32(TXDCTL, reg_txdctl); |
768 | reg_txdctl1 = E1000_READ_REG(hw, TXDCTL1); | 776 | reg_txdctl1 = er32(TXDCTL1); |
769 | reg_txdctl1 |= E1000_TXDCTL_COUNT_DESC; | 777 | reg_txdctl1 |= E1000_TXDCTL_COUNT_DESC; |
770 | E1000_WRITE_REG(hw, TXDCTL1, reg_txdctl1); | 778 | ew32(TXDCTL1, reg_txdctl1); |
771 | 779 | ||
772 | switch (hw->mac_type) { | 780 | switch (hw->mac_type) { |
773 | case e1000_82571: | 781 | case e1000_82571: |
774 | case e1000_82572: | 782 | case e1000_82572: |
775 | /* Clear PHY TX compatible mode bits */ | 783 | /* Clear PHY TX compatible mode bits */ |
776 | reg_tarc1 = E1000_READ_REG(hw, TARC1); | 784 | reg_tarc1 = er32(TARC1); |
777 | reg_tarc1 &= ~((1 << 30)|(1 << 29)); | 785 | reg_tarc1 &= ~((1 << 30)|(1 << 29)); |
778 | 786 | ||
779 | /* link autonegotiation/sync workarounds */ | 787 | /* link autonegotiation/sync workarounds */ |
@@ -783,25 +791,25 @@ e1000_initialize_hardware_bits(struct e1000_hw *hw) | |||
783 | reg_tarc1 |= ((1 << 26)|(1 << 25)|(1 << 24)); | 791 | reg_tarc1 |= ((1 << 26)|(1 << 25)|(1 << 24)); |
784 | 792 | ||
785 | /* Multiple read bit is reversed polarity */ | 793 | /* Multiple read bit is reversed polarity */ |
786 | reg_tctl = E1000_READ_REG(hw, TCTL); | 794 | reg_tctl = er32(TCTL); |
787 | if (reg_tctl & E1000_TCTL_MULR) | 795 | if (reg_tctl & E1000_TCTL_MULR) |
788 | reg_tarc1 &= ~(1 << 28); | 796 | reg_tarc1 &= ~(1 << 28); |
789 | else | 797 | else |
790 | reg_tarc1 |= (1 << 28); | 798 | reg_tarc1 |= (1 << 28); |
791 | 799 | ||
792 | E1000_WRITE_REG(hw, TARC1, reg_tarc1); | 800 | ew32(TARC1, reg_tarc1); |
793 | break; | 801 | break; |
794 | case e1000_82573: | 802 | case e1000_82573: |
795 | reg_ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); | 803 | reg_ctrl_ext = er32(CTRL_EXT); |
796 | reg_ctrl_ext &= ~(1 << 23); | 804 | reg_ctrl_ext &= ~(1 << 23); |
797 | reg_ctrl_ext |= (1 << 22); | 805 | reg_ctrl_ext |= (1 << 22); |
798 | 806 | ||
799 | /* TX byte count fix */ | 807 | /* TX byte count fix */ |
800 | reg_ctrl = E1000_READ_REG(hw, CTRL); | 808 | reg_ctrl = er32(CTRL); |
801 | reg_ctrl &= ~(1 << 29); | 809 | reg_ctrl &= ~(1 << 29); |
802 | 810 | ||
803 | E1000_WRITE_REG(hw, CTRL_EXT, reg_ctrl_ext); | 811 | ew32(CTRL_EXT, reg_ctrl_ext); |
804 | E1000_WRITE_REG(hw, CTRL, reg_ctrl); | 812 | ew32(CTRL, reg_ctrl); |
805 | break; | 813 | break; |
806 | case e1000_80003es2lan: | 814 | case e1000_80003es2lan: |
807 | /* improve small packet performace for fiber/serdes */ | 815 | /* improve small packet performace for fiber/serdes */ |
@@ -811,14 +819,14 @@ e1000_initialize_hardware_bits(struct e1000_hw *hw) | |||
811 | } | 819 | } |
812 | 820 | ||
813 | /* Multiple read bit is reversed polarity */ | 821 | /* Multiple read bit is reversed polarity */ |
814 | reg_tctl = E1000_READ_REG(hw, TCTL); | 822 | reg_tctl = er32(TCTL); |
815 | reg_tarc1 = E1000_READ_REG(hw, TARC1); | 823 | reg_tarc1 = er32(TARC1); |
816 | if (reg_tctl & E1000_TCTL_MULR) | 824 | if (reg_tctl & E1000_TCTL_MULR) |
817 | reg_tarc1 &= ~(1 << 28); | 825 | reg_tarc1 &= ~(1 << 28); |
818 | else | 826 | else |
819 | reg_tarc1 |= (1 << 28); | 827 | reg_tarc1 |= (1 << 28); |
820 | 828 | ||
821 | E1000_WRITE_REG(hw, TARC1, reg_tarc1); | 829 | ew32(TARC1, reg_tarc1); |
822 | break; | 830 | break; |
823 | case e1000_ich8lan: | 831 | case e1000_ich8lan: |
824 | /* Reduce concurrent DMA requests to 3 from 4 */ | 832 | /* Reduce concurrent DMA requests to 3 from 4 */ |
@@ -827,16 +835,16 @@ e1000_initialize_hardware_bits(struct e1000_hw *hw) | |||
827 | (hw->device_id != E1000_DEV_ID_ICH8_IGP_M))) | 835 | (hw->device_id != E1000_DEV_ID_ICH8_IGP_M))) |
828 | reg_tarc0 |= ((1 << 29)|(1 << 28)); | 836 | reg_tarc0 |= ((1 << 29)|(1 << 28)); |
829 | 837 | ||
830 | reg_ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); | 838 | reg_ctrl_ext = er32(CTRL_EXT); |
831 | reg_ctrl_ext |= (1 << 22); | 839 | reg_ctrl_ext |= (1 << 22); |
832 | E1000_WRITE_REG(hw, CTRL_EXT, reg_ctrl_ext); | 840 | ew32(CTRL_EXT, reg_ctrl_ext); |
833 | 841 | ||
834 | /* workaround TX hang with TSO=on */ | 842 | /* workaround TX hang with TSO=on */ |
835 | reg_tarc0 |= ((1 << 27)|(1 << 26)|(1 << 24)|(1 << 23)); | 843 | reg_tarc0 |= ((1 << 27)|(1 << 26)|(1 << 24)|(1 << 23)); |
836 | 844 | ||
837 | /* Multiple read bit is reversed polarity */ | 845 | /* Multiple read bit is reversed polarity */ |
838 | reg_tctl = E1000_READ_REG(hw, TCTL); | 846 | reg_tctl = er32(TCTL); |
839 | reg_tarc1 = E1000_READ_REG(hw, TARC1); | 847 | reg_tarc1 = er32(TARC1); |
840 | if (reg_tctl & E1000_TCTL_MULR) | 848 | if (reg_tctl & E1000_TCTL_MULR) |
841 | reg_tarc1 &= ~(1 << 28); | 849 | reg_tarc1 &= ~(1 << 28); |
842 | else | 850 | else |
@@ -845,13 +853,13 @@ e1000_initialize_hardware_bits(struct e1000_hw *hw) | |||
845 | /* workaround TX hang with TSO=on */ | 853 | /* workaround TX hang with TSO=on */ |
846 | reg_tarc1 |= ((1 << 30)|(1 << 26)|(1 << 24)); | 854 | reg_tarc1 |= ((1 << 30)|(1 << 26)|(1 << 24)); |
847 | 855 | ||
848 | E1000_WRITE_REG(hw, TARC1, reg_tarc1); | 856 | ew32(TARC1, reg_tarc1); |
849 | break; | 857 | break; |
850 | default: | 858 | default: |
851 | break; | 859 | break; |
852 | } | 860 | } |
853 | 861 | ||
854 | E1000_WRITE_REG(hw, TARC0, reg_tarc0); | 862 | ew32(TARC0, reg_tarc0); |
855 | } | 863 | } |
856 | } | 864 | } |
857 | 865 | ||
@@ -866,8 +874,7 @@ e1000_initialize_hardware_bits(struct e1000_hw *hw) | |||
866 | * configuration and flow control settings. Clears all on-chip counters. Leaves | 874 | * configuration and flow control settings. Clears all on-chip counters. Leaves |
867 | * the transmit and receive units disabled and uninitialized. | 875 | * the transmit and receive units disabled and uninitialized. |
868 | *****************************************************************************/ | 876 | *****************************************************************************/ |
869 | s32 | 877 | s32 e1000_init_hw(struct e1000_hw *hw) |
870 | e1000_init_hw(struct e1000_hw *hw) | ||
871 | { | 878 | { |
872 | u32 ctrl; | 879 | u32 ctrl; |
873 | u32 i; | 880 | u32 i; |
@@ -883,9 +890,9 @@ e1000_init_hw(struct e1000_hw *hw) | |||
883 | ((hw->revision_id < 3) || | 890 | ((hw->revision_id < 3) || |
884 | ((hw->device_id != E1000_DEV_ID_ICH8_IGP_M_AMT) && | 891 | ((hw->device_id != E1000_DEV_ID_ICH8_IGP_M_AMT) && |
885 | (hw->device_id != E1000_DEV_ID_ICH8_IGP_M)))) { | 892 | (hw->device_id != E1000_DEV_ID_ICH8_IGP_M)))) { |
886 | reg_data = E1000_READ_REG(hw, STATUS); | 893 | reg_data = er32(STATUS); |
887 | reg_data &= ~0x80000000; | 894 | reg_data &= ~0x80000000; |
888 | E1000_WRITE_REG(hw, STATUS, reg_data); | 895 | ew32(STATUS, reg_data); |
889 | } | 896 | } |
890 | 897 | ||
891 | /* Initialize Identification LED */ | 898 | /* Initialize Identification LED */ |
@@ -906,7 +913,7 @@ e1000_init_hw(struct e1000_hw *hw) | |||
906 | /* VET hardcoded to standard value and VFTA removed in ICH8 LAN */ | 913 | /* VET hardcoded to standard value and VFTA removed in ICH8 LAN */ |
907 | if (hw->mac_type != e1000_ich8lan) { | 914 | if (hw->mac_type != e1000_ich8lan) { |
908 | if (hw->mac_type < e1000_82545_rev_3) | 915 | if (hw->mac_type < e1000_82545_rev_3) |
909 | E1000_WRITE_REG(hw, VET, 0); | 916 | ew32(VET, 0); |
910 | e1000_clear_vfta(hw); | 917 | e1000_clear_vfta(hw); |
911 | } | 918 | } |
912 | 919 | ||
@@ -914,8 +921,8 @@ e1000_init_hw(struct e1000_hw *hw) | |||
914 | if (hw->mac_type == e1000_82542_rev2_0) { | 921 | if (hw->mac_type == e1000_82542_rev2_0) { |
915 | DEBUGOUT("Disabling MWI on 82542 rev 2.0\n"); | 922 | DEBUGOUT("Disabling MWI on 82542 rev 2.0\n"); |
916 | e1000_pci_clear_mwi(hw); | 923 | e1000_pci_clear_mwi(hw); |
917 | E1000_WRITE_REG(hw, RCTL, E1000_RCTL_RST); | 924 | ew32(RCTL, E1000_RCTL_RST); |
918 | E1000_WRITE_FLUSH(hw); | 925 | E1000_WRITE_FLUSH(); |
919 | msleep(5); | 926 | msleep(5); |
920 | } | 927 | } |
921 | 928 | ||
@@ -926,8 +933,8 @@ e1000_init_hw(struct e1000_hw *hw) | |||
926 | 933 | ||
927 | /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */ | 934 | /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */ |
928 | if (hw->mac_type == e1000_82542_rev2_0) { | 935 | if (hw->mac_type == e1000_82542_rev2_0) { |
929 | E1000_WRITE_REG(hw, RCTL, 0); | 936 | ew32(RCTL, 0); |
930 | E1000_WRITE_FLUSH(hw); | 937 | E1000_WRITE_FLUSH(); |
931 | msleep(1); | 938 | msleep(1); |
932 | if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) | 939 | if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) |
933 | e1000_pci_set_mwi(hw); | 940 | e1000_pci_set_mwi(hw); |
@@ -942,7 +949,7 @@ e1000_init_hw(struct e1000_hw *hw) | |||
942 | E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); | 949 | E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); |
943 | /* use write flush to prevent Memory Write Block (MWB) from | 950 | /* use write flush to prevent Memory Write Block (MWB) from |
944 | * occuring when accessing our register space */ | 951 | * occuring when accessing our register space */ |
945 | E1000_WRITE_FLUSH(hw); | 952 | E1000_WRITE_FLUSH(); |
946 | } | 953 | } |
947 | 954 | ||
948 | /* Set the PCI priority bit correctly in the CTRL register. This | 955 | /* Set the PCI priority bit correctly in the CTRL register. This |
@@ -951,8 +958,8 @@ e1000_init_hw(struct e1000_hw *hw) | |||
951 | * 82542 and 82543 silicon. | 958 | * 82542 and 82543 silicon. |
952 | */ | 959 | */ |
953 | if (hw->dma_fairness && hw->mac_type <= e1000_82543) { | 960 | if (hw->dma_fairness && hw->mac_type <= e1000_82543) { |
954 | ctrl = E1000_READ_REG(hw, CTRL); | 961 | ctrl = er32(CTRL); |
955 | E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PRIOR); | 962 | ew32(CTRL, ctrl | E1000_CTRL_PRIOR); |
956 | } | 963 | } |
957 | 964 | ||
958 | switch (hw->mac_type) { | 965 | switch (hw->mac_type) { |
@@ -975,9 +982,9 @@ e1000_init_hw(struct e1000_hw *hw) | |||
975 | 982 | ||
976 | /* Set the transmit descriptor write-back policy */ | 983 | /* Set the transmit descriptor write-back policy */ |
977 | if (hw->mac_type > e1000_82544) { | 984 | if (hw->mac_type > e1000_82544) { |
978 | ctrl = E1000_READ_REG(hw, TXDCTL); | 985 | ctrl = er32(TXDCTL); |
979 | ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB; | 986 | ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB; |
980 | E1000_WRITE_REG(hw, TXDCTL, ctrl); | 987 | ew32(TXDCTL, ctrl); |
981 | } | 988 | } |
982 | 989 | ||
983 | if (hw->mac_type == e1000_82573) { | 990 | if (hw->mac_type == e1000_82573) { |
@@ -989,21 +996,21 @@ e1000_init_hw(struct e1000_hw *hw) | |||
989 | break; | 996 | break; |
990 | case e1000_80003es2lan: | 997 | case e1000_80003es2lan: |
991 | /* Enable retransmit on late collisions */ | 998 | /* Enable retransmit on late collisions */ |
992 | reg_data = E1000_READ_REG(hw, TCTL); | 999 | reg_data = er32(TCTL); |
993 | reg_data |= E1000_TCTL_RTLC; | 1000 | reg_data |= E1000_TCTL_RTLC; |
994 | E1000_WRITE_REG(hw, TCTL, reg_data); | 1001 | ew32(TCTL, reg_data); |
995 | 1002 | ||
996 | /* Configure Gigabit Carry Extend Padding */ | 1003 | /* Configure Gigabit Carry Extend Padding */ |
997 | reg_data = E1000_READ_REG(hw, TCTL_EXT); | 1004 | reg_data = er32(TCTL_EXT); |
998 | reg_data &= ~E1000_TCTL_EXT_GCEX_MASK; | 1005 | reg_data &= ~E1000_TCTL_EXT_GCEX_MASK; |
999 | reg_data |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX; | 1006 | reg_data |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX; |
1000 | E1000_WRITE_REG(hw, TCTL_EXT, reg_data); | 1007 | ew32(TCTL_EXT, reg_data); |
1001 | 1008 | ||
1002 | /* Configure Transmit Inter-Packet Gap */ | 1009 | /* Configure Transmit Inter-Packet Gap */ |
1003 | reg_data = E1000_READ_REG(hw, TIPG); | 1010 | reg_data = er32(TIPG); |
1004 | reg_data &= ~E1000_TIPG_IPGT_MASK; | 1011 | reg_data &= ~E1000_TIPG_IPGT_MASK; |
1005 | reg_data |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000; | 1012 | reg_data |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000; |
1006 | E1000_WRITE_REG(hw, TIPG, reg_data); | 1013 | ew32(TIPG, reg_data); |
1007 | 1014 | ||
1008 | reg_data = E1000_READ_REG_ARRAY(hw, FFLT, 0x0001); | 1015 | reg_data = E1000_READ_REG_ARRAY(hw, FFLT, 0x0001); |
1009 | reg_data &= ~0x00100000; | 1016 | reg_data &= ~0x00100000; |
@@ -1012,17 +1019,17 @@ e1000_init_hw(struct e1000_hw *hw) | |||
1012 | case e1000_82571: | 1019 | case e1000_82571: |
1013 | case e1000_82572: | 1020 | case e1000_82572: |
1014 | case e1000_ich8lan: | 1021 | case e1000_ich8lan: |
1015 | ctrl = E1000_READ_REG(hw, TXDCTL1); | 1022 | ctrl = er32(TXDCTL1); |
1016 | ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB; | 1023 | ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB; |
1017 | E1000_WRITE_REG(hw, TXDCTL1, ctrl); | 1024 | ew32(TXDCTL1, ctrl); |
1018 | break; | 1025 | break; |
1019 | } | 1026 | } |
1020 | 1027 | ||
1021 | 1028 | ||
1022 | if (hw->mac_type == e1000_82573) { | 1029 | if (hw->mac_type == e1000_82573) { |
1023 | u32 gcr = E1000_READ_REG(hw, GCR); | 1030 | u32 gcr = er32(GCR); |
1024 | gcr |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX; | 1031 | gcr |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX; |
1025 | E1000_WRITE_REG(hw, GCR, gcr); | 1032 | ew32(GCR, gcr); |
1026 | } | 1033 | } |
1027 | 1034 | ||
1028 | /* Clear all of the statistics registers (clear on read). It is | 1035 | /* Clear all of the statistics registers (clear on read). It is |
@@ -1039,11 +1046,11 @@ e1000_init_hw(struct e1000_hw *hw) | |||
1039 | 1046 | ||
1040 | if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER || | 1047 | if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER || |
1041 | hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) { | 1048 | hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) { |
1042 | ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); | 1049 | ctrl_ext = er32(CTRL_EXT); |
1043 | /* Relaxed ordering must be disabled to avoid a parity | 1050 | /* Relaxed ordering must be disabled to avoid a parity |
1044 | * error crash in a PCI slot. */ | 1051 | * error crash in a PCI slot. */ |
1045 | ctrl_ext |= E1000_CTRL_EXT_RO_DIS; | 1052 | ctrl_ext |= E1000_CTRL_EXT_RO_DIS; |
1046 | E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); | 1053 | ew32(CTRL_EXT, ctrl_ext); |
1047 | } | 1054 | } |
1048 | 1055 | ||
1049 | return ret_val; | 1056 | return ret_val; |
@@ -1054,8 +1061,7 @@ e1000_init_hw(struct e1000_hw *hw) | |||
1054 | * | 1061 | * |
1055 | * hw - Struct containing variables accessed by shared code. | 1062 | * hw - Struct containing variables accessed by shared code. |
1056 | *****************************************************************************/ | 1063 | *****************************************************************************/ |
1057 | static s32 | 1064 | static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw) |
1058 | e1000_adjust_serdes_amplitude(struct e1000_hw *hw) | ||
1059 | { | 1065 | { |
1060 | u16 eeprom_data; | 1066 | u16 eeprom_data; |
1061 | s32 ret_val; | 1067 | s32 ret_val; |
@@ -1100,8 +1106,7 @@ e1000_adjust_serdes_amplitude(struct e1000_hw *hw) | |||
1100 | * established. Assumes the hardware has previously been reset and the | 1106 | * established. Assumes the hardware has previously been reset and the |
1101 | * transmitter and receiver are not enabled. | 1107 | * transmitter and receiver are not enabled. |
1102 | *****************************************************************************/ | 1108 | *****************************************************************************/ |
1103 | s32 | 1109 | s32 e1000_setup_link(struct e1000_hw *hw) |
1104 | e1000_setup_link(struct e1000_hw *hw) | ||
1105 | { | 1110 | { |
1106 | u32 ctrl_ext; | 1111 | u32 ctrl_ext; |
1107 | s32 ret_val; | 1112 | s32 ret_val; |
@@ -1176,7 +1181,7 @@ e1000_setup_link(struct e1000_hw *hw) | |||
1176 | } | 1181 | } |
1177 | ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) << | 1182 | ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) << |
1178 | SWDPIO__EXT_SHIFT); | 1183 | SWDPIO__EXT_SHIFT); |
1179 | E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); | 1184 | ew32(CTRL_EXT, ctrl_ext); |
1180 | } | 1185 | } |
1181 | 1186 | ||
1182 | /* Call the necessary subroutine to configure the link. */ | 1187 | /* Call the necessary subroutine to configure the link. */ |
@@ -1193,12 +1198,12 @@ e1000_setup_link(struct e1000_hw *hw) | |||
1193 | 1198 | ||
1194 | /* FCAL/H and FCT are hardcoded to standard values in e1000_ich8lan. */ | 1199 | /* FCAL/H and FCT are hardcoded to standard values in e1000_ich8lan. */ |
1195 | if (hw->mac_type != e1000_ich8lan) { | 1200 | if (hw->mac_type != e1000_ich8lan) { |
1196 | E1000_WRITE_REG(hw, FCT, FLOW_CONTROL_TYPE); | 1201 | ew32(FCT, FLOW_CONTROL_TYPE); |
1197 | E1000_WRITE_REG(hw, FCAH, FLOW_CONTROL_ADDRESS_HIGH); | 1202 | ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); |
1198 | E1000_WRITE_REG(hw, FCAL, FLOW_CONTROL_ADDRESS_LOW); | 1203 | ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); |
1199 | } | 1204 | } |
1200 | 1205 | ||
1201 | E1000_WRITE_REG(hw, FCTTV, hw->fc_pause_time); | 1206 | ew32(FCTTV, hw->fc_pause_time); |
1202 | 1207 | ||
1203 | /* Set the flow control receive threshold registers. Normally, | 1208 | /* Set the flow control receive threshold registers. Normally, |
1204 | * these registers will be set to a default threshold that may be | 1209 | * these registers will be set to a default threshold that may be |
@@ -1207,18 +1212,18 @@ e1000_setup_link(struct e1000_hw *hw) | |||
1207 | * registers will be set to 0. | 1212 | * registers will be set to 0. |
1208 | */ | 1213 | */ |
1209 | if (!(hw->fc & E1000_FC_TX_PAUSE)) { | 1214 | if (!(hw->fc & E1000_FC_TX_PAUSE)) { |
1210 | E1000_WRITE_REG(hw, FCRTL, 0); | 1215 | ew32(FCRTL, 0); |
1211 | E1000_WRITE_REG(hw, FCRTH, 0); | 1216 | ew32(FCRTH, 0); |
1212 | } else { | 1217 | } else { |
1213 | /* We need to set up the Receive Threshold high and low water marks | 1218 | /* We need to set up the Receive Threshold high and low water marks |
1214 | * as well as (optionally) enabling the transmission of XON frames. | 1219 | * as well as (optionally) enabling the transmission of XON frames. |
1215 | */ | 1220 | */ |
1216 | if (hw->fc_send_xon) { | 1221 | if (hw->fc_send_xon) { |
1217 | E1000_WRITE_REG(hw, FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE)); | 1222 | ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE)); |
1218 | E1000_WRITE_REG(hw, FCRTH, hw->fc_high_water); | 1223 | ew32(FCRTH, hw->fc_high_water); |
1219 | } else { | 1224 | } else { |
1220 | E1000_WRITE_REG(hw, FCRTL, hw->fc_low_water); | 1225 | ew32(FCRTL, hw->fc_low_water); |
1221 | E1000_WRITE_REG(hw, FCRTH, hw->fc_high_water); | 1226 | ew32(FCRTH, hw->fc_high_water); |
1222 | } | 1227 | } |
1223 | } | 1228 | } |
1224 | return ret_val; | 1229 | return ret_val; |
@@ -1233,8 +1238,7 @@ e1000_setup_link(struct e1000_hw *hw) | |||
1233 | * link. Assumes the hardware has been previously reset and the transmitter | 1238 | * link. Assumes the hardware has been previously reset and the transmitter |
1234 | * and receiver are not enabled. | 1239 | * and receiver are not enabled. |
1235 | *****************************************************************************/ | 1240 | *****************************************************************************/ |
1236 | static s32 | 1241 | static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw) |
1237 | e1000_setup_fiber_serdes_link(struct e1000_hw *hw) | ||
1238 | { | 1242 | { |
1239 | u32 ctrl; | 1243 | u32 ctrl; |
1240 | u32 status; | 1244 | u32 status; |
@@ -1251,7 +1255,7 @@ e1000_setup_fiber_serdes_link(struct e1000_hw *hw) | |||
1251 | * loopback mode is disabled during initialization. | 1255 | * loopback mode is disabled during initialization. |
1252 | */ | 1256 | */ |
1253 | if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) | 1257 | if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) |
1254 | E1000_WRITE_REG(hw, SCTL, E1000_DISABLE_SERDES_LOOPBACK); | 1258 | ew32(SCTL, E1000_DISABLE_SERDES_LOOPBACK); |
1255 | 1259 | ||
1256 | /* On adapters with a MAC newer than 82544, SWDP 1 will be | 1260 | /* On adapters with a MAC newer than 82544, SWDP 1 will be |
1257 | * set when the optics detect a signal. On older adapters, it will be | 1261 | * set when the optics detect a signal. On older adapters, it will be |
@@ -1259,7 +1263,7 @@ e1000_setup_fiber_serdes_link(struct e1000_hw *hw) | |||
1259 | * If we're on serdes media, adjust the output amplitude to value | 1263 | * If we're on serdes media, adjust the output amplitude to value |
1260 | * set in the EEPROM. | 1264 | * set in the EEPROM. |
1261 | */ | 1265 | */ |
1262 | ctrl = E1000_READ_REG(hw, CTRL); | 1266 | ctrl = er32(CTRL); |
1263 | if (hw->media_type == e1000_media_type_fiber) | 1267 | if (hw->media_type == e1000_media_type_fiber) |
1264 | signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0; | 1268 | signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0; |
1265 | 1269 | ||
@@ -1330,9 +1334,9 @@ e1000_setup_fiber_serdes_link(struct e1000_hw *hw) | |||
1330 | */ | 1334 | */ |
1331 | DEBUGOUT("Auto-negotiation enabled\n"); | 1335 | DEBUGOUT("Auto-negotiation enabled\n"); |
1332 | 1336 | ||
1333 | E1000_WRITE_REG(hw, TXCW, txcw); | 1337 | ew32(TXCW, txcw); |
1334 | E1000_WRITE_REG(hw, CTRL, ctrl); | 1338 | ew32(CTRL, ctrl); |
1335 | E1000_WRITE_FLUSH(hw); | 1339 | E1000_WRITE_FLUSH(); |
1336 | 1340 | ||
1337 | hw->txcw = txcw; | 1341 | hw->txcw = txcw; |
1338 | msleep(1); | 1342 | msleep(1); |
@@ -1344,11 +1348,11 @@ e1000_setup_fiber_serdes_link(struct e1000_hw *hw) | |||
1344 | * For internal serdes, we just assume a signal is present, then poll. | 1348 | * For internal serdes, we just assume a signal is present, then poll. |
1345 | */ | 1349 | */ |
1346 | if (hw->media_type == e1000_media_type_internal_serdes || | 1350 | if (hw->media_type == e1000_media_type_internal_serdes || |
1347 | (E1000_READ_REG(hw, CTRL) & E1000_CTRL_SWDPIN1) == signal) { | 1351 | (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) { |
1348 | DEBUGOUT("Looking for Link\n"); | 1352 | DEBUGOUT("Looking for Link\n"); |
1349 | for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) { | 1353 | for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) { |
1350 | msleep(10); | 1354 | msleep(10); |
1351 | status = E1000_READ_REG(hw, STATUS); | 1355 | status = er32(STATUS); |
1352 | if (status & E1000_STATUS_LU) break; | 1356 | if (status & E1000_STATUS_LU) break; |
1353 | } | 1357 | } |
1354 | if (i == (LINK_UP_TIMEOUT / 10)) { | 1358 | if (i == (LINK_UP_TIMEOUT / 10)) { |
@@ -1380,8 +1384,7 @@ e1000_setup_fiber_serdes_link(struct e1000_hw *hw) | |||
1380 | * | 1384 | * |
1381 | * hw - Struct containing variables accessed by shared code | 1385 | * hw - Struct containing variables accessed by shared code |
1382 | ******************************************************************************/ | 1386 | ******************************************************************************/ |
1383 | static s32 | 1387 | static s32 e1000_copper_link_preconfig(struct e1000_hw *hw) |
1384 | e1000_copper_link_preconfig(struct e1000_hw *hw) | ||
1385 | { | 1388 | { |
1386 | u32 ctrl; | 1389 | u32 ctrl; |
1387 | s32 ret_val; | 1390 | s32 ret_val; |
@@ -1389,7 +1392,7 @@ e1000_copper_link_preconfig(struct e1000_hw *hw) | |||
1389 | 1392 | ||
1390 | DEBUGFUNC("e1000_copper_link_preconfig"); | 1393 | DEBUGFUNC("e1000_copper_link_preconfig"); |
1391 | 1394 | ||
1392 | ctrl = E1000_READ_REG(hw, CTRL); | 1395 | ctrl = er32(CTRL); |
1393 | /* With 82543, we need to force speed and duplex on the MAC equal to what | 1396 | /* With 82543, we need to force speed and duplex on the MAC equal to what |
1394 | * the PHY speed and duplex configuration is. In addition, we need to | 1397 | * the PHY speed and duplex configuration is. In addition, we need to |
1395 | * perform a hardware reset on the PHY to take it out of reset. | 1398 | * perform a hardware reset on the PHY to take it out of reset. |
@@ -1397,10 +1400,10 @@ e1000_copper_link_preconfig(struct e1000_hw *hw) | |||
1397 | if (hw->mac_type > e1000_82543) { | 1400 | if (hw->mac_type > e1000_82543) { |
1398 | ctrl |= E1000_CTRL_SLU; | 1401 | ctrl |= E1000_CTRL_SLU; |
1399 | ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); | 1402 | ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); |
1400 | E1000_WRITE_REG(hw, CTRL, ctrl); | 1403 | ew32(CTRL, ctrl); |
1401 | } else { | 1404 | } else { |
1402 | ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU); | 1405 | ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU); |
1403 | E1000_WRITE_REG(hw, CTRL, ctrl); | 1406 | ew32(CTRL, ctrl); |
1404 | ret_val = e1000_phy_hw_reset(hw); | 1407 | ret_val = e1000_phy_hw_reset(hw); |
1405 | if (ret_val) | 1408 | if (ret_val) |
1406 | return ret_val; | 1409 | return ret_val; |
@@ -1440,8 +1443,7 @@ e1000_copper_link_preconfig(struct e1000_hw *hw) | |||
1440 | * | 1443 | * |
1441 | * hw - Struct containing variables accessed by shared code | 1444 | * hw - Struct containing variables accessed by shared code |
1442 | *********************************************************************/ | 1445 | *********************************************************************/ |
1443 | static s32 | 1446 | static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw) |
1444 | e1000_copper_link_igp_setup(struct e1000_hw *hw) | ||
1445 | { | 1447 | { |
1446 | u32 led_ctrl; | 1448 | u32 led_ctrl; |
1447 | s32 ret_val; | 1449 | s32 ret_val; |
@@ -1462,10 +1464,10 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw) | |||
1462 | msleep(15); | 1464 | msleep(15); |
1463 | if (hw->mac_type != e1000_ich8lan) { | 1465 | if (hw->mac_type != e1000_ich8lan) { |
1464 | /* Configure activity LED after PHY reset */ | 1466 | /* Configure activity LED after PHY reset */ |
1465 | led_ctrl = E1000_READ_REG(hw, LEDCTL); | 1467 | led_ctrl = er32(LEDCTL); |
1466 | led_ctrl &= IGP_ACTIVITY_LED_MASK; | 1468 | led_ctrl &= IGP_ACTIVITY_LED_MASK; |
1467 | led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); | 1469 | led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); |
1468 | E1000_WRITE_REG(hw, LEDCTL, led_ctrl); | 1470 | ew32(LEDCTL, led_ctrl); |
1469 | } | 1471 | } |
1470 | 1472 | ||
1471 | /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */ | 1473 | /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */ |
@@ -1587,8 +1589,7 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw) | |||
1587 | * | 1589 | * |
1588 | * hw - Struct containing variables accessed by shared code | 1590 | * hw - Struct containing variables accessed by shared code |
1589 | *********************************************************************/ | 1591 | *********************************************************************/ |
1590 | static s32 | 1592 | static s32 e1000_copper_link_ggp_setup(struct e1000_hw *hw) |
1591 | e1000_copper_link_ggp_setup(struct e1000_hw *hw) | ||
1592 | { | 1593 | { |
1593 | s32 ret_val; | 1594 | s32 ret_val; |
1594 | u16 phy_data; | 1595 | u16 phy_data; |
@@ -1679,9 +1680,9 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw) | |||
1679 | if (ret_val) | 1680 | if (ret_val) |
1680 | return ret_val; | 1681 | return ret_val; |
1681 | 1682 | ||
1682 | reg_data = E1000_READ_REG(hw, CTRL_EXT); | 1683 | reg_data = er32(CTRL_EXT); |
1683 | reg_data &= ~(E1000_CTRL_EXT_LINK_MODE_MASK); | 1684 | reg_data &= ~(E1000_CTRL_EXT_LINK_MODE_MASK); |
1684 | E1000_WRITE_REG(hw, CTRL_EXT, reg_data); | 1685 | ew32(CTRL_EXT, reg_data); |
1685 | 1686 | ||
1686 | ret_val = e1000_read_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL, | 1687 | ret_val = e1000_read_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL, |
1687 | &phy_data); | 1688 | &phy_data); |
@@ -1735,8 +1736,7 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw) | |||
1735 | * | 1736 | * |
1736 | * hw - Struct containing variables accessed by shared code | 1737 | * hw - Struct containing variables accessed by shared code |
1737 | *********************************************************************/ | 1738 | *********************************************************************/ |
1738 | static s32 | 1739 | static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw) |
1739 | e1000_copper_link_mgp_setup(struct e1000_hw *hw) | ||
1740 | { | 1740 | { |
1741 | s32 ret_val; | 1741 | s32 ret_val; |
1742 | u16 phy_data; | 1742 | u16 phy_data; |
@@ -1839,8 +1839,7 @@ e1000_copper_link_mgp_setup(struct e1000_hw *hw) | |||
1839 | * | 1839 | * |
1840 | * hw - Struct containing variables accessed by shared code | 1840 | * hw - Struct containing variables accessed by shared code |
1841 | *********************************************************************/ | 1841 | *********************************************************************/ |
1842 | static s32 | 1842 | static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) |
1843 | e1000_copper_link_autoneg(struct e1000_hw *hw) | ||
1844 | { | 1843 | { |
1845 | s32 ret_val; | 1844 | s32 ret_val; |
1846 | u16 phy_data; | 1845 | u16 phy_data; |
@@ -1910,8 +1909,7 @@ e1000_copper_link_autoneg(struct e1000_hw *hw) | |||
1910 | * | 1909 | * |
1911 | * hw - Struct containing variables accessed by shared code | 1910 | * hw - Struct containing variables accessed by shared code |
1912 | ******************************************************************************/ | 1911 | ******************************************************************************/ |
1913 | static s32 | 1912 | static s32 e1000_copper_link_postconfig(struct e1000_hw *hw) |
1914 | e1000_copper_link_postconfig(struct e1000_hw *hw) | ||
1915 | { | 1913 | { |
1916 | s32 ret_val; | 1914 | s32 ret_val; |
1917 | DEBUGFUNC("e1000_copper_link_postconfig"); | 1915 | DEBUGFUNC("e1000_copper_link_postconfig"); |
@@ -1948,8 +1946,7 @@ e1000_copper_link_postconfig(struct e1000_hw *hw) | |||
1948 | * | 1946 | * |
1949 | * hw - Struct containing variables accessed by shared code | 1947 | * hw - Struct containing variables accessed by shared code |
1950 | ******************************************************************************/ | 1948 | ******************************************************************************/ |
1951 | static s32 | 1949 | static s32 e1000_setup_copper_link(struct e1000_hw *hw) |
1952 | e1000_setup_copper_link(struct e1000_hw *hw) | ||
1953 | { | 1950 | { |
1954 | s32 ret_val; | 1951 | s32 ret_val; |
1955 | u16 i; | 1952 | u16 i; |
@@ -2062,8 +2059,7 @@ e1000_setup_copper_link(struct e1000_hw *hw) | |||
2062 | * | 2059 | * |
2063 | * hw - Struct containing variables accessed by shared code | 2060 | * hw - Struct containing variables accessed by shared code |
2064 | ******************************************************************************/ | 2061 | ******************************************************************************/ |
2065 | static s32 | 2062 | static s32 e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, u16 duplex) |
2066 | e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, u16 duplex) | ||
2067 | { | 2063 | { |
2068 | s32 ret_val = E1000_SUCCESS; | 2064 | s32 ret_val = E1000_SUCCESS; |
2069 | u32 tipg; | 2065 | u32 tipg; |
@@ -2078,10 +2074,10 @@ e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, u16 duplex) | |||
2078 | return ret_val; | 2074 | return ret_val; |
2079 | 2075 | ||
2080 | /* Configure Transmit Inter-Packet Gap */ | 2076 | /* Configure Transmit Inter-Packet Gap */ |
2081 | tipg = E1000_READ_REG(hw, TIPG); | 2077 | tipg = er32(TIPG); |
2082 | tipg &= ~E1000_TIPG_IPGT_MASK; | 2078 | tipg &= ~E1000_TIPG_IPGT_MASK; |
2083 | tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_10_100; | 2079 | tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_10_100; |
2084 | E1000_WRITE_REG(hw, TIPG, tipg); | 2080 | ew32(TIPG, tipg); |
2085 | 2081 | ||
2086 | ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, ®_data); | 2082 | ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, ®_data); |
2087 | 2083 | ||
@@ -2098,8 +2094,7 @@ e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, u16 duplex) | |||
2098 | return ret_val; | 2094 | return ret_val; |
2099 | } | 2095 | } |
2100 | 2096 | ||
2101 | static s32 | 2097 | static s32 e1000_configure_kmrn_for_1000(struct e1000_hw *hw) |
2102 | e1000_configure_kmrn_for_1000(struct e1000_hw *hw) | ||
2103 | { | 2098 | { |
2104 | s32 ret_val = E1000_SUCCESS; | 2099 | s32 ret_val = E1000_SUCCESS; |
2105 | u16 reg_data; | 2100 | u16 reg_data; |
@@ -2114,10 +2109,10 @@ e1000_configure_kmrn_for_1000(struct e1000_hw *hw) | |||
2114 | return ret_val; | 2109 | return ret_val; |
2115 | 2110 | ||
2116 | /* Configure Transmit Inter-Packet Gap */ | 2111 | /* Configure Transmit Inter-Packet Gap */ |
2117 | tipg = E1000_READ_REG(hw, TIPG); | 2112 | tipg = er32(TIPG); |
2118 | tipg &= ~E1000_TIPG_IPGT_MASK; | 2113 | tipg &= ~E1000_TIPG_IPGT_MASK; |
2119 | tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000; | 2114 | tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000; |
2120 | E1000_WRITE_REG(hw, TIPG, tipg); | 2115 | ew32(TIPG, tipg); |
2121 | 2116 | ||
2122 | ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, ®_data); | 2117 | ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, ®_data); |
2123 | 2118 | ||
@@ -2135,8 +2130,7 @@ e1000_configure_kmrn_for_1000(struct e1000_hw *hw) | |||
2135 | * | 2130 | * |
2136 | * hw - Struct containing variables accessed by shared code | 2131 | * hw - Struct containing variables accessed by shared code |
2137 | ******************************************************************************/ | 2132 | ******************************************************************************/ |
2138 | s32 | 2133 | s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) |
2139 | e1000_phy_setup_autoneg(struct e1000_hw *hw) | ||
2140 | { | 2134 | { |
2141 | s32 ret_val; | 2135 | s32 ret_val; |
2142 | u16 mii_autoneg_adv_reg; | 2136 | u16 mii_autoneg_adv_reg; |
@@ -2284,8 +2278,7 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw) | |||
2284 | * | 2278 | * |
2285 | * hw - Struct containing variables accessed by shared code | 2279 | * hw - Struct containing variables accessed by shared code |
2286 | ******************************************************************************/ | 2280 | ******************************************************************************/ |
2287 | static s32 | 2281 | static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) |
2288 | e1000_phy_force_speed_duplex(struct e1000_hw *hw) | ||
2289 | { | 2282 | { |
2290 | u32 ctrl; | 2283 | u32 ctrl; |
2291 | s32 ret_val; | 2284 | s32 ret_val; |
@@ -2302,7 +2295,7 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw) | |||
2302 | DEBUGOUT1("hw->fc = %d\n", hw->fc); | 2295 | DEBUGOUT1("hw->fc = %d\n", hw->fc); |
2303 | 2296 | ||
2304 | /* Read the Device Control Register. */ | 2297 | /* Read the Device Control Register. */ |
2305 | ctrl = E1000_READ_REG(hw, CTRL); | 2298 | ctrl = er32(CTRL); |
2306 | 2299 | ||
2307 | /* Set the bits to Force Speed and Duplex in the Device Ctrl Reg. */ | 2300 | /* Set the bits to Force Speed and Duplex in the Device Ctrl Reg. */ |
2308 | ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); | 2301 | ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); |
@@ -2357,7 +2350,7 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw) | |||
2357 | e1000_config_collision_dist(hw); | 2350 | e1000_config_collision_dist(hw); |
2358 | 2351 | ||
2359 | /* Write the configured values back to the Device Control Reg. */ | 2352 | /* Write the configured values back to the Device Control Reg. */ |
2360 | E1000_WRITE_REG(hw, CTRL, ctrl); | 2353 | ew32(CTRL, ctrl); |
2361 | 2354 | ||
2362 | if ((hw->phy_type == e1000_phy_m88) || | 2355 | if ((hw->phy_type == e1000_phy_m88) || |
2363 | (hw->phy_type == e1000_phy_gg82563)) { | 2356 | (hw->phy_type == e1000_phy_gg82563)) { |
@@ -2535,8 +2528,7 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw) | |||
2535 | * Link should have been established previously. Reads the speed and duplex | 2528 | * Link should have been established previously. Reads the speed and duplex |
2536 | * information from the Device Status register. | 2529 | * information from the Device Status register. |
2537 | ******************************************************************************/ | 2530 | ******************************************************************************/ |
2538 | void | 2531 | void e1000_config_collision_dist(struct e1000_hw *hw) |
2539 | e1000_config_collision_dist(struct e1000_hw *hw) | ||
2540 | { | 2532 | { |
2541 | u32 tctl, coll_dist; | 2533 | u32 tctl, coll_dist; |
2542 | 2534 | ||
@@ -2547,13 +2539,13 @@ e1000_config_collision_dist(struct e1000_hw *hw) | |||
2547 | else | 2539 | else |
2548 | coll_dist = E1000_COLLISION_DISTANCE; | 2540 | coll_dist = E1000_COLLISION_DISTANCE; |
2549 | 2541 | ||
2550 | tctl = E1000_READ_REG(hw, TCTL); | 2542 | tctl = er32(TCTL); |
2551 | 2543 | ||
2552 | tctl &= ~E1000_TCTL_COLD; | 2544 | tctl &= ~E1000_TCTL_COLD; |
2553 | tctl |= coll_dist << E1000_COLD_SHIFT; | 2545 | tctl |= coll_dist << E1000_COLD_SHIFT; |
2554 | 2546 | ||
2555 | E1000_WRITE_REG(hw, TCTL, tctl); | 2547 | ew32(TCTL, tctl); |
2556 | E1000_WRITE_FLUSH(hw); | 2548 | E1000_WRITE_FLUSH(); |
2557 | } | 2549 | } |
2558 | 2550 | ||
2559 | /****************************************************************************** | 2551 | /****************************************************************************** |
@@ -2565,8 +2557,7 @@ e1000_config_collision_dist(struct e1000_hw *hw) | |||
2565 | * The contents of the PHY register containing the needed information need to | 2557 | * The contents of the PHY register containing the needed information need to |
2566 | * be passed in. | 2558 | * be passed in. |
2567 | ******************************************************************************/ | 2559 | ******************************************************************************/ |
2568 | static s32 | 2560 | static s32 e1000_config_mac_to_phy(struct e1000_hw *hw) |
2569 | e1000_config_mac_to_phy(struct e1000_hw *hw) | ||
2570 | { | 2561 | { |
2571 | u32 ctrl; | 2562 | u32 ctrl; |
2572 | s32 ret_val; | 2563 | s32 ret_val; |
@@ -2582,7 +2573,7 @@ e1000_config_mac_to_phy(struct e1000_hw *hw) | |||
2582 | /* Read the Device Control Register and set the bits to Force Speed | 2573 | /* Read the Device Control Register and set the bits to Force Speed |
2583 | * and Duplex. | 2574 | * and Duplex. |
2584 | */ | 2575 | */ |
2585 | ctrl = E1000_READ_REG(hw, CTRL); | 2576 | ctrl = er32(CTRL); |
2586 | ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); | 2577 | ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); |
2587 | ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS); | 2578 | ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS); |
2588 | 2579 | ||
@@ -2609,7 +2600,7 @@ e1000_config_mac_to_phy(struct e1000_hw *hw) | |||
2609 | ctrl |= E1000_CTRL_SPD_100; | 2600 | ctrl |= E1000_CTRL_SPD_100; |
2610 | 2601 | ||
2611 | /* Write the configured values back to the Device Control Reg. */ | 2602 | /* Write the configured values back to the Device Control Reg. */ |
2612 | E1000_WRITE_REG(hw, CTRL, ctrl); | 2603 | ew32(CTRL, ctrl); |
2613 | return E1000_SUCCESS; | 2604 | return E1000_SUCCESS; |
2614 | } | 2605 | } |
2615 | 2606 | ||
@@ -2624,15 +2615,14 @@ e1000_config_mac_to_phy(struct e1000_hw *hw) | |||
2624 | * by the PHY rather than the MAC. Software must also configure these | 2615 | * by the PHY rather than the MAC. Software must also configure these |
2625 | * bits when link is forced on a fiber connection. | 2616 | * bits when link is forced on a fiber connection. |
2626 | *****************************************************************************/ | 2617 | *****************************************************************************/ |
2627 | s32 | 2618 | s32 e1000_force_mac_fc(struct e1000_hw *hw) |
2628 | e1000_force_mac_fc(struct e1000_hw *hw) | ||
2629 | { | 2619 | { |
2630 | u32 ctrl; | 2620 | u32 ctrl; |
2631 | 2621 | ||
2632 | DEBUGFUNC("e1000_force_mac_fc"); | 2622 | DEBUGFUNC("e1000_force_mac_fc"); |
2633 | 2623 | ||
2634 | /* Get the current configuration of the Device Control Register */ | 2624 | /* Get the current configuration of the Device Control Register */ |
2635 | ctrl = E1000_READ_REG(hw, CTRL); | 2625 | ctrl = er32(CTRL); |
2636 | 2626 | ||
2637 | /* Because we didn't get link via the internal auto-negotiation | 2627 | /* Because we didn't get link via the internal auto-negotiation |
2638 | * mechanism (we either forced link or we got link via PHY | 2628 | * mechanism (we either forced link or we got link via PHY |
@@ -2676,7 +2666,7 @@ e1000_force_mac_fc(struct e1000_hw *hw) | |||
2676 | if (hw->mac_type == e1000_82542_rev2_0) | 2666 | if (hw->mac_type == e1000_82542_rev2_0) |
2677 | ctrl &= (~E1000_CTRL_TFCE); | 2667 | ctrl &= (~E1000_CTRL_TFCE); |
2678 | 2668 | ||
2679 | E1000_WRITE_REG(hw, CTRL, ctrl); | 2669 | ew32(CTRL, ctrl); |
2680 | return E1000_SUCCESS; | 2670 | return E1000_SUCCESS; |
2681 | } | 2671 | } |
2682 | 2672 | ||
@@ -2691,8 +2681,7 @@ e1000_force_mac_fc(struct e1000_hw *hw) | |||
2691 | * based on the flow control negotiated by the PHY. In TBI mode, the TFCE | 2681 | * based on the flow control negotiated by the PHY. In TBI mode, the TFCE |
2692 | * and RFCE bits will be automaticaly set to the negotiated flow control mode. | 2682 | * and RFCE bits will be automaticaly set to the negotiated flow control mode. |
2693 | *****************************************************************************/ | 2683 | *****************************************************************************/ |
2694 | static s32 | 2684 | static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw) |
2695 | e1000_config_fc_after_link_up(struct e1000_hw *hw) | ||
2696 | { | 2685 | { |
2697 | s32 ret_val; | 2686 | s32 ret_val; |
2698 | u16 mii_status_reg; | 2687 | u16 mii_status_reg; |
@@ -2896,8 +2885,7 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw) | |||
2896 | * | 2885 | * |
2897 | * Called by any function that needs to check the link status of the adapter. | 2886 | * Called by any function that needs to check the link status of the adapter. |
2898 | *****************************************************************************/ | 2887 | *****************************************************************************/ |
2899 | s32 | 2888 | s32 e1000_check_for_link(struct e1000_hw *hw) |
2900 | e1000_check_for_link(struct e1000_hw *hw) | ||
2901 | { | 2889 | { |
2902 | u32 rxcw = 0; | 2890 | u32 rxcw = 0; |
2903 | u32 ctrl; | 2891 | u32 ctrl; |
@@ -2910,8 +2898,8 @@ e1000_check_for_link(struct e1000_hw *hw) | |||
2910 | 2898 | ||
2911 | DEBUGFUNC("e1000_check_for_link"); | 2899 | DEBUGFUNC("e1000_check_for_link"); |
2912 | 2900 | ||
2913 | ctrl = E1000_READ_REG(hw, CTRL); | 2901 | ctrl = er32(CTRL); |
2914 | status = E1000_READ_REG(hw, STATUS); | 2902 | status = er32(STATUS); |
2915 | 2903 | ||
2916 | /* On adapters with a MAC newer than 82544, SW Defineable pin 1 will be | 2904 | /* On adapters with a MAC newer than 82544, SW Defineable pin 1 will be |
2917 | * set when the optics detect a signal. On older adapters, it will be | 2905 | * set when the optics detect a signal. On older adapters, it will be |
@@ -2919,7 +2907,7 @@ e1000_check_for_link(struct e1000_hw *hw) | |||
2919 | */ | 2907 | */ |
2920 | if ((hw->media_type == e1000_media_type_fiber) || | 2908 | if ((hw->media_type == e1000_media_type_fiber) || |
2921 | (hw->media_type == e1000_media_type_internal_serdes)) { | 2909 | (hw->media_type == e1000_media_type_internal_serdes)) { |
2922 | rxcw = E1000_READ_REG(hw, RXCW); | 2910 | rxcw = er32(RXCW); |
2923 | 2911 | ||
2924 | if (hw->media_type == e1000_media_type_fiber) { | 2912 | if (hw->media_type == e1000_media_type_fiber) { |
2925 | signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0; | 2913 | signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0; |
@@ -2965,11 +2953,11 @@ e1000_check_for_link(struct e1000_hw *hw) | |||
2965 | (!hw->autoneg) && | 2953 | (!hw->autoneg) && |
2966 | (hw->forced_speed_duplex == e1000_10_full || | 2954 | (hw->forced_speed_duplex == e1000_10_full || |
2967 | hw->forced_speed_duplex == e1000_10_half)) { | 2955 | hw->forced_speed_duplex == e1000_10_half)) { |
2968 | E1000_WRITE_REG(hw, IMC, 0xffffffff); | 2956 | ew32(IMC, 0xffffffff); |
2969 | ret_val = e1000_polarity_reversal_workaround(hw); | 2957 | ret_val = e1000_polarity_reversal_workaround(hw); |
2970 | icr = E1000_READ_REG(hw, ICR); | 2958 | icr = er32(ICR); |
2971 | E1000_WRITE_REG(hw, ICS, (icr & ~E1000_ICS_LSC)); | 2959 | ew32(ICS, (icr & ~E1000_ICS_LSC)); |
2972 | E1000_WRITE_REG(hw, IMS, IMS_ENABLE_MASK); | 2960 | ew32(IMS, IMS_ENABLE_MASK); |
2973 | } | 2961 | } |
2974 | 2962 | ||
2975 | } else { | 2963 | } else { |
@@ -3034,9 +3022,9 @@ e1000_check_for_link(struct e1000_hw *hw) | |||
3034 | */ | 3022 | */ |
3035 | if (hw->tbi_compatibility_on) { | 3023 | if (hw->tbi_compatibility_on) { |
3036 | /* If we previously were in the mode, turn it off. */ | 3024 | /* If we previously were in the mode, turn it off. */ |
3037 | rctl = E1000_READ_REG(hw, RCTL); | 3025 | rctl = er32(RCTL); |
3038 | rctl &= ~E1000_RCTL_SBP; | 3026 | rctl &= ~E1000_RCTL_SBP; |
3039 | E1000_WRITE_REG(hw, RCTL, rctl); | 3027 | ew32(RCTL, rctl); |
3040 | hw->tbi_compatibility_on = false; | 3028 | hw->tbi_compatibility_on = false; |
3041 | } | 3029 | } |
3042 | } else { | 3030 | } else { |
@@ -3047,9 +3035,9 @@ e1000_check_for_link(struct e1000_hw *hw) | |||
3047 | */ | 3035 | */ |
3048 | if (!hw->tbi_compatibility_on) { | 3036 | if (!hw->tbi_compatibility_on) { |
3049 | hw->tbi_compatibility_on = true; | 3037 | hw->tbi_compatibility_on = true; |
3050 | rctl = E1000_READ_REG(hw, RCTL); | 3038 | rctl = er32(RCTL); |
3051 | rctl |= E1000_RCTL_SBP; | 3039 | rctl |= E1000_RCTL_SBP; |
3052 | E1000_WRITE_REG(hw, RCTL, rctl); | 3040 | ew32(RCTL, rctl); |
3053 | } | 3041 | } |
3054 | } | 3042 | } |
3055 | } | 3043 | } |
@@ -3073,12 +3061,12 @@ e1000_check_for_link(struct e1000_hw *hw) | |||
3073 | DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n"); | 3061 | DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n"); |
3074 | 3062 | ||
3075 | /* Disable auto-negotiation in the TXCW register */ | 3063 | /* Disable auto-negotiation in the TXCW register */ |
3076 | E1000_WRITE_REG(hw, TXCW, (hw->txcw & ~E1000_TXCW_ANE)); | 3064 | ew32(TXCW, (hw->txcw & ~E1000_TXCW_ANE)); |
3077 | 3065 | ||
3078 | /* Force link-up and also force full-duplex. */ | 3066 | /* Force link-up and also force full-duplex. */ |
3079 | ctrl = E1000_READ_REG(hw, CTRL); | 3067 | ctrl = er32(CTRL); |
3080 | ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); | 3068 | ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); |
3081 | E1000_WRITE_REG(hw, CTRL, ctrl); | 3069 | ew32(CTRL, ctrl); |
3082 | 3070 | ||
3083 | /* Configure Flow Control after forcing link up. */ | 3071 | /* Configure Flow Control after forcing link up. */ |
3084 | ret_val = e1000_config_fc_after_link_up(hw); | 3072 | ret_val = e1000_config_fc_after_link_up(hw); |
@@ -3096,8 +3084,8 @@ e1000_check_for_link(struct e1000_hw *hw) | |||
3096 | (hw->media_type == e1000_media_type_internal_serdes)) && | 3084 | (hw->media_type == e1000_media_type_internal_serdes)) && |
3097 | (ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { | 3085 | (ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { |
3098 | DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n"); | 3086 | DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n"); |
3099 | E1000_WRITE_REG(hw, TXCW, hw->txcw); | 3087 | ew32(TXCW, hw->txcw); |
3100 | E1000_WRITE_REG(hw, CTRL, (ctrl & ~E1000_CTRL_SLU)); | 3088 | ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); |
3101 | 3089 | ||
3102 | hw->serdes_link_down = false; | 3090 | hw->serdes_link_down = false; |
3103 | } | 3091 | } |
@@ -3105,10 +3093,10 @@ e1000_check_for_link(struct e1000_hw *hw) | |||
3105 | * based on MAC synchronization for internal serdes media type. | 3093 | * based on MAC synchronization for internal serdes media type. |
3106 | */ | 3094 | */ |
3107 | else if ((hw->media_type == e1000_media_type_internal_serdes) && | 3095 | else if ((hw->media_type == e1000_media_type_internal_serdes) && |
3108 | !(E1000_TXCW_ANE & E1000_READ_REG(hw, TXCW))) { | 3096 | !(E1000_TXCW_ANE & er32(TXCW))) { |
3109 | /* SYNCH bit and IV bit are sticky. */ | 3097 | /* SYNCH bit and IV bit are sticky. */ |
3110 | udelay(10); | 3098 | udelay(10); |
3111 | if (E1000_RXCW_SYNCH & E1000_READ_REG(hw, RXCW)) { | 3099 | if (E1000_RXCW_SYNCH & er32(RXCW)) { |
3112 | if (!(rxcw & E1000_RXCW_IV)) { | 3100 | if (!(rxcw & E1000_RXCW_IV)) { |
3113 | hw->serdes_link_down = false; | 3101 | hw->serdes_link_down = false; |
3114 | DEBUGOUT("SERDES: Link is up.\n"); | 3102 | DEBUGOUT("SERDES: Link is up.\n"); |
@@ -3119,8 +3107,8 @@ e1000_check_for_link(struct e1000_hw *hw) | |||
3119 | } | 3107 | } |
3120 | } | 3108 | } |
3121 | if ((hw->media_type == e1000_media_type_internal_serdes) && | 3109 | if ((hw->media_type == e1000_media_type_internal_serdes) && |
3122 | (E1000_TXCW_ANE & E1000_READ_REG(hw, TXCW))) { | 3110 | (E1000_TXCW_ANE & er32(TXCW))) { |
3123 | hw->serdes_link_down = !(E1000_STATUS_LU & E1000_READ_REG(hw, STATUS)); | 3111 | hw->serdes_link_down = !(E1000_STATUS_LU & er32(STATUS)); |
3124 | } | 3112 | } |
3125 | return E1000_SUCCESS; | 3113 | return E1000_SUCCESS; |
3126 | } | 3114 | } |
@@ -3132,10 +3120,7 @@ e1000_check_for_link(struct e1000_hw *hw) | |||
3132 | * speed - Speed of the connection | 3120 | * speed - Speed of the connection |
3133 | * duplex - Duplex setting of the connection | 3121 | * duplex - Duplex setting of the connection |
3134 | *****************************************************************************/ | 3122 | *****************************************************************************/ |
3135 | s32 | 3123 | s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex) |
3136 | e1000_get_speed_and_duplex(struct e1000_hw *hw, | ||
3137 | u16 *speed, | ||
3138 | u16 *duplex) | ||
3139 | { | 3124 | { |
3140 | u32 status; | 3125 | u32 status; |
3141 | s32 ret_val; | 3126 | s32 ret_val; |
@@ -3144,7 +3129,7 @@ e1000_get_speed_and_duplex(struct e1000_hw *hw, | |||
3144 | DEBUGFUNC("e1000_get_speed_and_duplex"); | 3129 | DEBUGFUNC("e1000_get_speed_and_duplex"); |
3145 | 3130 | ||
3146 | if (hw->mac_type >= e1000_82543) { | 3131 | if (hw->mac_type >= e1000_82543) { |
3147 | status = E1000_READ_REG(hw, STATUS); | 3132 | status = er32(STATUS); |
3148 | if (status & E1000_STATUS_SPEED_1000) { | 3133 | if (status & E1000_STATUS_SPEED_1000) { |
3149 | *speed = SPEED_1000; | 3134 | *speed = SPEED_1000; |
3150 | DEBUGOUT("1000 Mbs, "); | 3135 | DEBUGOUT("1000 Mbs, "); |
@@ -3214,8 +3199,7 @@ e1000_get_speed_and_duplex(struct e1000_hw *hw, | |||
3214 | * | 3199 | * |
3215 | * hw - Struct containing variables accessed by shared code | 3200 | * hw - Struct containing variables accessed by shared code |
3216 | ******************************************************************************/ | 3201 | ******************************************************************************/ |
3217 | static s32 | 3202 | static s32 e1000_wait_autoneg(struct e1000_hw *hw) |
3218 | e1000_wait_autoneg(struct e1000_hw *hw) | ||
3219 | { | 3203 | { |
3220 | s32 ret_val; | 3204 | s32 ret_val; |
3221 | u16 i; | 3205 | u16 i; |
@@ -3249,15 +3233,13 @@ e1000_wait_autoneg(struct e1000_hw *hw) | |||
3249 | * hw - Struct containing variables accessed by shared code | 3233 | * hw - Struct containing variables accessed by shared code |
3250 | * ctrl - Device control register's current value | 3234 | * ctrl - Device control register's current value |
3251 | ******************************************************************************/ | 3235 | ******************************************************************************/ |
3252 | static void | 3236 | static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl) |
3253 | e1000_raise_mdi_clk(struct e1000_hw *hw, | ||
3254 | u32 *ctrl) | ||
3255 | { | 3237 | { |
3256 | /* Raise the clock input to the Management Data Clock (by setting the MDC | 3238 | /* Raise the clock input to the Management Data Clock (by setting the MDC |
3257 | * bit), and then delay 10 microseconds. | 3239 | * bit), and then delay 10 microseconds. |
3258 | */ | 3240 | */ |
3259 | E1000_WRITE_REG(hw, CTRL, (*ctrl | E1000_CTRL_MDC)); | 3241 | ew32(CTRL, (*ctrl | E1000_CTRL_MDC)); |
3260 | E1000_WRITE_FLUSH(hw); | 3242 | E1000_WRITE_FLUSH(); |
3261 | udelay(10); | 3243 | udelay(10); |
3262 | } | 3244 | } |
3263 | 3245 | ||
@@ -3267,15 +3249,13 @@ e1000_raise_mdi_clk(struct e1000_hw *hw, | |||
3267 | * hw - Struct containing variables accessed by shared code | 3249 | * hw - Struct containing variables accessed by shared code |
3268 | * ctrl - Device control register's current value | 3250 | * ctrl - Device control register's current value |
3269 | ******************************************************************************/ | 3251 | ******************************************************************************/ |
3270 | static void | 3252 | static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl) |
3271 | e1000_lower_mdi_clk(struct e1000_hw *hw, | ||
3272 | u32 *ctrl) | ||
3273 | { | 3253 | { |
3274 | /* Lower the clock input to the Management Data Clock (by clearing the MDC | 3254 | /* Lower the clock input to the Management Data Clock (by clearing the MDC |
3275 | * bit), and then delay 10 microseconds. | 3255 | * bit), and then delay 10 microseconds. |
3276 | */ | 3256 | */ |
3277 | E1000_WRITE_REG(hw, CTRL, (*ctrl & ~E1000_CTRL_MDC)); | 3257 | ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC)); |
3278 | E1000_WRITE_FLUSH(hw); | 3258 | E1000_WRITE_FLUSH(); |
3279 | udelay(10); | 3259 | udelay(10); |
3280 | } | 3260 | } |
3281 | 3261 | ||
@@ -3288,10 +3268,7 @@ e1000_lower_mdi_clk(struct e1000_hw *hw, | |||
3288 | * | 3268 | * |
3289 | * Bits are shifted out in MSB to LSB order. | 3269 | * Bits are shifted out in MSB to LSB order. |
3290 | ******************************************************************************/ | 3270 | ******************************************************************************/ |
3291 | static void | 3271 | static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count) |
3292 | e1000_shift_out_mdi_bits(struct e1000_hw *hw, | ||
3293 | u32 data, | ||
3294 | u16 count) | ||
3295 | { | 3272 | { |
3296 | u32 ctrl; | 3273 | u32 ctrl; |
3297 | u32 mask; | 3274 | u32 mask; |
@@ -3303,7 +3280,7 @@ e1000_shift_out_mdi_bits(struct e1000_hw *hw, | |||
3303 | mask = 0x01; | 3280 | mask = 0x01; |
3304 | mask <<= (count - 1); | 3281 | mask <<= (count - 1); |
3305 | 3282 | ||
3306 | ctrl = E1000_READ_REG(hw, CTRL); | 3283 | ctrl = er32(CTRL); |
3307 | 3284 | ||
3308 | /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */ | 3285 | /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */ |
3309 | ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR); | 3286 | ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR); |
@@ -3319,8 +3296,8 @@ e1000_shift_out_mdi_bits(struct e1000_hw *hw, | |||
3319 | else | 3296 | else |
3320 | ctrl &= ~E1000_CTRL_MDIO; | 3297 | ctrl &= ~E1000_CTRL_MDIO; |
3321 | 3298 | ||
3322 | E1000_WRITE_REG(hw, CTRL, ctrl); | 3299 | ew32(CTRL, ctrl); |
3323 | E1000_WRITE_FLUSH(hw); | 3300 | E1000_WRITE_FLUSH(); |
3324 | 3301 | ||
3325 | udelay(10); | 3302 | udelay(10); |
3326 | 3303 | ||
@@ -3338,8 +3315,7 @@ e1000_shift_out_mdi_bits(struct e1000_hw *hw, | |||
3338 | * | 3315 | * |
3339 | * Bits are shifted in in MSB to LSB order. | 3316 | * Bits are shifted in in MSB to LSB order. |
3340 | ******************************************************************************/ | 3317 | ******************************************************************************/ |
3341 | static u16 | 3318 | static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw) |
3342 | e1000_shift_in_mdi_bits(struct e1000_hw *hw) | ||
3343 | { | 3319 | { |
3344 | u32 ctrl; | 3320 | u32 ctrl; |
3345 | u16 data = 0; | 3321 | u16 data = 0; |
@@ -3352,14 +3328,14 @@ e1000_shift_in_mdi_bits(struct e1000_hw *hw) | |||
3352 | * by raising the input to the Management Data Clock (setting the MDC bit), | 3328 | * by raising the input to the Management Data Clock (setting the MDC bit), |
3353 | * and then reading the value of the MDIO bit. | 3329 | * and then reading the value of the MDIO bit. |
3354 | */ | 3330 | */ |
3355 | ctrl = E1000_READ_REG(hw, CTRL); | 3331 | ctrl = er32(CTRL); |
3356 | 3332 | ||
3357 | /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as input. */ | 3333 | /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as input. */ |
3358 | ctrl &= ~E1000_CTRL_MDIO_DIR; | 3334 | ctrl &= ~E1000_CTRL_MDIO_DIR; |
3359 | ctrl &= ~E1000_CTRL_MDIO; | 3335 | ctrl &= ~E1000_CTRL_MDIO; |
3360 | 3336 | ||
3361 | E1000_WRITE_REG(hw, CTRL, ctrl); | 3337 | ew32(CTRL, ctrl); |
3362 | E1000_WRITE_FLUSH(hw); | 3338 | E1000_WRITE_FLUSH(); |
3363 | 3339 | ||
3364 | /* Raise and Lower the clock before reading in the data. This accounts for | 3340 | /* Raise and Lower the clock before reading in the data. This accounts for |
3365 | * the turnaround bits. The first clock occurred when we clocked out the | 3341 | * the turnaround bits. The first clock occurred when we clocked out the |
@@ -3371,7 +3347,7 @@ e1000_shift_in_mdi_bits(struct e1000_hw *hw) | |||
3371 | for (data = 0, i = 0; i < 16; i++) { | 3347 | for (data = 0, i = 0; i < 16; i++) { |
3372 | data = data << 1; | 3348 | data = data << 1; |
3373 | e1000_raise_mdi_clk(hw, &ctrl); | 3349 | e1000_raise_mdi_clk(hw, &ctrl); |
3374 | ctrl = E1000_READ_REG(hw, CTRL); | 3350 | ctrl = er32(CTRL); |
3375 | /* Check to see if we shifted in a "1". */ | 3351 | /* Check to see if we shifted in a "1". */ |
3376 | if (ctrl & E1000_CTRL_MDIO) | 3352 | if (ctrl & E1000_CTRL_MDIO) |
3377 | data |= 1; | 3353 | data |= 1; |
@@ -3384,8 +3360,7 @@ e1000_shift_in_mdi_bits(struct e1000_hw *hw) | |||
3384 | return data; | 3360 | return data; |
3385 | } | 3361 | } |
3386 | 3362 | ||
3387 | static s32 | 3363 | static s32 e1000_swfw_sync_acquire(struct e1000_hw *hw, u16 mask) |
3388 | e1000_swfw_sync_acquire(struct e1000_hw *hw, u16 mask) | ||
3389 | { | 3364 | { |
3390 | u32 swfw_sync = 0; | 3365 | u32 swfw_sync = 0; |
3391 | u32 swmask = mask; | 3366 | u32 swmask = mask; |
@@ -3404,7 +3379,7 @@ e1000_swfw_sync_acquire(struct e1000_hw *hw, u16 mask) | |||
3404 | if (e1000_get_hw_eeprom_semaphore(hw)) | 3379 | if (e1000_get_hw_eeprom_semaphore(hw)) |
3405 | return -E1000_ERR_SWFW_SYNC; | 3380 | return -E1000_ERR_SWFW_SYNC; |
3406 | 3381 | ||
3407 | swfw_sync = E1000_READ_REG(hw, SW_FW_SYNC); | 3382 | swfw_sync = er32(SW_FW_SYNC); |
3408 | if (!(swfw_sync & (fwmask | swmask))) { | 3383 | if (!(swfw_sync & (fwmask | swmask))) { |
3409 | break; | 3384 | break; |
3410 | } | 3385 | } |
@@ -3422,14 +3397,13 @@ e1000_swfw_sync_acquire(struct e1000_hw *hw, u16 mask) | |||
3422 | } | 3397 | } |
3423 | 3398 | ||
3424 | swfw_sync |= swmask; | 3399 | swfw_sync |= swmask; |
3425 | E1000_WRITE_REG(hw, SW_FW_SYNC, swfw_sync); | 3400 | ew32(SW_FW_SYNC, swfw_sync); |
3426 | 3401 | ||
3427 | e1000_put_hw_eeprom_semaphore(hw); | 3402 | e1000_put_hw_eeprom_semaphore(hw); |
3428 | return E1000_SUCCESS; | 3403 | return E1000_SUCCESS; |
3429 | } | 3404 | } |
3430 | 3405 | ||
3431 | static void | 3406 | static void e1000_swfw_sync_release(struct e1000_hw *hw, u16 mask) |
3432 | e1000_swfw_sync_release(struct e1000_hw *hw, u16 mask) | ||
3433 | { | 3407 | { |
3434 | u32 swfw_sync; | 3408 | u32 swfw_sync; |
3435 | u32 swmask = mask; | 3409 | u32 swmask = mask; |
@@ -3451,9 +3425,9 @@ e1000_swfw_sync_release(struct e1000_hw *hw, u16 mask) | |||
3451 | while (e1000_get_hw_eeprom_semaphore(hw) != E1000_SUCCESS); | 3425 | while (e1000_get_hw_eeprom_semaphore(hw) != E1000_SUCCESS); |
3452 | /* empty */ | 3426 | /* empty */ |
3453 | 3427 | ||
3454 | swfw_sync = E1000_READ_REG(hw, SW_FW_SYNC); | 3428 | swfw_sync = er32(SW_FW_SYNC); |
3455 | swfw_sync &= ~swmask; | 3429 | swfw_sync &= ~swmask; |
3456 | E1000_WRITE_REG(hw, SW_FW_SYNC, swfw_sync); | 3430 | ew32(SW_FW_SYNC, swfw_sync); |
3457 | 3431 | ||
3458 | e1000_put_hw_eeprom_semaphore(hw); | 3432 | e1000_put_hw_eeprom_semaphore(hw); |
3459 | } | 3433 | } |
@@ -3464,10 +3438,7 @@ e1000_swfw_sync_release(struct e1000_hw *hw, u16 mask) | |||
3464 | * hw - Struct containing variables accessed by shared code | 3438 | * hw - Struct containing variables accessed by shared code |
3465 | * reg_addr - address of the PHY register to read | 3439 | * reg_addr - address of the PHY register to read |
3466 | ******************************************************************************/ | 3440 | ******************************************************************************/ |
3467 | s32 | 3441 | s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data) |
3468 | e1000_read_phy_reg(struct e1000_hw *hw, | ||
3469 | u32 reg_addr, | ||
3470 | u16 *phy_data) | ||
3471 | { | 3442 | { |
3472 | u32 ret_val; | 3443 | u32 ret_val; |
3473 | u16 swfw; | 3444 | u16 swfw; |
@@ -3475,7 +3446,7 @@ e1000_read_phy_reg(struct e1000_hw *hw, | |||
3475 | DEBUGFUNC("e1000_read_phy_reg"); | 3446 | DEBUGFUNC("e1000_read_phy_reg"); |
3476 | 3447 | ||
3477 | if ((hw->mac_type == e1000_80003es2lan) && | 3448 | if ((hw->mac_type == e1000_80003es2lan) && |
3478 | (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) { | 3449 | (er32(STATUS) & E1000_STATUS_FUNC_1)) { |
3479 | swfw = E1000_SWFW_PHY1_SM; | 3450 | swfw = E1000_SWFW_PHY1_SM; |
3480 | } else { | 3451 | } else { |
3481 | swfw = E1000_SWFW_PHY0_SM; | 3452 | swfw = E1000_SWFW_PHY0_SM; |
@@ -3523,9 +3494,8 @@ e1000_read_phy_reg(struct e1000_hw *hw, | |||
3523 | return ret_val; | 3494 | return ret_val; |
3524 | } | 3495 | } |
3525 | 3496 | ||
3526 | static s32 | 3497 | static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, |
3527 | e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, | 3498 | u16 *phy_data) |
3528 | u16 *phy_data) | ||
3529 | { | 3499 | { |
3530 | u32 i; | 3500 | u32 i; |
3531 | u32 mdic = 0; | 3501 | u32 mdic = 0; |
@@ -3547,12 +3517,12 @@ e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, | |||
3547 | (phy_addr << E1000_MDIC_PHY_SHIFT) | | 3517 | (phy_addr << E1000_MDIC_PHY_SHIFT) | |
3548 | (E1000_MDIC_OP_READ)); | 3518 | (E1000_MDIC_OP_READ)); |
3549 | 3519 | ||
3550 | E1000_WRITE_REG(hw, MDIC, mdic); | 3520 | ew32(MDIC, mdic); |
3551 | 3521 | ||
3552 | /* Poll the ready bit to see if the MDI read completed */ | 3522 | /* Poll the ready bit to see if the MDI read completed */ |
3553 | for (i = 0; i < 64; i++) { | 3523 | for (i = 0; i < 64; i++) { |
3554 | udelay(50); | 3524 | udelay(50); |
3555 | mdic = E1000_READ_REG(hw, MDIC); | 3525 | mdic = er32(MDIC); |
3556 | if (mdic & E1000_MDIC_READY) break; | 3526 | if (mdic & E1000_MDIC_READY) break; |
3557 | } | 3527 | } |
3558 | if (!(mdic & E1000_MDIC_READY)) { | 3528 | if (!(mdic & E1000_MDIC_READY)) { |
@@ -3563,7 +3533,7 @@ e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, | |||
3563 | DEBUGOUT("MDI Error\n"); | 3533 | DEBUGOUT("MDI Error\n"); |
3564 | return -E1000_ERR_PHY; | 3534 | return -E1000_ERR_PHY; |
3565 | } | 3535 | } |
3566 | *phy_data = (u16) mdic; | 3536 | *phy_data = (u16)mdic; |
3567 | } else { | 3537 | } else { |
3568 | /* We must first send a preamble through the MDIO pin to signal the | 3538 | /* We must first send a preamble through the MDIO pin to signal the |
3569 | * beginning of an MII instruction. This is done by sending 32 | 3539 | * beginning of an MII instruction. This is done by sending 32 |
@@ -3603,9 +3573,7 @@ e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, | |||
3603 | * reg_addr - address of the PHY register to write | 3573 | * reg_addr - address of the PHY register to write |
3604 | * data - data to write to the PHY | 3574 | * data - data to write to the PHY |
3605 | ******************************************************************************/ | 3575 | ******************************************************************************/ |
3606 | s32 | 3576 | s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data) |
3607 | e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, | ||
3608 | u16 phy_data) | ||
3609 | { | 3577 | { |
3610 | u32 ret_val; | 3578 | u32 ret_val; |
3611 | u16 swfw; | 3579 | u16 swfw; |
@@ -3613,7 +3581,7 @@ e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, | |||
3613 | DEBUGFUNC("e1000_write_phy_reg"); | 3581 | DEBUGFUNC("e1000_write_phy_reg"); |
3614 | 3582 | ||
3615 | if ((hw->mac_type == e1000_80003es2lan) && | 3583 | if ((hw->mac_type == e1000_80003es2lan) && |
3616 | (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) { | 3584 | (er32(STATUS) & E1000_STATUS_FUNC_1)) { |
3617 | swfw = E1000_SWFW_PHY1_SM; | 3585 | swfw = E1000_SWFW_PHY1_SM; |
3618 | } else { | 3586 | } else { |
3619 | swfw = E1000_SWFW_PHY0_SM; | 3587 | swfw = E1000_SWFW_PHY0_SM; |
@@ -3661,9 +3629,8 @@ e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, | |||
3661 | return ret_val; | 3629 | return ret_val; |
3662 | } | 3630 | } |
3663 | 3631 | ||
3664 | static s32 | 3632 | static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, |
3665 | e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, | 3633 | u16 phy_data) |
3666 | u16 phy_data) | ||
3667 | { | 3634 | { |
3668 | u32 i; | 3635 | u32 i; |
3669 | u32 mdic = 0; | 3636 | u32 mdic = 0; |
@@ -3681,17 +3648,17 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, | |||
3681 | * for the PHY register in the MDI Control register. The MAC will take | 3648 | * for the PHY register in the MDI Control register. The MAC will take |
3682 | * care of interfacing with the PHY to send the desired data. | 3649 | * care of interfacing with the PHY to send the desired data. |
3683 | */ | 3650 | */ |
3684 | mdic = (((u32) phy_data) | | 3651 | mdic = (((u32)phy_data) | |
3685 | (reg_addr << E1000_MDIC_REG_SHIFT) | | 3652 | (reg_addr << E1000_MDIC_REG_SHIFT) | |
3686 | (phy_addr << E1000_MDIC_PHY_SHIFT) | | 3653 | (phy_addr << E1000_MDIC_PHY_SHIFT) | |
3687 | (E1000_MDIC_OP_WRITE)); | 3654 | (E1000_MDIC_OP_WRITE)); |
3688 | 3655 | ||
3689 | E1000_WRITE_REG(hw, MDIC, mdic); | 3656 | ew32(MDIC, mdic); |
3690 | 3657 | ||
3691 | /* Poll the ready bit to see if the MDI read completed */ | 3658 | /* Poll the ready bit to see if the MDI read completed */ |
3692 | for (i = 0; i < 641; i++) { | 3659 | for (i = 0; i < 641; i++) { |
3693 | udelay(5); | 3660 | udelay(5); |
3694 | mdic = E1000_READ_REG(hw, MDIC); | 3661 | mdic = er32(MDIC); |
3695 | if (mdic & E1000_MDIC_READY) break; | 3662 | if (mdic & E1000_MDIC_READY) break; |
3696 | } | 3663 | } |
3697 | if (!(mdic & E1000_MDIC_READY)) { | 3664 | if (!(mdic & E1000_MDIC_READY)) { |
@@ -3715,7 +3682,7 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, | |||
3715 | mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) | | 3682 | mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) | |
3716 | (PHY_OP_WRITE << 12) | (PHY_SOF << 14)); | 3683 | (PHY_OP_WRITE << 12) | (PHY_SOF << 14)); |
3717 | mdic <<= 16; | 3684 | mdic <<= 16; |
3718 | mdic |= (u32) phy_data; | 3685 | mdic |= (u32)phy_data; |
3719 | 3686 | ||
3720 | e1000_shift_out_mdi_bits(hw, mdic, 32); | 3687 | e1000_shift_out_mdi_bits(hw, mdic, 32); |
3721 | } | 3688 | } |
@@ -3723,17 +3690,14 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, | |||
3723 | return E1000_SUCCESS; | 3690 | return E1000_SUCCESS; |
3724 | } | 3691 | } |
3725 | 3692 | ||
3726 | static s32 | 3693 | static s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 *data) |
3727 | e1000_read_kmrn_reg(struct e1000_hw *hw, | ||
3728 | u32 reg_addr, | ||
3729 | u16 *data) | ||
3730 | { | 3694 | { |
3731 | u32 reg_val; | 3695 | u32 reg_val; |
3732 | u16 swfw; | 3696 | u16 swfw; |
3733 | DEBUGFUNC("e1000_read_kmrn_reg"); | 3697 | DEBUGFUNC("e1000_read_kmrn_reg"); |
3734 | 3698 | ||
3735 | if ((hw->mac_type == e1000_80003es2lan) && | 3699 | if ((hw->mac_type == e1000_80003es2lan) && |
3736 | (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) { | 3700 | (er32(STATUS) & E1000_STATUS_FUNC_1)) { |
3737 | swfw = E1000_SWFW_PHY1_SM; | 3701 | swfw = E1000_SWFW_PHY1_SM; |
3738 | } else { | 3702 | } else { |
3739 | swfw = E1000_SWFW_PHY0_SM; | 3703 | swfw = E1000_SWFW_PHY0_SM; |
@@ -3745,28 +3709,25 @@ e1000_read_kmrn_reg(struct e1000_hw *hw, | |||
3745 | reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) & | 3709 | reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) & |
3746 | E1000_KUMCTRLSTA_OFFSET) | | 3710 | E1000_KUMCTRLSTA_OFFSET) | |
3747 | E1000_KUMCTRLSTA_REN; | 3711 | E1000_KUMCTRLSTA_REN; |
3748 | E1000_WRITE_REG(hw, KUMCTRLSTA, reg_val); | 3712 | ew32(KUMCTRLSTA, reg_val); |
3749 | udelay(2); | 3713 | udelay(2); |
3750 | 3714 | ||
3751 | /* Read the data returned */ | 3715 | /* Read the data returned */ |
3752 | reg_val = E1000_READ_REG(hw, KUMCTRLSTA); | 3716 | reg_val = er32(KUMCTRLSTA); |
3753 | *data = (u16)reg_val; | 3717 | *data = (u16)reg_val; |
3754 | 3718 | ||
3755 | e1000_swfw_sync_release(hw, swfw); | 3719 | e1000_swfw_sync_release(hw, swfw); |
3756 | return E1000_SUCCESS; | 3720 | return E1000_SUCCESS; |
3757 | } | 3721 | } |
3758 | 3722 | ||
3759 | static s32 | 3723 | static s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 data) |
3760 | e1000_write_kmrn_reg(struct e1000_hw *hw, | ||
3761 | u32 reg_addr, | ||
3762 | u16 data) | ||
3763 | { | 3724 | { |
3764 | u32 reg_val; | 3725 | u32 reg_val; |
3765 | u16 swfw; | 3726 | u16 swfw; |
3766 | DEBUGFUNC("e1000_write_kmrn_reg"); | 3727 | DEBUGFUNC("e1000_write_kmrn_reg"); |
3767 | 3728 | ||
3768 | if ((hw->mac_type == e1000_80003es2lan) && | 3729 | if ((hw->mac_type == e1000_80003es2lan) && |
3769 | (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) { | 3730 | (er32(STATUS) & E1000_STATUS_FUNC_1)) { |
3770 | swfw = E1000_SWFW_PHY1_SM; | 3731 | swfw = E1000_SWFW_PHY1_SM; |
3771 | } else { | 3732 | } else { |
3772 | swfw = E1000_SWFW_PHY0_SM; | 3733 | swfw = E1000_SWFW_PHY0_SM; |
@@ -3776,7 +3737,7 @@ e1000_write_kmrn_reg(struct e1000_hw *hw, | |||
3776 | 3737 | ||
3777 | reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) & | 3738 | reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) & |
3778 | E1000_KUMCTRLSTA_OFFSET) | data; | 3739 | E1000_KUMCTRLSTA_OFFSET) | data; |
3779 | E1000_WRITE_REG(hw, KUMCTRLSTA, reg_val); | 3740 | ew32(KUMCTRLSTA, reg_val); |
3780 | udelay(2); | 3741 | udelay(2); |
3781 | 3742 | ||
3782 | e1000_swfw_sync_release(hw, swfw); | 3743 | e1000_swfw_sync_release(hw, swfw); |
@@ -3788,8 +3749,7 @@ e1000_write_kmrn_reg(struct e1000_hw *hw, | |||
3788 | * | 3749 | * |
3789 | * hw - Struct containing variables accessed by shared code | 3750 | * hw - Struct containing variables accessed by shared code |
3790 | ******************************************************************************/ | 3751 | ******************************************************************************/ |
3791 | s32 | 3752 | s32 e1000_phy_hw_reset(struct e1000_hw *hw) |
3792 | e1000_phy_hw_reset(struct e1000_hw *hw) | ||
3793 | { | 3753 | { |
3794 | u32 ctrl, ctrl_ext; | 3754 | u32 ctrl, ctrl_ext; |
3795 | u32 led_ctrl; | 3755 | u32 led_ctrl; |
@@ -3808,7 +3768,7 @@ e1000_phy_hw_reset(struct e1000_hw *hw) | |||
3808 | 3768 | ||
3809 | if (hw->mac_type > e1000_82543) { | 3769 | if (hw->mac_type > e1000_82543) { |
3810 | if ((hw->mac_type == e1000_80003es2lan) && | 3770 | if ((hw->mac_type == e1000_80003es2lan) && |
3811 | (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) { | 3771 | (er32(STATUS) & E1000_STATUS_FUNC_1)) { |
3812 | swfw = E1000_SWFW_PHY1_SM; | 3772 | swfw = E1000_SWFW_PHY1_SM; |
3813 | } else { | 3773 | } else { |
3814 | swfw = E1000_SWFW_PHY0_SM; | 3774 | swfw = E1000_SWFW_PHY0_SM; |
@@ -3823,17 +3783,17 @@ e1000_phy_hw_reset(struct e1000_hw *hw) | |||
3823 | * and deassert. For e1000_82571 hardware and later, we instead delay | 3783 | * and deassert. For e1000_82571 hardware and later, we instead delay |
3824 | * for 50us between and 10ms after the deassertion. | 3784 | * for 50us between and 10ms after the deassertion. |
3825 | */ | 3785 | */ |
3826 | ctrl = E1000_READ_REG(hw, CTRL); | 3786 | ctrl = er32(CTRL); |
3827 | E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PHY_RST); | 3787 | ew32(CTRL, ctrl | E1000_CTRL_PHY_RST); |
3828 | E1000_WRITE_FLUSH(hw); | 3788 | E1000_WRITE_FLUSH(); |
3829 | 3789 | ||
3830 | if (hw->mac_type < e1000_82571) | 3790 | if (hw->mac_type < e1000_82571) |
3831 | msleep(10); | 3791 | msleep(10); |
3832 | else | 3792 | else |
3833 | udelay(100); | 3793 | udelay(100); |
3834 | 3794 | ||
3835 | E1000_WRITE_REG(hw, CTRL, ctrl); | 3795 | ew32(CTRL, ctrl); |
3836 | E1000_WRITE_FLUSH(hw); | 3796 | E1000_WRITE_FLUSH(); |
3837 | 3797 | ||
3838 | if (hw->mac_type >= e1000_82571) | 3798 | if (hw->mac_type >= e1000_82571) |
3839 | mdelay(10); | 3799 | mdelay(10); |
@@ -3843,24 +3803,24 @@ e1000_phy_hw_reset(struct e1000_hw *hw) | |||
3843 | /* Read the Extended Device Control Register, assert the PHY_RESET_DIR | 3803 | /* Read the Extended Device Control Register, assert the PHY_RESET_DIR |
3844 | * bit to put the PHY into reset. Then, take it out of reset. | 3804 | * bit to put the PHY into reset. Then, take it out of reset. |
3845 | */ | 3805 | */ |
3846 | ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); | 3806 | ctrl_ext = er32(CTRL_EXT); |
3847 | ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR; | 3807 | ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR; |
3848 | ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA; | 3808 | ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA; |
3849 | E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); | 3809 | ew32(CTRL_EXT, ctrl_ext); |
3850 | E1000_WRITE_FLUSH(hw); | 3810 | E1000_WRITE_FLUSH(); |
3851 | msleep(10); | 3811 | msleep(10); |
3852 | ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA; | 3812 | ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA; |
3853 | E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); | 3813 | ew32(CTRL_EXT, ctrl_ext); |
3854 | E1000_WRITE_FLUSH(hw); | 3814 | E1000_WRITE_FLUSH(); |
3855 | } | 3815 | } |
3856 | udelay(150); | 3816 | udelay(150); |
3857 | 3817 | ||
3858 | if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { | 3818 | if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { |
3859 | /* Configure activity LED after PHY reset */ | 3819 | /* Configure activity LED after PHY reset */ |
3860 | led_ctrl = E1000_READ_REG(hw, LEDCTL); | 3820 | led_ctrl = er32(LEDCTL); |
3861 | led_ctrl &= IGP_ACTIVITY_LED_MASK; | 3821 | led_ctrl &= IGP_ACTIVITY_LED_MASK; |
3862 | led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); | 3822 | led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); |
3863 | E1000_WRITE_REG(hw, LEDCTL, led_ctrl); | 3823 | ew32(LEDCTL, led_ctrl); |
3864 | } | 3824 | } |
3865 | 3825 | ||
3866 | /* Wait for FW to finish PHY configuration. */ | 3826 | /* Wait for FW to finish PHY configuration. */ |
@@ -3882,8 +3842,7 @@ e1000_phy_hw_reset(struct e1000_hw *hw) | |||
3882 | * | 3842 | * |
3883 | * Sets bit 15 of the MII Control register | 3843 | * Sets bit 15 of the MII Control register |
3884 | ******************************************************************************/ | 3844 | ******************************************************************************/ |
3885 | s32 | 3845 | s32 e1000_phy_reset(struct e1000_hw *hw) |
3886 | e1000_phy_reset(struct e1000_hw *hw) | ||
3887 | { | 3846 | { |
3888 | s32 ret_val; | 3847 | s32 ret_val; |
3889 | u16 phy_data; | 3848 | u16 phy_data; |
@@ -3934,8 +3893,7 @@ e1000_phy_reset(struct e1000_hw *hw) | |||
3934 | * | 3893 | * |
3935 | * hw - struct containing variables accessed by shared code | 3894 | * hw - struct containing variables accessed by shared code |
3936 | ******************************************************************************/ | 3895 | ******************************************************************************/ |
3937 | void | 3896 | void e1000_phy_powerdown_workaround(struct e1000_hw *hw) |
3938 | e1000_phy_powerdown_workaround(struct e1000_hw *hw) | ||
3939 | { | 3897 | { |
3940 | s32 reg; | 3898 | s32 reg; |
3941 | u16 phy_data; | 3899 | u16 phy_data; |
@@ -3948,8 +3906,8 @@ e1000_phy_powerdown_workaround(struct e1000_hw *hw) | |||
3948 | 3906 | ||
3949 | do { | 3907 | do { |
3950 | /* Disable link */ | 3908 | /* Disable link */ |
3951 | reg = E1000_READ_REG(hw, PHY_CTRL); | 3909 | reg = er32(PHY_CTRL); |
3952 | E1000_WRITE_REG(hw, PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE | | 3910 | ew32(PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE | |
3953 | E1000_PHY_CTRL_NOND0A_GBE_DISABLE); | 3911 | E1000_PHY_CTRL_NOND0A_GBE_DISABLE); |
3954 | 3912 | ||
3955 | /* Write VR power-down enable - bits 9:8 should be 10b */ | 3913 | /* Write VR power-down enable - bits 9:8 should be 10b */ |
@@ -3964,8 +3922,8 @@ e1000_phy_powerdown_workaround(struct e1000_hw *hw) | |||
3964 | break; | 3922 | break; |
3965 | 3923 | ||
3966 | /* Issue PHY reset and repeat at most one more time */ | 3924 | /* Issue PHY reset and repeat at most one more time */ |
3967 | reg = E1000_READ_REG(hw, CTRL); | 3925 | reg = er32(CTRL); |
3968 | E1000_WRITE_REG(hw, CTRL, reg | E1000_CTRL_PHY_RST); | 3926 | ew32(CTRL, reg | E1000_CTRL_PHY_RST); |
3969 | retry++; | 3927 | retry++; |
3970 | } while (retry); | 3928 | } while (retry); |
3971 | 3929 | ||
@@ -3987,8 +3945,7 @@ e1000_phy_powerdown_workaround(struct e1000_hw *hw) | |||
3987 | * | 3945 | * |
3988 | * hw - struct containing variables accessed by shared code | 3946 | * hw - struct containing variables accessed by shared code |
3989 | ******************************************************************************/ | 3947 | ******************************************************************************/ |
3990 | static s32 | 3948 | static s32 e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw) |
3991 | e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw) | ||
3992 | { | 3949 | { |
3993 | s32 ret_val; | 3950 | s32 ret_val; |
3994 | s32 reg; | 3951 | s32 reg; |
@@ -4024,8 +3981,8 @@ e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw) | |||
4024 | mdelay(5); | 3981 | mdelay(5); |
4025 | } | 3982 | } |
4026 | /* Disable GigE link negotiation */ | 3983 | /* Disable GigE link negotiation */ |
4027 | reg = E1000_READ_REG(hw, PHY_CTRL); | 3984 | reg = er32(PHY_CTRL); |
4028 | E1000_WRITE_REG(hw, PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE | | 3985 | ew32(PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE | |
4029 | E1000_PHY_CTRL_NOND0A_GBE_DISABLE); | 3986 | E1000_PHY_CTRL_NOND0A_GBE_DISABLE); |
4030 | 3987 | ||
4031 | /* unable to acquire PCS lock */ | 3988 | /* unable to acquire PCS lock */ |
@@ -4040,8 +3997,7 @@ e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw) | |||
4040 | * | 3997 | * |
4041 | * hw - Struct containing variables accessed by shared code | 3998 | * hw - Struct containing variables accessed by shared code |
4042 | ******************************************************************************/ | 3999 | ******************************************************************************/ |
4043 | static s32 | 4000 | static s32 e1000_detect_gig_phy(struct e1000_hw *hw) |
4044 | e1000_detect_gig_phy(struct e1000_hw *hw) | ||
4045 | { | 4001 | { |
4046 | s32 phy_init_status, ret_val; | 4002 | s32 phy_init_status, ret_val; |
4047 | u16 phy_id_high, phy_id_low; | 4003 | u16 phy_id_high, phy_id_low; |
@@ -4076,14 +4032,14 @@ e1000_detect_gig_phy(struct e1000_hw *hw) | |||
4076 | if (ret_val) | 4032 | if (ret_val) |
4077 | return ret_val; | 4033 | return ret_val; |
4078 | 4034 | ||
4079 | hw->phy_id = (u32) (phy_id_high << 16); | 4035 | hw->phy_id = (u32)(phy_id_high << 16); |
4080 | udelay(20); | 4036 | udelay(20); |
4081 | ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low); | 4037 | ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low); |
4082 | if (ret_val) | 4038 | if (ret_val) |
4083 | return ret_val; | 4039 | return ret_val; |
4084 | 4040 | ||
4085 | hw->phy_id |= (u32) (phy_id_low & PHY_REVISION_MASK); | 4041 | hw->phy_id |= (u32)(phy_id_low & PHY_REVISION_MASK); |
4086 | hw->phy_revision = (u32) phy_id_low & ~PHY_REVISION_MASK; | 4042 | hw->phy_revision = (u32)phy_id_low & ~PHY_REVISION_MASK; |
4087 | 4043 | ||
4088 | switch (hw->mac_type) { | 4044 | switch (hw->mac_type) { |
4089 | case e1000_82543: | 4045 | case e1000_82543: |
@@ -4136,8 +4092,7 @@ e1000_detect_gig_phy(struct e1000_hw *hw) | |||
4136 | * | 4092 | * |
4137 | * hw - Struct containing variables accessed by shared code | 4093 | * hw - Struct containing variables accessed by shared code |
4138 | ******************************************************************************/ | 4094 | ******************************************************************************/ |
4139 | static s32 | 4095 | static s32 e1000_phy_reset_dsp(struct e1000_hw *hw) |
4140 | e1000_phy_reset_dsp(struct e1000_hw *hw) | ||
4141 | { | 4096 | { |
4142 | s32 ret_val; | 4097 | s32 ret_val; |
4143 | DEBUGFUNC("e1000_phy_reset_dsp"); | 4098 | DEBUGFUNC("e1000_phy_reset_dsp"); |
@@ -4163,9 +4118,8 @@ e1000_phy_reset_dsp(struct e1000_hw *hw) | |||
4163 | * hw - Struct containing variables accessed by shared code | 4118 | * hw - Struct containing variables accessed by shared code |
4164 | * phy_info - PHY information structure | 4119 | * phy_info - PHY information structure |
4165 | ******************************************************************************/ | 4120 | ******************************************************************************/ |
4166 | static s32 | 4121 | static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, |
4167 | e1000_phy_igp_get_info(struct e1000_hw *hw, | 4122 | struct e1000_phy_info *phy_info) |
4168 | struct e1000_phy_info *phy_info) | ||
4169 | { | 4123 | { |
4170 | s32 ret_val; | 4124 | s32 ret_val; |
4171 | u16 phy_data, min_length, max_length, average; | 4125 | u16 phy_data, min_length, max_length, average; |
@@ -4240,9 +4194,8 @@ e1000_phy_igp_get_info(struct e1000_hw *hw, | |||
4240 | * hw - Struct containing variables accessed by shared code | 4194 | * hw - Struct containing variables accessed by shared code |
4241 | * phy_info - PHY information structure | 4195 | * phy_info - PHY information structure |
4242 | ******************************************************************************/ | 4196 | ******************************************************************************/ |
4243 | static s32 | 4197 | static s32 e1000_phy_ife_get_info(struct e1000_hw *hw, |
4244 | e1000_phy_ife_get_info(struct e1000_hw *hw, | 4198 | struct e1000_phy_info *phy_info) |
4245 | struct e1000_phy_info *phy_info) | ||
4246 | { | 4199 | { |
4247 | s32 ret_val; | 4200 | s32 ret_val; |
4248 | u16 phy_data; | 4201 | u16 phy_data; |
@@ -4290,9 +4243,8 @@ e1000_phy_ife_get_info(struct e1000_hw *hw, | |||
4290 | * hw - Struct containing variables accessed by shared code | 4243 | * hw - Struct containing variables accessed by shared code |
4291 | * phy_info - PHY information structure | 4244 | * phy_info - PHY information structure |
4292 | ******************************************************************************/ | 4245 | ******************************************************************************/ |
4293 | static s32 | 4246 | static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, |
4294 | e1000_phy_m88_get_info(struct e1000_hw *hw, | 4247 | struct e1000_phy_info *phy_info) |
4295 | struct e1000_phy_info *phy_info) | ||
4296 | { | 4248 | { |
4297 | s32 ret_val; | 4249 | s32 ret_val; |
4298 | u16 phy_data; | 4250 | u16 phy_data; |
@@ -4369,9 +4321,7 @@ e1000_phy_m88_get_info(struct e1000_hw *hw, | |||
4369 | * hw - Struct containing variables accessed by shared code | 4321 | * hw - Struct containing variables accessed by shared code |
4370 | * phy_info - PHY information structure | 4322 | * phy_info - PHY information structure |
4371 | ******************************************************************************/ | 4323 | ******************************************************************************/ |
4372 | s32 | 4324 | s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info) |
4373 | e1000_phy_get_info(struct e1000_hw *hw, | ||
4374 | struct e1000_phy_info *phy_info) | ||
4375 | { | 4325 | { |
4376 | s32 ret_val; | 4326 | s32 ret_val; |
4377 | u16 phy_data; | 4327 | u16 phy_data; |
@@ -4415,8 +4365,7 @@ e1000_phy_get_info(struct e1000_hw *hw, | |||
4415 | return e1000_phy_m88_get_info(hw, phy_info); | 4365 | return e1000_phy_m88_get_info(hw, phy_info); |
4416 | } | 4366 | } |
4417 | 4367 | ||
4418 | s32 | 4368 | s32 e1000_validate_mdi_setting(struct e1000_hw *hw) |
4419 | e1000_validate_mdi_setting(struct e1000_hw *hw) | ||
4420 | { | 4369 | { |
4421 | DEBUGFUNC("e1000_validate_mdi_settings"); | 4370 | DEBUGFUNC("e1000_validate_mdi_settings"); |
4422 | 4371 | ||
@@ -4436,11 +4385,10 @@ e1000_validate_mdi_setting(struct e1000_hw *hw) | |||
4436 | * | 4385 | * |
4437 | * hw - Struct containing variables accessed by shared code | 4386 | * hw - Struct containing variables accessed by shared code |
4438 | *****************************************************************************/ | 4387 | *****************************************************************************/ |
4439 | s32 | 4388 | s32 e1000_init_eeprom_params(struct e1000_hw *hw) |
4440 | e1000_init_eeprom_params(struct e1000_hw *hw) | ||
4441 | { | 4389 | { |
4442 | struct e1000_eeprom_info *eeprom = &hw->eeprom; | 4390 | struct e1000_eeprom_info *eeprom = &hw->eeprom; |
4443 | u32 eecd = E1000_READ_REG(hw, EECD); | 4391 | u32 eecd = er32(EECD); |
4444 | s32 ret_val = E1000_SUCCESS; | 4392 | s32 ret_val = E1000_SUCCESS; |
4445 | u16 eeprom_size; | 4393 | u16 eeprom_size; |
4446 | 4394 | ||
@@ -4542,7 +4490,7 @@ e1000_init_eeprom_params(struct e1000_hw *hw) | |||
4542 | /* Ensure that the Autonomous FLASH update bit is cleared due to | 4490 | /* Ensure that the Autonomous FLASH update bit is cleared due to |
4543 | * Flash update issue on parts which use a FLASH for NVM. */ | 4491 | * Flash update issue on parts which use a FLASH for NVM. */ |
4544 | eecd &= ~E1000_EECD_AUPDEN; | 4492 | eecd &= ~E1000_EECD_AUPDEN; |
4545 | E1000_WRITE_REG(hw, EECD, eecd); | 4493 | ew32(EECD, eecd); |
4546 | } | 4494 | } |
4547 | break; | 4495 | break; |
4548 | case e1000_80003es2lan: | 4496 | case e1000_80003es2lan: |
@@ -4626,16 +4574,14 @@ e1000_init_eeprom_params(struct e1000_hw *hw) | |||
4626 | * hw - Struct containing variables accessed by shared code | 4574 | * hw - Struct containing variables accessed by shared code |
4627 | * eecd - EECD's current value | 4575 | * eecd - EECD's current value |
4628 | *****************************************************************************/ | 4576 | *****************************************************************************/ |
4629 | static void | 4577 | static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd) |
4630 | e1000_raise_ee_clk(struct e1000_hw *hw, | ||
4631 | u32 *eecd) | ||
4632 | { | 4578 | { |
4633 | /* Raise the clock input to the EEPROM (by setting the SK bit), and then | 4579 | /* Raise the clock input to the EEPROM (by setting the SK bit), and then |
4634 | * wait <delay> microseconds. | 4580 | * wait <delay> microseconds. |
4635 | */ | 4581 | */ |
4636 | *eecd = *eecd | E1000_EECD_SK; | 4582 | *eecd = *eecd | E1000_EECD_SK; |
4637 | E1000_WRITE_REG(hw, EECD, *eecd); | 4583 | ew32(EECD, *eecd); |
4638 | E1000_WRITE_FLUSH(hw); | 4584 | E1000_WRITE_FLUSH(); |
4639 | udelay(hw->eeprom.delay_usec); | 4585 | udelay(hw->eeprom.delay_usec); |
4640 | } | 4586 | } |
4641 | 4587 | ||
@@ -4645,16 +4591,14 @@ e1000_raise_ee_clk(struct e1000_hw *hw, | |||
4645 | * hw - Struct containing variables accessed by shared code | 4591 | * hw - Struct containing variables accessed by shared code |
4646 | * eecd - EECD's current value | 4592 | * eecd - EECD's current value |
4647 | *****************************************************************************/ | 4593 | *****************************************************************************/ |
4648 | static void | 4594 | static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd) |
4649 | e1000_lower_ee_clk(struct e1000_hw *hw, | ||
4650 | u32 *eecd) | ||
4651 | { | 4595 | { |
4652 | /* Lower the clock input to the EEPROM (by clearing the SK bit), and then | 4596 | /* Lower the clock input to the EEPROM (by clearing the SK bit), and then |
4653 | * wait 50 microseconds. | 4597 | * wait 50 microseconds. |
4654 | */ | 4598 | */ |
4655 | *eecd = *eecd & ~E1000_EECD_SK; | 4599 | *eecd = *eecd & ~E1000_EECD_SK; |
4656 | E1000_WRITE_REG(hw, EECD, *eecd); | 4600 | ew32(EECD, *eecd); |
4657 | E1000_WRITE_FLUSH(hw); | 4601 | E1000_WRITE_FLUSH(); |
4658 | udelay(hw->eeprom.delay_usec); | 4602 | udelay(hw->eeprom.delay_usec); |
4659 | } | 4603 | } |
4660 | 4604 | ||
@@ -4665,10 +4609,7 @@ e1000_lower_ee_clk(struct e1000_hw *hw, | |||
4665 | * data - data to send to the EEPROM | 4609 | * data - data to send to the EEPROM |
4666 | * count - number of bits to shift out | 4610 | * count - number of bits to shift out |
4667 | *****************************************************************************/ | 4611 | *****************************************************************************/ |
4668 | static void | 4612 | static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count) |
4669 | e1000_shift_out_ee_bits(struct e1000_hw *hw, | ||
4670 | u16 data, | ||
4671 | u16 count) | ||
4672 | { | 4613 | { |
4673 | struct e1000_eeprom_info *eeprom = &hw->eeprom; | 4614 | struct e1000_eeprom_info *eeprom = &hw->eeprom; |
4674 | u32 eecd; | 4615 | u32 eecd; |
@@ -4679,7 +4620,7 @@ e1000_shift_out_ee_bits(struct e1000_hw *hw, | |||
4679 | * In order to do this, "data" must be broken down into bits. | 4620 | * In order to do this, "data" must be broken down into bits. |
4680 | */ | 4621 | */ |
4681 | mask = 0x01 << (count - 1); | 4622 | mask = 0x01 << (count - 1); |
4682 | eecd = E1000_READ_REG(hw, EECD); | 4623 | eecd = er32(EECD); |
4683 | if (eeprom->type == e1000_eeprom_microwire) { | 4624 | if (eeprom->type == e1000_eeprom_microwire) { |
4684 | eecd &= ~E1000_EECD_DO; | 4625 | eecd &= ~E1000_EECD_DO; |
4685 | } else if (eeprom->type == e1000_eeprom_spi) { | 4626 | } else if (eeprom->type == e1000_eeprom_spi) { |
@@ -4696,8 +4637,8 @@ e1000_shift_out_ee_bits(struct e1000_hw *hw, | |||
4696 | if (data & mask) | 4637 | if (data & mask) |
4697 | eecd |= E1000_EECD_DI; | 4638 | eecd |= E1000_EECD_DI; |
4698 | 4639 | ||
4699 | E1000_WRITE_REG(hw, EECD, eecd); | 4640 | ew32(EECD, eecd); |
4700 | E1000_WRITE_FLUSH(hw); | 4641 | E1000_WRITE_FLUSH(); |
4701 | 4642 | ||
4702 | udelay(eeprom->delay_usec); | 4643 | udelay(eeprom->delay_usec); |
4703 | 4644 | ||
@@ -4710,7 +4651,7 @@ e1000_shift_out_ee_bits(struct e1000_hw *hw, | |||
4710 | 4651 | ||
4711 | /* We leave the "DI" bit set to "0" when we leave this routine. */ | 4652 | /* We leave the "DI" bit set to "0" when we leave this routine. */ |
4712 | eecd &= ~E1000_EECD_DI; | 4653 | eecd &= ~E1000_EECD_DI; |
4713 | E1000_WRITE_REG(hw, EECD, eecd); | 4654 | ew32(EECD, eecd); |
4714 | } | 4655 | } |
4715 | 4656 | ||
4716 | /****************************************************************************** | 4657 | /****************************************************************************** |
@@ -4718,9 +4659,7 @@ e1000_shift_out_ee_bits(struct e1000_hw *hw, | |||
4718 | * | 4659 | * |
4719 | * hw - Struct containing variables accessed by shared code | 4660 | * hw - Struct containing variables accessed by shared code |
4720 | *****************************************************************************/ | 4661 | *****************************************************************************/ |
4721 | static u16 | 4662 | static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count) |
4722 | e1000_shift_in_ee_bits(struct e1000_hw *hw, | ||
4723 | u16 count) | ||
4724 | { | 4663 | { |
4725 | u32 eecd; | 4664 | u32 eecd; |
4726 | u32 i; | 4665 | u32 i; |
@@ -4733,7 +4672,7 @@ e1000_shift_in_ee_bits(struct e1000_hw *hw, | |||
4733 | * always be clear. | 4672 | * always be clear. |
4734 | */ | 4673 | */ |
4735 | 4674 | ||
4736 | eecd = E1000_READ_REG(hw, EECD); | 4675 | eecd = er32(EECD); |
4737 | 4676 | ||
4738 | eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); | 4677 | eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); |
4739 | data = 0; | 4678 | data = 0; |
@@ -4742,7 +4681,7 @@ e1000_shift_in_ee_bits(struct e1000_hw *hw, | |||
4742 | data = data << 1; | 4681 | data = data << 1; |
4743 | e1000_raise_ee_clk(hw, &eecd); | 4682 | e1000_raise_ee_clk(hw, &eecd); |
4744 | 4683 | ||
4745 | eecd = E1000_READ_REG(hw, EECD); | 4684 | eecd = er32(EECD); |
4746 | 4685 | ||
4747 | eecd &= ~(E1000_EECD_DI); | 4686 | eecd &= ~(E1000_EECD_DI); |
4748 | if (eecd & E1000_EECD_DO) | 4687 | if (eecd & E1000_EECD_DO) |
@@ -4762,8 +4701,7 @@ e1000_shift_in_ee_bits(struct e1000_hw *hw, | |||
4762 | * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This | 4701 | * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This |
4763 | * function should be called before issuing a command to the EEPROM. | 4702 | * function should be called before issuing a command to the EEPROM. |
4764 | *****************************************************************************/ | 4703 | *****************************************************************************/ |
4765 | static s32 | 4704 | static s32 e1000_acquire_eeprom(struct e1000_hw *hw) |
4766 | e1000_acquire_eeprom(struct e1000_hw *hw) | ||
4767 | { | 4705 | { |
4768 | struct e1000_eeprom_info *eeprom = &hw->eeprom; | 4706 | struct e1000_eeprom_info *eeprom = &hw->eeprom; |
4769 | u32 eecd, i=0; | 4707 | u32 eecd, i=0; |
@@ -4772,23 +4710,23 @@ e1000_acquire_eeprom(struct e1000_hw *hw) | |||
4772 | 4710 | ||
4773 | if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM)) | 4711 | if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM)) |
4774 | return -E1000_ERR_SWFW_SYNC; | 4712 | return -E1000_ERR_SWFW_SYNC; |
4775 | eecd = E1000_READ_REG(hw, EECD); | 4713 | eecd = er32(EECD); |
4776 | 4714 | ||
4777 | if (hw->mac_type != e1000_82573) { | 4715 | if (hw->mac_type != e1000_82573) { |
4778 | /* Request EEPROM Access */ | 4716 | /* Request EEPROM Access */ |
4779 | if (hw->mac_type > e1000_82544) { | 4717 | if (hw->mac_type > e1000_82544) { |
4780 | eecd |= E1000_EECD_REQ; | 4718 | eecd |= E1000_EECD_REQ; |
4781 | E1000_WRITE_REG(hw, EECD, eecd); | 4719 | ew32(EECD, eecd); |
4782 | eecd = E1000_READ_REG(hw, EECD); | 4720 | eecd = er32(EECD); |
4783 | while ((!(eecd & E1000_EECD_GNT)) && | 4721 | while ((!(eecd & E1000_EECD_GNT)) && |
4784 | (i < E1000_EEPROM_GRANT_ATTEMPTS)) { | 4722 | (i < E1000_EEPROM_GRANT_ATTEMPTS)) { |
4785 | i++; | 4723 | i++; |
4786 | udelay(5); | 4724 | udelay(5); |
4787 | eecd = E1000_READ_REG(hw, EECD); | 4725 | eecd = er32(EECD); |
4788 | } | 4726 | } |
4789 | if (!(eecd & E1000_EECD_GNT)) { | 4727 | if (!(eecd & E1000_EECD_GNT)) { |
4790 | eecd &= ~E1000_EECD_REQ; | 4728 | eecd &= ~E1000_EECD_REQ; |
4791 | E1000_WRITE_REG(hw, EECD, eecd); | 4729 | ew32(EECD, eecd); |
4792 | DEBUGOUT("Could not acquire EEPROM grant\n"); | 4730 | DEBUGOUT("Could not acquire EEPROM grant\n"); |
4793 | e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM); | 4731 | e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM); |
4794 | return -E1000_ERR_EEPROM; | 4732 | return -E1000_ERR_EEPROM; |
@@ -4801,15 +4739,15 @@ e1000_acquire_eeprom(struct e1000_hw *hw) | |||
4801 | if (eeprom->type == e1000_eeprom_microwire) { | 4739 | if (eeprom->type == e1000_eeprom_microwire) { |
4802 | /* Clear SK and DI */ | 4740 | /* Clear SK and DI */ |
4803 | eecd &= ~(E1000_EECD_DI | E1000_EECD_SK); | 4741 | eecd &= ~(E1000_EECD_DI | E1000_EECD_SK); |
4804 | E1000_WRITE_REG(hw, EECD, eecd); | 4742 | ew32(EECD, eecd); |
4805 | 4743 | ||
4806 | /* Set CS */ | 4744 | /* Set CS */ |
4807 | eecd |= E1000_EECD_CS; | 4745 | eecd |= E1000_EECD_CS; |
4808 | E1000_WRITE_REG(hw, EECD, eecd); | 4746 | ew32(EECD, eecd); |
4809 | } else if (eeprom->type == e1000_eeprom_spi) { | 4747 | } else if (eeprom->type == e1000_eeprom_spi) { |
4810 | /* Clear SK and CS */ | 4748 | /* Clear SK and CS */ |
4811 | eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); | 4749 | eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); |
4812 | E1000_WRITE_REG(hw, EECD, eecd); | 4750 | ew32(EECD, eecd); |
4813 | udelay(1); | 4751 | udelay(1); |
4814 | } | 4752 | } |
4815 | 4753 | ||
@@ -4821,46 +4759,45 @@ e1000_acquire_eeprom(struct e1000_hw *hw) | |||
4821 | * | 4759 | * |
4822 | * hw - Struct containing variables accessed by shared code | 4760 | * hw - Struct containing variables accessed by shared code |
4823 | *****************************************************************************/ | 4761 | *****************************************************************************/ |
4824 | static void | 4762 | static void e1000_standby_eeprom(struct e1000_hw *hw) |
4825 | e1000_standby_eeprom(struct e1000_hw *hw) | ||
4826 | { | 4763 | { |
4827 | struct e1000_eeprom_info *eeprom = &hw->eeprom; | 4764 | struct e1000_eeprom_info *eeprom = &hw->eeprom; |
4828 | u32 eecd; | 4765 | u32 eecd; |
4829 | 4766 | ||
4830 | eecd = E1000_READ_REG(hw, EECD); | 4767 | eecd = er32(EECD); |
4831 | 4768 | ||
4832 | if (eeprom->type == e1000_eeprom_microwire) { | 4769 | if (eeprom->type == e1000_eeprom_microwire) { |
4833 | eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); | 4770 | eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); |
4834 | E1000_WRITE_REG(hw, EECD, eecd); | 4771 | ew32(EECD, eecd); |
4835 | E1000_WRITE_FLUSH(hw); | 4772 | E1000_WRITE_FLUSH(); |
4836 | udelay(eeprom->delay_usec); | 4773 | udelay(eeprom->delay_usec); |
4837 | 4774 | ||
4838 | /* Clock high */ | 4775 | /* Clock high */ |
4839 | eecd |= E1000_EECD_SK; | 4776 | eecd |= E1000_EECD_SK; |
4840 | E1000_WRITE_REG(hw, EECD, eecd); | 4777 | ew32(EECD, eecd); |
4841 | E1000_WRITE_FLUSH(hw); | 4778 | E1000_WRITE_FLUSH(); |
4842 | udelay(eeprom->delay_usec); | 4779 | udelay(eeprom->delay_usec); |
4843 | 4780 | ||
4844 | /* Select EEPROM */ | 4781 | /* Select EEPROM */ |
4845 | eecd |= E1000_EECD_CS; | 4782 | eecd |= E1000_EECD_CS; |
4846 | E1000_WRITE_REG(hw, EECD, eecd); | 4783 | ew32(EECD, eecd); |
4847 | E1000_WRITE_FLUSH(hw); | 4784 | E1000_WRITE_FLUSH(); |
4848 | udelay(eeprom->delay_usec); | 4785 | udelay(eeprom->delay_usec); |
4849 | 4786 | ||
4850 | /* Clock low */ | 4787 | /* Clock low */ |
4851 | eecd &= ~E1000_EECD_SK; | 4788 | eecd &= ~E1000_EECD_SK; |
4852 | E1000_WRITE_REG(hw, EECD, eecd); | 4789 | ew32(EECD, eecd); |
4853 | E1000_WRITE_FLUSH(hw); | 4790 | E1000_WRITE_FLUSH(); |
4854 | udelay(eeprom->delay_usec); | 4791 | udelay(eeprom->delay_usec); |
4855 | } else if (eeprom->type == e1000_eeprom_spi) { | 4792 | } else if (eeprom->type == e1000_eeprom_spi) { |
4856 | /* Toggle CS to flush commands */ | 4793 | /* Toggle CS to flush commands */ |
4857 | eecd |= E1000_EECD_CS; | 4794 | eecd |= E1000_EECD_CS; |
4858 | E1000_WRITE_REG(hw, EECD, eecd); | 4795 | ew32(EECD, eecd); |
4859 | E1000_WRITE_FLUSH(hw); | 4796 | E1000_WRITE_FLUSH(); |
4860 | udelay(eeprom->delay_usec); | 4797 | udelay(eeprom->delay_usec); |
4861 | eecd &= ~E1000_EECD_CS; | 4798 | eecd &= ~E1000_EECD_CS; |
4862 | E1000_WRITE_REG(hw, EECD, eecd); | 4799 | ew32(EECD, eecd); |
4863 | E1000_WRITE_FLUSH(hw); | 4800 | E1000_WRITE_FLUSH(); |
4864 | udelay(eeprom->delay_usec); | 4801 | udelay(eeprom->delay_usec); |
4865 | } | 4802 | } |
4866 | } | 4803 | } |
@@ -4870,20 +4807,19 @@ e1000_standby_eeprom(struct e1000_hw *hw) | |||
4870 | * | 4807 | * |
4871 | * hw - Struct containing variables accessed by shared code | 4808 | * hw - Struct containing variables accessed by shared code |
4872 | *****************************************************************************/ | 4809 | *****************************************************************************/ |
4873 | static void | 4810 | static void e1000_release_eeprom(struct e1000_hw *hw) |
4874 | e1000_release_eeprom(struct e1000_hw *hw) | ||
4875 | { | 4811 | { |
4876 | u32 eecd; | 4812 | u32 eecd; |
4877 | 4813 | ||
4878 | DEBUGFUNC("e1000_release_eeprom"); | 4814 | DEBUGFUNC("e1000_release_eeprom"); |
4879 | 4815 | ||
4880 | eecd = E1000_READ_REG(hw, EECD); | 4816 | eecd = er32(EECD); |
4881 | 4817 | ||
4882 | if (hw->eeprom.type == e1000_eeprom_spi) { | 4818 | if (hw->eeprom.type == e1000_eeprom_spi) { |
4883 | eecd |= E1000_EECD_CS; /* Pull CS high */ | 4819 | eecd |= E1000_EECD_CS; /* Pull CS high */ |
4884 | eecd &= ~E1000_EECD_SK; /* Lower SCK */ | 4820 | eecd &= ~E1000_EECD_SK; /* Lower SCK */ |
4885 | 4821 | ||
4886 | E1000_WRITE_REG(hw, EECD, eecd); | 4822 | ew32(EECD, eecd); |
4887 | 4823 | ||
4888 | udelay(hw->eeprom.delay_usec); | 4824 | udelay(hw->eeprom.delay_usec); |
4889 | } else if (hw->eeprom.type == e1000_eeprom_microwire) { | 4825 | } else if (hw->eeprom.type == e1000_eeprom_microwire) { |
@@ -4892,25 +4828,25 @@ e1000_release_eeprom(struct e1000_hw *hw) | |||
4892 | /* CS on Microwire is active-high */ | 4828 | /* CS on Microwire is active-high */ |
4893 | eecd &= ~(E1000_EECD_CS | E1000_EECD_DI); | 4829 | eecd &= ~(E1000_EECD_CS | E1000_EECD_DI); |
4894 | 4830 | ||
4895 | E1000_WRITE_REG(hw, EECD, eecd); | 4831 | ew32(EECD, eecd); |
4896 | 4832 | ||
4897 | /* Rising edge of clock */ | 4833 | /* Rising edge of clock */ |
4898 | eecd |= E1000_EECD_SK; | 4834 | eecd |= E1000_EECD_SK; |
4899 | E1000_WRITE_REG(hw, EECD, eecd); | 4835 | ew32(EECD, eecd); |
4900 | E1000_WRITE_FLUSH(hw); | 4836 | E1000_WRITE_FLUSH(); |
4901 | udelay(hw->eeprom.delay_usec); | 4837 | udelay(hw->eeprom.delay_usec); |
4902 | 4838 | ||
4903 | /* Falling edge of clock */ | 4839 | /* Falling edge of clock */ |
4904 | eecd &= ~E1000_EECD_SK; | 4840 | eecd &= ~E1000_EECD_SK; |
4905 | E1000_WRITE_REG(hw, EECD, eecd); | 4841 | ew32(EECD, eecd); |
4906 | E1000_WRITE_FLUSH(hw); | 4842 | E1000_WRITE_FLUSH(); |
4907 | udelay(hw->eeprom.delay_usec); | 4843 | udelay(hw->eeprom.delay_usec); |
4908 | } | 4844 | } |
4909 | 4845 | ||
4910 | /* Stop requesting EEPROM access */ | 4846 | /* Stop requesting EEPROM access */ |
4911 | if (hw->mac_type > e1000_82544) { | 4847 | if (hw->mac_type > e1000_82544) { |
4912 | eecd &= ~E1000_EECD_REQ; | 4848 | eecd &= ~E1000_EECD_REQ; |
4913 | E1000_WRITE_REG(hw, EECD, eecd); | 4849 | ew32(EECD, eecd); |
4914 | } | 4850 | } |
4915 | 4851 | ||
4916 | e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM); | 4852 | e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM); |
@@ -4921,8 +4857,7 @@ e1000_release_eeprom(struct e1000_hw *hw) | |||
4921 | * | 4857 | * |
4922 | * hw - Struct containing variables accessed by shared code | 4858 | * hw - Struct containing variables accessed by shared code |
4923 | *****************************************************************************/ | 4859 | *****************************************************************************/ |
4924 | static s32 | 4860 | static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw) |
4925 | e1000_spi_eeprom_ready(struct e1000_hw *hw) | ||
4926 | { | 4861 | { |
4927 | u16 retry_count = 0; | 4862 | u16 retry_count = 0; |
4928 | u8 spi_stat_reg; | 4863 | u8 spi_stat_reg; |
@@ -4967,11 +4902,7 @@ e1000_spi_eeprom_ready(struct e1000_hw *hw) | |||
4967 | * data - word read from the EEPROM | 4902 | * data - word read from the EEPROM |
4968 | * words - number of words to read | 4903 | * words - number of words to read |
4969 | *****************************************************************************/ | 4904 | *****************************************************************************/ |
4970 | s32 | 4905 | s32 e1000_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) |
4971 | e1000_read_eeprom(struct e1000_hw *hw, | ||
4972 | u16 offset, | ||
4973 | u16 words, | ||
4974 | u16 *data) | ||
4975 | { | 4906 | { |
4976 | struct e1000_eeprom_info *eeprom = &hw->eeprom; | 4907 | struct e1000_eeprom_info *eeprom = &hw->eeprom; |
4977 | u32 i = 0; | 4908 | u32 i = 0; |
@@ -5068,11 +4999,8 @@ e1000_read_eeprom(struct e1000_hw *hw, | |||
5068 | * data - word read from the EEPROM | 4999 | * data - word read from the EEPROM |
5069 | * words - number of words to read | 5000 | * words - number of words to read |
5070 | *****************************************************************************/ | 5001 | *****************************************************************************/ |
5071 | static s32 | 5002 | static s32 e1000_read_eeprom_eerd(struct e1000_hw *hw, u16 offset, u16 words, |
5072 | e1000_read_eeprom_eerd(struct e1000_hw *hw, | 5003 | u16 *data) |
5073 | u16 offset, | ||
5074 | u16 words, | ||
5075 | u16 *data) | ||
5076 | { | 5004 | { |
5077 | u32 i, eerd = 0; | 5005 | u32 i, eerd = 0; |
5078 | s32 error = 0; | 5006 | s32 error = 0; |
@@ -5081,13 +5009,13 @@ e1000_read_eeprom_eerd(struct e1000_hw *hw, | |||
5081 | eerd = ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) + | 5009 | eerd = ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) + |
5082 | E1000_EEPROM_RW_REG_START; | 5010 | E1000_EEPROM_RW_REG_START; |
5083 | 5011 | ||
5084 | E1000_WRITE_REG(hw, EERD, eerd); | 5012 | ew32(EERD, eerd); |
5085 | error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_READ); | 5013 | error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_READ); |
5086 | 5014 | ||
5087 | if (error) { | 5015 | if (error) { |
5088 | break; | 5016 | break; |
5089 | } | 5017 | } |
5090 | data[i] = (E1000_READ_REG(hw, EERD) >> E1000_EEPROM_RW_REG_DATA); | 5018 | data[i] = (er32(EERD) >> E1000_EEPROM_RW_REG_DATA); |
5091 | 5019 | ||
5092 | } | 5020 | } |
5093 | 5021 | ||
@@ -5102,11 +5030,8 @@ e1000_read_eeprom_eerd(struct e1000_hw *hw, | |||
5102 | * data - word read from the EEPROM | 5030 | * data - word read from the EEPROM |
5103 | * words - number of words to read | 5031 | * words - number of words to read |
5104 | *****************************************************************************/ | 5032 | *****************************************************************************/ |
5105 | static s32 | 5033 | static s32 e1000_write_eeprom_eewr(struct e1000_hw *hw, u16 offset, u16 words, |
5106 | e1000_write_eeprom_eewr(struct e1000_hw *hw, | 5034 | u16 *data) |
5107 | u16 offset, | ||
5108 | u16 words, | ||
5109 | u16 *data) | ||
5110 | { | 5035 | { |
5111 | u32 register_value = 0; | 5036 | u32 register_value = 0; |
5112 | u32 i = 0; | 5037 | u32 i = 0; |
@@ -5125,7 +5050,7 @@ e1000_write_eeprom_eewr(struct e1000_hw *hw, | |||
5125 | break; | 5050 | break; |
5126 | } | 5051 | } |
5127 | 5052 | ||
5128 | E1000_WRITE_REG(hw, EEWR, register_value); | 5053 | ew32(EEWR, register_value); |
5129 | 5054 | ||
5130 | error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE); | 5055 | error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE); |
5131 | 5056 | ||
@@ -5143,8 +5068,7 @@ e1000_write_eeprom_eewr(struct e1000_hw *hw, | |||
5143 | * | 5068 | * |
5144 | * hw - Struct containing variables accessed by shared code | 5069 | * hw - Struct containing variables accessed by shared code |
5145 | *****************************************************************************/ | 5070 | *****************************************************************************/ |
5146 | static s32 | 5071 | static s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd) |
5147 | e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd) | ||
5148 | { | 5072 | { |
5149 | u32 attempts = 100000; | 5073 | u32 attempts = 100000; |
5150 | u32 i, reg = 0; | 5074 | u32 i, reg = 0; |
@@ -5152,9 +5076,9 @@ e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd) | |||
5152 | 5076 | ||
5153 | for (i = 0; i < attempts; i++) { | 5077 | for (i = 0; i < attempts; i++) { |
5154 | if (eerd == E1000_EEPROM_POLL_READ) | 5078 | if (eerd == E1000_EEPROM_POLL_READ) |
5155 | reg = E1000_READ_REG(hw, EERD); | 5079 | reg = er32(EERD); |
5156 | else | 5080 | else |
5157 | reg = E1000_READ_REG(hw, EEWR); | 5081 | reg = er32(EEWR); |
5158 | 5082 | ||
5159 | if (reg & E1000_EEPROM_RW_REG_DONE) { | 5083 | if (reg & E1000_EEPROM_RW_REG_DONE) { |
5160 | done = E1000_SUCCESS; | 5084 | done = E1000_SUCCESS; |
@@ -5171,8 +5095,7 @@ e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd) | |||
5171 | * | 5095 | * |
5172 | * hw - Struct containing variables accessed by shared code | 5096 | * hw - Struct containing variables accessed by shared code |
5173 | ****************************************************************************/ | 5097 | ****************************************************************************/ |
5174 | static bool | 5098 | static bool e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw) |
5175 | e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw) | ||
5176 | { | 5099 | { |
5177 | u32 eecd = 0; | 5100 | u32 eecd = 0; |
5178 | 5101 | ||
@@ -5182,7 +5105,7 @@ e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw) | |||
5182 | return false; | 5105 | return false; |
5183 | 5106 | ||
5184 | if (hw->mac_type == e1000_82573) { | 5107 | if (hw->mac_type == e1000_82573) { |
5185 | eecd = E1000_READ_REG(hw, EECD); | 5108 | eecd = er32(EECD); |
5186 | 5109 | ||
5187 | /* Isolate bits 15 & 16 */ | 5110 | /* Isolate bits 15 & 16 */ |
5188 | eecd = ((eecd >> 15) & 0x03); | 5111 | eecd = ((eecd >> 15) & 0x03); |
@@ -5204,8 +5127,7 @@ e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw) | |||
5204 | * If the the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is | 5127 | * If the the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is |
5205 | * valid. | 5128 | * valid. |
5206 | *****************************************************************************/ | 5129 | *****************************************************************************/ |
5207 | s32 | 5130 | s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw) |
5208 | e1000_validate_eeprom_checksum(struct e1000_hw *hw) | ||
5209 | { | 5131 | { |
5210 | u16 checksum = 0; | 5132 | u16 checksum = 0; |
5211 | u16 i, eeprom_data; | 5133 | u16 i, eeprom_data; |
@@ -5252,7 +5174,7 @@ e1000_validate_eeprom_checksum(struct e1000_hw *hw) | |||
5252 | checksum += eeprom_data; | 5174 | checksum += eeprom_data; |
5253 | } | 5175 | } |
5254 | 5176 | ||
5255 | if (checksum == (u16) EEPROM_SUM) | 5177 | if (checksum == (u16)EEPROM_SUM) |
5256 | return E1000_SUCCESS; | 5178 | return E1000_SUCCESS; |
5257 | else { | 5179 | else { |
5258 | DEBUGOUT("EEPROM Checksum Invalid\n"); | 5180 | DEBUGOUT("EEPROM Checksum Invalid\n"); |
@@ -5268,8 +5190,7 @@ e1000_validate_eeprom_checksum(struct e1000_hw *hw) | |||
5268 | * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA. | 5190 | * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA. |
5269 | * Writes the difference to word offset 63 of the EEPROM. | 5191 | * Writes the difference to word offset 63 of the EEPROM. |
5270 | *****************************************************************************/ | 5192 | *****************************************************************************/ |
5271 | s32 | 5193 | s32 e1000_update_eeprom_checksum(struct e1000_hw *hw) |
5272 | e1000_update_eeprom_checksum(struct e1000_hw *hw) | ||
5273 | { | 5194 | { |
5274 | u32 ctrl_ext; | 5195 | u32 ctrl_ext; |
5275 | u16 checksum = 0; | 5196 | u16 checksum = 0; |
@@ -5284,7 +5205,7 @@ e1000_update_eeprom_checksum(struct e1000_hw *hw) | |||
5284 | } | 5205 | } |
5285 | checksum += eeprom_data; | 5206 | checksum += eeprom_data; |
5286 | } | 5207 | } |
5287 | checksum = (u16) EEPROM_SUM - checksum; | 5208 | checksum = (u16)EEPROM_SUM - checksum; |
5288 | if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) { | 5209 | if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) { |
5289 | DEBUGOUT("EEPROM Write Error\n"); | 5210 | DEBUGOUT("EEPROM Write Error\n"); |
5290 | return -E1000_ERR_EEPROM; | 5211 | return -E1000_ERR_EEPROM; |
@@ -5294,9 +5215,9 @@ e1000_update_eeprom_checksum(struct e1000_hw *hw) | |||
5294 | e1000_commit_shadow_ram(hw); | 5215 | e1000_commit_shadow_ram(hw); |
5295 | /* Reload the EEPROM, or else modifications will not appear | 5216 | /* Reload the EEPROM, or else modifications will not appear |
5296 | * until after next adapter reset. */ | 5217 | * until after next adapter reset. */ |
5297 | ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); | 5218 | ctrl_ext = er32(CTRL_EXT); |
5298 | ctrl_ext |= E1000_CTRL_EXT_EE_RST; | 5219 | ctrl_ext |= E1000_CTRL_EXT_EE_RST; |
5299 | E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); | 5220 | ew32(CTRL_EXT, ctrl_ext); |
5300 | msleep(10); | 5221 | msleep(10); |
5301 | } | 5222 | } |
5302 | return E1000_SUCCESS; | 5223 | return E1000_SUCCESS; |
@@ -5313,11 +5234,7 @@ e1000_update_eeprom_checksum(struct e1000_hw *hw) | |||
5313 | * If e1000_update_eeprom_checksum is not called after this function, the | 5234 | * If e1000_update_eeprom_checksum is not called after this function, the |
5314 | * EEPROM will most likely contain an invalid checksum. | 5235 | * EEPROM will most likely contain an invalid checksum. |
5315 | *****************************************************************************/ | 5236 | *****************************************************************************/ |
5316 | s32 | 5237 | s32 e1000_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) |
5317 | e1000_write_eeprom(struct e1000_hw *hw, | ||
5318 | u16 offset, | ||
5319 | u16 words, | ||
5320 | u16 *data) | ||
5321 | { | 5238 | { |
5322 | struct e1000_eeprom_info *eeprom = &hw->eeprom; | 5239 | struct e1000_eeprom_info *eeprom = &hw->eeprom; |
5323 | s32 status = 0; | 5240 | s32 status = 0; |
@@ -5370,11 +5287,8 @@ e1000_write_eeprom(struct e1000_hw *hw, | |||
5370 | * data - pointer to array of 8 bit words to be written to the EEPROM | 5287 | * data - pointer to array of 8 bit words to be written to the EEPROM |
5371 | * | 5288 | * |
5372 | *****************************************************************************/ | 5289 | *****************************************************************************/ |
5373 | static s32 | 5290 | static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words, |
5374 | e1000_write_eeprom_spi(struct e1000_hw *hw, | 5291 | u16 *data) |
5375 | u16 offset, | ||
5376 | u16 words, | ||
5377 | u16 *data) | ||
5378 | { | 5292 | { |
5379 | struct e1000_eeprom_info *eeprom = &hw->eeprom; | 5293 | struct e1000_eeprom_info *eeprom = &hw->eeprom; |
5380 | u16 widx = 0; | 5294 | u16 widx = 0; |
@@ -5436,11 +5350,8 @@ e1000_write_eeprom_spi(struct e1000_hw *hw, | |||
5436 | * data - pointer to array of 16 bit words to be written to the EEPROM | 5350 | * data - pointer to array of 16 bit words to be written to the EEPROM |
5437 | * | 5351 | * |
5438 | *****************************************************************************/ | 5352 | *****************************************************************************/ |
5439 | static s32 | 5353 | static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, |
5440 | e1000_write_eeprom_microwire(struct e1000_hw *hw, | 5354 | u16 words, u16 *data) |
5441 | u16 offset, | ||
5442 | u16 words, | ||
5443 | u16 *data) | ||
5444 | { | 5355 | { |
5445 | struct e1000_eeprom_info *eeprom = &hw->eeprom; | 5356 | struct e1000_eeprom_info *eeprom = &hw->eeprom; |
5446 | u32 eecd; | 5357 | u32 eecd; |
@@ -5484,7 +5395,7 @@ e1000_write_eeprom_microwire(struct e1000_hw *hw, | |||
5484 | * If DO does not go high in 10 milliseconds, then error out. | 5395 | * If DO does not go high in 10 milliseconds, then error out. |
5485 | */ | 5396 | */ |
5486 | for (i = 0; i < 200; i++) { | 5397 | for (i = 0; i < 200; i++) { |
5487 | eecd = E1000_READ_REG(hw, EECD); | 5398 | eecd = er32(EECD); |
5488 | if (eecd & E1000_EECD_DO) break; | 5399 | if (eecd & E1000_EECD_DO) break; |
5489 | udelay(50); | 5400 | udelay(50); |
5490 | } | 5401 | } |
@@ -5523,8 +5434,7 @@ e1000_write_eeprom_microwire(struct e1000_hw *hw, | |||
5523 | * data - word read from the EEPROM | 5434 | * data - word read from the EEPROM |
5524 | * words - number of words to read | 5435 | * words - number of words to read |
5525 | *****************************************************************************/ | 5436 | *****************************************************************************/ |
5526 | static s32 | 5437 | static s32 e1000_commit_shadow_ram(struct e1000_hw *hw) |
5527 | e1000_commit_shadow_ram(struct e1000_hw *hw) | ||
5528 | { | 5438 | { |
5529 | u32 attempts = 100000; | 5439 | u32 attempts = 100000; |
5530 | u32 eecd = 0; | 5440 | u32 eecd = 0; |
@@ -5539,9 +5449,9 @@ e1000_commit_shadow_ram(struct e1000_hw *hw) | |||
5539 | 5449 | ||
5540 | if (hw->mac_type == e1000_82573) { | 5450 | if (hw->mac_type == e1000_82573) { |
5541 | /* The flop register will be used to determine if flash type is STM */ | 5451 | /* The flop register will be used to determine if flash type is STM */ |
5542 | flop = E1000_READ_REG(hw, FLOP); | 5452 | flop = er32(FLOP); |
5543 | for (i=0; i < attempts; i++) { | 5453 | for (i=0; i < attempts; i++) { |
5544 | eecd = E1000_READ_REG(hw, EECD); | 5454 | eecd = er32(EECD); |
5545 | if ((eecd & E1000_EECD_FLUPD) == 0) { | 5455 | if ((eecd & E1000_EECD_FLUPD) == 0) { |
5546 | break; | 5456 | break; |
5547 | } | 5457 | } |
@@ -5554,14 +5464,14 @@ e1000_commit_shadow_ram(struct e1000_hw *hw) | |||
5554 | 5464 | ||
5555 | /* If STM opcode located in bits 15:8 of flop, reset firmware */ | 5465 | /* If STM opcode located in bits 15:8 of flop, reset firmware */ |
5556 | if ((flop & 0xFF00) == E1000_STM_OPCODE) { | 5466 | if ((flop & 0xFF00) == E1000_STM_OPCODE) { |
5557 | E1000_WRITE_REG(hw, HICR, E1000_HICR_FW_RESET); | 5467 | ew32(HICR, E1000_HICR_FW_RESET); |
5558 | } | 5468 | } |
5559 | 5469 | ||
5560 | /* Perform the flash update */ | 5470 | /* Perform the flash update */ |
5561 | E1000_WRITE_REG(hw, EECD, eecd | E1000_EECD_FLUPD); | 5471 | ew32(EECD, eecd | E1000_EECD_FLUPD); |
5562 | 5472 | ||
5563 | for (i=0; i < attempts; i++) { | 5473 | for (i=0; i < attempts; i++) { |
5564 | eecd = E1000_READ_REG(hw, EECD); | 5474 | eecd = er32(EECD); |
5565 | if ((eecd & E1000_EECD_FLUPD) == 0) { | 5475 | if ((eecd & E1000_EECD_FLUPD) == 0) { |
5566 | break; | 5476 | break; |
5567 | } | 5477 | } |
@@ -5577,7 +5487,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw) | |||
5577 | /* We're writing to the opposite bank so if we're on bank 1, | 5487 | /* We're writing to the opposite bank so if we're on bank 1, |
5578 | * write to bank 0 etc. We also need to erase the segment that | 5488 | * write to bank 0 etc. We also need to erase the segment that |
5579 | * is going to be written */ | 5489 | * is going to be written */ |
5580 | if (!(E1000_READ_REG(hw, EECD) & E1000_EECD_SEC1VAL)) { | 5490 | if (!(er32(EECD) & E1000_EECD_SEC1VAL)) { |
5581 | new_bank_offset = hw->flash_bank_size * 2; | 5491 | new_bank_offset = hw->flash_bank_size * 2; |
5582 | old_bank_offset = 0; | 5492 | old_bank_offset = 0; |
5583 | e1000_erase_ich8_4k_segment(hw, 1); | 5493 | e1000_erase_ich8_4k_segment(hw, 1); |
@@ -5687,8 +5597,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw) | |||
5687 | * | 5597 | * |
5688 | * hw - Struct containing variables accessed by shared code | 5598 | * hw - Struct containing variables accessed by shared code |
5689 | *****************************************************************************/ | 5599 | *****************************************************************************/ |
5690 | s32 | 5600 | s32 e1000_read_mac_addr(struct e1000_hw *hw) |
5691 | e1000_read_mac_addr(struct e1000_hw * hw) | ||
5692 | { | 5601 | { |
5693 | u16 offset; | 5602 | u16 offset; |
5694 | u16 eeprom_data, i; | 5603 | u16 eeprom_data, i; |
@@ -5701,8 +5610,8 @@ e1000_read_mac_addr(struct e1000_hw * hw) | |||
5701 | DEBUGOUT("EEPROM Read Error\n"); | 5610 | DEBUGOUT("EEPROM Read Error\n"); |
5702 | return -E1000_ERR_EEPROM; | 5611 | return -E1000_ERR_EEPROM; |
5703 | } | 5612 | } |
5704 | hw->perm_mac_addr[i] = (u8) (eeprom_data & 0x00FF); | 5613 | hw->perm_mac_addr[i] = (u8)(eeprom_data & 0x00FF); |
5705 | hw->perm_mac_addr[i+1] = (u8) (eeprom_data >> 8); | 5614 | hw->perm_mac_addr[i+1] = (u8)(eeprom_data >> 8); |
5706 | } | 5615 | } |
5707 | 5616 | ||
5708 | switch (hw->mac_type) { | 5617 | switch (hw->mac_type) { |
@@ -5712,7 +5621,7 @@ e1000_read_mac_addr(struct e1000_hw * hw) | |||
5712 | case e1000_82546_rev_3: | 5621 | case e1000_82546_rev_3: |
5713 | case e1000_82571: | 5622 | case e1000_82571: |
5714 | case e1000_80003es2lan: | 5623 | case e1000_80003es2lan: |
5715 | if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) | 5624 | if (er32(STATUS) & E1000_STATUS_FUNC_1) |
5716 | hw->perm_mac_addr[5] ^= 0x01; | 5625 | hw->perm_mac_addr[5] ^= 0x01; |
5717 | break; | 5626 | break; |
5718 | } | 5627 | } |
@@ -5731,8 +5640,7 @@ e1000_read_mac_addr(struct e1000_hw * hw) | |||
5731 | * of the receive addresss registers. Clears the multicast table. Assumes | 5640 | * of the receive addresss registers. Clears the multicast table. Assumes |
5732 | * the receiver is in reset when the routine is called. | 5641 | * the receiver is in reset when the routine is called. |
5733 | *****************************************************************************/ | 5642 | *****************************************************************************/ |
5734 | static void | 5643 | static void e1000_init_rx_addrs(struct e1000_hw *hw) |
5735 | e1000_init_rx_addrs(struct e1000_hw *hw) | ||
5736 | { | 5644 | { |
5737 | u32 i; | 5645 | u32 i; |
5738 | u32 rar_num; | 5646 | u32 rar_num; |
@@ -5758,9 +5666,9 @@ e1000_init_rx_addrs(struct e1000_hw *hw) | |||
5758 | DEBUGOUT("Clearing RAR[1-15]\n"); | 5666 | DEBUGOUT("Clearing RAR[1-15]\n"); |
5759 | for (i = 1; i < rar_num; i++) { | 5667 | for (i = 1; i < rar_num; i++) { |
5760 | E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); | 5668 | E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); |
5761 | E1000_WRITE_FLUSH(hw); | 5669 | E1000_WRITE_FLUSH(); |
5762 | E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); | 5670 | E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); |
5763 | E1000_WRITE_FLUSH(hw); | 5671 | E1000_WRITE_FLUSH(); |
5764 | } | 5672 | } |
5765 | } | 5673 | } |
5766 | 5674 | ||
@@ -5770,9 +5678,7 @@ e1000_init_rx_addrs(struct e1000_hw *hw) | |||
5770 | * hw - Struct containing variables accessed by shared code | 5678 | * hw - Struct containing variables accessed by shared code |
5771 | * mc_addr - the multicast address to hash | 5679 | * mc_addr - the multicast address to hash |
5772 | *****************************************************************************/ | 5680 | *****************************************************************************/ |
5773 | u32 | 5681 | u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) |
5774 | e1000_hash_mc_addr(struct e1000_hw *hw, | ||
5775 | u8 *mc_addr) | ||
5776 | { | 5682 | { |
5777 | u32 hash_value = 0; | 5683 | u32 hash_value = 0; |
5778 | 5684 | ||
@@ -5787,37 +5693,37 @@ e1000_hash_mc_addr(struct e1000_hw *hw, | |||
5787 | case 0: | 5693 | case 0: |
5788 | if (hw->mac_type == e1000_ich8lan) { | 5694 | if (hw->mac_type == e1000_ich8lan) { |
5789 | /* [47:38] i.e. 0x158 for above example address */ | 5695 | /* [47:38] i.e. 0x158 for above example address */ |
5790 | hash_value = ((mc_addr[4] >> 6) | (((u16) mc_addr[5]) << 2)); | 5696 | hash_value = ((mc_addr[4] >> 6) | (((u16)mc_addr[5]) << 2)); |
5791 | } else { | 5697 | } else { |
5792 | /* [47:36] i.e. 0x563 for above example address */ | 5698 | /* [47:36] i.e. 0x563 for above example address */ |
5793 | hash_value = ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4)); | 5699 | hash_value = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); |
5794 | } | 5700 | } |
5795 | break; | 5701 | break; |
5796 | case 1: | 5702 | case 1: |
5797 | if (hw->mac_type == e1000_ich8lan) { | 5703 | if (hw->mac_type == e1000_ich8lan) { |
5798 | /* [46:37] i.e. 0x2B1 for above example address */ | 5704 | /* [46:37] i.e. 0x2B1 for above example address */ |
5799 | hash_value = ((mc_addr[4] >> 5) | (((u16) mc_addr[5]) << 3)); | 5705 | hash_value = ((mc_addr[4] >> 5) | (((u16)mc_addr[5]) << 3)); |
5800 | } else { | 5706 | } else { |
5801 | /* [46:35] i.e. 0xAC6 for above example address */ | 5707 | /* [46:35] i.e. 0xAC6 for above example address */ |
5802 | hash_value = ((mc_addr[4] >> 3) | (((u16) mc_addr[5]) << 5)); | 5708 | hash_value = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); |
5803 | } | 5709 | } |
5804 | break; | 5710 | break; |
5805 | case 2: | 5711 | case 2: |
5806 | if (hw->mac_type == e1000_ich8lan) { | 5712 | if (hw->mac_type == e1000_ich8lan) { |
5807 | /*[45:36] i.e. 0x163 for above example address */ | 5713 | /*[45:36] i.e. 0x163 for above example address */ |
5808 | hash_value = ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4)); | 5714 | hash_value = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); |
5809 | } else { | 5715 | } else { |
5810 | /* [45:34] i.e. 0x5D8 for above example address */ | 5716 | /* [45:34] i.e. 0x5D8 for above example address */ |
5811 | hash_value = ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6)); | 5717 | hash_value = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); |
5812 | } | 5718 | } |
5813 | break; | 5719 | break; |
5814 | case 3: | 5720 | case 3: |
5815 | if (hw->mac_type == e1000_ich8lan) { | 5721 | if (hw->mac_type == e1000_ich8lan) { |
5816 | /* [43:34] i.e. 0x18D for above example address */ | 5722 | /* [43:34] i.e. 0x18D for above example address */ |
5817 | hash_value = ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6)); | 5723 | hash_value = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); |
5818 | } else { | 5724 | } else { |
5819 | /* [43:32] i.e. 0x634 for above example address */ | 5725 | /* [43:32] i.e. 0x634 for above example address */ |
5820 | hash_value = ((mc_addr[4]) | (((u16) mc_addr[5]) << 8)); | 5726 | hash_value = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); |
5821 | } | 5727 | } |
5822 | break; | 5728 | break; |
5823 | } | 5729 | } |
@@ -5835,9 +5741,7 @@ e1000_hash_mc_addr(struct e1000_hw *hw, | |||
5835 | * hw - Struct containing variables accessed by shared code | 5741 | * hw - Struct containing variables accessed by shared code |
5836 | * hash_value - Multicast address hash value | 5742 | * hash_value - Multicast address hash value |
5837 | *****************************************************************************/ | 5743 | *****************************************************************************/ |
5838 | void | 5744 | void e1000_mta_set(struct e1000_hw *hw, u32 hash_value) |
5839 | e1000_mta_set(struct e1000_hw *hw, | ||
5840 | u32 hash_value) | ||
5841 | { | 5745 | { |
5842 | u32 hash_bit, hash_reg; | 5746 | u32 hash_bit, hash_reg; |
5843 | u32 mta; | 5747 | u32 mta; |
@@ -5868,12 +5772,12 @@ e1000_mta_set(struct e1000_hw *hw, | |||
5868 | if ((hw->mac_type == e1000_82544) && ((hash_reg & 0x1) == 1)) { | 5772 | if ((hw->mac_type == e1000_82544) && ((hash_reg & 0x1) == 1)) { |
5869 | temp = E1000_READ_REG_ARRAY(hw, MTA, (hash_reg - 1)); | 5773 | temp = E1000_READ_REG_ARRAY(hw, MTA, (hash_reg - 1)); |
5870 | E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta); | 5774 | E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta); |
5871 | E1000_WRITE_FLUSH(hw); | 5775 | E1000_WRITE_FLUSH(); |
5872 | E1000_WRITE_REG_ARRAY(hw, MTA, (hash_reg - 1), temp); | 5776 | E1000_WRITE_REG_ARRAY(hw, MTA, (hash_reg - 1), temp); |
5873 | E1000_WRITE_FLUSH(hw); | 5777 | E1000_WRITE_FLUSH(); |
5874 | } else { | 5778 | } else { |
5875 | E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta); | 5779 | E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta); |
5876 | E1000_WRITE_FLUSH(hw); | 5780 | E1000_WRITE_FLUSH(); |
5877 | } | 5781 | } |
5878 | } | 5782 | } |
5879 | 5783 | ||
@@ -5884,20 +5788,16 @@ e1000_mta_set(struct e1000_hw *hw, | |||
5884 | * addr - Address to put into receive address register | 5788 | * addr - Address to put into receive address register |
5885 | * index - Receive address register to write | 5789 | * index - Receive address register to write |
5886 | *****************************************************************************/ | 5790 | *****************************************************************************/ |
5887 | void | 5791 | void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) |
5888 | e1000_rar_set(struct e1000_hw *hw, | ||
5889 | u8 *addr, | ||
5890 | u32 index) | ||
5891 | { | 5792 | { |
5892 | u32 rar_low, rar_high; | 5793 | u32 rar_low, rar_high; |
5893 | 5794 | ||
5894 | /* HW expects these in little endian so we reverse the byte order | 5795 | /* HW expects these in little endian so we reverse the byte order |
5895 | * from network order (big endian) to little endian | 5796 | * from network order (big endian) to little endian |
5896 | */ | 5797 | */ |
5897 | rar_low = ((u32) addr[0] | | 5798 | rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | |
5898 | ((u32) addr[1] << 8) | | 5799 | ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); |
5899 | ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); | 5800 | rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); |
5900 | rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); | ||
5901 | 5801 | ||
5902 | /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx | 5802 | /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx |
5903 | * unit hang. | 5803 | * unit hang. |
@@ -5930,9 +5830,9 @@ e1000_rar_set(struct e1000_hw *hw, | |||
5930 | } | 5830 | } |
5931 | 5831 | ||
5932 | E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low); | 5832 | E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low); |
5933 | E1000_WRITE_FLUSH(hw); | 5833 | E1000_WRITE_FLUSH(); |
5934 | E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high); | 5834 | E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high); |
5935 | E1000_WRITE_FLUSH(hw); | 5835 | E1000_WRITE_FLUSH(); |
5936 | } | 5836 | } |
5937 | 5837 | ||
5938 | /****************************************************************************** | 5838 | /****************************************************************************** |
@@ -5942,10 +5842,7 @@ e1000_rar_set(struct e1000_hw *hw, | |||
5942 | * offset - Offset in VLAN filer table to write | 5842 | * offset - Offset in VLAN filer table to write |
5943 | * value - Value to write into VLAN filter table | 5843 | * value - Value to write into VLAN filter table |
5944 | *****************************************************************************/ | 5844 | *****************************************************************************/ |
5945 | void | 5845 | void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) |
5946 | e1000_write_vfta(struct e1000_hw *hw, | ||
5947 | u32 offset, | ||
5948 | u32 value) | ||
5949 | { | 5846 | { |
5950 | u32 temp; | 5847 | u32 temp; |
5951 | 5848 | ||
@@ -5955,12 +5852,12 @@ e1000_write_vfta(struct e1000_hw *hw, | |||
5955 | if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) { | 5852 | if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) { |
5956 | temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1)); | 5853 | temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1)); |
5957 | E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); | 5854 | E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); |
5958 | E1000_WRITE_FLUSH(hw); | 5855 | E1000_WRITE_FLUSH(); |
5959 | E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp); | 5856 | E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp); |
5960 | E1000_WRITE_FLUSH(hw); | 5857 | E1000_WRITE_FLUSH(); |
5961 | } else { | 5858 | } else { |
5962 | E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); | 5859 | E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); |
5963 | E1000_WRITE_FLUSH(hw); | 5860 | E1000_WRITE_FLUSH(); |
5964 | } | 5861 | } |
5965 | } | 5862 | } |
5966 | 5863 | ||
@@ -5969,8 +5866,7 @@ e1000_write_vfta(struct e1000_hw *hw, | |||
5969 | * | 5866 | * |
5970 | * hw - Struct containing variables accessed by shared code | 5867 | * hw - Struct containing variables accessed by shared code |
5971 | *****************************************************************************/ | 5868 | *****************************************************************************/ |
5972 | static void | 5869 | static void e1000_clear_vfta(struct e1000_hw *hw) |
5973 | e1000_clear_vfta(struct e1000_hw *hw) | ||
5974 | { | 5870 | { |
5975 | u32 offset; | 5871 | u32 offset; |
5976 | u32 vfta_value = 0; | 5872 | u32 vfta_value = 0; |
@@ -5999,12 +5895,11 @@ e1000_clear_vfta(struct e1000_hw *hw) | |||
5999 | * manageability unit */ | 5895 | * manageability unit */ |
6000 | vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0; | 5896 | vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0; |
6001 | E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value); | 5897 | E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value); |
6002 | E1000_WRITE_FLUSH(hw); | 5898 | E1000_WRITE_FLUSH(); |
6003 | } | 5899 | } |
6004 | } | 5900 | } |
6005 | 5901 | ||
6006 | static s32 | 5902 | static s32 e1000_id_led_init(struct e1000_hw *hw) |
6007 | e1000_id_led_init(struct e1000_hw * hw) | ||
6008 | { | 5903 | { |
6009 | u32 ledctl; | 5904 | u32 ledctl; |
6010 | const u32 ledctl_mask = 0x000000FF; | 5905 | const u32 ledctl_mask = 0x000000FF; |
@@ -6020,7 +5915,7 @@ e1000_id_led_init(struct e1000_hw * hw) | |||
6020 | return E1000_SUCCESS; | 5915 | return E1000_SUCCESS; |
6021 | } | 5916 | } |
6022 | 5917 | ||
6023 | ledctl = E1000_READ_REG(hw, LEDCTL); | 5918 | ledctl = er32(LEDCTL); |
6024 | hw->ledctl_default = ledctl; | 5919 | hw->ledctl_default = ledctl; |
6025 | hw->ledctl_mode1 = hw->ledctl_default; | 5920 | hw->ledctl_mode1 = hw->ledctl_default; |
6026 | hw->ledctl_mode2 = hw->ledctl_default; | 5921 | hw->ledctl_mode2 = hw->ledctl_default; |
@@ -6086,8 +5981,7 @@ e1000_id_led_init(struct e1000_hw * hw) | |||
6086 | * | 5981 | * |
6087 | * hw - Struct containing variables accessed by shared code | 5982 | * hw - Struct containing variables accessed by shared code |
6088 | *****************************************************************************/ | 5983 | *****************************************************************************/ |
6089 | s32 | 5984 | s32 e1000_setup_led(struct e1000_hw *hw) |
6090 | e1000_setup_led(struct e1000_hw *hw) | ||
6091 | { | 5985 | { |
6092 | u32 ledctl; | 5986 | u32 ledctl; |
6093 | s32 ret_val = E1000_SUCCESS; | 5987 | s32 ret_val = E1000_SUCCESS; |
@@ -6118,7 +6012,7 @@ e1000_setup_led(struct e1000_hw *hw) | |||
6118 | /* Fall Through */ | 6012 | /* Fall Through */ |
6119 | default: | 6013 | default: |
6120 | if (hw->media_type == e1000_media_type_fiber) { | 6014 | if (hw->media_type == e1000_media_type_fiber) { |
6121 | ledctl = E1000_READ_REG(hw, LEDCTL); | 6015 | ledctl = er32(LEDCTL); |
6122 | /* Save current LEDCTL settings */ | 6016 | /* Save current LEDCTL settings */ |
6123 | hw->ledctl_default = ledctl; | 6017 | hw->ledctl_default = ledctl; |
6124 | /* Turn off LED0 */ | 6018 | /* Turn off LED0 */ |
@@ -6127,9 +6021,9 @@ e1000_setup_led(struct e1000_hw *hw) | |||
6127 | E1000_LEDCTL_LED0_MODE_MASK); | 6021 | E1000_LEDCTL_LED0_MODE_MASK); |
6128 | ledctl |= (E1000_LEDCTL_MODE_LED_OFF << | 6022 | ledctl |= (E1000_LEDCTL_MODE_LED_OFF << |
6129 | E1000_LEDCTL_LED0_MODE_SHIFT); | 6023 | E1000_LEDCTL_LED0_MODE_SHIFT); |
6130 | E1000_WRITE_REG(hw, LEDCTL, ledctl); | 6024 | ew32(LEDCTL, ledctl); |
6131 | } else if (hw->media_type == e1000_media_type_copper) | 6025 | } else if (hw->media_type == e1000_media_type_copper) |
6132 | E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode1); | 6026 | ew32(LEDCTL, hw->ledctl_mode1); |
6133 | break; | 6027 | break; |
6134 | } | 6028 | } |
6135 | 6029 | ||
@@ -6145,8 +6039,7 @@ e1000_setup_led(struct e1000_hw *hw) | |||
6145 | * | 6039 | * |
6146 | * hw - Struct containing variables accessed by shared code | 6040 | * hw - Struct containing variables accessed by shared code |
6147 | *****************************************************************************/ | 6041 | *****************************************************************************/ |
6148 | s32 | 6042 | s32 e1000_blink_led_start(struct e1000_hw *hw) |
6149 | e1000_blink_led_start(struct e1000_hw *hw) | ||
6150 | { | 6043 | { |
6151 | s16 i; | 6044 | s16 i; |
6152 | u32 ledctl_blink = 0; | 6045 | u32 ledctl_blink = 0; |
@@ -6170,7 +6063,7 @@ e1000_blink_led_start(struct e1000_hw *hw) | |||
6170 | ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << (i * 8)); | 6063 | ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << (i * 8)); |
6171 | } | 6064 | } |
6172 | 6065 | ||
6173 | E1000_WRITE_REG(hw, LEDCTL, ledctl_blink); | 6066 | ew32(LEDCTL, ledctl_blink); |
6174 | 6067 | ||
6175 | return E1000_SUCCESS; | 6068 | return E1000_SUCCESS; |
6176 | } | 6069 | } |
@@ -6180,8 +6073,7 @@ e1000_blink_led_start(struct e1000_hw *hw) | |||
6180 | * | 6073 | * |
6181 | * hw - Struct containing variables accessed by shared code | 6074 | * hw - Struct containing variables accessed by shared code |
6182 | *****************************************************************************/ | 6075 | *****************************************************************************/ |
6183 | s32 | 6076 | s32 e1000_cleanup_led(struct e1000_hw *hw) |
6184 | e1000_cleanup_led(struct e1000_hw *hw) | ||
6185 | { | 6077 | { |
6186 | s32 ret_val = E1000_SUCCESS; | 6078 | s32 ret_val = E1000_SUCCESS; |
6187 | 6079 | ||
@@ -6210,7 +6102,7 @@ e1000_cleanup_led(struct e1000_hw *hw) | |||
6210 | break; | 6102 | break; |
6211 | } | 6103 | } |
6212 | /* Restore LEDCTL settings */ | 6104 | /* Restore LEDCTL settings */ |
6213 | E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_default); | 6105 | ew32(LEDCTL, hw->ledctl_default); |
6214 | break; | 6106 | break; |
6215 | } | 6107 | } |
6216 | 6108 | ||
@@ -6222,10 +6114,9 @@ e1000_cleanup_led(struct e1000_hw *hw) | |||
6222 | * | 6114 | * |
6223 | * hw - Struct containing variables accessed by shared code | 6115 | * hw - Struct containing variables accessed by shared code |
6224 | *****************************************************************************/ | 6116 | *****************************************************************************/ |
6225 | s32 | 6117 | s32 e1000_led_on(struct e1000_hw *hw) |
6226 | e1000_led_on(struct e1000_hw *hw) | ||
6227 | { | 6118 | { |
6228 | u32 ctrl = E1000_READ_REG(hw, CTRL); | 6119 | u32 ctrl = er32(CTRL); |
6229 | 6120 | ||
6230 | DEBUGFUNC("e1000_led_on"); | 6121 | DEBUGFUNC("e1000_led_on"); |
6231 | 6122 | ||
@@ -6257,13 +6148,13 @@ e1000_led_on(struct e1000_hw *hw) | |||
6257 | e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, | 6148 | e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, |
6258 | (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON)); | 6149 | (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON)); |
6259 | } else if (hw->media_type == e1000_media_type_copper) { | 6150 | } else if (hw->media_type == e1000_media_type_copper) { |
6260 | E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode2); | 6151 | ew32(LEDCTL, hw->ledctl_mode2); |
6261 | return E1000_SUCCESS; | 6152 | return E1000_SUCCESS; |
6262 | } | 6153 | } |
6263 | break; | 6154 | break; |
6264 | } | 6155 | } |
6265 | 6156 | ||
6266 | E1000_WRITE_REG(hw, CTRL, ctrl); | 6157 | ew32(CTRL, ctrl); |
6267 | 6158 | ||
6268 | return E1000_SUCCESS; | 6159 | return E1000_SUCCESS; |
6269 | } | 6160 | } |
@@ -6273,10 +6164,9 @@ e1000_led_on(struct e1000_hw *hw) | |||
6273 | * | 6164 | * |
6274 | * hw - Struct containing variables accessed by shared code | 6165 | * hw - Struct containing variables accessed by shared code |
6275 | *****************************************************************************/ | 6166 | *****************************************************************************/ |
6276 | s32 | 6167 | s32 e1000_led_off(struct e1000_hw *hw) |
6277 | e1000_led_off(struct e1000_hw *hw) | ||
6278 | { | 6168 | { |
6279 | u32 ctrl = E1000_READ_REG(hw, CTRL); | 6169 | u32 ctrl = er32(CTRL); |
6280 | 6170 | ||
6281 | DEBUGFUNC("e1000_led_off"); | 6171 | DEBUGFUNC("e1000_led_off"); |
6282 | 6172 | ||
@@ -6308,13 +6198,13 @@ e1000_led_off(struct e1000_hw *hw) | |||
6308 | e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, | 6198 | e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, |
6309 | (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF)); | 6199 | (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF)); |
6310 | } else if (hw->media_type == e1000_media_type_copper) { | 6200 | } else if (hw->media_type == e1000_media_type_copper) { |
6311 | E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode1); | 6201 | ew32(LEDCTL, hw->ledctl_mode1); |
6312 | return E1000_SUCCESS; | 6202 | return E1000_SUCCESS; |
6313 | } | 6203 | } |
6314 | break; | 6204 | break; |
6315 | } | 6205 | } |
6316 | 6206 | ||
6317 | E1000_WRITE_REG(hw, CTRL, ctrl); | 6207 | ew32(CTRL, ctrl); |
6318 | 6208 | ||
6319 | return E1000_SUCCESS; | 6209 | return E1000_SUCCESS; |
6320 | } | 6210 | } |
@@ -6324,98 +6214,97 @@ e1000_led_off(struct e1000_hw *hw) | |||
6324 | * | 6214 | * |
6325 | * hw - Struct containing variables accessed by shared code | 6215 | * hw - Struct containing variables accessed by shared code |
6326 | *****************************************************************************/ | 6216 | *****************************************************************************/ |
6327 | static void | 6217 | static void e1000_clear_hw_cntrs(struct e1000_hw *hw) |
6328 | e1000_clear_hw_cntrs(struct e1000_hw *hw) | ||
6329 | { | 6218 | { |
6330 | volatile u32 temp; | 6219 | volatile u32 temp; |
6331 | 6220 | ||
6332 | temp = E1000_READ_REG(hw, CRCERRS); | 6221 | temp = er32(CRCERRS); |
6333 | temp = E1000_READ_REG(hw, SYMERRS); | 6222 | temp = er32(SYMERRS); |
6334 | temp = E1000_READ_REG(hw, MPC); | 6223 | temp = er32(MPC); |
6335 | temp = E1000_READ_REG(hw, SCC); | 6224 | temp = er32(SCC); |
6336 | temp = E1000_READ_REG(hw, ECOL); | 6225 | temp = er32(ECOL); |
6337 | temp = E1000_READ_REG(hw, MCC); | 6226 | temp = er32(MCC); |
6338 | temp = E1000_READ_REG(hw, LATECOL); | 6227 | temp = er32(LATECOL); |
6339 | temp = E1000_READ_REG(hw, COLC); | 6228 | temp = er32(COLC); |
6340 | temp = E1000_READ_REG(hw, DC); | 6229 | temp = er32(DC); |
6341 | temp = E1000_READ_REG(hw, SEC); | 6230 | temp = er32(SEC); |
6342 | temp = E1000_READ_REG(hw, RLEC); | 6231 | temp = er32(RLEC); |
6343 | temp = E1000_READ_REG(hw, XONRXC); | 6232 | temp = er32(XONRXC); |
6344 | temp = E1000_READ_REG(hw, XONTXC); | 6233 | temp = er32(XONTXC); |
6345 | temp = E1000_READ_REG(hw, XOFFRXC); | 6234 | temp = er32(XOFFRXC); |
6346 | temp = E1000_READ_REG(hw, XOFFTXC); | 6235 | temp = er32(XOFFTXC); |
6347 | temp = E1000_READ_REG(hw, FCRUC); | 6236 | temp = er32(FCRUC); |
6348 | 6237 | ||
6349 | if (hw->mac_type != e1000_ich8lan) { | 6238 | if (hw->mac_type != e1000_ich8lan) { |
6350 | temp = E1000_READ_REG(hw, PRC64); | 6239 | temp = er32(PRC64); |
6351 | temp = E1000_READ_REG(hw, PRC127); | 6240 | temp = er32(PRC127); |
6352 | temp = E1000_READ_REG(hw, PRC255); | 6241 | temp = er32(PRC255); |
6353 | temp = E1000_READ_REG(hw, PRC511); | 6242 | temp = er32(PRC511); |
6354 | temp = E1000_READ_REG(hw, PRC1023); | 6243 | temp = er32(PRC1023); |
6355 | temp = E1000_READ_REG(hw, PRC1522); | 6244 | temp = er32(PRC1522); |
6356 | } | 6245 | } |
6357 | 6246 | ||
6358 | temp = E1000_READ_REG(hw, GPRC); | 6247 | temp = er32(GPRC); |
6359 | temp = E1000_READ_REG(hw, BPRC); | 6248 | temp = er32(BPRC); |
6360 | temp = E1000_READ_REG(hw, MPRC); | 6249 | temp = er32(MPRC); |
6361 | temp = E1000_READ_REG(hw, GPTC); | 6250 | temp = er32(GPTC); |
6362 | temp = E1000_READ_REG(hw, GORCL); | 6251 | temp = er32(GORCL); |
6363 | temp = E1000_READ_REG(hw, GORCH); | 6252 | temp = er32(GORCH); |
6364 | temp = E1000_READ_REG(hw, GOTCL); | 6253 | temp = er32(GOTCL); |
6365 | temp = E1000_READ_REG(hw, GOTCH); | 6254 | temp = er32(GOTCH); |
6366 | temp = E1000_READ_REG(hw, RNBC); | 6255 | temp = er32(RNBC); |
6367 | temp = E1000_READ_REG(hw, RUC); | 6256 | temp = er32(RUC); |
6368 | temp = E1000_READ_REG(hw, RFC); | 6257 | temp = er32(RFC); |
6369 | temp = E1000_READ_REG(hw, ROC); | 6258 | temp = er32(ROC); |
6370 | temp = E1000_READ_REG(hw, RJC); | 6259 | temp = er32(RJC); |
6371 | temp = E1000_READ_REG(hw, TORL); | 6260 | temp = er32(TORL); |
6372 | temp = E1000_READ_REG(hw, TORH); | 6261 | temp = er32(TORH); |
6373 | temp = E1000_READ_REG(hw, TOTL); | 6262 | temp = er32(TOTL); |
6374 | temp = E1000_READ_REG(hw, TOTH); | 6263 | temp = er32(TOTH); |
6375 | temp = E1000_READ_REG(hw, TPR); | 6264 | temp = er32(TPR); |
6376 | temp = E1000_READ_REG(hw, TPT); | 6265 | temp = er32(TPT); |
6377 | 6266 | ||
6378 | if (hw->mac_type != e1000_ich8lan) { | 6267 | if (hw->mac_type != e1000_ich8lan) { |
6379 | temp = E1000_READ_REG(hw, PTC64); | 6268 | temp = er32(PTC64); |
6380 | temp = E1000_READ_REG(hw, PTC127); | 6269 | temp = er32(PTC127); |
6381 | temp = E1000_READ_REG(hw, PTC255); | 6270 | temp = er32(PTC255); |
6382 | temp = E1000_READ_REG(hw, PTC511); | 6271 | temp = er32(PTC511); |
6383 | temp = E1000_READ_REG(hw, PTC1023); | 6272 | temp = er32(PTC1023); |
6384 | temp = E1000_READ_REG(hw, PTC1522); | 6273 | temp = er32(PTC1522); |
6385 | } | 6274 | } |
6386 | 6275 | ||
6387 | temp = E1000_READ_REG(hw, MPTC); | 6276 | temp = er32(MPTC); |
6388 | temp = E1000_READ_REG(hw, BPTC); | 6277 | temp = er32(BPTC); |
6389 | 6278 | ||
6390 | if (hw->mac_type < e1000_82543) return; | 6279 | if (hw->mac_type < e1000_82543) return; |
6391 | 6280 | ||
6392 | temp = E1000_READ_REG(hw, ALGNERRC); | 6281 | temp = er32(ALGNERRC); |
6393 | temp = E1000_READ_REG(hw, RXERRC); | 6282 | temp = er32(RXERRC); |
6394 | temp = E1000_READ_REG(hw, TNCRS); | 6283 | temp = er32(TNCRS); |
6395 | temp = E1000_READ_REG(hw, CEXTERR); | 6284 | temp = er32(CEXTERR); |
6396 | temp = E1000_READ_REG(hw, TSCTC); | 6285 | temp = er32(TSCTC); |
6397 | temp = E1000_READ_REG(hw, TSCTFC); | 6286 | temp = er32(TSCTFC); |
6398 | 6287 | ||
6399 | if (hw->mac_type <= e1000_82544) return; | 6288 | if (hw->mac_type <= e1000_82544) return; |
6400 | 6289 | ||
6401 | temp = E1000_READ_REG(hw, MGTPRC); | 6290 | temp = er32(MGTPRC); |
6402 | temp = E1000_READ_REG(hw, MGTPDC); | 6291 | temp = er32(MGTPDC); |
6403 | temp = E1000_READ_REG(hw, MGTPTC); | 6292 | temp = er32(MGTPTC); |
6404 | 6293 | ||
6405 | if (hw->mac_type <= e1000_82547_rev_2) return; | 6294 | if (hw->mac_type <= e1000_82547_rev_2) return; |
6406 | 6295 | ||
6407 | temp = E1000_READ_REG(hw, IAC); | 6296 | temp = er32(IAC); |
6408 | temp = E1000_READ_REG(hw, ICRXOC); | 6297 | temp = er32(ICRXOC); |
6409 | 6298 | ||
6410 | if (hw->mac_type == e1000_ich8lan) return; | 6299 | if (hw->mac_type == e1000_ich8lan) return; |
6411 | 6300 | ||
6412 | temp = E1000_READ_REG(hw, ICRXPTC); | 6301 | temp = er32(ICRXPTC); |
6413 | temp = E1000_READ_REG(hw, ICRXATC); | 6302 | temp = er32(ICRXATC); |
6414 | temp = E1000_READ_REG(hw, ICTXPTC); | 6303 | temp = er32(ICTXPTC); |
6415 | temp = E1000_READ_REG(hw, ICTXATC); | 6304 | temp = er32(ICTXATC); |
6416 | temp = E1000_READ_REG(hw, ICTXQEC); | 6305 | temp = er32(ICTXQEC); |
6417 | temp = E1000_READ_REG(hw, ICTXQMTC); | 6306 | temp = er32(ICTXQMTC); |
6418 | temp = E1000_READ_REG(hw, ICRXDMTC); | 6307 | temp = er32(ICRXDMTC); |
6419 | } | 6308 | } |
6420 | 6309 | ||
6421 | /****************************************************************************** | 6310 | /****************************************************************************** |
@@ -6428,8 +6317,7 @@ e1000_clear_hw_cntrs(struct e1000_hw *hw) | |||
6428 | * current_ifs_val, ifs_min_val, ifs_max_val, ifs_step_size, and ifs_ratio | 6317 | * current_ifs_val, ifs_min_val, ifs_max_val, ifs_step_size, and ifs_ratio |
6429 | * before calling this function. | 6318 | * before calling this function. |
6430 | *****************************************************************************/ | 6319 | *****************************************************************************/ |
6431 | void | 6320 | void e1000_reset_adaptive(struct e1000_hw *hw) |
6432 | e1000_reset_adaptive(struct e1000_hw *hw) | ||
6433 | { | 6321 | { |
6434 | DEBUGFUNC("e1000_reset_adaptive"); | 6322 | DEBUGFUNC("e1000_reset_adaptive"); |
6435 | 6323 | ||
@@ -6442,7 +6330,7 @@ e1000_reset_adaptive(struct e1000_hw *hw) | |||
6442 | hw->ifs_ratio = IFS_RATIO; | 6330 | hw->ifs_ratio = IFS_RATIO; |
6443 | } | 6331 | } |
6444 | hw->in_ifs_mode = false; | 6332 | hw->in_ifs_mode = false; |
6445 | E1000_WRITE_REG(hw, AIT, 0); | 6333 | ew32(AIT, 0); |
6446 | } else { | 6334 | } else { |
6447 | DEBUGOUT("Not in Adaptive IFS mode!\n"); | 6335 | DEBUGOUT("Not in Adaptive IFS mode!\n"); |
6448 | } | 6336 | } |
@@ -6456,8 +6344,7 @@ e1000_reset_adaptive(struct e1000_hw *hw) | |||
6456 | * tx_packets - Number of transmits since last callback | 6344 | * tx_packets - Number of transmits since last callback |
6457 | * total_collisions - Number of collisions since last callback | 6345 | * total_collisions - Number of collisions since last callback |
6458 | *****************************************************************************/ | 6346 | *****************************************************************************/ |
6459 | void | 6347 | void e1000_update_adaptive(struct e1000_hw *hw) |
6460 | e1000_update_adaptive(struct e1000_hw *hw) | ||
6461 | { | 6348 | { |
6462 | DEBUGFUNC("e1000_update_adaptive"); | 6349 | DEBUGFUNC("e1000_update_adaptive"); |
6463 | 6350 | ||
@@ -6470,14 +6357,14 @@ e1000_update_adaptive(struct e1000_hw *hw) | |||
6470 | hw->current_ifs_val = hw->ifs_min_val; | 6357 | hw->current_ifs_val = hw->ifs_min_val; |
6471 | else | 6358 | else |
6472 | hw->current_ifs_val += hw->ifs_step_size; | 6359 | hw->current_ifs_val += hw->ifs_step_size; |
6473 | E1000_WRITE_REG(hw, AIT, hw->current_ifs_val); | 6360 | ew32(AIT, hw->current_ifs_val); |
6474 | } | 6361 | } |
6475 | } | 6362 | } |
6476 | } else { | 6363 | } else { |
6477 | if (hw->in_ifs_mode && (hw->tx_packet_delta <= MIN_NUM_XMITS)) { | 6364 | if (hw->in_ifs_mode && (hw->tx_packet_delta <= MIN_NUM_XMITS)) { |
6478 | hw->current_ifs_val = 0; | 6365 | hw->current_ifs_val = 0; |
6479 | hw->in_ifs_mode = false; | 6366 | hw->in_ifs_mode = false; |
6480 | E1000_WRITE_REG(hw, AIT, 0); | 6367 | ew32(AIT, 0); |
6481 | } | 6368 | } |
6482 | } | 6369 | } |
6483 | } else { | 6370 | } else { |
@@ -6492,11 +6379,8 @@ e1000_update_adaptive(struct e1000_hw *hw) | |||
6492 | * frame_len - The length of the frame in question | 6379 | * frame_len - The length of the frame in question |
6493 | * mac_addr - The Ethernet destination address of the frame in question | 6380 | * mac_addr - The Ethernet destination address of the frame in question |
6494 | *****************************************************************************/ | 6381 | *****************************************************************************/ |
6495 | void | 6382 | void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats, |
6496 | e1000_tbi_adjust_stats(struct e1000_hw *hw, | 6383 | u32 frame_len, u8 *mac_addr) |
6497 | struct e1000_hw_stats *stats, | ||
6498 | u32 frame_len, | ||
6499 | u8 *mac_addr) | ||
6500 | { | 6384 | { |
6501 | u64 carry_bit; | 6385 | u64 carry_bit; |
6502 | 6386 | ||
@@ -6527,7 +6411,7 @@ e1000_tbi_adjust_stats(struct e1000_hw *hw, | |||
6527 | * since the test for a multicast frame will test positive on | 6411 | * since the test for a multicast frame will test positive on |
6528 | * a broadcast frame. | 6412 | * a broadcast frame. |
6529 | */ | 6413 | */ |
6530 | if ((mac_addr[0] == (u8) 0xff) && (mac_addr[1] == (u8) 0xff)) | 6414 | if ((mac_addr[0] == (u8)0xff) && (mac_addr[1] == (u8)0xff)) |
6531 | /* Broadcast packet */ | 6415 | /* Broadcast packet */ |
6532 | stats->bprc++; | 6416 | stats->bprc++; |
6533 | else if (*mac_addr & 0x01) | 6417 | else if (*mac_addr & 0x01) |
@@ -6570,8 +6454,7 @@ e1000_tbi_adjust_stats(struct e1000_hw *hw, | |||
6570 | * | 6454 | * |
6571 | * hw - Struct containing variables accessed by shared code | 6455 | * hw - Struct containing variables accessed by shared code |
6572 | *****************************************************************************/ | 6456 | *****************************************************************************/ |
6573 | void | 6457 | void e1000_get_bus_info(struct e1000_hw *hw) |
6574 | e1000_get_bus_info(struct e1000_hw *hw) | ||
6575 | { | 6458 | { |
6576 | s32 ret_val; | 6459 | s32 ret_val; |
6577 | u16 pci_ex_link_status; | 6460 | u16 pci_ex_link_status; |
@@ -6605,7 +6488,7 @@ e1000_get_bus_info(struct e1000_hw *hw) | |||
6605 | hw->bus_width = e1000_bus_width_pciex_1; | 6488 | hw->bus_width = e1000_bus_width_pciex_1; |
6606 | break; | 6489 | break; |
6607 | default: | 6490 | default: |
6608 | status = E1000_READ_REG(hw, STATUS); | 6491 | status = er32(STATUS); |
6609 | hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ? | 6492 | hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ? |
6610 | e1000_bus_type_pcix : e1000_bus_type_pci; | 6493 | e1000_bus_type_pcix : e1000_bus_type_pci; |
6611 | 6494 | ||
@@ -6645,10 +6528,7 @@ e1000_get_bus_info(struct e1000_hw *hw) | |||
6645 | * offset - offset to write to | 6528 | * offset - offset to write to |
6646 | * value - value to write | 6529 | * value - value to write |
6647 | *****************************************************************************/ | 6530 | *****************************************************************************/ |
6648 | static void | 6531 | static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value) |
6649 | e1000_write_reg_io(struct e1000_hw *hw, | ||
6650 | u32 offset, | ||
6651 | u32 value) | ||
6652 | { | 6532 | { |
6653 | unsigned long io_addr = hw->io_base; | 6533 | unsigned long io_addr = hw->io_base; |
6654 | unsigned long io_data = hw->io_base + 4; | 6534 | unsigned long io_data = hw->io_base + 4; |
@@ -6672,10 +6552,8 @@ e1000_write_reg_io(struct e1000_hw *hw, | |||
6672 | * register to the minimum and maximum range. | 6552 | * register to the minimum and maximum range. |
6673 | * For IGP phy's, the function calculates the range by the AGC registers. | 6553 | * For IGP phy's, the function calculates the range by the AGC registers. |
6674 | *****************************************************************************/ | 6554 | *****************************************************************************/ |
6675 | static s32 | 6555 | static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, |
6676 | e1000_get_cable_length(struct e1000_hw *hw, | 6556 | u16 *max_length) |
6677 | u16 *min_length, | ||
6678 | u16 *max_length) | ||
6679 | { | 6557 | { |
6680 | s32 ret_val; | 6558 | s32 ret_val; |
6681 | u16 agc_value = 0; | 6559 | u16 agc_value = 0; |
@@ -6863,9 +6741,8 @@ e1000_get_cable_length(struct e1000_hw *hw, | |||
6863 | * return 0. If the link speed is 1000 Mbps the polarity status is in the | 6741 | * return 0. If the link speed is 1000 Mbps the polarity status is in the |
6864 | * IGP01E1000_PHY_PCS_INIT_REG. | 6742 | * IGP01E1000_PHY_PCS_INIT_REG. |
6865 | *****************************************************************************/ | 6743 | *****************************************************************************/ |
6866 | static s32 | 6744 | static s32 e1000_check_polarity(struct e1000_hw *hw, |
6867 | e1000_check_polarity(struct e1000_hw *hw, | 6745 | e1000_rev_polarity *polarity) |
6868 | e1000_rev_polarity *polarity) | ||
6869 | { | 6746 | { |
6870 | s32 ret_val; | 6747 | s32 ret_val; |
6871 | u16 phy_data; | 6748 | u16 phy_data; |
@@ -6939,8 +6816,7 @@ e1000_check_polarity(struct e1000_hw *hw, | |||
6939 | * Link Health register. In IGP this bit is latched high, so the driver must | 6816 | * Link Health register. In IGP this bit is latched high, so the driver must |
6940 | * read it immediately after link is established. | 6817 | * read it immediately after link is established. |
6941 | *****************************************************************************/ | 6818 | *****************************************************************************/ |
6942 | static s32 | 6819 | static s32 e1000_check_downshift(struct e1000_hw *hw) |
6943 | e1000_check_downshift(struct e1000_hw *hw) | ||
6944 | { | 6820 | { |
6945 | s32 ret_val; | 6821 | s32 ret_val; |
6946 | u16 phy_data; | 6822 | u16 phy_data; |
@@ -6985,9 +6861,7 @@ e1000_check_downshift(struct e1000_hw *hw) | |||
6985 | * | 6861 | * |
6986 | ****************************************************************************/ | 6862 | ****************************************************************************/ |
6987 | 6863 | ||
6988 | static s32 | 6864 | static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) |
6989 | e1000_config_dsp_after_link_change(struct e1000_hw *hw, | ||
6990 | bool link_up) | ||
6991 | { | 6865 | { |
6992 | s32 ret_val; | 6866 | s32 ret_val; |
6993 | u16 phy_data, phy_saved_data, speed, duplex, i; | 6867 | u16 phy_data, phy_saved_data, speed, duplex, i; |
@@ -7173,8 +7047,7 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw, | |||
7173 | * | 7047 | * |
7174 | * hw - Struct containing variables accessed by shared code | 7048 | * hw - Struct containing variables accessed by shared code |
7175 | ****************************************************************************/ | 7049 | ****************************************************************************/ |
7176 | static s32 | 7050 | static s32 e1000_set_phy_mode(struct e1000_hw *hw) |
7177 | e1000_set_phy_mode(struct e1000_hw *hw) | ||
7178 | { | 7051 | { |
7179 | s32 ret_val; | 7052 | s32 ret_val; |
7180 | u16 eeprom_data; | 7053 | u16 eeprom_data; |
@@ -7218,9 +7091,7 @@ e1000_set_phy_mode(struct e1000_hw *hw) | |||
7218 | * | 7091 | * |
7219 | ****************************************************************************/ | 7092 | ****************************************************************************/ |
7220 | 7093 | ||
7221 | static s32 | 7094 | static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) |
7222 | e1000_set_d3_lplu_state(struct e1000_hw *hw, | ||
7223 | bool active) | ||
7224 | { | 7095 | { |
7225 | u32 phy_ctrl = 0; | 7096 | u32 phy_ctrl = 0; |
7226 | s32 ret_val; | 7097 | s32 ret_val; |
@@ -7242,7 +7113,7 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw, | |||
7242 | /* MAC writes into PHY register based on the state transition | 7113 | /* MAC writes into PHY register based on the state transition |
7243 | * and start auto-negotiation. SW driver can overwrite the settings | 7114 | * and start auto-negotiation. SW driver can overwrite the settings |
7244 | * in CSR PHY power control E1000_PHY_CTRL register. */ | 7115 | * in CSR PHY power control E1000_PHY_CTRL register. */ |
7245 | phy_ctrl = E1000_READ_REG(hw, PHY_CTRL); | 7116 | phy_ctrl = er32(PHY_CTRL); |
7246 | } else { | 7117 | } else { |
7247 | ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); | 7118 | ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); |
7248 | if (ret_val) | 7119 | if (ret_val) |
@@ -7259,7 +7130,7 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw, | |||
7259 | } else { | 7130 | } else { |
7260 | if (hw->mac_type == e1000_ich8lan) { | 7131 | if (hw->mac_type == e1000_ich8lan) { |
7261 | phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU; | 7132 | phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU; |
7262 | E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl); | 7133 | ew32(PHY_CTRL, phy_ctrl); |
7263 | } else { | 7134 | } else { |
7264 | phy_data &= ~IGP02E1000_PM_D3_LPLU; | 7135 | phy_data &= ~IGP02E1000_PM_D3_LPLU; |
7265 | ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, | 7136 | ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, |
@@ -7310,7 +7181,7 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw, | |||
7310 | } else { | 7181 | } else { |
7311 | if (hw->mac_type == e1000_ich8lan) { | 7182 | if (hw->mac_type == e1000_ich8lan) { |
7312 | phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU; | 7183 | phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU; |
7313 | E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl); | 7184 | ew32(PHY_CTRL, phy_ctrl); |
7314 | } else { | 7185 | } else { |
7315 | phy_data |= IGP02E1000_PM_D3_LPLU; | 7186 | phy_data |= IGP02E1000_PM_D3_LPLU; |
7316 | ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, | 7187 | ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, |
@@ -7348,9 +7219,7 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw, | |||
7348 | * | 7219 | * |
7349 | ****************************************************************************/ | 7220 | ****************************************************************************/ |
7350 | 7221 | ||
7351 | static s32 | 7222 | static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active) |
7352 | e1000_set_d0_lplu_state(struct e1000_hw *hw, | ||
7353 | bool active) | ||
7354 | { | 7223 | { |
7355 | u32 phy_ctrl = 0; | 7224 | u32 phy_ctrl = 0; |
7356 | s32 ret_val; | 7225 | s32 ret_val; |
@@ -7361,7 +7230,7 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw, | |||
7361 | return E1000_SUCCESS; | 7230 | return E1000_SUCCESS; |
7362 | 7231 | ||
7363 | if (hw->mac_type == e1000_ich8lan) { | 7232 | if (hw->mac_type == e1000_ich8lan) { |
7364 | phy_ctrl = E1000_READ_REG(hw, PHY_CTRL); | 7233 | phy_ctrl = er32(PHY_CTRL); |
7365 | } else { | 7234 | } else { |
7366 | ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); | 7235 | ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); |
7367 | if (ret_val) | 7236 | if (ret_val) |
@@ -7371,7 +7240,7 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw, | |||
7371 | if (!active) { | 7240 | if (!active) { |
7372 | if (hw->mac_type == e1000_ich8lan) { | 7241 | if (hw->mac_type == e1000_ich8lan) { |
7373 | phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU; | 7242 | phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU; |
7374 | E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl); | 7243 | ew32(PHY_CTRL, phy_ctrl); |
7375 | } else { | 7244 | } else { |
7376 | phy_data &= ~IGP02E1000_PM_D0_LPLU; | 7245 | phy_data &= ~IGP02E1000_PM_D0_LPLU; |
7377 | ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); | 7246 | ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); |
@@ -7412,7 +7281,7 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw, | |||
7412 | 7281 | ||
7413 | if (hw->mac_type == e1000_ich8lan) { | 7282 | if (hw->mac_type == e1000_ich8lan) { |
7414 | phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU; | 7283 | phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU; |
7415 | E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl); | 7284 | ew32(PHY_CTRL, phy_ctrl); |
7416 | } else { | 7285 | } else { |
7417 | phy_data |= IGP02E1000_PM_D0_LPLU; | 7286 | phy_data |= IGP02E1000_PM_D0_LPLU; |
7418 | ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); | 7287 | ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); |
@@ -7439,8 +7308,7 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw, | |||
7439 | * | 7308 | * |
7440 | * hw - Struct containing variables accessed by shared code | 7309 | * hw - Struct containing variables accessed by shared code |
7441 | *****************************************************************************/ | 7310 | *****************************************************************************/ |
7442 | static s32 | 7311 | static s32 e1000_set_vco_speed(struct e1000_hw *hw) |
7443 | e1000_set_vco_speed(struct e1000_hw *hw) | ||
7444 | { | 7312 | { |
7445 | s32 ret_val; | 7313 | s32 ret_val; |
7446 | u16 default_page = 0; | 7314 | u16 default_page = 0; |
@@ -7503,8 +7371,7 @@ e1000_set_vco_speed(struct e1000_hw *hw) | |||
7503 | * | 7371 | * |
7504 | * returns: - E1000_SUCCESS . | 7372 | * returns: - E1000_SUCCESS . |
7505 | ****************************************************************************/ | 7373 | ****************************************************************************/ |
7506 | static s32 | 7374 | static s32 e1000_host_if_read_cookie(struct e1000_hw *hw, u8 *buffer) |
7507 | e1000_host_if_read_cookie(struct e1000_hw * hw, u8 *buffer) | ||
7508 | { | 7375 | { |
7509 | u8 i; | 7376 | u8 i; |
7510 | u32 offset = E1000_MNG_DHCP_COOKIE_OFFSET; | 7377 | u32 offset = E1000_MNG_DHCP_COOKIE_OFFSET; |
@@ -7514,7 +7381,7 @@ e1000_host_if_read_cookie(struct e1000_hw * hw, u8 *buffer) | |||
7514 | offset = (offset >> 2); | 7381 | offset = (offset >> 2); |
7515 | 7382 | ||
7516 | for (i = 0; i < length; i++) { | 7383 | for (i = 0; i < length; i++) { |
7517 | *((u32 *) buffer + i) = | 7384 | *((u32 *)buffer + i) = |
7518 | E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset + i); | 7385 | E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset + i); |
7519 | } | 7386 | } |
7520 | return E1000_SUCCESS; | 7387 | return E1000_SUCCESS; |
@@ -7530,21 +7397,20 @@ e1000_host_if_read_cookie(struct e1000_hw * hw, u8 *buffer) | |||
7530 | * timeout | 7397 | * timeout |
7531 | * - E1000_SUCCESS for success. | 7398 | * - E1000_SUCCESS for success. |
7532 | ****************************************************************************/ | 7399 | ****************************************************************************/ |
7533 | static s32 | 7400 | static s32 e1000_mng_enable_host_if(struct e1000_hw *hw) |
7534 | e1000_mng_enable_host_if(struct e1000_hw * hw) | ||
7535 | { | 7401 | { |
7536 | u32 hicr; | 7402 | u32 hicr; |
7537 | u8 i; | 7403 | u8 i; |
7538 | 7404 | ||
7539 | /* Check that the host interface is enabled. */ | 7405 | /* Check that the host interface is enabled. */ |
7540 | hicr = E1000_READ_REG(hw, HICR); | 7406 | hicr = er32(HICR); |
7541 | if ((hicr & E1000_HICR_EN) == 0) { | 7407 | if ((hicr & E1000_HICR_EN) == 0) { |
7542 | DEBUGOUT("E1000_HOST_EN bit disabled.\n"); | 7408 | DEBUGOUT("E1000_HOST_EN bit disabled.\n"); |
7543 | return -E1000_ERR_HOST_INTERFACE_COMMAND; | 7409 | return -E1000_ERR_HOST_INTERFACE_COMMAND; |
7544 | } | 7410 | } |
7545 | /* check the previous command is completed */ | 7411 | /* check the previous command is completed */ |
7546 | for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) { | 7412 | for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) { |
7547 | hicr = E1000_READ_REG(hw, HICR); | 7413 | hicr = er32(HICR); |
7548 | if (!(hicr & E1000_HICR_C)) | 7414 | if (!(hicr & E1000_HICR_C)) |
7549 | break; | 7415 | break; |
7550 | mdelay(1); | 7416 | mdelay(1); |
@@ -7564,9 +7430,8 @@ e1000_mng_enable_host_if(struct e1000_hw * hw) | |||
7564 | * | 7430 | * |
7565 | * returns - E1000_SUCCESS for success. | 7431 | * returns - E1000_SUCCESS for success. |
7566 | ****************************************************************************/ | 7432 | ****************************************************************************/ |
7567 | static s32 | 7433 | static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length, |
7568 | e1000_mng_host_if_write(struct e1000_hw * hw, u8 *buffer, | 7434 | u16 offset, u8 *sum) |
7569 | u16 length, u16 offset, u8 *sum) | ||
7570 | { | 7435 | { |
7571 | u8 *tmp; | 7436 | u8 *tmp; |
7572 | u8 *bufptr = buffer; | 7437 | u8 *bufptr = buffer; |
@@ -7632,9 +7497,8 @@ e1000_mng_host_if_write(struct e1000_hw * hw, u8 *buffer, | |||
7632 | * | 7497 | * |
7633 | * returns - E1000_SUCCESS for success. | 7498 | * returns - E1000_SUCCESS for success. |
7634 | ****************************************************************************/ | 7499 | ****************************************************************************/ |
7635 | static s32 | 7500 | static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, |
7636 | e1000_mng_write_cmd_header(struct e1000_hw * hw, | 7501 | struct e1000_host_mng_command_header *hdr) |
7637 | struct e1000_host_mng_command_header * hdr) | ||
7638 | { | 7502 | { |
7639 | u16 i; | 7503 | u16 i; |
7640 | u8 sum; | 7504 | u8 sum; |
@@ -7648,7 +7512,7 @@ e1000_mng_write_cmd_header(struct e1000_hw * hw, | |||
7648 | sum = hdr->checksum; | 7512 | sum = hdr->checksum; |
7649 | hdr->checksum = 0; | 7513 | hdr->checksum = 0; |
7650 | 7514 | ||
7651 | buffer = (u8 *) hdr; | 7515 | buffer = (u8 *)hdr; |
7652 | i = length; | 7516 | i = length; |
7653 | while (i--) | 7517 | while (i--) |
7654 | sum += buffer[i]; | 7518 | sum += buffer[i]; |
@@ -7658,8 +7522,8 @@ e1000_mng_write_cmd_header(struct e1000_hw * hw, | |||
7658 | length >>= 2; | 7522 | length >>= 2; |
7659 | /* The device driver writes the relevant command block into the ram area. */ | 7523 | /* The device driver writes the relevant command block into the ram area. */ |
7660 | for (i = 0; i < length; i++) { | 7524 | for (i = 0; i < length; i++) { |
7661 | E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((u32 *) hdr + i)); | 7525 | E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((u32 *)hdr + i)); |
7662 | E1000_WRITE_FLUSH(hw); | 7526 | E1000_WRITE_FLUSH(); |
7663 | } | 7527 | } |
7664 | 7528 | ||
7665 | return E1000_SUCCESS; | 7529 | return E1000_SUCCESS; |
@@ -7672,14 +7536,13 @@ e1000_mng_write_cmd_header(struct e1000_hw * hw, | |||
7672 | * | 7536 | * |
7673 | * returns - E1000_SUCCESS for success. | 7537 | * returns - E1000_SUCCESS for success. |
7674 | ****************************************************************************/ | 7538 | ****************************************************************************/ |
7675 | static s32 | 7539 | static s32 e1000_mng_write_commit(struct e1000_hw *hw) |
7676 | e1000_mng_write_commit(struct e1000_hw * hw) | ||
7677 | { | 7540 | { |
7678 | u32 hicr; | 7541 | u32 hicr; |
7679 | 7542 | ||
7680 | hicr = E1000_READ_REG(hw, HICR); | 7543 | hicr = er32(HICR); |
7681 | /* Setting this bit tells the ARC that a new command is pending. */ | 7544 | /* Setting this bit tells the ARC that a new command is pending. */ |
7682 | E1000_WRITE_REG(hw, HICR, hicr | E1000_HICR_C); | 7545 | ew32(HICR, hicr | E1000_HICR_C); |
7683 | 7546 | ||
7684 | return E1000_SUCCESS; | 7547 | return E1000_SUCCESS; |
7685 | } | 7548 | } |
@@ -7690,12 +7553,11 @@ e1000_mng_write_commit(struct e1000_hw * hw) | |||
7690 | * | 7553 | * |
7691 | * returns - true when the mode is IAMT or false. | 7554 | * returns - true when the mode is IAMT or false. |
7692 | ****************************************************************************/ | 7555 | ****************************************************************************/ |
7693 | bool | 7556 | bool e1000_check_mng_mode(struct e1000_hw *hw) |
7694 | e1000_check_mng_mode(struct e1000_hw *hw) | ||
7695 | { | 7557 | { |
7696 | u32 fwsm; | 7558 | u32 fwsm; |
7697 | 7559 | ||
7698 | fwsm = E1000_READ_REG(hw, FWSM); | 7560 | fwsm = er32(FWSM); |
7699 | 7561 | ||
7700 | if (hw->mac_type == e1000_ich8lan) { | 7562 | if (hw->mac_type == e1000_ich8lan) { |
7701 | if ((fwsm & E1000_FWSM_MODE_MASK) == | 7563 | if ((fwsm & E1000_FWSM_MODE_MASK) == |
@@ -7712,9 +7574,7 @@ e1000_check_mng_mode(struct e1000_hw *hw) | |||
7712 | /***************************************************************************** | 7574 | /***************************************************************************** |
7713 | * This function writes the dhcp info . | 7575 | * This function writes the dhcp info . |
7714 | ****************************************************************************/ | 7576 | ****************************************************************************/ |
7715 | s32 | 7577 | s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length) |
7716 | e1000_mng_write_dhcp_info(struct e1000_hw * hw, u8 *buffer, | ||
7717 | u16 length) | ||
7718 | { | 7578 | { |
7719 | s32 ret_val; | 7579 | s32 ret_val; |
7720 | struct e1000_host_mng_command_header hdr; | 7580 | struct e1000_host_mng_command_header hdr; |
@@ -7744,8 +7604,7 @@ e1000_mng_write_dhcp_info(struct e1000_hw * hw, u8 *buffer, | |||
7744 | * | 7604 | * |
7745 | * returns - checksum of buffer contents. | 7605 | * returns - checksum of buffer contents. |
7746 | ****************************************************************************/ | 7606 | ****************************************************************************/ |
7747 | static u8 | 7607 | static u8 e1000_calculate_mng_checksum(char *buffer, u32 length) |
7748 | e1000_calculate_mng_checksum(char *buffer, u32 length) | ||
7749 | { | 7608 | { |
7750 | u8 sum = 0; | 7609 | u8 sum = 0; |
7751 | u32 i; | 7610 | u32 i; |
@@ -7756,7 +7615,7 @@ e1000_calculate_mng_checksum(char *buffer, u32 length) | |||
7756 | for (i=0; i < length; i++) | 7615 | for (i=0; i < length; i++) |
7757 | sum += buffer[i]; | 7616 | sum += buffer[i]; |
7758 | 7617 | ||
7759 | return (u8) (0 - sum); | 7618 | return (u8)(0 - sum); |
7760 | } | 7619 | } |
7761 | 7620 | ||
7762 | /***************************************************************************** | 7621 | /***************************************************************************** |
@@ -7764,8 +7623,7 @@ e1000_calculate_mng_checksum(char *buffer, u32 length) | |||
7764 | * | 7623 | * |
7765 | * returns - true for packet filtering or false. | 7624 | * returns - true for packet filtering or false. |
7766 | ****************************************************************************/ | 7625 | ****************************************************************************/ |
7767 | bool | 7626 | bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw) |
7768 | e1000_enable_tx_pkt_filtering(struct e1000_hw *hw) | ||
7769 | { | 7627 | { |
7770 | /* called in init as well as watchdog timer functions */ | 7628 | /* called in init as well as watchdog timer functions */ |
7771 | 7629 | ||
@@ -7806,21 +7664,20 @@ e1000_enable_tx_pkt_filtering(struct e1000_hw *hw) | |||
7806 | * returns: - true/false | 7664 | * returns: - true/false |
7807 | * | 7665 | * |
7808 | *****************************************************************************/ | 7666 | *****************************************************************************/ |
7809 | u32 | 7667 | u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw) |
7810 | e1000_enable_mng_pass_thru(struct e1000_hw *hw) | ||
7811 | { | 7668 | { |
7812 | u32 manc; | 7669 | u32 manc; |
7813 | u32 fwsm, factps; | 7670 | u32 fwsm, factps; |
7814 | 7671 | ||
7815 | if (hw->asf_firmware_present) { | 7672 | if (hw->asf_firmware_present) { |
7816 | manc = E1000_READ_REG(hw, MANC); | 7673 | manc = er32(MANC); |
7817 | 7674 | ||
7818 | if (!(manc & E1000_MANC_RCV_TCO_EN) || | 7675 | if (!(manc & E1000_MANC_RCV_TCO_EN) || |
7819 | !(manc & E1000_MANC_EN_MAC_ADDR_FILTER)) | 7676 | !(manc & E1000_MANC_EN_MAC_ADDR_FILTER)) |
7820 | return false; | 7677 | return false; |
7821 | if (e1000_arc_subsystem_valid(hw)) { | 7678 | if (e1000_arc_subsystem_valid(hw)) { |
7822 | fwsm = E1000_READ_REG(hw, FWSM); | 7679 | fwsm = er32(FWSM); |
7823 | factps = E1000_READ_REG(hw, FACTPS); | 7680 | factps = er32(FACTPS); |
7824 | 7681 | ||
7825 | if ((((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT) == | 7682 | if ((((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT) == |
7826 | e1000_mng_mode_pt) && !(factps & E1000_FACTPS_MNGCG)) | 7683 | e1000_mng_mode_pt) && !(factps & E1000_FACTPS_MNGCG)) |
@@ -7832,8 +7689,7 @@ e1000_enable_mng_pass_thru(struct e1000_hw *hw) | |||
7832 | return false; | 7689 | return false; |
7833 | } | 7690 | } |
7834 | 7691 | ||
7835 | static s32 | 7692 | static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw) |
7836 | e1000_polarity_reversal_workaround(struct e1000_hw *hw) | ||
7837 | { | 7693 | { |
7838 | s32 ret_val; | 7694 | s32 ret_val; |
7839 | u16 mii_status_reg; | 7695 | u16 mii_status_reg; |
@@ -7926,8 +7782,7 @@ e1000_polarity_reversal_workaround(struct e1000_hw *hw) | |||
7926 | * returns: - none. | 7782 | * returns: - none. |
7927 | * | 7783 | * |
7928 | ***************************************************************************/ | 7784 | ***************************************************************************/ |
7929 | static void | 7785 | static void e1000_set_pci_express_master_disable(struct e1000_hw *hw) |
7930 | e1000_set_pci_express_master_disable(struct e1000_hw *hw) | ||
7931 | { | 7786 | { |
7932 | u32 ctrl; | 7787 | u32 ctrl; |
7933 | 7788 | ||
@@ -7936,9 +7791,9 @@ e1000_set_pci_express_master_disable(struct e1000_hw *hw) | |||
7936 | if (hw->bus_type != e1000_bus_type_pci_express) | 7791 | if (hw->bus_type != e1000_bus_type_pci_express) |
7937 | return; | 7792 | return; |
7938 | 7793 | ||
7939 | ctrl = E1000_READ_REG(hw, CTRL); | 7794 | ctrl = er32(CTRL); |
7940 | ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; | 7795 | ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; |
7941 | E1000_WRITE_REG(hw, CTRL, ctrl); | 7796 | ew32(CTRL, ctrl); |
7942 | } | 7797 | } |
7943 | 7798 | ||
7944 | /******************************************************************************* | 7799 | /******************************************************************************* |
@@ -7952,8 +7807,7 @@ e1000_set_pci_express_master_disable(struct e1000_hw *hw) | |||
7952 | * E1000_SUCCESS master requests disabled. | 7807 | * E1000_SUCCESS master requests disabled. |
7953 | * | 7808 | * |
7954 | ******************************************************************************/ | 7809 | ******************************************************************************/ |
7955 | s32 | 7810 | s32 e1000_disable_pciex_master(struct e1000_hw *hw) |
7956 | e1000_disable_pciex_master(struct e1000_hw *hw) | ||
7957 | { | 7811 | { |
7958 | s32 timeout = MASTER_DISABLE_TIMEOUT; /* 80ms */ | 7812 | s32 timeout = MASTER_DISABLE_TIMEOUT; /* 80ms */ |
7959 | 7813 | ||
@@ -7965,7 +7819,7 @@ e1000_disable_pciex_master(struct e1000_hw *hw) | |||
7965 | e1000_set_pci_express_master_disable(hw); | 7819 | e1000_set_pci_express_master_disable(hw); |
7966 | 7820 | ||
7967 | while (timeout) { | 7821 | while (timeout) { |
7968 | if (!(E1000_READ_REG(hw, STATUS) & E1000_STATUS_GIO_MASTER_ENABLE)) | 7822 | if (!(er32(STATUS) & E1000_STATUS_GIO_MASTER_ENABLE)) |
7969 | break; | 7823 | break; |
7970 | else | 7824 | else |
7971 | udelay(100); | 7825 | udelay(100); |
@@ -7990,8 +7844,7 @@ e1000_disable_pciex_master(struct e1000_hw *hw) | |||
7990 | * E1000_SUCCESS at any other case. | 7844 | * E1000_SUCCESS at any other case. |
7991 | * | 7845 | * |
7992 | ******************************************************************************/ | 7846 | ******************************************************************************/ |
7993 | static s32 | 7847 | static s32 e1000_get_auto_rd_done(struct e1000_hw *hw) |
7994 | e1000_get_auto_rd_done(struct e1000_hw *hw) | ||
7995 | { | 7848 | { |
7996 | s32 timeout = AUTO_READ_DONE_TIMEOUT; | 7849 | s32 timeout = AUTO_READ_DONE_TIMEOUT; |
7997 | 7850 | ||
@@ -8007,7 +7860,7 @@ e1000_get_auto_rd_done(struct e1000_hw *hw) | |||
8007 | case e1000_80003es2lan: | 7860 | case e1000_80003es2lan: |
8008 | case e1000_ich8lan: | 7861 | case e1000_ich8lan: |
8009 | while (timeout) { | 7862 | while (timeout) { |
8010 | if (E1000_READ_REG(hw, EECD) & E1000_EECD_AUTO_RD) | 7863 | if (er32(EECD) & E1000_EECD_AUTO_RD) |
8011 | break; | 7864 | break; |
8012 | else msleep(1); | 7865 | else msleep(1); |
8013 | timeout--; | 7866 | timeout--; |
@@ -8038,8 +7891,7 @@ e1000_get_auto_rd_done(struct e1000_hw *hw) | |||
8038 | * E1000_SUCCESS at any other case. | 7891 | * E1000_SUCCESS at any other case. |
8039 | * | 7892 | * |
8040 | ***************************************************************************/ | 7893 | ***************************************************************************/ |
8041 | static s32 | 7894 | static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw) |
8042 | e1000_get_phy_cfg_done(struct e1000_hw *hw) | ||
8043 | { | 7895 | { |
8044 | s32 timeout = PHY_CFG_TIMEOUT; | 7896 | s32 timeout = PHY_CFG_TIMEOUT; |
8045 | u32 cfg_mask = E1000_EEPROM_CFG_DONE; | 7897 | u32 cfg_mask = E1000_EEPROM_CFG_DONE; |
@@ -8052,13 +7904,13 @@ e1000_get_phy_cfg_done(struct e1000_hw *hw) | |||
8052 | break; | 7904 | break; |
8053 | case e1000_80003es2lan: | 7905 | case e1000_80003es2lan: |
8054 | /* Separate *_CFG_DONE_* bit for each port */ | 7906 | /* Separate *_CFG_DONE_* bit for each port */ |
8055 | if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) | 7907 | if (er32(STATUS) & E1000_STATUS_FUNC_1) |
8056 | cfg_mask = E1000_EEPROM_CFG_DONE_PORT_1; | 7908 | cfg_mask = E1000_EEPROM_CFG_DONE_PORT_1; |
8057 | /* Fall Through */ | 7909 | /* Fall Through */ |
8058 | case e1000_82571: | 7910 | case e1000_82571: |
8059 | case e1000_82572: | 7911 | case e1000_82572: |
8060 | while (timeout) { | 7912 | while (timeout) { |
8061 | if (E1000_READ_REG(hw, EEMNGCTL) & cfg_mask) | 7913 | if (er32(EEMNGCTL) & cfg_mask) |
8062 | break; | 7914 | break; |
8063 | else | 7915 | else |
8064 | msleep(1); | 7916 | msleep(1); |
@@ -8085,8 +7937,7 @@ e1000_get_phy_cfg_done(struct e1000_hw *hw) | |||
8085 | * E1000_SUCCESS at any other case. | 7937 | * E1000_SUCCESS at any other case. |
8086 | * | 7938 | * |
8087 | ***************************************************************************/ | 7939 | ***************************************************************************/ |
8088 | static s32 | 7940 | static s32 e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw) |
8089 | e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw) | ||
8090 | { | 7941 | { |
8091 | s32 timeout; | 7942 | s32 timeout; |
8092 | u32 swsm; | 7943 | u32 swsm; |
@@ -8105,11 +7956,11 @@ e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw) | |||
8105 | /* Get the FW semaphore. */ | 7956 | /* Get the FW semaphore. */ |
8106 | timeout = hw->eeprom.word_size + 1; | 7957 | timeout = hw->eeprom.word_size + 1; |
8107 | while (timeout) { | 7958 | while (timeout) { |
8108 | swsm = E1000_READ_REG(hw, SWSM); | 7959 | swsm = er32(SWSM); |
8109 | swsm |= E1000_SWSM_SWESMBI; | 7960 | swsm |= E1000_SWSM_SWESMBI; |
8110 | E1000_WRITE_REG(hw, SWSM, swsm); | 7961 | ew32(SWSM, swsm); |
8111 | /* if we managed to set the bit we got the semaphore. */ | 7962 | /* if we managed to set the bit we got the semaphore. */ |
8112 | swsm = E1000_READ_REG(hw, SWSM); | 7963 | swsm = er32(SWSM); |
8113 | if (swsm & E1000_SWSM_SWESMBI) | 7964 | if (swsm & E1000_SWSM_SWESMBI) |
8114 | break; | 7965 | break; |
8115 | 7966 | ||
@@ -8135,8 +7986,7 @@ e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw) | |||
8135 | * returns: - None. | 7986 | * returns: - None. |
8136 | * | 7987 | * |
8137 | ***************************************************************************/ | 7988 | ***************************************************************************/ |
8138 | static void | 7989 | static void e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw) |
8139 | e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw) | ||
8140 | { | 7990 | { |
8141 | u32 swsm; | 7991 | u32 swsm; |
8142 | 7992 | ||
@@ -8145,13 +7995,13 @@ e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw) | |||
8145 | if (!hw->eeprom_semaphore_present) | 7995 | if (!hw->eeprom_semaphore_present) |
8146 | return; | 7996 | return; |
8147 | 7997 | ||
8148 | swsm = E1000_READ_REG(hw, SWSM); | 7998 | swsm = er32(SWSM); |
8149 | if (hw->mac_type == e1000_80003es2lan) { | 7999 | if (hw->mac_type == e1000_80003es2lan) { |
8150 | /* Release both semaphores. */ | 8000 | /* Release both semaphores. */ |
8151 | swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); | 8001 | swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); |
8152 | } else | 8002 | } else |
8153 | swsm &= ~(E1000_SWSM_SWESMBI); | 8003 | swsm &= ~(E1000_SWSM_SWESMBI); |
8154 | E1000_WRITE_REG(hw, SWSM, swsm); | 8004 | ew32(SWSM, swsm); |
8155 | } | 8005 | } |
8156 | 8006 | ||
8157 | /*************************************************************************** | 8007 | /*************************************************************************** |
@@ -8164,8 +8014,7 @@ e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw) | |||
8164 | * E1000_SUCCESS at any other case. | 8014 | * E1000_SUCCESS at any other case. |
8165 | * | 8015 | * |
8166 | ***************************************************************************/ | 8016 | ***************************************************************************/ |
8167 | static s32 | 8017 | static s32 e1000_get_software_semaphore(struct e1000_hw *hw) |
8168 | e1000_get_software_semaphore(struct e1000_hw *hw) | ||
8169 | { | 8018 | { |
8170 | s32 timeout = hw->eeprom.word_size + 1; | 8019 | s32 timeout = hw->eeprom.word_size + 1; |
8171 | u32 swsm; | 8020 | u32 swsm; |
@@ -8177,7 +8026,7 @@ e1000_get_software_semaphore(struct e1000_hw *hw) | |||
8177 | } | 8026 | } |
8178 | 8027 | ||
8179 | while (timeout) { | 8028 | while (timeout) { |
8180 | swsm = E1000_READ_REG(hw, SWSM); | 8029 | swsm = er32(SWSM); |
8181 | /* If SMBI bit cleared, it is now set and we hold the semaphore */ | 8030 | /* If SMBI bit cleared, it is now set and we hold the semaphore */ |
8182 | if (!(swsm & E1000_SWSM_SMBI)) | 8031 | if (!(swsm & E1000_SWSM_SMBI)) |
8183 | break; | 8032 | break; |
@@ -8200,8 +8049,7 @@ e1000_get_software_semaphore(struct e1000_hw *hw) | |||
8200 | * hw: Struct containing variables accessed by shared code | 8049 | * hw: Struct containing variables accessed by shared code |
8201 | * | 8050 | * |
8202 | ***************************************************************************/ | 8051 | ***************************************************************************/ |
8203 | static void | 8052 | static void e1000_release_software_semaphore(struct e1000_hw *hw) |
8204 | e1000_release_software_semaphore(struct e1000_hw *hw) | ||
8205 | { | 8053 | { |
8206 | u32 swsm; | 8054 | u32 swsm; |
8207 | 8055 | ||
@@ -8211,10 +8059,10 @@ e1000_release_software_semaphore(struct e1000_hw *hw) | |||
8211 | return; | 8059 | return; |
8212 | } | 8060 | } |
8213 | 8061 | ||
8214 | swsm = E1000_READ_REG(hw, SWSM); | 8062 | swsm = er32(SWSM); |
8215 | /* Release the SW semaphores.*/ | 8063 | /* Release the SW semaphores.*/ |
8216 | swsm &= ~E1000_SWSM_SMBI; | 8064 | swsm &= ~E1000_SWSM_SMBI; |
8217 | E1000_WRITE_REG(hw, SWSM, swsm); | 8065 | ew32(SWSM, swsm); |
8218 | } | 8066 | } |
8219 | 8067 | ||
8220 | /****************************************************************************** | 8068 | /****************************************************************************** |
@@ -8228,26 +8076,24 @@ e1000_release_software_semaphore(struct e1000_hw *hw) | |||
8228 | * E1000_SUCCESS | 8076 | * E1000_SUCCESS |
8229 | * | 8077 | * |
8230 | *****************************************************************************/ | 8078 | *****************************************************************************/ |
8231 | s32 | 8079 | s32 e1000_check_phy_reset_block(struct e1000_hw *hw) |
8232 | e1000_check_phy_reset_block(struct e1000_hw *hw) | ||
8233 | { | 8080 | { |
8234 | u32 manc = 0; | 8081 | u32 manc = 0; |
8235 | u32 fwsm = 0; | 8082 | u32 fwsm = 0; |
8236 | 8083 | ||
8237 | if (hw->mac_type == e1000_ich8lan) { | 8084 | if (hw->mac_type == e1000_ich8lan) { |
8238 | fwsm = E1000_READ_REG(hw, FWSM); | 8085 | fwsm = er32(FWSM); |
8239 | return (fwsm & E1000_FWSM_RSPCIPHY) ? E1000_SUCCESS | 8086 | return (fwsm & E1000_FWSM_RSPCIPHY) ? E1000_SUCCESS |
8240 | : E1000_BLK_PHY_RESET; | 8087 | : E1000_BLK_PHY_RESET; |
8241 | } | 8088 | } |
8242 | 8089 | ||
8243 | if (hw->mac_type > e1000_82547_rev_2) | 8090 | if (hw->mac_type > e1000_82547_rev_2) |
8244 | manc = E1000_READ_REG(hw, MANC); | 8091 | manc = er32(MANC); |
8245 | return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? | 8092 | return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? |
8246 | E1000_BLK_PHY_RESET : E1000_SUCCESS; | 8093 | E1000_BLK_PHY_RESET : E1000_SUCCESS; |
8247 | } | 8094 | } |
8248 | 8095 | ||
8249 | static u8 | 8096 | static u8 e1000_arc_subsystem_valid(struct e1000_hw *hw) |
8250 | e1000_arc_subsystem_valid(struct e1000_hw *hw) | ||
8251 | { | 8097 | { |
8252 | u32 fwsm; | 8098 | u32 fwsm; |
8253 | 8099 | ||
@@ -8261,7 +8107,7 @@ e1000_arc_subsystem_valid(struct e1000_hw *hw) | |||
8261 | case e1000_82572: | 8107 | case e1000_82572: |
8262 | case e1000_82573: | 8108 | case e1000_82573: |
8263 | case e1000_80003es2lan: | 8109 | case e1000_80003es2lan: |
8264 | fwsm = E1000_READ_REG(hw, FWSM); | 8110 | fwsm = er32(FWSM); |
8265 | if ((fwsm & E1000_FWSM_MODE_MASK) != 0) | 8111 | if ((fwsm & E1000_FWSM_MODE_MASK) != 0) |
8266 | return true; | 8112 | return true; |
8267 | break; | 8113 | break; |
@@ -8283,8 +8129,7 @@ e1000_arc_subsystem_valid(struct e1000_hw *hw) | |||
8283 | * returns: E1000_SUCCESS | 8129 | * returns: E1000_SUCCESS |
8284 | * | 8130 | * |
8285 | *****************************************************************************/ | 8131 | *****************************************************************************/ |
8286 | static s32 | 8132 | static s32 e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, u32 no_snoop) |
8287 | e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, u32 no_snoop) | ||
8288 | { | 8133 | { |
8289 | u32 gcr_reg = 0; | 8134 | u32 gcr_reg = 0; |
8290 | 8135 | ||
@@ -8297,19 +8142,19 @@ e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, u32 no_snoop) | |||
8297 | return E1000_SUCCESS; | 8142 | return E1000_SUCCESS; |
8298 | 8143 | ||
8299 | if (no_snoop) { | 8144 | if (no_snoop) { |
8300 | gcr_reg = E1000_READ_REG(hw, GCR); | 8145 | gcr_reg = er32(GCR); |
8301 | gcr_reg &= ~(PCI_EX_NO_SNOOP_ALL); | 8146 | gcr_reg &= ~(PCI_EX_NO_SNOOP_ALL); |
8302 | gcr_reg |= no_snoop; | 8147 | gcr_reg |= no_snoop; |
8303 | E1000_WRITE_REG(hw, GCR, gcr_reg); | 8148 | ew32(GCR, gcr_reg); |
8304 | } | 8149 | } |
8305 | if (hw->mac_type == e1000_ich8lan) { | 8150 | if (hw->mac_type == e1000_ich8lan) { |
8306 | u32 ctrl_ext; | 8151 | u32 ctrl_ext; |
8307 | 8152 | ||
8308 | E1000_WRITE_REG(hw, GCR, PCI_EX_82566_SNOOP_ALL); | 8153 | ew32(GCR, PCI_EX_82566_SNOOP_ALL); |
8309 | 8154 | ||
8310 | ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); | 8155 | ctrl_ext = er32(CTRL_EXT); |
8311 | ctrl_ext |= E1000_CTRL_EXT_RO_DIS; | 8156 | ctrl_ext |= E1000_CTRL_EXT_RO_DIS; |
8312 | E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); | 8157 | ew32(CTRL_EXT, ctrl_ext); |
8313 | } | 8158 | } |
8314 | 8159 | ||
8315 | return E1000_SUCCESS; | 8160 | return E1000_SUCCESS; |
@@ -8324,8 +8169,7 @@ e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, u32 no_snoop) | |||
8324 | * hw: Struct containing variables accessed by shared code | 8169 | * hw: Struct containing variables accessed by shared code |
8325 | * | 8170 | * |
8326 | ***************************************************************************/ | 8171 | ***************************************************************************/ |
8327 | static s32 | 8172 | static s32 e1000_get_software_flag(struct e1000_hw *hw) |
8328 | e1000_get_software_flag(struct e1000_hw *hw) | ||
8329 | { | 8173 | { |
8330 | s32 timeout = PHY_CFG_TIMEOUT; | 8174 | s32 timeout = PHY_CFG_TIMEOUT; |
8331 | u32 extcnf_ctrl; | 8175 | u32 extcnf_ctrl; |
@@ -8334,11 +8178,11 @@ e1000_get_software_flag(struct e1000_hw *hw) | |||
8334 | 8178 | ||
8335 | if (hw->mac_type == e1000_ich8lan) { | 8179 | if (hw->mac_type == e1000_ich8lan) { |
8336 | while (timeout) { | 8180 | while (timeout) { |
8337 | extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL); | 8181 | extcnf_ctrl = er32(EXTCNF_CTRL); |
8338 | extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; | 8182 | extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; |
8339 | E1000_WRITE_REG(hw, EXTCNF_CTRL, extcnf_ctrl); | 8183 | ew32(EXTCNF_CTRL, extcnf_ctrl); |
8340 | 8184 | ||
8341 | extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL); | 8185 | extcnf_ctrl = er32(EXTCNF_CTRL); |
8342 | if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) | 8186 | if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) |
8343 | break; | 8187 | break; |
8344 | mdelay(1); | 8188 | mdelay(1); |
@@ -8363,17 +8207,16 @@ e1000_get_software_flag(struct e1000_hw *hw) | |||
8363 | * hw: Struct containing variables accessed by shared code | 8207 | * hw: Struct containing variables accessed by shared code |
8364 | * | 8208 | * |
8365 | ***************************************************************************/ | 8209 | ***************************************************************************/ |
8366 | static void | 8210 | static void e1000_release_software_flag(struct e1000_hw *hw) |
8367 | e1000_release_software_flag(struct e1000_hw *hw) | ||
8368 | { | 8211 | { |
8369 | u32 extcnf_ctrl; | 8212 | u32 extcnf_ctrl; |
8370 | 8213 | ||
8371 | DEBUGFUNC("e1000_release_software_flag"); | 8214 | DEBUGFUNC("e1000_release_software_flag"); |
8372 | 8215 | ||
8373 | if (hw->mac_type == e1000_ich8lan) { | 8216 | if (hw->mac_type == e1000_ich8lan) { |
8374 | extcnf_ctrl= E1000_READ_REG(hw, EXTCNF_CTRL); | 8217 | extcnf_ctrl= er32(EXTCNF_CTRL); |
8375 | extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; | 8218 | extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; |
8376 | E1000_WRITE_REG(hw, EXTCNF_CTRL, extcnf_ctrl); | 8219 | ew32(EXTCNF_CTRL, extcnf_ctrl); |
8377 | } | 8220 | } |
8378 | 8221 | ||
8379 | return; | 8222 | return; |
@@ -8388,9 +8231,8 @@ e1000_release_software_flag(struct e1000_hw *hw) | |||
8388 | * data - word read from the EEPROM | 8231 | * data - word read from the EEPROM |
8389 | * words - number of words to read | 8232 | * words - number of words to read |
8390 | *****************************************************************************/ | 8233 | *****************************************************************************/ |
8391 | static s32 | 8234 | static s32 e1000_read_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words, |
8392 | e1000_read_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words, | 8235 | u16 *data) |
8393 | u16 *data) | ||
8394 | { | 8236 | { |
8395 | s32 error = E1000_SUCCESS; | 8237 | s32 error = E1000_SUCCESS; |
8396 | u32 flash_bank = 0; | 8238 | u32 flash_bank = 0; |
@@ -8405,7 +8247,7 @@ e1000_read_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words, | |||
8405 | * to be updated with each read. | 8247 | * to be updated with each read. |
8406 | */ | 8248 | */ |
8407 | /* Value of bit 22 corresponds to the flash bank we're on. */ | 8249 | /* Value of bit 22 corresponds to the flash bank we're on. */ |
8408 | flash_bank = (E1000_READ_REG(hw, EECD) & E1000_EECD_SEC1VAL) ? 1 : 0; | 8250 | flash_bank = (er32(EECD) & E1000_EECD_SEC1VAL) ? 1 : 0; |
8409 | 8251 | ||
8410 | /* Adjust offset appropriately if we're on bank 1 - adjust for word size */ | 8252 | /* Adjust offset appropriately if we're on bank 1 - adjust for word size */ |
8411 | bank_offset = flash_bank * (hw->flash_bank_size * 2); | 8253 | bank_offset = flash_bank * (hw->flash_bank_size * 2); |
@@ -8444,9 +8286,8 @@ e1000_read_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words, | |||
8444 | * words - number of words to write | 8286 | * words - number of words to write |
8445 | * data - words to write to the EEPROM | 8287 | * data - words to write to the EEPROM |
8446 | *****************************************************************************/ | 8288 | *****************************************************************************/ |
8447 | static s32 | 8289 | static s32 e1000_write_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words, |
8448 | e1000_write_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words, | 8290 | u16 *data) |
8449 | u16 *data) | ||
8450 | { | 8291 | { |
8451 | u32 i = 0; | 8292 | u32 i = 0; |
8452 | s32 error = E1000_SUCCESS; | 8293 | s32 error = E1000_SUCCESS; |
@@ -8491,8 +8332,7 @@ e1000_write_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words, | |||
8491 | * | 8332 | * |
8492 | * hw - The pointer to the hw structure | 8333 | * hw - The pointer to the hw structure |
8493 | ****************************************************************************/ | 8334 | ****************************************************************************/ |
8494 | static s32 | 8335 | static s32 e1000_ich8_cycle_init(struct e1000_hw *hw) |
8495 | e1000_ich8_cycle_init(struct e1000_hw *hw) | ||
8496 | { | 8336 | { |
8497 | union ich8_hws_flash_status hsfsts; | 8337 | union ich8_hws_flash_status hsfsts; |
8498 | s32 error = E1000_ERR_EEPROM; | 8338 | s32 error = E1000_ERR_EEPROM; |
@@ -8558,8 +8398,7 @@ e1000_ich8_cycle_init(struct e1000_hw *hw) | |||
8558 | * | 8398 | * |
8559 | * hw - The pointer to the hw structure | 8399 | * hw - The pointer to the hw structure |
8560 | ****************************************************************************/ | 8400 | ****************************************************************************/ |
8561 | static s32 | 8401 | static s32 e1000_ich8_flash_cycle(struct e1000_hw *hw, u32 timeout) |
8562 | e1000_ich8_flash_cycle(struct e1000_hw *hw, u32 timeout) | ||
8563 | { | 8402 | { |
8564 | union ich8_hws_flash_ctrl hsflctl; | 8403 | union ich8_hws_flash_ctrl hsflctl; |
8565 | union ich8_hws_flash_status hsfsts; | 8404 | union ich8_hws_flash_status hsfsts; |
@@ -8593,9 +8432,8 @@ e1000_ich8_flash_cycle(struct e1000_hw *hw, u32 timeout) | |||
8593 | * size - Size of data to read, 1=byte 2=word | 8432 | * size - Size of data to read, 1=byte 2=word |
8594 | * data - Pointer to the word to store the value read. | 8433 | * data - Pointer to the word to store the value read. |
8595 | *****************************************************************************/ | 8434 | *****************************************************************************/ |
8596 | static s32 | 8435 | static s32 e1000_read_ich8_data(struct e1000_hw *hw, u32 index, u32 size, |
8597 | e1000_read_ich8_data(struct e1000_hw *hw, u32 index, | 8436 | u16 *data) |
8598 | u32 size, u16* data) | ||
8599 | { | 8437 | { |
8600 | union ich8_hws_flash_status hsfsts; | 8438 | union ich8_hws_flash_status hsfsts; |
8601 | union ich8_hws_flash_ctrl hsflctl; | 8439 | union ich8_hws_flash_ctrl hsflctl; |
@@ -8672,9 +8510,8 @@ e1000_read_ich8_data(struct e1000_hw *hw, u32 index, | |||
8672 | * size - Size of data to read, 1=byte 2=word | 8510 | * size - Size of data to read, 1=byte 2=word |
8673 | * data - The byte(s) to write to the NVM. | 8511 | * data - The byte(s) to write to the NVM. |
8674 | *****************************************************************************/ | 8512 | *****************************************************************************/ |
8675 | static s32 | 8513 | static s32 e1000_write_ich8_data(struct e1000_hw *hw, u32 index, u32 size, |
8676 | e1000_write_ich8_data(struct e1000_hw *hw, u32 index, u32 size, | 8514 | u16 data) |
8677 | u16 data) | ||
8678 | { | 8515 | { |
8679 | union ich8_hws_flash_status hsfsts; | 8516 | union ich8_hws_flash_status hsfsts; |
8680 | union ich8_hws_flash_ctrl hsflctl; | 8517 | union ich8_hws_flash_ctrl hsflctl; |
@@ -8747,8 +8584,7 @@ e1000_write_ich8_data(struct e1000_hw *hw, u32 index, u32 size, | |||
8747 | * index - The index of the byte to read. | 8584 | * index - The index of the byte to read. |
8748 | * data - Pointer to a byte to store the value read. | 8585 | * data - Pointer to a byte to store the value read. |
8749 | *****************************************************************************/ | 8586 | *****************************************************************************/ |
8750 | static s32 | 8587 | static s32 e1000_read_ich8_byte(struct e1000_hw *hw, u32 index, u8 *data) |
8751 | e1000_read_ich8_byte(struct e1000_hw *hw, u32 index, u8* data) | ||
8752 | { | 8588 | { |
8753 | s32 status = E1000_SUCCESS; | 8589 | s32 status = E1000_SUCCESS; |
8754 | u16 word = 0; | 8590 | u16 word = 0; |
@@ -8770,8 +8606,7 @@ e1000_read_ich8_byte(struct e1000_hw *hw, u32 index, u8* data) | |||
8770 | * index - The index of the byte to write. | 8606 | * index - The index of the byte to write. |
8771 | * byte - The byte to write to the NVM. | 8607 | * byte - The byte to write to the NVM. |
8772 | *****************************************************************************/ | 8608 | *****************************************************************************/ |
8773 | static s32 | 8609 | static s32 e1000_verify_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte) |
8774 | e1000_verify_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte) | ||
8775 | { | 8610 | { |
8776 | s32 error = E1000_SUCCESS; | 8611 | s32 error = E1000_SUCCESS; |
8777 | s32 program_retries = 0; | 8612 | s32 program_retries = 0; |
@@ -8803,8 +8638,7 @@ e1000_verify_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte) | |||
8803 | * index - The index of the byte to read. | 8638 | * index - The index of the byte to read. |
8804 | * data - The byte to write to the NVM. | 8639 | * data - The byte to write to the NVM. |
8805 | *****************************************************************************/ | 8640 | *****************************************************************************/ |
8806 | static s32 | 8641 | static s32 e1000_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 data) |
8807 | e1000_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 data) | ||
8808 | { | 8642 | { |
8809 | s32 status = E1000_SUCCESS; | 8643 | s32 status = E1000_SUCCESS; |
8810 | u16 word = (u16)data; | 8644 | u16 word = (u16)data; |
@@ -8821,8 +8655,7 @@ e1000_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 data) | |||
8821 | * index - The starting byte index of the word to read. | 8655 | * index - The starting byte index of the word to read. |
8822 | * data - Pointer to a word to store the value read. | 8656 | * data - Pointer to a word to store the value read. |
8823 | *****************************************************************************/ | 8657 | *****************************************************************************/ |
8824 | static s32 | 8658 | static s32 e1000_read_ich8_word(struct e1000_hw *hw, u32 index, u16 *data) |
8825 | e1000_read_ich8_word(struct e1000_hw *hw, u32 index, u16 *data) | ||
8826 | { | 8659 | { |
8827 | s32 status = E1000_SUCCESS; | 8660 | s32 status = E1000_SUCCESS; |
8828 | status = e1000_read_ich8_data(hw, index, 2, data); | 8661 | status = e1000_read_ich8_data(hw, index, 2, data); |
@@ -8840,8 +8673,7 @@ e1000_read_ich8_word(struct e1000_hw *hw, u32 index, u16 *data) | |||
8840 | * amount of NVM used in each bank is a *minimum* of 4 KBytes, but in fact the | 8673 | * amount of NVM used in each bank is a *minimum* of 4 KBytes, but in fact the |
8841 | * bank size may be 4, 8 or 64 KBytes | 8674 | * bank size may be 4, 8 or 64 KBytes |
8842 | *****************************************************************************/ | 8675 | *****************************************************************************/ |
8843 | static s32 | 8676 | static s32 e1000_erase_ich8_4k_segment(struct e1000_hw *hw, u32 bank) |
8844 | e1000_erase_ich8_4k_segment(struct e1000_hw *hw, u32 bank) | ||
8845 | { | 8677 | { |
8846 | union ich8_hws_flash_status hsfsts; | 8678 | union ich8_hws_flash_status hsfsts; |
8847 | union ich8_hws_flash_ctrl hsflctl; | 8679 | union ich8_hws_flash_ctrl hsflctl; |
@@ -8930,9 +8762,9 @@ e1000_erase_ich8_4k_segment(struct e1000_hw *hw, u32 bank) | |||
8930 | return error; | 8762 | return error; |
8931 | } | 8763 | } |
8932 | 8764 | ||
8933 | static s32 | 8765 | static s32 e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, |
8934 | e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, | 8766 | u32 cnf_base_addr, |
8935 | u32 cnf_base_addr, u32 cnf_size) | 8767 | u32 cnf_size) |
8936 | { | 8768 | { |
8937 | u32 ret_val = E1000_SUCCESS; | 8769 | u32 ret_val = E1000_SUCCESS; |
8938 | u16 word_addr, reg_data, reg_addr; | 8770 | u16 word_addr, reg_data, reg_addr; |
@@ -8972,8 +8804,7 @@ e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, | |||
8972 | * | 8804 | * |
8973 | * hw: Struct containing variables accessed by shared code | 8805 | * hw: Struct containing variables accessed by shared code |
8974 | *****************************************************************************/ | 8806 | *****************************************************************************/ |
8975 | static s32 | 8807 | static s32 e1000_init_lcd_from_nvm(struct e1000_hw *hw) |
8976 | e1000_init_lcd_from_nvm(struct e1000_hw *hw) | ||
8977 | { | 8808 | { |
8978 | u32 reg_data, cnf_base_addr, cnf_size, ret_val, loop; | 8809 | u32 reg_data, cnf_base_addr, cnf_size, ret_val, loop; |
8979 | 8810 | ||
@@ -8981,32 +8812,32 @@ e1000_init_lcd_from_nvm(struct e1000_hw *hw) | |||
8981 | return E1000_SUCCESS; | 8812 | return E1000_SUCCESS; |
8982 | 8813 | ||
8983 | /* Check if SW needs configure the PHY */ | 8814 | /* Check if SW needs configure the PHY */ |
8984 | reg_data = E1000_READ_REG(hw, FEXTNVM); | 8815 | reg_data = er32(FEXTNVM); |
8985 | if (!(reg_data & FEXTNVM_SW_CONFIG)) | 8816 | if (!(reg_data & FEXTNVM_SW_CONFIG)) |
8986 | return E1000_SUCCESS; | 8817 | return E1000_SUCCESS; |
8987 | 8818 | ||
8988 | /* Wait for basic configuration completes before proceeding*/ | 8819 | /* Wait for basic configuration completes before proceeding*/ |
8989 | loop = 0; | 8820 | loop = 0; |
8990 | do { | 8821 | do { |
8991 | reg_data = E1000_READ_REG(hw, STATUS) & E1000_STATUS_LAN_INIT_DONE; | 8822 | reg_data = er32(STATUS) & E1000_STATUS_LAN_INIT_DONE; |
8992 | udelay(100); | 8823 | udelay(100); |
8993 | loop++; | 8824 | loop++; |
8994 | } while ((!reg_data) && (loop < 50)); | 8825 | } while ((!reg_data) && (loop < 50)); |
8995 | 8826 | ||
8996 | /* Clear the Init Done bit for the next init event */ | 8827 | /* Clear the Init Done bit for the next init event */ |
8997 | reg_data = E1000_READ_REG(hw, STATUS); | 8828 | reg_data = er32(STATUS); |
8998 | reg_data &= ~E1000_STATUS_LAN_INIT_DONE; | 8829 | reg_data &= ~E1000_STATUS_LAN_INIT_DONE; |
8999 | E1000_WRITE_REG(hw, STATUS, reg_data); | 8830 | ew32(STATUS, reg_data); |
9000 | 8831 | ||
9001 | /* Make sure HW does not configure LCD from PHY extended configuration | 8832 | /* Make sure HW does not configure LCD from PHY extended configuration |
9002 | before SW configuration */ | 8833 | before SW configuration */ |
9003 | reg_data = E1000_READ_REG(hw, EXTCNF_CTRL); | 8834 | reg_data = er32(EXTCNF_CTRL); |
9004 | if ((reg_data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) == 0x0000) { | 8835 | if ((reg_data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) == 0x0000) { |
9005 | reg_data = E1000_READ_REG(hw, EXTCNF_SIZE); | 8836 | reg_data = er32(EXTCNF_SIZE); |
9006 | cnf_size = reg_data & E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH; | 8837 | cnf_size = reg_data & E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH; |
9007 | cnf_size >>= 16; | 8838 | cnf_size >>= 16; |
9008 | if (cnf_size) { | 8839 | if (cnf_size) { |
9009 | reg_data = E1000_READ_REG(hw, EXTCNF_CTRL); | 8840 | reg_data = er32(EXTCNF_CTRL); |
9010 | cnf_base_addr = reg_data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER; | 8841 | cnf_base_addr = reg_data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER; |
9011 | /* cnf_base_addr is in DWORD */ | 8842 | /* cnf_base_addr is in DWORD */ |
9012 | cnf_base_addr >>= 16; | 8843 | cnf_base_addr >>= 16; |
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index cf12b05cd011..ad6da7b67e55 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c | |||
@@ -31,12 +31,7 @@ | |||
31 | 31 | ||
32 | char e1000_driver_name[] = "e1000"; | 32 | char e1000_driver_name[] = "e1000"; |
33 | static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; | 33 | static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; |
34 | #ifndef CONFIG_E1000_NAPI | 34 | #define DRV_VERSION "7.3.20-k3-NAPI" |
35 | #define DRIVERNAPI | ||
36 | #else | ||
37 | #define DRIVERNAPI "-NAPI" | ||
38 | #endif | ||
39 | #define DRV_VERSION "7.3.20-k2"DRIVERNAPI | ||
40 | const char e1000_driver_version[] = DRV_VERSION; | 35 | const char e1000_driver_version[] = DRV_VERSION; |
41 | static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; | 36 | static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; |
42 | 37 | ||
@@ -138,7 +133,6 @@ static irqreturn_t e1000_intr(int irq, void *data); | |||
138 | static irqreturn_t e1000_intr_msi(int irq, void *data); | 133 | static irqreturn_t e1000_intr_msi(int irq, void *data); |
139 | static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, | 134 | static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, |
140 | struct e1000_tx_ring *tx_ring); | 135 | struct e1000_tx_ring *tx_ring); |
141 | #ifdef CONFIG_E1000_NAPI | ||
142 | static int e1000_clean(struct napi_struct *napi, int budget); | 136 | static int e1000_clean(struct napi_struct *napi, int budget); |
143 | static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, | 137 | static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, |
144 | struct e1000_rx_ring *rx_ring, | 138 | struct e1000_rx_ring *rx_ring, |
@@ -146,12 +140,6 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
146 | static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | 140 | static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, |
147 | struct e1000_rx_ring *rx_ring, | 141 | struct e1000_rx_ring *rx_ring, |
148 | int *work_done, int work_to_do); | 142 | int *work_done, int work_to_do); |
149 | #else | ||
150 | static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, | ||
151 | struct e1000_rx_ring *rx_ring); | ||
152 | static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | ||
153 | struct e1000_rx_ring *rx_ring); | ||
154 | #endif | ||
155 | static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, | 143 | static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, |
156 | struct e1000_rx_ring *rx_ring, | 144 | struct e1000_rx_ring *rx_ring, |
157 | int cleaned_count); | 145 | int cleaned_count); |
@@ -232,8 +220,7 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); | |||
232 | * loaded. All it does is register with the PCI subsystem. | 220 | * loaded. All it does is register with the PCI subsystem. |
233 | **/ | 221 | **/ |
234 | 222 | ||
235 | static int __init | 223 | static int __init e1000_init_module(void) |
236 | e1000_init_module(void) | ||
237 | { | 224 | { |
238 | int ret; | 225 | int ret; |
239 | printk(KERN_INFO "%s - version %s\n", | 226 | printk(KERN_INFO "%s - version %s\n", |
@@ -261,8 +248,7 @@ module_init(e1000_init_module); | |||
261 | * from memory. | 248 | * from memory. |
262 | **/ | 249 | **/ |
263 | 250 | ||
264 | static void __exit | 251 | static void __exit e1000_exit_module(void) |
265 | e1000_exit_module(void) | ||
266 | { | 252 | { |
267 | pci_unregister_driver(&e1000_driver); | 253 | pci_unregister_driver(&e1000_driver); |
268 | } | 254 | } |
@@ -271,12 +257,13 @@ module_exit(e1000_exit_module); | |||
271 | 257 | ||
272 | static int e1000_request_irq(struct e1000_adapter *adapter) | 258 | static int e1000_request_irq(struct e1000_adapter *adapter) |
273 | { | 259 | { |
260 | struct e1000_hw *hw = &adapter->hw; | ||
274 | struct net_device *netdev = adapter->netdev; | 261 | struct net_device *netdev = adapter->netdev; |
275 | irq_handler_t handler = e1000_intr; | 262 | irq_handler_t handler = e1000_intr; |
276 | int irq_flags = IRQF_SHARED; | 263 | int irq_flags = IRQF_SHARED; |
277 | int err; | 264 | int err; |
278 | 265 | ||
279 | if (adapter->hw.mac_type >= e1000_82571) { | 266 | if (hw->mac_type >= e1000_82571) { |
280 | adapter->have_msi = !pci_enable_msi(adapter->pdev); | 267 | adapter->have_msi = !pci_enable_msi(adapter->pdev); |
281 | if (adapter->have_msi) { | 268 | if (adapter->have_msi) { |
282 | handler = e1000_intr_msi; | 269 | handler = e1000_intr_msi; |
@@ -311,11 +298,12 @@ static void e1000_free_irq(struct e1000_adapter *adapter) | |||
311 | * @adapter: board private structure | 298 | * @adapter: board private structure |
312 | **/ | 299 | **/ |
313 | 300 | ||
314 | static void | 301 | static void e1000_irq_disable(struct e1000_adapter *adapter) |
315 | e1000_irq_disable(struct e1000_adapter *adapter) | ||
316 | { | 302 | { |
317 | E1000_WRITE_REG(&adapter->hw, IMC, ~0); | 303 | struct e1000_hw *hw = &adapter->hw; |
318 | E1000_WRITE_FLUSH(&adapter->hw); | 304 | |
305 | ew32(IMC, ~0); | ||
306 | E1000_WRITE_FLUSH(); | ||
319 | synchronize_irq(adapter->pdev->irq); | 307 | synchronize_irq(adapter->pdev->irq); |
320 | } | 308 | } |
321 | 309 | ||
@@ -324,22 +312,23 @@ e1000_irq_disable(struct e1000_adapter *adapter) | |||
324 | * @adapter: board private structure | 312 | * @adapter: board private structure |
325 | **/ | 313 | **/ |
326 | 314 | ||
327 | static void | 315 | static void e1000_irq_enable(struct e1000_adapter *adapter) |
328 | e1000_irq_enable(struct e1000_adapter *adapter) | ||
329 | { | 316 | { |
330 | E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK); | 317 | struct e1000_hw *hw = &adapter->hw; |
331 | E1000_WRITE_FLUSH(&adapter->hw); | 318 | |
319 | ew32(IMS, IMS_ENABLE_MASK); | ||
320 | E1000_WRITE_FLUSH(); | ||
332 | } | 321 | } |
333 | 322 | ||
334 | static void | 323 | static void e1000_update_mng_vlan(struct e1000_adapter *adapter) |
335 | e1000_update_mng_vlan(struct e1000_adapter *adapter) | ||
336 | { | 324 | { |
325 | struct e1000_hw *hw = &adapter->hw; | ||
337 | struct net_device *netdev = adapter->netdev; | 326 | struct net_device *netdev = adapter->netdev; |
338 | u16 vid = adapter->hw.mng_cookie.vlan_id; | 327 | u16 vid = hw->mng_cookie.vlan_id; |
339 | u16 old_vid = adapter->mng_vlan_id; | 328 | u16 old_vid = adapter->mng_vlan_id; |
340 | if (adapter->vlgrp) { | 329 | if (adapter->vlgrp) { |
341 | if (!vlan_group_get_device(adapter->vlgrp, vid)) { | 330 | if (!vlan_group_get_device(adapter->vlgrp, vid)) { |
342 | if (adapter->hw.mng_cookie.status & | 331 | if (hw->mng_cookie.status & |
343 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { | 332 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { |
344 | e1000_vlan_rx_add_vid(netdev, vid); | 333 | e1000_vlan_rx_add_vid(netdev, vid); |
345 | adapter->mng_vlan_id = vid; | 334 | adapter->mng_vlan_id = vid; |
@@ -366,26 +355,24 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter) | |||
366 | * | 355 | * |
367 | **/ | 356 | **/ |
368 | 357 | ||
369 | static void | 358 | static void e1000_release_hw_control(struct e1000_adapter *adapter) |
370 | e1000_release_hw_control(struct e1000_adapter *adapter) | ||
371 | { | 359 | { |
372 | u32 ctrl_ext; | 360 | u32 ctrl_ext; |
373 | u32 swsm; | 361 | u32 swsm; |
362 | struct e1000_hw *hw = &adapter->hw; | ||
374 | 363 | ||
375 | /* Let firmware taken over control of h/w */ | 364 | /* Let firmware taken over control of h/w */ |
376 | switch (adapter->hw.mac_type) { | 365 | switch (hw->mac_type) { |
377 | case e1000_82573: | 366 | case e1000_82573: |
378 | swsm = E1000_READ_REG(&adapter->hw, SWSM); | 367 | swsm = er32(SWSM); |
379 | E1000_WRITE_REG(&adapter->hw, SWSM, | 368 | ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD); |
380 | swsm & ~E1000_SWSM_DRV_LOAD); | ||
381 | break; | 369 | break; |
382 | case e1000_82571: | 370 | case e1000_82571: |
383 | case e1000_82572: | 371 | case e1000_82572: |
384 | case e1000_80003es2lan: | 372 | case e1000_80003es2lan: |
385 | case e1000_ich8lan: | 373 | case e1000_ich8lan: |
386 | ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); | 374 | ctrl_ext = er32(CTRL_EXT); |
387 | E1000_WRITE_REG(&adapter->hw, CTRL_EXT, | 375 | ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); |
388 | ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); | ||
389 | break; | 376 | break; |
390 | default: | 377 | default: |
391 | break; | 378 | break; |
@@ -403,37 +390,36 @@ e1000_release_hw_control(struct e1000_adapter *adapter) | |||
403 | * | 390 | * |
404 | **/ | 391 | **/ |
405 | 392 | ||
406 | static void | 393 | static void e1000_get_hw_control(struct e1000_adapter *adapter) |
407 | e1000_get_hw_control(struct e1000_adapter *adapter) | ||
408 | { | 394 | { |
409 | u32 ctrl_ext; | 395 | u32 ctrl_ext; |
410 | u32 swsm; | 396 | u32 swsm; |
397 | struct e1000_hw *hw = &adapter->hw; | ||
411 | 398 | ||
412 | /* Let firmware know the driver has taken over */ | 399 | /* Let firmware know the driver has taken over */ |
413 | switch (adapter->hw.mac_type) { | 400 | switch (hw->mac_type) { |
414 | case e1000_82573: | 401 | case e1000_82573: |
415 | swsm = E1000_READ_REG(&adapter->hw, SWSM); | 402 | swsm = er32(SWSM); |
416 | E1000_WRITE_REG(&adapter->hw, SWSM, | 403 | ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD); |
417 | swsm | E1000_SWSM_DRV_LOAD); | ||
418 | break; | 404 | break; |
419 | case e1000_82571: | 405 | case e1000_82571: |
420 | case e1000_82572: | 406 | case e1000_82572: |
421 | case e1000_80003es2lan: | 407 | case e1000_80003es2lan: |
422 | case e1000_ich8lan: | 408 | case e1000_ich8lan: |
423 | ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); | 409 | ctrl_ext = er32(CTRL_EXT); |
424 | E1000_WRITE_REG(&adapter->hw, CTRL_EXT, | 410 | ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); |
425 | ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); | ||
426 | break; | 411 | break; |
427 | default: | 412 | default: |
428 | break; | 413 | break; |
429 | } | 414 | } |
430 | } | 415 | } |
431 | 416 | ||
432 | static void | 417 | static void e1000_init_manageability(struct e1000_adapter *adapter) |
433 | e1000_init_manageability(struct e1000_adapter *adapter) | ||
434 | { | 418 | { |
419 | struct e1000_hw *hw = &adapter->hw; | ||
420 | |||
435 | if (adapter->en_mng_pt) { | 421 | if (adapter->en_mng_pt) { |
436 | u32 manc = E1000_READ_REG(&adapter->hw, MANC); | 422 | u32 manc = er32(MANC); |
437 | 423 | ||
438 | /* disable hardware interception of ARP */ | 424 | /* disable hardware interception of ARP */ |
439 | manc &= ~(E1000_MANC_ARP_EN); | 425 | manc &= ~(E1000_MANC_ARP_EN); |
@@ -441,37 +427,38 @@ e1000_init_manageability(struct e1000_adapter *adapter) | |||
441 | /* enable receiving management packets to the host */ | 427 | /* enable receiving management packets to the host */ |
442 | /* this will probably generate destination unreachable messages | 428 | /* this will probably generate destination unreachable messages |
443 | * from the host OS, but the packets will be handled on SMBUS */ | 429 | * from the host OS, but the packets will be handled on SMBUS */ |
444 | if (adapter->hw.has_manc2h) { | 430 | if (hw->has_manc2h) { |
445 | u32 manc2h = E1000_READ_REG(&adapter->hw, MANC2H); | 431 | u32 manc2h = er32(MANC2H); |
446 | 432 | ||
447 | manc |= E1000_MANC_EN_MNG2HOST; | 433 | manc |= E1000_MANC_EN_MNG2HOST; |
448 | #define E1000_MNG2HOST_PORT_623 (1 << 5) | 434 | #define E1000_MNG2HOST_PORT_623 (1 << 5) |
449 | #define E1000_MNG2HOST_PORT_664 (1 << 6) | 435 | #define E1000_MNG2HOST_PORT_664 (1 << 6) |
450 | manc2h |= E1000_MNG2HOST_PORT_623; | 436 | manc2h |= E1000_MNG2HOST_PORT_623; |
451 | manc2h |= E1000_MNG2HOST_PORT_664; | 437 | manc2h |= E1000_MNG2HOST_PORT_664; |
452 | E1000_WRITE_REG(&adapter->hw, MANC2H, manc2h); | 438 | ew32(MANC2H, manc2h); |
453 | } | 439 | } |
454 | 440 | ||
455 | E1000_WRITE_REG(&adapter->hw, MANC, manc); | 441 | ew32(MANC, manc); |
456 | } | 442 | } |
457 | } | 443 | } |
458 | 444 | ||
459 | static void | 445 | static void e1000_release_manageability(struct e1000_adapter *adapter) |
460 | e1000_release_manageability(struct e1000_adapter *adapter) | ||
461 | { | 446 | { |
447 | struct e1000_hw *hw = &adapter->hw; | ||
448 | |||
462 | if (adapter->en_mng_pt) { | 449 | if (adapter->en_mng_pt) { |
463 | u32 manc = E1000_READ_REG(&adapter->hw, MANC); | 450 | u32 manc = er32(MANC); |
464 | 451 | ||
465 | /* re-enable hardware interception of ARP */ | 452 | /* re-enable hardware interception of ARP */ |
466 | manc |= E1000_MANC_ARP_EN; | 453 | manc |= E1000_MANC_ARP_EN; |
467 | 454 | ||
468 | if (adapter->hw.has_manc2h) | 455 | if (hw->has_manc2h) |
469 | manc &= ~E1000_MANC_EN_MNG2HOST; | 456 | manc &= ~E1000_MANC_EN_MNG2HOST; |
470 | 457 | ||
471 | /* don't explicitly have to mess with MANC2H since | 458 | /* don't explicitly have to mess with MANC2H since |
472 | * MANC has an enable disable that gates MANC2H */ | 459 | * MANC has an enable disable that gates MANC2H */ |
473 | 460 | ||
474 | E1000_WRITE_REG(&adapter->hw, MANC, manc); | 461 | ew32(MANC, manc); |
475 | } | 462 | } |
476 | } | 463 | } |
477 | 464 | ||
@@ -506,18 +493,19 @@ static void e1000_configure(struct e1000_adapter *adapter) | |||
506 | 493 | ||
507 | int e1000_up(struct e1000_adapter *adapter) | 494 | int e1000_up(struct e1000_adapter *adapter) |
508 | { | 495 | { |
496 | struct e1000_hw *hw = &adapter->hw; | ||
497 | |||
509 | /* hardware has been reset, we need to reload some things */ | 498 | /* hardware has been reset, we need to reload some things */ |
510 | e1000_configure(adapter); | 499 | e1000_configure(adapter); |
511 | 500 | ||
512 | clear_bit(__E1000_DOWN, &adapter->flags); | 501 | clear_bit(__E1000_DOWN, &adapter->flags); |
513 | 502 | ||
514 | #ifdef CONFIG_E1000_NAPI | ||
515 | napi_enable(&adapter->napi); | 503 | napi_enable(&adapter->napi); |
516 | #endif | 504 | |
517 | e1000_irq_enable(adapter); | 505 | e1000_irq_enable(adapter); |
518 | 506 | ||
519 | /* fire a link change interrupt to start the watchdog */ | 507 | /* fire a link change interrupt to start the watchdog */ |
520 | E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_LSC); | 508 | ew32(ICS, E1000_ICS_LSC); |
521 | return 0; | 509 | return 0; |
522 | } | 510 | } |
523 | 511 | ||
@@ -533,30 +521,33 @@ int e1000_up(struct e1000_adapter *adapter) | |||
533 | 521 | ||
534 | void e1000_power_up_phy(struct e1000_adapter *adapter) | 522 | void e1000_power_up_phy(struct e1000_adapter *adapter) |
535 | { | 523 | { |
524 | struct e1000_hw *hw = &adapter->hw; | ||
536 | u16 mii_reg = 0; | 525 | u16 mii_reg = 0; |
537 | 526 | ||
538 | /* Just clear the power down bit to wake the phy back up */ | 527 | /* Just clear the power down bit to wake the phy back up */ |
539 | if (adapter->hw.media_type == e1000_media_type_copper) { | 528 | if (hw->media_type == e1000_media_type_copper) { |
540 | /* according to the manual, the phy will retain its | 529 | /* according to the manual, the phy will retain its |
541 | * settings across a power-down/up cycle */ | 530 | * settings across a power-down/up cycle */ |
542 | e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); | 531 | e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); |
543 | mii_reg &= ~MII_CR_POWER_DOWN; | 532 | mii_reg &= ~MII_CR_POWER_DOWN; |
544 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg); | 533 | e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); |
545 | } | 534 | } |
546 | } | 535 | } |
547 | 536 | ||
548 | static void e1000_power_down_phy(struct e1000_adapter *adapter) | 537 | static void e1000_power_down_phy(struct e1000_adapter *adapter) |
549 | { | 538 | { |
539 | struct e1000_hw *hw = &adapter->hw; | ||
540 | |||
550 | /* Power down the PHY so no link is implied when interface is down * | 541 | /* Power down the PHY so no link is implied when interface is down * |
551 | * The PHY cannot be powered down if any of the following is true * | 542 | * The PHY cannot be powered down if any of the following is true * |
552 | * (a) WoL is enabled | 543 | * (a) WoL is enabled |
553 | * (b) AMT is active | 544 | * (b) AMT is active |
554 | * (c) SoL/IDER session is active */ | 545 | * (c) SoL/IDER session is active */ |
555 | if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 && | 546 | if (!adapter->wol && hw->mac_type >= e1000_82540 && |
556 | adapter->hw.media_type == e1000_media_type_copper) { | 547 | hw->media_type == e1000_media_type_copper) { |
557 | u16 mii_reg = 0; | 548 | u16 mii_reg = 0; |
558 | 549 | ||
559 | switch (adapter->hw.mac_type) { | 550 | switch (hw->mac_type) { |
560 | case e1000_82540: | 551 | case e1000_82540: |
561 | case e1000_82545: | 552 | case e1000_82545: |
562 | case e1000_82545_rev_3: | 553 | case e1000_82545_rev_3: |
@@ -566,8 +557,7 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter) | |||
566 | case e1000_82541_rev_2: | 557 | case e1000_82541_rev_2: |
567 | case e1000_82547: | 558 | case e1000_82547: |
568 | case e1000_82547_rev_2: | 559 | case e1000_82547_rev_2: |
569 | if (E1000_READ_REG(&adapter->hw, MANC) & | 560 | if (er32(MANC) & E1000_MANC_SMBUS_EN) |
570 | E1000_MANC_SMBUS_EN) | ||
571 | goto out; | 561 | goto out; |
572 | break; | 562 | break; |
573 | case e1000_82571: | 563 | case e1000_82571: |
@@ -575,24 +565,23 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter) | |||
575 | case e1000_82573: | 565 | case e1000_82573: |
576 | case e1000_80003es2lan: | 566 | case e1000_80003es2lan: |
577 | case e1000_ich8lan: | 567 | case e1000_ich8lan: |
578 | if (e1000_check_mng_mode(&adapter->hw) || | 568 | if (e1000_check_mng_mode(hw) || |
579 | e1000_check_phy_reset_block(&adapter->hw)) | 569 | e1000_check_phy_reset_block(hw)) |
580 | goto out; | 570 | goto out; |
581 | break; | 571 | break; |
582 | default: | 572 | default: |
583 | goto out; | 573 | goto out; |
584 | } | 574 | } |
585 | e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); | 575 | e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); |
586 | mii_reg |= MII_CR_POWER_DOWN; | 576 | mii_reg |= MII_CR_POWER_DOWN; |
587 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg); | 577 | e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); |
588 | mdelay(1); | 578 | mdelay(1); |
589 | } | 579 | } |
590 | out: | 580 | out: |
591 | return; | 581 | return; |
592 | } | 582 | } |
593 | 583 | ||
594 | void | 584 | void e1000_down(struct e1000_adapter *adapter) |
595 | e1000_down(struct e1000_adapter *adapter) | ||
596 | { | 585 | { |
597 | struct net_device *netdev = adapter->netdev; | 586 | struct net_device *netdev = adapter->netdev; |
598 | 587 | ||
@@ -600,9 +589,8 @@ e1000_down(struct e1000_adapter *adapter) | |||
600 | * reschedule our watchdog timer */ | 589 | * reschedule our watchdog timer */ |
601 | set_bit(__E1000_DOWN, &adapter->flags); | 590 | set_bit(__E1000_DOWN, &adapter->flags); |
602 | 591 | ||
603 | #ifdef CONFIG_E1000_NAPI | ||
604 | napi_disable(&adapter->napi); | 592 | napi_disable(&adapter->napi); |
605 | #endif | 593 | |
606 | e1000_irq_disable(adapter); | 594 | e1000_irq_disable(adapter); |
607 | 595 | ||
608 | del_timer_sync(&adapter->tx_fifo_stall_timer); | 596 | del_timer_sync(&adapter->tx_fifo_stall_timer); |
@@ -620,8 +608,7 @@ e1000_down(struct e1000_adapter *adapter) | |||
620 | e1000_clean_all_rx_rings(adapter); | 608 | e1000_clean_all_rx_rings(adapter); |
621 | } | 609 | } |
622 | 610 | ||
623 | void | 611 | void e1000_reinit_locked(struct e1000_adapter *adapter) |
624 | e1000_reinit_locked(struct e1000_adapter *adapter) | ||
625 | { | 612 | { |
626 | WARN_ON(in_interrupt()); | 613 | WARN_ON(in_interrupt()); |
627 | while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) | 614 | while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) |
@@ -631,9 +618,9 @@ e1000_reinit_locked(struct e1000_adapter *adapter) | |||
631 | clear_bit(__E1000_RESETTING, &adapter->flags); | 618 | clear_bit(__E1000_RESETTING, &adapter->flags); |
632 | } | 619 | } |
633 | 620 | ||
634 | void | 621 | void e1000_reset(struct e1000_adapter *adapter) |
635 | e1000_reset(struct e1000_adapter *adapter) | ||
636 | { | 622 | { |
623 | struct e1000_hw *hw = &adapter->hw; | ||
637 | u32 pba = 0, tx_space, min_tx_space, min_rx_space; | 624 | u32 pba = 0, tx_space, min_tx_space, min_rx_space; |
638 | u16 fc_high_water_mark = E1000_FC_HIGH_DIFF; | 625 | u16 fc_high_water_mark = E1000_FC_HIGH_DIFF; |
639 | bool legacy_pba_adjust = false; | 626 | bool legacy_pba_adjust = false; |
@@ -642,7 +629,7 @@ e1000_reset(struct e1000_adapter *adapter) | |||
642 | * To take effect CTRL.RST is required. | 629 | * To take effect CTRL.RST is required. |
643 | */ | 630 | */ |
644 | 631 | ||
645 | switch (adapter->hw.mac_type) { | 632 | switch (hw->mac_type) { |
646 | case e1000_82542_rev2_0: | 633 | case e1000_82542_rev2_0: |
647 | case e1000_82542_rev2_1: | 634 | case e1000_82542_rev2_1: |
648 | case e1000_82543: | 635 | case e1000_82543: |
@@ -683,16 +670,16 @@ e1000_reset(struct e1000_adapter *adapter) | |||
683 | if (adapter->netdev->mtu > E1000_RXBUFFER_8192) | 670 | if (adapter->netdev->mtu > E1000_RXBUFFER_8192) |
684 | pba -= 8; /* allocate more FIFO for Tx */ | 671 | pba -= 8; /* allocate more FIFO for Tx */ |
685 | 672 | ||
686 | if (adapter->hw.mac_type == e1000_82547) { | 673 | if (hw->mac_type == e1000_82547) { |
687 | adapter->tx_fifo_head = 0; | 674 | adapter->tx_fifo_head = 0; |
688 | adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; | 675 | adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; |
689 | adapter->tx_fifo_size = | 676 | adapter->tx_fifo_size = |
690 | (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT; | 677 | (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT; |
691 | atomic_set(&adapter->tx_fifo_stall, 0); | 678 | atomic_set(&adapter->tx_fifo_stall, 0); |
692 | } | 679 | } |
693 | } else if (adapter->hw.max_frame_size > MAXIMUM_ETHERNET_FRAME_SIZE) { | 680 | } else if (hw->max_frame_size > MAXIMUM_ETHERNET_FRAME_SIZE) { |
694 | /* adjust PBA for jumbo frames */ | 681 | /* adjust PBA for jumbo frames */ |
695 | E1000_WRITE_REG(&adapter->hw, PBA, pba); | 682 | ew32(PBA, pba); |
696 | 683 | ||
697 | /* To maintain wire speed transmits, the Tx FIFO should be | 684 | /* To maintain wire speed transmits, the Tx FIFO should be |
698 | * large enough to accomodate two full transmit packets, | 685 | * large enough to accomodate two full transmit packets, |
@@ -700,7 +687,7 @@ e1000_reset(struct e1000_adapter *adapter) | |||
700 | * the Rx FIFO should be large enough to accomodate at least | 687 | * the Rx FIFO should be large enough to accomodate at least |
701 | * one full receive packet and is similarly rounded up and | 688 | * one full receive packet and is similarly rounded up and |
702 | * expressed in KB. */ | 689 | * expressed in KB. */ |
703 | pba = E1000_READ_REG(&adapter->hw, PBA); | 690 | pba = er32(PBA); |
704 | /* upper 16 bits has Tx packet buffer allocation size in KB */ | 691 | /* upper 16 bits has Tx packet buffer allocation size in KB */ |
705 | tx_space = pba >> 16; | 692 | tx_space = pba >> 16; |
706 | /* lower 16 bits has Rx packet buffer allocation size in KB */ | 693 | /* lower 16 bits has Rx packet buffer allocation size in KB */ |
@@ -723,7 +710,7 @@ e1000_reset(struct e1000_adapter *adapter) | |||
723 | pba = pba - (min_tx_space - tx_space); | 710 | pba = pba - (min_tx_space - tx_space); |
724 | 711 | ||
725 | /* PCI/PCIx hardware has PBA alignment constraints */ | 712 | /* PCI/PCIx hardware has PBA alignment constraints */ |
726 | switch (adapter->hw.mac_type) { | 713 | switch (hw->mac_type) { |
727 | case e1000_82545 ... e1000_82546_rev_3: | 714 | case e1000_82545 ... e1000_82546_rev_3: |
728 | pba &= ~(E1000_PBA_8K - 1); | 715 | pba &= ~(E1000_PBA_8K - 1); |
729 | break; | 716 | break; |
@@ -734,7 +721,7 @@ e1000_reset(struct e1000_adapter *adapter) | |||
734 | /* if short on rx space, rx wins and must trump tx | 721 | /* if short on rx space, rx wins and must trump tx |
735 | * adjustment or use Early Receive if available */ | 722 | * adjustment or use Early Receive if available */ |
736 | if (pba < min_rx_space) { | 723 | if (pba < min_rx_space) { |
737 | switch (adapter->hw.mac_type) { | 724 | switch (hw->mac_type) { |
738 | case e1000_82573: | 725 | case e1000_82573: |
739 | /* ERT enabled in e1000_configure_rx */ | 726 | /* ERT enabled in e1000_configure_rx */ |
740 | break; | 727 | break; |
@@ -746,7 +733,7 @@ e1000_reset(struct e1000_adapter *adapter) | |||
746 | } | 733 | } |
747 | } | 734 | } |
748 | 735 | ||
749 | E1000_WRITE_REG(&adapter->hw, PBA, pba); | 736 | ew32(PBA, pba); |
750 | 737 | ||
751 | /* flow control settings */ | 738 | /* flow control settings */ |
752 | /* Set the FC high water mark to 90% of the FIFO size. | 739 | /* Set the FC high water mark to 90% of the FIFO size. |
@@ -759,54 +746,54 @@ e1000_reset(struct e1000_adapter *adapter) | |||
759 | if (pba < E1000_PBA_16K) | 746 | if (pba < E1000_PBA_16K) |
760 | fc_high_water_mark = (pba * 1024) - 1600; | 747 | fc_high_water_mark = (pba * 1024) - 1600; |
761 | 748 | ||
762 | adapter->hw.fc_high_water = fc_high_water_mark; | 749 | hw->fc_high_water = fc_high_water_mark; |
763 | adapter->hw.fc_low_water = fc_high_water_mark - 8; | 750 | hw->fc_low_water = fc_high_water_mark - 8; |
764 | if (adapter->hw.mac_type == e1000_80003es2lan) | 751 | if (hw->mac_type == e1000_80003es2lan) |
765 | adapter->hw.fc_pause_time = 0xFFFF; | 752 | hw->fc_pause_time = 0xFFFF; |
766 | else | 753 | else |
767 | adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME; | 754 | hw->fc_pause_time = E1000_FC_PAUSE_TIME; |
768 | adapter->hw.fc_send_xon = 1; | 755 | hw->fc_send_xon = 1; |
769 | adapter->hw.fc = adapter->hw.original_fc; | 756 | hw->fc = hw->original_fc; |
770 | 757 | ||
771 | /* Allow time for pending master requests to run */ | 758 | /* Allow time for pending master requests to run */ |
772 | e1000_reset_hw(&adapter->hw); | 759 | e1000_reset_hw(hw); |
773 | if (adapter->hw.mac_type >= e1000_82544) | 760 | if (hw->mac_type >= e1000_82544) |
774 | E1000_WRITE_REG(&adapter->hw, WUC, 0); | 761 | ew32(WUC, 0); |
775 | 762 | ||
776 | if (e1000_init_hw(&adapter->hw)) | 763 | if (e1000_init_hw(hw)) |
777 | DPRINTK(PROBE, ERR, "Hardware Error\n"); | 764 | DPRINTK(PROBE, ERR, "Hardware Error\n"); |
778 | e1000_update_mng_vlan(adapter); | 765 | e1000_update_mng_vlan(adapter); |
779 | 766 | ||
780 | /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */ | 767 | /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */ |
781 | if (adapter->hw.mac_type >= e1000_82544 && | 768 | if (hw->mac_type >= e1000_82544 && |
782 | adapter->hw.mac_type <= e1000_82547_rev_2 && | 769 | hw->mac_type <= e1000_82547_rev_2 && |
783 | adapter->hw.autoneg == 1 && | 770 | hw->autoneg == 1 && |
784 | adapter->hw.autoneg_advertised == ADVERTISE_1000_FULL) { | 771 | hw->autoneg_advertised == ADVERTISE_1000_FULL) { |
785 | u32 ctrl = E1000_READ_REG(&adapter->hw, CTRL); | 772 | u32 ctrl = er32(CTRL); |
786 | /* clear phy power management bit if we are in gig only mode, | 773 | /* clear phy power management bit if we are in gig only mode, |
787 | * which if enabled will attempt negotiation to 100Mb, which | 774 | * which if enabled will attempt negotiation to 100Mb, which |
788 | * can cause a loss of link at power off or driver unload */ | 775 | * can cause a loss of link at power off or driver unload */ |
789 | ctrl &= ~E1000_CTRL_SWDPIN3; | 776 | ctrl &= ~E1000_CTRL_SWDPIN3; |
790 | E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); | 777 | ew32(CTRL, ctrl); |
791 | } | 778 | } |
792 | 779 | ||
793 | /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ | 780 | /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ |
794 | E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE); | 781 | ew32(VET, ETHERNET_IEEE_VLAN_TYPE); |
795 | 782 | ||
796 | e1000_reset_adaptive(&adapter->hw); | 783 | e1000_reset_adaptive(hw); |
797 | e1000_phy_get_info(&adapter->hw, &adapter->phy_info); | 784 | e1000_phy_get_info(hw, &adapter->phy_info); |
798 | 785 | ||
799 | if (!adapter->smart_power_down && | 786 | if (!adapter->smart_power_down && |
800 | (adapter->hw.mac_type == e1000_82571 || | 787 | (hw->mac_type == e1000_82571 || |
801 | adapter->hw.mac_type == e1000_82572)) { | 788 | hw->mac_type == e1000_82572)) { |
802 | u16 phy_data = 0; | 789 | u16 phy_data = 0; |
803 | /* speed up time to link by disabling smart power down, ignore | 790 | /* speed up time to link by disabling smart power down, ignore |
804 | * the return value of this function because there is nothing | 791 | * the return value of this function because there is nothing |
805 | * different we would do if it failed */ | 792 | * different we would do if it failed */ |
806 | e1000_read_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT, | 793 | e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, |
807 | &phy_data); | 794 | &phy_data); |
808 | phy_data &= ~IGP02E1000_PM_SPD; | 795 | phy_data &= ~IGP02E1000_PM_SPD; |
809 | e1000_write_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT, | 796 | e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, |
810 | phy_data); | 797 | phy_data); |
811 | } | 798 | } |
812 | 799 | ||
@@ -865,13 +852,49 @@ static void e1000_dump_eeprom(struct e1000_adapter *adapter) | |||
865 | printk(KERN_ERR "to enable this network device.\n"); | 852 | printk(KERN_ERR "to enable this network device.\n"); |
866 | printk(KERN_ERR "Please inspect the EEPROM dump and report the issue " | 853 | printk(KERN_ERR "Please inspect the EEPROM dump and report the issue " |
867 | "to your hardware vendor\n"); | 854 | "to your hardware vendor\n"); |
868 | printk(KERN_ERR "or Intel Customer Support: linux-nics@intel.com\n"); | 855 | printk(KERN_ERR "or Intel Customer Support.\n"); |
869 | printk(KERN_ERR "/*********************/\n"); | 856 | printk(KERN_ERR "/*********************/\n"); |
870 | 857 | ||
871 | kfree(data); | 858 | kfree(data); |
872 | } | 859 | } |
873 | 860 | ||
874 | /** | 861 | /** |
862 | * e1000_is_need_ioport - determine if an adapter needs ioport resources or not | ||
863 | * @pdev: PCI device information struct | ||
864 | * | ||
865 | * Return true if an adapter needs ioport resources | ||
866 | **/ | ||
867 | static int e1000_is_need_ioport(struct pci_dev *pdev) | ||
868 | { | ||
869 | switch (pdev->device) { | ||
870 | case E1000_DEV_ID_82540EM: | ||
871 | case E1000_DEV_ID_82540EM_LOM: | ||
872 | case E1000_DEV_ID_82540EP: | ||
873 | case E1000_DEV_ID_82540EP_LOM: | ||
874 | case E1000_DEV_ID_82540EP_LP: | ||
875 | case E1000_DEV_ID_82541EI: | ||
876 | case E1000_DEV_ID_82541EI_MOBILE: | ||
877 | case E1000_DEV_ID_82541ER: | ||
878 | case E1000_DEV_ID_82541ER_LOM: | ||
879 | case E1000_DEV_ID_82541GI: | ||
880 | case E1000_DEV_ID_82541GI_LF: | ||
881 | case E1000_DEV_ID_82541GI_MOBILE: | ||
882 | case E1000_DEV_ID_82544EI_COPPER: | ||
883 | case E1000_DEV_ID_82544EI_FIBER: | ||
884 | case E1000_DEV_ID_82544GC_COPPER: | ||
885 | case E1000_DEV_ID_82544GC_LOM: | ||
886 | case E1000_DEV_ID_82545EM_COPPER: | ||
887 | case E1000_DEV_ID_82545EM_FIBER: | ||
888 | case E1000_DEV_ID_82546EB_COPPER: | ||
889 | case E1000_DEV_ID_82546EB_FIBER: | ||
890 | case E1000_DEV_ID_82546EB_QUAD_COPPER: | ||
891 | return true; | ||
892 | default: | ||
893 | return false; | ||
894 | } | ||
895 | } | ||
896 | |||
897 | /** | ||
875 | * e1000_probe - Device Initialization Routine | 898 | * e1000_probe - Device Initialization Routine |
876 | * @pdev: PCI device information struct | 899 | * @pdev: PCI device information struct |
877 | * @ent: entry in e1000_pci_tbl | 900 | * @ent: entry in e1000_pci_tbl |
@@ -882,37 +905,51 @@ static void e1000_dump_eeprom(struct e1000_adapter *adapter) | |||
882 | * The OS initialization, configuring of the adapter private structure, | 905 | * The OS initialization, configuring of the adapter private structure, |
883 | * and a hardware reset occur. | 906 | * and a hardware reset occur. |
884 | **/ | 907 | **/ |
885 | 908 | static int __devinit e1000_probe(struct pci_dev *pdev, | |
886 | static int __devinit | 909 | const struct pci_device_id *ent) |
887 | e1000_probe(struct pci_dev *pdev, | ||
888 | const struct pci_device_id *ent) | ||
889 | { | 910 | { |
890 | struct net_device *netdev; | 911 | struct net_device *netdev; |
891 | struct e1000_adapter *adapter; | 912 | struct e1000_adapter *adapter; |
913 | struct e1000_hw *hw; | ||
892 | 914 | ||
893 | static int cards_found = 0; | 915 | static int cards_found = 0; |
894 | static int global_quad_port_a = 0; /* global ksp3 port a indication */ | 916 | static int global_quad_port_a = 0; /* global ksp3 port a indication */ |
895 | int i, err, pci_using_dac; | 917 | int i, err, pci_using_dac; |
896 | u16 eeprom_data = 0; | 918 | u16 eeprom_data = 0; |
897 | u16 eeprom_apme_mask = E1000_EEPROM_APME; | 919 | u16 eeprom_apme_mask = E1000_EEPROM_APME; |
920 | int bars, need_ioport; | ||
898 | DECLARE_MAC_BUF(mac); | 921 | DECLARE_MAC_BUF(mac); |
899 | 922 | ||
900 | if ((err = pci_enable_device(pdev))) | 923 | /* do not allocate ioport bars when not needed */ |
924 | need_ioport = e1000_is_need_ioport(pdev); | ||
925 | if (need_ioport) { | ||
926 | bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); | ||
927 | err = pci_enable_device(pdev); | ||
928 | } else { | ||
929 | bars = pci_select_bars(pdev, IORESOURCE_MEM); | ||
930 | err = pci_enable_device(pdev); | ||
931 | } | ||
932 | if (err) | ||
901 | return err; | 933 | return err; |
902 | 934 | ||
903 | if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) && | 935 | if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) && |
904 | !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) { | 936 | !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) { |
905 | pci_using_dac = 1; | 937 | pci_using_dac = 1; |
906 | } else { | 938 | } else { |
907 | if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) && | 939 | err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); |
908 | (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) { | 940 | if (err) { |
909 | E1000_ERR("No usable DMA configuration, aborting\n"); | 941 | err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); |
910 | goto err_dma; | 942 | if (err) { |
943 | E1000_ERR("No usable DMA configuration, " | ||
944 | "aborting\n"); | ||
945 | goto err_dma; | ||
946 | } | ||
911 | } | 947 | } |
912 | pci_using_dac = 0; | 948 | pci_using_dac = 0; |
913 | } | 949 | } |
914 | 950 | ||
915 | if ((err = pci_request_regions(pdev, e1000_driver_name))) | 951 | err = pci_request_selected_regions(pdev, bars, e1000_driver_name); |
952 | if (err) | ||
916 | goto err_pci_reg; | 953 | goto err_pci_reg; |
917 | 954 | ||
918 | pci_set_master(pdev); | 955 | pci_set_master(pdev); |
@@ -928,21 +965,27 @@ e1000_probe(struct pci_dev *pdev, | |||
928 | adapter = netdev_priv(netdev); | 965 | adapter = netdev_priv(netdev); |
929 | adapter->netdev = netdev; | 966 | adapter->netdev = netdev; |
930 | adapter->pdev = pdev; | 967 | adapter->pdev = pdev; |
931 | adapter->hw.back = adapter; | ||
932 | adapter->msg_enable = (1 << debug) - 1; | 968 | adapter->msg_enable = (1 << debug) - 1; |
969 | adapter->bars = bars; | ||
970 | adapter->need_ioport = need_ioport; | ||
971 | |||
972 | hw = &adapter->hw; | ||
973 | hw->back = adapter; | ||
933 | 974 | ||
934 | err = -EIO; | 975 | err = -EIO; |
935 | adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, BAR_0), | 976 | hw->hw_addr = ioremap(pci_resource_start(pdev, BAR_0), |
936 | pci_resource_len(pdev, BAR_0)); | 977 | pci_resource_len(pdev, BAR_0)); |
937 | if (!adapter->hw.hw_addr) | 978 | if (!hw->hw_addr) |
938 | goto err_ioremap; | 979 | goto err_ioremap; |
939 | 980 | ||
940 | for (i = BAR_1; i <= BAR_5; i++) { | 981 | if (adapter->need_ioport) { |
941 | if (pci_resource_len(pdev, i) == 0) | 982 | for (i = BAR_1; i <= BAR_5; i++) { |
942 | continue; | 983 | if (pci_resource_len(pdev, i) == 0) |
943 | if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { | 984 | continue; |
944 | adapter->hw.io_base = pci_resource_start(pdev, i); | 985 | if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { |
945 | break; | 986 | hw->io_base = pci_resource_start(pdev, i); |
987 | break; | ||
988 | } | ||
946 | } | 989 | } |
947 | } | 990 | } |
948 | 991 | ||
@@ -957,9 +1000,7 @@ e1000_probe(struct pci_dev *pdev, | |||
957 | e1000_set_ethtool_ops(netdev); | 1000 | e1000_set_ethtool_ops(netdev); |
958 | netdev->tx_timeout = &e1000_tx_timeout; | 1001 | netdev->tx_timeout = &e1000_tx_timeout; |
959 | netdev->watchdog_timeo = 5 * HZ; | 1002 | netdev->watchdog_timeo = 5 * HZ; |
960 | #ifdef CONFIG_E1000_NAPI | ||
961 | netif_napi_add(netdev, &adapter->napi, e1000_clean, 64); | 1003 | netif_napi_add(netdev, &adapter->napi, e1000_clean, 64); |
962 | #endif | ||
963 | netdev->vlan_rx_register = e1000_vlan_rx_register; | 1004 | netdev->vlan_rx_register = e1000_vlan_rx_register; |
964 | netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid; | 1005 | netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid; |
965 | netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid; | 1006 | netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid; |
@@ -972,49 +1013,50 @@ e1000_probe(struct pci_dev *pdev, | |||
972 | 1013 | ||
973 | /* setup the private structure */ | 1014 | /* setup the private structure */ |
974 | 1015 | ||
975 | if ((err = e1000_sw_init(adapter))) | 1016 | err = e1000_sw_init(adapter); |
1017 | if (err) | ||
976 | goto err_sw_init; | 1018 | goto err_sw_init; |
977 | 1019 | ||
978 | err = -EIO; | 1020 | err = -EIO; |
979 | /* Flash BAR mapping must happen after e1000_sw_init | 1021 | /* Flash BAR mapping must happen after e1000_sw_init |
980 | * because it depends on mac_type */ | 1022 | * because it depends on mac_type */ |
981 | if ((adapter->hw.mac_type == e1000_ich8lan) && | 1023 | if ((hw->mac_type == e1000_ich8lan) && |
982 | (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { | 1024 | (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { |
983 | adapter->hw.flash_address = | 1025 | hw->flash_address = |
984 | ioremap(pci_resource_start(pdev, 1), | 1026 | ioremap(pci_resource_start(pdev, 1), |
985 | pci_resource_len(pdev, 1)); | 1027 | pci_resource_len(pdev, 1)); |
986 | if (!adapter->hw.flash_address) | 1028 | if (!hw->flash_address) |
987 | goto err_flashmap; | 1029 | goto err_flashmap; |
988 | } | 1030 | } |
989 | 1031 | ||
990 | if (e1000_check_phy_reset_block(&adapter->hw)) | 1032 | if (e1000_check_phy_reset_block(hw)) |
991 | DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n"); | 1033 | DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n"); |
992 | 1034 | ||
993 | if (adapter->hw.mac_type >= e1000_82543) { | 1035 | if (hw->mac_type >= e1000_82543) { |
994 | netdev->features = NETIF_F_SG | | 1036 | netdev->features = NETIF_F_SG | |
995 | NETIF_F_HW_CSUM | | 1037 | NETIF_F_HW_CSUM | |
996 | NETIF_F_HW_VLAN_TX | | 1038 | NETIF_F_HW_VLAN_TX | |
997 | NETIF_F_HW_VLAN_RX | | 1039 | NETIF_F_HW_VLAN_RX | |
998 | NETIF_F_HW_VLAN_FILTER; | 1040 | NETIF_F_HW_VLAN_FILTER; |
999 | if (adapter->hw.mac_type == e1000_ich8lan) | 1041 | if (hw->mac_type == e1000_ich8lan) |
1000 | netdev->features &= ~NETIF_F_HW_VLAN_FILTER; | 1042 | netdev->features &= ~NETIF_F_HW_VLAN_FILTER; |
1001 | } | 1043 | } |
1002 | 1044 | ||
1003 | if ((adapter->hw.mac_type >= e1000_82544) && | 1045 | if ((hw->mac_type >= e1000_82544) && |
1004 | (adapter->hw.mac_type != e1000_82547)) | 1046 | (hw->mac_type != e1000_82547)) |
1005 | netdev->features |= NETIF_F_TSO; | 1047 | netdev->features |= NETIF_F_TSO; |
1006 | 1048 | ||
1007 | if (adapter->hw.mac_type > e1000_82547_rev_2) | 1049 | if (hw->mac_type > e1000_82547_rev_2) |
1008 | netdev->features |= NETIF_F_TSO6; | 1050 | netdev->features |= NETIF_F_TSO6; |
1009 | if (pci_using_dac) | 1051 | if (pci_using_dac) |
1010 | netdev->features |= NETIF_F_HIGHDMA; | 1052 | netdev->features |= NETIF_F_HIGHDMA; |
1011 | 1053 | ||
1012 | netdev->features |= NETIF_F_LLTX; | 1054 | netdev->features |= NETIF_F_LLTX; |
1013 | 1055 | ||
1014 | adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw); | 1056 | adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw); |
1015 | 1057 | ||
1016 | /* initialize eeprom parameters */ | 1058 | /* initialize eeprom parameters */ |
1017 | if (e1000_init_eeprom_params(&adapter->hw)) { | 1059 | if (e1000_init_eeprom_params(hw)) { |
1018 | E1000_ERR("EEPROM initialization failed\n"); | 1060 | E1000_ERR("EEPROM initialization failed\n"); |
1019 | goto err_eeprom; | 1061 | goto err_eeprom; |
1020 | } | 1062 | } |
@@ -1022,10 +1064,10 @@ e1000_probe(struct pci_dev *pdev, | |||
1022 | /* before reading the EEPROM, reset the controller to | 1064 | /* before reading the EEPROM, reset the controller to |
1023 | * put the device in a known good starting state */ | 1065 | * put the device in a known good starting state */ |
1024 | 1066 | ||
1025 | e1000_reset_hw(&adapter->hw); | 1067 | e1000_reset_hw(hw); |
1026 | 1068 | ||
1027 | /* make sure the EEPROM is good */ | 1069 | /* make sure the EEPROM is good */ |
1028 | if (e1000_validate_eeprom_checksum(&adapter->hw) < 0) { | 1070 | if (e1000_validate_eeprom_checksum(hw) < 0) { |
1029 | DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n"); | 1071 | DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n"); |
1030 | e1000_dump_eeprom(adapter); | 1072 | e1000_dump_eeprom(adapter); |
1031 | /* | 1073 | /* |
@@ -1036,24 +1078,24 @@ e1000_probe(struct pci_dev *pdev, | |||
1036 | * interface after manually setting a hw addr using | 1078 | * interface after manually setting a hw addr using |
1037 | * `ip set address` | 1079 | * `ip set address` |
1038 | */ | 1080 | */ |
1039 | memset(adapter->hw.mac_addr, 0, netdev->addr_len); | 1081 | memset(hw->mac_addr, 0, netdev->addr_len); |
1040 | } else { | 1082 | } else { |
1041 | /* copy the MAC address out of the EEPROM */ | 1083 | /* copy the MAC address out of the EEPROM */ |
1042 | if (e1000_read_mac_addr(&adapter->hw)) | 1084 | if (e1000_read_mac_addr(hw)) |
1043 | DPRINTK(PROBE, ERR, "EEPROM Read Error\n"); | 1085 | DPRINTK(PROBE, ERR, "EEPROM Read Error\n"); |
1044 | } | 1086 | } |
1045 | /* don't block initalization here due to bad MAC address */ | 1087 | /* don't block initalization here due to bad MAC address */ |
1046 | memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); | 1088 | memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len); |
1047 | memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len); | 1089 | memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len); |
1048 | 1090 | ||
1049 | if (!is_valid_ether_addr(netdev->perm_addr)) | 1091 | if (!is_valid_ether_addr(netdev->perm_addr)) |
1050 | DPRINTK(PROBE, ERR, "Invalid MAC Address\n"); | 1092 | DPRINTK(PROBE, ERR, "Invalid MAC Address\n"); |
1051 | 1093 | ||
1052 | e1000_get_bus_info(&adapter->hw); | 1094 | e1000_get_bus_info(hw); |
1053 | 1095 | ||
1054 | init_timer(&adapter->tx_fifo_stall_timer); | 1096 | init_timer(&adapter->tx_fifo_stall_timer); |
1055 | adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall; | 1097 | adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall; |
1056 | adapter->tx_fifo_stall_timer.data = (unsigned long) adapter; | 1098 | adapter->tx_fifo_stall_timer.data = (unsigned long)adapter; |
1057 | 1099 | ||
1058 | init_timer(&adapter->watchdog_timer); | 1100 | init_timer(&adapter->watchdog_timer); |
1059 | adapter->watchdog_timer.function = &e1000_watchdog; | 1101 | adapter->watchdog_timer.function = &e1000_watchdog; |
@@ -1061,7 +1103,7 @@ e1000_probe(struct pci_dev *pdev, | |||
1061 | 1103 | ||
1062 | init_timer(&adapter->phy_info_timer); | 1104 | init_timer(&adapter->phy_info_timer); |
1063 | adapter->phy_info_timer.function = &e1000_update_phy_info; | 1105 | adapter->phy_info_timer.function = &e1000_update_phy_info; |
1064 | adapter->phy_info_timer.data = (unsigned long) adapter; | 1106 | adapter->phy_info_timer.data = (unsigned long)adapter; |
1065 | 1107 | ||
1066 | INIT_WORK(&adapter->reset_task, e1000_reset_task); | 1108 | INIT_WORK(&adapter->reset_task, e1000_reset_task); |
1067 | 1109 | ||
@@ -1072,18 +1114,18 @@ e1000_probe(struct pci_dev *pdev, | |||
1072 | * enable the ACPI Magic Packet filter | 1114 | * enable the ACPI Magic Packet filter |
1073 | */ | 1115 | */ |
1074 | 1116 | ||
1075 | switch (adapter->hw.mac_type) { | 1117 | switch (hw->mac_type) { |
1076 | case e1000_82542_rev2_0: | 1118 | case e1000_82542_rev2_0: |
1077 | case e1000_82542_rev2_1: | 1119 | case e1000_82542_rev2_1: |
1078 | case e1000_82543: | 1120 | case e1000_82543: |
1079 | break; | 1121 | break; |
1080 | case e1000_82544: | 1122 | case e1000_82544: |
1081 | e1000_read_eeprom(&adapter->hw, | 1123 | e1000_read_eeprom(hw, |
1082 | EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data); | 1124 | EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data); |
1083 | eeprom_apme_mask = E1000_EEPROM_82544_APM; | 1125 | eeprom_apme_mask = E1000_EEPROM_82544_APM; |
1084 | break; | 1126 | break; |
1085 | case e1000_ich8lan: | 1127 | case e1000_ich8lan: |
1086 | e1000_read_eeprom(&adapter->hw, | 1128 | e1000_read_eeprom(hw, |
1087 | EEPROM_INIT_CONTROL1_REG, 1, &eeprom_data); | 1129 | EEPROM_INIT_CONTROL1_REG, 1, &eeprom_data); |
1088 | eeprom_apme_mask = E1000_EEPROM_ICH8_APME; | 1130 | eeprom_apme_mask = E1000_EEPROM_ICH8_APME; |
1089 | break; | 1131 | break; |
@@ -1091,14 +1133,14 @@ e1000_probe(struct pci_dev *pdev, | |||
1091 | case e1000_82546_rev_3: | 1133 | case e1000_82546_rev_3: |
1092 | case e1000_82571: | 1134 | case e1000_82571: |
1093 | case e1000_80003es2lan: | 1135 | case e1000_80003es2lan: |
1094 | if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1){ | 1136 | if (er32(STATUS) & E1000_STATUS_FUNC_1){ |
1095 | e1000_read_eeprom(&adapter->hw, | 1137 | e1000_read_eeprom(hw, |
1096 | EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); | 1138 | EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); |
1097 | break; | 1139 | break; |
1098 | } | 1140 | } |
1099 | /* Fall Through */ | 1141 | /* Fall Through */ |
1100 | default: | 1142 | default: |
1101 | e1000_read_eeprom(&adapter->hw, | 1143 | e1000_read_eeprom(hw, |
1102 | EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); | 1144 | EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); |
1103 | break; | 1145 | break; |
1104 | } | 1146 | } |
@@ -1117,7 +1159,7 @@ e1000_probe(struct pci_dev *pdev, | |||
1117 | case E1000_DEV_ID_82571EB_FIBER: | 1159 | case E1000_DEV_ID_82571EB_FIBER: |
1118 | /* Wake events only supported on port A for dual fiber | 1160 | /* Wake events only supported on port A for dual fiber |
1119 | * regardless of eeprom setting */ | 1161 | * regardless of eeprom setting */ |
1120 | if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1) | 1162 | if (er32(STATUS) & E1000_STATUS_FUNC_1) |
1121 | adapter->eeprom_wol = 0; | 1163 | adapter->eeprom_wol = 0; |
1122 | break; | 1164 | break; |
1123 | case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: | 1165 | case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: |
@@ -1140,8 +1182,6 @@ e1000_probe(struct pci_dev *pdev, | |||
1140 | adapter->wol = adapter->eeprom_wol; | 1182 | adapter->wol = adapter->eeprom_wol; |
1141 | 1183 | ||
1142 | /* print bus type/speed/width info */ | 1184 | /* print bus type/speed/width info */ |
1143 | { | ||
1144 | struct e1000_hw *hw = &adapter->hw; | ||
1145 | DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ", | 1185 | DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ", |
1146 | ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : | 1186 | ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : |
1147 | (hw->bus_type == e1000_bus_type_pci_express ? " Express":"")), | 1187 | (hw->bus_type == e1000_bus_type_pci_express ? " Express":"")), |
@@ -1154,11 +1194,10 @@ e1000_probe(struct pci_dev *pdev, | |||
1154 | (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" : | 1194 | (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" : |
1155 | (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" : | 1195 | (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" : |
1156 | "32-bit")); | 1196 | "32-bit")); |
1157 | } | ||
1158 | 1197 | ||
1159 | printk("%s\n", print_mac(mac, netdev->dev_addr)); | 1198 | printk("%s\n", print_mac(mac, netdev->dev_addr)); |
1160 | 1199 | ||
1161 | if (adapter->hw.bus_type == e1000_bus_type_pci_express) { | 1200 | if (hw->bus_type == e1000_bus_type_pci_express) { |
1162 | DPRINTK(PROBE, WARNING, "This device (id %04x:%04x) will no " | 1201 | DPRINTK(PROBE, WARNING, "This device (id %04x:%04x) will no " |
1163 | "longer be supported by this driver in the future.\n", | 1202 | "longer be supported by this driver in the future.\n", |
1164 | pdev->vendor, pdev->device); | 1203 | pdev->vendor, pdev->device); |
@@ -1173,8 +1212,8 @@ e1000_probe(struct pci_dev *pdev, | |||
1173 | * DRV_LOAD until the interface is up. For all other cases, | 1212 | * DRV_LOAD until the interface is up. For all other cases, |
1174 | * let the f/w know that the h/w is now under the control | 1213 | * let the f/w know that the h/w is now under the control |
1175 | * of the driver. */ | 1214 | * of the driver. */ |
1176 | if (adapter->hw.mac_type != e1000_82573 || | 1215 | if (hw->mac_type != e1000_82573 || |
1177 | !e1000_check_mng_mode(&adapter->hw)) | 1216 | !e1000_check_mng_mode(hw)) |
1178 | e1000_get_hw_control(adapter); | 1217 | e1000_get_hw_control(adapter); |
1179 | 1218 | ||
1180 | /* tell the stack to leave us alone until e1000_open() is called */ | 1219 | /* tell the stack to leave us alone until e1000_open() is called */ |
@@ -1182,7 +1221,8 @@ e1000_probe(struct pci_dev *pdev, | |||
1182 | netif_stop_queue(netdev); | 1221 | netif_stop_queue(netdev); |
1183 | 1222 | ||
1184 | strcpy(netdev->name, "eth%d"); | 1223 | strcpy(netdev->name, "eth%d"); |
1185 | if ((err = register_netdev(netdev))) | 1224 | err = register_netdev(netdev); |
1225 | if (err) | ||
1186 | goto err_register; | 1226 | goto err_register; |
1187 | 1227 | ||
1188 | DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n"); | 1228 | DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n"); |
@@ -1193,28 +1233,24 @@ e1000_probe(struct pci_dev *pdev, | |||
1193 | err_register: | 1233 | err_register: |
1194 | e1000_release_hw_control(adapter); | 1234 | e1000_release_hw_control(adapter); |
1195 | err_eeprom: | 1235 | err_eeprom: |
1196 | if (!e1000_check_phy_reset_block(&adapter->hw)) | 1236 | if (!e1000_check_phy_reset_block(hw)) |
1197 | e1000_phy_hw_reset(&adapter->hw); | 1237 | e1000_phy_hw_reset(hw); |
1198 | 1238 | ||
1199 | if (adapter->hw.flash_address) | 1239 | if (hw->flash_address) |
1200 | iounmap(adapter->hw.flash_address); | 1240 | iounmap(hw->flash_address); |
1201 | err_flashmap: | 1241 | err_flashmap: |
1202 | #ifdef CONFIG_E1000_NAPI | ||
1203 | for (i = 0; i < adapter->num_rx_queues; i++) | 1242 | for (i = 0; i < adapter->num_rx_queues; i++) |
1204 | dev_put(&adapter->polling_netdev[i]); | 1243 | dev_put(&adapter->polling_netdev[i]); |
1205 | #endif | ||
1206 | 1244 | ||
1207 | kfree(adapter->tx_ring); | 1245 | kfree(adapter->tx_ring); |
1208 | kfree(adapter->rx_ring); | 1246 | kfree(adapter->rx_ring); |
1209 | #ifdef CONFIG_E1000_NAPI | ||
1210 | kfree(adapter->polling_netdev); | 1247 | kfree(adapter->polling_netdev); |
1211 | #endif | ||
1212 | err_sw_init: | 1248 | err_sw_init: |
1213 | iounmap(adapter->hw.hw_addr); | 1249 | iounmap(hw->hw_addr); |
1214 | err_ioremap: | 1250 | err_ioremap: |
1215 | free_netdev(netdev); | 1251 | free_netdev(netdev); |
1216 | err_alloc_etherdev: | 1252 | err_alloc_etherdev: |
1217 | pci_release_regions(pdev); | 1253 | pci_release_selected_regions(pdev, bars); |
1218 | err_pci_reg: | 1254 | err_pci_reg: |
1219 | err_dma: | 1255 | err_dma: |
1220 | pci_disable_device(pdev); | 1256 | pci_disable_device(pdev); |
@@ -1231,14 +1267,12 @@ err_dma: | |||
1231 | * memory. | 1267 | * memory. |
1232 | **/ | 1268 | **/ |
1233 | 1269 | ||
1234 | static void __devexit | 1270 | static void __devexit e1000_remove(struct pci_dev *pdev) |
1235 | e1000_remove(struct pci_dev *pdev) | ||
1236 | { | 1271 | { |
1237 | struct net_device *netdev = pci_get_drvdata(pdev); | 1272 | struct net_device *netdev = pci_get_drvdata(pdev); |
1238 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1273 | struct e1000_adapter *adapter = netdev_priv(netdev); |
1239 | #ifdef CONFIG_E1000_NAPI | 1274 | struct e1000_hw *hw = &adapter->hw; |
1240 | int i; | 1275 | int i; |
1241 | #endif | ||
1242 | 1276 | ||
1243 | cancel_work_sync(&adapter->reset_task); | 1277 | cancel_work_sync(&adapter->reset_task); |
1244 | 1278 | ||
@@ -1248,26 +1282,22 @@ e1000_remove(struct pci_dev *pdev) | |||
1248 | * would have already happened in close and is redundant. */ | 1282 | * would have already happened in close and is redundant. */ |
1249 | e1000_release_hw_control(adapter); | 1283 | e1000_release_hw_control(adapter); |
1250 | 1284 | ||
1251 | #ifdef CONFIG_E1000_NAPI | ||
1252 | for (i = 0; i < adapter->num_rx_queues; i++) | 1285 | for (i = 0; i < adapter->num_rx_queues; i++) |
1253 | dev_put(&adapter->polling_netdev[i]); | 1286 | dev_put(&adapter->polling_netdev[i]); |
1254 | #endif | ||
1255 | 1287 | ||
1256 | unregister_netdev(netdev); | 1288 | unregister_netdev(netdev); |
1257 | 1289 | ||
1258 | if (!e1000_check_phy_reset_block(&adapter->hw)) | 1290 | if (!e1000_check_phy_reset_block(hw)) |
1259 | e1000_phy_hw_reset(&adapter->hw); | 1291 | e1000_phy_hw_reset(hw); |
1260 | 1292 | ||
1261 | kfree(adapter->tx_ring); | 1293 | kfree(adapter->tx_ring); |
1262 | kfree(adapter->rx_ring); | 1294 | kfree(adapter->rx_ring); |
1263 | #ifdef CONFIG_E1000_NAPI | ||
1264 | kfree(adapter->polling_netdev); | 1295 | kfree(adapter->polling_netdev); |
1265 | #endif | ||
1266 | 1296 | ||
1267 | iounmap(adapter->hw.hw_addr); | 1297 | iounmap(hw->hw_addr); |
1268 | if (adapter->hw.flash_address) | 1298 | if (hw->flash_address) |
1269 | iounmap(adapter->hw.flash_address); | 1299 | iounmap(hw->flash_address); |
1270 | pci_release_regions(pdev); | 1300 | pci_release_selected_regions(pdev, adapter->bars); |
1271 | 1301 | ||
1272 | free_netdev(netdev); | 1302 | free_netdev(netdev); |
1273 | 1303 | ||
@@ -1283,15 +1313,12 @@ e1000_remove(struct pci_dev *pdev) | |||
1283 | * OS network device settings (MTU size). | 1313 | * OS network device settings (MTU size). |
1284 | **/ | 1314 | **/ |
1285 | 1315 | ||
1286 | static int __devinit | 1316 | static int __devinit e1000_sw_init(struct e1000_adapter *adapter) |
1287 | e1000_sw_init(struct e1000_adapter *adapter) | ||
1288 | { | 1317 | { |
1289 | struct e1000_hw *hw = &adapter->hw; | 1318 | struct e1000_hw *hw = &adapter->hw; |
1290 | struct net_device *netdev = adapter->netdev; | 1319 | struct net_device *netdev = adapter->netdev; |
1291 | struct pci_dev *pdev = adapter->pdev; | 1320 | struct pci_dev *pdev = adapter->pdev; |
1292 | #ifdef CONFIG_E1000_NAPI | ||
1293 | int i; | 1321 | int i; |
1294 | #endif | ||
1295 | 1322 | ||
1296 | /* PCI config space info */ | 1323 | /* PCI config space info */ |
1297 | 1324 | ||
@@ -1349,14 +1376,12 @@ e1000_sw_init(struct e1000_adapter *adapter) | |||
1349 | return -ENOMEM; | 1376 | return -ENOMEM; |
1350 | } | 1377 | } |
1351 | 1378 | ||
1352 | #ifdef CONFIG_E1000_NAPI | ||
1353 | for (i = 0; i < adapter->num_rx_queues; i++) { | 1379 | for (i = 0; i < adapter->num_rx_queues; i++) { |
1354 | adapter->polling_netdev[i].priv = adapter; | 1380 | adapter->polling_netdev[i].priv = adapter; |
1355 | dev_hold(&adapter->polling_netdev[i]); | 1381 | dev_hold(&adapter->polling_netdev[i]); |
1356 | set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state); | 1382 | set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state); |
1357 | } | 1383 | } |
1358 | spin_lock_init(&adapter->tx_queue_lock); | 1384 | spin_lock_init(&adapter->tx_queue_lock); |
1359 | #endif | ||
1360 | 1385 | ||
1361 | /* Explicitly disable IRQ since the NIC can be in any state. */ | 1386 | /* Explicitly disable IRQ since the NIC can be in any state. */ |
1362 | e1000_irq_disable(adapter); | 1387 | e1000_irq_disable(adapter); |
@@ -1377,8 +1402,7 @@ e1000_sw_init(struct e1000_adapter *adapter) | |||
1377 | * intended for Multiqueue, but should work fine with a single queue. | 1402 | * intended for Multiqueue, but should work fine with a single queue. |
1378 | **/ | 1403 | **/ |
1379 | 1404 | ||
1380 | static int __devinit | 1405 | static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter) |
1381 | e1000_alloc_queues(struct e1000_adapter *adapter) | ||
1382 | { | 1406 | { |
1383 | adapter->tx_ring = kcalloc(adapter->num_tx_queues, | 1407 | adapter->tx_ring = kcalloc(adapter->num_tx_queues, |
1384 | sizeof(struct e1000_tx_ring), GFP_KERNEL); | 1408 | sizeof(struct e1000_tx_ring), GFP_KERNEL); |
@@ -1392,7 +1416,6 @@ e1000_alloc_queues(struct e1000_adapter *adapter) | |||
1392 | return -ENOMEM; | 1416 | return -ENOMEM; |
1393 | } | 1417 | } |
1394 | 1418 | ||
1395 | #ifdef CONFIG_E1000_NAPI | ||
1396 | adapter->polling_netdev = kcalloc(adapter->num_rx_queues, | 1419 | adapter->polling_netdev = kcalloc(adapter->num_rx_queues, |
1397 | sizeof(struct net_device), | 1420 | sizeof(struct net_device), |
1398 | GFP_KERNEL); | 1421 | GFP_KERNEL); |
@@ -1401,7 +1424,6 @@ e1000_alloc_queues(struct e1000_adapter *adapter) | |||
1401 | kfree(adapter->rx_ring); | 1424 | kfree(adapter->rx_ring); |
1402 | return -ENOMEM; | 1425 | return -ENOMEM; |
1403 | } | 1426 | } |
1404 | #endif | ||
1405 | 1427 | ||
1406 | return E1000_SUCCESS; | 1428 | return E1000_SUCCESS; |
1407 | } | 1429 | } |
@@ -1419,10 +1441,10 @@ e1000_alloc_queues(struct e1000_adapter *adapter) | |||
1419 | * and the stack is notified that the interface is ready. | 1441 | * and the stack is notified that the interface is ready. |
1420 | **/ | 1442 | **/ |
1421 | 1443 | ||
1422 | static int | 1444 | static int e1000_open(struct net_device *netdev) |
1423 | e1000_open(struct net_device *netdev) | ||
1424 | { | 1445 | { |
1425 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1446 | struct e1000_adapter *adapter = netdev_priv(netdev); |
1447 | struct e1000_hw *hw = &adapter->hw; | ||
1426 | int err; | 1448 | int err; |
1427 | 1449 | ||
1428 | /* disallow open during test */ | 1450 | /* disallow open during test */ |
@@ -1442,15 +1464,15 @@ e1000_open(struct net_device *netdev) | |||
1442 | e1000_power_up_phy(adapter); | 1464 | e1000_power_up_phy(adapter); |
1443 | 1465 | ||
1444 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; | 1466 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; |
1445 | if ((adapter->hw.mng_cookie.status & | 1467 | if ((hw->mng_cookie.status & |
1446 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { | 1468 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { |
1447 | e1000_update_mng_vlan(adapter); | 1469 | e1000_update_mng_vlan(adapter); |
1448 | } | 1470 | } |
1449 | 1471 | ||
1450 | /* If AMT is enabled, let the firmware know that the network | 1472 | /* If AMT is enabled, let the firmware know that the network |
1451 | * interface is now open */ | 1473 | * interface is now open */ |
1452 | if (adapter->hw.mac_type == e1000_82573 && | 1474 | if (hw->mac_type == e1000_82573 && |
1453 | e1000_check_mng_mode(&adapter->hw)) | 1475 | e1000_check_mng_mode(hw)) |
1454 | e1000_get_hw_control(adapter); | 1476 | e1000_get_hw_control(adapter); |
1455 | 1477 | ||
1456 | /* before we allocate an interrupt, we must be ready to handle it. | 1478 | /* before we allocate an interrupt, we must be ready to handle it. |
@@ -1466,16 +1488,14 @@ e1000_open(struct net_device *netdev) | |||
1466 | /* From here on the code is the same as e1000_up() */ | 1488 | /* From here on the code is the same as e1000_up() */ |
1467 | clear_bit(__E1000_DOWN, &adapter->flags); | 1489 | clear_bit(__E1000_DOWN, &adapter->flags); |
1468 | 1490 | ||
1469 | #ifdef CONFIG_E1000_NAPI | ||
1470 | napi_enable(&adapter->napi); | 1491 | napi_enable(&adapter->napi); |
1471 | #endif | ||
1472 | 1492 | ||
1473 | e1000_irq_enable(adapter); | 1493 | e1000_irq_enable(adapter); |
1474 | 1494 | ||
1475 | netif_start_queue(netdev); | 1495 | netif_start_queue(netdev); |
1476 | 1496 | ||
1477 | /* fire a link status change interrupt to start the watchdog */ | 1497 | /* fire a link status change interrupt to start the watchdog */ |
1478 | E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_LSC); | 1498 | ew32(ICS, E1000_ICS_LSC); |
1479 | 1499 | ||
1480 | return E1000_SUCCESS; | 1500 | return E1000_SUCCESS; |
1481 | 1501 | ||
@@ -1503,10 +1523,10 @@ err_setup_tx: | |||
1503 | * hardware, and all transmit and receive resources are freed. | 1523 | * hardware, and all transmit and receive resources are freed. |
1504 | **/ | 1524 | **/ |
1505 | 1525 | ||
1506 | static int | 1526 | static int e1000_close(struct net_device *netdev) |
1507 | e1000_close(struct net_device *netdev) | ||
1508 | { | 1527 | { |
1509 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1528 | struct e1000_adapter *adapter = netdev_priv(netdev); |
1529 | struct e1000_hw *hw = &adapter->hw; | ||
1510 | 1530 | ||
1511 | WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); | 1531 | WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); |
1512 | e1000_down(adapter); | 1532 | e1000_down(adapter); |
@@ -1518,7 +1538,7 @@ e1000_close(struct net_device *netdev) | |||
1518 | 1538 | ||
1519 | /* kill manageability vlan ID if supported, but not if a vlan with | 1539 | /* kill manageability vlan ID if supported, but not if a vlan with |
1520 | * the same ID is registered on the host OS (let 8021q kill it) */ | 1540 | * the same ID is registered on the host OS (let 8021q kill it) */ |
1521 | if ((adapter->hw.mng_cookie.status & | 1541 | if ((hw->mng_cookie.status & |
1522 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && | 1542 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && |
1523 | !(adapter->vlgrp && | 1543 | !(adapter->vlgrp && |
1524 | vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) { | 1544 | vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) { |
@@ -1527,8 +1547,8 @@ e1000_close(struct net_device *netdev) | |||
1527 | 1547 | ||
1528 | /* If AMT is enabled, let the firmware know that the network | 1548 | /* If AMT is enabled, let the firmware know that the network |
1529 | * interface is now closed */ | 1549 | * interface is now closed */ |
1530 | if (adapter->hw.mac_type == e1000_82573 && | 1550 | if (hw->mac_type == e1000_82573 && |
1531 | e1000_check_mng_mode(&adapter->hw)) | 1551 | e1000_check_mng_mode(hw)) |
1532 | e1000_release_hw_control(adapter); | 1552 | e1000_release_hw_control(adapter); |
1533 | 1553 | ||
1534 | return 0; | 1554 | return 0; |
@@ -1540,17 +1560,17 @@ e1000_close(struct net_device *netdev) | |||
1540 | * @start: address of beginning of memory | 1560 | * @start: address of beginning of memory |
1541 | * @len: length of memory | 1561 | * @len: length of memory |
1542 | **/ | 1562 | **/ |
1543 | static bool | 1563 | static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start, |
1544 | e1000_check_64k_bound(struct e1000_adapter *adapter, | 1564 | unsigned long len) |
1545 | void *start, unsigned long len) | ||
1546 | { | 1565 | { |
1547 | unsigned long begin = (unsigned long) start; | 1566 | struct e1000_hw *hw = &adapter->hw; |
1567 | unsigned long begin = (unsigned long)start; | ||
1548 | unsigned long end = begin + len; | 1568 | unsigned long end = begin + len; |
1549 | 1569 | ||
1550 | /* First rev 82545 and 82546 need to not allow any memory | 1570 | /* First rev 82545 and 82546 need to not allow any memory |
1551 | * write location to cross 64k boundary due to errata 23 */ | 1571 | * write location to cross 64k boundary due to errata 23 */ |
1552 | if (adapter->hw.mac_type == e1000_82545 || | 1572 | if (hw->mac_type == e1000_82545 || |
1553 | adapter->hw.mac_type == e1000_82546) { | 1573 | hw->mac_type == e1000_82546) { |
1554 | return ((begin ^ (end - 1)) >> 16) != 0 ? false : true; | 1574 | return ((begin ^ (end - 1)) >> 16) != 0 ? false : true; |
1555 | } | 1575 | } |
1556 | 1576 | ||
@@ -1565,9 +1585,8 @@ e1000_check_64k_bound(struct e1000_adapter *adapter, | |||
1565 | * Return 0 on success, negative on failure | 1585 | * Return 0 on success, negative on failure |
1566 | **/ | 1586 | **/ |
1567 | 1587 | ||
1568 | static int | 1588 | static int e1000_setup_tx_resources(struct e1000_adapter *adapter, |
1569 | e1000_setup_tx_resources(struct e1000_adapter *adapter, | 1589 | struct e1000_tx_ring *txdr) |
1570 | struct e1000_tx_ring *txdr) | ||
1571 | { | 1590 | { |
1572 | struct pci_dev *pdev = adapter->pdev; | 1591 | struct pci_dev *pdev = adapter->pdev; |
1573 | int size; | 1592 | int size; |
@@ -1641,8 +1660,7 @@ setup_tx_desc_die: | |||
1641 | * Return 0 on success, negative on failure | 1660 | * Return 0 on success, negative on failure |
1642 | **/ | 1661 | **/ |
1643 | 1662 | ||
1644 | int | 1663 | int e1000_setup_all_tx_resources(struct e1000_adapter *adapter) |
1645 | e1000_setup_all_tx_resources(struct e1000_adapter *adapter) | ||
1646 | { | 1664 | { |
1647 | int i, err = 0; | 1665 | int i, err = 0; |
1648 | 1666 | ||
@@ -1668,8 +1686,7 @@ e1000_setup_all_tx_resources(struct e1000_adapter *adapter) | |||
1668 | * Configure the Tx unit of the MAC after a reset. | 1686 | * Configure the Tx unit of the MAC after a reset. |
1669 | **/ | 1687 | **/ |
1670 | 1688 | ||
1671 | static void | 1689 | static void e1000_configure_tx(struct e1000_adapter *adapter) |
1672 | e1000_configure_tx(struct e1000_adapter *adapter) | ||
1673 | { | 1690 | { |
1674 | u64 tdba; | 1691 | u64 tdba; |
1675 | struct e1000_hw *hw = &adapter->hw; | 1692 | struct e1000_hw *hw = &adapter->hw; |
@@ -1684,18 +1701,18 @@ e1000_configure_tx(struct e1000_adapter *adapter) | |||
1684 | tdba = adapter->tx_ring[0].dma; | 1701 | tdba = adapter->tx_ring[0].dma; |
1685 | tdlen = adapter->tx_ring[0].count * | 1702 | tdlen = adapter->tx_ring[0].count * |
1686 | sizeof(struct e1000_tx_desc); | 1703 | sizeof(struct e1000_tx_desc); |
1687 | E1000_WRITE_REG(hw, TDLEN, tdlen); | 1704 | ew32(TDLEN, tdlen); |
1688 | E1000_WRITE_REG(hw, TDBAH, (tdba >> 32)); | 1705 | ew32(TDBAH, (tdba >> 32)); |
1689 | E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL)); | 1706 | ew32(TDBAL, (tdba & 0x00000000ffffffffULL)); |
1690 | E1000_WRITE_REG(hw, TDT, 0); | 1707 | ew32(TDT, 0); |
1691 | E1000_WRITE_REG(hw, TDH, 0); | 1708 | ew32(TDH, 0); |
1692 | adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH); | 1709 | adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH); |
1693 | adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT); | 1710 | adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT); |
1694 | break; | 1711 | break; |
1695 | } | 1712 | } |
1696 | 1713 | ||
1697 | /* Set the default values for the Tx Inter Packet Gap timer */ | 1714 | /* Set the default values for the Tx Inter Packet Gap timer */ |
1698 | if (adapter->hw.mac_type <= e1000_82547_rev_2 && | 1715 | if (hw->mac_type <= e1000_82547_rev_2 && |
1699 | (hw->media_type == e1000_media_type_fiber || | 1716 | (hw->media_type == e1000_media_type_fiber || |
1700 | hw->media_type == e1000_media_type_internal_serdes)) | 1717 | hw->media_type == e1000_media_type_internal_serdes)) |
1701 | tipg = DEFAULT_82543_TIPG_IPGT_FIBER; | 1718 | tipg = DEFAULT_82543_TIPG_IPGT_FIBER; |
@@ -1720,34 +1737,34 @@ e1000_configure_tx(struct e1000_adapter *adapter) | |||
1720 | } | 1737 | } |
1721 | tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT; | 1738 | tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT; |
1722 | tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT; | 1739 | tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT; |
1723 | E1000_WRITE_REG(hw, TIPG, tipg); | 1740 | ew32(TIPG, tipg); |
1724 | 1741 | ||
1725 | /* Set the Tx Interrupt Delay register */ | 1742 | /* Set the Tx Interrupt Delay register */ |
1726 | 1743 | ||
1727 | E1000_WRITE_REG(hw, TIDV, adapter->tx_int_delay); | 1744 | ew32(TIDV, adapter->tx_int_delay); |
1728 | if (hw->mac_type >= e1000_82540) | 1745 | if (hw->mac_type >= e1000_82540) |
1729 | E1000_WRITE_REG(hw, TADV, adapter->tx_abs_int_delay); | 1746 | ew32(TADV, adapter->tx_abs_int_delay); |
1730 | 1747 | ||
1731 | /* Program the Transmit Control Register */ | 1748 | /* Program the Transmit Control Register */ |
1732 | 1749 | ||
1733 | tctl = E1000_READ_REG(hw, TCTL); | 1750 | tctl = er32(TCTL); |
1734 | tctl &= ~E1000_TCTL_CT; | 1751 | tctl &= ~E1000_TCTL_CT; |
1735 | tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | | 1752 | tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | |
1736 | (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); | 1753 | (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); |
1737 | 1754 | ||
1738 | if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) { | 1755 | if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) { |
1739 | tarc = E1000_READ_REG(hw, TARC0); | 1756 | tarc = er32(TARC0); |
1740 | /* set the speed mode bit, we'll clear it if we're not at | 1757 | /* set the speed mode bit, we'll clear it if we're not at |
1741 | * gigabit link later */ | 1758 | * gigabit link later */ |
1742 | tarc |= (1 << 21); | 1759 | tarc |= (1 << 21); |
1743 | E1000_WRITE_REG(hw, TARC0, tarc); | 1760 | ew32(TARC0, tarc); |
1744 | } else if (hw->mac_type == e1000_80003es2lan) { | 1761 | } else if (hw->mac_type == e1000_80003es2lan) { |
1745 | tarc = E1000_READ_REG(hw, TARC0); | 1762 | tarc = er32(TARC0); |
1746 | tarc |= 1; | 1763 | tarc |= 1; |
1747 | E1000_WRITE_REG(hw, TARC0, tarc); | 1764 | ew32(TARC0, tarc); |
1748 | tarc = E1000_READ_REG(hw, TARC1); | 1765 | tarc = er32(TARC1); |
1749 | tarc |= 1; | 1766 | tarc |= 1; |
1750 | E1000_WRITE_REG(hw, TARC1, tarc); | 1767 | ew32(TARC1, tarc); |
1751 | } | 1768 | } |
1752 | 1769 | ||
1753 | e1000_config_collision_dist(hw); | 1770 | e1000_config_collision_dist(hw); |
@@ -1770,7 +1787,7 @@ e1000_configure_tx(struct e1000_adapter *adapter) | |||
1770 | hw->bus_type == e1000_bus_type_pcix) | 1787 | hw->bus_type == e1000_bus_type_pcix) |
1771 | adapter->pcix_82544 = 1; | 1788 | adapter->pcix_82544 = 1; |
1772 | 1789 | ||
1773 | E1000_WRITE_REG(hw, TCTL, tctl); | 1790 | ew32(TCTL, tctl); |
1774 | 1791 | ||
1775 | } | 1792 | } |
1776 | 1793 | ||
@@ -1782,10 +1799,10 @@ e1000_configure_tx(struct e1000_adapter *adapter) | |||
1782 | * Returns 0 on success, negative on failure | 1799 | * Returns 0 on success, negative on failure |
1783 | **/ | 1800 | **/ |
1784 | 1801 | ||
1785 | static int | 1802 | static int e1000_setup_rx_resources(struct e1000_adapter *adapter, |
1786 | e1000_setup_rx_resources(struct e1000_adapter *adapter, | 1803 | struct e1000_rx_ring *rxdr) |
1787 | struct e1000_rx_ring *rxdr) | ||
1788 | { | 1804 | { |
1805 | struct e1000_hw *hw = &adapter->hw; | ||
1789 | struct pci_dev *pdev = adapter->pdev; | 1806 | struct pci_dev *pdev = adapter->pdev; |
1790 | int size, desc_len; | 1807 | int size, desc_len; |
1791 | 1808 | ||
@@ -1818,7 +1835,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter, | |||
1818 | return -ENOMEM; | 1835 | return -ENOMEM; |
1819 | } | 1836 | } |
1820 | 1837 | ||
1821 | if (adapter->hw.mac_type <= e1000_82547_rev_2) | 1838 | if (hw->mac_type <= e1000_82547_rev_2) |
1822 | desc_len = sizeof(struct e1000_rx_desc); | 1839 | desc_len = sizeof(struct e1000_rx_desc); |
1823 | else | 1840 | else |
1824 | desc_len = sizeof(union e1000_rx_desc_packet_split); | 1841 | desc_len = sizeof(union e1000_rx_desc_packet_split); |
@@ -1887,8 +1904,7 @@ setup_rx_desc_die: | |||
1887 | * Return 0 on success, negative on failure | 1904 | * Return 0 on success, negative on failure |
1888 | **/ | 1905 | **/ |
1889 | 1906 | ||
1890 | int | 1907 | int e1000_setup_all_rx_resources(struct e1000_adapter *adapter) |
1891 | e1000_setup_all_rx_resources(struct e1000_adapter *adapter) | ||
1892 | { | 1908 | { |
1893 | int i, err = 0; | 1909 | int i, err = 0; |
1894 | 1910 | ||
@@ -1913,24 +1929,24 @@ e1000_setup_all_rx_resources(struct e1000_adapter *adapter) | |||
1913 | **/ | 1929 | **/ |
1914 | #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \ | 1930 | #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \ |
1915 | (((S) & (PAGE_SIZE - 1)) ? 1 : 0)) | 1931 | (((S) & (PAGE_SIZE - 1)) ? 1 : 0)) |
1916 | static void | 1932 | static void e1000_setup_rctl(struct e1000_adapter *adapter) |
1917 | e1000_setup_rctl(struct e1000_adapter *adapter) | ||
1918 | { | 1933 | { |
1934 | struct e1000_hw *hw = &adapter->hw; | ||
1919 | u32 rctl, rfctl; | 1935 | u32 rctl, rfctl; |
1920 | u32 psrctl = 0; | 1936 | u32 psrctl = 0; |
1921 | #ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT | 1937 | #ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT |
1922 | u32 pages = 0; | 1938 | u32 pages = 0; |
1923 | #endif | 1939 | #endif |
1924 | 1940 | ||
1925 | rctl = E1000_READ_REG(&adapter->hw, RCTL); | 1941 | rctl = er32(RCTL); |
1926 | 1942 | ||
1927 | rctl &= ~(3 << E1000_RCTL_MO_SHIFT); | 1943 | rctl &= ~(3 << E1000_RCTL_MO_SHIFT); |
1928 | 1944 | ||
1929 | rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | | 1945 | rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | |
1930 | E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | | 1946 | E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | |
1931 | (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT); | 1947 | (hw->mc_filter_type << E1000_RCTL_MO_SHIFT); |
1932 | 1948 | ||
1933 | if (adapter->hw.tbi_compatibility_on == 1) | 1949 | if (hw->tbi_compatibility_on == 1) |
1934 | rctl |= E1000_RCTL_SBP; | 1950 | rctl |= E1000_RCTL_SBP; |
1935 | else | 1951 | else |
1936 | rctl &= ~E1000_RCTL_SBP; | 1952 | rctl &= ~E1000_RCTL_SBP; |
@@ -1983,7 +1999,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter) | |||
1983 | /* allocations using alloc_page take too long for regular MTU | 1999 | /* allocations using alloc_page take too long for regular MTU |
1984 | * so only enable packet split for jumbo frames */ | 2000 | * so only enable packet split for jumbo frames */ |
1985 | pages = PAGE_USE_COUNT(adapter->netdev->mtu); | 2001 | pages = PAGE_USE_COUNT(adapter->netdev->mtu); |
1986 | if ((adapter->hw.mac_type >= e1000_82571) && (pages <= 3) && | 2002 | if ((hw->mac_type >= e1000_82571) && (pages <= 3) && |
1987 | PAGE_SIZE <= 16384 && (rctl & E1000_RCTL_LPE)) | 2003 | PAGE_SIZE <= 16384 && (rctl & E1000_RCTL_LPE)) |
1988 | adapter->rx_ps_pages = pages; | 2004 | adapter->rx_ps_pages = pages; |
1989 | else | 2005 | else |
@@ -1991,14 +2007,14 @@ e1000_setup_rctl(struct e1000_adapter *adapter) | |||
1991 | #endif | 2007 | #endif |
1992 | if (adapter->rx_ps_pages) { | 2008 | if (adapter->rx_ps_pages) { |
1993 | /* Configure extra packet-split registers */ | 2009 | /* Configure extra packet-split registers */ |
1994 | rfctl = E1000_READ_REG(&adapter->hw, RFCTL); | 2010 | rfctl = er32(RFCTL); |
1995 | rfctl |= E1000_RFCTL_EXTEN; | 2011 | rfctl |= E1000_RFCTL_EXTEN; |
1996 | /* disable packet split support for IPv6 extension headers, | 2012 | /* disable packet split support for IPv6 extension headers, |
1997 | * because some malformed IPv6 headers can hang the RX */ | 2013 | * because some malformed IPv6 headers can hang the RX */ |
1998 | rfctl |= (E1000_RFCTL_IPV6_EX_DIS | | 2014 | rfctl |= (E1000_RFCTL_IPV6_EX_DIS | |
1999 | E1000_RFCTL_NEW_IPV6_EXT_DIS); | 2015 | E1000_RFCTL_NEW_IPV6_EXT_DIS); |
2000 | 2016 | ||
2001 | E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl); | 2017 | ew32(RFCTL, rfctl); |
2002 | 2018 | ||
2003 | rctl |= E1000_RCTL_DTYP_PS; | 2019 | rctl |= E1000_RCTL_DTYP_PS; |
2004 | 2020 | ||
@@ -2018,10 +2034,10 @@ e1000_setup_rctl(struct e1000_adapter *adapter) | |||
2018 | break; | 2034 | break; |
2019 | } | 2035 | } |
2020 | 2036 | ||
2021 | E1000_WRITE_REG(&adapter->hw, PSRCTL, psrctl); | 2037 | ew32(PSRCTL, psrctl); |
2022 | } | 2038 | } |
2023 | 2039 | ||
2024 | E1000_WRITE_REG(&adapter->hw, RCTL, rctl); | 2040 | ew32(RCTL, rctl); |
2025 | } | 2041 | } |
2026 | 2042 | ||
2027 | /** | 2043 | /** |
@@ -2031,8 +2047,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter) | |||
2031 | * Configure the Rx unit of the MAC after a reset. | 2047 | * Configure the Rx unit of the MAC after a reset. |
2032 | **/ | 2048 | **/ |
2033 | 2049 | ||
2034 | static void | 2050 | static void e1000_configure_rx(struct e1000_adapter *adapter) |
2035 | e1000_configure_rx(struct e1000_adapter *adapter) | ||
2036 | { | 2051 | { |
2037 | u64 rdba; | 2052 | u64 rdba; |
2038 | struct e1000_hw *hw = &adapter->hw; | 2053 | struct e1000_hw *hw = &adapter->hw; |
@@ -2052,30 +2067,27 @@ e1000_configure_rx(struct e1000_adapter *adapter) | |||
2052 | } | 2067 | } |
2053 | 2068 | ||
2054 | /* disable receives while setting up the descriptors */ | 2069 | /* disable receives while setting up the descriptors */ |
2055 | rctl = E1000_READ_REG(hw, RCTL); | 2070 | rctl = er32(RCTL); |
2056 | E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN); | 2071 | ew32(RCTL, rctl & ~E1000_RCTL_EN); |
2057 | 2072 | ||
2058 | /* set the Receive Delay Timer Register */ | 2073 | /* set the Receive Delay Timer Register */ |
2059 | E1000_WRITE_REG(hw, RDTR, adapter->rx_int_delay); | 2074 | ew32(RDTR, adapter->rx_int_delay); |
2060 | 2075 | ||
2061 | if (hw->mac_type >= e1000_82540) { | 2076 | if (hw->mac_type >= e1000_82540) { |
2062 | E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay); | 2077 | ew32(RADV, adapter->rx_abs_int_delay); |
2063 | if (adapter->itr_setting != 0) | 2078 | if (adapter->itr_setting != 0) |
2064 | E1000_WRITE_REG(hw, ITR, | 2079 | ew32(ITR, 1000000000 / (adapter->itr * 256)); |
2065 | 1000000000 / (adapter->itr * 256)); | ||
2066 | } | 2080 | } |
2067 | 2081 | ||
2068 | if (hw->mac_type >= e1000_82571) { | 2082 | if (hw->mac_type >= e1000_82571) { |
2069 | ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); | 2083 | ctrl_ext = er32(CTRL_EXT); |
2070 | /* Reset delay timers after every interrupt */ | 2084 | /* Reset delay timers after every interrupt */ |
2071 | ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR; | 2085 | ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR; |
2072 | #ifdef CONFIG_E1000_NAPI | ||
2073 | /* Auto-Mask interrupts upon ICR access */ | 2086 | /* Auto-Mask interrupts upon ICR access */ |
2074 | ctrl_ext |= E1000_CTRL_EXT_IAME; | 2087 | ctrl_ext |= E1000_CTRL_EXT_IAME; |
2075 | E1000_WRITE_REG(hw, IAM, 0xffffffff); | 2088 | ew32(IAM, 0xffffffff); |
2076 | #endif | 2089 | ew32(CTRL_EXT, ctrl_ext); |
2077 | E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); | 2090 | E1000_WRITE_FLUSH(); |
2078 | E1000_WRITE_FLUSH(hw); | ||
2079 | } | 2091 | } |
2080 | 2092 | ||
2081 | /* Setup the HW Rx Head and Tail Descriptor Pointers and | 2093 | /* Setup the HW Rx Head and Tail Descriptor Pointers and |
@@ -2084,11 +2096,11 @@ e1000_configure_rx(struct e1000_adapter *adapter) | |||
2084 | case 1: | 2096 | case 1: |
2085 | default: | 2097 | default: |
2086 | rdba = adapter->rx_ring[0].dma; | 2098 | rdba = adapter->rx_ring[0].dma; |
2087 | E1000_WRITE_REG(hw, RDLEN, rdlen); | 2099 | ew32(RDLEN, rdlen); |
2088 | E1000_WRITE_REG(hw, RDBAH, (rdba >> 32)); | 2100 | ew32(RDBAH, (rdba >> 32)); |
2089 | E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL)); | 2101 | ew32(RDBAL, (rdba & 0x00000000ffffffffULL)); |
2090 | E1000_WRITE_REG(hw, RDT, 0); | 2102 | ew32(RDT, 0); |
2091 | E1000_WRITE_REG(hw, RDH, 0); | 2103 | ew32(RDH, 0); |
2092 | adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH); | 2104 | adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH); |
2093 | adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT); | 2105 | adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT); |
2094 | break; | 2106 | break; |
@@ -2096,7 +2108,7 @@ e1000_configure_rx(struct e1000_adapter *adapter) | |||
2096 | 2108 | ||
2097 | /* Enable 82543 Receive Checksum Offload for TCP and UDP */ | 2109 | /* Enable 82543 Receive Checksum Offload for TCP and UDP */ |
2098 | if (hw->mac_type >= e1000_82543) { | 2110 | if (hw->mac_type >= e1000_82543) { |
2099 | rxcsum = E1000_READ_REG(hw, RXCSUM); | 2111 | rxcsum = er32(RXCSUM); |
2100 | if (adapter->rx_csum) { | 2112 | if (adapter->rx_csum) { |
2101 | rxcsum |= E1000_RXCSUM_TUOFL; | 2113 | rxcsum |= E1000_RXCSUM_TUOFL; |
2102 | 2114 | ||
@@ -2110,17 +2122,17 @@ e1000_configure_rx(struct e1000_adapter *adapter) | |||
2110 | rxcsum &= ~E1000_RXCSUM_TUOFL; | 2122 | rxcsum &= ~E1000_RXCSUM_TUOFL; |
2111 | /* don't need to clear IPPCSE as it defaults to 0 */ | 2123 | /* don't need to clear IPPCSE as it defaults to 0 */ |
2112 | } | 2124 | } |
2113 | E1000_WRITE_REG(hw, RXCSUM, rxcsum); | 2125 | ew32(RXCSUM, rxcsum); |
2114 | } | 2126 | } |
2115 | 2127 | ||
2116 | /* enable early receives on 82573, only takes effect if using > 2048 | 2128 | /* enable early receives on 82573, only takes effect if using > 2048 |
2117 | * byte total frame size. for example only for jumbo frames */ | 2129 | * byte total frame size. for example only for jumbo frames */ |
2118 | #define E1000_ERT_2048 0x100 | 2130 | #define E1000_ERT_2048 0x100 |
2119 | if (hw->mac_type == e1000_82573) | 2131 | if (hw->mac_type == e1000_82573) |
2120 | E1000_WRITE_REG(hw, ERT, E1000_ERT_2048); | 2132 | ew32(ERT, E1000_ERT_2048); |
2121 | 2133 | ||
2122 | /* Enable Receives */ | 2134 | /* Enable Receives */ |
2123 | E1000_WRITE_REG(hw, RCTL, rctl); | 2135 | ew32(RCTL, rctl); |
2124 | } | 2136 | } |
2125 | 2137 | ||
2126 | /** | 2138 | /** |
@@ -2131,9 +2143,8 @@ e1000_configure_rx(struct e1000_adapter *adapter) | |||
2131 | * Free all transmit software resources | 2143 | * Free all transmit software resources |
2132 | **/ | 2144 | **/ |
2133 | 2145 | ||
2134 | static void | 2146 | static void e1000_free_tx_resources(struct e1000_adapter *adapter, |
2135 | e1000_free_tx_resources(struct e1000_adapter *adapter, | 2147 | struct e1000_tx_ring *tx_ring) |
2136 | struct e1000_tx_ring *tx_ring) | ||
2137 | { | 2148 | { |
2138 | struct pci_dev *pdev = adapter->pdev; | 2149 | struct pci_dev *pdev = adapter->pdev; |
2139 | 2150 | ||
@@ -2154,8 +2165,7 @@ e1000_free_tx_resources(struct e1000_adapter *adapter, | |||
2154 | * Free all transmit software resources | 2165 | * Free all transmit software resources |
2155 | **/ | 2166 | **/ |
2156 | 2167 | ||
2157 | void | 2168 | void e1000_free_all_tx_resources(struct e1000_adapter *adapter) |
2158 | e1000_free_all_tx_resources(struct e1000_adapter *adapter) | ||
2159 | { | 2169 | { |
2160 | int i; | 2170 | int i; |
2161 | 2171 | ||
@@ -2163,9 +2173,8 @@ e1000_free_all_tx_resources(struct e1000_adapter *adapter) | |||
2163 | e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); | 2173 | e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); |
2164 | } | 2174 | } |
2165 | 2175 | ||
2166 | static void | 2176 | static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, |
2167 | e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, | 2177 | struct e1000_buffer *buffer_info) |
2168 | struct e1000_buffer *buffer_info) | ||
2169 | { | 2178 | { |
2170 | if (buffer_info->dma) { | 2179 | if (buffer_info->dma) { |
2171 | pci_unmap_page(adapter->pdev, | 2180 | pci_unmap_page(adapter->pdev, |
@@ -2187,10 +2196,10 @@ e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, | |||
2187 | * @tx_ring: ring to be cleaned | 2196 | * @tx_ring: ring to be cleaned |
2188 | **/ | 2197 | **/ |
2189 | 2198 | ||
2190 | static void | 2199 | static void e1000_clean_tx_ring(struct e1000_adapter *adapter, |
2191 | e1000_clean_tx_ring(struct e1000_adapter *adapter, | 2200 | struct e1000_tx_ring *tx_ring) |
2192 | struct e1000_tx_ring *tx_ring) | ||
2193 | { | 2201 | { |
2202 | struct e1000_hw *hw = &adapter->hw; | ||
2194 | struct e1000_buffer *buffer_info; | 2203 | struct e1000_buffer *buffer_info; |
2195 | unsigned long size; | 2204 | unsigned long size; |
2196 | unsigned int i; | 2205 | unsigned int i; |
@@ -2213,8 +2222,8 @@ e1000_clean_tx_ring(struct e1000_adapter *adapter, | |||
2213 | tx_ring->next_to_clean = 0; | 2222 | tx_ring->next_to_clean = 0; |
2214 | tx_ring->last_tx_tso = 0; | 2223 | tx_ring->last_tx_tso = 0; |
2215 | 2224 | ||
2216 | writel(0, adapter->hw.hw_addr + tx_ring->tdh); | 2225 | writel(0, hw->hw_addr + tx_ring->tdh); |
2217 | writel(0, adapter->hw.hw_addr + tx_ring->tdt); | 2226 | writel(0, hw->hw_addr + tx_ring->tdt); |
2218 | } | 2227 | } |
2219 | 2228 | ||
2220 | /** | 2229 | /** |
@@ -2222,8 +2231,7 @@ e1000_clean_tx_ring(struct e1000_adapter *adapter, | |||
2222 | * @adapter: board private structure | 2231 | * @adapter: board private structure |
2223 | **/ | 2232 | **/ |
2224 | 2233 | ||
2225 | static void | 2234 | static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter) |
2226 | e1000_clean_all_tx_rings(struct e1000_adapter *adapter) | ||
2227 | { | 2235 | { |
2228 | int i; | 2236 | int i; |
2229 | 2237 | ||
@@ -2239,9 +2247,8 @@ e1000_clean_all_tx_rings(struct e1000_adapter *adapter) | |||
2239 | * Free all receive software resources | 2247 | * Free all receive software resources |
2240 | **/ | 2248 | **/ |
2241 | 2249 | ||
2242 | static void | 2250 | static void e1000_free_rx_resources(struct e1000_adapter *adapter, |
2243 | e1000_free_rx_resources(struct e1000_adapter *adapter, | 2251 | struct e1000_rx_ring *rx_ring) |
2244 | struct e1000_rx_ring *rx_ring) | ||
2245 | { | 2252 | { |
2246 | struct pci_dev *pdev = adapter->pdev; | 2253 | struct pci_dev *pdev = adapter->pdev; |
2247 | 2254 | ||
@@ -2266,8 +2273,7 @@ e1000_free_rx_resources(struct e1000_adapter *adapter, | |||
2266 | * Free all receive software resources | 2273 | * Free all receive software resources |
2267 | **/ | 2274 | **/ |
2268 | 2275 | ||
2269 | void | 2276 | void e1000_free_all_rx_resources(struct e1000_adapter *adapter) |
2270 | e1000_free_all_rx_resources(struct e1000_adapter *adapter) | ||
2271 | { | 2277 | { |
2272 | int i; | 2278 | int i; |
2273 | 2279 | ||
@@ -2281,10 +2287,10 @@ e1000_free_all_rx_resources(struct e1000_adapter *adapter) | |||
2281 | * @rx_ring: ring to free buffers from | 2287 | * @rx_ring: ring to free buffers from |
2282 | **/ | 2288 | **/ |
2283 | 2289 | ||
2284 | static void | 2290 | static void e1000_clean_rx_ring(struct e1000_adapter *adapter, |
2285 | e1000_clean_rx_ring(struct e1000_adapter *adapter, | 2291 | struct e1000_rx_ring *rx_ring) |
2286 | struct e1000_rx_ring *rx_ring) | ||
2287 | { | 2292 | { |
2293 | struct e1000_hw *hw = &adapter->hw; | ||
2288 | struct e1000_buffer *buffer_info; | 2294 | struct e1000_buffer *buffer_info; |
2289 | struct e1000_ps_page *ps_page; | 2295 | struct e1000_ps_page *ps_page; |
2290 | struct e1000_ps_page_dma *ps_page_dma; | 2296 | struct e1000_ps_page_dma *ps_page_dma; |
@@ -2331,8 +2337,8 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter, | |||
2331 | rx_ring->next_to_clean = 0; | 2337 | rx_ring->next_to_clean = 0; |
2332 | rx_ring->next_to_use = 0; | 2338 | rx_ring->next_to_use = 0; |
2333 | 2339 | ||
2334 | writel(0, adapter->hw.hw_addr + rx_ring->rdh); | 2340 | writel(0, hw->hw_addr + rx_ring->rdh); |
2335 | writel(0, adapter->hw.hw_addr + rx_ring->rdt); | 2341 | writel(0, hw->hw_addr + rx_ring->rdt); |
2336 | } | 2342 | } |
2337 | 2343 | ||
2338 | /** | 2344 | /** |
@@ -2340,8 +2346,7 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter, | |||
2340 | * @adapter: board private structure | 2346 | * @adapter: board private structure |
2341 | **/ | 2347 | **/ |
2342 | 2348 | ||
2343 | static void | 2349 | static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter) |
2344 | e1000_clean_all_rx_rings(struct e1000_adapter *adapter) | ||
2345 | { | 2350 | { |
2346 | int i; | 2351 | int i; |
2347 | 2352 | ||
@@ -2352,38 +2357,38 @@ e1000_clean_all_rx_rings(struct e1000_adapter *adapter) | |||
2352 | /* The 82542 2.0 (revision 2) needs to have the receive unit in reset | 2357 | /* The 82542 2.0 (revision 2) needs to have the receive unit in reset |
2353 | * and memory write and invalidate disabled for certain operations | 2358 | * and memory write and invalidate disabled for certain operations |
2354 | */ | 2359 | */ |
2355 | static void | 2360 | static void e1000_enter_82542_rst(struct e1000_adapter *adapter) |
2356 | e1000_enter_82542_rst(struct e1000_adapter *adapter) | ||
2357 | { | 2361 | { |
2362 | struct e1000_hw *hw = &adapter->hw; | ||
2358 | struct net_device *netdev = adapter->netdev; | 2363 | struct net_device *netdev = adapter->netdev; |
2359 | u32 rctl; | 2364 | u32 rctl; |
2360 | 2365 | ||
2361 | e1000_pci_clear_mwi(&adapter->hw); | 2366 | e1000_pci_clear_mwi(hw); |
2362 | 2367 | ||
2363 | rctl = E1000_READ_REG(&adapter->hw, RCTL); | 2368 | rctl = er32(RCTL); |
2364 | rctl |= E1000_RCTL_RST; | 2369 | rctl |= E1000_RCTL_RST; |
2365 | E1000_WRITE_REG(&adapter->hw, RCTL, rctl); | 2370 | ew32(RCTL, rctl); |
2366 | E1000_WRITE_FLUSH(&adapter->hw); | 2371 | E1000_WRITE_FLUSH(); |
2367 | mdelay(5); | 2372 | mdelay(5); |
2368 | 2373 | ||
2369 | if (netif_running(netdev)) | 2374 | if (netif_running(netdev)) |
2370 | e1000_clean_all_rx_rings(adapter); | 2375 | e1000_clean_all_rx_rings(adapter); |
2371 | } | 2376 | } |
2372 | 2377 | ||
2373 | static void | 2378 | static void e1000_leave_82542_rst(struct e1000_adapter *adapter) |
2374 | e1000_leave_82542_rst(struct e1000_adapter *adapter) | ||
2375 | { | 2379 | { |
2380 | struct e1000_hw *hw = &adapter->hw; | ||
2376 | struct net_device *netdev = adapter->netdev; | 2381 | struct net_device *netdev = adapter->netdev; |
2377 | u32 rctl; | 2382 | u32 rctl; |
2378 | 2383 | ||
2379 | rctl = E1000_READ_REG(&adapter->hw, RCTL); | 2384 | rctl = er32(RCTL); |
2380 | rctl &= ~E1000_RCTL_RST; | 2385 | rctl &= ~E1000_RCTL_RST; |
2381 | E1000_WRITE_REG(&adapter->hw, RCTL, rctl); | 2386 | ew32(RCTL, rctl); |
2382 | E1000_WRITE_FLUSH(&adapter->hw); | 2387 | E1000_WRITE_FLUSH(); |
2383 | mdelay(5); | 2388 | mdelay(5); |
2384 | 2389 | ||
2385 | if (adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE) | 2390 | if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) |
2386 | e1000_pci_set_mwi(&adapter->hw); | 2391 | e1000_pci_set_mwi(hw); |
2387 | 2392 | ||
2388 | if (netif_running(netdev)) { | 2393 | if (netif_running(netdev)) { |
2389 | /* No need to loop, because 82542 supports only 1 queue */ | 2394 | /* No need to loop, because 82542 supports only 1 queue */ |
@@ -2401,10 +2406,10 @@ e1000_leave_82542_rst(struct e1000_adapter *adapter) | |||
2401 | * Returns 0 on success, negative on failure | 2406 | * Returns 0 on success, negative on failure |
2402 | **/ | 2407 | **/ |
2403 | 2408 | ||
2404 | static int | 2409 | static int e1000_set_mac(struct net_device *netdev, void *p) |
2405 | e1000_set_mac(struct net_device *netdev, void *p) | ||
2406 | { | 2410 | { |
2407 | struct e1000_adapter *adapter = netdev_priv(netdev); | 2411 | struct e1000_adapter *adapter = netdev_priv(netdev); |
2412 | struct e1000_hw *hw = &adapter->hw; | ||
2408 | struct sockaddr *addr = p; | 2413 | struct sockaddr *addr = p; |
2409 | 2414 | ||
2410 | if (!is_valid_ether_addr(addr->sa_data)) | 2415 | if (!is_valid_ether_addr(addr->sa_data)) |
@@ -2412,19 +2417,19 @@ e1000_set_mac(struct net_device *netdev, void *p) | |||
2412 | 2417 | ||
2413 | /* 82542 2.0 needs to be in reset to write receive address registers */ | 2418 | /* 82542 2.0 needs to be in reset to write receive address registers */ |
2414 | 2419 | ||
2415 | if (adapter->hw.mac_type == e1000_82542_rev2_0) | 2420 | if (hw->mac_type == e1000_82542_rev2_0) |
2416 | e1000_enter_82542_rst(adapter); | 2421 | e1000_enter_82542_rst(adapter); |
2417 | 2422 | ||
2418 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | 2423 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); |
2419 | memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); | 2424 | memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len); |
2420 | 2425 | ||
2421 | e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0); | 2426 | e1000_rar_set(hw, hw->mac_addr, 0); |
2422 | 2427 | ||
2423 | /* With 82571 controllers, LAA may be overwritten (with the default) | 2428 | /* With 82571 controllers, LAA may be overwritten (with the default) |
2424 | * due to controller reset from the other port. */ | 2429 | * due to controller reset from the other port. */ |
2425 | if (adapter->hw.mac_type == e1000_82571) { | 2430 | if (hw->mac_type == e1000_82571) { |
2426 | /* activate the work around */ | 2431 | /* activate the work around */ |
2427 | adapter->hw.laa_is_present = 1; | 2432 | hw->laa_is_present = 1; |
2428 | 2433 | ||
2429 | /* Hold a copy of the LAA in RAR[14] This is done so that | 2434 | /* Hold a copy of the LAA in RAR[14] This is done so that |
2430 | * between the time RAR[0] gets clobbered and the time it | 2435 | * between the time RAR[0] gets clobbered and the time it |
@@ -2432,11 +2437,11 @@ e1000_set_mac(struct net_device *netdev, void *p) | |||
2432 | * of the RARs and no incoming packets directed to this port | 2437 | * of the RARs and no incoming packets directed to this port |
2433 | * are dropped. Eventaully the LAA will be in RAR[0] and | 2438 | * are dropped. Eventaully the LAA will be in RAR[0] and |
2434 | * RAR[14] */ | 2439 | * RAR[14] */ |
2435 | e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, | 2440 | e1000_rar_set(hw, hw->mac_addr, |
2436 | E1000_RAR_ENTRIES - 1); | 2441 | E1000_RAR_ENTRIES - 1); |
2437 | } | 2442 | } |
2438 | 2443 | ||
2439 | if (adapter->hw.mac_type == e1000_82542_rev2_0) | 2444 | if (hw->mac_type == e1000_82542_rev2_0) |
2440 | e1000_leave_82542_rst(adapter); | 2445 | e1000_leave_82542_rst(adapter); |
2441 | 2446 | ||
2442 | return 0; | 2447 | return 0; |
@@ -2452,8 +2457,7 @@ e1000_set_mac(struct net_device *netdev, void *p) | |||
2452 | * promiscuous mode, and all-multi behavior. | 2457 | * promiscuous mode, and all-multi behavior. |
2453 | **/ | 2458 | **/ |
2454 | 2459 | ||
2455 | static void | 2460 | static void e1000_set_rx_mode(struct net_device *netdev) |
2456 | e1000_set_rx_mode(struct net_device *netdev) | ||
2457 | { | 2461 | { |
2458 | struct e1000_adapter *adapter = netdev_priv(netdev); | 2462 | struct e1000_adapter *adapter = netdev_priv(netdev); |
2459 | struct e1000_hw *hw = &adapter->hw; | 2463 | struct e1000_hw *hw = &adapter->hw; |
@@ -2466,16 +2470,16 @@ e1000_set_rx_mode(struct net_device *netdev) | |||
2466 | E1000_NUM_MTA_REGISTERS_ICH8LAN : | 2470 | E1000_NUM_MTA_REGISTERS_ICH8LAN : |
2467 | E1000_NUM_MTA_REGISTERS; | 2471 | E1000_NUM_MTA_REGISTERS; |
2468 | 2472 | ||
2469 | if (adapter->hw.mac_type == e1000_ich8lan) | 2473 | if (hw->mac_type == e1000_ich8lan) |
2470 | rar_entries = E1000_RAR_ENTRIES_ICH8LAN; | 2474 | rar_entries = E1000_RAR_ENTRIES_ICH8LAN; |
2471 | 2475 | ||
2472 | /* reserve RAR[14] for LAA over-write work-around */ | 2476 | /* reserve RAR[14] for LAA over-write work-around */ |
2473 | if (adapter->hw.mac_type == e1000_82571) | 2477 | if (hw->mac_type == e1000_82571) |
2474 | rar_entries--; | 2478 | rar_entries--; |
2475 | 2479 | ||
2476 | /* Check for Promiscuous and All Multicast modes */ | 2480 | /* Check for Promiscuous and All Multicast modes */ |
2477 | 2481 | ||
2478 | rctl = E1000_READ_REG(hw, RCTL); | 2482 | rctl = er32(RCTL); |
2479 | 2483 | ||
2480 | if (netdev->flags & IFF_PROMISC) { | 2484 | if (netdev->flags & IFF_PROMISC) { |
2481 | rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); | 2485 | rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); |
@@ -2498,7 +2502,7 @@ e1000_set_rx_mode(struct net_device *netdev) | |||
2498 | uc_ptr = netdev->uc_list; | 2502 | uc_ptr = netdev->uc_list; |
2499 | } | 2503 | } |
2500 | 2504 | ||
2501 | E1000_WRITE_REG(hw, RCTL, rctl); | 2505 | ew32(RCTL, rctl); |
2502 | 2506 | ||
2503 | /* 82542 2.0 needs to be in reset to write receive address registers */ | 2507 | /* 82542 2.0 needs to be in reset to write receive address registers */ |
2504 | 2508 | ||
@@ -2524,9 +2528,9 @@ e1000_set_rx_mode(struct net_device *netdev) | |||
2524 | mc_ptr = mc_ptr->next; | 2528 | mc_ptr = mc_ptr->next; |
2525 | } else { | 2529 | } else { |
2526 | E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0); | 2530 | E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0); |
2527 | E1000_WRITE_FLUSH(hw); | 2531 | E1000_WRITE_FLUSH(); |
2528 | E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0); | 2532 | E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0); |
2529 | E1000_WRITE_FLUSH(hw); | 2533 | E1000_WRITE_FLUSH(); |
2530 | } | 2534 | } |
2531 | } | 2535 | } |
2532 | WARN_ON(uc_ptr != NULL); | 2536 | WARN_ON(uc_ptr != NULL); |
@@ -2535,7 +2539,7 @@ e1000_set_rx_mode(struct net_device *netdev) | |||
2535 | 2539 | ||
2536 | for (i = 0; i < mta_reg_count; i++) { | 2540 | for (i = 0; i < mta_reg_count; i++) { |
2537 | E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); | 2541 | E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); |
2538 | E1000_WRITE_FLUSH(hw); | 2542 | E1000_WRITE_FLUSH(); |
2539 | } | 2543 | } |
2540 | 2544 | ||
2541 | /* load any remaining addresses into the hash table */ | 2545 | /* load any remaining addresses into the hash table */ |
@@ -2552,11 +2556,11 @@ e1000_set_rx_mode(struct net_device *netdev) | |||
2552 | /* Need to wait a few seconds after link up to get diagnostic information from | 2556 | /* Need to wait a few seconds after link up to get diagnostic information from |
2553 | * the phy */ | 2557 | * the phy */ |
2554 | 2558 | ||
2555 | static void | 2559 | static void e1000_update_phy_info(unsigned long data) |
2556 | e1000_update_phy_info(unsigned long data) | ||
2557 | { | 2560 | { |
2558 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; | 2561 | struct e1000_adapter *adapter = (struct e1000_adapter *)data; |
2559 | e1000_phy_get_info(&adapter->hw, &adapter->phy_info); | 2562 | struct e1000_hw *hw = &adapter->hw; |
2563 | e1000_phy_get_info(hw, &adapter->phy_info); | ||
2560 | } | 2564 | } |
2561 | 2565 | ||
2562 | /** | 2566 | /** |
@@ -2564,33 +2568,25 @@ e1000_update_phy_info(unsigned long data) | |||
2564 | * @data: pointer to adapter cast into an unsigned long | 2568 | * @data: pointer to adapter cast into an unsigned long |
2565 | **/ | 2569 | **/ |
2566 | 2570 | ||
2567 | static void | 2571 | static void e1000_82547_tx_fifo_stall(unsigned long data) |
2568 | e1000_82547_tx_fifo_stall(unsigned long data) | ||
2569 | { | 2572 | { |
2570 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; | 2573 | struct e1000_adapter *adapter = (struct e1000_adapter *)data; |
2574 | struct e1000_hw *hw = &adapter->hw; | ||
2571 | struct net_device *netdev = adapter->netdev; | 2575 | struct net_device *netdev = adapter->netdev; |
2572 | u32 tctl; | 2576 | u32 tctl; |
2573 | 2577 | ||
2574 | if (atomic_read(&adapter->tx_fifo_stall)) { | 2578 | if (atomic_read(&adapter->tx_fifo_stall)) { |
2575 | if ((E1000_READ_REG(&adapter->hw, TDT) == | 2579 | if ((er32(TDT) == er32(TDH)) && |
2576 | E1000_READ_REG(&adapter->hw, TDH)) && | 2580 | (er32(TDFT) == er32(TDFH)) && |
2577 | (E1000_READ_REG(&adapter->hw, TDFT) == | 2581 | (er32(TDFTS) == er32(TDFHS))) { |
2578 | E1000_READ_REG(&adapter->hw, TDFH)) && | 2582 | tctl = er32(TCTL); |
2579 | (E1000_READ_REG(&adapter->hw, TDFTS) == | 2583 | ew32(TCTL, tctl & ~E1000_TCTL_EN); |
2580 | E1000_READ_REG(&adapter->hw, TDFHS))) { | 2584 | ew32(TDFT, adapter->tx_head_addr); |
2581 | tctl = E1000_READ_REG(&adapter->hw, TCTL); | 2585 | ew32(TDFH, adapter->tx_head_addr); |
2582 | E1000_WRITE_REG(&adapter->hw, TCTL, | 2586 | ew32(TDFTS, adapter->tx_head_addr); |
2583 | tctl & ~E1000_TCTL_EN); | 2587 | ew32(TDFHS, adapter->tx_head_addr); |
2584 | E1000_WRITE_REG(&adapter->hw, TDFT, | 2588 | ew32(TCTL, tctl); |
2585 | adapter->tx_head_addr); | 2589 | E1000_WRITE_FLUSH(); |
2586 | E1000_WRITE_REG(&adapter->hw, TDFH, | ||
2587 | adapter->tx_head_addr); | ||
2588 | E1000_WRITE_REG(&adapter->hw, TDFTS, | ||
2589 | adapter->tx_head_addr); | ||
2590 | E1000_WRITE_REG(&adapter->hw, TDFHS, | ||
2591 | adapter->tx_head_addr); | ||
2592 | E1000_WRITE_REG(&adapter->hw, TCTL, tctl); | ||
2593 | E1000_WRITE_FLUSH(&adapter->hw); | ||
2594 | 2590 | ||
2595 | adapter->tx_fifo_head = 0; | 2591 | adapter->tx_fifo_head = 0; |
2596 | atomic_set(&adapter->tx_fifo_stall, 0); | 2592 | atomic_set(&adapter->tx_fifo_stall, 0); |
@@ -2605,45 +2601,45 @@ e1000_82547_tx_fifo_stall(unsigned long data) | |||
2605 | * e1000_watchdog - Timer Call-back | 2601 | * e1000_watchdog - Timer Call-back |
2606 | * @data: pointer to adapter cast into an unsigned long | 2602 | * @data: pointer to adapter cast into an unsigned long |
2607 | **/ | 2603 | **/ |
2608 | static void | 2604 | static void e1000_watchdog(unsigned long data) |
2609 | e1000_watchdog(unsigned long data) | ||
2610 | { | 2605 | { |
2611 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; | 2606 | struct e1000_adapter *adapter = (struct e1000_adapter *)data; |
2607 | struct e1000_hw *hw = &adapter->hw; | ||
2612 | struct net_device *netdev = adapter->netdev; | 2608 | struct net_device *netdev = adapter->netdev; |
2613 | struct e1000_tx_ring *txdr = adapter->tx_ring; | 2609 | struct e1000_tx_ring *txdr = adapter->tx_ring; |
2614 | u32 link, tctl; | 2610 | u32 link, tctl; |
2615 | s32 ret_val; | 2611 | s32 ret_val; |
2616 | 2612 | ||
2617 | ret_val = e1000_check_for_link(&adapter->hw); | 2613 | ret_val = e1000_check_for_link(hw); |
2618 | if ((ret_val == E1000_ERR_PHY) && | 2614 | if ((ret_val == E1000_ERR_PHY) && |
2619 | (adapter->hw.phy_type == e1000_phy_igp_3) && | 2615 | (hw->phy_type == e1000_phy_igp_3) && |
2620 | (E1000_READ_REG(&adapter->hw, CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { | 2616 | (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { |
2621 | /* See e1000_kumeran_lock_loss_workaround() */ | 2617 | /* See e1000_kumeran_lock_loss_workaround() */ |
2622 | DPRINTK(LINK, INFO, | 2618 | DPRINTK(LINK, INFO, |
2623 | "Gigabit has been disabled, downgrading speed\n"); | 2619 | "Gigabit has been disabled, downgrading speed\n"); |
2624 | } | 2620 | } |
2625 | 2621 | ||
2626 | if (adapter->hw.mac_type == e1000_82573) { | 2622 | if (hw->mac_type == e1000_82573) { |
2627 | e1000_enable_tx_pkt_filtering(&adapter->hw); | 2623 | e1000_enable_tx_pkt_filtering(hw); |
2628 | if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id) | 2624 | if (adapter->mng_vlan_id != hw->mng_cookie.vlan_id) |
2629 | e1000_update_mng_vlan(adapter); | 2625 | e1000_update_mng_vlan(adapter); |
2630 | } | 2626 | } |
2631 | 2627 | ||
2632 | if ((adapter->hw.media_type == e1000_media_type_internal_serdes) && | 2628 | if ((hw->media_type == e1000_media_type_internal_serdes) && |
2633 | !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE)) | 2629 | !(er32(TXCW) & E1000_TXCW_ANE)) |
2634 | link = !adapter->hw.serdes_link_down; | 2630 | link = !hw->serdes_link_down; |
2635 | else | 2631 | else |
2636 | link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU; | 2632 | link = er32(STATUS) & E1000_STATUS_LU; |
2637 | 2633 | ||
2638 | if (link) { | 2634 | if (link) { |
2639 | if (!netif_carrier_ok(netdev)) { | 2635 | if (!netif_carrier_ok(netdev)) { |
2640 | u32 ctrl; | 2636 | u32 ctrl; |
2641 | bool txb2b = true; | 2637 | bool txb2b = true; |
2642 | e1000_get_speed_and_duplex(&adapter->hw, | 2638 | e1000_get_speed_and_duplex(hw, |
2643 | &adapter->link_speed, | 2639 | &adapter->link_speed, |
2644 | &adapter->link_duplex); | 2640 | &adapter->link_duplex); |
2645 | 2641 | ||
2646 | ctrl = E1000_READ_REG(&adapter->hw, CTRL); | 2642 | ctrl = er32(CTRL); |
2647 | DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s, " | 2643 | DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s, " |
2648 | "Flow Control: %s\n", | 2644 | "Flow Control: %s\n", |
2649 | adapter->link_speed, | 2645 | adapter->link_speed, |
@@ -2671,19 +2667,19 @@ e1000_watchdog(unsigned long data) | |||
2671 | break; | 2667 | break; |
2672 | } | 2668 | } |
2673 | 2669 | ||
2674 | if ((adapter->hw.mac_type == e1000_82571 || | 2670 | if ((hw->mac_type == e1000_82571 || |
2675 | adapter->hw.mac_type == e1000_82572) && | 2671 | hw->mac_type == e1000_82572) && |
2676 | !txb2b) { | 2672 | !txb2b) { |
2677 | u32 tarc0; | 2673 | u32 tarc0; |
2678 | tarc0 = E1000_READ_REG(&adapter->hw, TARC0); | 2674 | tarc0 = er32(TARC0); |
2679 | tarc0 &= ~(1 << 21); | 2675 | tarc0 &= ~(1 << 21); |
2680 | E1000_WRITE_REG(&adapter->hw, TARC0, tarc0); | 2676 | ew32(TARC0, tarc0); |
2681 | } | 2677 | } |
2682 | 2678 | ||
2683 | /* disable TSO for pcie and 10/100 speeds, to avoid | 2679 | /* disable TSO for pcie and 10/100 speeds, to avoid |
2684 | * some hardware issues */ | 2680 | * some hardware issues */ |
2685 | if (!adapter->tso_force && | 2681 | if (!adapter->tso_force && |
2686 | adapter->hw.bus_type == e1000_bus_type_pci_express){ | 2682 | hw->bus_type == e1000_bus_type_pci_express){ |
2687 | switch (adapter->link_speed) { | 2683 | switch (adapter->link_speed) { |
2688 | case SPEED_10: | 2684 | case SPEED_10: |
2689 | case SPEED_100: | 2685 | case SPEED_100: |
@@ -2704,9 +2700,9 @@ e1000_watchdog(unsigned long data) | |||
2704 | 2700 | ||
2705 | /* enable transmits in the hardware, need to do this | 2701 | /* enable transmits in the hardware, need to do this |
2706 | * after setting TARC0 */ | 2702 | * after setting TARC0 */ |
2707 | tctl = E1000_READ_REG(&adapter->hw, TCTL); | 2703 | tctl = er32(TCTL); |
2708 | tctl |= E1000_TCTL_EN; | 2704 | tctl |= E1000_TCTL_EN; |
2709 | E1000_WRITE_REG(&adapter->hw, TCTL, tctl); | 2705 | ew32(TCTL, tctl); |
2710 | 2706 | ||
2711 | netif_carrier_on(netdev); | 2707 | netif_carrier_on(netdev); |
2712 | netif_wake_queue(netdev); | 2708 | netif_wake_queue(netdev); |
@@ -2714,10 +2710,9 @@ e1000_watchdog(unsigned long data) | |||
2714 | adapter->smartspeed = 0; | 2710 | adapter->smartspeed = 0; |
2715 | } else { | 2711 | } else { |
2716 | /* make sure the receive unit is started */ | 2712 | /* make sure the receive unit is started */ |
2717 | if (adapter->hw.rx_needs_kicking) { | 2713 | if (hw->rx_needs_kicking) { |
2718 | struct e1000_hw *hw = &adapter->hw; | 2714 | u32 rctl = er32(RCTL); |
2719 | u32 rctl = E1000_READ_REG(hw, RCTL); | 2715 | ew32(RCTL, rctl | E1000_RCTL_EN); |
2720 | E1000_WRITE_REG(hw, RCTL, rctl | E1000_RCTL_EN); | ||
2721 | } | 2716 | } |
2722 | } | 2717 | } |
2723 | } else { | 2718 | } else { |
@@ -2734,7 +2729,7 @@ e1000_watchdog(unsigned long data) | |||
2734 | * disable receives in the ISR and | 2729 | * disable receives in the ISR and |
2735 | * reset device here in the watchdog | 2730 | * reset device here in the watchdog |
2736 | */ | 2731 | */ |
2737 | if (adapter->hw.mac_type == e1000_80003es2lan) | 2732 | if (hw->mac_type == e1000_80003es2lan) |
2738 | /* reset device */ | 2733 | /* reset device */ |
2739 | schedule_work(&adapter->reset_task); | 2734 | schedule_work(&adapter->reset_task); |
2740 | } | 2735 | } |
@@ -2744,9 +2739,9 @@ e1000_watchdog(unsigned long data) | |||
2744 | 2739 | ||
2745 | e1000_update_stats(adapter); | 2740 | e1000_update_stats(adapter); |
2746 | 2741 | ||
2747 | adapter->hw.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; | 2742 | hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; |
2748 | adapter->tpt_old = adapter->stats.tpt; | 2743 | adapter->tpt_old = adapter->stats.tpt; |
2749 | adapter->hw.collision_delta = adapter->stats.colc - adapter->colc_old; | 2744 | hw->collision_delta = adapter->stats.colc - adapter->colc_old; |
2750 | adapter->colc_old = adapter->stats.colc; | 2745 | adapter->colc_old = adapter->stats.colc; |
2751 | 2746 | ||
2752 | adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old; | 2747 | adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old; |
@@ -2754,7 +2749,7 @@ e1000_watchdog(unsigned long data) | |||
2754 | adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old; | 2749 | adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old; |
2755 | adapter->gotcl_old = adapter->stats.gotcl; | 2750 | adapter->gotcl_old = adapter->stats.gotcl; |
2756 | 2751 | ||
2757 | e1000_update_adaptive(&adapter->hw); | 2752 | e1000_update_adaptive(hw); |
2758 | 2753 | ||
2759 | if (!netif_carrier_ok(netdev)) { | 2754 | if (!netif_carrier_ok(netdev)) { |
2760 | if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { | 2755 | if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { |
@@ -2768,15 +2763,15 @@ e1000_watchdog(unsigned long data) | |||
2768 | } | 2763 | } |
2769 | 2764 | ||
2770 | /* Cause software interrupt to ensure rx ring is cleaned */ | 2765 | /* Cause software interrupt to ensure rx ring is cleaned */ |
2771 | E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0); | 2766 | ew32(ICS, E1000_ICS_RXDMT0); |
2772 | 2767 | ||
2773 | /* Force detection of hung controller every watchdog period */ | 2768 | /* Force detection of hung controller every watchdog period */ |
2774 | adapter->detect_tx_hung = true; | 2769 | adapter->detect_tx_hung = true; |
2775 | 2770 | ||
2776 | /* With 82571 controllers, LAA may be overwritten due to controller | 2771 | /* With 82571 controllers, LAA may be overwritten due to controller |
2777 | * reset from the other port. Set the appropriate LAA in RAR[0] */ | 2772 | * reset from the other port. Set the appropriate LAA in RAR[0] */ |
2778 | if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present) | 2773 | if (hw->mac_type == e1000_82571 && hw->laa_is_present) |
2779 | e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0); | 2774 | e1000_rar_set(hw, hw->mac_addr, 0); |
2780 | 2775 | ||
2781 | /* Reset the timer */ | 2776 | /* Reset the timer */ |
2782 | mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ)); | 2777 | mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ)); |
@@ -2806,9 +2801,7 @@ enum latency_range { | |||
2806 | * @bytes: the number of bytes during this measurement interval | 2801 | * @bytes: the number of bytes during this measurement interval |
2807 | **/ | 2802 | **/ |
2808 | static unsigned int e1000_update_itr(struct e1000_adapter *adapter, | 2803 | static unsigned int e1000_update_itr(struct e1000_adapter *adapter, |
2809 | u16 itr_setting, | 2804 | u16 itr_setting, int packets, int bytes) |
2810 | int packets, | ||
2811 | int bytes) | ||
2812 | { | 2805 | { |
2813 | unsigned int retval = itr_setting; | 2806 | unsigned int retval = itr_setting; |
2814 | struct e1000_hw *hw = &adapter->hw; | 2807 | struct e1000_hw *hw = &adapter->hw; |
@@ -2913,7 +2906,7 @@ set_itr_now: | |||
2913 | min(adapter->itr + (new_itr >> 2), new_itr) : | 2906 | min(adapter->itr + (new_itr >> 2), new_itr) : |
2914 | new_itr; | 2907 | new_itr; |
2915 | adapter->itr = new_itr; | 2908 | adapter->itr = new_itr; |
2916 | E1000_WRITE_REG(hw, ITR, 1000000000 / (new_itr * 256)); | 2909 | ew32(ITR, 1000000000 / (new_itr * 256)); |
2917 | } | 2910 | } |
2918 | 2911 | ||
2919 | return; | 2912 | return; |
@@ -2926,9 +2919,8 @@ set_itr_now: | |||
2926 | #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 | 2919 | #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 |
2927 | #define E1000_TX_FLAGS_VLAN_SHIFT 16 | 2920 | #define E1000_TX_FLAGS_VLAN_SHIFT 16 |
2928 | 2921 | ||
2929 | static int | 2922 | static int e1000_tso(struct e1000_adapter *adapter, |
2930 | e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | 2923 | struct e1000_tx_ring *tx_ring, struct sk_buff *skb) |
2931 | struct sk_buff *skb) | ||
2932 | { | 2924 | { |
2933 | struct e1000_context_desc *context_desc; | 2925 | struct e1000_context_desc *context_desc; |
2934 | struct e1000_buffer *buffer_info; | 2926 | struct e1000_buffer *buffer_info; |
@@ -2999,9 +2991,8 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
2999 | return false; | 2991 | return false; |
3000 | } | 2992 | } |
3001 | 2993 | ||
3002 | static bool | 2994 | static bool e1000_tx_csum(struct e1000_adapter *adapter, |
3003 | e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | 2995 | struct e1000_tx_ring *tx_ring, struct sk_buff *skb) |
3004 | struct sk_buff *skb) | ||
3005 | { | 2996 | { |
3006 | struct e1000_context_desc *context_desc; | 2997 | struct e1000_context_desc *context_desc; |
3007 | struct e1000_buffer *buffer_info; | 2998 | struct e1000_buffer *buffer_info; |
@@ -3038,11 +3029,13 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
3038 | #define E1000_MAX_TXD_PWR 12 | 3029 | #define E1000_MAX_TXD_PWR 12 |
3039 | #define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR) | 3030 | #define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR) |
3040 | 3031 | ||
3041 | static int | 3032 | static int e1000_tx_map(struct e1000_adapter *adapter, |
3042 | e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | 3033 | struct e1000_tx_ring *tx_ring, |
3043 | struct sk_buff *skb, unsigned int first, unsigned int max_per_txd, | 3034 | struct sk_buff *skb, unsigned int first, |
3044 | unsigned int nr_frags, unsigned int mss) | 3035 | unsigned int max_per_txd, unsigned int nr_frags, |
3036 | unsigned int mss) | ||
3045 | { | 3037 | { |
3038 | struct e1000_hw *hw = &adapter->hw; | ||
3046 | struct e1000_buffer *buffer_info; | 3039 | struct e1000_buffer *buffer_info; |
3047 | unsigned int len = skb->len; | 3040 | unsigned int len = skb->len; |
3048 | unsigned int offset = 0, size, count = 0, i; | 3041 | unsigned int offset = 0, size, count = 0, i; |
@@ -3073,7 +3066,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
3073 | * The fix is to make sure that the first descriptor of a | 3066 | * The fix is to make sure that the first descriptor of a |
3074 | * packet is smaller than 2048 - 16 - 16 (or 2016) bytes | 3067 | * packet is smaller than 2048 - 16 - 16 (or 2016) bytes |
3075 | */ | 3068 | */ |
3076 | if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) && | 3069 | if (unlikely((hw->bus_type == e1000_bus_type_pcix) && |
3077 | (size > 2015) && count == 0)) | 3070 | (size > 2015) && count == 0)) |
3078 | size = 2015; | 3071 | size = 2015; |
3079 | 3072 | ||
@@ -3145,10 +3138,11 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
3145 | return count; | 3138 | return count; |
3146 | } | 3139 | } |
3147 | 3140 | ||
3148 | static void | 3141 | static void e1000_tx_queue(struct e1000_adapter *adapter, |
3149 | e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | 3142 | struct e1000_tx_ring *tx_ring, int tx_flags, |
3150 | int tx_flags, int count) | 3143 | int count) |
3151 | { | 3144 | { |
3145 | struct e1000_hw *hw = &adapter->hw; | ||
3152 | struct e1000_tx_desc *tx_desc = NULL; | 3146 | struct e1000_tx_desc *tx_desc = NULL; |
3153 | struct e1000_buffer *buffer_info; | 3147 | struct e1000_buffer *buffer_info; |
3154 | u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; | 3148 | u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; |
@@ -3194,7 +3188,7 @@ e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
3194 | wmb(); | 3188 | wmb(); |
3195 | 3189 | ||
3196 | tx_ring->next_to_use = i; | 3190 | tx_ring->next_to_use = i; |
3197 | writel(i, adapter->hw.hw_addr + tx_ring->tdt); | 3191 | writel(i, hw->hw_addr + tx_ring->tdt); |
3198 | /* we need this if more than one processor can write to our tail | 3192 | /* we need this if more than one processor can write to our tail |
3199 | * at a time, it syncronizes IO on IA64/Altix systems */ | 3193 | * at a time, it syncronizes IO on IA64/Altix systems */ |
3200 | mmiowb(); | 3194 | mmiowb(); |
@@ -3212,8 +3206,8 @@ e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
3212 | #define E1000_FIFO_HDR 0x10 | 3206 | #define E1000_FIFO_HDR 0x10 |
3213 | #define E1000_82547_PAD_LEN 0x3E0 | 3207 | #define E1000_82547_PAD_LEN 0x3E0 |
3214 | 3208 | ||
3215 | static int | 3209 | static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, |
3216 | e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb) | 3210 | struct sk_buff *skb) |
3217 | { | 3211 | { |
3218 | u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; | 3212 | u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; |
3219 | u32 skb_fifo_len = skb->len + E1000_FIFO_HDR; | 3213 | u32 skb_fifo_len = skb->len + E1000_FIFO_HDR; |
@@ -3239,19 +3233,19 @@ no_fifo_stall_required: | |||
3239 | } | 3233 | } |
3240 | 3234 | ||
3241 | #define MINIMUM_DHCP_PACKET_SIZE 282 | 3235 | #define MINIMUM_DHCP_PACKET_SIZE 282 |
3242 | static int | 3236 | static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter, |
3243 | e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb) | 3237 | struct sk_buff *skb) |
3244 | { | 3238 | { |
3245 | struct e1000_hw *hw = &adapter->hw; | 3239 | struct e1000_hw *hw = &adapter->hw; |
3246 | u16 length, offset; | 3240 | u16 length, offset; |
3247 | if (vlan_tx_tag_present(skb)) { | 3241 | if (vlan_tx_tag_present(skb)) { |
3248 | if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && | 3242 | if (!((vlan_tx_tag_get(skb) == hw->mng_cookie.vlan_id) && |
3249 | ( adapter->hw.mng_cookie.status & | 3243 | ( hw->mng_cookie.status & |
3250 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) ) | 3244 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) ) |
3251 | return 0; | 3245 | return 0; |
3252 | } | 3246 | } |
3253 | if (skb->len > MINIMUM_DHCP_PACKET_SIZE) { | 3247 | if (skb->len > MINIMUM_DHCP_PACKET_SIZE) { |
3254 | struct ethhdr *eth = (struct ethhdr *) skb->data; | 3248 | struct ethhdr *eth = (struct ethhdr *)skb->data; |
3255 | if ((htons(ETH_P_IP) == eth->h_proto)) { | 3249 | if ((htons(ETH_P_IP) == eth->h_proto)) { |
3256 | const struct iphdr *ip = | 3250 | const struct iphdr *ip = |
3257 | (struct iphdr *)((u8 *)skb->data+14); | 3251 | (struct iphdr *)((u8 *)skb->data+14); |
@@ -3304,10 +3298,10 @@ static int e1000_maybe_stop_tx(struct net_device *netdev, | |||
3304 | } | 3298 | } |
3305 | 3299 | ||
3306 | #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) | 3300 | #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) |
3307 | static int | 3301 | static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) |
3308 | e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | ||
3309 | { | 3302 | { |
3310 | struct e1000_adapter *adapter = netdev_priv(netdev); | 3303 | struct e1000_adapter *adapter = netdev_priv(netdev); |
3304 | struct e1000_hw *hw = &adapter->hw; | ||
3311 | struct e1000_tx_ring *tx_ring; | 3305 | struct e1000_tx_ring *tx_ring; |
3312 | unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; | 3306 | unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; |
3313 | unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; | 3307 | unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; |
@@ -3333,7 +3327,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
3333 | 3327 | ||
3334 | /* 82571 and newer doesn't need the workaround that limited descriptor | 3328 | /* 82571 and newer doesn't need the workaround that limited descriptor |
3335 | * length to 4kB */ | 3329 | * length to 4kB */ |
3336 | if (adapter->hw.mac_type >= e1000_82571) | 3330 | if (hw->mac_type >= e1000_82571) |
3337 | max_per_txd = 8192; | 3331 | max_per_txd = 8192; |
3338 | 3332 | ||
3339 | mss = skb_shinfo(skb)->gso_size; | 3333 | mss = skb_shinfo(skb)->gso_size; |
@@ -3353,7 +3347,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
3353 | * frags into skb->data */ | 3347 | * frags into skb->data */ |
3354 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | 3348 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); |
3355 | if (skb->data_len && hdr_len == len) { | 3349 | if (skb->data_len && hdr_len == len) { |
3356 | switch (adapter->hw.mac_type) { | 3350 | switch (hw->mac_type) { |
3357 | unsigned int pull_size; | 3351 | unsigned int pull_size; |
3358 | case e1000_82544: | 3352 | case e1000_82544: |
3359 | /* Make sure we have room to chop off 4 bytes, | 3353 | /* Make sure we have room to chop off 4 bytes, |
@@ -3402,7 +3396,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
3402 | /* work-around for errata 10 and it applies to all controllers | 3396 | /* work-around for errata 10 and it applies to all controllers |
3403 | * in PCI-X mode, so add one more descriptor to the count | 3397 | * in PCI-X mode, so add one more descriptor to the count |
3404 | */ | 3398 | */ |
3405 | if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) && | 3399 | if (unlikely((hw->bus_type == e1000_bus_type_pcix) && |
3406 | (len > 2015))) | 3400 | (len > 2015))) |
3407 | count++; | 3401 | count++; |
3408 | 3402 | ||
@@ -3414,8 +3408,8 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
3414 | count += nr_frags; | 3408 | count += nr_frags; |
3415 | 3409 | ||
3416 | 3410 | ||
3417 | if (adapter->hw.tx_pkt_filtering && | 3411 | if (hw->tx_pkt_filtering && |
3418 | (adapter->hw.mac_type == e1000_82573)) | 3412 | (hw->mac_type == e1000_82573)) |
3419 | e1000_transfer_dhcp_info(adapter, skb); | 3413 | e1000_transfer_dhcp_info(adapter, skb); |
3420 | 3414 | ||
3421 | if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) | 3415 | if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) |
@@ -3429,7 +3423,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
3429 | return NETDEV_TX_BUSY; | 3423 | return NETDEV_TX_BUSY; |
3430 | } | 3424 | } |
3431 | 3425 | ||
3432 | if (unlikely(adapter->hw.mac_type == e1000_82547)) { | 3426 | if (unlikely(hw->mac_type == e1000_82547)) { |
3433 | if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) { | 3427 | if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) { |
3434 | netif_stop_queue(netdev); | 3428 | netif_stop_queue(netdev); |
3435 | mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1); | 3429 | mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1); |
@@ -3482,8 +3476,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
3482 | * @netdev: network interface device structure | 3476 | * @netdev: network interface device structure |
3483 | **/ | 3477 | **/ |
3484 | 3478 | ||
3485 | static void | 3479 | static void e1000_tx_timeout(struct net_device *netdev) |
3486 | e1000_tx_timeout(struct net_device *netdev) | ||
3487 | { | 3480 | { |
3488 | struct e1000_adapter *adapter = netdev_priv(netdev); | 3481 | struct e1000_adapter *adapter = netdev_priv(netdev); |
3489 | 3482 | ||
@@ -3492,8 +3485,7 @@ e1000_tx_timeout(struct net_device *netdev) | |||
3492 | schedule_work(&adapter->reset_task); | 3485 | schedule_work(&adapter->reset_task); |
3493 | } | 3486 | } |
3494 | 3487 | ||
3495 | static void | 3488 | static void e1000_reset_task(struct work_struct *work) |
3496 | e1000_reset_task(struct work_struct *work) | ||
3497 | { | 3489 | { |
3498 | struct e1000_adapter *adapter = | 3490 | struct e1000_adapter *adapter = |
3499 | container_of(work, struct e1000_adapter, reset_task); | 3491 | container_of(work, struct e1000_adapter, reset_task); |
@@ -3509,8 +3501,7 @@ e1000_reset_task(struct work_struct *work) | |||
3509 | * The statistics are actually updated from the timer callback. | 3501 | * The statistics are actually updated from the timer callback. |
3510 | **/ | 3502 | **/ |
3511 | 3503 | ||
3512 | static struct net_device_stats * | 3504 | static struct net_device_stats *e1000_get_stats(struct net_device *netdev) |
3513 | e1000_get_stats(struct net_device *netdev) | ||
3514 | { | 3505 | { |
3515 | struct e1000_adapter *adapter = netdev_priv(netdev); | 3506 | struct e1000_adapter *adapter = netdev_priv(netdev); |
3516 | 3507 | ||
@@ -3526,10 +3517,10 @@ e1000_get_stats(struct net_device *netdev) | |||
3526 | * Returns 0 on success, negative on failure | 3517 | * Returns 0 on success, negative on failure |
3527 | **/ | 3518 | **/ |
3528 | 3519 | ||
3529 | static int | 3520 | static int e1000_change_mtu(struct net_device *netdev, int new_mtu) |
3530 | e1000_change_mtu(struct net_device *netdev, int new_mtu) | ||
3531 | { | 3521 | { |
3532 | struct e1000_adapter *adapter = netdev_priv(netdev); | 3522 | struct e1000_adapter *adapter = netdev_priv(netdev); |
3523 | struct e1000_hw *hw = &adapter->hw; | ||
3533 | int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; | 3524 | int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; |
3534 | u16 eeprom_data = 0; | 3525 | u16 eeprom_data = 0; |
3535 | 3526 | ||
@@ -3540,7 +3531,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
3540 | } | 3531 | } |
3541 | 3532 | ||
3542 | /* Adapter-specific max frame size limits. */ | 3533 | /* Adapter-specific max frame size limits. */ |
3543 | switch (adapter->hw.mac_type) { | 3534 | switch (hw->mac_type) { |
3544 | case e1000_undefined ... e1000_82542_rev2_1: | 3535 | case e1000_undefined ... e1000_82542_rev2_1: |
3545 | case e1000_ich8lan: | 3536 | case e1000_ich8lan: |
3546 | if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) { | 3537 | if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) { |
@@ -3552,9 +3543,9 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
3552 | /* Jumbo Frames not supported if: | 3543 | /* Jumbo Frames not supported if: |
3553 | * - this is not an 82573L device | 3544 | * - this is not an 82573L device |
3554 | * - ASPM is enabled in any way (0x1A bits 3:2) */ | 3545 | * - ASPM is enabled in any way (0x1A bits 3:2) */ |
3555 | e1000_read_eeprom(&adapter->hw, EEPROM_INIT_3GIO_3, 1, | 3546 | e1000_read_eeprom(hw, EEPROM_INIT_3GIO_3, 1, |
3556 | &eeprom_data); | 3547 | &eeprom_data); |
3557 | if ((adapter->hw.device_id != E1000_DEV_ID_82573L) || | 3548 | if ((hw->device_id != E1000_DEV_ID_82573L) || |
3558 | (eeprom_data & EEPROM_WORD1A_ASPM_MASK)) { | 3549 | (eeprom_data & EEPROM_WORD1A_ASPM_MASK)) { |
3559 | if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) { | 3550 | if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) { |
3560 | DPRINTK(PROBE, ERR, | 3551 | DPRINTK(PROBE, ERR, |
@@ -3601,13 +3592,13 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
3601 | adapter->rx_buffer_len = E1000_RXBUFFER_16384; | 3592 | adapter->rx_buffer_len = E1000_RXBUFFER_16384; |
3602 | 3593 | ||
3603 | /* adjust allocation if LPE protects us, and we aren't using SBP */ | 3594 | /* adjust allocation if LPE protects us, and we aren't using SBP */ |
3604 | if (!adapter->hw.tbi_compatibility_on && | 3595 | if (!hw->tbi_compatibility_on && |
3605 | ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) || | 3596 | ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) || |
3606 | (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) | 3597 | (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) |
3607 | adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; | 3598 | adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; |
3608 | 3599 | ||
3609 | netdev->mtu = new_mtu; | 3600 | netdev->mtu = new_mtu; |
3610 | adapter->hw.max_frame_size = max_frame; | 3601 | hw->max_frame_size = max_frame; |
3611 | 3602 | ||
3612 | if (netif_running(netdev)) | 3603 | if (netif_running(netdev)) |
3613 | e1000_reinit_locked(adapter); | 3604 | e1000_reinit_locked(adapter); |
@@ -3620,8 +3611,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
3620 | * @adapter: board private structure | 3611 | * @adapter: board private structure |
3621 | **/ | 3612 | **/ |
3622 | 3613 | ||
3623 | void | 3614 | void e1000_update_stats(struct e1000_adapter *adapter) |
3624 | e1000_update_stats(struct e1000_adapter *adapter) | ||
3625 | { | 3615 | { |
3626 | struct e1000_hw *hw = &adapter->hw; | 3616 | struct e1000_hw *hw = &adapter->hw; |
3627 | struct pci_dev *pdev = adapter->pdev; | 3617 | struct pci_dev *pdev = adapter->pdev; |
@@ -3646,89 +3636,89 @@ e1000_update_stats(struct e1000_adapter *adapter) | |||
3646 | * be written while holding adapter->stats_lock | 3636 | * be written while holding adapter->stats_lock |
3647 | */ | 3637 | */ |
3648 | 3638 | ||
3649 | adapter->stats.crcerrs += E1000_READ_REG(hw, CRCERRS); | 3639 | adapter->stats.crcerrs += er32(CRCERRS); |
3650 | adapter->stats.gprc += E1000_READ_REG(hw, GPRC); | 3640 | adapter->stats.gprc += er32(GPRC); |
3651 | adapter->stats.gorcl += E1000_READ_REG(hw, GORCL); | 3641 | adapter->stats.gorcl += er32(GORCL); |
3652 | adapter->stats.gorch += E1000_READ_REG(hw, GORCH); | 3642 | adapter->stats.gorch += er32(GORCH); |
3653 | adapter->stats.bprc += E1000_READ_REG(hw, BPRC); | 3643 | adapter->stats.bprc += er32(BPRC); |
3654 | adapter->stats.mprc += E1000_READ_REG(hw, MPRC); | 3644 | adapter->stats.mprc += er32(MPRC); |
3655 | adapter->stats.roc += E1000_READ_REG(hw, ROC); | 3645 | adapter->stats.roc += er32(ROC); |
3656 | 3646 | ||
3657 | if (adapter->hw.mac_type != e1000_ich8lan) { | 3647 | if (hw->mac_type != e1000_ich8lan) { |
3658 | adapter->stats.prc64 += E1000_READ_REG(hw, PRC64); | 3648 | adapter->stats.prc64 += er32(PRC64); |
3659 | adapter->stats.prc127 += E1000_READ_REG(hw, PRC127); | 3649 | adapter->stats.prc127 += er32(PRC127); |
3660 | adapter->stats.prc255 += E1000_READ_REG(hw, PRC255); | 3650 | adapter->stats.prc255 += er32(PRC255); |
3661 | adapter->stats.prc511 += E1000_READ_REG(hw, PRC511); | 3651 | adapter->stats.prc511 += er32(PRC511); |
3662 | adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023); | 3652 | adapter->stats.prc1023 += er32(PRC1023); |
3663 | adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522); | 3653 | adapter->stats.prc1522 += er32(PRC1522); |
3664 | } | 3654 | } |
3665 | 3655 | ||
3666 | adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS); | 3656 | adapter->stats.symerrs += er32(SYMERRS); |
3667 | adapter->stats.mpc += E1000_READ_REG(hw, MPC); | 3657 | adapter->stats.mpc += er32(MPC); |
3668 | adapter->stats.scc += E1000_READ_REG(hw, SCC); | 3658 | adapter->stats.scc += er32(SCC); |
3669 | adapter->stats.ecol += E1000_READ_REG(hw, ECOL); | 3659 | adapter->stats.ecol += er32(ECOL); |
3670 | adapter->stats.mcc += E1000_READ_REG(hw, MCC); | 3660 | adapter->stats.mcc += er32(MCC); |
3671 | adapter->stats.latecol += E1000_READ_REG(hw, LATECOL); | 3661 | adapter->stats.latecol += er32(LATECOL); |
3672 | adapter->stats.dc += E1000_READ_REG(hw, DC); | 3662 | adapter->stats.dc += er32(DC); |
3673 | adapter->stats.sec += E1000_READ_REG(hw, SEC); | 3663 | adapter->stats.sec += er32(SEC); |
3674 | adapter->stats.rlec += E1000_READ_REG(hw, RLEC); | 3664 | adapter->stats.rlec += er32(RLEC); |
3675 | adapter->stats.xonrxc += E1000_READ_REG(hw, XONRXC); | 3665 | adapter->stats.xonrxc += er32(XONRXC); |
3676 | adapter->stats.xontxc += E1000_READ_REG(hw, XONTXC); | 3666 | adapter->stats.xontxc += er32(XONTXC); |
3677 | adapter->stats.xoffrxc += E1000_READ_REG(hw, XOFFRXC); | 3667 | adapter->stats.xoffrxc += er32(XOFFRXC); |
3678 | adapter->stats.xofftxc += E1000_READ_REG(hw, XOFFTXC); | 3668 | adapter->stats.xofftxc += er32(XOFFTXC); |
3679 | adapter->stats.fcruc += E1000_READ_REG(hw, FCRUC); | 3669 | adapter->stats.fcruc += er32(FCRUC); |
3680 | adapter->stats.gptc += E1000_READ_REG(hw, GPTC); | 3670 | adapter->stats.gptc += er32(GPTC); |
3681 | adapter->stats.gotcl += E1000_READ_REG(hw, GOTCL); | 3671 | adapter->stats.gotcl += er32(GOTCL); |
3682 | adapter->stats.gotch += E1000_READ_REG(hw, GOTCH); | 3672 | adapter->stats.gotch += er32(GOTCH); |
3683 | adapter->stats.rnbc += E1000_READ_REG(hw, RNBC); | 3673 | adapter->stats.rnbc += er32(RNBC); |
3684 | adapter->stats.ruc += E1000_READ_REG(hw, RUC); | 3674 | adapter->stats.ruc += er32(RUC); |
3685 | adapter->stats.rfc += E1000_READ_REG(hw, RFC); | 3675 | adapter->stats.rfc += er32(RFC); |
3686 | adapter->stats.rjc += E1000_READ_REG(hw, RJC); | 3676 | adapter->stats.rjc += er32(RJC); |
3687 | adapter->stats.torl += E1000_READ_REG(hw, TORL); | 3677 | adapter->stats.torl += er32(TORL); |
3688 | adapter->stats.torh += E1000_READ_REG(hw, TORH); | 3678 | adapter->stats.torh += er32(TORH); |
3689 | adapter->stats.totl += E1000_READ_REG(hw, TOTL); | 3679 | adapter->stats.totl += er32(TOTL); |
3690 | adapter->stats.toth += E1000_READ_REG(hw, TOTH); | 3680 | adapter->stats.toth += er32(TOTH); |
3691 | adapter->stats.tpr += E1000_READ_REG(hw, TPR); | 3681 | adapter->stats.tpr += er32(TPR); |
3692 | 3682 | ||
3693 | if (adapter->hw.mac_type != e1000_ich8lan) { | 3683 | if (hw->mac_type != e1000_ich8lan) { |
3694 | adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64); | 3684 | adapter->stats.ptc64 += er32(PTC64); |
3695 | adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127); | 3685 | adapter->stats.ptc127 += er32(PTC127); |
3696 | adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255); | 3686 | adapter->stats.ptc255 += er32(PTC255); |
3697 | adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511); | 3687 | adapter->stats.ptc511 += er32(PTC511); |
3698 | adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023); | 3688 | adapter->stats.ptc1023 += er32(PTC1023); |
3699 | adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522); | 3689 | adapter->stats.ptc1522 += er32(PTC1522); |
3700 | } | 3690 | } |
3701 | 3691 | ||
3702 | adapter->stats.mptc += E1000_READ_REG(hw, MPTC); | 3692 | adapter->stats.mptc += er32(MPTC); |
3703 | adapter->stats.bptc += E1000_READ_REG(hw, BPTC); | 3693 | adapter->stats.bptc += er32(BPTC); |
3704 | 3694 | ||
3705 | /* used for adaptive IFS */ | 3695 | /* used for adaptive IFS */ |
3706 | 3696 | ||
3707 | hw->tx_packet_delta = E1000_READ_REG(hw, TPT); | 3697 | hw->tx_packet_delta = er32(TPT); |
3708 | adapter->stats.tpt += hw->tx_packet_delta; | 3698 | adapter->stats.tpt += hw->tx_packet_delta; |
3709 | hw->collision_delta = E1000_READ_REG(hw, COLC); | 3699 | hw->collision_delta = er32(COLC); |
3710 | adapter->stats.colc += hw->collision_delta; | 3700 | adapter->stats.colc += hw->collision_delta; |
3711 | 3701 | ||
3712 | if (hw->mac_type >= e1000_82543) { | 3702 | if (hw->mac_type >= e1000_82543) { |
3713 | adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC); | 3703 | adapter->stats.algnerrc += er32(ALGNERRC); |
3714 | adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC); | 3704 | adapter->stats.rxerrc += er32(RXERRC); |
3715 | adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS); | 3705 | adapter->stats.tncrs += er32(TNCRS); |
3716 | adapter->stats.cexterr += E1000_READ_REG(hw, CEXTERR); | 3706 | adapter->stats.cexterr += er32(CEXTERR); |
3717 | adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC); | 3707 | adapter->stats.tsctc += er32(TSCTC); |
3718 | adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC); | 3708 | adapter->stats.tsctfc += er32(TSCTFC); |
3719 | } | 3709 | } |
3720 | if (hw->mac_type > e1000_82547_rev_2) { | 3710 | if (hw->mac_type > e1000_82547_rev_2) { |
3721 | adapter->stats.iac += E1000_READ_REG(hw, IAC); | 3711 | adapter->stats.iac += er32(IAC); |
3722 | adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC); | 3712 | adapter->stats.icrxoc += er32(ICRXOC); |
3723 | 3713 | ||
3724 | if (adapter->hw.mac_type != e1000_ich8lan) { | 3714 | if (hw->mac_type != e1000_ich8lan) { |
3725 | adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC); | 3715 | adapter->stats.icrxptc += er32(ICRXPTC); |
3726 | adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC); | 3716 | adapter->stats.icrxatc += er32(ICRXATC); |
3727 | adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC); | 3717 | adapter->stats.ictxptc += er32(ICTXPTC); |
3728 | adapter->stats.ictxatc += E1000_READ_REG(hw, ICTXATC); | 3718 | adapter->stats.ictxatc += er32(ICTXATC); |
3729 | adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC); | 3719 | adapter->stats.ictxqec += er32(ICTXQEC); |
3730 | adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC); | 3720 | adapter->stats.ictxqmtc += er32(ICTXQMTC); |
3731 | adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC); | 3721 | adapter->stats.icrxdmtc += er32(ICRXDMTC); |
3732 | } | 3722 | } |
3733 | } | 3723 | } |
3734 | 3724 | ||
@@ -3756,7 +3746,7 @@ e1000_update_stats(struct e1000_adapter *adapter) | |||
3756 | adapter->net_stats.tx_aborted_errors = adapter->stats.ecol; | 3746 | adapter->net_stats.tx_aborted_errors = adapter->stats.ecol; |
3757 | adapter->net_stats.tx_window_errors = adapter->stats.latecol; | 3747 | adapter->net_stats.tx_window_errors = adapter->stats.latecol; |
3758 | adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs; | 3748 | adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs; |
3759 | if (adapter->hw.bad_tx_carr_stats_fd && | 3749 | if (hw->bad_tx_carr_stats_fd && |
3760 | adapter->link_duplex == FULL_DUPLEX) { | 3750 | adapter->link_duplex == FULL_DUPLEX) { |
3761 | adapter->net_stats.tx_carrier_errors = 0; | 3751 | adapter->net_stats.tx_carrier_errors = 0; |
3762 | adapter->stats.tncrs = 0; | 3752 | adapter->stats.tncrs = 0; |
@@ -3779,10 +3769,10 @@ e1000_update_stats(struct e1000_adapter *adapter) | |||
3779 | } | 3769 | } |
3780 | 3770 | ||
3781 | /* Management Stats */ | 3771 | /* Management Stats */ |
3782 | if (adapter->hw.has_smbus) { | 3772 | if (hw->has_smbus) { |
3783 | adapter->stats.mgptc += E1000_READ_REG(hw, MGTPTC); | 3773 | adapter->stats.mgptc += er32(MGTPTC); |
3784 | adapter->stats.mgprc += E1000_READ_REG(hw, MGTPRC); | 3774 | adapter->stats.mgprc += er32(MGTPRC); |
3785 | adapter->stats.mgpdc += E1000_READ_REG(hw, MGTPDC); | 3775 | adapter->stats.mgpdc += er32(MGTPDC); |
3786 | } | 3776 | } |
3787 | 3777 | ||
3788 | spin_unlock_irqrestore(&adapter->stats_lock, flags); | 3778 | spin_unlock_irqrestore(&adapter->stats_lock, flags); |
@@ -3794,16 +3784,12 @@ e1000_update_stats(struct e1000_adapter *adapter) | |||
3794 | * @data: pointer to a network interface device structure | 3784 | * @data: pointer to a network interface device structure |
3795 | **/ | 3785 | **/ |
3796 | 3786 | ||
3797 | static irqreturn_t | 3787 | static irqreturn_t e1000_intr_msi(int irq, void *data) |
3798 | e1000_intr_msi(int irq, void *data) | ||
3799 | { | 3788 | { |
3800 | struct net_device *netdev = data; | 3789 | struct net_device *netdev = data; |
3801 | struct e1000_adapter *adapter = netdev_priv(netdev); | 3790 | struct e1000_adapter *adapter = netdev_priv(netdev); |
3802 | struct e1000_hw *hw = &adapter->hw; | 3791 | struct e1000_hw *hw = &adapter->hw; |
3803 | #ifndef CONFIG_E1000_NAPI | 3792 | u32 icr = er32(ICR); |
3804 | int i; | ||
3805 | #endif | ||
3806 | u32 icr = E1000_READ_REG(hw, ICR); | ||
3807 | 3793 | ||
3808 | /* in NAPI mode read ICR disables interrupts using IAM */ | 3794 | /* in NAPI mode read ICR disables interrupts using IAM */ |
3809 | 3795 | ||
@@ -3813,17 +3799,16 @@ e1000_intr_msi(int irq, void *data) | |||
3813 | * link down event; disable receives here in the ISR and reset | 3799 | * link down event; disable receives here in the ISR and reset |
3814 | * adapter in watchdog */ | 3800 | * adapter in watchdog */ |
3815 | if (netif_carrier_ok(netdev) && | 3801 | if (netif_carrier_ok(netdev) && |
3816 | (adapter->hw.mac_type == e1000_80003es2lan)) { | 3802 | (hw->mac_type == e1000_80003es2lan)) { |
3817 | /* disable receives */ | 3803 | /* disable receives */ |
3818 | u32 rctl = E1000_READ_REG(hw, RCTL); | 3804 | u32 rctl = er32(RCTL); |
3819 | E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN); | 3805 | ew32(RCTL, rctl & ~E1000_RCTL_EN); |
3820 | } | 3806 | } |
3821 | /* guard against interrupt when we're going down */ | 3807 | /* guard against interrupt when we're going down */ |
3822 | if (!test_bit(__E1000_DOWN, &adapter->flags)) | 3808 | if (!test_bit(__E1000_DOWN, &adapter->flags)) |
3823 | mod_timer(&adapter->watchdog_timer, jiffies + 1); | 3809 | mod_timer(&adapter->watchdog_timer, jiffies + 1); |
3824 | } | 3810 | } |
3825 | 3811 | ||
3826 | #ifdef CONFIG_E1000_NAPI | ||
3827 | if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) { | 3812 | if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) { |
3828 | adapter->total_tx_bytes = 0; | 3813 | adapter->total_tx_bytes = 0; |
3829 | adapter->total_tx_packets = 0; | 3814 | adapter->total_tx_packets = 0; |
@@ -3832,20 +3817,6 @@ e1000_intr_msi(int irq, void *data) | |||
3832 | __netif_rx_schedule(netdev, &adapter->napi); | 3817 | __netif_rx_schedule(netdev, &adapter->napi); |
3833 | } else | 3818 | } else |
3834 | e1000_irq_enable(adapter); | 3819 | e1000_irq_enable(adapter); |
3835 | #else | ||
3836 | adapter->total_tx_bytes = 0; | ||
3837 | adapter->total_rx_bytes = 0; | ||
3838 | adapter->total_tx_packets = 0; | ||
3839 | adapter->total_rx_packets = 0; | ||
3840 | |||
3841 | for (i = 0; i < E1000_MAX_INTR; i++) | ||
3842 | if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) & | ||
3843 | !e1000_clean_tx_irq(adapter, adapter->tx_ring))) | ||
3844 | break; | ||
3845 | |||
3846 | if (likely(adapter->itr_setting & 3)) | ||
3847 | e1000_set_itr(adapter); | ||
3848 | #endif | ||
3849 | 3820 | ||
3850 | return IRQ_HANDLED; | 3821 | return IRQ_HANDLED; |
3851 | } | 3822 | } |
@@ -3856,20 +3827,16 @@ e1000_intr_msi(int irq, void *data) | |||
3856 | * @data: pointer to a network interface device structure | 3827 | * @data: pointer to a network interface device structure |
3857 | **/ | 3828 | **/ |
3858 | 3829 | ||
3859 | static irqreturn_t | 3830 | static irqreturn_t e1000_intr(int irq, void *data) |
3860 | e1000_intr(int irq, void *data) | ||
3861 | { | 3831 | { |
3862 | struct net_device *netdev = data; | 3832 | struct net_device *netdev = data; |
3863 | struct e1000_adapter *adapter = netdev_priv(netdev); | 3833 | struct e1000_adapter *adapter = netdev_priv(netdev); |
3864 | struct e1000_hw *hw = &adapter->hw; | 3834 | struct e1000_hw *hw = &adapter->hw; |
3865 | u32 rctl, icr = E1000_READ_REG(hw, ICR); | 3835 | u32 rctl, icr = er32(ICR); |
3866 | #ifndef CONFIG_E1000_NAPI | 3836 | |
3867 | int i; | ||
3868 | #endif | ||
3869 | if (unlikely(!icr)) | 3837 | if (unlikely(!icr)) |
3870 | return IRQ_NONE; /* Not our interrupt */ | 3838 | return IRQ_NONE; /* Not our interrupt */ |
3871 | 3839 | ||
3872 | #ifdef CONFIG_E1000_NAPI | ||
3873 | /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is | 3840 | /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is |
3874 | * not set, then the adapter didn't send an interrupt */ | 3841 | * not set, then the adapter didn't send an interrupt */ |
3875 | if (unlikely(hw->mac_type >= e1000_82571 && | 3842 | if (unlikely(hw->mac_type >= e1000_82571 && |
@@ -3878,7 +3845,6 @@ e1000_intr(int irq, void *data) | |||
3878 | 3845 | ||
3879 | /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No | 3846 | /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No |
3880 | * need for the IMC write */ | 3847 | * need for the IMC write */ |
3881 | #endif | ||
3882 | 3848 | ||
3883 | if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { | 3849 | if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { |
3884 | hw->get_link_status = 1; | 3850 | hw->get_link_status = 1; |
@@ -3888,21 +3854,20 @@ e1000_intr(int irq, void *data) | |||
3888 | * reset adapter in watchdog | 3854 | * reset adapter in watchdog |
3889 | */ | 3855 | */ |
3890 | if (netif_carrier_ok(netdev) && | 3856 | if (netif_carrier_ok(netdev) && |
3891 | (adapter->hw.mac_type == e1000_80003es2lan)) { | 3857 | (hw->mac_type == e1000_80003es2lan)) { |
3892 | /* disable receives */ | 3858 | /* disable receives */ |
3893 | rctl = E1000_READ_REG(hw, RCTL); | 3859 | rctl = er32(RCTL); |
3894 | E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN); | 3860 | ew32(RCTL, rctl & ~E1000_RCTL_EN); |
3895 | } | 3861 | } |
3896 | /* guard against interrupt when we're going down */ | 3862 | /* guard against interrupt when we're going down */ |
3897 | if (!test_bit(__E1000_DOWN, &adapter->flags)) | 3863 | if (!test_bit(__E1000_DOWN, &adapter->flags)) |
3898 | mod_timer(&adapter->watchdog_timer, jiffies + 1); | 3864 | mod_timer(&adapter->watchdog_timer, jiffies + 1); |
3899 | } | 3865 | } |
3900 | 3866 | ||
3901 | #ifdef CONFIG_E1000_NAPI | ||
3902 | if (unlikely(hw->mac_type < e1000_82571)) { | 3867 | if (unlikely(hw->mac_type < e1000_82571)) { |
3903 | /* disable interrupts, without the synchronize_irq bit */ | 3868 | /* disable interrupts, without the synchronize_irq bit */ |
3904 | E1000_WRITE_REG(hw, IMC, ~0); | 3869 | ew32(IMC, ~0); |
3905 | E1000_WRITE_FLUSH(hw); | 3870 | E1000_WRITE_FLUSH(); |
3906 | } | 3871 | } |
3907 | if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) { | 3872 | if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) { |
3908 | adapter->total_tx_bytes = 0; | 3873 | adapter->total_tx_bytes = 0; |
@@ -3914,48 +3879,15 @@ e1000_intr(int irq, void *data) | |||
3914 | /* this really should not happen! if it does it is basically a | 3879 | /* this really should not happen! if it does it is basically a |
3915 | * bug, but not a hard error, so enable ints and continue */ | 3880 | * bug, but not a hard error, so enable ints and continue */ |
3916 | e1000_irq_enable(adapter); | 3881 | e1000_irq_enable(adapter); |
3917 | #else | ||
3918 | /* Writing IMC and IMS is needed for 82547. | ||
3919 | * Due to Hub Link bus being occupied, an interrupt | ||
3920 | * de-assertion message is not able to be sent. | ||
3921 | * When an interrupt assertion message is generated later, | ||
3922 | * two messages are re-ordered and sent out. | ||
3923 | * That causes APIC to think 82547 is in de-assertion | ||
3924 | * state, while 82547 is in assertion state, resulting | ||
3925 | * in dead lock. Writing IMC forces 82547 into | ||
3926 | * de-assertion state. | ||
3927 | */ | ||
3928 | if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) | ||
3929 | E1000_WRITE_REG(hw, IMC, ~0); | ||
3930 | |||
3931 | adapter->total_tx_bytes = 0; | ||
3932 | adapter->total_rx_bytes = 0; | ||
3933 | adapter->total_tx_packets = 0; | ||
3934 | adapter->total_rx_packets = 0; | ||
3935 | 3882 | ||
3936 | for (i = 0; i < E1000_MAX_INTR; i++) | ||
3937 | if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) & | ||
3938 | !e1000_clean_tx_irq(adapter, adapter->tx_ring))) | ||
3939 | break; | ||
3940 | |||
3941 | if (likely(adapter->itr_setting & 3)) | ||
3942 | e1000_set_itr(adapter); | ||
3943 | |||
3944 | if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) | ||
3945 | e1000_irq_enable(adapter); | ||
3946 | |||
3947 | #endif | ||
3948 | return IRQ_HANDLED; | 3883 | return IRQ_HANDLED; |
3949 | } | 3884 | } |
3950 | 3885 | ||
3951 | #ifdef CONFIG_E1000_NAPI | ||
3952 | /** | 3886 | /** |
3953 | * e1000_clean - NAPI Rx polling callback | 3887 | * e1000_clean - NAPI Rx polling callback |
3954 | * @adapter: board private structure | 3888 | * @adapter: board private structure |
3955 | **/ | 3889 | **/ |
3956 | 3890 | static int e1000_clean(struct napi_struct *napi, int budget) | |
3957 | static int | ||
3958 | e1000_clean(struct napi_struct *napi, int budget) | ||
3959 | { | 3891 | { |
3960 | struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); | 3892 | struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); |
3961 | struct net_device *poll_dev = adapter->netdev; | 3893 | struct net_device *poll_dev = adapter->netdev; |
@@ -3991,23 +3923,19 @@ e1000_clean(struct napi_struct *napi, int budget) | |||
3991 | return work_done; | 3923 | return work_done; |
3992 | } | 3924 | } |
3993 | 3925 | ||
3994 | #endif | ||
3995 | /** | 3926 | /** |
3996 | * e1000_clean_tx_irq - Reclaim resources after transmit completes | 3927 | * e1000_clean_tx_irq - Reclaim resources after transmit completes |
3997 | * @adapter: board private structure | 3928 | * @adapter: board private structure |
3998 | **/ | 3929 | **/ |
3999 | 3930 | static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, | |
4000 | static bool | 3931 | struct e1000_tx_ring *tx_ring) |
4001 | e1000_clean_tx_irq(struct e1000_adapter *adapter, | ||
4002 | struct e1000_tx_ring *tx_ring) | ||
4003 | { | 3932 | { |
3933 | struct e1000_hw *hw = &adapter->hw; | ||
4004 | struct net_device *netdev = adapter->netdev; | 3934 | struct net_device *netdev = adapter->netdev; |
4005 | struct e1000_tx_desc *tx_desc, *eop_desc; | 3935 | struct e1000_tx_desc *tx_desc, *eop_desc; |
4006 | struct e1000_buffer *buffer_info; | 3936 | struct e1000_buffer *buffer_info; |
4007 | unsigned int i, eop; | 3937 | unsigned int i, eop; |
4008 | #ifdef CONFIG_E1000_NAPI | ||
4009 | unsigned int count = 0; | 3938 | unsigned int count = 0; |
4010 | #endif | ||
4011 | bool cleaned = false; | 3939 | bool cleaned = false; |
4012 | unsigned int total_tx_bytes=0, total_tx_packets=0; | 3940 | unsigned int total_tx_bytes=0, total_tx_packets=0; |
4013 | 3941 | ||
@@ -4039,11 +3967,10 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter, | |||
4039 | 3967 | ||
4040 | eop = tx_ring->buffer_info[i].next_to_watch; | 3968 | eop = tx_ring->buffer_info[i].next_to_watch; |
4041 | eop_desc = E1000_TX_DESC(*tx_ring, eop); | 3969 | eop_desc = E1000_TX_DESC(*tx_ring, eop); |
4042 | #ifdef CONFIG_E1000_NAPI | ||
4043 | #define E1000_TX_WEIGHT 64 | 3970 | #define E1000_TX_WEIGHT 64 |
4044 | /* weight of a sort for tx, to avoid endless transmit cleanup */ | 3971 | /* weight of a sort for tx, to avoid endless transmit cleanup */ |
4045 | if (count++ == E1000_TX_WEIGHT) break; | 3972 | if (count++ == E1000_TX_WEIGHT) |
4046 | #endif | 3973 | break; |
4047 | } | 3974 | } |
4048 | 3975 | ||
4049 | tx_ring->next_to_clean = i; | 3976 | tx_ring->next_to_clean = i; |
@@ -4068,8 +3995,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter, | |||
4068 | if (tx_ring->buffer_info[eop].dma && | 3995 | if (tx_ring->buffer_info[eop].dma && |
4069 | time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + | 3996 | time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + |
4070 | (adapter->tx_timeout_factor * HZ)) | 3997 | (adapter->tx_timeout_factor * HZ)) |
4071 | && !(E1000_READ_REG(&adapter->hw, STATUS) & | 3998 | && !(er32(STATUS) & E1000_STATUS_TXOFF)) { |
4072 | E1000_STATUS_TXOFF)) { | ||
4073 | 3999 | ||
4074 | /* detected Tx unit hang */ | 4000 | /* detected Tx unit hang */ |
4075 | DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n" | 4001 | DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n" |
@@ -4085,8 +4011,8 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter, | |||
4085 | " next_to_watch.status <%x>\n", | 4011 | " next_to_watch.status <%x>\n", |
4086 | (unsigned long)((tx_ring - adapter->tx_ring) / | 4012 | (unsigned long)((tx_ring - adapter->tx_ring) / |
4087 | sizeof(struct e1000_tx_ring)), | 4013 | sizeof(struct e1000_tx_ring)), |
4088 | readl(adapter->hw.hw_addr + tx_ring->tdh), | 4014 | readl(hw->hw_addr + tx_ring->tdh), |
4089 | readl(adapter->hw.hw_addr + tx_ring->tdt), | 4015 | readl(hw->hw_addr + tx_ring->tdt), |
4090 | tx_ring->next_to_use, | 4016 | tx_ring->next_to_use, |
4091 | tx_ring->next_to_clean, | 4017 | tx_ring->next_to_clean, |
4092 | tx_ring->buffer_info[eop].time_stamp, | 4018 | tx_ring->buffer_info[eop].time_stamp, |
@@ -4111,17 +4037,16 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter, | |||
4111 | * @sk_buff: socket buffer with received data | 4037 | * @sk_buff: socket buffer with received data |
4112 | **/ | 4038 | **/ |
4113 | 4039 | ||
4114 | static void | 4040 | static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, |
4115 | e1000_rx_checksum(struct e1000_adapter *adapter, | 4041 | u32 csum, struct sk_buff *skb) |
4116 | u32 status_err, u32 csum, | ||
4117 | struct sk_buff *skb) | ||
4118 | { | 4042 | { |
4043 | struct e1000_hw *hw = &adapter->hw; | ||
4119 | u16 status = (u16)status_err; | 4044 | u16 status = (u16)status_err; |
4120 | u8 errors = (u8)(status_err >> 24); | 4045 | u8 errors = (u8)(status_err >> 24); |
4121 | skb->ip_summed = CHECKSUM_NONE; | 4046 | skb->ip_summed = CHECKSUM_NONE; |
4122 | 4047 | ||
4123 | /* 82543 or newer only */ | 4048 | /* 82543 or newer only */ |
4124 | if (unlikely(adapter->hw.mac_type < e1000_82543)) return; | 4049 | if (unlikely(hw->mac_type < e1000_82543)) return; |
4125 | /* Ignore Checksum bit is set */ | 4050 | /* Ignore Checksum bit is set */ |
4126 | if (unlikely(status & E1000_RXD_STAT_IXSM)) return; | 4051 | if (unlikely(status & E1000_RXD_STAT_IXSM)) return; |
4127 | /* TCP/UDP checksum error bit is set */ | 4052 | /* TCP/UDP checksum error bit is set */ |
@@ -4131,7 +4056,7 @@ e1000_rx_checksum(struct e1000_adapter *adapter, | |||
4131 | return; | 4056 | return; |
4132 | } | 4057 | } |
4133 | /* TCP/UDP Checksum has not been calculated */ | 4058 | /* TCP/UDP Checksum has not been calculated */ |
4134 | if (adapter->hw.mac_type <= e1000_82547_rev_2) { | 4059 | if (hw->mac_type <= e1000_82547_rev_2) { |
4135 | if (!(status & E1000_RXD_STAT_TCPCS)) | 4060 | if (!(status & E1000_RXD_STAT_TCPCS)) |
4136 | return; | 4061 | return; |
4137 | } else { | 4062 | } else { |
@@ -4142,7 +4067,7 @@ e1000_rx_checksum(struct e1000_adapter *adapter, | |||
4142 | if (likely(status & E1000_RXD_STAT_TCPCS)) { | 4067 | if (likely(status & E1000_RXD_STAT_TCPCS)) { |
4143 | /* TCP checksum is good */ | 4068 | /* TCP checksum is good */ |
4144 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 4069 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
4145 | } else if (adapter->hw.mac_type > e1000_82547_rev_2) { | 4070 | } else if (hw->mac_type > e1000_82547_rev_2) { |
4146 | /* IP fragment with UDP payload */ | 4071 | /* IP fragment with UDP payload */ |
4147 | /* Hardware complements the payload checksum, so we undo it | 4072 | /* Hardware complements the payload checksum, so we undo it |
4148 | * and then put the value in host order for further stack use. | 4073 | * and then put the value in host order for further stack use. |
@@ -4158,17 +4083,11 @@ e1000_rx_checksum(struct e1000_adapter *adapter, | |||
4158 | * e1000_clean_rx_irq - Send received data up the network stack; legacy | 4083 | * e1000_clean_rx_irq - Send received data up the network stack; legacy |
4159 | * @adapter: board private structure | 4084 | * @adapter: board private structure |
4160 | **/ | 4085 | **/ |
4161 | 4086 | static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, | |
4162 | static bool | 4087 | struct e1000_rx_ring *rx_ring, |
4163 | #ifdef CONFIG_E1000_NAPI | 4088 | int *work_done, int work_to_do) |
4164 | e1000_clean_rx_irq(struct e1000_adapter *adapter, | ||
4165 | struct e1000_rx_ring *rx_ring, | ||
4166 | int *work_done, int work_to_do) | ||
4167 | #else | ||
4168 | e1000_clean_rx_irq(struct e1000_adapter *adapter, | ||
4169 | struct e1000_rx_ring *rx_ring) | ||
4170 | #endif | ||
4171 | { | 4089 | { |
4090 | struct e1000_hw *hw = &adapter->hw; | ||
4172 | struct net_device *netdev = adapter->netdev; | 4091 | struct net_device *netdev = adapter->netdev; |
4173 | struct pci_dev *pdev = adapter->pdev; | 4092 | struct pci_dev *pdev = adapter->pdev; |
4174 | struct e1000_rx_desc *rx_desc, *next_rxd; | 4093 | struct e1000_rx_desc *rx_desc, *next_rxd; |
@@ -4189,11 +4108,10 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
4189 | struct sk_buff *skb; | 4108 | struct sk_buff *skb; |
4190 | u8 status; | 4109 | u8 status; |
4191 | 4110 | ||
4192 | #ifdef CONFIG_E1000_NAPI | ||
4193 | if (*work_done >= work_to_do) | 4111 | if (*work_done >= work_to_do) |
4194 | break; | 4112 | break; |
4195 | (*work_done)++; | 4113 | (*work_done)++; |
4196 | #endif | 4114 | |
4197 | status = rx_desc->status; | 4115 | status = rx_desc->status; |
4198 | skb = buffer_info->skb; | 4116 | skb = buffer_info->skb; |
4199 | buffer_info->skb = NULL; | 4117 | buffer_info->skb = NULL; |
@@ -4226,11 +4144,10 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
4226 | 4144 | ||
4227 | if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { | 4145 | if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { |
4228 | last_byte = *(skb->data + length - 1); | 4146 | last_byte = *(skb->data + length - 1); |
4229 | if (TBI_ACCEPT(&adapter->hw, status, | 4147 | if (TBI_ACCEPT(hw, status, rx_desc->errors, length, |
4230 | rx_desc->errors, length, last_byte)) { | 4148 | last_byte)) { |
4231 | spin_lock_irqsave(&adapter->stats_lock, flags); | 4149 | spin_lock_irqsave(&adapter->stats_lock, flags); |
4232 | e1000_tbi_adjust_stats(&adapter->hw, | 4150 | e1000_tbi_adjust_stats(hw, &adapter->stats, |
4233 | &adapter->stats, | ||
4234 | length, skb->data); | 4151 | length, skb->data); |
4235 | spin_unlock_irqrestore(&adapter->stats_lock, | 4152 | spin_unlock_irqrestore(&adapter->stats_lock, |
4236 | flags); | 4153 | flags); |
@@ -4280,7 +4197,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
4280 | le16_to_cpu(rx_desc->csum), skb); | 4197 | le16_to_cpu(rx_desc->csum), skb); |
4281 | 4198 | ||
4282 | skb->protocol = eth_type_trans(skb, netdev); | 4199 | skb->protocol = eth_type_trans(skb, netdev); |
4283 | #ifdef CONFIG_E1000_NAPI | 4200 | |
4284 | if (unlikely(adapter->vlgrp && | 4201 | if (unlikely(adapter->vlgrp && |
4285 | (status & E1000_RXD_STAT_VP))) { | 4202 | (status & E1000_RXD_STAT_VP))) { |
4286 | vlan_hwaccel_receive_skb(skb, adapter->vlgrp, | 4203 | vlan_hwaccel_receive_skb(skb, adapter->vlgrp, |
@@ -4288,15 +4205,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
4288 | } else { | 4205 | } else { |
4289 | netif_receive_skb(skb); | 4206 | netif_receive_skb(skb); |
4290 | } | 4207 | } |
4291 | #else /* CONFIG_E1000_NAPI */ | 4208 | |
4292 | if (unlikely(adapter->vlgrp && | ||
4293 | (status & E1000_RXD_STAT_VP))) { | ||
4294 | vlan_hwaccel_rx(skb, adapter->vlgrp, | ||
4295 | le16_to_cpu(rx_desc->special)); | ||
4296 | } else { | ||
4297 | netif_rx(skb); | ||
4298 | } | ||
4299 | #endif /* CONFIG_E1000_NAPI */ | ||
4300 | netdev->last_rx = jiffies; | 4209 | netdev->last_rx = jiffies; |
4301 | 4210 | ||
4302 | next_desc: | 4211 | next_desc: |
@@ -4330,15 +4239,9 @@ next_desc: | |||
4330 | * @adapter: board private structure | 4239 | * @adapter: board private structure |
4331 | **/ | 4240 | **/ |
4332 | 4241 | ||
4333 | static bool | 4242 | static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, |
4334 | #ifdef CONFIG_E1000_NAPI | 4243 | struct e1000_rx_ring *rx_ring, |
4335 | e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | 4244 | int *work_done, int work_to_do) |
4336 | struct e1000_rx_ring *rx_ring, | ||
4337 | int *work_done, int work_to_do) | ||
4338 | #else | ||
4339 | e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | ||
4340 | struct e1000_rx_ring *rx_ring) | ||
4341 | #endif | ||
4342 | { | 4245 | { |
4343 | union e1000_rx_desc_packet_split *rx_desc, *next_rxd; | 4246 | union e1000_rx_desc_packet_split *rx_desc, *next_rxd; |
4344 | struct net_device *netdev = adapter->netdev; | 4247 | struct net_device *netdev = adapter->netdev; |
@@ -4361,11 +4264,11 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
4361 | while (staterr & E1000_RXD_STAT_DD) { | 4264 | while (staterr & E1000_RXD_STAT_DD) { |
4362 | ps_page = &rx_ring->ps_page[i]; | 4265 | ps_page = &rx_ring->ps_page[i]; |
4363 | ps_page_dma = &rx_ring->ps_page_dma[i]; | 4266 | ps_page_dma = &rx_ring->ps_page_dma[i]; |
4364 | #ifdef CONFIG_E1000_NAPI | 4267 | |
4365 | if (unlikely(*work_done >= work_to_do)) | 4268 | if (unlikely(*work_done >= work_to_do)) |
4366 | break; | 4269 | break; |
4367 | (*work_done)++; | 4270 | (*work_done)++; |
4368 | #endif | 4271 | |
4369 | skb = buffer_info->skb; | 4272 | skb = buffer_info->skb; |
4370 | 4273 | ||
4371 | /* in the packet split case this is header only */ | 4274 | /* in the packet split case this is header only */ |
@@ -4438,7 +4341,8 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
4438 | } | 4341 | } |
4439 | 4342 | ||
4440 | for (j = 0; j < adapter->rx_ps_pages; j++) { | 4343 | for (j = 0; j < adapter->rx_ps_pages; j++) { |
4441 | if (!(length= le16_to_cpu(rx_desc->wb.upper.length[j]))) | 4344 | length = le16_to_cpu(rx_desc->wb.upper.length[j]); |
4345 | if (!length) | ||
4442 | break; | 4346 | break; |
4443 | pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j], | 4347 | pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j], |
4444 | PAGE_SIZE, PCI_DMA_FROMDEVICE); | 4348 | PAGE_SIZE, PCI_DMA_FROMDEVICE); |
@@ -4466,21 +4370,14 @@ copydone: | |||
4466 | if (likely(rx_desc->wb.upper.header_status & | 4370 | if (likely(rx_desc->wb.upper.header_status & |
4467 | cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))) | 4371 | cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))) |
4468 | adapter->rx_hdr_split++; | 4372 | adapter->rx_hdr_split++; |
4469 | #ifdef CONFIG_E1000_NAPI | 4373 | |
4470 | if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) { | 4374 | if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) { |
4471 | vlan_hwaccel_receive_skb(skb, adapter->vlgrp, | 4375 | vlan_hwaccel_receive_skb(skb, adapter->vlgrp, |
4472 | le16_to_cpu(rx_desc->wb.middle.vlan)); | 4376 | le16_to_cpu(rx_desc->wb.middle.vlan)); |
4473 | } else { | 4377 | } else { |
4474 | netif_receive_skb(skb); | 4378 | netif_receive_skb(skb); |
4475 | } | 4379 | } |
4476 | #else /* CONFIG_E1000_NAPI */ | 4380 | |
4477 | if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) { | ||
4478 | vlan_hwaccel_rx(skb, adapter->vlgrp, | ||
4479 | le16_to_cpu(rx_desc->wb.middle.vlan)); | ||
4480 | } else { | ||
4481 | netif_rx(skb); | ||
4482 | } | ||
4483 | #endif /* CONFIG_E1000_NAPI */ | ||
4484 | netdev->last_rx = jiffies; | 4381 | netdev->last_rx = jiffies; |
4485 | 4382 | ||
4486 | next_desc: | 4383 | next_desc: |
@@ -4517,11 +4414,11 @@ next_desc: | |||
4517 | * @adapter: address of board private structure | 4414 | * @adapter: address of board private structure |
4518 | **/ | 4415 | **/ |
4519 | 4416 | ||
4520 | static void | 4417 | static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, |
4521 | e1000_alloc_rx_buffers(struct e1000_adapter *adapter, | 4418 | struct e1000_rx_ring *rx_ring, |
4522 | struct e1000_rx_ring *rx_ring, | 4419 | int cleaned_count) |
4523 | int cleaned_count) | ||
4524 | { | 4420 | { |
4421 | struct e1000_hw *hw = &adapter->hw; | ||
4525 | struct net_device *netdev = adapter->netdev; | 4422 | struct net_device *netdev = adapter->netdev; |
4526 | struct pci_dev *pdev = adapter->pdev; | 4423 | struct pci_dev *pdev = adapter->pdev; |
4527 | struct e1000_rx_desc *rx_desc; | 4424 | struct e1000_rx_desc *rx_desc; |
@@ -4619,7 +4516,7 @@ map_skb: | |||
4619 | * applicable for weak-ordered memory model archs, | 4516 | * applicable for weak-ordered memory model archs, |
4620 | * such as IA-64). */ | 4517 | * such as IA-64). */ |
4621 | wmb(); | 4518 | wmb(); |
4622 | writel(i, adapter->hw.hw_addr + rx_ring->rdt); | 4519 | writel(i, hw->hw_addr + rx_ring->rdt); |
4623 | } | 4520 | } |
4624 | } | 4521 | } |
4625 | 4522 | ||
@@ -4628,11 +4525,11 @@ map_skb: | |||
4628 | * @adapter: address of board private structure | 4525 | * @adapter: address of board private structure |
4629 | **/ | 4526 | **/ |
4630 | 4527 | ||
4631 | static void | 4528 | static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, |
4632 | e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | 4529 | struct e1000_rx_ring *rx_ring, |
4633 | struct e1000_rx_ring *rx_ring, | 4530 | int cleaned_count) |
4634 | int cleaned_count) | ||
4635 | { | 4531 | { |
4532 | struct e1000_hw *hw = &adapter->hw; | ||
4636 | struct net_device *netdev = adapter->netdev; | 4533 | struct net_device *netdev = adapter->netdev; |
4637 | struct pci_dev *pdev = adapter->pdev; | 4534 | struct pci_dev *pdev = adapter->pdev; |
4638 | union e1000_rx_desc_packet_split *rx_desc; | 4535 | union e1000_rx_desc_packet_split *rx_desc; |
@@ -4717,7 +4614,7 @@ no_buffers: | |||
4717 | * descriptors are 32 bytes...so we increment tail | 4614 | * descriptors are 32 bytes...so we increment tail |
4718 | * twice as much. | 4615 | * twice as much. |
4719 | */ | 4616 | */ |
4720 | writel(i<<1, adapter->hw.hw_addr + rx_ring->rdt); | 4617 | writel(i<<1, hw->hw_addr + rx_ring->rdt); |
4721 | } | 4618 | } |
4722 | } | 4619 | } |
4723 | 4620 | ||
@@ -4726,49 +4623,49 @@ no_buffers: | |||
4726 | * @adapter: | 4623 | * @adapter: |
4727 | **/ | 4624 | **/ |
4728 | 4625 | ||
4729 | static void | 4626 | static void e1000_smartspeed(struct e1000_adapter *adapter) |
4730 | e1000_smartspeed(struct e1000_adapter *adapter) | ||
4731 | { | 4627 | { |
4628 | struct e1000_hw *hw = &adapter->hw; | ||
4732 | u16 phy_status; | 4629 | u16 phy_status; |
4733 | u16 phy_ctrl; | 4630 | u16 phy_ctrl; |
4734 | 4631 | ||
4735 | if ((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg || | 4632 | if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg || |
4736 | !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL)) | 4633 | !(hw->autoneg_advertised & ADVERTISE_1000_FULL)) |
4737 | return; | 4634 | return; |
4738 | 4635 | ||
4739 | if (adapter->smartspeed == 0) { | 4636 | if (adapter->smartspeed == 0) { |
4740 | /* If Master/Slave config fault is asserted twice, | 4637 | /* If Master/Slave config fault is asserted twice, |
4741 | * we assume back-to-back */ | 4638 | * we assume back-to-back */ |
4742 | e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status); | 4639 | e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); |
4743 | if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; | 4640 | if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; |
4744 | e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status); | 4641 | e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); |
4745 | if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; | 4642 | if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; |
4746 | e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl); | 4643 | e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); |
4747 | if (phy_ctrl & CR_1000T_MS_ENABLE) { | 4644 | if (phy_ctrl & CR_1000T_MS_ENABLE) { |
4748 | phy_ctrl &= ~CR_1000T_MS_ENABLE; | 4645 | phy_ctrl &= ~CR_1000T_MS_ENABLE; |
4749 | e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, | 4646 | e1000_write_phy_reg(hw, PHY_1000T_CTRL, |
4750 | phy_ctrl); | 4647 | phy_ctrl); |
4751 | adapter->smartspeed++; | 4648 | adapter->smartspeed++; |
4752 | if (!e1000_phy_setup_autoneg(&adapter->hw) && | 4649 | if (!e1000_phy_setup_autoneg(hw) && |
4753 | !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, | 4650 | !e1000_read_phy_reg(hw, PHY_CTRL, |
4754 | &phy_ctrl)) { | 4651 | &phy_ctrl)) { |
4755 | phy_ctrl |= (MII_CR_AUTO_NEG_EN | | 4652 | phy_ctrl |= (MII_CR_AUTO_NEG_EN | |
4756 | MII_CR_RESTART_AUTO_NEG); | 4653 | MII_CR_RESTART_AUTO_NEG); |
4757 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, | 4654 | e1000_write_phy_reg(hw, PHY_CTRL, |
4758 | phy_ctrl); | 4655 | phy_ctrl); |
4759 | } | 4656 | } |
4760 | } | 4657 | } |
4761 | return; | 4658 | return; |
4762 | } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { | 4659 | } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { |
4763 | /* If still no link, perhaps using 2/3 pair cable */ | 4660 | /* If still no link, perhaps using 2/3 pair cable */ |
4764 | e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl); | 4661 | e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); |
4765 | phy_ctrl |= CR_1000T_MS_ENABLE; | 4662 | phy_ctrl |= CR_1000T_MS_ENABLE; |
4766 | e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl); | 4663 | e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl); |
4767 | if (!e1000_phy_setup_autoneg(&adapter->hw) && | 4664 | if (!e1000_phy_setup_autoneg(hw) && |
4768 | !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) { | 4665 | !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) { |
4769 | phy_ctrl |= (MII_CR_AUTO_NEG_EN | | 4666 | phy_ctrl |= (MII_CR_AUTO_NEG_EN | |
4770 | MII_CR_RESTART_AUTO_NEG); | 4667 | MII_CR_RESTART_AUTO_NEG); |
4771 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_ctrl); | 4668 | e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl); |
4772 | } | 4669 | } |
4773 | } | 4670 | } |
4774 | /* Restart process after E1000_SMARTSPEED_MAX iterations */ | 4671 | /* Restart process after E1000_SMARTSPEED_MAX iterations */ |
@@ -4783,8 +4680,7 @@ e1000_smartspeed(struct e1000_adapter *adapter) | |||
4783 | * @cmd: | 4680 | * @cmd: |
4784 | **/ | 4681 | **/ |
4785 | 4682 | ||
4786 | static int | 4683 | static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) |
4787 | e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | ||
4788 | { | 4684 | { |
4789 | switch (cmd) { | 4685 | switch (cmd) { |
4790 | case SIOCGMIIPHY: | 4686 | case SIOCGMIIPHY: |
@@ -4803,28 +4699,29 @@ e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |||
4803 | * @cmd: | 4699 | * @cmd: |
4804 | **/ | 4700 | **/ |
4805 | 4701 | ||
4806 | static int | 4702 | static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, |
4807 | e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | 4703 | int cmd) |
4808 | { | 4704 | { |
4809 | struct e1000_adapter *adapter = netdev_priv(netdev); | 4705 | struct e1000_adapter *adapter = netdev_priv(netdev); |
4706 | struct e1000_hw *hw = &adapter->hw; | ||
4810 | struct mii_ioctl_data *data = if_mii(ifr); | 4707 | struct mii_ioctl_data *data = if_mii(ifr); |
4811 | int retval; | 4708 | int retval; |
4812 | u16 mii_reg; | 4709 | u16 mii_reg; |
4813 | u16 spddplx; | 4710 | u16 spddplx; |
4814 | unsigned long flags; | 4711 | unsigned long flags; |
4815 | 4712 | ||
4816 | if (adapter->hw.media_type != e1000_media_type_copper) | 4713 | if (hw->media_type != e1000_media_type_copper) |
4817 | return -EOPNOTSUPP; | 4714 | return -EOPNOTSUPP; |
4818 | 4715 | ||
4819 | switch (cmd) { | 4716 | switch (cmd) { |
4820 | case SIOCGMIIPHY: | 4717 | case SIOCGMIIPHY: |
4821 | data->phy_id = adapter->hw.phy_addr; | 4718 | data->phy_id = hw->phy_addr; |
4822 | break; | 4719 | break; |
4823 | case SIOCGMIIREG: | 4720 | case SIOCGMIIREG: |
4824 | if (!capable(CAP_NET_ADMIN)) | 4721 | if (!capable(CAP_NET_ADMIN)) |
4825 | return -EPERM; | 4722 | return -EPERM; |
4826 | spin_lock_irqsave(&adapter->stats_lock, flags); | 4723 | spin_lock_irqsave(&adapter->stats_lock, flags); |
4827 | if (e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, | 4724 | if (e1000_read_phy_reg(hw, data->reg_num & 0x1F, |
4828 | &data->val_out)) { | 4725 | &data->val_out)) { |
4829 | spin_unlock_irqrestore(&adapter->stats_lock, flags); | 4726 | spin_unlock_irqrestore(&adapter->stats_lock, flags); |
4830 | return -EIO; | 4727 | return -EIO; |
@@ -4838,20 +4735,20 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |||
4838 | return -EFAULT; | 4735 | return -EFAULT; |
4839 | mii_reg = data->val_in; | 4736 | mii_reg = data->val_in; |
4840 | spin_lock_irqsave(&adapter->stats_lock, flags); | 4737 | spin_lock_irqsave(&adapter->stats_lock, flags); |
4841 | if (e1000_write_phy_reg(&adapter->hw, data->reg_num, | 4738 | if (e1000_write_phy_reg(hw, data->reg_num, |
4842 | mii_reg)) { | 4739 | mii_reg)) { |
4843 | spin_unlock_irqrestore(&adapter->stats_lock, flags); | 4740 | spin_unlock_irqrestore(&adapter->stats_lock, flags); |
4844 | return -EIO; | 4741 | return -EIO; |
4845 | } | 4742 | } |
4846 | spin_unlock_irqrestore(&adapter->stats_lock, flags); | 4743 | spin_unlock_irqrestore(&adapter->stats_lock, flags); |
4847 | if (adapter->hw.media_type == e1000_media_type_copper) { | 4744 | if (hw->media_type == e1000_media_type_copper) { |
4848 | switch (data->reg_num) { | 4745 | switch (data->reg_num) { |
4849 | case PHY_CTRL: | 4746 | case PHY_CTRL: |
4850 | if (mii_reg & MII_CR_POWER_DOWN) | 4747 | if (mii_reg & MII_CR_POWER_DOWN) |
4851 | break; | 4748 | break; |
4852 | if (mii_reg & MII_CR_AUTO_NEG_EN) { | 4749 | if (mii_reg & MII_CR_AUTO_NEG_EN) { |
4853 | adapter->hw.autoneg = 1; | 4750 | hw->autoneg = 1; |
4854 | adapter->hw.autoneg_advertised = 0x2F; | 4751 | hw->autoneg_advertised = 0x2F; |
4855 | } else { | 4752 | } else { |
4856 | if (mii_reg & 0x40) | 4753 | if (mii_reg & 0x40) |
4857 | spddplx = SPEED_1000; | 4754 | spddplx = SPEED_1000; |
@@ -4874,7 +4771,7 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |||
4874 | break; | 4771 | break; |
4875 | case M88E1000_PHY_SPEC_CTRL: | 4772 | case M88E1000_PHY_SPEC_CTRL: |
4876 | case M88E1000_EXT_PHY_SPEC_CTRL: | 4773 | case M88E1000_EXT_PHY_SPEC_CTRL: |
4877 | if (e1000_phy_reset(&adapter->hw)) | 4774 | if (e1000_phy_reset(hw)) |
4878 | return -EIO; | 4775 | return -EIO; |
4879 | break; | 4776 | break; |
4880 | } | 4777 | } |
@@ -4897,8 +4794,7 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |||
4897 | return E1000_SUCCESS; | 4794 | return E1000_SUCCESS; |
4898 | } | 4795 | } |
4899 | 4796 | ||
4900 | void | 4797 | void e1000_pci_set_mwi(struct e1000_hw *hw) |
4901 | e1000_pci_set_mwi(struct e1000_hw *hw) | ||
4902 | { | 4798 | { |
4903 | struct e1000_adapter *adapter = hw->back; | 4799 | struct e1000_adapter *adapter = hw->back; |
4904 | int ret_val = pci_set_mwi(adapter->pdev); | 4800 | int ret_val = pci_set_mwi(adapter->pdev); |
@@ -4907,30 +4803,26 @@ e1000_pci_set_mwi(struct e1000_hw *hw) | |||
4907 | DPRINTK(PROBE, ERR, "Error in setting MWI\n"); | 4803 | DPRINTK(PROBE, ERR, "Error in setting MWI\n"); |
4908 | } | 4804 | } |
4909 | 4805 | ||
4910 | void | 4806 | void e1000_pci_clear_mwi(struct e1000_hw *hw) |
4911 | e1000_pci_clear_mwi(struct e1000_hw *hw) | ||
4912 | { | 4807 | { |
4913 | struct e1000_adapter *adapter = hw->back; | 4808 | struct e1000_adapter *adapter = hw->back; |
4914 | 4809 | ||
4915 | pci_clear_mwi(adapter->pdev); | 4810 | pci_clear_mwi(adapter->pdev); |
4916 | } | 4811 | } |
4917 | 4812 | ||
4918 | int | 4813 | int e1000_pcix_get_mmrbc(struct e1000_hw *hw) |
4919 | e1000_pcix_get_mmrbc(struct e1000_hw *hw) | ||
4920 | { | 4814 | { |
4921 | struct e1000_adapter *adapter = hw->back; | 4815 | struct e1000_adapter *adapter = hw->back; |
4922 | return pcix_get_mmrbc(adapter->pdev); | 4816 | return pcix_get_mmrbc(adapter->pdev); |
4923 | } | 4817 | } |
4924 | 4818 | ||
4925 | void | 4819 | void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc) |
4926 | e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc) | ||
4927 | { | 4820 | { |
4928 | struct e1000_adapter *adapter = hw->back; | 4821 | struct e1000_adapter *adapter = hw->back; |
4929 | pcix_set_mmrbc(adapter->pdev, mmrbc); | 4822 | pcix_set_mmrbc(adapter->pdev, mmrbc); |
4930 | } | 4823 | } |
4931 | 4824 | ||
4932 | s32 | 4825 | s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) |
4933 | e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) | ||
4934 | { | 4826 | { |
4935 | struct e1000_adapter *adapter = hw->back; | 4827 | struct e1000_adapter *adapter = hw->back; |
4936 | u16 cap_offset; | 4828 | u16 cap_offset; |
@@ -4944,16 +4836,16 @@ e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) | |||
4944 | return E1000_SUCCESS; | 4836 | return E1000_SUCCESS; |
4945 | } | 4837 | } |
4946 | 4838 | ||
4947 | void | 4839 | void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value) |
4948 | e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value) | ||
4949 | { | 4840 | { |
4950 | outl(value, port); | 4841 | outl(value, port); |
4951 | } | 4842 | } |
4952 | 4843 | ||
4953 | static void | 4844 | static void e1000_vlan_rx_register(struct net_device *netdev, |
4954 | e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) | 4845 | struct vlan_group *grp) |
4955 | { | 4846 | { |
4956 | struct e1000_adapter *adapter = netdev_priv(netdev); | 4847 | struct e1000_adapter *adapter = netdev_priv(netdev); |
4848 | struct e1000_hw *hw = &adapter->hw; | ||
4957 | u32 ctrl, rctl; | 4849 | u32 ctrl, rctl; |
4958 | 4850 | ||
4959 | if (!test_bit(__E1000_DOWN, &adapter->flags)) | 4851 | if (!test_bit(__E1000_DOWN, &adapter->flags)) |
@@ -4962,22 +4854,22 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) | |||
4962 | 4854 | ||
4963 | if (grp) { | 4855 | if (grp) { |
4964 | /* enable VLAN tag insert/strip */ | 4856 | /* enable VLAN tag insert/strip */ |
4965 | ctrl = E1000_READ_REG(&adapter->hw, CTRL); | 4857 | ctrl = er32(CTRL); |
4966 | ctrl |= E1000_CTRL_VME; | 4858 | ctrl |= E1000_CTRL_VME; |
4967 | E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); | 4859 | ew32(CTRL, ctrl); |
4968 | 4860 | ||
4969 | if (adapter->hw.mac_type != e1000_ich8lan) { | 4861 | if (adapter->hw.mac_type != e1000_ich8lan) { |
4970 | /* enable VLAN receive filtering */ | 4862 | /* enable VLAN receive filtering */ |
4971 | rctl = E1000_READ_REG(&adapter->hw, RCTL); | 4863 | rctl = er32(RCTL); |
4972 | rctl &= ~E1000_RCTL_CFIEN; | 4864 | rctl &= ~E1000_RCTL_CFIEN; |
4973 | E1000_WRITE_REG(&adapter->hw, RCTL, rctl); | 4865 | ew32(RCTL, rctl); |
4974 | e1000_update_mng_vlan(adapter); | 4866 | e1000_update_mng_vlan(adapter); |
4975 | } | 4867 | } |
4976 | } else { | 4868 | } else { |
4977 | /* disable VLAN tag insert/strip */ | 4869 | /* disable VLAN tag insert/strip */ |
4978 | ctrl = E1000_READ_REG(&adapter->hw, CTRL); | 4870 | ctrl = er32(CTRL); |
4979 | ctrl &= ~E1000_CTRL_VME; | 4871 | ctrl &= ~E1000_CTRL_VME; |
4980 | E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); | 4872 | ew32(CTRL, ctrl); |
4981 | 4873 | ||
4982 | if (adapter->hw.mac_type != e1000_ich8lan) { | 4874 | if (adapter->hw.mac_type != e1000_ich8lan) { |
4983 | if (adapter->mng_vlan_id != | 4875 | if (adapter->mng_vlan_id != |
@@ -4993,27 +4885,27 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) | |||
4993 | e1000_irq_enable(adapter); | 4885 | e1000_irq_enable(adapter); |
4994 | } | 4886 | } |
4995 | 4887 | ||
4996 | static void | 4888 | static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) |
4997 | e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) | ||
4998 | { | 4889 | { |
4999 | struct e1000_adapter *adapter = netdev_priv(netdev); | 4890 | struct e1000_adapter *adapter = netdev_priv(netdev); |
4891 | struct e1000_hw *hw = &adapter->hw; | ||
5000 | u32 vfta, index; | 4892 | u32 vfta, index; |
5001 | 4893 | ||
5002 | if ((adapter->hw.mng_cookie.status & | 4894 | if ((hw->mng_cookie.status & |
5003 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && | 4895 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && |
5004 | (vid == adapter->mng_vlan_id)) | 4896 | (vid == adapter->mng_vlan_id)) |
5005 | return; | 4897 | return; |
5006 | /* add VID to filter table */ | 4898 | /* add VID to filter table */ |
5007 | index = (vid >> 5) & 0x7F; | 4899 | index = (vid >> 5) & 0x7F; |
5008 | vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index); | 4900 | vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); |
5009 | vfta |= (1 << (vid & 0x1F)); | 4901 | vfta |= (1 << (vid & 0x1F)); |
5010 | e1000_write_vfta(&adapter->hw, index, vfta); | 4902 | e1000_write_vfta(hw, index, vfta); |
5011 | } | 4903 | } |
5012 | 4904 | ||
5013 | static void | 4905 | static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) |
5014 | e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | ||
5015 | { | 4906 | { |
5016 | struct e1000_adapter *adapter = netdev_priv(netdev); | 4907 | struct e1000_adapter *adapter = netdev_priv(netdev); |
4908 | struct e1000_hw *hw = &adapter->hw; | ||
5017 | u32 vfta, index; | 4909 | u32 vfta, index; |
5018 | 4910 | ||
5019 | if (!test_bit(__E1000_DOWN, &adapter->flags)) | 4911 | if (!test_bit(__E1000_DOWN, &adapter->flags)) |
@@ -5022,7 +4914,7 @@ e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | |||
5022 | if (!test_bit(__E1000_DOWN, &adapter->flags)) | 4914 | if (!test_bit(__E1000_DOWN, &adapter->flags)) |
5023 | e1000_irq_enable(adapter); | 4915 | e1000_irq_enable(adapter); |
5024 | 4916 | ||
5025 | if ((adapter->hw.mng_cookie.status & | 4917 | if ((hw->mng_cookie.status & |
5026 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && | 4918 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && |
5027 | (vid == adapter->mng_vlan_id)) { | 4919 | (vid == adapter->mng_vlan_id)) { |
5028 | /* release control to f/w */ | 4920 | /* release control to f/w */ |
@@ -5032,13 +4924,12 @@ e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | |||
5032 | 4924 | ||
5033 | /* remove VID from filter table */ | 4925 | /* remove VID from filter table */ |
5034 | index = (vid >> 5) & 0x7F; | 4926 | index = (vid >> 5) & 0x7F; |
5035 | vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index); | 4927 | vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); |
5036 | vfta &= ~(1 << (vid & 0x1F)); | 4928 | vfta &= ~(1 << (vid & 0x1F)); |
5037 | e1000_write_vfta(&adapter->hw, index, vfta); | 4929 | e1000_write_vfta(hw, index, vfta); |
5038 | } | 4930 | } |
5039 | 4931 | ||
5040 | static void | 4932 | static void e1000_restore_vlan(struct e1000_adapter *adapter) |
5041 | e1000_restore_vlan(struct e1000_adapter *adapter) | ||
5042 | { | 4933 | { |
5043 | e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp); | 4934 | e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp); |
5044 | 4935 | ||
@@ -5052,13 +4943,14 @@ e1000_restore_vlan(struct e1000_adapter *adapter) | |||
5052 | } | 4943 | } |
5053 | } | 4944 | } |
5054 | 4945 | ||
5055 | int | 4946 | int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) |
5056 | e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) | ||
5057 | { | 4947 | { |
5058 | adapter->hw.autoneg = 0; | 4948 | struct e1000_hw *hw = &adapter->hw; |
4949 | |||
4950 | hw->autoneg = 0; | ||
5059 | 4951 | ||
5060 | /* Fiber NICs only allow 1000 gbps Full duplex */ | 4952 | /* Fiber NICs only allow 1000 gbps Full duplex */ |
5061 | if ((adapter->hw.media_type == e1000_media_type_fiber) && | 4953 | if ((hw->media_type == e1000_media_type_fiber) && |
5062 | spddplx != (SPEED_1000 + DUPLEX_FULL)) { | 4954 | spddplx != (SPEED_1000 + DUPLEX_FULL)) { |
5063 | DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n"); | 4955 | DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n"); |
5064 | return -EINVAL; | 4956 | return -EINVAL; |
@@ -5066,20 +4958,20 @@ e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) | |||
5066 | 4958 | ||
5067 | switch (spddplx) { | 4959 | switch (spddplx) { |
5068 | case SPEED_10 + DUPLEX_HALF: | 4960 | case SPEED_10 + DUPLEX_HALF: |
5069 | adapter->hw.forced_speed_duplex = e1000_10_half; | 4961 | hw->forced_speed_duplex = e1000_10_half; |
5070 | break; | 4962 | break; |
5071 | case SPEED_10 + DUPLEX_FULL: | 4963 | case SPEED_10 + DUPLEX_FULL: |
5072 | adapter->hw.forced_speed_duplex = e1000_10_full; | 4964 | hw->forced_speed_duplex = e1000_10_full; |
5073 | break; | 4965 | break; |
5074 | case SPEED_100 + DUPLEX_HALF: | 4966 | case SPEED_100 + DUPLEX_HALF: |
5075 | adapter->hw.forced_speed_duplex = e1000_100_half; | 4967 | hw->forced_speed_duplex = e1000_100_half; |
5076 | break; | 4968 | break; |
5077 | case SPEED_100 + DUPLEX_FULL: | 4969 | case SPEED_100 + DUPLEX_FULL: |
5078 | adapter->hw.forced_speed_duplex = e1000_100_full; | 4970 | hw->forced_speed_duplex = e1000_100_full; |
5079 | break; | 4971 | break; |
5080 | case SPEED_1000 + DUPLEX_FULL: | 4972 | case SPEED_1000 + DUPLEX_FULL: |
5081 | adapter->hw.autoneg = 1; | 4973 | hw->autoneg = 1; |
5082 | adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL; | 4974 | hw->autoneg_advertised = ADVERTISE_1000_FULL; |
5083 | break; | 4975 | break; |
5084 | case SPEED_1000 + DUPLEX_HALF: /* not supported */ | 4976 | case SPEED_1000 + DUPLEX_HALF: /* not supported */ |
5085 | default: | 4977 | default: |
@@ -5089,11 +4981,11 @@ e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) | |||
5089 | return 0; | 4981 | return 0; |
5090 | } | 4982 | } |
5091 | 4983 | ||
5092 | static int | 4984 | static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) |
5093 | e1000_suspend(struct pci_dev *pdev, pm_message_t state) | ||
5094 | { | 4985 | { |
5095 | struct net_device *netdev = pci_get_drvdata(pdev); | 4986 | struct net_device *netdev = pci_get_drvdata(pdev); |
5096 | struct e1000_adapter *adapter = netdev_priv(netdev); | 4987 | struct e1000_adapter *adapter = netdev_priv(netdev); |
4988 | struct e1000_hw *hw = &adapter->hw; | ||
5097 | u32 ctrl, ctrl_ext, rctl, status; | 4989 | u32 ctrl, ctrl_ext, rctl, status; |
5098 | u32 wufc = adapter->wol; | 4990 | u32 wufc = adapter->wol; |
5099 | #ifdef CONFIG_PM | 4991 | #ifdef CONFIG_PM |
@@ -5113,7 +5005,7 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
5113 | return retval; | 5005 | return retval; |
5114 | #endif | 5006 | #endif |
5115 | 5007 | ||
5116 | status = E1000_READ_REG(&adapter->hw, STATUS); | 5008 | status = er32(STATUS); |
5117 | if (status & E1000_STATUS_LU) | 5009 | if (status & E1000_STATUS_LU) |
5118 | wufc &= ~E1000_WUFC_LNKC; | 5010 | wufc &= ~E1000_WUFC_LNKC; |
5119 | 5011 | ||
@@ -5123,40 +5015,40 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
5123 | 5015 | ||
5124 | /* turn on all-multi mode if wake on multicast is enabled */ | 5016 | /* turn on all-multi mode if wake on multicast is enabled */ |
5125 | if (wufc & E1000_WUFC_MC) { | 5017 | if (wufc & E1000_WUFC_MC) { |
5126 | rctl = E1000_READ_REG(&adapter->hw, RCTL); | 5018 | rctl = er32(RCTL); |
5127 | rctl |= E1000_RCTL_MPE; | 5019 | rctl |= E1000_RCTL_MPE; |
5128 | E1000_WRITE_REG(&adapter->hw, RCTL, rctl); | 5020 | ew32(RCTL, rctl); |
5129 | } | 5021 | } |
5130 | 5022 | ||
5131 | if (adapter->hw.mac_type >= e1000_82540) { | 5023 | if (hw->mac_type >= e1000_82540) { |
5132 | ctrl = E1000_READ_REG(&adapter->hw, CTRL); | 5024 | ctrl = er32(CTRL); |
5133 | /* advertise wake from D3Cold */ | 5025 | /* advertise wake from D3Cold */ |
5134 | #define E1000_CTRL_ADVD3WUC 0x00100000 | 5026 | #define E1000_CTRL_ADVD3WUC 0x00100000 |
5135 | /* phy power management enable */ | 5027 | /* phy power management enable */ |
5136 | #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 | 5028 | #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 |
5137 | ctrl |= E1000_CTRL_ADVD3WUC | | 5029 | ctrl |= E1000_CTRL_ADVD3WUC | |
5138 | E1000_CTRL_EN_PHY_PWR_MGMT; | 5030 | E1000_CTRL_EN_PHY_PWR_MGMT; |
5139 | E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); | 5031 | ew32(CTRL, ctrl); |
5140 | } | 5032 | } |
5141 | 5033 | ||
5142 | if (adapter->hw.media_type == e1000_media_type_fiber || | 5034 | if (hw->media_type == e1000_media_type_fiber || |
5143 | adapter->hw.media_type == e1000_media_type_internal_serdes) { | 5035 | hw->media_type == e1000_media_type_internal_serdes) { |
5144 | /* keep the laser running in D3 */ | 5036 | /* keep the laser running in D3 */ |
5145 | ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); | 5037 | ctrl_ext = er32(CTRL_EXT); |
5146 | ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; | 5038 | ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; |
5147 | E1000_WRITE_REG(&adapter->hw, CTRL_EXT, ctrl_ext); | 5039 | ew32(CTRL_EXT, ctrl_ext); |
5148 | } | 5040 | } |
5149 | 5041 | ||
5150 | /* Allow time for pending master requests to run */ | 5042 | /* Allow time for pending master requests to run */ |
5151 | e1000_disable_pciex_master(&adapter->hw); | 5043 | e1000_disable_pciex_master(hw); |
5152 | 5044 | ||
5153 | E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN); | 5045 | ew32(WUC, E1000_WUC_PME_EN); |
5154 | E1000_WRITE_REG(&adapter->hw, WUFC, wufc); | 5046 | ew32(WUFC, wufc); |
5155 | pci_enable_wake(pdev, PCI_D3hot, 1); | 5047 | pci_enable_wake(pdev, PCI_D3hot, 1); |
5156 | pci_enable_wake(pdev, PCI_D3cold, 1); | 5048 | pci_enable_wake(pdev, PCI_D3cold, 1); |
5157 | } else { | 5049 | } else { |
5158 | E1000_WRITE_REG(&adapter->hw, WUC, 0); | 5050 | ew32(WUC, 0); |
5159 | E1000_WRITE_REG(&adapter->hw, WUFC, 0); | 5051 | ew32(WUFC, 0); |
5160 | pci_enable_wake(pdev, PCI_D3hot, 0); | 5052 | pci_enable_wake(pdev, PCI_D3hot, 0); |
5161 | pci_enable_wake(pdev, PCI_D3cold, 0); | 5053 | pci_enable_wake(pdev, PCI_D3cold, 0); |
5162 | } | 5054 | } |
@@ -5169,8 +5061,8 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
5169 | pci_enable_wake(pdev, PCI_D3cold, 1); | 5061 | pci_enable_wake(pdev, PCI_D3cold, 1); |
5170 | } | 5062 | } |
5171 | 5063 | ||
5172 | if (adapter->hw.phy_type == e1000_phy_igp_3) | 5064 | if (hw->phy_type == e1000_phy_igp_3) |
5173 | e1000_phy_powerdown_workaround(&adapter->hw); | 5065 | e1000_phy_powerdown_workaround(hw); |
5174 | 5066 | ||
5175 | if (netif_running(netdev)) | 5067 | if (netif_running(netdev)) |
5176 | e1000_free_irq(adapter); | 5068 | e1000_free_irq(adapter); |
@@ -5187,16 +5079,21 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
5187 | } | 5079 | } |
5188 | 5080 | ||
5189 | #ifdef CONFIG_PM | 5081 | #ifdef CONFIG_PM |
5190 | static int | 5082 | static int e1000_resume(struct pci_dev *pdev) |
5191 | e1000_resume(struct pci_dev *pdev) | ||
5192 | { | 5083 | { |
5193 | struct net_device *netdev = pci_get_drvdata(pdev); | 5084 | struct net_device *netdev = pci_get_drvdata(pdev); |
5194 | struct e1000_adapter *adapter = netdev_priv(netdev); | 5085 | struct e1000_adapter *adapter = netdev_priv(netdev); |
5086 | struct e1000_hw *hw = &adapter->hw; | ||
5195 | u32 err; | 5087 | u32 err; |
5196 | 5088 | ||
5197 | pci_set_power_state(pdev, PCI_D0); | 5089 | pci_set_power_state(pdev, PCI_D0); |
5198 | pci_restore_state(pdev); | 5090 | pci_restore_state(pdev); |
5199 | if ((err = pci_enable_device(pdev))) { | 5091 | |
5092 | if (adapter->need_ioport) | ||
5093 | err = pci_enable_device(pdev); | ||
5094 | else | ||
5095 | err = pci_enable_device_mem(pdev); | ||
5096 | if (err) { | ||
5200 | printk(KERN_ERR "e1000: Cannot enable PCI device from suspend\n"); | 5097 | printk(KERN_ERR "e1000: Cannot enable PCI device from suspend\n"); |
5201 | return err; | 5098 | return err; |
5202 | } | 5099 | } |
@@ -5205,12 +5102,15 @@ e1000_resume(struct pci_dev *pdev) | |||
5205 | pci_enable_wake(pdev, PCI_D3hot, 0); | 5102 | pci_enable_wake(pdev, PCI_D3hot, 0); |
5206 | pci_enable_wake(pdev, PCI_D3cold, 0); | 5103 | pci_enable_wake(pdev, PCI_D3cold, 0); |
5207 | 5104 | ||
5208 | if (netif_running(netdev) && (err = e1000_request_irq(adapter))) | 5105 | if (netif_running(netdev)) { |
5209 | return err; | 5106 | err = e1000_request_irq(adapter); |
5107 | if (err) | ||
5108 | return err; | ||
5109 | } | ||
5210 | 5110 | ||
5211 | e1000_power_up_phy(adapter); | 5111 | e1000_power_up_phy(adapter); |
5212 | e1000_reset(adapter); | 5112 | e1000_reset(adapter); |
5213 | E1000_WRITE_REG(&adapter->hw, WUS, ~0); | 5113 | ew32(WUS, ~0); |
5214 | 5114 | ||
5215 | e1000_init_manageability(adapter); | 5115 | e1000_init_manageability(adapter); |
5216 | 5116 | ||
@@ -5223,8 +5123,8 @@ e1000_resume(struct pci_dev *pdev) | |||
5223 | * DRV_LOAD until the interface is up. For all other cases, | 5123 | * DRV_LOAD until the interface is up. For all other cases, |
5224 | * let the f/w know that the h/w is now under the control | 5124 | * let the f/w know that the h/w is now under the control |
5225 | * of the driver. */ | 5125 | * of the driver. */ |
5226 | if (adapter->hw.mac_type != e1000_82573 || | 5126 | if (hw->mac_type != e1000_82573 || |
5227 | !e1000_check_mng_mode(&adapter->hw)) | 5127 | !e1000_check_mng_mode(hw)) |
5228 | e1000_get_hw_control(adapter); | 5128 | e1000_get_hw_control(adapter); |
5229 | 5129 | ||
5230 | return 0; | 5130 | return 0; |
@@ -5242,16 +5142,12 @@ static void e1000_shutdown(struct pci_dev *pdev) | |||
5242 | * without having to re-enable interrupts. It's not called while | 5142 | * without having to re-enable interrupts. It's not called while |
5243 | * the interrupt routine is executing. | 5143 | * the interrupt routine is executing. |
5244 | */ | 5144 | */ |
5245 | static void | 5145 | static void e1000_netpoll(struct net_device *netdev) |
5246 | e1000_netpoll(struct net_device *netdev) | ||
5247 | { | 5146 | { |
5248 | struct e1000_adapter *adapter = netdev_priv(netdev); | 5147 | struct e1000_adapter *adapter = netdev_priv(netdev); |
5249 | 5148 | ||
5250 | disable_irq(adapter->pdev->irq); | 5149 | disable_irq(adapter->pdev->irq); |
5251 | e1000_intr(adapter->pdev->irq, netdev); | 5150 | e1000_intr(adapter->pdev->irq, netdev); |
5252 | #ifndef CONFIG_E1000_NAPI | ||
5253 | adapter->clean_rx(adapter, adapter->rx_ring); | ||
5254 | #endif | ||
5255 | enable_irq(adapter->pdev->irq); | 5151 | enable_irq(adapter->pdev->irq); |
5256 | } | 5152 | } |
5257 | #endif | 5153 | #endif |
@@ -5264,7 +5160,8 @@ e1000_netpoll(struct net_device *netdev) | |||
5264 | * This function is called after a PCI bus error affecting | 5160 | * This function is called after a PCI bus error affecting |
5265 | * this device has been detected. | 5161 | * this device has been detected. |
5266 | */ | 5162 | */ |
5267 | static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) | 5163 | static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, |
5164 | pci_channel_state_t state) | ||
5268 | { | 5165 | { |
5269 | struct net_device *netdev = pci_get_drvdata(pdev); | 5166 | struct net_device *netdev = pci_get_drvdata(pdev); |
5270 | struct e1000_adapter *adapter = netdev->priv; | 5167 | struct e1000_adapter *adapter = netdev->priv; |
@@ -5290,8 +5187,14 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) | |||
5290 | { | 5187 | { |
5291 | struct net_device *netdev = pci_get_drvdata(pdev); | 5188 | struct net_device *netdev = pci_get_drvdata(pdev); |
5292 | struct e1000_adapter *adapter = netdev->priv; | 5189 | struct e1000_adapter *adapter = netdev->priv; |
5190 | struct e1000_hw *hw = &adapter->hw; | ||
5191 | int err; | ||
5293 | 5192 | ||
5294 | if (pci_enable_device(pdev)) { | 5193 | if (adapter->need_ioport) |
5194 | err = pci_enable_device(pdev); | ||
5195 | else | ||
5196 | err = pci_enable_device_mem(pdev); | ||
5197 | if (err) { | ||
5295 | printk(KERN_ERR "e1000: Cannot re-enable PCI device after reset.\n"); | 5198 | printk(KERN_ERR "e1000: Cannot re-enable PCI device after reset.\n"); |
5296 | return PCI_ERS_RESULT_DISCONNECT; | 5199 | return PCI_ERS_RESULT_DISCONNECT; |
5297 | } | 5200 | } |
@@ -5301,7 +5204,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) | |||
5301 | pci_enable_wake(pdev, PCI_D3cold, 0); | 5204 | pci_enable_wake(pdev, PCI_D3cold, 0); |
5302 | 5205 | ||
5303 | e1000_reset(adapter); | 5206 | e1000_reset(adapter); |
5304 | E1000_WRITE_REG(&adapter->hw, WUS, ~0); | 5207 | ew32(WUS, ~0); |
5305 | 5208 | ||
5306 | return PCI_ERS_RESULT_RECOVERED; | 5209 | return PCI_ERS_RESULT_RECOVERED; |
5307 | } | 5210 | } |
@@ -5318,6 +5221,7 @@ static void e1000_io_resume(struct pci_dev *pdev) | |||
5318 | { | 5221 | { |
5319 | struct net_device *netdev = pci_get_drvdata(pdev); | 5222 | struct net_device *netdev = pci_get_drvdata(pdev); |
5320 | struct e1000_adapter *adapter = netdev->priv; | 5223 | struct e1000_adapter *adapter = netdev->priv; |
5224 | struct e1000_hw *hw = &adapter->hw; | ||
5321 | 5225 | ||
5322 | e1000_init_manageability(adapter); | 5226 | e1000_init_manageability(adapter); |
5323 | 5227 | ||
@@ -5334,8 +5238,8 @@ static void e1000_io_resume(struct pci_dev *pdev) | |||
5334 | * DRV_LOAD until the interface is up. For all other cases, | 5238 | * DRV_LOAD until the interface is up. For all other cases, |
5335 | * let the f/w know that the h/w is now under the control | 5239 | * let the f/w know that the h/w is now under the control |
5336 | * of the driver. */ | 5240 | * of the driver. */ |
5337 | if (adapter->hw.mac_type != e1000_82573 || | 5241 | if (hw->mac_type != e1000_82573 || |
5338 | !e1000_check_mng_mode(&adapter->hw)) | 5242 | !e1000_check_mng_mode(hw)) |
5339 | e1000_get_hw_control(adapter); | 5243 | e1000_get_hw_control(adapter); |
5340 | 5244 | ||
5341 | } | 5245 | } |
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h index 365626d3177e..d9298522f5ae 100644 --- a/drivers/net/e1000/e1000_osdep.h +++ b/drivers/net/e1000/e1000_osdep.h | |||
@@ -55,13 +55,13 @@ | |||
55 | #define DEBUGOUT7 DEBUGOUT3 | 55 | #define DEBUGOUT7 DEBUGOUT3 |
56 | 56 | ||
57 | 57 | ||
58 | #define E1000_WRITE_REG(a, reg, value) ( \ | 58 | #define er32(reg) \ |
59 | writel((value), ((a)->hw_addr + \ | 59 | (readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \ |
60 | (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg)))) | 60 | ? E1000_##reg : E1000_82542_##reg))) |
61 | 61 | ||
62 | #define E1000_READ_REG(a, reg) ( \ | 62 | #define ew32(reg, value) \ |
63 | readl((a)->hw_addr + \ | 63 | (writel((value), (hw->hw_addr + ((hw->mac_type >= e1000_82543) \ |
64 | (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg))) | 64 | ? E1000_##reg : E1000_82542_##reg)))) |
65 | 65 | ||
66 | #define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \ | 66 | #define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \ |
67 | writel((value), ((a)->hw_addr + \ | 67 | writel((value), ((a)->hw_addr + \ |
@@ -96,7 +96,7 @@ | |||
96 | (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ | 96 | (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ |
97 | (offset))) | 97 | (offset))) |
98 | 98 | ||
99 | #define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, STATUS) | 99 | #define E1000_WRITE_FLUSH() er32(STATUS) |
100 | 100 | ||
101 | #define E1000_WRITE_ICH_FLASH_REG(a, reg, value) ( \ | 101 | #define E1000_WRITE_ICH_FLASH_REG(a, reg, value) ( \ |
102 | writel((value), ((a)->flash_address + reg))) | 102 | writel((value), ((a)->flash_address + reg))) |
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c index e6565ce686bc..b9f90a5d3d4d 100644 --- a/drivers/net/e1000/e1000_param.c +++ b/drivers/net/e1000/e1000_param.c | |||
@@ -213,10 +213,9 @@ struct e1000_option { | |||
213 | } arg; | 213 | } arg; |
214 | }; | 214 | }; |
215 | 215 | ||
216 | static int __devinit | 216 | static int __devinit e1000_validate_option(unsigned int *value, |
217 | e1000_validate_option(unsigned int *value, | 217 | const struct e1000_option *opt, |
218 | const struct e1000_option *opt, | 218 | struct e1000_adapter *adapter) |
219 | struct e1000_adapter *adapter) | ||
220 | { | 219 | { |
221 | if (*value == OPTION_UNSET) { | 220 | if (*value == OPTION_UNSET) { |
222 | *value = opt->def; | 221 | *value = opt->def; |
@@ -278,8 +277,7 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter); | |||
278 | * in a variable in the adapter structure. | 277 | * in a variable in the adapter structure. |
279 | **/ | 278 | **/ |
280 | 279 | ||
281 | void __devinit | 280 | void __devinit e1000_check_options(struct e1000_adapter *adapter) |
282 | e1000_check_options(struct e1000_adapter *adapter) | ||
283 | { | 281 | { |
284 | int bd = adapter->bd_number; | 282 | int bd = adapter->bd_number; |
285 | if (bd >= E1000_MAX_NIC) { | 283 | if (bd >= E1000_MAX_NIC) { |
@@ -551,8 +549,7 @@ e1000_check_options(struct e1000_adapter *adapter) | |||
551 | * Handles speed and duplex options on fiber adapters | 549 | * Handles speed and duplex options on fiber adapters |
552 | **/ | 550 | **/ |
553 | 551 | ||
554 | static void __devinit | 552 | static void __devinit e1000_check_fiber_options(struct e1000_adapter *adapter) |
555 | e1000_check_fiber_options(struct e1000_adapter *adapter) | ||
556 | { | 553 | { |
557 | int bd = adapter->bd_number; | 554 | int bd = adapter->bd_number; |
558 | if (num_Speed > bd) { | 555 | if (num_Speed > bd) { |
@@ -579,8 +576,7 @@ e1000_check_fiber_options(struct e1000_adapter *adapter) | |||
579 | * Handles speed and duplex options on copper adapters | 576 | * Handles speed and duplex options on copper adapters |
580 | **/ | 577 | **/ |
581 | 578 | ||
582 | static void __devinit | 579 | static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter) |
583 | e1000_check_copper_options(struct e1000_adapter *adapter) | ||
584 | { | 580 | { |
585 | unsigned int speed, dplx, an; | 581 | unsigned int speed, dplx, an; |
586 | int bd = adapter->bd_number; | 582 | int bd = adapter->bd_number; |
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c index ae9ecb7df22b..4e4f68304e82 100644 --- a/drivers/net/fec_mpc52xx.c +++ b/drivers/net/fec_mpc52xx.c | |||
@@ -197,9 +197,6 @@ static void mpc52xx_fec_adjust_link(struct net_device *dev) | |||
197 | if (priv->link == PHY_DOWN) { | 197 | if (priv->link == PHY_DOWN) { |
198 | new_state = 1; | 198 | new_state = 1; |
199 | priv->link = phydev->link; | 199 | priv->link = phydev->link; |
200 | netif_tx_schedule_all(dev); | ||
201 | netif_carrier_on(dev); | ||
202 | netif_start_queue(dev); | ||
203 | } | 200 | } |
204 | 201 | ||
205 | } else if (priv->link) { | 202 | } else if (priv->link) { |
@@ -207,8 +204,6 @@ static void mpc52xx_fec_adjust_link(struct net_device *dev) | |||
207 | priv->link = PHY_DOWN; | 204 | priv->link = PHY_DOWN; |
208 | priv->speed = 0; | 205 | priv->speed = 0; |
209 | priv->duplex = -1; | 206 | priv->duplex = -1; |
210 | netif_stop_queue(dev); | ||
211 | netif_carrier_off(dev); | ||
212 | } | 207 | } |
213 | 208 | ||
214 | if (new_state && netif_msg_link(priv)) | 209 | if (new_state && netif_msg_link(priv)) |
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c index 92591384afa5..9a51ec8293cc 100644 --- a/drivers/net/fs_enet/fs_enet-main.c +++ b/drivers/net/fs_enet/fs_enet-main.c | |||
@@ -730,9 +730,6 @@ static void generic_adjust_link(struct net_device *dev) | |||
730 | if (!fep->oldlink) { | 730 | if (!fep->oldlink) { |
731 | new_state = 1; | 731 | new_state = 1; |
732 | fep->oldlink = 1; | 732 | fep->oldlink = 1; |
733 | netif_tx_schedule_all(dev); | ||
734 | netif_carrier_on(dev); | ||
735 | netif_start_queue(dev); | ||
736 | } | 733 | } |
737 | 734 | ||
738 | if (new_state) | 735 | if (new_state) |
@@ -742,8 +739,6 @@ static void generic_adjust_link(struct net_device *dev) | |||
742 | fep->oldlink = 0; | 739 | fep->oldlink = 0; |
743 | fep->oldspeed = 0; | 740 | fep->oldspeed = 0; |
744 | fep->oldduplex = -1; | 741 | fep->oldduplex = -1; |
745 | netif_carrier_off(dev); | ||
746 | netif_stop_queue(dev); | ||
747 | } | 742 | } |
748 | 743 | ||
749 | if (new_state && netif_msg_link(fep)) | 744 | if (new_state && netif_msg_link(fep)) |
@@ -818,6 +813,8 @@ static int fs_enet_open(struct net_device *dev) | |||
818 | } | 813 | } |
819 | phy_start(fep->phydev); | 814 | phy_start(fep->phydev); |
820 | 815 | ||
816 | netif_start_queue(dev); | ||
817 | |||
821 | return 0; | 818 | return 0; |
822 | } | 819 | } |
823 | 820 | ||
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index b6500b2aacf2..58f4b1d7bf1f 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c | |||
@@ -123,6 +123,7 @@ static LIST_HEAD(bpq_devices); | |||
123 | * off into a separate class since they always nest. | 123 | * off into a separate class since they always nest. |
124 | */ | 124 | */ |
125 | static struct lock_class_key bpq_netdev_xmit_lock_key; | 125 | static struct lock_class_key bpq_netdev_xmit_lock_key; |
126 | static struct lock_class_key bpq_netdev_addr_lock_key; | ||
126 | 127 | ||
127 | static void bpq_set_lockdep_class_one(struct net_device *dev, | 128 | static void bpq_set_lockdep_class_one(struct net_device *dev, |
128 | struct netdev_queue *txq, | 129 | struct netdev_queue *txq, |
@@ -133,6 +134,7 @@ static void bpq_set_lockdep_class_one(struct net_device *dev, | |||
133 | 134 | ||
134 | static void bpq_set_lockdep_class(struct net_device *dev) | 135 | static void bpq_set_lockdep_class(struct net_device *dev) |
135 | { | 136 | { |
137 | lockdep_set_class(&dev->addr_list_lock, &bpq_netdev_addr_lock_key); | ||
136 | netdev_for_each_tx_queue(dev, bpq_set_lockdep_class_one, NULL); | 138 | netdev_for_each_tx_queue(dev, bpq_set_lockdep_class_one, NULL); |
137 | } | 139 | } |
138 | 140 | ||
diff --git a/drivers/net/hp-plus.c b/drivers/net/hp-plus.c index c2c4f49d7578..8239939554bc 100644 --- a/drivers/net/hp-plus.c +++ b/drivers/net/hp-plus.c | |||
@@ -262,7 +262,7 @@ static int __init hpp_probe1(struct net_device *dev, int ioaddr) | |||
262 | } | 262 | } |
263 | 263 | ||
264 | outw(Perf_Page, ioaddr + HP_PAGING); | 264 | outw(Perf_Page, ioaddr + HP_PAGING); |
265 | NS8390_init(dev, 0); | 265 | NS8390p_init(dev, 0); |
266 | /* Leave the 8390 and HP chip reset. */ | 266 | /* Leave the 8390 and HP chip reset. */ |
267 | outw(inw(ioaddr + HPP_OPTION) & ~EnableIRQ, ioaddr + HPP_OPTION); | 267 | outw(inw(ioaddr + HPP_OPTION) & ~EnableIRQ, ioaddr + HPP_OPTION); |
268 | 268 | ||
diff --git a/drivers/net/hp.c b/drivers/net/hp.c index 8281209ededf..0a8c64930ad3 100644 --- a/drivers/net/hp.c +++ b/drivers/net/hp.c | |||
@@ -389,7 +389,7 @@ static void __init | |||
389 | hp_init_card(struct net_device *dev) | 389 | hp_init_card(struct net_device *dev) |
390 | { | 390 | { |
391 | int irq = dev->irq; | 391 | int irq = dev->irq; |
392 | NS8390_init(dev, 0); | 392 | NS8390p_init(dev, 0); |
393 | outb_p(irqmap[irq&0x0f] | HP_RUN, | 393 | outb_p(irqmap[irq&0x0f] | HP_RUN, |
394 | dev->base_addr - NIC_OFFSET + HP_CONFIGURE); | 394 | dev->base_addr - NIC_OFFSET + HP_CONFIGURE); |
395 | return; | 395 | return; |
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 1b7cb29fe68e..b602c4dd0d14 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c | |||
@@ -385,7 +385,7 @@ static void igb_configure_msix(struct igb_adapter *adapter) | |||
385 | 385 | ||
386 | for (i = 0; i < adapter->num_rx_queues; i++) { | 386 | for (i = 0; i < adapter->num_rx_queues; i++) { |
387 | struct igb_ring *rx_ring = &adapter->rx_ring[i]; | 387 | struct igb_ring *rx_ring = &adapter->rx_ring[i]; |
388 | rx_ring->buddy = 0; | 388 | rx_ring->buddy = NULL; |
389 | igb_assign_vector(adapter, i, IGB_N0_QUEUE, vector++); | 389 | igb_assign_vector(adapter, i, IGB_N0_QUEUE, vector++); |
390 | adapter->eims_enable_mask |= rx_ring->eims_value; | 390 | adapter->eims_enable_mask |= rx_ring->eims_value; |
391 | if (rx_ring->itr_val) | 391 | if (rx_ring->itr_val) |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index be7b723c924f..e5f3da8468cc 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -70,8 +70,6 @@ static struct pci_device_id ixgbe_pci_tbl[] = { | |||
70 | board_82598 }, | 70 | board_82598 }, |
71 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), | 71 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), |
72 | board_82598 }, | 72 | board_82598 }, |
73 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT_DUAL_PORT), | ||
74 | board_82598 }, | ||
75 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), | 73 | {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), |
76 | board_82598 }, | 74 | board_82598 }, |
77 | 75 | ||
diff --git a/drivers/net/macb.c b/drivers/net/macb.c index 0496d16f9de5..daba82bbcb56 100644 --- a/drivers/net/macb.c +++ b/drivers/net/macb.c | |||
@@ -164,9 +164,7 @@ static void macb_handle_link_change(struct net_device *dev) | |||
164 | } | 164 | } |
165 | 165 | ||
166 | if (phydev->link != bp->link) { | 166 | if (phydev->link != bp->link) { |
167 | if (phydev->link) | 167 | if (!phydev->link) { |
168 | netif_tx_schedule_all(dev); | ||
169 | else { | ||
170 | bp->speed = 0; | 168 | bp->speed = 0; |
171 | bp->duplex = -1; | 169 | bp->duplex = -1; |
172 | } | 170 | } |
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index efbc15567dd3..42394505bb50 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
@@ -276,6 +276,7 @@ static int macvlan_change_mtu(struct net_device *dev, int new_mtu) | |||
276 | * separate class since they always nest. | 276 | * separate class since they always nest. |
277 | */ | 277 | */ |
278 | static struct lock_class_key macvlan_netdev_xmit_lock_key; | 278 | static struct lock_class_key macvlan_netdev_xmit_lock_key; |
279 | static struct lock_class_key macvlan_netdev_addr_lock_key; | ||
279 | 280 | ||
280 | #define MACVLAN_FEATURES \ | 281 | #define MACVLAN_FEATURES \ |
281 | (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ | 282 | (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ |
@@ -295,6 +296,8 @@ static void macvlan_set_lockdep_class_one(struct net_device *dev, | |||
295 | 296 | ||
296 | static void macvlan_set_lockdep_class(struct net_device *dev) | 297 | static void macvlan_set_lockdep_class(struct net_device *dev) |
297 | { | 298 | { |
299 | lockdep_set_class(&dev->addr_list_lock, | ||
300 | &macvlan_netdev_addr_lock_key); | ||
298 | netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL); | 301 | netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL); |
299 | } | 302 | } |
300 | 303 | ||
diff --git a/drivers/net/meth.c b/drivers/net/meth.c index 0b32648a2136..4cb364e67dc6 100644 --- a/drivers/net/meth.c +++ b/drivers/net/meth.c | |||
@@ -287,7 +287,7 @@ int meth_reset(struct net_device *dev) | |||
287 | 287 | ||
288 | /* Initial mode: 10 | Half-duplex | Accept normal packets */ | 288 | /* Initial mode: 10 | Half-duplex | Accept normal packets */ |
289 | priv->mac_ctrl = METH_ACCEPT_MCAST | METH_DEFAULT_IPG; | 289 | priv->mac_ctrl = METH_ACCEPT_MCAST | METH_DEFAULT_IPG; |
290 | if (dev->flags | IFF_PROMISC) | 290 | if (dev->flags & IFF_PROMISC) |
291 | priv->mac_ctrl |= METH_PROMISC; | 291 | priv->mac_ctrl |= METH_PROMISC; |
292 | mace->eth.mac_ctrl = priv->mac_ctrl; | 292 | mace->eth.mac_ctrl = priv->mac_ctrl; |
293 | 293 | ||
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index 83a877f3a553..8a97a0066a88 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c | |||
@@ -2112,7 +2112,7 @@ static void mv643xx_eth_netpoll(struct net_device *dev) | |||
2112 | 2112 | ||
2113 | mv643xx_eth_irq(dev->irq, dev); | 2113 | mv643xx_eth_irq(dev->irq, dev); |
2114 | 2114 | ||
2115 | wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_CAUSE_EXT); | 2115 | wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT); |
2116 | } | 2116 | } |
2117 | #endif | 2117 | #endif |
2118 | 2118 | ||
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index b3981ed972bf..3ab0e5289f7a 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c | |||
@@ -125,7 +125,6 @@ struct myri10ge_cmd { | |||
125 | 125 | ||
126 | struct myri10ge_rx_buf { | 126 | struct myri10ge_rx_buf { |
127 | struct mcp_kreq_ether_recv __iomem *lanai; /* lanai ptr for recv ring */ | 127 | struct mcp_kreq_ether_recv __iomem *lanai; /* lanai ptr for recv ring */ |
128 | u8 __iomem *wc_fifo; /* w/c rx dma addr fifo address */ | ||
129 | struct mcp_kreq_ether_recv *shadow; /* host shadow of recv ring */ | 128 | struct mcp_kreq_ether_recv *shadow; /* host shadow of recv ring */ |
130 | struct myri10ge_rx_buffer_state *info; | 129 | struct myri10ge_rx_buffer_state *info; |
131 | struct page *page; | 130 | struct page *page; |
@@ -140,7 +139,6 @@ struct myri10ge_rx_buf { | |||
140 | 139 | ||
141 | struct myri10ge_tx_buf { | 140 | struct myri10ge_tx_buf { |
142 | struct mcp_kreq_ether_send __iomem *lanai; /* lanai ptr for sendq */ | 141 | struct mcp_kreq_ether_send __iomem *lanai; /* lanai ptr for sendq */ |
143 | u8 __iomem *wc_fifo; /* w/c send fifo address */ | ||
144 | struct mcp_kreq_ether_send *req_list; /* host shadow of sendq */ | 142 | struct mcp_kreq_ether_send *req_list; /* host shadow of sendq */ |
145 | char *req_bytes; | 143 | char *req_bytes; |
146 | struct myri10ge_tx_buffer_state *info; | 144 | struct myri10ge_tx_buffer_state *info; |
@@ -332,10 +330,6 @@ MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed"); | |||
332 | 330 | ||
333 | static int myri10ge_reset_recover = 1; | 331 | static int myri10ge_reset_recover = 1; |
334 | 332 | ||
335 | static int myri10ge_wcfifo = 0; | ||
336 | module_param(myri10ge_wcfifo, int, S_IRUGO); | ||
337 | MODULE_PARM_DESC(myri10ge_wcfifo, "Enable WC Fifo when WC is enabled"); | ||
338 | |||
339 | static int myri10ge_max_slices = 1; | 333 | static int myri10ge_max_slices = 1; |
340 | module_param(myri10ge_max_slices, int, S_IRUGO); | 334 | module_param(myri10ge_max_slices, int, S_IRUGO); |
341 | MODULE_PARM_DESC(myri10ge_max_slices, "Max tx/rx queues"); | 335 | MODULE_PARM_DESC(myri10ge_max_slices, "Max tx/rx queues"); |
@@ -1218,14 +1212,8 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx, | |||
1218 | 1212 | ||
1219 | /* copy 8 descriptors to the firmware at a time */ | 1213 | /* copy 8 descriptors to the firmware at a time */ |
1220 | if ((idx & 7) == 7) { | 1214 | if ((idx & 7) == 7) { |
1221 | if (rx->wc_fifo == NULL) | 1215 | myri10ge_submit_8rx(&rx->lanai[idx - 7], |
1222 | myri10ge_submit_8rx(&rx->lanai[idx - 7], | 1216 | &rx->shadow[idx - 7]); |
1223 | &rx->shadow[idx - 7]); | ||
1224 | else { | ||
1225 | mb(); | ||
1226 | myri10ge_pio_copy(rx->wc_fifo, | ||
1227 | &rx->shadow[idx - 7], 64); | ||
1228 | } | ||
1229 | } | 1217 | } |
1230 | } | 1218 | } |
1231 | } | 1219 | } |
@@ -2229,18 +2217,6 @@ static int myri10ge_get_txrx(struct myri10ge_priv *mgp, int slice) | |||
2229 | ss->rx_big.lanai = (struct mcp_kreq_ether_recv __iomem *) | 2217 | ss->rx_big.lanai = (struct mcp_kreq_ether_recv __iomem *) |
2230 | (mgp->sram + cmd.data0); | 2218 | (mgp->sram + cmd.data0); |
2231 | 2219 | ||
2232 | if (myri10ge_wcfifo && mgp->wc_enabled) { | ||
2233 | ss->tx.wc_fifo = (u8 __iomem *) | ||
2234 | mgp->sram + MXGEFW_ETH_SEND_4 + 64 * slice; | ||
2235 | ss->rx_small.wc_fifo = (u8 __iomem *) | ||
2236 | mgp->sram + MXGEFW_ETH_RECV_SMALL + 64 * slice; | ||
2237 | ss->rx_big.wc_fifo = (u8 __iomem *) | ||
2238 | mgp->sram + MXGEFW_ETH_RECV_BIG + 64 * slice; | ||
2239 | } else { | ||
2240 | ss->tx.wc_fifo = NULL; | ||
2241 | ss->rx_small.wc_fifo = NULL; | ||
2242 | ss->rx_big.wc_fifo = NULL; | ||
2243 | } | ||
2244 | return status; | 2220 | return status; |
2245 | 2221 | ||
2246 | } | 2222 | } |
@@ -2573,27 +2549,6 @@ myri10ge_submit_req(struct myri10ge_tx_buf *tx, struct mcp_kreq_ether_send *src, | |||
2573 | mb(); | 2549 | mb(); |
2574 | } | 2550 | } |
2575 | 2551 | ||
2576 | static inline void | ||
2577 | myri10ge_submit_req_wc(struct myri10ge_tx_buf *tx, | ||
2578 | struct mcp_kreq_ether_send *src, int cnt) | ||
2579 | { | ||
2580 | tx->req += cnt; | ||
2581 | mb(); | ||
2582 | while (cnt >= 4) { | ||
2583 | myri10ge_pio_copy(tx->wc_fifo, src, 64); | ||
2584 | mb(); | ||
2585 | src += 4; | ||
2586 | cnt -= 4; | ||
2587 | } | ||
2588 | if (cnt > 0) { | ||
2589 | /* pad it to 64 bytes. The src is 64 bytes bigger than it | ||
2590 | * needs to be so that we don't overrun it */ | ||
2591 | myri10ge_pio_copy(tx->wc_fifo + MXGEFW_ETH_SEND_OFFSET(cnt), | ||
2592 | src, 64); | ||
2593 | mb(); | ||
2594 | } | ||
2595 | } | ||
2596 | |||
2597 | /* | 2552 | /* |
2598 | * Transmit a packet. We need to split the packet so that a single | 2553 | * Transmit a packet. We need to split the packet so that a single |
2599 | * segment does not cross myri10ge->tx_boundary, so this makes segment | 2554 | * segment does not cross myri10ge->tx_boundary, so this makes segment |
@@ -2830,10 +2785,7 @@ again: | |||
2830 | MXGEFW_FLAGS_FIRST))); | 2785 | MXGEFW_FLAGS_FIRST))); |
2831 | idx = ((count - 1) + tx->req) & tx->mask; | 2786 | idx = ((count - 1) + tx->req) & tx->mask; |
2832 | tx->info[idx].last = 1; | 2787 | tx->info[idx].last = 1; |
2833 | if (tx->wc_fifo == NULL) | 2788 | myri10ge_submit_req(tx, tx->req_list, count); |
2834 | myri10ge_submit_req(tx, tx->req_list, count); | ||
2835 | else | ||
2836 | myri10ge_submit_req_wc(tx, tx->req_list, count); | ||
2837 | tx->pkt_start++; | 2789 | tx->pkt_start++; |
2838 | if ((avail - count) < MXGEFW_MAX_SEND_DESC) { | 2790 | if ((avail - count) < MXGEFW_MAX_SEND_DESC) { |
2839 | tx->stop_queue++; | 2791 | tx->stop_queue++; |
@@ -3768,14 +3720,14 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3768 | if (mgp->sram_size > mgp->board_span) { | 3720 | if (mgp->sram_size > mgp->board_span) { |
3769 | dev_err(&pdev->dev, "board span %ld bytes too small\n", | 3721 | dev_err(&pdev->dev, "board span %ld bytes too small\n", |
3770 | mgp->board_span); | 3722 | mgp->board_span); |
3771 | goto abort_with_wc; | 3723 | goto abort_with_mtrr; |
3772 | } | 3724 | } |
3773 | mgp->sram = ioremap(mgp->iomem_base, mgp->board_span); | 3725 | mgp->sram = ioremap_wc(mgp->iomem_base, mgp->board_span); |
3774 | if (mgp->sram == NULL) { | 3726 | if (mgp->sram == NULL) { |
3775 | dev_err(&pdev->dev, "ioremap failed for %ld bytes at 0x%lx\n", | 3727 | dev_err(&pdev->dev, "ioremap failed for %ld bytes at 0x%lx\n", |
3776 | mgp->board_span, mgp->iomem_base); | 3728 | mgp->board_span, mgp->iomem_base); |
3777 | status = -ENXIO; | 3729 | status = -ENXIO; |
3778 | goto abort_with_wc; | 3730 | goto abort_with_mtrr; |
3779 | } | 3731 | } |
3780 | memcpy_fromio(mgp->eeprom_strings, | 3732 | memcpy_fromio(mgp->eeprom_strings, |
3781 | mgp->sram + mgp->sram_size - MYRI10GE_EEPROM_STRINGS_SIZE, | 3733 | mgp->sram + mgp->sram_size - MYRI10GE_EEPROM_STRINGS_SIZE, |
@@ -3876,7 +3828,7 @@ abort_with_firmware: | |||
3876 | abort_with_ioremap: | 3828 | abort_with_ioremap: |
3877 | iounmap(mgp->sram); | 3829 | iounmap(mgp->sram); |
3878 | 3830 | ||
3879 | abort_with_wc: | 3831 | abort_with_mtrr: |
3880 | #ifdef CONFIG_MTRR | 3832 | #ifdef CONFIG_MTRR |
3881 | if (mgp->mtrr >= 0) | 3833 | if (mgp->mtrr >= 0) |
3882 | mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); | 3834 | mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); |
diff --git a/drivers/net/ne.c b/drivers/net/ne.c index 14126973bd12..2fec6122c7fa 100644 --- a/drivers/net/ne.c +++ b/drivers/net/ne.c | |||
@@ -355,7 +355,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr) | |||
355 | } | 355 | } |
356 | 356 | ||
357 | /* Read the 16 bytes of station address PROM. | 357 | /* Read the 16 bytes of station address PROM. |
358 | We must first initialize registers, similar to NS8390_init(eifdev, 0). | 358 | We must first initialize registers, similar to NS8390p_init(eifdev, 0). |
359 | We can't reliably read the SAPROM address without this. | 359 | We can't reliably read the SAPROM address without this. |
360 | (I learned the hard way!). */ | 360 | (I learned the hard way!). */ |
361 | { | 361 | { |
diff --git a/drivers/net/ne2.c b/drivers/net/ne2.c index 8f7256346922..332df75a9ab6 100644 --- a/drivers/net/ne2.c +++ b/drivers/net/ne2.c | |||
@@ -404,7 +404,7 @@ static int __init ne2_probe1(struct net_device *dev, int slot) | |||
404 | 404 | ||
405 | /* Read the 16 bytes of station address PROM. | 405 | /* Read the 16 bytes of station address PROM. |
406 | We must first initialize registers, similar to | 406 | We must first initialize registers, similar to |
407 | NS8390_init(eifdev, 0). | 407 | NS8390p_init(eifdev, 0). |
408 | We can't reliably read the SAPROM address without this. | 408 | We can't reliably read the SAPROM address without this. |
409 | (I learned the hard way!). */ | 409 | (I learned the hard way!). */ |
410 | { | 410 | { |
diff --git a/drivers/net/netxen/Makefile b/drivers/net/netxen/Makefile index a07cdc6f7384..8e7c4c910d2a 100644 --- a/drivers/net/netxen/Makefile +++ b/drivers/net/netxen/Makefile | |||
@@ -32,4 +32,4 @@ | |||
32 | obj-$(CONFIG_NETXEN_NIC) := netxen_nic.o | 32 | obj-$(CONFIG_NETXEN_NIC) := netxen_nic.o |
33 | 33 | ||
34 | netxen_nic-y := netxen_nic_hw.o netxen_nic_main.o netxen_nic_init.o \ | 34 | netxen_nic-y := netxen_nic_hw.o netxen_nic_main.o netxen_nic_init.o \ |
35 | netxen_nic_isr.o netxen_nic_ethtool.o netxen_nic_niu.o | 35 | netxen_nic_ethtool.o netxen_nic_niu.o netxen_nic_ctx.o |
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h index da4c4fb97064..8e736614407d 100644 --- a/drivers/net/netxen/netxen_nic.h +++ b/drivers/net/netxen/netxen_nic.h | |||
@@ -54,6 +54,7 @@ | |||
54 | 54 | ||
55 | #include <linux/mm.h> | 55 | #include <linux/mm.h> |
56 | #include <linux/mman.h> | 56 | #include <linux/mman.h> |
57 | #include <linux/vmalloc.h> | ||
57 | 58 | ||
58 | #include <asm/system.h> | 59 | #include <asm/system.h> |
59 | #include <asm/io.h> | 60 | #include <asm/io.h> |
@@ -63,10 +64,12 @@ | |||
63 | 64 | ||
64 | #include "netxen_nic_hw.h" | 65 | #include "netxen_nic_hw.h" |
65 | 66 | ||
66 | #define _NETXEN_NIC_LINUX_MAJOR 3 | 67 | #define _NETXEN_NIC_LINUX_MAJOR 4 |
67 | #define _NETXEN_NIC_LINUX_MINOR 4 | 68 | #define _NETXEN_NIC_LINUX_MINOR 0 |
68 | #define _NETXEN_NIC_LINUX_SUBVERSION 18 | 69 | #define _NETXEN_NIC_LINUX_SUBVERSION 0 |
69 | #define NETXEN_NIC_LINUX_VERSIONID "3.4.18" | 70 | #define NETXEN_NIC_LINUX_VERSIONID "4.0.0" |
71 | |||
72 | #define NETXEN_VERSION_CODE(a, b, c) (((a) << 16) + ((b) << 8) + (c)) | ||
70 | 73 | ||
71 | #define NETXEN_NUM_FLASH_SECTORS (64) | 74 | #define NETXEN_NUM_FLASH_SECTORS (64) |
72 | #define NETXEN_FLASH_SECTOR_SIZE (64 * 1024) | 75 | #define NETXEN_FLASH_SECTOR_SIZE (64 * 1024) |
@@ -84,7 +87,7 @@ | |||
84 | #define TX_RINGSIZE \ | 87 | #define TX_RINGSIZE \ |
85 | (sizeof(struct netxen_cmd_buffer) * adapter->max_tx_desc_count) | 88 | (sizeof(struct netxen_cmd_buffer) * adapter->max_tx_desc_count) |
86 | #define RCV_BUFFSIZE \ | 89 | #define RCV_BUFFSIZE \ |
87 | (sizeof(struct netxen_rx_buffer) * rcv_desc->max_rx_desc_count) | 90 | (sizeof(struct netxen_rx_buffer) * rds_ring->max_rx_desc_count) |
88 | #define find_diff_among(a,b,range) ((a)<(b)?((b)-(a)):((b)+(range)-(a))) | 91 | #define find_diff_among(a,b,range) ((a)<(b)?((b)-(a)):((b)+(range)-(a))) |
89 | 92 | ||
90 | #define NETXEN_NETDEV_STATUS 0x1 | 93 | #define NETXEN_NETDEV_STATUS 0x1 |
@@ -111,6 +114,13 @@ | |||
111 | 114 | ||
112 | #define NX_P2_C0 0x24 | 115 | #define NX_P2_C0 0x24 |
113 | #define NX_P2_C1 0x25 | 116 | #define NX_P2_C1 0x25 |
117 | #define NX_P3_A0 0x30 | ||
118 | #define NX_P3_A2 0x30 | ||
119 | #define NX_P3_B0 0x40 | ||
120 | #define NX_P3_B1 0x41 | ||
121 | |||
122 | #define NX_IS_REVISION_P2(REVISION) (REVISION <= NX_P2_C1) | ||
123 | #define NX_IS_REVISION_P3(REVISION) (REVISION >= NX_P3_A0) | ||
114 | 124 | ||
115 | #define FIRST_PAGE_GROUP_START 0 | 125 | #define FIRST_PAGE_GROUP_START 0 |
116 | #define FIRST_PAGE_GROUP_END 0x100000 | 126 | #define FIRST_PAGE_GROUP_END 0x100000 |
@@ -125,6 +135,16 @@ | |||
125 | #define SECOND_PAGE_GROUP_SIZE SECOND_PAGE_GROUP_END - SECOND_PAGE_GROUP_START | 135 | #define SECOND_PAGE_GROUP_SIZE SECOND_PAGE_GROUP_END - SECOND_PAGE_GROUP_START |
126 | #define THIRD_PAGE_GROUP_SIZE THIRD_PAGE_GROUP_END - THIRD_PAGE_GROUP_START | 136 | #define THIRD_PAGE_GROUP_SIZE THIRD_PAGE_GROUP_END - THIRD_PAGE_GROUP_START |
127 | 137 | ||
138 | #define P2_MAX_MTU (8000) | ||
139 | #define P3_MAX_MTU (9600) | ||
140 | #define NX_ETHERMTU 1500 | ||
141 | #define NX_MAX_ETHERHDR 32 /* This contains some padding */ | ||
142 | |||
143 | #define NX_RX_NORMAL_BUF_MAX_LEN (NX_MAX_ETHERHDR + NX_ETHERMTU) | ||
144 | #define NX_P2_RX_JUMBO_BUF_MAX_LEN (NX_MAX_ETHERHDR + P2_MAX_MTU) | ||
145 | #define NX_P3_RX_JUMBO_BUF_MAX_LEN (NX_MAX_ETHERHDR + P3_MAX_MTU) | ||
146 | #define NX_CT_DEFAULT_RX_BUF_LEN 2048 | ||
147 | |||
128 | #define MAX_RX_BUFFER_LENGTH 1760 | 148 | #define MAX_RX_BUFFER_LENGTH 1760 |
129 | #define MAX_RX_JUMBO_BUFFER_LENGTH 8062 | 149 | #define MAX_RX_JUMBO_BUFFER_LENGTH 8062 |
130 | #define MAX_RX_LRO_BUFFER_LENGTH ((48*1024)-512) | 150 | #define MAX_RX_LRO_BUFFER_LENGTH ((48*1024)-512) |
@@ -132,7 +152,6 @@ | |||
132 | #define RX_JUMBO_DMA_MAP_LEN \ | 152 | #define RX_JUMBO_DMA_MAP_LEN \ |
133 | (MAX_RX_JUMBO_BUFFER_LENGTH - 2) | 153 | (MAX_RX_JUMBO_BUFFER_LENGTH - 2) |
134 | #define RX_LRO_DMA_MAP_LEN (MAX_RX_LRO_BUFFER_LENGTH - 2) | 154 | #define RX_LRO_DMA_MAP_LEN (MAX_RX_LRO_BUFFER_LENGTH - 2) |
135 | #define NETXEN_ROM_ROUNDUP 0x80000000ULL | ||
136 | 155 | ||
137 | /* | 156 | /* |
138 | * Maximum number of ring contexts | 157 | * Maximum number of ring contexts |
@@ -140,16 +159,16 @@ | |||
140 | #define MAX_RING_CTX 1 | 159 | #define MAX_RING_CTX 1 |
141 | 160 | ||
142 | /* Opcodes to be used with the commands */ | 161 | /* Opcodes to be used with the commands */ |
143 | enum { | 162 | #define TX_ETHER_PKT 0x01 |
144 | TX_ETHER_PKT = 0x01, | 163 | #define TX_TCP_PKT 0x02 |
145 | /* The following opcodes are for IP checksum */ | 164 | #define TX_UDP_PKT 0x03 |
146 | TX_TCP_PKT, | 165 | #define TX_IP_PKT 0x04 |
147 | TX_UDP_PKT, | 166 | #define TX_TCP_LSO 0x05 |
148 | TX_IP_PKT, | 167 | #define TX_TCP_LSO6 0x06 |
149 | TX_TCP_LSO, | 168 | #define TX_IPSEC 0x07 |
150 | TX_IPSEC, | 169 | #define TX_IPSEC_CMD 0x0a |
151 | TX_IPSEC_CMD | 170 | #define TX_TCPV6_PKT 0x0b |
152 | }; | 171 | #define TX_UDPV6_PKT 0x0c |
153 | 172 | ||
154 | /* The following opcodes are for internal consumption. */ | 173 | /* The following opcodes are for internal consumption. */ |
155 | #define NETXEN_CONTROL_OP 0x10 | 174 | #define NETXEN_CONTROL_OP 0x10 |
@@ -191,6 +210,7 @@ enum { | |||
191 | #define MAX_RCV_DESCRIPTORS 16384 | 210 | #define MAX_RCV_DESCRIPTORS 16384 |
192 | #define MAX_CMD_DESCRIPTORS_HOST (MAX_CMD_DESCRIPTORS / 4) | 211 | #define MAX_CMD_DESCRIPTORS_HOST (MAX_CMD_DESCRIPTORS / 4) |
193 | #define MAX_RCV_DESCRIPTORS_1G (MAX_RCV_DESCRIPTORS / 4) | 212 | #define MAX_RCV_DESCRIPTORS_1G (MAX_RCV_DESCRIPTORS / 4) |
213 | #define MAX_RCV_DESCRIPTORS_10G 8192 | ||
194 | #define MAX_JUMBO_RCV_DESCRIPTORS 1024 | 214 | #define MAX_JUMBO_RCV_DESCRIPTORS 1024 |
195 | #define MAX_LRO_RCV_DESCRIPTORS 64 | 215 | #define MAX_LRO_RCV_DESCRIPTORS 64 |
196 | #define MAX_RCVSTATUS_DESCRIPTORS MAX_RCV_DESCRIPTORS | 216 | #define MAX_RCVSTATUS_DESCRIPTORS MAX_RCV_DESCRIPTORS |
@@ -219,8 +239,6 @@ enum { | |||
219 | #define MPORT_MULTI_FUNCTION_MODE 0x2222 | 239 | #define MPORT_MULTI_FUNCTION_MODE 0x2222 |
220 | 240 | ||
221 | #include "netxen_nic_phan_reg.h" | 241 | #include "netxen_nic_phan_reg.h" |
222 | extern unsigned long long netxen_dma_mask; | ||
223 | extern unsigned long last_schedule_time; | ||
224 | 242 | ||
225 | /* | 243 | /* |
226 | * NetXen host-peg signal message structure | 244 | * NetXen host-peg signal message structure |
@@ -289,7 +307,7 @@ struct netxen_ring_ctx { | |||
289 | #define netxen_set_cmd_desc_port(cmd_desc, var) \ | 307 | #define netxen_set_cmd_desc_port(cmd_desc, var) \ |
290 | ((cmd_desc)->port_ctxid |= ((var) & 0x0F)) | 308 | ((cmd_desc)->port_ctxid |= ((var) & 0x0F)) |
291 | #define netxen_set_cmd_desc_ctxid(cmd_desc, var) \ | 309 | #define netxen_set_cmd_desc_ctxid(cmd_desc, var) \ |
292 | ((cmd_desc)->port_ctxid |= ((var) & 0xF0)) | 310 | ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0)) |
293 | 311 | ||
294 | #define netxen_set_cmd_desc_flags(cmd_desc, val) \ | 312 | #define netxen_set_cmd_desc_flags(cmd_desc, val) \ |
295 | (cmd_desc)->flags_opcode = ((cmd_desc)->flags_opcode & \ | 313 | (cmd_desc)->flags_opcode = ((cmd_desc)->flags_opcode & \ |
@@ -377,8 +395,8 @@ struct rcv_desc { | |||
377 | }; | 395 | }; |
378 | 396 | ||
379 | /* opcode field in status_desc */ | 397 | /* opcode field in status_desc */ |
380 | #define RCV_NIC_PKT (0xA) | 398 | #define NETXEN_NIC_RXPKT_DESC 0x04 |
381 | #define STATUS_NIC_PKT ((RCV_NIC_PKT) << 12) | 399 | #define NETXEN_OLD_RXPKT_DESC 0x3f |
382 | 400 | ||
383 | /* for status field in status_desc */ | 401 | /* for status field in status_desc */ |
384 | #define STATUS_NEED_CKSUM (1) | 402 | #define STATUS_NEED_CKSUM (1) |
@@ -410,6 +428,8 @@ struct rcv_desc { | |||
410 | (((sts_data) >> 28) & 0xFFFF) | 428 | (((sts_data) >> 28) & 0xFFFF) |
411 | #define netxen_get_sts_prot(sts_data) \ | 429 | #define netxen_get_sts_prot(sts_data) \ |
412 | (((sts_data) >> 44) & 0x0F) | 430 | (((sts_data) >> 44) & 0x0F) |
431 | #define netxen_get_sts_pkt_offset(sts_data) \ | ||
432 | (((sts_data) >> 48) & 0x1F) | ||
413 | #define netxen_get_sts_opcode(sts_data) \ | 433 | #define netxen_get_sts_opcode(sts_data) \ |
414 | (((sts_data) >> 58) & 0x03F) | 434 | (((sts_data) >> 58) & 0x03F) |
415 | 435 | ||
@@ -424,17 +444,30 @@ struct rcv_desc { | |||
424 | 444 | ||
425 | struct status_desc { | 445 | struct status_desc { |
426 | /* Bit pattern: 0-3 port, 4-7 status, 8-11 type, 12-27 total_length | 446 | /* Bit pattern: 0-3 port, 4-7 status, 8-11 type, 12-27 total_length |
427 | 28-43 reference_handle, 44-47 protocol, 48-52 unused | 447 | 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset |
428 | 53-55 desc_cnt, 56-57 owner, 58-63 opcode | 448 | 53-55 desc_cnt, 56-57 owner, 58-63 opcode |
429 | */ | 449 | */ |
430 | __le64 status_desc_data; | 450 | __le64 status_desc_data; |
431 | __le32 hash_value; | 451 | union { |
432 | u8 hash_type; | 452 | struct { |
433 | u8 msg_type; | 453 | __le32 hash_value; |
434 | u8 unused; | 454 | u8 hash_type; |
435 | /* Bit pattern: 0-6 lro_count indicates frag sequence, | 455 | u8 msg_type; |
436 | 7 last_frag indicates last frag */ | 456 | u8 unused; |
437 | u8 lro; | 457 | union { |
458 | /* Bit pattern: 0-6 lro_count indicates frag | ||
459 | * sequence, 7 last_frag indicates last frag | ||
460 | */ | ||
461 | u8 lro; | ||
462 | |||
463 | /* chained buffers */ | ||
464 | u8 nr_frags; | ||
465 | }; | ||
466 | }; | ||
467 | struct { | ||
468 | __le16 frag_handles[4]; | ||
469 | }; | ||
470 | }; | ||
438 | } __attribute__ ((aligned(16))); | 471 | } __attribute__ ((aligned(16))); |
439 | 472 | ||
440 | enum { | 473 | enum { |
@@ -464,7 +497,20 @@ typedef enum { | |||
464 | 497 | ||
465 | NETXEN_BRDTYPE_P2_SB31_10G_IMEZ = 0x000d, | 498 | NETXEN_BRDTYPE_P2_SB31_10G_IMEZ = 0x000d, |
466 | NETXEN_BRDTYPE_P2_SB31_10G_HMEZ = 0x000e, | 499 | NETXEN_BRDTYPE_P2_SB31_10G_HMEZ = 0x000e, |
467 | NETXEN_BRDTYPE_P2_SB31_10G_CX4 = 0x000f | 500 | NETXEN_BRDTYPE_P2_SB31_10G_CX4 = 0x000f, |
501 | |||
502 | NETXEN_BRDTYPE_P3_REF_QG = 0x0021, | ||
503 | NETXEN_BRDTYPE_P3_HMEZ = 0x0022, | ||
504 | NETXEN_BRDTYPE_P3_10G_CX4_LP = 0x0023, | ||
505 | NETXEN_BRDTYPE_P3_4_GB = 0x0024, | ||
506 | NETXEN_BRDTYPE_P3_IMEZ = 0x0025, | ||
507 | NETXEN_BRDTYPE_P3_10G_SFP_PLUS = 0x0026, | ||
508 | NETXEN_BRDTYPE_P3_10000_BASE_T = 0x0027, | ||
509 | NETXEN_BRDTYPE_P3_XG_LOM = 0x0028, | ||
510 | NETXEN_BRDTYPE_P3_4_GB_MM = 0x0029, | ||
511 | NETXEN_BRDTYPE_P3_10G_CX4 = 0x0031, | ||
512 | NETXEN_BRDTYPE_P3_10G_XFP = 0x0032 | ||
513 | |||
468 | } netxen_brdtype_t; | 514 | } netxen_brdtype_t; |
469 | 515 | ||
470 | typedef enum { | 516 | typedef enum { |
@@ -747,6 +793,7 @@ struct netxen_cmd_buffer { | |||
747 | 793 | ||
748 | /* In rx_buffer, we do not need multiple fragments as is a single buffer */ | 794 | /* In rx_buffer, we do not need multiple fragments as is a single buffer */ |
749 | struct netxen_rx_buffer { | 795 | struct netxen_rx_buffer { |
796 | struct list_head list; | ||
750 | struct sk_buff *skb; | 797 | struct sk_buff *skb; |
751 | u64 dma; | 798 | u64 dma; |
752 | u16 ref_handle; | 799 | u16 ref_handle; |
@@ -765,7 +812,6 @@ struct netxen_rx_buffer { | |||
765 | * contains interrupt info as well shared hardware info. | 812 | * contains interrupt info as well shared hardware info. |
766 | */ | 813 | */ |
767 | struct netxen_hardware_context { | 814 | struct netxen_hardware_context { |
768 | struct pci_dev *pdev; | ||
769 | void __iomem *pci_base0; | 815 | void __iomem *pci_base0; |
770 | void __iomem *pci_base1; | 816 | void __iomem *pci_base1; |
771 | void __iomem *pci_base2; | 817 | void __iomem *pci_base2; |
@@ -773,15 +819,20 @@ struct netxen_hardware_context { | |||
773 | unsigned long first_page_group_start; | 819 | unsigned long first_page_group_start; |
774 | void __iomem *db_base; | 820 | void __iomem *db_base; |
775 | unsigned long db_len; | 821 | unsigned long db_len; |
822 | unsigned long pci_len0; | ||
823 | |||
824 | u8 cut_through; | ||
825 | int qdr_sn_window; | ||
826 | int ddr_mn_window; | ||
827 | unsigned long mn_win_crb; | ||
828 | unsigned long ms_win_crb; | ||
776 | 829 | ||
777 | u8 revision_id; | 830 | u8 revision_id; |
778 | u16 board_type; | 831 | u16 board_type; |
779 | struct netxen_board_info boardcfg; | 832 | struct netxen_board_info boardcfg; |
780 | u32 xg_linkup; | 833 | u32 linkup; |
781 | u32 qg_linksup; | ||
782 | /* Address of cmd ring in Phantom */ | 834 | /* Address of cmd ring in Phantom */ |
783 | struct cmd_desc_type0 *cmd_desc_head; | 835 | struct cmd_desc_type0 *cmd_desc_head; |
784 | struct pci_dev *cmd_desc_pdev; | ||
785 | dma_addr_t cmd_desc_phys_addr; | 836 | dma_addr_t cmd_desc_phys_addr; |
786 | struct netxen_adapter *adapter; | 837 | struct netxen_adapter *adapter; |
787 | int pci_func; | 838 | int pci_func; |
@@ -813,17 +864,17 @@ struct netxen_adapter_stats { | |||
813 | * Rcv Descriptor Context. One such per Rcv Descriptor. There may | 864 | * Rcv Descriptor Context. One such per Rcv Descriptor. There may |
814 | * be one Rcv Descriptor for normal packets, one for jumbo and may be others. | 865 | * be one Rcv Descriptor for normal packets, one for jumbo and may be others. |
815 | */ | 866 | */ |
816 | struct netxen_rcv_desc_ctx { | 867 | struct nx_host_rds_ring { |
817 | u32 flags; | 868 | u32 flags; |
818 | u32 producer; | 869 | u32 producer; |
819 | u32 rcv_pending; /* Num of bufs posted in phantom */ | ||
820 | dma_addr_t phys_addr; | 870 | dma_addr_t phys_addr; |
821 | struct pci_dev *phys_pdev; | 871 | u32 crb_rcv_producer; /* reg offset */ |
822 | struct rcv_desc *desc_head; /* address of rx ring in Phantom */ | 872 | struct rcv_desc *desc_head; /* address of rx ring in Phantom */ |
823 | u32 max_rx_desc_count; | 873 | u32 max_rx_desc_count; |
824 | u32 dma_size; | 874 | u32 dma_size; |
825 | u32 skb_size; | 875 | u32 skb_size; |
826 | struct netxen_rx_buffer *rx_buf_arr; /* rx buffers for receive */ | 876 | struct netxen_rx_buffer *rx_buf_arr; /* rx buffers for receive */ |
877 | struct list_head free_list; | ||
827 | int begin_alloc; | 878 | int begin_alloc; |
828 | }; | 879 | }; |
829 | 880 | ||
@@ -834,17 +885,319 @@ struct netxen_rcv_desc_ctx { | |||
834 | * present elsewhere. | 885 | * present elsewhere. |
835 | */ | 886 | */ |
836 | struct netxen_recv_context { | 887 | struct netxen_recv_context { |
837 | struct netxen_rcv_desc_ctx rcv_desc[NUM_RCV_DESC_RINGS]; | 888 | u32 state; |
838 | u32 status_rx_producer; | 889 | u16 context_id; |
890 | u16 virt_port; | ||
891 | |||
892 | struct nx_host_rds_ring rds_rings[NUM_RCV_DESC_RINGS]; | ||
839 | u32 status_rx_consumer; | 893 | u32 status_rx_consumer; |
894 | u32 crb_sts_consumer; /* reg offset */ | ||
840 | dma_addr_t rcv_status_desc_phys_addr; | 895 | dma_addr_t rcv_status_desc_phys_addr; |
841 | struct pci_dev *rcv_status_desc_pdev; | ||
842 | struct status_desc *rcv_status_desc_head; | 896 | struct status_desc *rcv_status_desc_head; |
843 | }; | 897 | }; |
844 | 898 | ||
845 | #define NETXEN_NIC_MSI_ENABLED 0x02 | 899 | /* New HW context creation */ |
846 | #define NETXEN_DMA_MASK 0xfffffffe | 900 | |
847 | #define NETXEN_DB_MAPSIZE_BYTES 0x1000 | 901 | #define NX_OS_CRB_RETRY_COUNT 4000 |
902 | #define NX_CDRP_SIGNATURE_MAKE(pcifn, version) \ | ||
903 | (((pcifn) & 0xff) | (((version) & 0xff) << 8) | (0xcafe << 16)) | ||
904 | |||
905 | #define NX_CDRP_CLEAR 0x00000000 | ||
906 | #define NX_CDRP_CMD_BIT 0x80000000 | ||
907 | |||
908 | /* | ||
909 | * All responses must have the NX_CDRP_CMD_BIT cleared | ||
910 | * in the crb NX_CDRP_CRB_OFFSET. | ||
911 | */ | ||
912 | #define NX_CDRP_FORM_RSP(rsp) (rsp) | ||
913 | #define NX_CDRP_IS_RSP(rsp) (((rsp) & NX_CDRP_CMD_BIT) == 0) | ||
914 | |||
915 | #define NX_CDRP_RSP_OK 0x00000001 | ||
916 | #define NX_CDRP_RSP_FAIL 0x00000002 | ||
917 | #define NX_CDRP_RSP_TIMEOUT 0x00000003 | ||
918 | |||
919 | /* | ||
920 | * All commands must have the NX_CDRP_CMD_BIT set in | ||
921 | * the crb NX_CDRP_CRB_OFFSET. | ||
922 | */ | ||
923 | #define NX_CDRP_FORM_CMD(cmd) (NX_CDRP_CMD_BIT | (cmd)) | ||
924 | #define NX_CDRP_IS_CMD(cmd) (((cmd) & NX_CDRP_CMD_BIT) != 0) | ||
925 | |||
926 | #define NX_CDRP_CMD_SUBMIT_CAPABILITIES 0x00000001 | ||
927 | #define NX_CDRP_CMD_READ_MAX_RDS_PER_CTX 0x00000002 | ||
928 | #define NX_CDRP_CMD_READ_MAX_SDS_PER_CTX 0x00000003 | ||
929 | #define NX_CDRP_CMD_READ_MAX_RULES_PER_CTX 0x00000004 | ||
930 | #define NX_CDRP_CMD_READ_MAX_RX_CTX 0x00000005 | ||
931 | #define NX_CDRP_CMD_READ_MAX_TX_CTX 0x00000006 | ||
932 | #define NX_CDRP_CMD_CREATE_RX_CTX 0x00000007 | ||
933 | #define NX_CDRP_CMD_DESTROY_RX_CTX 0x00000008 | ||
934 | #define NX_CDRP_CMD_CREATE_TX_CTX 0x00000009 | ||
935 | #define NX_CDRP_CMD_DESTROY_TX_CTX 0x0000000a | ||
936 | #define NX_CDRP_CMD_SETUP_STATISTICS 0x0000000e | ||
937 | #define NX_CDRP_CMD_GET_STATISTICS 0x0000000f | ||
938 | #define NX_CDRP_CMD_DELETE_STATISTICS 0x00000010 | ||
939 | #define NX_CDRP_CMD_SET_MTU 0x00000012 | ||
940 | #define NX_CDRP_CMD_MAX 0x00000013 | ||
941 | |||
942 | #define NX_RCODE_SUCCESS 0 | ||
943 | #define NX_RCODE_NO_HOST_MEM 1 | ||
944 | #define NX_RCODE_NO_HOST_RESOURCE 2 | ||
945 | #define NX_RCODE_NO_CARD_CRB 3 | ||
946 | #define NX_RCODE_NO_CARD_MEM 4 | ||
947 | #define NX_RCODE_NO_CARD_RESOURCE 5 | ||
948 | #define NX_RCODE_INVALID_ARGS 6 | ||
949 | #define NX_RCODE_INVALID_ACTION 7 | ||
950 | #define NX_RCODE_INVALID_STATE 8 | ||
951 | #define NX_RCODE_NOT_SUPPORTED 9 | ||
952 | #define NX_RCODE_NOT_PERMITTED 10 | ||
953 | #define NX_RCODE_NOT_READY 11 | ||
954 | #define NX_RCODE_DOES_NOT_EXIST 12 | ||
955 | #define NX_RCODE_ALREADY_EXISTS 13 | ||
956 | #define NX_RCODE_BAD_SIGNATURE 14 | ||
957 | #define NX_RCODE_CMD_NOT_IMPL 15 | ||
958 | #define NX_RCODE_CMD_INVALID 16 | ||
959 | #define NX_RCODE_TIMEOUT 17 | ||
960 | #define NX_RCODE_CMD_FAILED 18 | ||
961 | #define NX_RCODE_MAX_EXCEEDED 19 | ||
962 | #define NX_RCODE_MAX 20 | ||
963 | |||
964 | #define NX_DESTROY_CTX_RESET 0 | ||
965 | #define NX_DESTROY_CTX_D3_RESET 1 | ||
966 | #define NX_DESTROY_CTX_MAX 2 | ||
967 | |||
968 | /* | ||
969 | * Capabilities | ||
970 | */ | ||
971 | #define NX_CAP_BIT(class, bit) (1 << bit) | ||
972 | #define NX_CAP0_LEGACY_CONTEXT NX_CAP_BIT(0, 0) | ||
973 | #define NX_CAP0_MULTI_CONTEXT NX_CAP_BIT(0, 1) | ||
974 | #define NX_CAP0_LEGACY_MN NX_CAP_BIT(0, 2) | ||
975 | #define NX_CAP0_LEGACY_MS NX_CAP_BIT(0, 3) | ||
976 | #define NX_CAP0_CUT_THROUGH NX_CAP_BIT(0, 4) | ||
977 | #define NX_CAP0_LRO NX_CAP_BIT(0, 5) | ||
978 | #define NX_CAP0_LSO NX_CAP_BIT(0, 6) | ||
979 | #define NX_CAP0_JUMBO_CONTIGUOUS NX_CAP_BIT(0, 7) | ||
980 | #define NX_CAP0_LRO_CONTIGUOUS NX_CAP_BIT(0, 8) | ||
981 | |||
982 | /* | ||
983 | * Context state | ||
984 | */ | ||
985 | #define NX_HOST_CTX_STATE_FREED 0 | ||
986 | #define NX_HOST_CTX_STATE_ALLOCATED 1 | ||
987 | #define NX_HOST_CTX_STATE_ACTIVE 2 | ||
988 | #define NX_HOST_CTX_STATE_DISABLED 3 | ||
989 | #define NX_HOST_CTX_STATE_QUIESCED 4 | ||
990 | #define NX_HOST_CTX_STATE_MAX 5 | ||
991 | |||
992 | /* | ||
993 | * Rx context | ||
994 | */ | ||
995 | |||
996 | typedef struct { | ||
997 | u64 host_phys_addr; /* Ring base addr */ | ||
998 | u32 ring_size; /* Ring entries */ | ||
999 | u16 msi_index; | ||
1000 | u16 rsvd; /* Padding */ | ||
1001 | } nx_hostrq_sds_ring_t; | ||
1002 | |||
1003 | typedef struct { | ||
1004 | u64 host_phys_addr; /* Ring base addr */ | ||
1005 | u64 buff_size; /* Packet buffer size */ | ||
1006 | u32 ring_size; /* Ring entries */ | ||
1007 | u32 ring_kind; /* Class of ring */ | ||
1008 | } nx_hostrq_rds_ring_t; | ||
1009 | |||
1010 | typedef struct { | ||
1011 | u64 host_rsp_dma_addr; /* Response dma'd here */ | ||
1012 | u32 capabilities[4]; /* Flag bit vector */ | ||
1013 | u32 host_int_crb_mode; /* Interrupt crb usage */ | ||
1014 | u32 host_rds_crb_mode; /* RDS crb usage */ | ||
1015 | /* These ring offsets are relative to data[0] below */ | ||
1016 | u32 rds_ring_offset; /* Offset to RDS config */ | ||
1017 | u32 sds_ring_offset; /* Offset to SDS config */ | ||
1018 | u16 num_rds_rings; /* Count of RDS rings */ | ||
1019 | u16 num_sds_rings; /* Count of SDS rings */ | ||
1020 | u16 rsvd1; /* Padding */ | ||
1021 | u16 rsvd2; /* Padding */ | ||
1022 | u8 reserved[128]; /* reserve space for future expansion*/ | ||
1023 | /* MUST BE 64-bit aligned. | ||
1024 | The following is packed: | ||
1025 | - N hostrq_rds_rings | ||
1026 | - N hostrq_sds_rings */ | ||
1027 | char data[0]; | ||
1028 | } nx_hostrq_rx_ctx_t; | ||
1029 | |||
1030 | typedef struct { | ||
1031 | u32 host_producer_crb; /* Crb to use */ | ||
1032 | u32 rsvd1; /* Padding */ | ||
1033 | } nx_cardrsp_rds_ring_t; | ||
1034 | |||
1035 | typedef struct { | ||
1036 | u32 host_consumer_crb; /* Crb to use */ | ||
1037 | u32 interrupt_crb; /* Crb to use */ | ||
1038 | } nx_cardrsp_sds_ring_t; | ||
1039 | |||
1040 | typedef struct { | ||
1041 | /* These ring offsets are relative to data[0] below */ | ||
1042 | u32 rds_ring_offset; /* Offset to RDS config */ | ||
1043 | u32 sds_ring_offset; /* Offset to SDS config */ | ||
1044 | u32 host_ctx_state; /* Starting State */ | ||
1045 | u32 num_fn_per_port; /* How many PCI fn share the port */ | ||
1046 | u16 num_rds_rings; /* Count of RDS rings */ | ||
1047 | u16 num_sds_rings; /* Count of SDS rings */ | ||
1048 | u16 context_id; /* Handle for context */ | ||
1049 | u8 phys_port; /* Physical id of port */ | ||
1050 | u8 virt_port; /* Virtual/Logical id of port */ | ||
1051 | u8 reserved[128]; /* save space for future expansion */ | ||
1052 | /* MUST BE 64-bit aligned. | ||
1053 | The following is packed: | ||
1054 | - N cardrsp_rds_rings | ||
1055 | - N cardrs_sds_rings */ | ||
1056 | char data[0]; | ||
1057 | } nx_cardrsp_rx_ctx_t; | ||
1058 | |||
1059 | #define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings) \ | ||
1060 | (sizeof(HOSTRQ_RX) + \ | ||
1061 | (rds_rings)*(sizeof(nx_hostrq_rds_ring_t)) + \ | ||
1062 | (sds_rings)*(sizeof(nx_hostrq_sds_ring_t))) | ||
1063 | |||
1064 | #define SIZEOF_CARDRSP_RX(CARDRSP_RX, rds_rings, sds_rings) \ | ||
1065 | (sizeof(CARDRSP_RX) + \ | ||
1066 | (rds_rings)*(sizeof(nx_cardrsp_rds_ring_t)) + \ | ||
1067 | (sds_rings)*(sizeof(nx_cardrsp_sds_ring_t))) | ||
1068 | |||
1069 | /* | ||
1070 | * Tx context | ||
1071 | */ | ||
1072 | |||
1073 | typedef struct { | ||
1074 | u64 host_phys_addr; /* Ring base addr */ | ||
1075 | u32 ring_size; /* Ring entries */ | ||
1076 | u32 rsvd; /* Padding */ | ||
1077 | } nx_hostrq_cds_ring_t; | ||
1078 | |||
1079 | typedef struct { | ||
1080 | u64 host_rsp_dma_addr; /* Response dma'd here */ | ||
1081 | u64 cmd_cons_dma_addr; /* */ | ||
1082 | u64 dummy_dma_addr; /* */ | ||
1083 | u32 capabilities[4]; /* Flag bit vector */ | ||
1084 | u32 host_int_crb_mode; /* Interrupt crb usage */ | ||
1085 | u32 rsvd1; /* Padding */ | ||
1086 | u16 rsvd2; /* Padding */ | ||
1087 | u16 interrupt_ctl; | ||
1088 | u16 msi_index; | ||
1089 | u16 rsvd3; /* Padding */ | ||
1090 | nx_hostrq_cds_ring_t cds_ring; /* Desc of cds ring */ | ||
1091 | u8 reserved[128]; /* future expansion */ | ||
1092 | } nx_hostrq_tx_ctx_t; | ||
1093 | |||
1094 | typedef struct { | ||
1095 | u32 host_producer_crb; /* Crb to use */ | ||
1096 | u32 interrupt_crb; /* Crb to use */ | ||
1097 | } nx_cardrsp_cds_ring_t; | ||
1098 | |||
1099 | typedef struct { | ||
1100 | u32 host_ctx_state; /* Starting state */ | ||
1101 | u16 context_id; /* Handle for context */ | ||
1102 | u8 phys_port; /* Physical id of port */ | ||
1103 | u8 virt_port; /* Virtual/Logical id of port */ | ||
1104 | nx_cardrsp_cds_ring_t cds_ring; /* Card cds settings */ | ||
1105 | u8 reserved[128]; /* future expansion */ | ||
1106 | } nx_cardrsp_tx_ctx_t; | ||
1107 | |||
1108 | #define SIZEOF_HOSTRQ_TX(HOSTRQ_TX) (sizeof(HOSTRQ_TX)) | ||
1109 | #define SIZEOF_CARDRSP_TX(CARDRSP_TX) (sizeof(CARDRSP_TX)) | ||
1110 | |||
1111 | /* CRB */ | ||
1112 | |||
1113 | #define NX_HOST_RDS_CRB_MODE_UNIQUE 0 | ||
1114 | #define NX_HOST_RDS_CRB_MODE_SHARED 1 | ||
1115 | #define NX_HOST_RDS_CRB_MODE_CUSTOM 2 | ||
1116 | #define NX_HOST_RDS_CRB_MODE_MAX 3 | ||
1117 | |||
1118 | #define NX_HOST_INT_CRB_MODE_UNIQUE 0 | ||
1119 | #define NX_HOST_INT_CRB_MODE_SHARED 1 | ||
1120 | #define NX_HOST_INT_CRB_MODE_NORX 2 | ||
1121 | #define NX_HOST_INT_CRB_MODE_NOTX 3 | ||
1122 | #define NX_HOST_INT_CRB_MODE_NORXTX 4 | ||
1123 | |||
1124 | |||
1125 | /* MAC */ | ||
1126 | |||
1127 | #define MC_COUNT_P2 16 | ||
1128 | #define MC_COUNT_P3 38 | ||
1129 | |||
1130 | #define NETXEN_MAC_NOOP 0 | ||
1131 | #define NETXEN_MAC_ADD 1 | ||
1132 | #define NETXEN_MAC_DEL 2 | ||
1133 | |||
1134 | typedef struct nx_mac_list_s { | ||
1135 | struct nx_mac_list_s *next; | ||
1136 | uint8_t mac_addr[MAX_ADDR_LEN]; | ||
1137 | } nx_mac_list_t; | ||
1138 | |||
1139 | /* | ||
1140 | * Interrupt coalescing defaults. The defaults are for 1500 MTU. It is | ||
1141 | * adjusted based on configured MTU. | ||
1142 | */ | ||
1143 | #define NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US 3 | ||
1144 | #define NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS 256 | ||
1145 | #define NETXEN_DEFAULT_INTR_COALESCE_TX_PACKETS 64 | ||
1146 | #define NETXEN_DEFAULT_INTR_COALESCE_TX_TIME_US 4 | ||
1147 | |||
1148 | #define NETXEN_NIC_INTR_DEFAULT 0x04 | ||
1149 | |||
1150 | typedef union { | ||
1151 | struct { | ||
1152 | uint16_t rx_packets; | ||
1153 | uint16_t rx_time_us; | ||
1154 | uint16_t tx_packets; | ||
1155 | uint16_t tx_time_us; | ||
1156 | } data; | ||
1157 | uint64_t word; | ||
1158 | } nx_nic_intr_coalesce_data_t; | ||
1159 | |||
1160 | typedef struct { | ||
1161 | uint16_t stats_time_us; | ||
1162 | uint16_t rate_sample_time; | ||
1163 | uint16_t flags; | ||
1164 | uint16_t rsvd_1; | ||
1165 | uint32_t low_threshold; | ||
1166 | uint32_t high_threshold; | ||
1167 | nx_nic_intr_coalesce_data_t normal; | ||
1168 | nx_nic_intr_coalesce_data_t low; | ||
1169 | nx_nic_intr_coalesce_data_t high; | ||
1170 | nx_nic_intr_coalesce_data_t irq; | ||
1171 | } nx_nic_intr_coalesce_t; | ||
1172 | |||
1173 | typedef struct { | ||
1174 | u64 qhdr; | ||
1175 | u64 req_hdr; | ||
1176 | u64 words[6]; | ||
1177 | } nx_nic_req_t; | ||
1178 | |||
1179 | typedef struct { | ||
1180 | u8 op; | ||
1181 | u8 tag; | ||
1182 | u8 mac_addr[6]; | ||
1183 | } nx_mac_req_t; | ||
1184 | |||
1185 | #define MAX_PENDING_DESC_BLOCK_SIZE 64 | ||
1186 | |||
1187 | #define NETXEN_NIC_MSI_ENABLED 0x02 | ||
1188 | #define NETXEN_NIC_MSIX_ENABLED 0x04 | ||
1189 | #define NETXEN_IS_MSI_FAMILY(adapter) \ | ||
1190 | ((adapter)->flags & (NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED)) | ||
1191 | |||
1192 | #define MSIX_ENTRIES_PER_ADAPTER 8 | ||
1193 | #define NETXEN_MSIX_TBL_SPACE 8192 | ||
1194 | #define NETXEN_PCI_REG_MSIX_TBL 0x44 | ||
1195 | |||
1196 | #define NETXEN_DB_MAPSIZE_BYTES 0x1000 | ||
1197 | |||
1198 | #define NETXEN_NETDEV_WEIGHT 120 | ||
1199 | #define NETXEN_ADAPTER_UP_MAGIC 777 | ||
1200 | #define NETXEN_NIC_PEG_TUNE 0 | ||
848 | 1201 | ||
849 | struct netxen_dummy_dma { | 1202 | struct netxen_dummy_dma { |
850 | void *addr; | 1203 | void *addr; |
@@ -854,46 +1207,65 @@ struct netxen_dummy_dma { | |||
854 | struct netxen_adapter { | 1207 | struct netxen_adapter { |
855 | struct netxen_hardware_context ahw; | 1208 | struct netxen_hardware_context ahw; |
856 | 1209 | ||
857 | struct netxen_adapter *master; | ||
858 | struct net_device *netdev; | 1210 | struct net_device *netdev; |
859 | struct pci_dev *pdev; | 1211 | struct pci_dev *pdev; |
1212 | int pci_using_dac; | ||
860 | struct napi_struct napi; | 1213 | struct napi_struct napi; |
861 | struct net_device_stats net_stats; | 1214 | struct net_device_stats net_stats; |
862 | unsigned char mac_addr[ETH_ALEN]; | ||
863 | int mtu; | 1215 | int mtu; |
864 | int portnum; | 1216 | int portnum; |
865 | u8 physical_port; | 1217 | u8 physical_port; |
1218 | u16 tx_context_id; | ||
1219 | |||
1220 | uint8_t mc_enabled; | ||
1221 | uint8_t max_mc_count; | ||
1222 | nx_mac_list_t *mac_list; | ||
1223 | |||
1224 | struct netxen_legacy_intr_set legacy_intr; | ||
1225 | u32 crb_intr_mask; | ||
866 | 1226 | ||
867 | struct work_struct watchdog_task; | 1227 | struct work_struct watchdog_task; |
868 | struct timer_list watchdog_timer; | 1228 | struct timer_list watchdog_timer; |
869 | struct work_struct tx_timeout_task; | 1229 | struct work_struct tx_timeout_task; |
870 | 1230 | ||
871 | u32 curr_window; | 1231 | u32 curr_window; |
1232 | u32 crb_win; | ||
1233 | rwlock_t adapter_lock; | ||
1234 | |||
1235 | uint64_t dma_mask; | ||
872 | 1236 | ||
873 | u32 cmd_producer; | 1237 | u32 cmd_producer; |
874 | __le32 *cmd_consumer; | 1238 | __le32 *cmd_consumer; |
875 | u32 last_cmd_consumer; | 1239 | u32 last_cmd_consumer; |
1240 | u32 crb_addr_cmd_producer; | ||
1241 | u32 crb_addr_cmd_consumer; | ||
876 | 1242 | ||
877 | u32 max_tx_desc_count; | 1243 | u32 max_tx_desc_count; |
878 | u32 max_rx_desc_count; | 1244 | u32 max_rx_desc_count; |
879 | u32 max_jumbo_rx_desc_count; | 1245 | u32 max_jumbo_rx_desc_count; |
880 | u32 max_lro_rx_desc_count; | 1246 | u32 max_lro_rx_desc_count; |
881 | 1247 | ||
1248 | int max_rds_rings; | ||
1249 | |||
882 | u32 flags; | 1250 | u32 flags; |
883 | u32 irq; | 1251 | u32 irq; |
884 | int driver_mismatch; | 1252 | int driver_mismatch; |
885 | u32 temp; | 1253 | u32 temp; |
886 | 1254 | ||
1255 | u32 fw_major; | ||
1256 | |||
1257 | u8 msix_supported; | ||
1258 | u8 max_possible_rss_rings; | ||
1259 | struct msix_entry msix_entries[MSIX_ENTRIES_PER_ADAPTER]; | ||
1260 | |||
887 | struct netxen_adapter_stats stats; | 1261 | struct netxen_adapter_stats stats; |
888 | 1262 | ||
889 | u16 portno; | ||
890 | u16 link_speed; | 1263 | u16 link_speed; |
891 | u16 link_duplex; | 1264 | u16 link_duplex; |
892 | u16 state; | 1265 | u16 state; |
893 | u16 link_autoneg; | 1266 | u16 link_autoneg; |
894 | int rx_csum; | 1267 | int rx_csum; |
895 | int status; | 1268 | int status; |
896 | spinlock_t stats_lock; | ||
897 | 1269 | ||
898 | struct netxen_cmd_buffer *cmd_buf_arr; /* Command buffers for xmit */ | 1270 | struct netxen_cmd_buffer *cmd_buf_arr; /* Command buffers for xmit */ |
899 | 1271 | ||
@@ -905,25 +1277,33 @@ struct netxen_adapter { | |||
905 | 1277 | ||
906 | int is_up; | 1278 | int is_up; |
907 | struct netxen_dummy_dma dummy_dma; | 1279 | struct netxen_dummy_dma dummy_dma; |
1280 | nx_nic_intr_coalesce_t coal; | ||
908 | 1281 | ||
909 | /* Context interface shared between card and host */ | 1282 | /* Context interface shared between card and host */ |
910 | struct netxen_ring_ctx *ctx_desc; | 1283 | struct netxen_ring_ctx *ctx_desc; |
911 | struct pci_dev *ctx_desc_pdev; | ||
912 | dma_addr_t ctx_desc_phys_addr; | 1284 | dma_addr_t ctx_desc_phys_addr; |
913 | int intr_scheme; | 1285 | int intr_scheme; |
914 | int msi_mode; | 1286 | int msi_mode; |
915 | int (*enable_phy_interrupts) (struct netxen_adapter *); | 1287 | int (*enable_phy_interrupts) (struct netxen_adapter *); |
916 | int (*disable_phy_interrupts) (struct netxen_adapter *); | 1288 | int (*disable_phy_interrupts) (struct netxen_adapter *); |
917 | void (*handle_phy_intr) (struct netxen_adapter *); | ||
918 | int (*macaddr_set) (struct netxen_adapter *, netxen_ethernet_macaddr_t); | 1289 | int (*macaddr_set) (struct netxen_adapter *, netxen_ethernet_macaddr_t); |
919 | int (*set_mtu) (struct netxen_adapter *, int); | 1290 | int (*set_mtu) (struct netxen_adapter *, int); |
920 | int (*set_promisc) (struct netxen_adapter *, netxen_niu_prom_mode_t); | 1291 | int (*set_promisc) (struct netxen_adapter *, netxen_niu_prom_mode_t); |
921 | int (*unset_promisc) (struct netxen_adapter *, netxen_niu_prom_mode_t); | ||
922 | int (*phy_read) (struct netxen_adapter *, long reg, u32 *); | 1292 | int (*phy_read) (struct netxen_adapter *, long reg, u32 *); |
923 | int (*phy_write) (struct netxen_adapter *, long reg, u32 val); | 1293 | int (*phy_write) (struct netxen_adapter *, long reg, u32 val); |
924 | int (*init_port) (struct netxen_adapter *, int); | 1294 | int (*init_port) (struct netxen_adapter *, int); |
925 | void (*init_niu) (struct netxen_adapter *); | ||
926 | int (*stop_port) (struct netxen_adapter *); | 1295 | int (*stop_port) (struct netxen_adapter *); |
1296 | |||
1297 | int (*hw_read_wx)(struct netxen_adapter *, ulong, void *, int); | ||
1298 | int (*hw_write_wx)(struct netxen_adapter *, ulong, void *, int); | ||
1299 | int (*pci_mem_read)(struct netxen_adapter *, u64, void *, int); | ||
1300 | int (*pci_mem_write)(struct netxen_adapter *, u64, void *, int); | ||
1301 | int (*pci_write_immediate)(struct netxen_adapter *, u64, u32); | ||
1302 | u32 (*pci_read_immediate)(struct netxen_adapter *, u64); | ||
1303 | void (*pci_write_normalize)(struct netxen_adapter *, u64, u32); | ||
1304 | u32 (*pci_read_normalize)(struct netxen_adapter *, u64); | ||
1305 | unsigned long (*pci_set_window)(struct netxen_adapter *, | ||
1306 | unsigned long long); | ||
927 | }; /* netxen_adapter structure */ | 1307 | }; /* netxen_adapter structure */ |
928 | 1308 | ||
929 | /* | 1309 | /* |
@@ -988,8 +1368,6 @@ int netxen_niu_xgbe_enable_phy_interrupts(struct netxen_adapter *adapter); | |||
988 | int netxen_niu_gbe_enable_phy_interrupts(struct netxen_adapter *adapter); | 1368 | int netxen_niu_gbe_enable_phy_interrupts(struct netxen_adapter *adapter); |
989 | int netxen_niu_xgbe_disable_phy_interrupts(struct netxen_adapter *adapter); | 1369 | int netxen_niu_xgbe_disable_phy_interrupts(struct netxen_adapter *adapter); |
990 | int netxen_niu_gbe_disable_phy_interrupts(struct netxen_adapter *adapter); | 1370 | int netxen_niu_gbe_disable_phy_interrupts(struct netxen_adapter *adapter); |
991 | void netxen_nic_xgbe_handle_phy_intr(struct netxen_adapter *adapter); | ||
992 | void netxen_nic_gbe_handle_phy_intr(struct netxen_adapter *adapter); | ||
993 | int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long reg, | 1371 | int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long reg, |
994 | __u32 * readval); | 1372 | __u32 * readval); |
995 | int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter, | 1373 | int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter, |
@@ -998,27 +1376,61 @@ int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter, | |||
998 | /* Functions available from netxen_nic_hw.c */ | 1376 | /* Functions available from netxen_nic_hw.c */ |
999 | int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu); | 1377 | int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu); |
1000 | int netxen_nic_set_mtu_gb(struct netxen_adapter *adapter, int new_mtu); | 1378 | int netxen_nic_set_mtu_gb(struct netxen_adapter *adapter, int new_mtu); |
1001 | void netxen_nic_init_niu_gb(struct netxen_adapter *adapter); | ||
1002 | void netxen_nic_pci_change_crbwindow(struct netxen_adapter *adapter, u32 wndw); | ||
1003 | void netxen_nic_reg_write(struct netxen_adapter *adapter, u64 off, u32 val); | 1379 | void netxen_nic_reg_write(struct netxen_adapter *adapter, u64 off, u32 val); |
1004 | int netxen_nic_reg_read(struct netxen_adapter *adapter, u64 off); | 1380 | int netxen_nic_reg_read(struct netxen_adapter *adapter, u64 off); |
1005 | void netxen_nic_write_w0(struct netxen_adapter *adapter, u32 index, u32 value); | 1381 | void netxen_nic_write_w0(struct netxen_adapter *adapter, u32 index, u32 value); |
1006 | void netxen_nic_read_w0(struct netxen_adapter *adapter, u32 index, u32 * value); | 1382 | void netxen_nic_read_w0(struct netxen_adapter *adapter, u32 index, u32 *value); |
1383 | void netxen_nic_write_w1(struct netxen_adapter *adapter, u32 index, u32 value); | ||
1384 | void netxen_nic_read_w1(struct netxen_adapter *adapter, u32 index, u32 *value); | ||
1007 | 1385 | ||
1008 | int netxen_nic_get_board_info(struct netxen_adapter *adapter); | 1386 | int netxen_nic_get_board_info(struct netxen_adapter *adapter); |
1009 | int netxen_nic_hw_read_wx(struct netxen_adapter *adapter, u64 off, void *data, | 1387 | |
1010 | int len); | 1388 | int netxen_nic_hw_read_wx_128M(struct netxen_adapter *adapter, |
1011 | int netxen_nic_hw_write_wx(struct netxen_adapter *adapter, u64 off, void *data, | 1389 | ulong off, void *data, int len); |
1012 | int len); | 1390 | int netxen_nic_hw_write_wx_128M(struct netxen_adapter *adapter, |
1391 | ulong off, void *data, int len); | ||
1392 | int netxen_nic_pci_mem_read_128M(struct netxen_adapter *adapter, | ||
1393 | u64 off, void *data, int size); | ||
1394 | int netxen_nic_pci_mem_write_128M(struct netxen_adapter *adapter, | ||
1395 | u64 off, void *data, int size); | ||
1396 | int netxen_nic_pci_write_immediate_128M(struct netxen_adapter *adapter, | ||
1397 | u64 off, u32 data); | ||
1398 | u32 netxen_nic_pci_read_immediate_128M(struct netxen_adapter *adapter, u64 off); | ||
1399 | void netxen_nic_pci_write_normalize_128M(struct netxen_adapter *adapter, | ||
1400 | u64 off, u32 data); | ||
1401 | u32 netxen_nic_pci_read_normalize_128M(struct netxen_adapter *adapter, u64 off); | ||
1402 | unsigned long netxen_nic_pci_set_window_128M(struct netxen_adapter *adapter, | ||
1403 | unsigned long long addr); | ||
1404 | void netxen_nic_pci_change_crbwindow_128M(struct netxen_adapter *adapter, | ||
1405 | u32 wndw); | ||
1406 | |||
1407 | int netxen_nic_hw_read_wx_2M(struct netxen_adapter *adapter, | ||
1408 | ulong off, void *data, int len); | ||
1409 | int netxen_nic_hw_write_wx_2M(struct netxen_adapter *adapter, | ||
1410 | ulong off, void *data, int len); | ||
1411 | int netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter, | ||
1412 | u64 off, void *data, int size); | ||
1413 | int netxen_nic_pci_mem_write_2M(struct netxen_adapter *adapter, | ||
1414 | u64 off, void *data, int size); | ||
1013 | void netxen_crb_writelit_adapter(struct netxen_adapter *adapter, | 1415 | void netxen_crb_writelit_adapter(struct netxen_adapter *adapter, |
1014 | unsigned long off, int data); | 1416 | unsigned long off, int data); |
1417 | int netxen_nic_pci_write_immediate_2M(struct netxen_adapter *adapter, | ||
1418 | u64 off, u32 data); | ||
1419 | u32 netxen_nic_pci_read_immediate_2M(struct netxen_adapter *adapter, u64 off); | ||
1420 | void netxen_nic_pci_write_normalize_2M(struct netxen_adapter *adapter, | ||
1421 | u64 off, u32 data); | ||
1422 | u32 netxen_nic_pci_read_normalize_2M(struct netxen_adapter *adapter, u64 off); | ||
1423 | unsigned long netxen_nic_pci_set_window_2M(struct netxen_adapter *adapter, | ||
1424 | unsigned long long addr); | ||
1015 | 1425 | ||
1016 | /* Functions from netxen_nic_init.c */ | 1426 | /* Functions from netxen_nic_init.c */ |
1017 | void netxen_free_adapter_offload(struct netxen_adapter *adapter); | 1427 | void netxen_free_adapter_offload(struct netxen_adapter *adapter); |
1018 | int netxen_initialize_adapter_offload(struct netxen_adapter *adapter); | 1428 | int netxen_initialize_adapter_offload(struct netxen_adapter *adapter); |
1019 | int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val); | 1429 | int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val); |
1430 | int netxen_receive_peg_ready(struct netxen_adapter *adapter); | ||
1020 | int netxen_load_firmware(struct netxen_adapter *adapter); | 1431 | int netxen_load_firmware(struct netxen_adapter *adapter); |
1021 | int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose); | 1432 | int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose); |
1433 | |||
1022 | int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp); | 1434 | int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp); |
1023 | int netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr, | 1435 | int netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr, |
1024 | u8 *bytes, size_t size); | 1436 | u8 *bytes, size_t size); |
@@ -1032,33 +1444,43 @@ void netxen_halt_pegs(struct netxen_adapter *adapter); | |||
1032 | 1444 | ||
1033 | int netxen_rom_se(struct netxen_adapter *adapter, int addr); | 1445 | int netxen_rom_se(struct netxen_adapter *adapter, int addr); |
1034 | 1446 | ||
1035 | /* Functions from netxen_nic_isr.c */ | 1447 | int netxen_alloc_sw_resources(struct netxen_adapter *adapter); |
1036 | void netxen_initialize_adapter_sw(struct netxen_adapter *adapter); | 1448 | void netxen_free_sw_resources(struct netxen_adapter *adapter); |
1037 | void *netxen_alloc(struct pci_dev *pdev, size_t sz, dma_addr_t * ptr, | 1449 | |
1038 | struct pci_dev **used_dev); | 1450 | int netxen_alloc_hw_resources(struct netxen_adapter *adapter); |
1451 | void netxen_free_hw_resources(struct netxen_adapter *adapter); | ||
1452 | |||
1453 | void netxen_release_rx_buffers(struct netxen_adapter *adapter); | ||
1454 | void netxen_release_tx_buffers(struct netxen_adapter *adapter); | ||
1455 | |||
1039 | void netxen_initialize_adapter_ops(struct netxen_adapter *adapter); | 1456 | void netxen_initialize_adapter_ops(struct netxen_adapter *adapter); |
1040 | int netxen_init_firmware(struct netxen_adapter *adapter); | 1457 | int netxen_init_firmware(struct netxen_adapter *adapter); |
1041 | void netxen_free_hw_resources(struct netxen_adapter *adapter); | ||
1042 | void netxen_tso_check(struct netxen_adapter *adapter, | 1458 | void netxen_tso_check(struct netxen_adapter *adapter, |
1043 | struct cmd_desc_type0 *desc, struct sk_buff *skb); | 1459 | struct cmd_desc_type0 *desc, struct sk_buff *skb); |
1044 | int netxen_nic_hw_resources(struct netxen_adapter *adapter); | ||
1045 | void netxen_nic_clear_stats(struct netxen_adapter *adapter); | 1460 | void netxen_nic_clear_stats(struct netxen_adapter *adapter); |
1046 | void netxen_watchdog_task(struct work_struct *work); | 1461 | void netxen_watchdog_task(struct work_struct *work); |
1047 | void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, | 1462 | void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, |
1048 | u32 ringid); | 1463 | u32 ringid); |
1049 | int netxen_process_cmd_ring(struct netxen_adapter *adapter); | 1464 | int netxen_process_cmd_ring(struct netxen_adapter *adapter); |
1050 | u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctx, int max); | 1465 | u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctx, int max); |
1051 | void netxen_nic_set_multi(struct net_device *netdev); | 1466 | void netxen_p2_nic_set_multi(struct net_device *netdev); |
1467 | void netxen_p3_nic_set_multi(struct net_device *netdev); | ||
1468 | int netxen_config_intr_coalesce(struct netxen_adapter *adapter); | ||
1469 | |||
1470 | u32 nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, u32 mtu); | ||
1052 | int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu); | 1471 | int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu); |
1472 | |||
1053 | int netxen_nic_set_mac(struct net_device *netdev, void *p); | 1473 | int netxen_nic_set_mac(struct net_device *netdev, void *p); |
1054 | struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev); | 1474 | struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev); |
1055 | 1475 | ||
1476 | void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter, | ||
1477 | uint32_t crb_producer); | ||
1056 | 1478 | ||
1057 | /* | 1479 | /* |
1058 | * NetXen Board information | 1480 | * NetXen Board information |
1059 | */ | 1481 | */ |
1060 | 1482 | ||
1061 | #define NETXEN_MAX_SHORT_NAME 16 | 1483 | #define NETXEN_MAX_SHORT_NAME 32 |
1062 | struct netxen_brdinfo { | 1484 | struct netxen_brdinfo { |
1063 | netxen_brdtype_t brdtype; /* type of board */ | 1485 | netxen_brdtype_t brdtype; /* type of board */ |
1064 | long ports; /* max no of physical ports */ | 1486 | long ports; /* max no of physical ports */ |
@@ -1072,6 +1494,17 @@ static const struct netxen_brdinfo netxen_boards[] = { | |||
1072 | {NETXEN_BRDTYPE_P2_SB31_10G, 1, "XGb XFP"}, | 1494 | {NETXEN_BRDTYPE_P2_SB31_10G, 1, "XGb XFP"}, |
1073 | {NETXEN_BRDTYPE_P2_SB35_4G, 4, "Quad Gb"}, | 1495 | {NETXEN_BRDTYPE_P2_SB35_4G, 4, "Quad Gb"}, |
1074 | {NETXEN_BRDTYPE_P2_SB31_2G, 2, "Dual Gb"}, | 1496 | {NETXEN_BRDTYPE_P2_SB31_2G, 2, "Dual Gb"}, |
1497 | {NETXEN_BRDTYPE_P3_REF_QG, 4, "Reference Quad Gig "}, | ||
1498 | {NETXEN_BRDTYPE_P3_HMEZ, 2, "Dual XGb HMEZ"}, | ||
1499 | {NETXEN_BRDTYPE_P3_10G_CX4_LP, 2, "Dual XGb CX4 LP"}, | ||
1500 | {NETXEN_BRDTYPE_P3_4_GB, 4, "Quad Gig LP"}, | ||
1501 | {NETXEN_BRDTYPE_P3_IMEZ, 2, "Dual XGb IMEZ"}, | ||
1502 | {NETXEN_BRDTYPE_P3_10G_SFP_PLUS, 2, "Dual XGb SFP+ LP"}, | ||
1503 | {NETXEN_BRDTYPE_P3_10000_BASE_T, 1, "XGB 10G BaseT LP"}, | ||
1504 | {NETXEN_BRDTYPE_P3_XG_LOM, 2, "Dual XGb LOM"}, | ||
1505 | {NETXEN_BRDTYPE_P3_4_GB_MM, 4, "Quad GB - March Madness"}, | ||
1506 | {NETXEN_BRDTYPE_P3_10G_CX4, 2, "Reference Dual CX4 Option"}, | ||
1507 | {NETXEN_BRDTYPE_P3_10G_XFP, 1, "Reference Single XFP Option"} | ||
1075 | }; | 1508 | }; |
1076 | 1509 | ||
1077 | #define NUM_SUPPORTED_BOARDS ARRAY_SIZE(netxen_boards) | 1510 | #define NUM_SUPPORTED_BOARDS ARRAY_SIZE(netxen_boards) |
@@ -1097,7 +1530,7 @@ dma_watchdog_shutdown_request(struct netxen_adapter *adapter) | |||
1097 | u32 ctrl; | 1530 | u32 ctrl; |
1098 | 1531 | ||
1099 | /* check if already inactive */ | 1532 | /* check if already inactive */ |
1100 | if (netxen_nic_hw_read_wx(adapter, | 1533 | if (adapter->hw_read_wx(adapter, |
1101 | NETXEN_CAM_RAM(NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL), &ctrl, 4)) | 1534 | NETXEN_CAM_RAM(NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL), &ctrl, 4)) |
1102 | printk(KERN_ERR "failed to read dma watchdog status\n"); | 1535 | printk(KERN_ERR "failed to read dma watchdog status\n"); |
1103 | 1536 | ||
@@ -1117,7 +1550,7 @@ dma_watchdog_shutdown_poll_result(struct netxen_adapter *adapter) | |||
1117 | { | 1550 | { |
1118 | u32 ctrl; | 1551 | u32 ctrl; |
1119 | 1552 | ||
1120 | if (netxen_nic_hw_read_wx(adapter, | 1553 | if (adapter->hw_read_wx(adapter, |
1121 | NETXEN_CAM_RAM(NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL), &ctrl, 4)) | 1554 | NETXEN_CAM_RAM(NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL), &ctrl, 4)) |
1122 | printk(KERN_ERR "failed to read dma watchdog status\n"); | 1555 | printk(KERN_ERR "failed to read dma watchdog status\n"); |
1123 | 1556 | ||
@@ -1129,7 +1562,7 @@ dma_watchdog_wakeup(struct netxen_adapter *adapter) | |||
1129 | { | 1562 | { |
1130 | u32 ctrl; | 1563 | u32 ctrl; |
1131 | 1564 | ||
1132 | if (netxen_nic_hw_read_wx(adapter, | 1565 | if (adapter->hw_read_wx(adapter, |
1133 | NETXEN_CAM_RAM(NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL), &ctrl, 4)) | 1566 | NETXEN_CAM_RAM(NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL), &ctrl, 4)) |
1134 | printk(KERN_ERR "failed to read dma watchdog status\n"); | 1567 | printk(KERN_ERR "failed to read dma watchdog status\n"); |
1135 | 1568 | ||
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c new file mode 100644 index 000000000000..64babc59e699 --- /dev/null +++ b/drivers/net/netxen/netxen_nic_ctx.c | |||
@@ -0,0 +1,710 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2003 - 2008 NetXen, Inc. | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation; either version 2 | ||
8 | * of the License, or (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, | ||
18 | * MA 02111-1307, USA. | ||
19 | * | ||
20 | * The full GNU General Public License is included in this distribution | ||
21 | * in the file called LICENSE. | ||
22 | * | ||
23 | * Contact Information: | ||
24 | * info@netxen.com | ||
25 | * NetXen, | ||
26 | * 3965 Freedom Circle, Fourth floor, | ||
27 | * Santa Clara, CA 95054 | ||
28 | * | ||
29 | */ | ||
30 | |||
31 | #include "netxen_nic_hw.h" | ||
32 | #include "netxen_nic.h" | ||
33 | #include "netxen_nic_phan_reg.h" | ||
34 | |||
35 | #define NXHAL_VERSION 1 | ||
36 | |||
37 | static int | ||
38 | netxen_api_lock(struct netxen_adapter *adapter) | ||
39 | { | ||
40 | u32 done = 0, timeout = 0; | ||
41 | |||
42 | for (;;) { | ||
43 | /* Acquire PCIE HW semaphore5 */ | ||
44 | netxen_nic_read_w0(adapter, | ||
45 | NETXEN_PCIE_REG(PCIE_SEM5_LOCK), &done); | ||
46 | |||
47 | if (done == 1) | ||
48 | break; | ||
49 | |||
50 | if (++timeout >= NX_OS_CRB_RETRY_COUNT) { | ||
51 | printk(KERN_ERR "%s: lock timeout.\n", __func__); | ||
52 | return -1; | ||
53 | } | ||
54 | |||
55 | msleep(1); | ||
56 | } | ||
57 | |||
58 | #if 0 | ||
59 | netxen_nic_write_w1(adapter, | ||
60 | NETXEN_API_LOCK_ID, NX_OS_API_LOCK_DRIVER); | ||
61 | #endif | ||
62 | return 0; | ||
63 | } | ||
64 | |||
65 | static int | ||
66 | netxen_api_unlock(struct netxen_adapter *adapter) | ||
67 | { | ||
68 | u32 val; | ||
69 | |||
70 | /* Release PCIE HW semaphore5 */ | ||
71 | netxen_nic_read_w0(adapter, | ||
72 | NETXEN_PCIE_REG(PCIE_SEM5_UNLOCK), &val); | ||
73 | return 0; | ||
74 | } | ||
75 | |||
76 | static u32 | ||
77 | netxen_poll_rsp(struct netxen_adapter *adapter) | ||
78 | { | ||
79 | u32 raw_rsp, rsp = NX_CDRP_RSP_OK; | ||
80 | int timeout = 0; | ||
81 | |||
82 | do { | ||
83 | /* give atleast 1ms for firmware to respond */ | ||
84 | msleep(1); | ||
85 | |||
86 | if (++timeout > NX_OS_CRB_RETRY_COUNT) | ||
87 | return NX_CDRP_RSP_TIMEOUT; | ||
88 | |||
89 | netxen_nic_read_w1(adapter, NX_CDRP_CRB_OFFSET, | ||
90 | &raw_rsp); | ||
91 | |||
92 | rsp = le32_to_cpu(raw_rsp); | ||
93 | } while (!NX_CDRP_IS_RSP(rsp)); | ||
94 | |||
95 | return rsp; | ||
96 | } | ||
97 | |||
98 | static u32 | ||
99 | netxen_issue_cmd(struct netxen_adapter *adapter, | ||
100 | u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd) | ||
101 | { | ||
102 | u32 rsp; | ||
103 | u32 signature = 0; | ||
104 | u32 rcode = NX_RCODE_SUCCESS; | ||
105 | |||
106 | signature = NX_CDRP_SIGNATURE_MAKE(pci_fn, version); | ||
107 | |||
108 | /* Acquire semaphore before accessing CRB */ | ||
109 | if (netxen_api_lock(adapter)) | ||
110 | return NX_RCODE_TIMEOUT; | ||
111 | |||
112 | netxen_nic_write_w1(adapter, NX_SIGN_CRB_OFFSET, | ||
113 | cpu_to_le32(signature)); | ||
114 | |||
115 | netxen_nic_write_w1(adapter, NX_ARG1_CRB_OFFSET, | ||
116 | cpu_to_le32(arg1)); | ||
117 | |||
118 | netxen_nic_write_w1(adapter, NX_ARG2_CRB_OFFSET, | ||
119 | cpu_to_le32(arg2)); | ||
120 | |||
121 | netxen_nic_write_w1(adapter, NX_ARG3_CRB_OFFSET, | ||
122 | cpu_to_le32(arg3)); | ||
123 | |||
124 | netxen_nic_write_w1(adapter, NX_CDRP_CRB_OFFSET, | ||
125 | cpu_to_le32(NX_CDRP_FORM_CMD(cmd))); | ||
126 | |||
127 | rsp = netxen_poll_rsp(adapter); | ||
128 | |||
129 | if (rsp == NX_CDRP_RSP_TIMEOUT) { | ||
130 | printk(KERN_ERR "%s: card response timeout.\n", | ||
131 | netxen_nic_driver_name); | ||
132 | |||
133 | rcode = NX_RCODE_TIMEOUT; | ||
134 | } else if (rsp == NX_CDRP_RSP_FAIL) { | ||
135 | netxen_nic_read_w1(adapter, NX_ARG1_CRB_OFFSET, &rcode); | ||
136 | rcode = le32_to_cpu(rcode); | ||
137 | |||
138 | printk(KERN_ERR "%s: failed card response code:0x%x\n", | ||
139 | netxen_nic_driver_name, rcode); | ||
140 | } | ||
141 | |||
142 | /* Release semaphore */ | ||
143 | netxen_api_unlock(adapter); | ||
144 | |||
145 | return rcode; | ||
146 | } | ||
147 | |||
148 | u32 | ||
149 | nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, u32 mtu) | ||
150 | { | ||
151 | u32 rcode = NX_RCODE_SUCCESS; | ||
152 | struct netxen_recv_context *recv_ctx = &adapter->recv_ctx[0]; | ||
153 | |||
154 | if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE) | ||
155 | rcode = netxen_issue_cmd(adapter, | ||
156 | adapter->ahw.pci_func, | ||
157 | NXHAL_VERSION, | ||
158 | recv_ctx->context_id, | ||
159 | mtu, | ||
160 | 0, | ||
161 | NX_CDRP_CMD_SET_MTU); | ||
162 | |||
163 | return rcode; | ||
164 | } | ||
165 | |||
166 | static int | ||
167 | nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter) | ||
168 | { | ||
169 | void *addr; | ||
170 | nx_hostrq_rx_ctx_t *prq; | ||
171 | nx_cardrsp_rx_ctx_t *prsp; | ||
172 | nx_hostrq_rds_ring_t *prq_rds; | ||
173 | nx_hostrq_sds_ring_t *prq_sds; | ||
174 | nx_cardrsp_rds_ring_t *prsp_rds; | ||
175 | nx_cardrsp_sds_ring_t *prsp_sds; | ||
176 | struct nx_host_rds_ring *rds_ring; | ||
177 | |||
178 | dma_addr_t hostrq_phys_addr, cardrsp_phys_addr; | ||
179 | u64 phys_addr; | ||
180 | |||
181 | int i, nrds_rings, nsds_rings; | ||
182 | size_t rq_size, rsp_size; | ||
183 | u32 cap, reg; | ||
184 | |||
185 | int err; | ||
186 | |||
187 | struct netxen_recv_context *recv_ctx = &adapter->recv_ctx[0]; | ||
188 | |||
189 | /* only one sds ring for now */ | ||
190 | nrds_rings = adapter->max_rds_rings; | ||
191 | nsds_rings = 1; | ||
192 | |||
193 | rq_size = | ||
194 | SIZEOF_HOSTRQ_RX(nx_hostrq_rx_ctx_t, nrds_rings, nsds_rings); | ||
195 | rsp_size = | ||
196 | SIZEOF_CARDRSP_RX(nx_cardrsp_rx_ctx_t, nrds_rings, nsds_rings); | ||
197 | |||
198 | addr = pci_alloc_consistent(adapter->pdev, | ||
199 | rq_size, &hostrq_phys_addr); | ||
200 | if (addr == NULL) | ||
201 | return -ENOMEM; | ||
202 | prq = (nx_hostrq_rx_ctx_t *)addr; | ||
203 | |||
204 | addr = pci_alloc_consistent(adapter->pdev, | ||
205 | rsp_size, &cardrsp_phys_addr); | ||
206 | if (addr == NULL) { | ||
207 | err = -ENOMEM; | ||
208 | goto out_free_rq; | ||
209 | } | ||
210 | prsp = (nx_cardrsp_rx_ctx_t *)addr; | ||
211 | |||
212 | prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr); | ||
213 | |||
214 | cap = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN); | ||
215 | cap |= (NX_CAP0_JUMBO_CONTIGUOUS | NX_CAP0_LRO_CONTIGUOUS); | ||
216 | |||
217 | prq->capabilities[0] = cpu_to_le32(cap); | ||
218 | prq->host_int_crb_mode = | ||
219 | cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED); | ||
220 | prq->host_rds_crb_mode = | ||
221 | cpu_to_le32(NX_HOST_RDS_CRB_MODE_UNIQUE); | ||
222 | |||
223 | prq->num_rds_rings = cpu_to_le16(nrds_rings); | ||
224 | prq->num_sds_rings = cpu_to_le16(nsds_rings); | ||
225 | prq->rds_ring_offset = 0; | ||
226 | prq->sds_ring_offset = prq->rds_ring_offset + | ||
227 | (sizeof(nx_hostrq_rds_ring_t) * nrds_rings); | ||
228 | |||
229 | prq_rds = (nx_hostrq_rds_ring_t *)(prq->data + prq->rds_ring_offset); | ||
230 | |||
231 | for (i = 0; i < nrds_rings; i++) { | ||
232 | |||
233 | rds_ring = &recv_ctx->rds_rings[i]; | ||
234 | |||
235 | prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr); | ||
236 | prq_rds[i].ring_size = cpu_to_le32(rds_ring->max_rx_desc_count); | ||
237 | prq_rds[i].ring_kind = cpu_to_le32(i); | ||
238 | prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size); | ||
239 | } | ||
240 | |||
241 | prq_sds = (nx_hostrq_sds_ring_t *)(prq->data + prq->sds_ring_offset); | ||
242 | |||
243 | prq_sds[0].host_phys_addr = | ||
244 | cpu_to_le64(recv_ctx->rcv_status_desc_phys_addr); | ||
245 | prq_sds[0].ring_size = cpu_to_le32(adapter->max_rx_desc_count); | ||
246 | /* only one msix vector for now */ | ||
247 | prq_sds[0].msi_index = cpu_to_le32(0); | ||
248 | |||
249 | /* now byteswap offsets */ | ||
250 | prq->rds_ring_offset = cpu_to_le32(prq->rds_ring_offset); | ||
251 | prq->sds_ring_offset = cpu_to_le32(prq->sds_ring_offset); | ||
252 | |||
253 | phys_addr = hostrq_phys_addr; | ||
254 | err = netxen_issue_cmd(adapter, | ||
255 | adapter->ahw.pci_func, | ||
256 | NXHAL_VERSION, | ||
257 | (u32)(phys_addr >> 32), | ||
258 | (u32)(phys_addr & 0xffffffff), | ||
259 | rq_size, | ||
260 | NX_CDRP_CMD_CREATE_RX_CTX); | ||
261 | if (err) { | ||
262 | printk(KERN_WARNING | ||
263 | "Failed to create rx ctx in firmware%d\n", err); | ||
264 | goto out_free_rsp; | ||
265 | } | ||
266 | |||
267 | |||
268 | prsp_rds = ((nx_cardrsp_rds_ring_t *) | ||
269 | &prsp->data[prsp->rds_ring_offset]); | ||
270 | |||
271 | for (i = 0; i < le32_to_cpu(prsp->num_rds_rings); i++) { | ||
272 | rds_ring = &recv_ctx->rds_rings[i]; | ||
273 | |||
274 | reg = le32_to_cpu(prsp_rds[i].host_producer_crb); | ||
275 | rds_ring->crb_rcv_producer = NETXEN_NIC_REG(reg - 0x200); | ||
276 | } | ||
277 | |||
278 | prsp_sds = ((nx_cardrsp_sds_ring_t *) | ||
279 | &prsp->data[prsp->sds_ring_offset]); | ||
280 | reg = le32_to_cpu(prsp_sds[0].host_consumer_crb); | ||
281 | recv_ctx->crb_sts_consumer = NETXEN_NIC_REG(reg - 0x200); | ||
282 | |||
283 | reg = le32_to_cpu(prsp_sds[0].interrupt_crb); | ||
284 | adapter->crb_intr_mask = NETXEN_NIC_REG(reg - 0x200); | ||
285 | |||
286 | recv_ctx->state = le32_to_cpu(prsp->host_ctx_state); | ||
287 | recv_ctx->context_id = le16_to_cpu(prsp->context_id); | ||
288 | recv_ctx->virt_port = le16_to_cpu(prsp->virt_port); | ||
289 | |||
290 | out_free_rsp: | ||
291 | pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr); | ||
292 | out_free_rq: | ||
293 | pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr); | ||
294 | return err; | ||
295 | } | ||
296 | |||
297 | static void | ||
298 | nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter) | ||
299 | { | ||
300 | struct netxen_recv_context *recv_ctx = &adapter->recv_ctx[0]; | ||
301 | |||
302 | if (netxen_issue_cmd(adapter, | ||
303 | adapter->ahw.pci_func, | ||
304 | NXHAL_VERSION, | ||
305 | recv_ctx->context_id, | ||
306 | NX_DESTROY_CTX_RESET, | ||
307 | 0, | ||
308 | NX_CDRP_CMD_DESTROY_RX_CTX)) { | ||
309 | |||
310 | printk(KERN_WARNING | ||
311 | "%s: Failed to destroy rx ctx in firmware\n", | ||
312 | netxen_nic_driver_name); | ||
313 | } | ||
314 | } | ||
315 | |||
316 | static int | ||
317 | nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter) | ||
318 | { | ||
319 | nx_hostrq_tx_ctx_t *prq; | ||
320 | nx_hostrq_cds_ring_t *prq_cds; | ||
321 | nx_cardrsp_tx_ctx_t *prsp; | ||
322 | void *rq_addr, *rsp_addr; | ||
323 | size_t rq_size, rsp_size; | ||
324 | u32 temp; | ||
325 | int err = 0; | ||
326 | u64 offset, phys_addr; | ||
327 | dma_addr_t rq_phys_addr, rsp_phys_addr; | ||
328 | |||
329 | rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t); | ||
330 | rq_addr = pci_alloc_consistent(adapter->pdev, | ||
331 | rq_size, &rq_phys_addr); | ||
332 | if (!rq_addr) | ||
333 | return -ENOMEM; | ||
334 | |||
335 | rsp_size = SIZEOF_CARDRSP_TX(nx_cardrsp_tx_ctx_t); | ||
336 | rsp_addr = pci_alloc_consistent(adapter->pdev, | ||
337 | rsp_size, &rsp_phys_addr); | ||
338 | if (!rsp_addr) { | ||
339 | err = -ENOMEM; | ||
340 | goto out_free_rq; | ||
341 | } | ||
342 | |||
343 | memset(rq_addr, 0, rq_size); | ||
344 | prq = (nx_hostrq_tx_ctx_t *)rq_addr; | ||
345 | |||
346 | memset(rsp_addr, 0, rsp_size); | ||
347 | prsp = (nx_cardrsp_tx_ctx_t *)rsp_addr; | ||
348 | |||
349 | prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr); | ||
350 | |||
351 | temp = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN | NX_CAP0_LSO); | ||
352 | prq->capabilities[0] = cpu_to_le32(temp); | ||
353 | |||
354 | prq->host_int_crb_mode = | ||
355 | cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED); | ||
356 | |||
357 | prq->interrupt_ctl = 0; | ||
358 | prq->msi_index = 0; | ||
359 | |||
360 | prq->dummy_dma_addr = cpu_to_le64(adapter->dummy_dma.phys_addr); | ||
361 | |||
362 | offset = adapter->ctx_desc_phys_addr+sizeof(struct netxen_ring_ctx); | ||
363 | prq->cmd_cons_dma_addr = cpu_to_le64(offset); | ||
364 | |||
365 | prq_cds = &prq->cds_ring; | ||
366 | |||
367 | prq_cds->host_phys_addr = | ||
368 | cpu_to_le64(adapter->ahw.cmd_desc_phys_addr); | ||
369 | |||
370 | prq_cds->ring_size = cpu_to_le32(adapter->max_tx_desc_count); | ||
371 | |||
372 | phys_addr = rq_phys_addr; | ||
373 | err = netxen_issue_cmd(adapter, | ||
374 | adapter->ahw.pci_func, | ||
375 | NXHAL_VERSION, | ||
376 | (u32)(phys_addr >> 32), | ||
377 | ((u32)phys_addr & 0xffffffff), | ||
378 | rq_size, | ||
379 | NX_CDRP_CMD_CREATE_TX_CTX); | ||
380 | |||
381 | if (err == NX_RCODE_SUCCESS) { | ||
382 | temp = le32_to_cpu(prsp->cds_ring.host_producer_crb); | ||
383 | adapter->crb_addr_cmd_producer = | ||
384 | NETXEN_NIC_REG(temp - 0x200); | ||
385 | #if 0 | ||
386 | adapter->tx_state = | ||
387 | le32_to_cpu(prsp->host_ctx_state); | ||
388 | #endif | ||
389 | adapter->tx_context_id = | ||
390 | le16_to_cpu(prsp->context_id); | ||
391 | } else { | ||
392 | printk(KERN_WARNING | ||
393 | "Failed to create tx ctx in firmware%d\n", err); | ||
394 | err = -EIO; | ||
395 | } | ||
396 | |||
397 | pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr); | ||
398 | |||
399 | out_free_rq: | ||
400 | pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr); | ||
401 | |||
402 | return err; | ||
403 | } | ||
404 | |||
405 | static void | ||
406 | nx_fw_cmd_destroy_tx_ctx(struct netxen_adapter *adapter) | ||
407 | { | ||
408 | if (netxen_issue_cmd(adapter, | ||
409 | adapter->ahw.pci_func, | ||
410 | NXHAL_VERSION, | ||
411 | adapter->tx_context_id, | ||
412 | NX_DESTROY_CTX_RESET, | ||
413 | 0, | ||
414 | NX_CDRP_CMD_DESTROY_TX_CTX)) { | ||
415 | |||
416 | printk(KERN_WARNING | ||
417 | "%s: Failed to destroy tx ctx in firmware\n", | ||
418 | netxen_nic_driver_name); | ||
419 | } | ||
420 | } | ||
421 | |||
422 | static u64 ctx_addr_sig_regs[][3] = { | ||
423 | {NETXEN_NIC_REG(0x188), NETXEN_NIC_REG(0x18c), NETXEN_NIC_REG(0x1c0)}, | ||
424 | {NETXEN_NIC_REG(0x190), NETXEN_NIC_REG(0x194), NETXEN_NIC_REG(0x1c4)}, | ||
425 | {NETXEN_NIC_REG(0x198), NETXEN_NIC_REG(0x19c), NETXEN_NIC_REG(0x1c8)}, | ||
426 | {NETXEN_NIC_REG(0x1a0), NETXEN_NIC_REG(0x1a4), NETXEN_NIC_REG(0x1cc)} | ||
427 | }; | ||
428 | |||
429 | #define CRB_CTX_ADDR_REG_LO(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][0]) | ||
430 | #define CRB_CTX_ADDR_REG_HI(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][2]) | ||
431 | #define CRB_CTX_SIGNATURE_REG(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][1]) | ||
432 | |||
433 | #define lower32(x) ((u32)((x) & 0xffffffff)) | ||
434 | #define upper32(x) ((u32)(((u64)(x) >> 32) & 0xffffffff)) | ||
435 | |||
436 | static struct netxen_recv_crb recv_crb_registers[] = { | ||
437 | /* Instance 0 */ | ||
438 | { | ||
439 | /* crb_rcv_producer: */ | ||
440 | { | ||
441 | NETXEN_NIC_REG(0x100), | ||
442 | /* Jumbo frames */ | ||
443 | NETXEN_NIC_REG(0x110), | ||
444 | /* LRO */ | ||
445 | NETXEN_NIC_REG(0x120) | ||
446 | }, | ||
447 | /* crb_sts_consumer: */ | ||
448 | NETXEN_NIC_REG(0x138), | ||
449 | }, | ||
450 | /* Instance 1 */ | ||
451 | { | ||
452 | /* crb_rcv_producer: */ | ||
453 | { | ||
454 | NETXEN_NIC_REG(0x144), | ||
455 | /* Jumbo frames */ | ||
456 | NETXEN_NIC_REG(0x154), | ||
457 | /* LRO */ | ||
458 | NETXEN_NIC_REG(0x164) | ||
459 | }, | ||
460 | /* crb_sts_consumer: */ | ||
461 | NETXEN_NIC_REG(0x17c), | ||
462 | }, | ||
463 | /* Instance 2 */ | ||
464 | { | ||
465 | /* crb_rcv_producer: */ | ||
466 | { | ||
467 | NETXEN_NIC_REG(0x1d8), | ||
468 | /* Jumbo frames */ | ||
469 | NETXEN_NIC_REG(0x1f8), | ||
470 | /* LRO */ | ||
471 | NETXEN_NIC_REG(0x208) | ||
472 | }, | ||
473 | /* crb_sts_consumer: */ | ||
474 | NETXEN_NIC_REG(0x220), | ||
475 | }, | ||
476 | /* Instance 3 */ | ||
477 | { | ||
478 | /* crb_rcv_producer: */ | ||
479 | { | ||
480 | NETXEN_NIC_REG(0x22c), | ||
481 | /* Jumbo frames */ | ||
482 | NETXEN_NIC_REG(0x23c), | ||
483 | /* LRO */ | ||
484 | NETXEN_NIC_REG(0x24c) | ||
485 | }, | ||
486 | /* crb_sts_consumer: */ | ||
487 | NETXEN_NIC_REG(0x264), | ||
488 | }, | ||
489 | }; | ||
490 | |||
491 | static int | ||
492 | netxen_init_old_ctx(struct netxen_adapter *adapter) | ||
493 | { | ||
494 | struct netxen_recv_context *recv_ctx; | ||
495 | struct nx_host_rds_ring *rds_ring; | ||
496 | int ctx, ring; | ||
497 | int func_id = adapter->portnum; | ||
498 | |||
499 | adapter->ctx_desc->cmd_ring_addr = | ||
500 | cpu_to_le64(adapter->ahw.cmd_desc_phys_addr); | ||
501 | adapter->ctx_desc->cmd_ring_size = | ||
502 | cpu_to_le32(adapter->max_tx_desc_count); | ||
503 | |||
504 | for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) { | ||
505 | recv_ctx = &adapter->recv_ctx[ctx]; | ||
506 | |||
507 | for (ring = 0; ring < adapter->max_rds_rings; ring++) { | ||
508 | rds_ring = &recv_ctx->rds_rings[ring]; | ||
509 | |||
510 | adapter->ctx_desc->rcv_ctx[ring].rcv_ring_addr = | ||
511 | cpu_to_le64(rds_ring->phys_addr); | ||
512 | adapter->ctx_desc->rcv_ctx[ring].rcv_ring_size = | ||
513 | cpu_to_le32(rds_ring->max_rx_desc_count); | ||
514 | } | ||
515 | adapter->ctx_desc->sts_ring_addr = | ||
516 | cpu_to_le64(recv_ctx->rcv_status_desc_phys_addr); | ||
517 | adapter->ctx_desc->sts_ring_size = | ||
518 | cpu_to_le32(adapter->max_rx_desc_count); | ||
519 | } | ||
520 | |||
521 | adapter->pci_write_normalize(adapter, CRB_CTX_ADDR_REG_LO(func_id), | ||
522 | lower32(adapter->ctx_desc_phys_addr)); | ||
523 | adapter->pci_write_normalize(adapter, CRB_CTX_ADDR_REG_HI(func_id), | ||
524 | upper32(adapter->ctx_desc_phys_addr)); | ||
525 | adapter->pci_write_normalize(adapter, CRB_CTX_SIGNATURE_REG(func_id), | ||
526 | NETXEN_CTX_SIGNATURE | func_id); | ||
527 | return 0; | ||
528 | } | ||
529 | |||
530 | static uint32_t sw_int_mask[4] = { | ||
531 | CRB_SW_INT_MASK_0, CRB_SW_INT_MASK_1, | ||
532 | CRB_SW_INT_MASK_2, CRB_SW_INT_MASK_3 | ||
533 | }; | ||
534 | |||
535 | int netxen_alloc_hw_resources(struct netxen_adapter *adapter) | ||
536 | { | ||
537 | struct netxen_hardware_context *hw = &adapter->ahw; | ||
538 | u32 state = 0; | ||
539 | void *addr; | ||
540 | int err = 0; | ||
541 | int ctx, ring; | ||
542 | struct netxen_recv_context *recv_ctx; | ||
543 | struct nx_host_rds_ring *rds_ring; | ||
544 | |||
545 | err = netxen_receive_peg_ready(adapter); | ||
546 | if (err) { | ||
547 | printk(KERN_ERR "Rcv Peg initialization not complete:%x.\n", | ||
548 | state); | ||
549 | return err; | ||
550 | } | ||
551 | |||
552 | addr = pci_alloc_consistent(adapter->pdev, | ||
553 | sizeof(struct netxen_ring_ctx) + sizeof(uint32_t), | ||
554 | &adapter->ctx_desc_phys_addr); | ||
555 | |||
556 | if (addr == NULL) { | ||
557 | DPRINTK(ERR, "failed to allocate hw context\n"); | ||
558 | return -ENOMEM; | ||
559 | } | ||
560 | memset(addr, 0, sizeof(struct netxen_ring_ctx)); | ||
561 | adapter->ctx_desc = (struct netxen_ring_ctx *)addr; | ||
562 | adapter->ctx_desc->ctx_id = cpu_to_le32(adapter->portnum); | ||
563 | adapter->ctx_desc->cmd_consumer_offset = | ||
564 | cpu_to_le64(adapter->ctx_desc_phys_addr + | ||
565 | sizeof(struct netxen_ring_ctx)); | ||
566 | adapter->cmd_consumer = | ||
567 | (__le32 *)(((char *)addr) + sizeof(struct netxen_ring_ctx)); | ||
568 | |||
569 | /* cmd desc ring */ | ||
570 | addr = pci_alloc_consistent(adapter->pdev, | ||
571 | sizeof(struct cmd_desc_type0) * | ||
572 | adapter->max_tx_desc_count, | ||
573 | &hw->cmd_desc_phys_addr); | ||
574 | |||
575 | if (addr == NULL) { | ||
576 | printk(KERN_ERR "%s failed to allocate tx desc ring\n", | ||
577 | netxen_nic_driver_name); | ||
578 | return -ENOMEM; | ||
579 | } | ||
580 | |||
581 | hw->cmd_desc_head = (struct cmd_desc_type0 *)addr; | ||
582 | |||
583 | for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) { | ||
584 | recv_ctx = &adapter->recv_ctx[ctx]; | ||
585 | |||
586 | for (ring = 0; ring < adapter->max_rds_rings; ring++) { | ||
587 | /* rx desc ring */ | ||
588 | rds_ring = &recv_ctx->rds_rings[ring]; | ||
589 | addr = pci_alloc_consistent(adapter->pdev, | ||
590 | RCV_DESC_RINGSIZE, | ||
591 | &rds_ring->phys_addr); | ||
592 | if (addr == NULL) { | ||
593 | printk(KERN_ERR "%s failed to allocate rx " | ||
594 | "desc ring[%d]\n", | ||
595 | netxen_nic_driver_name, ring); | ||
596 | err = -ENOMEM; | ||
597 | goto err_out_free; | ||
598 | } | ||
599 | rds_ring->desc_head = (struct rcv_desc *)addr; | ||
600 | |||
601 | if (adapter->fw_major < 4) | ||
602 | rds_ring->crb_rcv_producer = | ||
603 | recv_crb_registers[adapter->portnum]. | ||
604 | crb_rcv_producer[ring]; | ||
605 | } | ||
606 | |||
607 | /* status desc ring */ | ||
608 | addr = pci_alloc_consistent(adapter->pdev, | ||
609 | STATUS_DESC_RINGSIZE, | ||
610 | &recv_ctx->rcv_status_desc_phys_addr); | ||
611 | if (addr == NULL) { | ||
612 | printk(KERN_ERR "%s failed to allocate sts desc ring\n", | ||
613 | netxen_nic_driver_name); | ||
614 | err = -ENOMEM; | ||
615 | goto err_out_free; | ||
616 | } | ||
617 | recv_ctx->rcv_status_desc_head = (struct status_desc *)addr; | ||
618 | |||
619 | if (adapter->fw_major < 4) | ||
620 | recv_ctx->crb_sts_consumer = | ||
621 | recv_crb_registers[adapter->portnum]. | ||
622 | crb_sts_consumer; | ||
623 | } | ||
624 | |||
625 | if (adapter->fw_major >= 4) { | ||
626 | adapter->intr_scheme = INTR_SCHEME_PERPORT; | ||
627 | adapter->msi_mode = MSI_MODE_MULTIFUNC; | ||
628 | |||
629 | err = nx_fw_cmd_create_rx_ctx(adapter); | ||
630 | if (err) | ||
631 | goto err_out_free; | ||
632 | err = nx_fw_cmd_create_tx_ctx(adapter); | ||
633 | if (err) | ||
634 | goto err_out_free; | ||
635 | } else { | ||
636 | |||
637 | adapter->intr_scheme = adapter->pci_read_normalize(adapter, | ||
638 | CRB_NIC_CAPABILITIES_FW); | ||
639 | adapter->msi_mode = adapter->pci_read_normalize(adapter, | ||
640 | CRB_NIC_MSI_MODE_FW); | ||
641 | adapter->crb_intr_mask = sw_int_mask[adapter->portnum]; | ||
642 | |||
643 | err = netxen_init_old_ctx(adapter); | ||
644 | if (err) { | ||
645 | netxen_free_hw_resources(adapter); | ||
646 | return err; | ||
647 | } | ||
648 | |||
649 | } | ||
650 | |||
651 | return 0; | ||
652 | |||
653 | err_out_free: | ||
654 | netxen_free_hw_resources(adapter); | ||
655 | return err; | ||
656 | } | ||
657 | |||
658 | void netxen_free_hw_resources(struct netxen_adapter *adapter) | ||
659 | { | ||
660 | struct netxen_recv_context *recv_ctx; | ||
661 | struct nx_host_rds_ring *rds_ring; | ||
662 | int ctx, ring; | ||
663 | |||
664 | if (adapter->fw_major >= 4) { | ||
665 | nx_fw_cmd_destroy_tx_ctx(adapter); | ||
666 | nx_fw_cmd_destroy_rx_ctx(adapter); | ||
667 | } | ||
668 | |||
669 | if (adapter->ctx_desc != NULL) { | ||
670 | pci_free_consistent(adapter->pdev, | ||
671 | sizeof(struct netxen_ring_ctx) + | ||
672 | sizeof(uint32_t), | ||
673 | adapter->ctx_desc, | ||
674 | adapter->ctx_desc_phys_addr); | ||
675 | adapter->ctx_desc = NULL; | ||
676 | } | ||
677 | |||
678 | if (adapter->ahw.cmd_desc_head != NULL) { | ||
679 | pci_free_consistent(adapter->pdev, | ||
680 | sizeof(struct cmd_desc_type0) * | ||
681 | adapter->max_tx_desc_count, | ||
682 | adapter->ahw.cmd_desc_head, | ||
683 | adapter->ahw.cmd_desc_phys_addr); | ||
684 | adapter->ahw.cmd_desc_head = NULL; | ||
685 | } | ||
686 | |||
687 | for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) { | ||
688 | recv_ctx = &adapter->recv_ctx[ctx]; | ||
689 | for (ring = 0; ring < adapter->max_rds_rings; ring++) { | ||
690 | rds_ring = &recv_ctx->rds_rings[ring]; | ||
691 | |||
692 | if (rds_ring->desc_head != NULL) { | ||
693 | pci_free_consistent(adapter->pdev, | ||
694 | RCV_DESC_RINGSIZE, | ||
695 | rds_ring->desc_head, | ||
696 | rds_ring->phys_addr); | ||
697 | rds_ring->desc_head = NULL; | ||
698 | } | ||
699 | } | ||
700 | |||
701 | if (recv_ctx->rcv_status_desc_head != NULL) { | ||
702 | pci_free_consistent(adapter->pdev, | ||
703 | STATUS_DESC_RINGSIZE, | ||
704 | recv_ctx->rcv_status_desc_head, | ||
705 | recv_ctx->rcv_status_desc_phys_addr); | ||
706 | recv_ctx->rcv_status_desc_head = NULL; | ||
707 | } | ||
708 | } | ||
709 | } | ||
710 | |||
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c index 723487bf200c..48ee06b6f4e9 100644 --- a/drivers/net/netxen/netxen_nic_ethtool.c +++ b/drivers/net/netxen/netxen_nic_ethtool.c | |||
@@ -93,17 +93,21 @@ static void | |||
93 | netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) | 93 | netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) |
94 | { | 94 | { |
95 | struct netxen_adapter *adapter = netdev_priv(dev); | 95 | struct netxen_adapter *adapter = netdev_priv(dev); |
96 | unsigned long flags; | ||
96 | u32 fw_major = 0; | 97 | u32 fw_major = 0; |
97 | u32 fw_minor = 0; | 98 | u32 fw_minor = 0; |
98 | u32 fw_build = 0; | 99 | u32 fw_build = 0; |
99 | 100 | ||
100 | strncpy(drvinfo->driver, netxen_nic_driver_name, 32); | 101 | strncpy(drvinfo->driver, netxen_nic_driver_name, 32); |
101 | strncpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID, 32); | 102 | strncpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID, 32); |
102 | fw_major = readl(NETXEN_CRB_NORMALIZE(adapter, | 103 | write_lock_irqsave(&adapter->adapter_lock, flags); |
103 | NETXEN_FW_VERSION_MAJOR)); | 104 | fw_major = adapter->pci_read_normalize(adapter, |
104 | fw_minor = readl(NETXEN_CRB_NORMALIZE(adapter, | 105 | NETXEN_FW_VERSION_MAJOR); |
105 | NETXEN_FW_VERSION_MINOR)); | 106 | fw_minor = adapter->pci_read_normalize(adapter, |
106 | fw_build = readl(NETXEN_CRB_NORMALIZE(adapter, NETXEN_FW_VERSION_SUB)); | 107 | NETXEN_FW_VERSION_MINOR); |
108 | fw_build = adapter->pci_read_normalize(adapter, | ||
109 | NETXEN_FW_VERSION_SUB); | ||
110 | write_unlock_irqrestore(&adapter->adapter_lock, flags); | ||
107 | sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build); | 111 | sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build); |
108 | 112 | ||
109 | strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); | 113 | strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); |
@@ -159,9 +163,16 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |||
159 | switch ((netxen_brdtype_t) boardinfo->board_type) { | 163 | switch ((netxen_brdtype_t) boardinfo->board_type) { |
160 | case NETXEN_BRDTYPE_P2_SB35_4G: | 164 | case NETXEN_BRDTYPE_P2_SB35_4G: |
161 | case NETXEN_BRDTYPE_P2_SB31_2G: | 165 | case NETXEN_BRDTYPE_P2_SB31_2G: |
166 | case NETXEN_BRDTYPE_P3_REF_QG: | ||
167 | case NETXEN_BRDTYPE_P3_4_GB: | ||
168 | case NETXEN_BRDTYPE_P3_4_GB_MM: | ||
169 | case NETXEN_BRDTYPE_P3_10000_BASE_T: | ||
170 | |||
162 | ecmd->supported |= SUPPORTED_Autoneg; | 171 | ecmd->supported |= SUPPORTED_Autoneg; |
163 | ecmd->advertising |= ADVERTISED_Autoneg; | 172 | ecmd->advertising |= ADVERTISED_Autoneg; |
164 | case NETXEN_BRDTYPE_P2_SB31_10G_CX4: | 173 | case NETXEN_BRDTYPE_P2_SB31_10G_CX4: |
174 | case NETXEN_BRDTYPE_P3_10G_CX4: | ||
175 | case NETXEN_BRDTYPE_P3_10G_CX4_LP: | ||
165 | ecmd->supported |= SUPPORTED_TP; | 176 | ecmd->supported |= SUPPORTED_TP; |
166 | ecmd->advertising |= ADVERTISED_TP; | 177 | ecmd->advertising |= ADVERTISED_TP; |
167 | ecmd->port = PORT_TP; | 178 | ecmd->port = PORT_TP; |
@@ -171,12 +182,17 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |||
171 | break; | 182 | break; |
172 | case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ: | 183 | case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ: |
173 | case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ: | 184 | case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ: |
185 | case NETXEN_BRDTYPE_P3_IMEZ: | ||
186 | case NETXEN_BRDTYPE_P3_XG_LOM: | ||
187 | case NETXEN_BRDTYPE_P3_HMEZ: | ||
174 | ecmd->supported |= SUPPORTED_MII; | 188 | ecmd->supported |= SUPPORTED_MII; |
175 | ecmd->advertising |= ADVERTISED_MII; | 189 | ecmd->advertising |= ADVERTISED_MII; |
176 | ecmd->port = PORT_FIBRE; | 190 | ecmd->port = PORT_FIBRE; |
177 | ecmd->autoneg = AUTONEG_DISABLE; | 191 | ecmd->autoneg = AUTONEG_DISABLE; |
178 | break; | 192 | break; |
179 | case NETXEN_BRDTYPE_P2_SB31_10G: | 193 | case NETXEN_BRDTYPE_P2_SB31_10G: |
194 | case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: | ||
195 | case NETXEN_BRDTYPE_P3_10G_XFP: | ||
180 | ecmd->supported |= SUPPORTED_FIBRE; | 196 | ecmd->supported |= SUPPORTED_FIBRE; |
181 | ecmd->advertising |= ADVERTISED_FIBRE; | 197 | ecmd->advertising |= ADVERTISED_FIBRE; |
182 | ecmd->port = PORT_FIBRE; | 198 | ecmd->port = PORT_FIBRE; |
@@ -349,19 +365,18 @@ netxen_nic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) | |||
349 | { | 365 | { |
350 | struct netxen_adapter *adapter = netdev_priv(dev); | 366 | struct netxen_adapter *adapter = netdev_priv(dev); |
351 | __u32 mode, *regs_buff = p; | 367 | __u32 mode, *regs_buff = p; |
352 | void __iomem *addr; | ||
353 | int i, window; | 368 | int i, window; |
354 | 369 | ||
355 | memset(p, 0, NETXEN_NIC_REGS_LEN); | 370 | memset(p, 0, NETXEN_NIC_REGS_LEN); |
356 | regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) | | 371 | regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) | |
357 | (adapter->pdev)->device; | 372 | (adapter->pdev)->device; |
358 | /* which mode */ | 373 | /* which mode */ |
359 | NETXEN_NIC_LOCKED_READ_REG(NETXEN_NIU_MODE, ®s_buff[0]); | 374 | adapter->hw_read_wx(adapter, NETXEN_NIU_MODE, ®s_buff[0], 4); |
360 | mode = regs_buff[0]; | 375 | mode = regs_buff[0]; |
361 | 376 | ||
362 | /* Common registers to all the modes */ | 377 | /* Common registers to all the modes */ |
363 | NETXEN_NIC_LOCKED_READ_REG(NETXEN_NIU_STRAP_VALUE_SAVE_HIGHER, | 378 | adapter->hw_read_wx(adapter, |
364 | ®s_buff[2]); | 379 | NETXEN_NIU_STRAP_VALUE_SAVE_HIGHER, ®s_buff[2], 4); |
365 | /* GB/XGB Mode */ | 380 | /* GB/XGB Mode */ |
366 | mode = (mode / 2) - 1; | 381 | mode = (mode / 2) - 1; |
367 | window = 0; | 382 | window = 0; |
@@ -372,9 +387,9 @@ netxen_nic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) | |||
372 | window = adapter->physical_port * | 387 | window = adapter->physical_port * |
373 | NETXEN_NIC_PORT_WINDOW; | 388 | NETXEN_NIC_PORT_WINDOW; |
374 | 389 | ||
375 | NETXEN_NIC_LOCKED_READ_REG(niu_registers[mode]. | 390 | adapter->hw_read_wx(adapter, |
376 | reg[i - 3] + window, | 391 | niu_registers[mode].reg[i - 3] + window, |
377 | ®s_buff[i]); | 392 | ®s_buff[i], 4); |
378 | } | 393 | } |
379 | 394 | ||
380 | } | 395 | } |
@@ -398,7 +413,7 @@ static u32 netxen_nic_test_link(struct net_device *dev) | |||
398 | return !val; | 413 | return !val; |
399 | } | 414 | } |
400 | } else if (adapter->ahw.board_type == NETXEN_NIC_XGBE) { | 415 | } else if (adapter->ahw.board_type == NETXEN_NIC_XGBE) { |
401 | val = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_XG_STATE)); | 416 | val = adapter->pci_read_normalize(adapter, CRB_XG_STATE); |
402 | return (val == XG_LINK_UP) ? 0 : 1; | 417 | return (val == XG_LINK_UP) ? 0 : 1; |
403 | } | 418 | } |
404 | return -EIO; | 419 | return -EIO; |
@@ -427,6 +442,7 @@ netxen_nic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, | |||
427 | return 0; | 442 | return 0; |
428 | } | 443 | } |
429 | 444 | ||
445 | #if 0 | ||
430 | static int | 446 | static int |
431 | netxen_nic_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, | 447 | netxen_nic_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, |
432 | u8 * bytes) | 448 | u8 * bytes) |
@@ -447,7 +463,6 @@ netxen_nic_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, | |||
447 | } | 463 | } |
448 | printk(KERN_INFO "%s: flash unlocked. \n", | 464 | printk(KERN_INFO "%s: flash unlocked. \n", |
449 | netxen_nic_driver_name); | 465 | netxen_nic_driver_name); |
450 | last_schedule_time = jiffies; | ||
451 | ret = netxen_flash_erase_secondary(adapter); | 466 | ret = netxen_flash_erase_secondary(adapter); |
452 | if (ret != FLASH_SUCCESS) { | 467 | if (ret != FLASH_SUCCESS) { |
453 | printk(KERN_ERR "%s: Flash erase failed.\n", | 468 | printk(KERN_ERR "%s: Flash erase failed.\n", |
@@ -497,6 +512,7 @@ netxen_nic_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, | |||
497 | 512 | ||
498 | return netxen_rom_fast_write_words(adapter, offset, bytes, eeprom->len); | 513 | return netxen_rom_fast_write_words(adapter, offset, bytes, eeprom->len); |
499 | } | 514 | } |
515 | #endif /* 0 */ | ||
500 | 516 | ||
501 | static void | 517 | static void |
502 | netxen_nic_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring) | 518 | netxen_nic_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring) |
@@ -508,9 +524,9 @@ netxen_nic_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring) | |||
508 | ring->rx_jumbo_pending = 0; | 524 | ring->rx_jumbo_pending = 0; |
509 | for (i = 0; i < MAX_RCV_CTX; ++i) { | 525 | for (i = 0; i < MAX_RCV_CTX; ++i) { |
510 | ring->rx_pending += adapter->recv_ctx[i]. | 526 | ring->rx_pending += adapter->recv_ctx[i]. |
511 | rcv_desc[RCV_DESC_NORMAL_CTXID].max_rx_desc_count; | 527 | rds_rings[RCV_DESC_NORMAL_CTXID].max_rx_desc_count; |
512 | ring->rx_jumbo_pending += adapter->recv_ctx[i]. | 528 | ring->rx_jumbo_pending += adapter->recv_ctx[i]. |
513 | rcv_desc[RCV_DESC_JUMBO_CTXID].max_rx_desc_count; | 529 | rds_rings[RCV_DESC_JUMBO_CTXID].max_rx_desc_count; |
514 | } | 530 | } |
515 | ring->tx_pending = adapter->max_tx_desc_count; | 531 | ring->tx_pending = adapter->max_tx_desc_count; |
516 | 532 | ||
@@ -655,7 +671,7 @@ static int netxen_nic_reg_test(struct net_device *dev) | |||
655 | data_written = (u32)0xa5a5a5a5; | 671 | data_written = (u32)0xa5a5a5a5; |
656 | 672 | ||
657 | netxen_nic_reg_write(adapter, CRB_SCRATCHPAD_TEST, data_written); | 673 | netxen_nic_reg_write(adapter, CRB_SCRATCHPAD_TEST, data_written); |
658 | data_read = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_SCRATCHPAD_TEST)); | 674 | data_read = adapter->pci_read_normalize(adapter, CRB_SCRATCHPAD_TEST); |
659 | if (data_written != data_read) | 675 | if (data_written != data_read) |
660 | return 1; | 676 | return 1; |
661 | 677 | ||
@@ -736,6 +752,117 @@ static int netxen_nic_set_rx_csum(struct net_device *dev, u32 data) | |||
736 | return 0; | 752 | return 0; |
737 | } | 753 | } |
738 | 754 | ||
755 | static u32 netxen_nic_get_tso(struct net_device *dev) | ||
756 | { | ||
757 | struct netxen_adapter *adapter = netdev_priv(dev); | ||
758 | |||
759 | if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) | ||
760 | return (dev->features & (NETIF_F_TSO | NETIF_F_TSO6)) != 0; | ||
761 | |||
762 | return (dev->features & NETIF_F_TSO) != 0; | ||
763 | } | ||
764 | |||
765 | static int netxen_nic_set_tso(struct net_device *dev, u32 data) | ||
766 | { | ||
767 | if (data) { | ||
768 | struct netxen_adapter *adapter = netdev_priv(dev); | ||
769 | |||
770 | dev->features |= NETIF_F_TSO; | ||
771 | if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) | ||
772 | dev->features |= NETIF_F_TSO6; | ||
773 | } else | ||
774 | dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); | ||
775 | |||
776 | return 0; | ||
777 | } | ||
778 | |||
779 | /* | ||
780 | * Set the coalescing parameters. Currently only normal is supported. | ||
781 | * If rx_coalesce_usecs == 0 or rx_max_coalesced_frames == 0 then set the | ||
782 | * firmware coalescing to default. | ||
783 | */ | ||
784 | static int netxen_set_intr_coalesce(struct net_device *netdev, | ||
785 | struct ethtool_coalesce *ethcoal) | ||
786 | { | ||
787 | struct netxen_adapter *adapter = netdev_priv(netdev); | ||
788 | |||
789 | if (!NX_IS_REVISION_P3(adapter->ahw.revision_id)) | ||
790 | return -EINVAL; | ||
791 | |||
792 | if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) | ||
793 | return -EINVAL; | ||
794 | |||
795 | /* | ||
796 | * Return Error if unsupported values or | ||
797 | * unsupported parameters are set. | ||
798 | */ | ||
799 | if (ethcoal->rx_coalesce_usecs > 0xffff || | ||
800 | ethcoal->rx_max_coalesced_frames > 0xffff || | ||
801 | ethcoal->tx_coalesce_usecs > 0xffff || | ||
802 | ethcoal->tx_max_coalesced_frames > 0xffff || | ||
803 | ethcoal->rx_coalesce_usecs_irq || | ||
804 | ethcoal->rx_max_coalesced_frames_irq || | ||
805 | ethcoal->tx_coalesce_usecs_irq || | ||
806 | ethcoal->tx_max_coalesced_frames_irq || | ||
807 | ethcoal->stats_block_coalesce_usecs || | ||
808 | ethcoal->use_adaptive_rx_coalesce || | ||
809 | ethcoal->use_adaptive_tx_coalesce || | ||
810 | ethcoal->pkt_rate_low || | ||
811 | ethcoal->rx_coalesce_usecs_low || | ||
812 | ethcoal->rx_max_coalesced_frames_low || | ||
813 | ethcoal->tx_coalesce_usecs_low || | ||
814 | ethcoal->tx_max_coalesced_frames_low || | ||
815 | ethcoal->pkt_rate_high || | ||
816 | ethcoal->rx_coalesce_usecs_high || | ||
817 | ethcoal->rx_max_coalesced_frames_high || | ||
818 | ethcoal->tx_coalesce_usecs_high || | ||
819 | ethcoal->tx_max_coalesced_frames_high) | ||
820 | return -EINVAL; | ||
821 | |||
822 | if (!ethcoal->rx_coalesce_usecs || | ||
823 | !ethcoal->rx_max_coalesced_frames) { | ||
824 | adapter->coal.flags = NETXEN_NIC_INTR_DEFAULT; | ||
825 | adapter->coal.normal.data.rx_time_us = | ||
826 | NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US; | ||
827 | adapter->coal.normal.data.rx_packets = | ||
828 | NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS; | ||
829 | } else { | ||
830 | adapter->coal.flags = 0; | ||
831 | adapter->coal.normal.data.rx_time_us = | ||
832 | ethcoal->rx_coalesce_usecs; | ||
833 | adapter->coal.normal.data.rx_packets = | ||
834 | ethcoal->rx_max_coalesced_frames; | ||
835 | } | ||
836 | adapter->coal.normal.data.tx_time_us = ethcoal->tx_coalesce_usecs; | ||
837 | adapter->coal.normal.data.tx_packets = | ||
838 | ethcoal->tx_max_coalesced_frames; | ||
839 | |||
840 | netxen_config_intr_coalesce(adapter); | ||
841 | |||
842 | return 0; | ||
843 | } | ||
844 | |||
845 | static int netxen_get_intr_coalesce(struct net_device *netdev, | ||
846 | struct ethtool_coalesce *ethcoal) | ||
847 | { | ||
848 | struct netxen_adapter *adapter = netdev_priv(netdev); | ||
849 | |||
850 | if (!NX_IS_REVISION_P3(adapter->ahw.revision_id)) | ||
851 | return -EINVAL; | ||
852 | |||
853 | if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) | ||
854 | return -EINVAL; | ||
855 | |||
856 | ethcoal->rx_coalesce_usecs = adapter->coal.normal.data.rx_time_us; | ||
857 | ethcoal->tx_coalesce_usecs = adapter->coal.normal.data.tx_time_us; | ||
858 | ethcoal->rx_max_coalesced_frames = | ||
859 | adapter->coal.normal.data.rx_packets; | ||
860 | ethcoal->tx_max_coalesced_frames = | ||
861 | adapter->coal.normal.data.tx_packets; | ||
862 | |||
863 | return 0; | ||
864 | } | ||
865 | |||
739 | struct ethtool_ops netxen_nic_ethtool_ops = { | 866 | struct ethtool_ops netxen_nic_ethtool_ops = { |
740 | .get_settings = netxen_nic_get_settings, | 867 | .get_settings = netxen_nic_get_settings, |
741 | .set_settings = netxen_nic_set_settings, | 868 | .set_settings = netxen_nic_set_settings, |
@@ -745,17 +872,22 @@ struct ethtool_ops netxen_nic_ethtool_ops = { | |||
745 | .get_link = ethtool_op_get_link, | 872 | .get_link = ethtool_op_get_link, |
746 | .get_eeprom_len = netxen_nic_get_eeprom_len, | 873 | .get_eeprom_len = netxen_nic_get_eeprom_len, |
747 | .get_eeprom = netxen_nic_get_eeprom, | 874 | .get_eeprom = netxen_nic_get_eeprom, |
875 | #if 0 | ||
748 | .set_eeprom = netxen_nic_set_eeprom, | 876 | .set_eeprom = netxen_nic_set_eeprom, |
877 | #endif | ||
749 | .get_ringparam = netxen_nic_get_ringparam, | 878 | .get_ringparam = netxen_nic_get_ringparam, |
750 | .get_pauseparam = netxen_nic_get_pauseparam, | 879 | .get_pauseparam = netxen_nic_get_pauseparam, |
751 | .set_pauseparam = netxen_nic_set_pauseparam, | 880 | .set_pauseparam = netxen_nic_set_pauseparam, |
752 | .set_tx_csum = ethtool_op_set_tx_csum, | 881 | .set_tx_csum = ethtool_op_set_tx_csum, |
753 | .set_sg = ethtool_op_set_sg, | 882 | .set_sg = ethtool_op_set_sg, |
754 | .set_tso = ethtool_op_set_tso, | 883 | .get_tso = netxen_nic_get_tso, |
884 | .set_tso = netxen_nic_set_tso, | ||
755 | .self_test = netxen_nic_diag_test, | 885 | .self_test = netxen_nic_diag_test, |
756 | .get_strings = netxen_nic_get_strings, | 886 | .get_strings = netxen_nic_get_strings, |
757 | .get_ethtool_stats = netxen_nic_get_ethtool_stats, | 887 | .get_ethtool_stats = netxen_nic_get_ethtool_stats, |
758 | .get_sset_count = netxen_get_sset_count, | 888 | .get_sset_count = netxen_get_sset_count, |
759 | .get_rx_csum = netxen_nic_get_rx_csum, | 889 | .get_rx_csum = netxen_nic_get_rx_csum, |
760 | .set_rx_csum = netxen_nic_set_rx_csum, | 890 | .set_rx_csum = netxen_nic_set_rx_csum, |
891 | .get_coalesce = netxen_get_intr_coalesce, | ||
892 | .set_coalesce = netxen_set_intr_coalesce, | ||
761 | }; | 893 | }; |
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h index 24d027e29c45..3ce13e451aac 100644 --- a/drivers/net/netxen/netxen_nic_hdr.h +++ b/drivers/net/netxen/netxen_nic_hdr.h | |||
@@ -126,7 +126,8 @@ enum { | |||
126 | NETXEN_HW_PEGR0_CRB_AGT_ADR, | 126 | NETXEN_HW_PEGR0_CRB_AGT_ADR, |
127 | NETXEN_HW_PEGR1_CRB_AGT_ADR, | 127 | NETXEN_HW_PEGR1_CRB_AGT_ADR, |
128 | NETXEN_HW_PEGR2_CRB_AGT_ADR, | 128 | NETXEN_HW_PEGR2_CRB_AGT_ADR, |
129 | NETXEN_HW_PEGR3_CRB_AGT_ADR | 129 | NETXEN_HW_PEGR3_CRB_AGT_ADR, |
130 | NETXEN_HW_PEGN4_CRB_AGT_ADR | ||
130 | }; | 131 | }; |
131 | 132 | ||
132 | /* Hub 5 */ | 133 | /* Hub 5 */ |
@@ -316,6 +317,8 @@ enum { | |||
316 | ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN2_CRB_AGT_ADR) | 317 | ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN2_CRB_AGT_ADR) |
317 | #define NETXEN_HW_CRB_HUB_AGT_ADR_PGN3 \ | 318 | #define NETXEN_HW_CRB_HUB_AGT_ADR_PGN3 \ |
318 | ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN3_CRB_AGT_ADR) | 319 | ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN3_CRB_AGT_ADR) |
320 | #define NETXEN_HW_CRB_HUB_AGT_ADR_PGN4 \ | ||
321 | ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN4_CRB_AGT_ADR) | ||
319 | #define NETXEN_HW_CRB_HUB_AGT_ADR_PGNC \ | 322 | #define NETXEN_HW_CRB_HUB_AGT_ADR_PGNC \ |
320 | ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGNC_CRB_AGT_ADR) | 323 | ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGNC_CRB_AGT_ADR) |
321 | #define NETXEN_HW_CRB_HUB_AGT_ADR_PGR0 \ | 324 | #define NETXEN_HW_CRB_HUB_AGT_ADR_PGR0 \ |
@@ -435,6 +438,7 @@ enum { | |||
435 | #define NETXEN_CRB_ROMUSB \ | 438 | #define NETXEN_CRB_ROMUSB \ |
436 | NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_ROMUSB) | 439 | NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_ROMUSB) |
437 | #define NETXEN_CRB_I2Q NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2Q) | 440 | #define NETXEN_CRB_I2Q NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2Q) |
441 | #define NETXEN_CRB_SMB NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SMB) | ||
438 | #define NETXEN_CRB_MAX NETXEN_PCI_CRB_WINDOW(64) | 442 | #define NETXEN_CRB_MAX NETXEN_PCI_CRB_WINDOW(64) |
439 | 443 | ||
440 | #define NETXEN_CRB_PCIX_HOST NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PH) | 444 | #define NETXEN_CRB_PCIX_HOST NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PH) |
@@ -446,6 +450,7 @@ enum { | |||
446 | #define NETXEN_CRB_PEG_NET_D NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGND) | 450 | #define NETXEN_CRB_PEG_NET_D NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGND) |
447 | #define NETXEN_CRB_PEG_NET_I NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGNI) | 451 | #define NETXEN_CRB_PEG_NET_I NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGNI) |
448 | #define NETXEN_CRB_DDR_NET NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_MN) | 452 | #define NETXEN_CRB_DDR_NET NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_MN) |
453 | #define NETXEN_CRB_QDR_NET NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SN) | ||
449 | 454 | ||
450 | #define NETXEN_CRB_PCIX_MD NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PS) | 455 | #define NETXEN_CRB_PCIX_MD NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PS) |
451 | #define NETXEN_CRB_PCIE NETXEN_CRB_PCIX_MD | 456 | #define NETXEN_CRB_PCIE NETXEN_CRB_PCIX_MD |
@@ -461,11 +466,20 @@ enum { | |||
461 | #define ISR_INT_TARGET_MASK_F2 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F2)) | 466 | #define ISR_INT_TARGET_MASK_F2 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F2)) |
462 | #define ISR_INT_TARGET_STATUS_F3 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F3)) | 467 | #define ISR_INT_TARGET_STATUS_F3 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F3)) |
463 | #define ISR_INT_TARGET_MASK_F3 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F3)) | 468 | #define ISR_INT_TARGET_MASK_F3 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F3)) |
469 | #define ISR_INT_TARGET_STATUS_F4 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F4)) | ||
470 | #define ISR_INT_TARGET_MASK_F4 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F4)) | ||
471 | #define ISR_INT_TARGET_STATUS_F5 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F5)) | ||
472 | #define ISR_INT_TARGET_MASK_F5 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F5)) | ||
473 | #define ISR_INT_TARGET_STATUS_F6 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F6)) | ||
474 | #define ISR_INT_TARGET_MASK_F6 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F6)) | ||
475 | #define ISR_INT_TARGET_STATUS_F7 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F7)) | ||
476 | #define ISR_INT_TARGET_MASK_F7 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F7)) | ||
464 | 477 | ||
465 | #define NETXEN_PCI_MAPSIZE 128 | 478 | #define NETXEN_PCI_MAPSIZE 128 |
466 | #define NETXEN_PCI_DDR_NET (0x00000000UL) | 479 | #define NETXEN_PCI_DDR_NET (0x00000000UL) |
467 | #define NETXEN_PCI_QDR_NET (0x04000000UL) | 480 | #define NETXEN_PCI_QDR_NET (0x04000000UL) |
468 | #define NETXEN_PCI_DIRECT_CRB (0x04400000UL) | 481 | #define NETXEN_PCI_DIRECT_CRB (0x04400000UL) |
482 | #define NETXEN_PCI_CAMQM (0x04800000UL) | ||
469 | #define NETXEN_PCI_CAMQM_MAX (0x04ffffffUL) | 483 | #define NETXEN_PCI_CAMQM_MAX (0x04ffffffUL) |
470 | #define NETXEN_PCI_OCM0 (0x05000000UL) | 484 | #define NETXEN_PCI_OCM0 (0x05000000UL) |
471 | #define NETXEN_PCI_OCM0_MAX (0x050fffffUL) | 485 | #define NETXEN_PCI_OCM0_MAX (0x050fffffUL) |
@@ -474,6 +488,13 @@ enum { | |||
474 | #define NETXEN_PCI_CRBSPACE (0x06000000UL) | 488 | #define NETXEN_PCI_CRBSPACE (0x06000000UL) |
475 | #define NETXEN_PCI_128MB_SIZE (0x08000000UL) | 489 | #define NETXEN_PCI_128MB_SIZE (0x08000000UL) |
476 | #define NETXEN_PCI_32MB_SIZE (0x02000000UL) | 490 | #define NETXEN_PCI_32MB_SIZE (0x02000000UL) |
491 | #define NETXEN_PCI_2MB_SIZE (0x00200000UL) | ||
492 | |||
493 | #define NETXEN_PCI_MN_2M (0) | ||
494 | #define NETXEN_PCI_MS_2M (0x80000) | ||
495 | #define NETXEN_PCI_OCM0_2M (0x000c0000UL) | ||
496 | #define NETXEN_PCI_CAMQM_2M_BASE (0x000ff800UL) | ||
497 | #define NETXEN_PCI_CAMQM_2M_END (0x04800800UL) | ||
477 | 498 | ||
478 | #define NETXEN_CRB_CAM NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_CAM) | 499 | #define NETXEN_CRB_CAM NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_CAM) |
479 | 500 | ||
@@ -484,7 +505,14 @@ enum { | |||
484 | #define NETXEN_ADDR_OCM1 (0x0000000200400000ULL) | 505 | #define NETXEN_ADDR_OCM1 (0x0000000200400000ULL) |
485 | #define NETXEN_ADDR_OCM1_MAX (0x00000002004fffffULL) | 506 | #define NETXEN_ADDR_OCM1_MAX (0x00000002004fffffULL) |
486 | #define NETXEN_ADDR_QDR_NET (0x0000000300000000ULL) | 507 | #define NETXEN_ADDR_QDR_NET (0x0000000300000000ULL) |
487 | #define NETXEN_ADDR_QDR_NET_MAX (0x00000003003fffffULL) | 508 | #define NETXEN_ADDR_QDR_NET_MAX_P2 (0x00000003003fffffULL) |
509 | #define NETXEN_ADDR_QDR_NET_MAX_P3 (0x0000000303ffffffULL) | ||
510 | |||
511 | /* | ||
512 | * Register offsets for MN | ||
513 | */ | ||
514 | #define NETXEN_MIU_CONTROL (0x000) | ||
515 | #define NETXEN_MIU_MN_CONTROL (NETXEN_CRB_DDR_NET+NETXEN_MIU_CONTROL) | ||
488 | 516 | ||
489 | /* 200ms delay in each loop */ | 517 | /* 200ms delay in each loop */ |
490 | #define NETXEN_NIU_PHY_WAITLEN 200000 | 518 | #define NETXEN_NIU_PHY_WAITLEN 200000 |
@@ -550,6 +578,9 @@ enum { | |||
550 | #define NETXEN_MULTICAST_ADDR_HI_2 (NETXEN_CRB_NIU + 0x1018) | 578 | #define NETXEN_MULTICAST_ADDR_HI_2 (NETXEN_CRB_NIU + 0x1018) |
551 | #define NETXEN_MULTICAST_ADDR_HI_3 (NETXEN_CRB_NIU + 0x101c) | 579 | #define NETXEN_MULTICAST_ADDR_HI_3 (NETXEN_CRB_NIU + 0x101c) |
552 | 580 | ||
581 | #define NETXEN_UNICAST_ADDR_BASE (NETXEN_CRB_NIU + 0x1080) | ||
582 | #define NETXEN_MULTICAST_ADDR_BASE (NETXEN_CRB_NIU + 0x1100) | ||
583 | |||
553 | #define NETXEN_NIU_GB_MAC_CONFIG_0(I) \ | 584 | #define NETXEN_NIU_GB_MAC_CONFIG_0(I) \ |
554 | (NETXEN_CRB_NIU + 0x30000 + (I)*0x10000) | 585 | (NETXEN_CRB_NIU + 0x30000 + (I)*0x10000) |
555 | #define NETXEN_NIU_GB_MAC_CONFIG_1(I) \ | 586 | #define NETXEN_NIU_GB_MAC_CONFIG_1(I) \ |
@@ -630,16 +661,76 @@ enum { | |||
630 | #define NETXEN_NIU_XG1_CONTROL_CHAR_CNT (NETXEN_CRB_NIU + 0x80054) | 661 | #define NETXEN_NIU_XG1_CONTROL_CHAR_CNT (NETXEN_CRB_NIU + 0x80054) |
631 | #define NETXEN_NIU_XG1_PAUSE_FRAME_CNT (NETXEN_CRB_NIU + 0x80058) | 662 | #define NETXEN_NIU_XG1_PAUSE_FRAME_CNT (NETXEN_CRB_NIU + 0x80058) |
632 | 663 | ||
664 | /* P3 802.3ap */ | ||
665 | #define NETXEN_NIU_AP_MAC_CONFIG_0(I) (NETXEN_CRB_NIU+0xa0000+(I)*0x10000) | ||
666 | #define NETXEN_NIU_AP_MAC_CONFIG_1(I) (NETXEN_CRB_NIU+0xa0004+(I)*0x10000) | ||
667 | #define NETXEN_NIU_AP_MAC_IPG_IFG(I) (NETXEN_CRB_NIU+0xa0008+(I)*0x10000) | ||
668 | #define NETXEN_NIU_AP_HALF_DUPLEX_CTRL(I) (NETXEN_CRB_NIU+0xa000c+(I)*0x10000) | ||
669 | #define NETXEN_NIU_AP_MAX_FRAME_SIZE(I) (NETXEN_CRB_NIU+0xa0010+(I)*0x10000) | ||
670 | #define NETXEN_NIU_AP_TEST_REG(I) (NETXEN_CRB_NIU+0xa001c+(I)*0x10000) | ||
671 | #define NETXEN_NIU_AP_MII_MGMT_CONFIG(I) (NETXEN_CRB_NIU+0xa0020+(I)*0x10000) | ||
672 | #define NETXEN_NIU_AP_MII_MGMT_COMMAND(I) (NETXEN_CRB_NIU+0xa0024+(I)*0x10000) | ||
673 | #define NETXEN_NIU_AP_MII_MGMT_ADDR(I) (NETXEN_CRB_NIU+0xa0028+(I)*0x10000) | ||
674 | #define NETXEN_NIU_AP_MII_MGMT_CTRL(I) (NETXEN_CRB_NIU+0xa002c+(I)*0x10000) | ||
675 | #define NETXEN_NIU_AP_MII_MGMT_STATUS(I) (NETXEN_CRB_NIU+0xa0030+(I)*0x10000) | ||
676 | #define NETXEN_NIU_AP_MII_MGMT_INDICATE(I) (NETXEN_CRB_NIU+0xa0034+(I)*0x10000) | ||
677 | #define NETXEN_NIU_AP_INTERFACE_CTRL(I) (NETXEN_CRB_NIU+0xa0038+(I)*0x10000) | ||
678 | #define NETXEN_NIU_AP_INTERFACE_STATUS(I) (NETXEN_CRB_NIU+0xa003c+(I)*0x10000) | ||
679 | #define NETXEN_NIU_AP_STATION_ADDR_0(I) (NETXEN_CRB_NIU+0xa0040+(I)*0x10000) | ||
680 | #define NETXEN_NIU_AP_STATION_ADDR_1(I) (NETXEN_CRB_NIU+0xa0044+(I)*0x10000) | ||
681 | |||
682 | /* | ||
683 | * Register offsets for MN | ||
684 | */ | ||
685 | #define MIU_CONTROL (0x000) | ||
686 | #define MIU_TEST_AGT_CTRL (0x090) | ||
687 | #define MIU_TEST_AGT_ADDR_LO (0x094) | ||
688 | #define MIU_TEST_AGT_ADDR_HI (0x098) | ||
689 | #define MIU_TEST_AGT_WRDATA_LO (0x0a0) | ||
690 | #define MIU_TEST_AGT_WRDATA_HI (0x0a4) | ||
691 | #define MIU_TEST_AGT_WRDATA(i) (0x0a0+(4*(i))) | ||
692 | #define MIU_TEST_AGT_RDDATA_LO (0x0a8) | ||
693 | #define MIU_TEST_AGT_RDDATA_HI (0x0ac) | ||
694 | #define MIU_TEST_AGT_RDDATA(i) (0x0a8+(4*(i))) | ||
695 | #define MIU_TEST_AGT_ADDR_MASK 0xfffffff8 | ||
696 | #define MIU_TEST_AGT_UPPER_ADDR(off) (0) | ||
697 | |||
698 | /* MIU_TEST_AGT_CTRL flags. work for SIU as well */ | ||
699 | #define MIU_TA_CTL_START 1 | ||
700 | #define MIU_TA_CTL_ENABLE 2 | ||
701 | #define MIU_TA_CTL_WRITE 4 | ||
702 | #define MIU_TA_CTL_BUSY 8 | ||
703 | |||
704 | #define SIU_TEST_AGT_CTRL (0x060) | ||
705 | #define SIU_TEST_AGT_ADDR_LO (0x064) | ||
706 | #define SIU_TEST_AGT_ADDR_HI (0x078) | ||
707 | #define SIU_TEST_AGT_WRDATA_LO (0x068) | ||
708 | #define SIU_TEST_AGT_WRDATA_HI (0x06c) | ||
709 | #define SIU_TEST_AGT_WRDATA(i) (0x068+(4*(i))) | ||
710 | #define SIU_TEST_AGT_RDDATA_LO (0x070) | ||
711 | #define SIU_TEST_AGT_RDDATA_HI (0x074) | ||
712 | #define SIU_TEST_AGT_RDDATA(i) (0x070+(4*(i))) | ||
713 | |||
714 | #define SIU_TEST_AGT_ADDR_MASK 0x3ffff8 | ||
715 | #define SIU_TEST_AGT_UPPER_ADDR(off) ((off)>>22) | ||
716 | |||
633 | /* XG Link status */ | 717 | /* XG Link status */ |
634 | #define XG_LINK_UP 0x10 | 718 | #define XG_LINK_UP 0x10 |
635 | #define XG_LINK_DOWN 0x20 | 719 | #define XG_LINK_DOWN 0x20 |
636 | 720 | ||
721 | #define XG_LINK_UP_P3 0x01 | ||
722 | #define XG_LINK_DOWN_P3 0x02 | ||
723 | #define XG_LINK_STATE_P3_MASK 0xf | ||
724 | #define XG_LINK_STATE_P3(pcifn,val) \ | ||
725 | (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3_MASK) | ||
726 | |||
637 | #define NETXEN_CAM_RAM_BASE (NETXEN_CRB_CAM + 0x02000) | 727 | #define NETXEN_CAM_RAM_BASE (NETXEN_CRB_CAM + 0x02000) |
638 | #define NETXEN_CAM_RAM(reg) (NETXEN_CAM_RAM_BASE + (reg)) | 728 | #define NETXEN_CAM_RAM(reg) (NETXEN_CAM_RAM_BASE + (reg)) |
639 | #define NETXEN_FW_VERSION_MAJOR (NETXEN_CAM_RAM(0x150)) | 729 | #define NETXEN_FW_VERSION_MAJOR (NETXEN_CAM_RAM(0x150)) |
640 | #define NETXEN_FW_VERSION_MINOR (NETXEN_CAM_RAM(0x154)) | 730 | #define NETXEN_FW_VERSION_MINOR (NETXEN_CAM_RAM(0x154)) |
641 | #define NETXEN_FW_VERSION_SUB (NETXEN_CAM_RAM(0x158)) | 731 | #define NETXEN_FW_VERSION_SUB (NETXEN_CAM_RAM(0x158)) |
642 | #define NETXEN_ROM_LOCK_ID (NETXEN_CAM_RAM(0x100)) | 732 | #define NETXEN_ROM_LOCK_ID (NETXEN_CAM_RAM(0x100)) |
733 | #define NETXEN_CRB_WIN_LOCK_ID (NETXEN_CAM_RAM(0x124)) | ||
643 | 734 | ||
644 | #define NETXEN_PHY_LOCK_ID (NETXEN_CAM_RAM(0x120)) | 735 | #define NETXEN_PHY_LOCK_ID (NETXEN_CAM_RAM(0x120)) |
645 | 736 | ||
@@ -654,30 +745,71 @@ enum { | |||
654 | #define PCIX_INT_VECTOR (0x10100) | 745 | #define PCIX_INT_VECTOR (0x10100) |
655 | #define PCIX_INT_MASK (0x10104) | 746 | #define PCIX_INT_MASK (0x10104) |
656 | 747 | ||
657 | #define PCIX_MN_WINDOW_F0 (0x10200) | ||
658 | #define PCIX_MN_WINDOW(_f) (PCIX_MN_WINDOW_F0 + (0x20 * (_f))) | ||
659 | #define PCIX_MS_WINDOW (0x10204) | ||
660 | #define PCIX_SN_WINDOW_F0 (0x10208) | ||
661 | #define PCIX_SN_WINDOW(_f) (PCIX_SN_WINDOW_F0 + (0x20 * (_f))) | ||
662 | #define PCIX_CRB_WINDOW (0x10210) | 748 | #define PCIX_CRB_WINDOW (0x10210) |
663 | #define PCIX_CRB_WINDOW_F0 (0x10210) | 749 | #define PCIX_CRB_WINDOW_F0 (0x10210) |
664 | #define PCIX_CRB_WINDOW_F1 (0x10230) | 750 | #define PCIX_CRB_WINDOW_F1 (0x10230) |
665 | #define PCIX_CRB_WINDOW_F2 (0x10250) | 751 | #define PCIX_CRB_WINDOW_F2 (0x10250) |
666 | #define PCIX_CRB_WINDOW_F3 (0x10270) | 752 | #define PCIX_CRB_WINDOW_F3 (0x10270) |
753 | #define PCIX_CRB_WINDOW_F4 (0x102ac) | ||
754 | #define PCIX_CRB_WINDOW_F5 (0x102bc) | ||
755 | #define PCIX_CRB_WINDOW_F6 (0x102cc) | ||
756 | #define PCIX_CRB_WINDOW_F7 (0x102dc) | ||
757 | #define PCIE_CRB_WINDOW_REG(func) (((func) < 4) ? \ | ||
758 | (PCIX_CRB_WINDOW_F0 + (0x20 * (func))) :\ | ||
759 | (PCIX_CRB_WINDOW_F4 + (0x10 * ((func)-4)))) | ||
760 | |||
761 | #define PCIX_MN_WINDOW (0x10200) | ||
762 | #define PCIX_MN_WINDOW_F0 (0x10200) | ||
763 | #define PCIX_MN_WINDOW_F1 (0x10220) | ||
764 | #define PCIX_MN_WINDOW_F2 (0x10240) | ||
765 | #define PCIX_MN_WINDOW_F3 (0x10260) | ||
766 | #define PCIX_MN_WINDOW_F4 (0x102a0) | ||
767 | #define PCIX_MN_WINDOW_F5 (0x102b0) | ||
768 | #define PCIX_MN_WINDOW_F6 (0x102c0) | ||
769 | #define PCIX_MN_WINDOW_F7 (0x102d0) | ||
770 | #define PCIE_MN_WINDOW_REG(func) (((func) < 4) ? \ | ||
771 | (PCIX_MN_WINDOW_F0 + (0x20 * (func))) :\ | ||
772 | (PCIX_MN_WINDOW_F4 + (0x10 * ((func)-4)))) | ||
773 | |||
774 | #define PCIX_SN_WINDOW (0x10208) | ||
775 | #define PCIX_SN_WINDOW_F0 (0x10208) | ||
776 | #define PCIX_SN_WINDOW_F1 (0x10228) | ||
777 | #define PCIX_SN_WINDOW_F2 (0x10248) | ||
778 | #define PCIX_SN_WINDOW_F3 (0x10268) | ||
779 | #define PCIX_SN_WINDOW_F4 (0x102a8) | ||
780 | #define PCIX_SN_WINDOW_F5 (0x102b8) | ||
781 | #define PCIX_SN_WINDOW_F6 (0x102c8) | ||
782 | #define PCIX_SN_WINDOW_F7 (0x102d8) | ||
783 | #define PCIE_SN_WINDOW_REG(func) (((func) < 4) ? \ | ||
784 | (PCIX_SN_WINDOW_F0 + (0x20 * (func))) :\ | ||
785 | (PCIX_SN_WINDOW_F4 + (0x10 * ((func)-4)))) | ||
667 | 786 | ||
668 | #define PCIX_TARGET_STATUS (0x10118) | 787 | #define PCIX_TARGET_STATUS (0x10118) |
788 | #define PCIX_TARGET_STATUS_F1 (0x10160) | ||
789 | #define PCIX_TARGET_STATUS_F2 (0x10164) | ||
790 | #define PCIX_TARGET_STATUS_F3 (0x10168) | ||
791 | #define PCIX_TARGET_STATUS_F4 (0x10360) | ||
792 | #define PCIX_TARGET_STATUS_F5 (0x10364) | ||
793 | #define PCIX_TARGET_STATUS_F6 (0x10368) | ||
794 | #define PCIX_TARGET_STATUS_F7 (0x1036c) | ||
795 | |||
669 | #define PCIX_TARGET_MASK (0x10128) | 796 | #define PCIX_TARGET_MASK (0x10128) |
670 | #define PCIX_TARGET_STATUS_F1 (0x10160) | 797 | #define PCIX_TARGET_MASK_F1 (0x10170) |
671 | #define PCIX_TARGET_MASK_F1 (0x10170) | 798 | #define PCIX_TARGET_MASK_F2 (0x10174) |
672 | #define PCIX_TARGET_STATUS_F2 (0x10164) | 799 | #define PCIX_TARGET_MASK_F3 (0x10178) |
673 | #define PCIX_TARGET_MASK_F2 (0x10174) | 800 | #define PCIX_TARGET_MASK_F4 (0x10370) |
674 | #define PCIX_TARGET_STATUS_F3 (0x10168) | 801 | #define PCIX_TARGET_MASK_F5 (0x10374) |
675 | #define PCIX_TARGET_MASK_F3 (0x10178) | 802 | #define PCIX_TARGET_MASK_F6 (0x10378) |
803 | #define PCIX_TARGET_MASK_F7 (0x1037c) | ||
676 | 804 | ||
677 | #define PCIX_MSI_F0 (0x13000) | 805 | #define PCIX_MSI_F0 (0x13000) |
678 | #define PCIX_MSI_F1 (0x13004) | 806 | #define PCIX_MSI_F1 (0x13004) |
679 | #define PCIX_MSI_F2 (0x13008) | 807 | #define PCIX_MSI_F2 (0x13008) |
680 | #define PCIX_MSI_F3 (0x1300c) | 808 | #define PCIX_MSI_F3 (0x1300c) |
809 | #define PCIX_MSI_F4 (0x13010) | ||
810 | #define PCIX_MSI_F5 (0x13014) | ||
811 | #define PCIX_MSI_F6 (0x13018) | ||
812 | #define PCIX_MSI_F7 (0x1301c) | ||
681 | #define PCIX_MSI_F(i) (0x13000+((i)*4)) | 813 | #define PCIX_MSI_F(i) (0x13000+((i)*4)) |
682 | 814 | ||
683 | #define PCIX_PS_MEM_SPACE (0x90000) | 815 | #define PCIX_PS_MEM_SPACE (0x90000) |
@@ -695,11 +827,102 @@ enum { | |||
695 | #define PCIE_SEM2_UNLOCK (0x1c014) /* Flash unlock */ | 827 | #define PCIE_SEM2_UNLOCK (0x1c014) /* Flash unlock */ |
696 | #define PCIE_SEM3_LOCK (0x1c018) /* Phy lock */ | 828 | #define PCIE_SEM3_LOCK (0x1c018) /* Phy lock */ |
697 | #define PCIE_SEM3_UNLOCK (0x1c01c) /* Phy unlock */ | 829 | #define PCIE_SEM3_UNLOCK (0x1c01c) /* Phy unlock */ |
698 | 830 | #define PCIE_SEM5_LOCK (0x1c028) /* API lock */ | |
831 | #define PCIE_SEM5_UNLOCK (0x1c02c) /* API unlock */ | ||
832 | #define PCIE_SEM6_LOCK (0x1c030) /* sw lock */ | ||
833 | #define PCIE_SEM6_UNLOCK (0x1c034) /* sw unlock */ | ||
834 | #define PCIE_SEM7_LOCK (0x1c038) /* crb win lock */ | ||
835 | #define PCIE_SEM7_UNLOCK (0x1c03c) /* crbwin unlock*/ | ||
836 | |||
837 | #define PCIE_SETUP_FUNCTION (0x12040) | ||
838 | #define PCIE_SETUP_FUNCTION2 (0x12048) | ||
699 | #define PCIE_TGT_SPLIT_CHICKEN (0x12080) | 839 | #define PCIE_TGT_SPLIT_CHICKEN (0x12080) |
840 | #define PCIE_CHICKEN3 (0x120c8) | ||
700 | 841 | ||
701 | #define PCIE_MAX_MASTER_SPLIT (0x14048) | 842 | #define PCIE_MAX_MASTER_SPLIT (0x14048) |
702 | 843 | ||
844 | #define NETXEN_PORT_MODE_NONE 0 | ||
845 | #define NETXEN_PORT_MODE_XG 1 | ||
846 | #define NETXEN_PORT_MODE_GB 2 | ||
847 | #define NETXEN_PORT_MODE_802_3_AP 3 | ||
848 | #define NETXEN_PORT_MODE_AUTO_NEG 4 | ||
849 | #define NETXEN_PORT_MODE_AUTO_NEG_1G 5 | ||
850 | #define NETXEN_PORT_MODE_AUTO_NEG_XG 6 | ||
851 | #define NETXEN_PORT_MODE_ADDR (NETXEN_CAM_RAM(0x24)) | ||
852 | #define NETXEN_WOL_PORT_MODE (NETXEN_CAM_RAM(0x198)) | ||
853 | |||
703 | #define NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL (0x14) | 854 | #define NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL (0x14) |
704 | 855 | ||
856 | #define ISR_MSI_INT_TRIGGER(FUNC) (NETXEN_PCIX_PS_REG(PCIX_MSI_F(FUNC))) | ||
857 | |||
858 | /* | ||
859 | * PCI Interrupt Vector Values. | ||
860 | */ | ||
861 | #define PCIX_INT_VECTOR_BIT_F0 0x0080 | ||
862 | #define PCIX_INT_VECTOR_BIT_F1 0x0100 | ||
863 | #define PCIX_INT_VECTOR_BIT_F2 0x0200 | ||
864 | #define PCIX_INT_VECTOR_BIT_F3 0x0400 | ||
865 | #define PCIX_INT_VECTOR_BIT_F4 0x0800 | ||
866 | #define PCIX_INT_VECTOR_BIT_F5 0x1000 | ||
867 | #define PCIX_INT_VECTOR_BIT_F6 0x2000 | ||
868 | #define PCIX_INT_VECTOR_BIT_F7 0x4000 | ||
869 | |||
870 | struct netxen_legacy_intr_set { | ||
871 | uint32_t int_vec_bit; | ||
872 | uint32_t tgt_status_reg; | ||
873 | uint32_t tgt_mask_reg; | ||
874 | uint32_t pci_int_reg; | ||
875 | }; | ||
876 | |||
877 | #define NX_LEGACY_INTR_CONFIG \ | ||
878 | { \ | ||
879 | { \ | ||
880 | .int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \ | ||
881 | .tgt_status_reg = ISR_INT_TARGET_STATUS, \ | ||
882 | .tgt_mask_reg = ISR_INT_TARGET_MASK, \ | ||
883 | .pci_int_reg = ISR_MSI_INT_TRIGGER(0) }, \ | ||
884 | \ | ||
885 | { \ | ||
886 | .int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \ | ||
887 | .tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \ | ||
888 | .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, \ | ||
889 | .pci_int_reg = ISR_MSI_INT_TRIGGER(1) }, \ | ||
890 | \ | ||
891 | { \ | ||
892 | .int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \ | ||
893 | .tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \ | ||
894 | .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, \ | ||
895 | .pci_int_reg = ISR_MSI_INT_TRIGGER(2) }, \ | ||
896 | \ | ||
897 | { \ | ||
898 | .int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \ | ||
899 | .tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \ | ||
900 | .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, \ | ||
901 | .pci_int_reg = ISR_MSI_INT_TRIGGER(3) }, \ | ||
902 | \ | ||
903 | { \ | ||
904 | .int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \ | ||
905 | .tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \ | ||
906 | .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, \ | ||
907 | .pci_int_reg = ISR_MSI_INT_TRIGGER(4) }, \ | ||
908 | \ | ||
909 | { \ | ||
910 | .int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \ | ||
911 | .tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \ | ||
912 | .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, \ | ||
913 | .pci_int_reg = ISR_MSI_INT_TRIGGER(5) }, \ | ||
914 | \ | ||
915 | { \ | ||
916 | .int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \ | ||
917 | .tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \ | ||
918 | .tgt_mask_reg = ISR_INT_TARGET_MASK_F6, \ | ||
919 | .pci_int_reg = ISR_MSI_INT_TRIGGER(6) }, \ | ||
920 | \ | ||
921 | { \ | ||
922 | .int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \ | ||
923 | .tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \ | ||
924 | .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, \ | ||
925 | .pci_int_reg = ISR_MSI_INT_TRIGGER(7) }, \ | ||
926 | } | ||
927 | |||
705 | #endif /* __NETXEN_NIC_HDR_H_ */ | 928 | #endif /* __NETXEN_NIC_HDR_H_ */ |
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c index c43d06b8de9b..96a3bc6426e2 100644 --- a/drivers/net/netxen/netxen_nic_hw.c +++ b/drivers/net/netxen/netxen_nic_hw.c | |||
@@ -38,242 +38,262 @@ | |||
38 | 38 | ||
39 | #include <net/ip.h> | 39 | #include <net/ip.h> |
40 | 40 | ||
41 | struct netxen_recv_crb recv_crb_registers[] = { | 41 | #define MASK(n) ((1ULL<<(n))-1) |
42 | /* | 42 | #define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff)) |
43 | * Instance 0. | 43 | #define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff)) |
44 | */ | 44 | #define MS_WIN(addr) (addr & 0x0ffc0000) |
45 | { | 45 | |
46 | /* rcv_desc_crb: */ | 46 | #define GET_MEM_OFFS_2M(addr) (addr & MASK(18)) |
47 | { | 47 | |
48 | { | 48 | #define CRB_BLK(off) ((off >> 20) & 0x3f) |
49 | /* crb_rcv_producer_offset: */ | 49 | #define CRB_SUBBLK(off) ((off >> 16) & 0xf) |
50 | NETXEN_NIC_REG(0x100), | 50 | #define CRB_WINDOW_2M (0x130060) |
51 | /* crb_rcv_consumer_offset: */ | 51 | #define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000)) |
52 | NETXEN_NIC_REG(0x104), | 52 | #define CRB_INDIRECT_2M (0x1e0000UL) |
53 | /* crb_gloablrcv_ring: */ | 53 | |
54 | NETXEN_NIC_REG(0x108), | 54 | #define CRB_WIN_LOCK_TIMEOUT 100000000 |
55 | /* crb_rcv_ring_size */ | 55 | static crb_128M_2M_block_map_t crb_128M_2M_map[64] = { |
56 | NETXEN_NIC_REG(0x10c), | 56 | {{{0, 0, 0, 0} } }, /* 0: PCI */ |
57 | 57 | {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */ | |
58 | }, | 58 | {1, 0x0110000, 0x0120000, 0x130000}, |
59 | /* Jumbo frames */ | 59 | {1, 0x0120000, 0x0122000, 0x124000}, |
60 | { | 60 | {1, 0x0130000, 0x0132000, 0x126000}, |
61 | /* crb_rcv_producer_offset: */ | 61 | {1, 0x0140000, 0x0142000, 0x128000}, |
62 | NETXEN_NIC_REG(0x110), | 62 | {1, 0x0150000, 0x0152000, 0x12a000}, |
63 | /* crb_rcv_consumer_offset: */ | 63 | {1, 0x0160000, 0x0170000, 0x110000}, |
64 | NETXEN_NIC_REG(0x114), | 64 | {1, 0x0170000, 0x0172000, 0x12e000}, |
65 | /* crb_gloablrcv_ring: */ | 65 | {0, 0x0000000, 0x0000000, 0x000000}, |
66 | NETXEN_NIC_REG(0x118), | 66 | {0, 0x0000000, 0x0000000, 0x000000}, |
67 | /* crb_rcv_ring_size */ | 67 | {0, 0x0000000, 0x0000000, 0x000000}, |
68 | NETXEN_NIC_REG(0x11c), | 68 | {0, 0x0000000, 0x0000000, 0x000000}, |
69 | }, | 69 | {0, 0x0000000, 0x0000000, 0x000000}, |
70 | /* LRO */ | 70 | {0, 0x0000000, 0x0000000, 0x000000}, |
71 | { | 71 | {1, 0x01e0000, 0x01e0800, 0x122000}, |
72 | /* crb_rcv_producer_offset: */ | 72 | {0, 0x0000000, 0x0000000, 0x000000} } }, |
73 | NETXEN_NIC_REG(0x120), | 73 | {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */ |
74 | /* crb_rcv_consumer_offset: */ | 74 | {{{0, 0, 0, 0} } }, /* 3: */ |
75 | NETXEN_NIC_REG(0x124), | 75 | {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */ |
76 | /* crb_gloablrcv_ring: */ | 76 | {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */ |
77 | NETXEN_NIC_REG(0x128), | 77 | {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */ |
78 | /* crb_rcv_ring_size */ | 78 | {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */ |
79 | NETXEN_NIC_REG(0x12c), | 79 | {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */ |
80 | } | 80 | {0, 0x0000000, 0x0000000, 0x000000}, |
81 | }, | 81 | {0, 0x0000000, 0x0000000, 0x000000}, |
82 | /* crb_rcvstatus_ring: */ | 82 | {0, 0x0000000, 0x0000000, 0x000000}, |
83 | NETXEN_NIC_REG(0x130), | 83 | {0, 0x0000000, 0x0000000, 0x000000}, |
84 | /* crb_rcv_status_producer: */ | 84 | {0, 0x0000000, 0x0000000, 0x000000}, |
85 | NETXEN_NIC_REG(0x134), | 85 | {0, 0x0000000, 0x0000000, 0x000000}, |
86 | /* crb_rcv_status_consumer: */ | 86 | {0, 0x0000000, 0x0000000, 0x000000}, |
87 | NETXEN_NIC_REG(0x138), | 87 | {0, 0x0000000, 0x0000000, 0x000000}, |
88 | /* crb_rcvpeg_state: */ | 88 | {0, 0x0000000, 0x0000000, 0x000000}, |
89 | NETXEN_NIC_REG(0x13c), | 89 | {0, 0x0000000, 0x0000000, 0x000000}, |
90 | /* crb_status_ring_size */ | 90 | {0, 0x0000000, 0x0000000, 0x000000}, |
91 | NETXEN_NIC_REG(0x140), | 91 | {0, 0x0000000, 0x0000000, 0x000000}, |
92 | 92 | {0, 0x0000000, 0x0000000, 0x000000}, | |
93 | }, | 93 | {0, 0x0000000, 0x0000000, 0x000000}, |
94 | /* | 94 | {1, 0x08f0000, 0x08f2000, 0x172000} } }, |
95 | * Instance 1, | 95 | {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/ |
96 | */ | 96 | {0, 0x0000000, 0x0000000, 0x000000}, |
97 | { | 97 | {0, 0x0000000, 0x0000000, 0x000000}, |
98 | /* rcv_desc_crb: */ | 98 | {0, 0x0000000, 0x0000000, 0x000000}, |
99 | { | 99 | {0, 0x0000000, 0x0000000, 0x000000}, |
100 | { | 100 | {0, 0x0000000, 0x0000000, 0x000000}, |
101 | /* crb_rcv_producer_offset: */ | 101 | {0, 0x0000000, 0x0000000, 0x000000}, |
102 | NETXEN_NIC_REG(0x144), | 102 | {0, 0x0000000, 0x0000000, 0x000000}, |
103 | /* crb_rcv_consumer_offset: */ | 103 | {0, 0x0000000, 0x0000000, 0x000000}, |
104 | NETXEN_NIC_REG(0x148), | 104 | {0, 0x0000000, 0x0000000, 0x000000}, |
105 | /* crb_globalrcv_ring: */ | 105 | {0, 0x0000000, 0x0000000, 0x000000}, |
106 | NETXEN_NIC_REG(0x14c), | 106 | {0, 0x0000000, 0x0000000, 0x000000}, |
107 | /* crb_rcv_ring_size */ | 107 | {0, 0x0000000, 0x0000000, 0x000000}, |
108 | NETXEN_NIC_REG(0x150), | 108 | {0, 0x0000000, 0x0000000, 0x000000}, |
109 | 109 | {0, 0x0000000, 0x0000000, 0x000000}, | |
110 | }, | 110 | {1, 0x09f0000, 0x09f2000, 0x176000} } }, |
111 | /* Jumbo frames */ | 111 | {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/ |
112 | { | 112 | {0, 0x0000000, 0x0000000, 0x000000}, |
113 | /* crb_rcv_producer_offset: */ | 113 | {0, 0x0000000, 0x0000000, 0x000000}, |
114 | NETXEN_NIC_REG(0x154), | 114 | {0, 0x0000000, 0x0000000, 0x000000}, |
115 | /* crb_rcv_consumer_offset: */ | 115 | {0, 0x0000000, 0x0000000, 0x000000}, |
116 | NETXEN_NIC_REG(0x158), | 116 | {0, 0x0000000, 0x0000000, 0x000000}, |
117 | /* crb_globalrcv_ring: */ | 117 | {0, 0x0000000, 0x0000000, 0x000000}, |
118 | NETXEN_NIC_REG(0x15c), | 118 | {0, 0x0000000, 0x0000000, 0x000000}, |
119 | /* crb_rcv_ring_size */ | 119 | {0, 0x0000000, 0x0000000, 0x000000}, |
120 | NETXEN_NIC_REG(0x160), | 120 | {0, 0x0000000, 0x0000000, 0x000000}, |
121 | }, | 121 | {0, 0x0000000, 0x0000000, 0x000000}, |
122 | /* LRO */ | 122 | {0, 0x0000000, 0x0000000, 0x000000}, |
123 | { | 123 | {0, 0x0000000, 0x0000000, 0x000000}, |
124 | /* crb_rcv_producer_offset: */ | 124 | {0, 0x0000000, 0x0000000, 0x000000}, |
125 | NETXEN_NIC_REG(0x164), | 125 | {0, 0x0000000, 0x0000000, 0x000000}, |
126 | /* crb_rcv_consumer_offset: */ | 126 | {1, 0x0af0000, 0x0af2000, 0x17a000} } }, |
127 | NETXEN_NIC_REG(0x168), | 127 | {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/ |
128 | /* crb_globalrcv_ring: */ | 128 | {0, 0x0000000, 0x0000000, 0x000000}, |
129 | NETXEN_NIC_REG(0x16c), | 129 | {0, 0x0000000, 0x0000000, 0x000000}, |
130 | /* crb_rcv_ring_size */ | 130 | {0, 0x0000000, 0x0000000, 0x000000}, |
131 | NETXEN_NIC_REG(0x170), | 131 | {0, 0x0000000, 0x0000000, 0x000000}, |
132 | } | 132 | {0, 0x0000000, 0x0000000, 0x000000}, |
133 | 133 | {0, 0x0000000, 0x0000000, 0x000000}, | |
134 | }, | 134 | {0, 0x0000000, 0x0000000, 0x000000}, |
135 | /* crb_rcvstatus_ring: */ | 135 | {0, 0x0000000, 0x0000000, 0x000000}, |
136 | NETXEN_NIC_REG(0x174), | 136 | {0, 0x0000000, 0x0000000, 0x000000}, |
137 | /* crb_rcv_status_producer: */ | 137 | {0, 0x0000000, 0x0000000, 0x000000}, |
138 | NETXEN_NIC_REG(0x178), | 138 | {0, 0x0000000, 0x0000000, 0x000000}, |
139 | /* crb_rcv_status_consumer: */ | 139 | {0, 0x0000000, 0x0000000, 0x000000}, |
140 | NETXEN_NIC_REG(0x17c), | 140 | {0, 0x0000000, 0x0000000, 0x000000}, |
141 | /* crb_rcvpeg_state: */ | 141 | {0, 0x0000000, 0x0000000, 0x000000}, |
142 | NETXEN_NIC_REG(0x180), | 142 | {1, 0x0bf0000, 0x0bf2000, 0x17e000} } }, |
143 | /* crb_status_ring_size */ | 143 | {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */ |
144 | NETXEN_NIC_REG(0x184), | 144 | {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */ |
145 | }, | 145 | {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */ |
146 | /* | 146 | {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */ |
147 | * Instance 2, | 147 | {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */ |
148 | */ | 148 | {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */ |
149 | { | 149 | {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */ |
150 | { | 150 | {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */ |
151 | { | 151 | {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */ |
152 | /* crb_rcv_producer_offset: */ | 152 | {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */ |
153 | NETXEN_NIC_REG(0x1d8), | 153 | {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */ |
154 | /* crb_rcv_consumer_offset: */ | 154 | {{{0, 0, 0, 0} } }, /* 23: */ |
155 | NETXEN_NIC_REG(0x1dc), | 155 | {{{0, 0, 0, 0} } }, /* 24: */ |
156 | /* crb_gloablrcv_ring: */ | 156 | {{{0, 0, 0, 0} } }, /* 25: */ |
157 | NETXEN_NIC_REG(0x1f0), | 157 | {{{0, 0, 0, 0} } }, /* 26: */ |
158 | /* crb_rcv_ring_size */ | 158 | {{{0, 0, 0, 0} } }, /* 27: */ |
159 | NETXEN_NIC_REG(0x1f4), | 159 | {{{0, 0, 0, 0} } }, /* 28: */ |
160 | }, | 160 | {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */ |
161 | /* Jumbo frames */ | 161 | {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */ |
162 | { | 162 | {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */ |
163 | /* crb_rcv_producer_offset: */ | 163 | {{{0} } }, /* 32: PCI */ |
164 | NETXEN_NIC_REG(0x1f8), | 164 | {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */ |
165 | /* crb_rcv_consumer_offset: */ | 165 | {1, 0x2110000, 0x2120000, 0x130000}, |
166 | NETXEN_NIC_REG(0x1fc), | 166 | {1, 0x2120000, 0x2122000, 0x124000}, |
167 | /* crb_gloablrcv_ring: */ | 167 | {1, 0x2130000, 0x2132000, 0x126000}, |
168 | NETXEN_NIC_REG(0x200), | 168 | {1, 0x2140000, 0x2142000, 0x128000}, |
169 | /* crb_rcv_ring_size */ | 169 | {1, 0x2150000, 0x2152000, 0x12a000}, |
170 | NETXEN_NIC_REG(0x204), | 170 | {1, 0x2160000, 0x2170000, 0x110000}, |
171 | }, | 171 | {1, 0x2170000, 0x2172000, 0x12e000}, |
172 | /* LRO */ | 172 | {0, 0x0000000, 0x0000000, 0x000000}, |
173 | { | 173 | {0, 0x0000000, 0x0000000, 0x000000}, |
174 | /* crb_rcv_producer_offset: */ | 174 | {0, 0x0000000, 0x0000000, 0x000000}, |
175 | NETXEN_NIC_REG(0x208), | 175 | {0, 0x0000000, 0x0000000, 0x000000}, |
176 | /* crb_rcv_consumer_offset: */ | 176 | {0, 0x0000000, 0x0000000, 0x000000}, |
177 | NETXEN_NIC_REG(0x20c), | 177 | {0, 0x0000000, 0x0000000, 0x000000}, |
178 | /* crb_gloablrcv_ring: */ | 178 | {0, 0x0000000, 0x0000000, 0x000000}, |
179 | NETXEN_NIC_REG(0x210), | 179 | {0, 0x0000000, 0x0000000, 0x000000} } }, |
180 | /* crb_rcv_ring_size */ | 180 | {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */ |
181 | NETXEN_NIC_REG(0x214), | 181 | {{{0} } }, /* 35: */ |
182 | } | 182 | {{{0} } }, /* 36: */ |
183 | }, | 183 | {{{0} } }, /* 37: */ |
184 | /* crb_rcvstatus_ring: */ | 184 | {{{0} } }, /* 38: */ |
185 | NETXEN_NIC_REG(0x218), | 185 | {{{0} } }, /* 39: */ |
186 | /* crb_rcv_status_producer: */ | 186 | {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */ |
187 | NETXEN_NIC_REG(0x21c), | 187 | {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */ |
188 | /* crb_rcv_status_consumer: */ | 188 | {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */ |
189 | NETXEN_NIC_REG(0x220), | 189 | {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */ |
190 | /* crb_rcvpeg_state: */ | 190 | {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */ |
191 | NETXEN_NIC_REG(0x224), | 191 | {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */ |
192 | /* crb_status_ring_size */ | 192 | {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */ |
193 | NETXEN_NIC_REG(0x228), | 193 | {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */ |
194 | }, | 194 | {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */ |
195 | /* | 195 | {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */ |
196 | * Instance 3, | 196 | {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */ |
197 | */ | 197 | {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */ |
198 | { | 198 | {{{0} } }, /* 52: */ |
199 | { | 199 | {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */ |
200 | { | 200 | {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */ |
201 | /* crb_rcv_producer_offset: */ | 201 | {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */ |
202 | NETXEN_NIC_REG(0x22c), | 202 | {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */ |
203 | /* crb_rcv_consumer_offset: */ | 203 | {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */ |
204 | NETXEN_NIC_REG(0x230), | 204 | {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */ |
205 | /* crb_gloablrcv_ring: */ | 205 | {{{0} } }, /* 59: I2C0 */ |
206 | NETXEN_NIC_REG(0x234), | 206 | {{{0} } }, /* 60: I2C1 */ |
207 | /* crb_rcv_ring_size */ | 207 | {{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */ |
208 | NETXEN_NIC_REG(0x238), | 208 | {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */ |
209 | }, | 209 | {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */ |
210 | /* Jumbo frames */ | ||
211 | { | ||
212 | /* crb_rcv_producer_offset: */ | ||
213 | NETXEN_NIC_REG(0x23c), | ||
214 | /* crb_rcv_consumer_offset: */ | ||
215 | NETXEN_NIC_REG(0x240), | ||
216 | /* crb_gloablrcv_ring: */ | ||
217 | NETXEN_NIC_REG(0x244), | ||
218 | /* crb_rcv_ring_size */ | ||
219 | NETXEN_NIC_REG(0x248), | ||
220 | }, | ||
221 | /* LRO */ | ||
222 | { | ||
223 | /* crb_rcv_producer_offset: */ | ||
224 | NETXEN_NIC_REG(0x24c), | ||
225 | /* crb_rcv_consumer_offset: */ | ||
226 | NETXEN_NIC_REG(0x250), | ||
227 | /* crb_gloablrcv_ring: */ | ||
228 | NETXEN_NIC_REG(0x254), | ||
229 | /* crb_rcv_ring_size */ | ||
230 | NETXEN_NIC_REG(0x258), | ||
231 | } | ||
232 | }, | ||
233 | /* crb_rcvstatus_ring: */ | ||
234 | NETXEN_NIC_REG(0x25c), | ||
235 | /* crb_rcv_status_producer: */ | ||
236 | NETXEN_NIC_REG(0x260), | ||
237 | /* crb_rcv_status_consumer: */ | ||
238 | NETXEN_NIC_REG(0x264), | ||
239 | /* crb_rcvpeg_state: */ | ||
240 | NETXEN_NIC_REG(0x268), | ||
241 | /* crb_status_ring_size */ | ||
242 | NETXEN_NIC_REG(0x26c), | ||
243 | }, | ||
244 | }; | 210 | }; |
245 | 211 | ||
246 | static u64 ctx_addr_sig_regs[][3] = { | 212 | /* |
247 | {NETXEN_NIC_REG(0x188), NETXEN_NIC_REG(0x18c), NETXEN_NIC_REG(0x1c0)}, | 213 | * top 12 bits of crb internal address (hub, agent) |
248 | {NETXEN_NIC_REG(0x190), NETXEN_NIC_REG(0x194), NETXEN_NIC_REG(0x1c4)}, | 214 | */ |
249 | {NETXEN_NIC_REG(0x198), NETXEN_NIC_REG(0x19c), NETXEN_NIC_REG(0x1c8)}, | 215 | static unsigned crb_hub_agt[64] = |
250 | {NETXEN_NIC_REG(0x1a0), NETXEN_NIC_REG(0x1a4), NETXEN_NIC_REG(0x1cc)} | 216 | { |
217 | 0, | ||
218 | NETXEN_HW_CRB_HUB_AGT_ADR_PS, | ||
219 | NETXEN_HW_CRB_HUB_AGT_ADR_MN, | ||
220 | NETXEN_HW_CRB_HUB_AGT_ADR_MS, | ||
221 | 0, | ||
222 | NETXEN_HW_CRB_HUB_AGT_ADR_SRE, | ||
223 | NETXEN_HW_CRB_HUB_AGT_ADR_NIU, | ||
224 | NETXEN_HW_CRB_HUB_AGT_ADR_QMN, | ||
225 | NETXEN_HW_CRB_HUB_AGT_ADR_SQN0, | ||
226 | NETXEN_HW_CRB_HUB_AGT_ADR_SQN1, | ||
227 | NETXEN_HW_CRB_HUB_AGT_ADR_SQN2, | ||
228 | NETXEN_HW_CRB_HUB_AGT_ADR_SQN3, | ||
229 | NETXEN_HW_CRB_HUB_AGT_ADR_I2Q, | ||
230 | NETXEN_HW_CRB_HUB_AGT_ADR_TIMR, | ||
231 | NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB, | ||
232 | NETXEN_HW_CRB_HUB_AGT_ADR_PGN4, | ||
233 | NETXEN_HW_CRB_HUB_AGT_ADR_XDMA, | ||
234 | NETXEN_HW_CRB_HUB_AGT_ADR_PGN0, | ||
235 | NETXEN_HW_CRB_HUB_AGT_ADR_PGN1, | ||
236 | NETXEN_HW_CRB_HUB_AGT_ADR_PGN2, | ||
237 | NETXEN_HW_CRB_HUB_AGT_ADR_PGN3, | ||
238 | NETXEN_HW_CRB_HUB_AGT_ADR_PGND, | ||
239 | NETXEN_HW_CRB_HUB_AGT_ADR_PGNI, | ||
240 | NETXEN_HW_CRB_HUB_AGT_ADR_PGS0, | ||
241 | NETXEN_HW_CRB_HUB_AGT_ADR_PGS1, | ||
242 | NETXEN_HW_CRB_HUB_AGT_ADR_PGS2, | ||
243 | NETXEN_HW_CRB_HUB_AGT_ADR_PGS3, | ||
244 | 0, | ||
245 | NETXEN_HW_CRB_HUB_AGT_ADR_PGSI, | ||
246 | NETXEN_HW_CRB_HUB_AGT_ADR_SN, | ||
247 | 0, | ||
248 | NETXEN_HW_CRB_HUB_AGT_ADR_EG, | ||
249 | 0, | ||
250 | NETXEN_HW_CRB_HUB_AGT_ADR_PS, | ||
251 | NETXEN_HW_CRB_HUB_AGT_ADR_CAM, | ||
252 | 0, | ||
253 | 0, | ||
254 | 0, | ||
255 | 0, | ||
256 | 0, | ||
257 | NETXEN_HW_CRB_HUB_AGT_ADR_TIMR, | ||
258 | 0, | ||
259 | NETXEN_HW_CRB_HUB_AGT_ADR_RPMX1, | ||
260 | NETXEN_HW_CRB_HUB_AGT_ADR_RPMX2, | ||
261 | NETXEN_HW_CRB_HUB_AGT_ADR_RPMX3, | ||
262 | NETXEN_HW_CRB_HUB_AGT_ADR_RPMX4, | ||
263 | NETXEN_HW_CRB_HUB_AGT_ADR_RPMX5, | ||
264 | NETXEN_HW_CRB_HUB_AGT_ADR_RPMX6, | ||
265 | NETXEN_HW_CRB_HUB_AGT_ADR_RPMX7, | ||
266 | NETXEN_HW_CRB_HUB_AGT_ADR_XDMA, | ||
267 | NETXEN_HW_CRB_HUB_AGT_ADR_I2Q, | ||
268 | NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB, | ||
269 | 0, | ||
270 | NETXEN_HW_CRB_HUB_AGT_ADR_RPMX0, | ||
271 | NETXEN_HW_CRB_HUB_AGT_ADR_RPMX8, | ||
272 | NETXEN_HW_CRB_HUB_AGT_ADR_RPMX9, | ||
273 | NETXEN_HW_CRB_HUB_AGT_ADR_OCM0, | ||
274 | 0, | ||
275 | NETXEN_HW_CRB_HUB_AGT_ADR_SMB, | ||
276 | NETXEN_HW_CRB_HUB_AGT_ADR_I2C0, | ||
277 | NETXEN_HW_CRB_HUB_AGT_ADR_I2C1, | ||
278 | 0, | ||
279 | NETXEN_HW_CRB_HUB_AGT_ADR_PGNC, | ||
280 | 0, | ||
251 | }; | 281 | }; |
252 | #define CRB_CTX_ADDR_REG_LO(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][0]) | ||
253 | #define CRB_CTX_ADDR_REG_HI(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][2]) | ||
254 | #define CRB_CTX_SIGNATURE_REG(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][1]) | ||
255 | |||
256 | 282 | ||
257 | /* PCI Windowing for DDR regions. */ | 283 | /* PCI Windowing for DDR regions. */ |
258 | 284 | ||
259 | #define ADDR_IN_RANGE(addr, low, high) \ | 285 | #define ADDR_IN_RANGE(addr, low, high) \ |
260 | (((addr) <= (high)) && ((addr) >= (low))) | 286 | (((addr) <= (high)) && ((addr) >= (low))) |
261 | 287 | ||
262 | #define NETXEN_FLASH_BASE (NETXEN_BOOTLD_START) | ||
263 | #define NETXEN_PHANTOM_MEM_BASE (NETXEN_FLASH_BASE) | ||
264 | #define NETXEN_MAX_MTU 8000 + NETXEN_ENET_HEADER_SIZE + NETXEN_ETH_FCS_SIZE | 288 | #define NETXEN_MAX_MTU 8000 + NETXEN_ENET_HEADER_SIZE + NETXEN_ETH_FCS_SIZE |
265 | #define NETXEN_MIN_MTU 64 | 289 | #define NETXEN_MIN_MTU 64 |
266 | #define NETXEN_ETH_FCS_SIZE 4 | 290 | #define NETXEN_ETH_FCS_SIZE 4 |
267 | #define NETXEN_ENET_HEADER_SIZE 14 | 291 | #define NETXEN_ENET_HEADER_SIZE 14 |
268 | #define NETXEN_WINDOW_ONE 0x2000000 /*CRB Window: bit 25 of CRB address */ | 292 | #define NETXEN_WINDOW_ONE 0x2000000 /*CRB Window: bit 25 of CRB address */ |
269 | #define NETXEN_FIRMWARE_LEN ((16 * 1024) / 4) | 293 | #define NETXEN_FIRMWARE_LEN ((16 * 1024) / 4) |
270 | #define NETXEN_NIU_HDRSIZE (0x1 << 6) | 294 | #define NETXEN_NIU_HDRSIZE (0x1 << 6) |
271 | #define NETXEN_NIU_TLRSIZE (0x1 << 5) | 295 | #define NETXEN_NIU_TLRSIZE (0x1 << 5) |
272 | 296 | ||
273 | #define lower32(x) ((u32)((x) & 0xffffffff)) | ||
274 | #define upper32(x) \ | ||
275 | ((u32)(((unsigned long long)(x) >> 32) & 0xffffffff)) | ||
276 | |||
277 | #define NETXEN_NIC_ZERO_PAUSE_ADDR 0ULL | 297 | #define NETXEN_NIC_ZERO_PAUSE_ADDR 0ULL |
278 | #define NETXEN_NIC_UNIT_PAUSE_ADDR 0x200ULL | 298 | #define NETXEN_NIC_UNIT_PAUSE_ADDR 0x200ULL |
279 | #define NETXEN_NIC_EPG_PAUSE_ADDR1 0x2200010000c28001ULL | 299 | #define NETXEN_NIC_EPG_PAUSE_ADDR1 0x2200010000c28001ULL |
@@ -281,10 +301,6 @@ static u64 ctx_addr_sig_regs[][3] = { | |||
281 | 301 | ||
282 | #define NETXEN_NIC_WINDOW_MARGIN 0x100000 | 302 | #define NETXEN_NIC_WINDOW_MARGIN 0x100000 |
283 | 303 | ||
284 | static unsigned long netxen_nic_pci_set_window(struct netxen_adapter *adapter, | ||
285 | unsigned long long addr); | ||
286 | void netxen_free_hw_resources(struct netxen_adapter *adapter); | ||
287 | |||
288 | int netxen_nic_set_mac(struct net_device *netdev, void *p) | 304 | int netxen_nic_set_mac(struct net_device *netdev, void *p) |
289 | { | 305 | { |
290 | struct netxen_adapter *adapter = netdev_priv(netdev); | 306 | struct netxen_adapter *adapter = netdev_priv(netdev); |
@@ -296,266 +312,370 @@ int netxen_nic_set_mac(struct net_device *netdev, void *p) | |||
296 | if (!is_valid_ether_addr(addr->sa_data)) | 312 | if (!is_valid_ether_addr(addr->sa_data)) |
297 | return -EADDRNOTAVAIL; | 313 | return -EADDRNOTAVAIL; |
298 | 314 | ||
299 | DPRINTK(INFO, "valid ether addr\n"); | ||
300 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | 315 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); |
301 | 316 | ||
302 | if (adapter->macaddr_set) | 317 | /* For P3, MAC addr is not set in NIU */ |
303 | adapter->macaddr_set(adapter, addr->sa_data); | 318 | if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) |
319 | if (adapter->macaddr_set) | ||
320 | adapter->macaddr_set(adapter, addr->sa_data); | ||
304 | 321 | ||
305 | return 0; | 322 | return 0; |
306 | } | 323 | } |
307 | 324 | ||
308 | /* | 325 | #define NETXEN_UNICAST_ADDR(port, index) \ |
309 | * netxen_nic_set_multi - Multicast | 326 | (NETXEN_UNICAST_ADDR_BASE+(port*32)+(index*8)) |
310 | */ | 327 | #define NETXEN_MCAST_ADDR(port, index) \ |
311 | void netxen_nic_set_multi(struct net_device *netdev) | 328 | (NETXEN_MULTICAST_ADDR_BASE+(port*0x80)+(index*8)) |
329 | #define MAC_HI(addr) \ | ||
330 | ((addr[2] << 16) | (addr[1] << 8) | (addr[0])) | ||
331 | #define MAC_LO(addr) \ | ||
332 | ((addr[5] << 16) | (addr[4] << 8) | (addr[3])) | ||
333 | |||
334 | static int | ||
335 | netxen_nic_enable_mcast_filter(struct netxen_adapter *adapter) | ||
336 | { | ||
337 | u32 val = 0; | ||
338 | u16 port = adapter->physical_port; | ||
339 | u8 *addr = adapter->netdev->dev_addr; | ||
340 | |||
341 | if (adapter->mc_enabled) | ||
342 | return 0; | ||
343 | |||
344 | adapter->hw_read_wx(adapter, NETXEN_MAC_ADDR_CNTL_REG, &val, 4); | ||
345 | val |= (1UL << (28+port)); | ||
346 | adapter->hw_write_wx(adapter, NETXEN_MAC_ADDR_CNTL_REG, &val, 4); | ||
347 | |||
348 | /* add broadcast addr to filter */ | ||
349 | val = 0xffffff; | ||
350 | netxen_crb_writelit_adapter(adapter, NETXEN_UNICAST_ADDR(port, 0), val); | ||
351 | netxen_crb_writelit_adapter(adapter, | ||
352 | NETXEN_UNICAST_ADDR(port, 0)+4, val); | ||
353 | |||
354 | /* add station addr to filter */ | ||
355 | val = MAC_HI(addr); | ||
356 | netxen_crb_writelit_adapter(adapter, NETXEN_UNICAST_ADDR(port, 1), val); | ||
357 | val = MAC_LO(addr); | ||
358 | netxen_crb_writelit_adapter(adapter, | ||
359 | NETXEN_UNICAST_ADDR(port, 1)+4, val); | ||
360 | |||
361 | adapter->mc_enabled = 1; | ||
362 | return 0; | ||
363 | } | ||
364 | |||
365 | static int | ||
366 | netxen_nic_disable_mcast_filter(struct netxen_adapter *adapter) | ||
367 | { | ||
368 | u32 val = 0; | ||
369 | u16 port = adapter->physical_port; | ||
370 | u8 *addr = adapter->netdev->dev_addr; | ||
371 | |||
372 | if (!adapter->mc_enabled) | ||
373 | return 0; | ||
374 | |||
375 | adapter->hw_read_wx(adapter, NETXEN_MAC_ADDR_CNTL_REG, &val, 4); | ||
376 | val &= ~(1UL << (28+port)); | ||
377 | adapter->hw_write_wx(adapter, NETXEN_MAC_ADDR_CNTL_REG, &val, 4); | ||
378 | |||
379 | val = MAC_HI(addr); | ||
380 | netxen_crb_writelit_adapter(adapter, NETXEN_UNICAST_ADDR(port, 0), val); | ||
381 | val = MAC_LO(addr); | ||
382 | netxen_crb_writelit_adapter(adapter, | ||
383 | NETXEN_UNICAST_ADDR(port, 0)+4, val); | ||
384 | |||
385 | netxen_crb_writelit_adapter(adapter, NETXEN_UNICAST_ADDR(port, 1), 0); | ||
386 | netxen_crb_writelit_adapter(adapter, NETXEN_UNICAST_ADDR(port, 1)+4, 0); | ||
387 | |||
388 | adapter->mc_enabled = 0; | ||
389 | return 0; | ||
390 | } | ||
391 | |||
392 | static int | ||
393 | netxen_nic_set_mcast_addr(struct netxen_adapter *adapter, | ||
394 | int index, u8 *addr) | ||
395 | { | ||
396 | u32 hi = 0, lo = 0; | ||
397 | u16 port = adapter->physical_port; | ||
398 | |||
399 | lo = MAC_LO(addr); | ||
400 | hi = MAC_HI(addr); | ||
401 | |||
402 | netxen_crb_writelit_adapter(adapter, | ||
403 | NETXEN_MCAST_ADDR(port, index), hi); | ||
404 | netxen_crb_writelit_adapter(adapter, | ||
405 | NETXEN_MCAST_ADDR(port, index)+4, lo); | ||
406 | |||
407 | return 0; | ||
408 | } | ||
409 | |||
410 | void netxen_p2_nic_set_multi(struct net_device *netdev) | ||
312 | { | 411 | { |
313 | struct netxen_adapter *adapter = netdev_priv(netdev); | 412 | struct netxen_adapter *adapter = netdev_priv(netdev); |
314 | struct dev_mc_list *mc_ptr; | 413 | struct dev_mc_list *mc_ptr; |
414 | u8 null_addr[6]; | ||
415 | int index = 0; | ||
416 | |||
417 | memset(null_addr, 0, 6); | ||
315 | 418 | ||
316 | mc_ptr = netdev->mc_list; | ||
317 | if (netdev->flags & IFF_PROMISC) { | 419 | if (netdev->flags & IFF_PROMISC) { |
318 | if (adapter->set_promisc) | 420 | |
319 | adapter->set_promisc(adapter, | 421 | adapter->set_promisc(adapter, |
320 | NETXEN_NIU_PROMISC_MODE); | 422 | NETXEN_NIU_PROMISC_MODE); |
321 | } else { | 423 | |
322 | if (adapter->unset_promisc) | 424 | /* Full promiscuous mode */ |
323 | adapter->unset_promisc(adapter, | 425 | netxen_nic_disable_mcast_filter(adapter); |
324 | NETXEN_NIU_NON_PROMISC_MODE); | 426 | |
427 | return; | ||
428 | } | ||
429 | |||
430 | if (netdev->mc_count == 0) { | ||
431 | adapter->set_promisc(adapter, | ||
432 | NETXEN_NIU_NON_PROMISC_MODE); | ||
433 | netxen_nic_disable_mcast_filter(adapter); | ||
434 | return; | ||
325 | } | 435 | } |
436 | |||
437 | adapter->set_promisc(adapter, NETXEN_NIU_ALLMULTI_MODE); | ||
438 | if (netdev->flags & IFF_ALLMULTI || | ||
439 | netdev->mc_count > adapter->max_mc_count) { | ||
440 | netxen_nic_disable_mcast_filter(adapter); | ||
441 | return; | ||
442 | } | ||
443 | |||
444 | netxen_nic_enable_mcast_filter(adapter); | ||
445 | |||
446 | for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next, index++) | ||
447 | netxen_nic_set_mcast_addr(adapter, index, mc_ptr->dmi_addr); | ||
448 | |||
449 | if (index != netdev->mc_count) | ||
450 | printk(KERN_WARNING "%s: %s multicast address count mismatch\n", | ||
451 | netxen_nic_driver_name, netdev->name); | ||
452 | |||
453 | /* Clear out remaining addresses */ | ||
454 | for (; index < adapter->max_mc_count; index++) | ||
455 | netxen_nic_set_mcast_addr(adapter, index, null_addr); | ||
326 | } | 456 | } |
327 | 457 | ||
328 | /* | 458 | static int nx_p3_nic_add_mac(struct netxen_adapter *adapter, |
329 | * netxen_nic_change_mtu - Change the Maximum Transfer Unit | 459 | u8 *addr, nx_mac_list_t **add_list, nx_mac_list_t **del_list) |
330 | * @returns 0 on success, negative on failure | ||
331 | */ | ||
332 | int netxen_nic_change_mtu(struct net_device *netdev, int mtu) | ||
333 | { | 460 | { |
334 | struct netxen_adapter *adapter = netdev_priv(netdev); | 461 | nx_mac_list_t *cur, *prev; |
335 | int eff_mtu = mtu + NETXEN_ENET_HEADER_SIZE + NETXEN_ETH_FCS_SIZE; | 462 | |
463 | /* if in del_list, move it to adapter->mac_list */ | ||
464 | for (cur = *del_list, prev = NULL; cur;) { | ||
465 | if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) { | ||
466 | if (prev == NULL) | ||
467 | *del_list = cur->next; | ||
468 | else | ||
469 | prev->next = cur->next; | ||
470 | cur->next = adapter->mac_list; | ||
471 | adapter->mac_list = cur; | ||
472 | return 0; | ||
473 | } | ||
474 | prev = cur; | ||
475 | cur = cur->next; | ||
476 | } | ||
477 | |||
478 | /* make sure to add each mac address only once */ | ||
479 | for (cur = adapter->mac_list; cur; cur = cur->next) { | ||
480 | if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) | ||
481 | return 0; | ||
482 | } | ||
483 | /* not in del_list, create new entry and add to add_list */ | ||
484 | cur = kmalloc(sizeof(*cur), in_atomic()? GFP_ATOMIC : GFP_KERNEL); | ||
485 | if (cur == NULL) { | ||
486 | printk(KERN_ERR "%s: cannot allocate memory. MAC filtering may" | ||
487 | "not work properly from now.\n", __func__); | ||
488 | return -1; | ||
489 | } | ||
336 | 490 | ||
337 | if ((eff_mtu > NETXEN_MAX_MTU) || (eff_mtu < NETXEN_MIN_MTU)) { | 491 | memcpy(cur->mac_addr, addr, ETH_ALEN); |
338 | printk(KERN_ERR "%s: %s %d is not supported.\n", | 492 | cur->next = *add_list; |
339 | netxen_nic_driver_name, netdev->name, mtu); | 493 | *add_list = cur; |
494 | return 0; | ||
495 | } | ||
496 | |||
497 | static int | ||
498 | netxen_send_cmd_descs(struct netxen_adapter *adapter, | ||
499 | struct cmd_desc_type0 *cmd_desc_arr, int nr_elements) | ||
500 | { | ||
501 | uint32_t i, producer; | ||
502 | struct netxen_cmd_buffer *pbuf; | ||
503 | struct cmd_desc_type0 *cmd_desc; | ||
504 | |||
505 | if (nr_elements > MAX_PENDING_DESC_BLOCK_SIZE || nr_elements == 0) { | ||
506 | printk(KERN_WARNING "%s: Too many command descriptors in a " | ||
507 | "request\n", __func__); | ||
340 | return -EINVAL; | 508 | return -EINVAL; |
341 | } | 509 | } |
342 | 510 | ||
343 | if (adapter->set_mtu) | 511 | i = 0; |
344 | adapter->set_mtu(adapter, mtu); | 512 | |
345 | netdev->mtu = mtu; | 513 | producer = adapter->cmd_producer; |
514 | do { | ||
515 | cmd_desc = &cmd_desc_arr[i]; | ||
516 | |||
517 | pbuf = &adapter->cmd_buf_arr[producer]; | ||
518 | pbuf->mss = 0; | ||
519 | pbuf->total_length = 0; | ||
520 | pbuf->skb = NULL; | ||
521 | pbuf->cmd = 0; | ||
522 | pbuf->frag_count = 0; | ||
523 | pbuf->port = 0; | ||
524 | |||
525 | /* adapter->ahw.cmd_desc_head[producer] = *cmd_desc; */ | ||
526 | memcpy(&adapter->ahw.cmd_desc_head[producer], | ||
527 | &cmd_desc_arr[i], sizeof(struct cmd_desc_type0)); | ||
528 | |||
529 | producer = get_next_index(producer, | ||
530 | adapter->max_tx_desc_count); | ||
531 | i++; | ||
532 | |||
533 | } while (i != nr_elements); | ||
534 | |||
535 | adapter->cmd_producer = producer; | ||
536 | |||
537 | /* write producer index to start the xmit */ | ||
538 | |||
539 | netxen_nic_update_cmd_producer(adapter, adapter->cmd_producer); | ||
346 | 540 | ||
347 | return 0; | 541 | return 0; |
348 | } | 542 | } |
349 | 543 | ||
350 | /* | 544 | #define NIC_REQUEST 0x14 |
351 | * check if the firmware has been downloaded and ready to run and | 545 | #define NETXEN_MAC_EVENT 0x1 |
352 | * setup the address for the descriptors in the adapter | 546 | |
353 | */ | 547 | static int nx_p3_sre_macaddr_change(struct net_device *dev, |
354 | int netxen_nic_hw_resources(struct netxen_adapter *adapter) | 548 | u8 *addr, unsigned op) |
355 | { | 549 | { |
356 | struct netxen_hardware_context *hw = &adapter->ahw; | 550 | struct netxen_adapter *adapter = (struct netxen_adapter *)dev->priv; |
357 | u32 state = 0; | 551 | nx_nic_req_t req; |
358 | void *addr; | 552 | nx_mac_req_t mac_req; |
359 | int loops = 0, err = 0; | 553 | int rv; |
360 | int ctx, ring; | 554 | |
361 | struct netxen_recv_context *recv_ctx; | 555 | memset(&req, 0, sizeof(nx_nic_req_t)); |
362 | struct netxen_rcv_desc_ctx *rcv_desc; | 556 | req.qhdr |= (NIC_REQUEST << 23); |
363 | int func_id = adapter->portnum; | 557 | req.req_hdr |= NETXEN_MAC_EVENT; |
364 | 558 | req.req_hdr |= ((u64)adapter->portnum << 16); | |
365 | DPRINTK(INFO, "crb_base: %lx %x", NETXEN_PCI_CRBSPACE, | 559 | mac_req.op = op; |
366 | PCI_OFFSET_SECOND_RANGE(adapter, NETXEN_PCI_CRBSPACE)); | 560 | memcpy(&mac_req.mac_addr, addr, 6); |
367 | DPRINTK(INFO, "cam base: %lx %x", NETXEN_CRB_CAM, | 561 | req.words[0] = cpu_to_le64(*(u64 *)&mac_req); |
368 | pci_base_offset(adapter, NETXEN_CRB_CAM)); | 562 | |
369 | DPRINTK(INFO, "cam RAM: %lx %x", NETXEN_CAM_RAM_BASE, | 563 | rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); |
370 | pci_base_offset(adapter, NETXEN_CAM_RAM_BASE)); | 564 | if (rv != 0) { |
371 | 565 | printk(KERN_ERR "ERROR. Could not send mac update\n"); | |
372 | 566 | return rv; | |
373 | for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) { | ||
374 | DPRINTK(INFO, "Command Peg ready..waiting for rcv peg\n"); | ||
375 | loops = 0; | ||
376 | state = 0; | ||
377 | /* Window 1 call */ | ||
378 | state = readl(NETXEN_CRB_NORMALIZE(adapter, | ||
379 | recv_crb_registers[ctx]. | ||
380 | crb_rcvpeg_state)); | ||
381 | while (state != PHAN_PEG_RCV_INITIALIZED && loops < 20) { | ||
382 | msleep(1); | ||
383 | /* Window 1 call */ | ||
384 | state = readl(NETXEN_CRB_NORMALIZE(adapter, | ||
385 | recv_crb_registers | ||
386 | [ctx]. | ||
387 | crb_rcvpeg_state)); | ||
388 | loops++; | ||
389 | } | ||
390 | if (loops >= 20) { | ||
391 | printk(KERN_ERR "Rcv Peg initialization not complete:" | ||
392 | "%x.\n", state); | ||
393 | err = -EIO; | ||
394 | return err; | ||
395 | } | ||
396 | } | 567 | } |
397 | adapter->intr_scheme = readl( | ||
398 | NETXEN_CRB_NORMALIZE(adapter, CRB_NIC_CAPABILITIES_FW)); | ||
399 | adapter->msi_mode = readl( | ||
400 | NETXEN_CRB_NORMALIZE(adapter, CRB_NIC_MSI_MODE_FW)); | ||
401 | |||
402 | addr = netxen_alloc(adapter->ahw.pdev, | ||
403 | sizeof(struct netxen_ring_ctx) + | ||
404 | sizeof(uint32_t), | ||
405 | (dma_addr_t *) & adapter->ctx_desc_phys_addr, | ||
406 | &adapter->ctx_desc_pdev); | ||
407 | |||
408 | if (addr == NULL) { | ||
409 | DPRINTK(ERR, "bad return from pci_alloc_consistent\n"); | ||
410 | err = -ENOMEM; | ||
411 | return err; | ||
412 | } | ||
413 | memset(addr, 0, sizeof(struct netxen_ring_ctx)); | ||
414 | adapter->ctx_desc = (struct netxen_ring_ctx *)addr; | ||
415 | adapter->ctx_desc->ctx_id = cpu_to_le32(adapter->portnum); | ||
416 | adapter->ctx_desc->cmd_consumer_offset = | ||
417 | cpu_to_le64(adapter->ctx_desc_phys_addr + | ||
418 | sizeof(struct netxen_ring_ctx)); | ||
419 | adapter->cmd_consumer = (__le32 *) (((char *)addr) + | ||
420 | sizeof(struct netxen_ring_ctx)); | ||
421 | |||
422 | addr = netxen_alloc(adapter->ahw.pdev, | ||
423 | sizeof(struct cmd_desc_type0) * | ||
424 | adapter->max_tx_desc_count, | ||
425 | (dma_addr_t *) & hw->cmd_desc_phys_addr, | ||
426 | &adapter->ahw.cmd_desc_pdev); | ||
427 | |||
428 | if (addr == NULL) { | ||
429 | DPRINTK(ERR, "bad return from pci_alloc_consistent\n"); | ||
430 | netxen_free_hw_resources(adapter); | ||
431 | return -ENOMEM; | ||
432 | } | ||
433 | |||
434 | adapter->ctx_desc->cmd_ring_addr = | ||
435 | cpu_to_le64(hw->cmd_desc_phys_addr); | ||
436 | adapter->ctx_desc->cmd_ring_size = | ||
437 | cpu_to_le32(adapter->max_tx_desc_count); | ||
438 | |||
439 | hw->cmd_desc_head = (struct cmd_desc_type0 *)addr; | ||
440 | |||
441 | for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) { | ||
442 | recv_ctx = &adapter->recv_ctx[ctx]; | ||
443 | |||
444 | for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) { | ||
445 | rcv_desc = &recv_ctx->rcv_desc[ring]; | ||
446 | addr = netxen_alloc(adapter->ahw.pdev, | ||
447 | RCV_DESC_RINGSIZE, | ||
448 | &rcv_desc->phys_addr, | ||
449 | &rcv_desc->phys_pdev); | ||
450 | if (addr == NULL) { | ||
451 | DPRINTK(ERR, "bad return from " | ||
452 | "pci_alloc_consistent\n"); | ||
453 | netxen_free_hw_resources(adapter); | ||
454 | err = -ENOMEM; | ||
455 | return err; | ||
456 | } | ||
457 | rcv_desc->desc_head = (struct rcv_desc *)addr; | ||
458 | adapter->ctx_desc->rcv_ctx[ring].rcv_ring_addr = | ||
459 | cpu_to_le64(rcv_desc->phys_addr); | ||
460 | adapter->ctx_desc->rcv_ctx[ring].rcv_ring_size = | ||
461 | cpu_to_le32(rcv_desc->max_rx_desc_count); | ||
462 | } | ||
463 | 568 | ||
464 | addr = netxen_alloc(adapter->ahw.pdev, STATUS_DESC_RINGSIZE, | 569 | return 0; |
465 | &recv_ctx->rcv_status_desc_phys_addr, | ||
466 | &recv_ctx->rcv_status_desc_pdev); | ||
467 | if (addr == NULL) { | ||
468 | DPRINTK(ERR, "bad return from" | ||
469 | " pci_alloc_consistent\n"); | ||
470 | netxen_free_hw_resources(adapter); | ||
471 | err = -ENOMEM; | ||
472 | return err; | ||
473 | } | ||
474 | recv_ctx->rcv_status_desc_head = (struct status_desc *)addr; | ||
475 | adapter->ctx_desc->sts_ring_addr = | ||
476 | cpu_to_le64(recv_ctx->rcv_status_desc_phys_addr); | ||
477 | adapter->ctx_desc->sts_ring_size = | ||
478 | cpu_to_le32(adapter->max_rx_desc_count); | ||
479 | |||
480 | } | ||
481 | /* Window = 1 */ | ||
482 | |||
483 | writel(lower32(adapter->ctx_desc_phys_addr), | ||
484 | NETXEN_CRB_NORMALIZE(adapter, CRB_CTX_ADDR_REG_LO(func_id))); | ||
485 | writel(upper32(adapter->ctx_desc_phys_addr), | ||
486 | NETXEN_CRB_NORMALIZE(adapter, CRB_CTX_ADDR_REG_HI(func_id))); | ||
487 | writel(NETXEN_CTX_SIGNATURE | func_id, | ||
488 | NETXEN_CRB_NORMALIZE(adapter, CRB_CTX_SIGNATURE_REG(func_id))); | ||
489 | return err; | ||
490 | } | 570 | } |
491 | 571 | ||
492 | void netxen_free_hw_resources(struct netxen_adapter *adapter) | 572 | void netxen_p3_nic_set_multi(struct net_device *netdev) |
493 | { | 573 | { |
494 | struct netxen_recv_context *recv_ctx; | 574 | struct netxen_adapter *adapter = netdev_priv(netdev); |
495 | struct netxen_rcv_desc_ctx *rcv_desc; | 575 | nx_mac_list_t *cur, *next, *del_list, *add_list = NULL; |
496 | int ctx, ring; | 576 | struct dev_mc_list *mc_ptr; |
497 | 577 | u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; | |
498 | if (adapter->ctx_desc != NULL) { | 578 | |
499 | pci_free_consistent(adapter->ctx_desc_pdev, | 579 | adapter->set_promisc(adapter, NETXEN_NIU_PROMISC_MODE); |
500 | sizeof(struct netxen_ring_ctx) + | ||
501 | sizeof(uint32_t), | ||
502 | adapter->ctx_desc, | ||
503 | adapter->ctx_desc_phys_addr); | ||
504 | adapter->ctx_desc = NULL; | ||
505 | } | ||
506 | |||
507 | if (adapter->ahw.cmd_desc_head != NULL) { | ||
508 | pci_free_consistent(adapter->ahw.cmd_desc_pdev, | ||
509 | sizeof(struct cmd_desc_type0) * | ||
510 | adapter->max_tx_desc_count, | ||
511 | adapter->ahw.cmd_desc_head, | ||
512 | adapter->ahw.cmd_desc_phys_addr); | ||
513 | adapter->ahw.cmd_desc_head = NULL; | ||
514 | } | ||
515 | |||
516 | for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) { | ||
517 | recv_ctx = &adapter->recv_ctx[ctx]; | ||
518 | for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) { | ||
519 | rcv_desc = &recv_ctx->rcv_desc[ring]; | ||
520 | |||
521 | if (rcv_desc->desc_head != NULL) { | ||
522 | pci_free_consistent(rcv_desc->phys_pdev, | ||
523 | RCV_DESC_RINGSIZE, | ||
524 | rcv_desc->desc_head, | ||
525 | rcv_desc->phys_addr); | ||
526 | rcv_desc->desc_head = NULL; | ||
527 | } | ||
528 | } | ||
529 | 580 | ||
530 | if (recv_ctx->rcv_status_desc_head != NULL) { | 581 | /* |
531 | pci_free_consistent(recv_ctx->rcv_status_desc_pdev, | 582 | * Programming mac addresses will automaticly enabling L2 filtering. |
532 | STATUS_DESC_RINGSIZE, | 583 | * HW will replace timestamp with L2 conid when L2 filtering is |
533 | recv_ctx->rcv_status_desc_head, | 584 | * enabled. This causes problem for LSA. Do not enabling L2 filtering |
534 | recv_ctx-> | 585 | * until that problem is fixed. |
535 | rcv_status_desc_phys_addr); | 586 | */ |
536 | recv_ctx->rcv_status_desc_head = NULL; | 587 | if ((netdev->flags & IFF_PROMISC) || |
588 | (netdev->mc_count > adapter->max_mc_count)) | ||
589 | return; | ||
590 | |||
591 | del_list = adapter->mac_list; | ||
592 | adapter->mac_list = NULL; | ||
593 | |||
594 | nx_p3_nic_add_mac(adapter, netdev->dev_addr, &add_list, &del_list); | ||
595 | if (netdev->mc_count > 0) { | ||
596 | nx_p3_nic_add_mac(adapter, bcast_addr, &add_list, &del_list); | ||
597 | for (mc_ptr = netdev->mc_list; mc_ptr; | ||
598 | mc_ptr = mc_ptr->next) { | ||
599 | nx_p3_nic_add_mac(adapter, mc_ptr->dmi_addr, | ||
600 | &add_list, &del_list); | ||
537 | } | 601 | } |
538 | } | 602 | } |
603 | for (cur = del_list; cur;) { | ||
604 | nx_p3_sre_macaddr_change(netdev, cur->mac_addr, NETXEN_MAC_DEL); | ||
605 | next = cur->next; | ||
606 | kfree(cur); | ||
607 | cur = next; | ||
608 | } | ||
609 | for (cur = add_list; cur;) { | ||
610 | nx_p3_sre_macaddr_change(netdev, cur->mac_addr, NETXEN_MAC_ADD); | ||
611 | next = cur->next; | ||
612 | cur->next = adapter->mac_list; | ||
613 | adapter->mac_list = cur; | ||
614 | cur = next; | ||
615 | } | ||
539 | } | 616 | } |
540 | 617 | ||
541 | void netxen_tso_check(struct netxen_adapter *adapter, | 618 | #define NETXEN_CONFIG_INTR_COALESCE 3 |
542 | struct cmd_desc_type0 *desc, struct sk_buff *skb) | 619 | |
620 | /* | ||
621 | * Send the interrupt coalescing parameter set by ethtool to the card. | ||
622 | */ | ||
623 | int netxen_config_intr_coalesce(struct netxen_adapter *adapter) | ||
543 | { | 624 | { |
544 | if (desc->mss) { | 625 | nx_nic_req_t req; |
545 | desc->total_hdr_length = (sizeof(struct ethhdr) + | 626 | int rv; |
546 | ip_hdrlen(skb) + tcp_hdrlen(skb)); | 627 | |
547 | netxen_set_cmd_desc_opcode(desc, TX_TCP_LSO); | 628 | memset(&req, 0, sizeof(nx_nic_req_t)); |
548 | } else if (skb->ip_summed == CHECKSUM_PARTIAL) { | 629 | |
549 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) { | 630 | req.qhdr |= (NIC_REQUEST << 23); |
550 | netxen_set_cmd_desc_opcode(desc, TX_TCP_PKT); | 631 | req.req_hdr |= NETXEN_CONFIG_INTR_COALESCE; |
551 | } else if (ip_hdr(skb)->protocol == IPPROTO_UDP) { | 632 | req.req_hdr |= ((u64)adapter->portnum << 16); |
552 | netxen_set_cmd_desc_opcode(desc, TX_UDP_PKT); | 633 | |
553 | } else { | 634 | memcpy(&req.words[0], &adapter->coal, sizeof(adapter->coal)); |
554 | return; | 635 | |
555 | } | 636 | rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); |
637 | if (rv != 0) { | ||
638 | printk(KERN_ERR "ERROR. Could not send " | ||
639 | "interrupt coalescing parameters\n"); | ||
556 | } | 640 | } |
557 | desc->tcp_hdr_offset = skb_transport_offset(skb); | 641 | |
558 | desc->ip_hdr_offset = skb_network_offset(skb); | 642 | return rv; |
643 | } | ||
644 | |||
645 | /* | ||
646 | * netxen_nic_change_mtu - Change the Maximum Transfer Unit | ||
647 | * @returns 0 on success, negative on failure | ||
648 | */ | ||
649 | |||
650 | #define MTU_FUDGE_FACTOR 100 | ||
651 | |||
652 | int netxen_nic_change_mtu(struct net_device *netdev, int mtu) | ||
653 | { | ||
654 | struct netxen_adapter *adapter = netdev_priv(netdev); | ||
655 | int max_mtu; | ||
656 | |||
657 | if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) | ||
658 | max_mtu = P3_MAX_MTU; | ||
659 | else | ||
660 | max_mtu = P2_MAX_MTU; | ||
661 | |||
662 | if (mtu > max_mtu) { | ||
663 | printk(KERN_ERR "%s: mtu > %d bytes unsupported\n", | ||
664 | netdev->name, max_mtu); | ||
665 | return -EINVAL; | ||
666 | } | ||
667 | |||
668 | if (adapter->set_mtu) | ||
669 | adapter->set_mtu(adapter, mtu); | ||
670 | netdev->mtu = mtu; | ||
671 | |||
672 | mtu += MTU_FUDGE_FACTOR; | ||
673 | if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) | ||
674 | nx_fw_cmd_set_mtu(adapter, mtu); | ||
675 | else if (adapter->set_mtu) | ||
676 | adapter->set_mtu(adapter, mtu); | ||
677 | |||
678 | return 0; | ||
559 | } | 679 | } |
560 | 680 | ||
561 | int netxen_is_flash_supported(struct netxen_adapter *adapter) | 681 | int netxen_is_flash_supported(struct netxen_adapter *adapter) |
@@ -632,41 +752,49 @@ int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 mac[]) | |||
632 | return 0; | 752 | return 0; |
633 | } | 753 | } |
634 | 754 | ||
755 | #define CRB_WIN_LOCK_TIMEOUT 100000000 | ||
756 | |||
757 | static int crb_win_lock(struct netxen_adapter *adapter) | ||
758 | { | ||
759 | int done = 0, timeout = 0; | ||
760 | |||
761 | while (!done) { | ||
762 | /* acquire semaphore3 from PCI HW block */ | ||
763 | adapter->hw_read_wx(adapter, | ||
764 | NETXEN_PCIE_REG(PCIE_SEM7_LOCK), &done, 4); | ||
765 | if (done == 1) | ||
766 | break; | ||
767 | if (timeout >= CRB_WIN_LOCK_TIMEOUT) | ||
768 | return -1; | ||
769 | timeout++; | ||
770 | udelay(1); | ||
771 | } | ||
772 | netxen_crb_writelit_adapter(adapter, | ||
773 | NETXEN_CRB_WIN_LOCK_ID, adapter->portnum); | ||
774 | return 0; | ||
775 | } | ||
776 | |||
777 | static void crb_win_unlock(struct netxen_adapter *adapter) | ||
778 | { | ||
779 | int val; | ||
780 | |||
781 | adapter->hw_read_wx(adapter, | ||
782 | NETXEN_PCIE_REG(PCIE_SEM7_UNLOCK), &val, 4); | ||
783 | } | ||
784 | |||
635 | /* | 785 | /* |
636 | * Changes the CRB window to the specified window. | 786 | * Changes the CRB window to the specified window. |
637 | */ | 787 | */ |
638 | void netxen_nic_pci_change_crbwindow(struct netxen_adapter *adapter, u32 wndw) | 788 | void |
789 | netxen_nic_pci_change_crbwindow_128M(struct netxen_adapter *adapter, u32 wndw) | ||
639 | { | 790 | { |
640 | void __iomem *offset; | 791 | void __iomem *offset; |
641 | u32 tmp; | 792 | u32 tmp; |
642 | int count = 0; | 793 | int count = 0; |
794 | uint8_t func = adapter->ahw.pci_func; | ||
643 | 795 | ||
644 | if (adapter->curr_window == wndw) | 796 | if (adapter->curr_window == wndw) |
645 | return; | 797 | return; |
646 | switch(adapter->ahw.pci_func) { | ||
647 | case 0: | ||
648 | offset = PCI_OFFSET_SECOND_RANGE(adapter, | ||
649 | NETXEN_PCIX_PH_REG(PCIX_CRB_WINDOW)); | ||
650 | break; | ||
651 | case 1: | ||
652 | offset = PCI_OFFSET_SECOND_RANGE(adapter, | ||
653 | NETXEN_PCIX_PH_REG(PCIX_CRB_WINDOW_F1)); | ||
654 | break; | ||
655 | case 2: | ||
656 | offset = PCI_OFFSET_SECOND_RANGE(adapter, | ||
657 | NETXEN_PCIX_PH_REG(PCIX_CRB_WINDOW_F2)); | ||
658 | break; | ||
659 | case 3: | ||
660 | offset = PCI_OFFSET_SECOND_RANGE(adapter, | ||
661 | NETXEN_PCIX_PH_REG(PCIX_CRB_WINDOW_F3)); | ||
662 | break; | ||
663 | default: | ||
664 | printk(KERN_INFO "Changing the window for PCI function " | ||
665 | "%d\n", adapter->ahw.pci_func); | ||
666 | offset = PCI_OFFSET_SECOND_RANGE(adapter, | ||
667 | NETXEN_PCIX_PH_REG(PCIX_CRB_WINDOW)); | ||
668 | break; | ||
669 | } | ||
670 | /* | 798 | /* |
671 | * Move the CRB window. | 799 | * Move the CRB window. |
672 | * We need to write to the "direct access" region of PCI | 800 | * We need to write to the "direct access" region of PCI |
@@ -675,6 +803,8 @@ void netxen_nic_pci_change_crbwindow(struct netxen_adapter *adapter, u32 wndw) | |||
675 | * register address is received by PCI. The direct region bypasses | 803 | * register address is received by PCI. The direct region bypasses |
676 | * the CRB bus. | 804 | * the CRB bus. |
677 | */ | 805 | */ |
806 | offset = PCI_OFFSET_SECOND_RANGE(adapter, | ||
807 | NETXEN_PCIX_PH_REG(PCIE_CRB_WINDOW_REG(func))); | ||
678 | 808 | ||
679 | if (wndw & 0x1) | 809 | if (wndw & 0x1) |
680 | wndw = NETXEN_WINDOW_ONE; | 810 | wndw = NETXEN_WINDOW_ONE; |
@@ -685,7 +815,7 @@ void netxen_nic_pci_change_crbwindow(struct netxen_adapter *adapter, u32 wndw) | |||
685 | while ((tmp = readl(offset)) != wndw) { | 815 | while ((tmp = readl(offset)) != wndw) { |
686 | printk(KERN_WARNING "%s: %s WARNING: CRB window value not " | 816 | printk(KERN_WARNING "%s: %s WARNING: CRB window value not " |
687 | "registered properly: 0x%08x.\n", | 817 | "registered properly: 0x%08x.\n", |
688 | netxen_nic_driver_name, __FUNCTION__, tmp); | 818 | netxen_nic_driver_name, __func__, tmp); |
689 | mdelay(1); | 819 | mdelay(1); |
690 | if (count >= 10) | 820 | if (count >= 10) |
691 | break; | 821 | break; |
@@ -698,51 +828,119 @@ void netxen_nic_pci_change_crbwindow(struct netxen_adapter *adapter, u32 wndw) | |||
698 | adapter->curr_window = 0; | 828 | adapter->curr_window = 0; |
699 | } | 829 | } |
700 | 830 | ||
831 | /* | ||
832 | * Return -1 if off is not valid, | ||
833 | * 1 if window access is needed. 'off' is set to offset from | ||
834 | * CRB space in 128M pci map | ||
835 | * 0 if no window access is needed. 'off' is set to 2M addr | ||
836 | * In: 'off' is offset from base in 128M pci map | ||
837 | */ | ||
838 | static int | ||
839 | netxen_nic_pci_get_crb_addr_2M(struct netxen_adapter *adapter, | ||
840 | ulong *off, int len) | ||
841 | { | ||
842 | unsigned long end = *off + len; | ||
843 | crb_128M_2M_sub_block_map_t *m; | ||
844 | |||
845 | |||
846 | if (*off >= NETXEN_CRB_MAX) | ||
847 | return -1; | ||
848 | |||
849 | if (*off >= NETXEN_PCI_CAMQM && (end <= NETXEN_PCI_CAMQM_2M_END)) { | ||
850 | *off = (*off - NETXEN_PCI_CAMQM) + NETXEN_PCI_CAMQM_2M_BASE + | ||
851 | (ulong)adapter->ahw.pci_base0; | ||
852 | return 0; | ||
853 | } | ||
854 | |||
855 | if (*off < NETXEN_PCI_CRBSPACE) | ||
856 | return -1; | ||
857 | |||
858 | *off -= NETXEN_PCI_CRBSPACE; | ||
859 | end = *off + len; | ||
860 | |||
861 | /* | ||
862 | * Try direct map | ||
863 | */ | ||
864 | m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)]; | ||
865 | |||
866 | if (m->valid && (m->start_128M <= *off) && (m->end_128M >= end)) { | ||
867 | *off = *off + m->start_2M - m->start_128M + | ||
868 | (ulong)adapter->ahw.pci_base0; | ||
869 | return 0; | ||
870 | } | ||
871 | |||
872 | /* | ||
873 | * Not in direct map, use crb window | ||
874 | */ | ||
875 | return 1; | ||
876 | } | ||
877 | |||
878 | /* | ||
879 | * In: 'off' is offset from CRB space in 128M pci map | ||
880 | * Out: 'off' is 2M pci map addr | ||
881 | * side effect: lock crb window | ||
882 | */ | ||
883 | static void | ||
884 | netxen_nic_pci_set_crbwindow_2M(struct netxen_adapter *adapter, ulong *off) | ||
885 | { | ||
886 | u32 win_read; | ||
887 | |||
888 | adapter->crb_win = CRB_HI(*off); | ||
889 | writel(adapter->crb_win, (void *)(CRB_WINDOW_2M + | ||
890 | adapter->ahw.pci_base0)); | ||
891 | /* | ||
892 | * Read back value to make sure write has gone through before trying | ||
893 | * to use it. | ||
894 | */ | ||
895 | win_read = readl((void *)(CRB_WINDOW_2M + adapter->ahw.pci_base0)); | ||
896 | if (win_read != adapter->crb_win) { | ||
897 | printk(KERN_ERR "%s: Written crbwin (0x%x) != " | ||
898 | "Read crbwin (0x%x), off=0x%lx\n", | ||
899 | __func__, adapter->crb_win, win_read, *off); | ||
900 | } | ||
901 | *off = (*off & MASK(16)) + CRB_INDIRECT_2M + | ||
902 | (ulong)adapter->ahw.pci_base0; | ||
903 | } | ||
904 | |||
701 | int netxen_load_firmware(struct netxen_adapter *adapter) | 905 | int netxen_load_firmware(struct netxen_adapter *adapter) |
702 | { | 906 | { |
703 | int i; | 907 | int i; |
704 | u32 data, size = 0; | 908 | u32 data, size = 0; |
705 | u32 flashaddr = NETXEN_FLASH_BASE, memaddr = NETXEN_PHANTOM_MEM_BASE; | 909 | u32 flashaddr = NETXEN_BOOTLD_START, memaddr = NETXEN_BOOTLD_START; |
706 | u64 off; | ||
707 | void __iomem *addr; | ||
708 | 910 | ||
709 | size = NETXEN_FIRMWARE_LEN; | 911 | size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START)/4; |
710 | writel(1, NETXEN_CRB_NORMALIZE(adapter, NETXEN_ROMUSB_GLB_CAS_RST)); | 912 | |
913 | if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) | ||
914 | adapter->pci_write_normalize(adapter, | ||
915 | NETXEN_ROMUSB_GLB_CAS_RST, 1); | ||
711 | 916 | ||
712 | for (i = 0; i < size; i++) { | 917 | for (i = 0; i < size; i++) { |
713 | int retries = 10; | ||
714 | if (netxen_rom_fast_read(adapter, flashaddr, (int *)&data) != 0) | 918 | if (netxen_rom_fast_read(adapter, flashaddr, (int *)&data) != 0) |
715 | return -EIO; | 919 | return -EIO; |
716 | 920 | ||
717 | off = netxen_nic_pci_set_window(adapter, memaddr); | 921 | adapter->pci_mem_write(adapter, memaddr, &data, 4); |
718 | addr = pci_base_offset(adapter, off); | ||
719 | writel(data, addr); | ||
720 | do { | ||
721 | if (readl(addr) == data) | ||
722 | break; | ||
723 | msleep(100); | ||
724 | writel(data, addr); | ||
725 | } while (--retries); | ||
726 | if (!retries) { | ||
727 | printk(KERN_ERR "%s: firmware load aborted, write failed at 0x%x\n", | ||
728 | netxen_nic_driver_name, memaddr); | ||
729 | return -EIO; | ||
730 | } | ||
731 | flashaddr += 4; | 922 | flashaddr += 4; |
732 | memaddr += 4; | 923 | memaddr += 4; |
924 | cond_resched(); | ||
925 | } | ||
926 | msleep(1); | ||
927 | |||
928 | if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) | ||
929 | adapter->pci_write_normalize(adapter, | ||
930 | NETXEN_ROMUSB_GLB_SW_RESET, 0x80001d); | ||
931 | else { | ||
932 | adapter->pci_write_normalize(adapter, | ||
933 | NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL, 0x3fff); | ||
934 | adapter->pci_write_normalize(adapter, | ||
935 | NETXEN_ROMUSB_GLB_CAS_RST, 0); | ||
733 | } | 936 | } |
734 | udelay(100); | ||
735 | /* make sure Casper is powered on */ | ||
736 | writel(0x3fff, | ||
737 | NETXEN_CRB_NORMALIZE(adapter, NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL)); | ||
738 | writel(0, NETXEN_CRB_NORMALIZE(adapter, NETXEN_ROMUSB_GLB_CAS_RST)); | ||
739 | 937 | ||
740 | return 0; | 938 | return 0; |
741 | } | 939 | } |
742 | 940 | ||
743 | int | 941 | int |
744 | netxen_nic_hw_write_wx(struct netxen_adapter *adapter, u64 off, void *data, | 942 | netxen_nic_hw_write_wx_128M(struct netxen_adapter *adapter, |
745 | int len) | 943 | ulong off, void *data, int len) |
746 | { | 944 | { |
747 | void __iomem *addr; | 945 | void __iomem *addr; |
748 | 946 | ||
@@ -750,7 +948,7 @@ netxen_nic_hw_write_wx(struct netxen_adapter *adapter, u64 off, void *data, | |||
750 | addr = NETXEN_CRB_NORMALIZE(adapter, off); | 948 | addr = NETXEN_CRB_NORMALIZE(adapter, off); |
751 | } else { /* Window 0 */ | 949 | } else { /* Window 0 */ |
752 | addr = pci_base_offset(adapter, off); | 950 | addr = pci_base_offset(adapter, off); |
753 | netxen_nic_pci_change_crbwindow(adapter, 0); | 951 | netxen_nic_pci_change_crbwindow_128M(adapter, 0); |
754 | } | 952 | } |
755 | 953 | ||
756 | DPRINTK(INFO, "writing to base %lx offset %llx addr %p" | 954 | DPRINTK(INFO, "writing to base %lx offset %llx addr %p" |
@@ -758,7 +956,7 @@ netxen_nic_hw_write_wx(struct netxen_adapter *adapter, u64 off, void *data, | |||
758 | pci_base(adapter, off), off, addr, | 956 | pci_base(adapter, off), off, addr, |
759 | *(unsigned long long *)data, len); | 957 | *(unsigned long long *)data, len); |
760 | if (!addr) { | 958 | if (!addr) { |
761 | netxen_nic_pci_change_crbwindow(adapter, 1); | 959 | netxen_nic_pci_change_crbwindow_128M(adapter, 1); |
762 | return 1; | 960 | return 1; |
763 | } | 961 | } |
764 | 962 | ||
@@ -785,14 +983,14 @@ netxen_nic_hw_write_wx(struct netxen_adapter *adapter, u64 off, void *data, | |||
785 | break; | 983 | break; |
786 | } | 984 | } |
787 | if (!ADDR_IN_WINDOW1(off)) | 985 | if (!ADDR_IN_WINDOW1(off)) |
788 | netxen_nic_pci_change_crbwindow(adapter, 1); | 986 | netxen_nic_pci_change_crbwindow_128M(adapter, 1); |
789 | 987 | ||
790 | return 0; | 988 | return 0; |
791 | } | 989 | } |
792 | 990 | ||
793 | int | 991 | int |
794 | netxen_nic_hw_read_wx(struct netxen_adapter *adapter, u64 off, void *data, | 992 | netxen_nic_hw_read_wx_128M(struct netxen_adapter *adapter, |
795 | int len) | 993 | ulong off, void *data, int len) |
796 | { | 994 | { |
797 | void __iomem *addr; | 995 | void __iomem *addr; |
798 | 996 | ||
@@ -800,13 +998,13 @@ netxen_nic_hw_read_wx(struct netxen_adapter *adapter, u64 off, void *data, | |||
800 | addr = NETXEN_CRB_NORMALIZE(adapter, off); | 998 | addr = NETXEN_CRB_NORMALIZE(adapter, off); |
801 | } else { /* Window 0 */ | 999 | } else { /* Window 0 */ |
802 | addr = pci_base_offset(adapter, off); | 1000 | addr = pci_base_offset(adapter, off); |
803 | netxen_nic_pci_change_crbwindow(adapter, 0); | 1001 | netxen_nic_pci_change_crbwindow_128M(adapter, 0); |
804 | } | 1002 | } |
805 | 1003 | ||
806 | DPRINTK(INFO, "reading from base %lx offset %llx addr %p\n", | 1004 | DPRINTK(INFO, "reading from base %lx offset %llx addr %p\n", |
807 | pci_base(adapter, off), off, addr); | 1005 | pci_base(adapter, off), off, addr); |
808 | if (!addr) { | 1006 | if (!addr) { |
809 | netxen_nic_pci_change_crbwindow(adapter, 1); | 1007 | netxen_nic_pci_change_crbwindow_128M(adapter, 1); |
810 | return 1; | 1008 | return 1; |
811 | } | 1009 | } |
812 | switch (len) { | 1010 | switch (len) { |
@@ -830,81 +1028,195 @@ netxen_nic_hw_read_wx(struct netxen_adapter *adapter, u64 off, void *data, | |||
830 | DPRINTK(INFO, "read %lx\n", *(unsigned long *)data); | 1028 | DPRINTK(INFO, "read %lx\n", *(unsigned long *)data); |
831 | 1029 | ||
832 | if (!ADDR_IN_WINDOW1(off)) | 1030 | if (!ADDR_IN_WINDOW1(off)) |
833 | netxen_nic_pci_change_crbwindow(adapter, 1); | 1031 | netxen_nic_pci_change_crbwindow_128M(adapter, 1); |
834 | 1032 | ||
835 | return 0; | 1033 | return 0; |
836 | } | 1034 | } |
837 | 1035 | ||
838 | void netxen_nic_reg_write(struct netxen_adapter *adapter, u64 off, u32 val) | 1036 | int |
839 | { /* Only for window 1 */ | 1037 | netxen_nic_hw_write_wx_2M(struct netxen_adapter *adapter, |
840 | void __iomem *addr; | 1038 | ulong off, void *data, int len) |
1039 | { | ||
1040 | unsigned long flags = 0; | ||
1041 | int rv; | ||
1042 | |||
1043 | rv = netxen_nic_pci_get_crb_addr_2M(adapter, &off, len); | ||
1044 | |||
1045 | if (rv == -1) { | ||
1046 | printk(KERN_ERR "%s: invalid offset: 0x%016lx\n", | ||
1047 | __func__, off); | ||
1048 | dump_stack(); | ||
1049 | return -1; | ||
1050 | } | ||
1051 | |||
1052 | if (rv == 1) { | ||
1053 | write_lock_irqsave(&adapter->adapter_lock, flags); | ||
1054 | crb_win_lock(adapter); | ||
1055 | netxen_nic_pci_set_crbwindow_2M(adapter, &off); | ||
1056 | } | ||
841 | 1057 | ||
842 | addr = NETXEN_CRB_NORMALIZE(adapter, off); | 1058 | DPRINTK(1, INFO, "write data %lx to offset %llx, len=%d\n", |
843 | DPRINTK(INFO, "writing to base %lx offset %llx addr %p data %x\n", | 1059 | *(unsigned long *)data, off, len); |
844 | pci_base(adapter, off), off, addr, val); | ||
845 | writel(val, addr); | ||
846 | 1060 | ||
1061 | switch (len) { | ||
1062 | case 1: | ||
1063 | writeb(*(uint8_t *)data, (void *)off); | ||
1064 | break; | ||
1065 | case 2: | ||
1066 | writew(*(uint16_t *)data, (void *)off); | ||
1067 | break; | ||
1068 | case 4: | ||
1069 | writel(*(uint32_t *)data, (void *)off); | ||
1070 | break; | ||
1071 | case 8: | ||
1072 | writeq(*(uint64_t *)data, (void *)off); | ||
1073 | break; | ||
1074 | default: | ||
1075 | DPRINTK(1, INFO, | ||
1076 | "writing data %lx to offset %llx, num words=%d\n", | ||
1077 | *(unsigned long *)data, off, (len>>3)); | ||
1078 | break; | ||
1079 | } | ||
1080 | if (rv == 1) { | ||
1081 | crb_win_unlock(adapter); | ||
1082 | write_unlock_irqrestore(&adapter->adapter_lock, flags); | ||
1083 | } | ||
1084 | |||
1085 | return 0; | ||
847 | } | 1086 | } |
848 | 1087 | ||
849 | int netxen_nic_reg_read(struct netxen_adapter *adapter, u64 off) | 1088 | int |
850 | { /* Only for window 1 */ | 1089 | netxen_nic_hw_read_wx_2M(struct netxen_adapter *adapter, |
851 | void __iomem *addr; | 1090 | ulong off, void *data, int len) |
852 | int val; | 1091 | { |
1092 | unsigned long flags = 0; | ||
1093 | int rv; | ||
853 | 1094 | ||
854 | addr = NETXEN_CRB_NORMALIZE(adapter, off); | 1095 | rv = netxen_nic_pci_get_crb_addr_2M(adapter, &off, len); |
855 | DPRINTK(INFO, "reading from base %lx offset %llx addr %p\n", | 1096 | |
856 | pci_base(adapter, off), off, addr); | 1097 | if (rv == -1) { |
857 | val = readl(addr); | 1098 | printk(KERN_ERR "%s: invalid offset: 0x%016lx\n", |
858 | writel(val, addr); | 1099 | __func__, off); |
1100 | dump_stack(); | ||
1101 | return -1; | ||
1102 | } | ||
1103 | |||
1104 | if (rv == 1) { | ||
1105 | write_lock_irqsave(&adapter->adapter_lock, flags); | ||
1106 | crb_win_lock(adapter); | ||
1107 | netxen_nic_pci_set_crbwindow_2M(adapter, &off); | ||
1108 | } | ||
1109 | |||
1110 | DPRINTK(1, INFO, "read from offset %lx, len=%d\n", off, len); | ||
1111 | |||
1112 | switch (len) { | ||
1113 | case 1: | ||
1114 | *(uint8_t *)data = readb((void *)off); | ||
1115 | break; | ||
1116 | case 2: | ||
1117 | *(uint16_t *)data = readw((void *)off); | ||
1118 | break; | ||
1119 | case 4: | ||
1120 | *(uint32_t *)data = readl((void *)off); | ||
1121 | break; | ||
1122 | case 8: | ||
1123 | *(uint64_t *)data = readq((void *)off); | ||
1124 | break; | ||
1125 | default: | ||
1126 | break; | ||
1127 | } | ||
859 | 1128 | ||
1129 | DPRINTK(1, INFO, "read %lx\n", *(unsigned long *)data); | ||
1130 | |||
1131 | if (rv == 1) { | ||
1132 | crb_win_unlock(adapter); | ||
1133 | write_unlock_irqrestore(&adapter->adapter_lock, flags); | ||
1134 | } | ||
1135 | |||
1136 | return 0; | ||
1137 | } | ||
1138 | |||
1139 | void netxen_nic_reg_write(struct netxen_adapter *adapter, u64 off, u32 val) | ||
1140 | { | ||
1141 | adapter->hw_write_wx(adapter, off, &val, 4); | ||
1142 | } | ||
1143 | |||
1144 | int netxen_nic_reg_read(struct netxen_adapter *adapter, u64 off) | ||
1145 | { | ||
1146 | int val; | ||
1147 | adapter->hw_read_wx(adapter, off, &val, 4); | ||
860 | return val; | 1148 | return val; |
861 | } | 1149 | } |
862 | 1150 | ||
863 | /* Change the window to 0, write and change back to window 1. */ | 1151 | /* Change the window to 0, write and change back to window 1. */ |
864 | void netxen_nic_write_w0(struct netxen_adapter *adapter, u32 index, u32 value) | 1152 | void netxen_nic_write_w0(struct netxen_adapter *adapter, u32 index, u32 value) |
865 | { | 1153 | { |
866 | void __iomem *addr; | 1154 | adapter->hw_write_wx(adapter, index, &value, 4); |
867 | |||
868 | netxen_nic_pci_change_crbwindow(adapter, 0); | ||
869 | addr = pci_base_offset(adapter, index); | ||
870 | writel(value, addr); | ||
871 | netxen_nic_pci_change_crbwindow(adapter, 1); | ||
872 | } | 1155 | } |
873 | 1156 | ||
874 | /* Change the window to 0, read and change back to window 1. */ | 1157 | /* Change the window to 0, read and change back to window 1. */ |
875 | void netxen_nic_read_w0(struct netxen_adapter *adapter, u32 index, u32 * value) | 1158 | void netxen_nic_read_w0(struct netxen_adapter *adapter, u32 index, u32 *value) |
876 | { | 1159 | { |
877 | void __iomem *addr; | 1160 | adapter->hw_read_wx(adapter, index, value, 4); |
1161 | } | ||
878 | 1162 | ||
879 | addr = pci_base_offset(adapter, index); | 1163 | void netxen_nic_write_w1(struct netxen_adapter *adapter, u32 index, u32 value) |
1164 | { | ||
1165 | adapter->hw_write_wx(adapter, index, &value, 4); | ||
1166 | } | ||
1167 | |||
1168 | void netxen_nic_read_w1(struct netxen_adapter *adapter, u32 index, u32 *value) | ||
1169 | { | ||
1170 | adapter->hw_read_wx(adapter, index, value, 4); | ||
1171 | } | ||
1172 | |||
1173 | /* | ||
1174 | * check memory access boundary. | ||
1175 | * used by test agent. support ddr access only for now | ||
1176 | */ | ||
1177 | static unsigned long | ||
1178 | netxen_nic_pci_mem_bound_check(struct netxen_adapter *adapter, | ||
1179 | unsigned long long addr, int size) | ||
1180 | { | ||
1181 | if (!ADDR_IN_RANGE(addr, | ||
1182 | NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX) || | ||
1183 | !ADDR_IN_RANGE(addr+size-1, | ||
1184 | NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX) || | ||
1185 | ((size != 1) && (size != 2) && (size != 4) && (size != 8))) { | ||
1186 | return 0; | ||
1187 | } | ||
880 | 1188 | ||
881 | netxen_nic_pci_change_crbwindow(adapter, 0); | 1189 | return 1; |
882 | *value = readl(addr); | ||
883 | netxen_nic_pci_change_crbwindow(adapter, 1); | ||
884 | } | 1190 | } |
885 | 1191 | ||
886 | static int netxen_pci_set_window_warning_count; | 1192 | static int netxen_pci_set_window_warning_count; |
887 | 1193 | ||
888 | static unsigned long netxen_nic_pci_set_window(struct netxen_adapter *adapter, | 1194 | unsigned long |
889 | unsigned long long addr) | 1195 | netxen_nic_pci_set_window_128M(struct netxen_adapter *adapter, |
1196 | unsigned long long addr) | ||
890 | { | 1197 | { |
891 | static int ddr_mn_window = -1; | 1198 | void __iomem *offset; |
892 | static int qdr_sn_window = -1; | ||
893 | int window; | 1199 | int window; |
1200 | unsigned long long qdr_max; | ||
1201 | uint8_t func = adapter->ahw.pci_func; | ||
1202 | |||
1203 | if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { | ||
1204 | qdr_max = NETXEN_ADDR_QDR_NET_MAX_P2; | ||
1205 | } else { | ||
1206 | qdr_max = NETXEN_ADDR_QDR_NET_MAX_P3; | ||
1207 | } | ||
894 | 1208 | ||
895 | if (ADDR_IN_RANGE(addr, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { | 1209 | if (ADDR_IN_RANGE(addr, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { |
896 | /* DDR network side */ | 1210 | /* DDR network side */ |
897 | addr -= NETXEN_ADDR_DDR_NET; | 1211 | addr -= NETXEN_ADDR_DDR_NET; |
898 | window = (addr >> 25) & 0x3ff; | 1212 | window = (addr >> 25) & 0x3ff; |
899 | if (ddr_mn_window != window) { | 1213 | if (adapter->ahw.ddr_mn_window != window) { |
900 | ddr_mn_window = window; | 1214 | adapter->ahw.ddr_mn_window = window; |
901 | writel(window, PCI_OFFSET_SECOND_RANGE(adapter, | 1215 | offset = PCI_OFFSET_SECOND_RANGE(adapter, |
902 | NETXEN_PCIX_PH_REG | 1216 | NETXEN_PCIX_PH_REG(PCIE_MN_WINDOW_REG(func))); |
903 | (PCIX_MN_WINDOW(adapter->ahw.pci_func)))); | 1217 | writel(window, offset); |
904 | /* MUST make sure window is set before we forge on... */ | 1218 | /* MUST make sure window is set before we forge on... */ |
905 | readl(PCI_OFFSET_SECOND_RANGE(adapter, | 1219 | readl(offset); |
906 | NETXEN_PCIX_PH_REG | ||
907 | (PCIX_MN_WINDOW(adapter->ahw.pci_func)))); | ||
908 | } | 1220 | } |
909 | addr -= (window * NETXEN_WINDOW_ONE); | 1221 | addr -= (window * NETXEN_WINDOW_ONE); |
910 | addr += NETXEN_PCI_DDR_NET; | 1222 | addr += NETXEN_PCI_DDR_NET; |
@@ -914,22 +1226,17 @@ static unsigned long netxen_nic_pci_set_window(struct netxen_adapter *adapter, | |||
914 | } else if (ADDR_IN_RANGE(addr, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) { | 1226 | } else if (ADDR_IN_RANGE(addr, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) { |
915 | addr -= NETXEN_ADDR_OCM1; | 1227 | addr -= NETXEN_ADDR_OCM1; |
916 | addr += NETXEN_PCI_OCM1; | 1228 | addr += NETXEN_PCI_OCM1; |
917 | } else | 1229 | } else if (ADDR_IN_RANGE(addr, NETXEN_ADDR_QDR_NET, qdr_max)) { |
918 | if (ADDR_IN_RANGE | ||
919 | (addr, NETXEN_ADDR_QDR_NET, NETXEN_ADDR_QDR_NET_MAX)) { | ||
920 | /* QDR network side */ | 1230 | /* QDR network side */ |
921 | addr -= NETXEN_ADDR_QDR_NET; | 1231 | addr -= NETXEN_ADDR_QDR_NET; |
922 | window = (addr >> 22) & 0x3f; | 1232 | window = (addr >> 22) & 0x3f; |
923 | if (qdr_sn_window != window) { | 1233 | if (adapter->ahw.qdr_sn_window != window) { |
924 | qdr_sn_window = window; | 1234 | adapter->ahw.qdr_sn_window = window; |
925 | writel((window << 22), | 1235 | offset = PCI_OFFSET_SECOND_RANGE(adapter, |
926 | PCI_OFFSET_SECOND_RANGE(adapter, | 1236 | NETXEN_PCIX_PH_REG(PCIE_SN_WINDOW_REG(func))); |
927 | NETXEN_PCIX_PH_REG | 1237 | writel((window << 22), offset); |
928 | (PCIX_SN_WINDOW(adapter->ahw.pci_func)))); | ||
929 | /* MUST make sure window is set before we forge on... */ | 1238 | /* MUST make sure window is set before we forge on... */ |
930 | readl(PCI_OFFSET_SECOND_RANGE(adapter, | 1239 | readl(offset); |
931 | NETXEN_PCIX_PH_REG | ||
932 | (PCIX_SN_WINDOW(adapter->ahw.pci_func)))); | ||
933 | } | 1240 | } |
934 | addr -= (window * 0x400000); | 1241 | addr -= (window * 0x400000); |
935 | addr += NETXEN_PCI_QDR_NET; | 1242 | addr += NETXEN_PCI_QDR_NET; |
@@ -943,11 +1250,711 @@ static unsigned long netxen_nic_pci_set_window(struct netxen_adapter *adapter, | |||
943 | printk("%s: Warning:netxen_nic_pci_set_window()" | 1250 | printk("%s: Warning:netxen_nic_pci_set_window()" |
944 | " Unknown address range!\n", | 1251 | " Unknown address range!\n", |
945 | netxen_nic_driver_name); | 1252 | netxen_nic_driver_name); |
1253 | addr = -1UL; | ||
1254 | } | ||
1255 | return addr; | ||
1256 | } | ||
1257 | |||
1258 | /* | ||
1259 | * Note : only 32-bit writes! | ||
1260 | */ | ||
1261 | int netxen_nic_pci_write_immediate_128M(struct netxen_adapter *adapter, | ||
1262 | u64 off, u32 data) | ||
1263 | { | ||
1264 | writel(data, (void __iomem *)(PCI_OFFSET_SECOND_RANGE(adapter, off))); | ||
1265 | return 0; | ||
1266 | } | ||
1267 | |||
1268 | u32 netxen_nic_pci_read_immediate_128M(struct netxen_adapter *adapter, u64 off) | ||
1269 | { | ||
1270 | return readl((void __iomem *)(pci_base_offset(adapter, off))); | ||
1271 | } | ||
1272 | |||
1273 | void netxen_nic_pci_write_normalize_128M(struct netxen_adapter *adapter, | ||
1274 | u64 off, u32 data) | ||
1275 | { | ||
1276 | writel(data, NETXEN_CRB_NORMALIZE(adapter, off)); | ||
1277 | } | ||
1278 | |||
1279 | u32 netxen_nic_pci_read_normalize_128M(struct netxen_adapter *adapter, u64 off) | ||
1280 | { | ||
1281 | return readl(NETXEN_CRB_NORMALIZE(adapter, off)); | ||
1282 | } | ||
1283 | |||
1284 | unsigned long | ||
1285 | netxen_nic_pci_set_window_2M(struct netxen_adapter *adapter, | ||
1286 | unsigned long long addr) | ||
1287 | { | ||
1288 | int window; | ||
1289 | u32 win_read; | ||
1290 | |||
1291 | if (ADDR_IN_RANGE(addr, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { | ||
1292 | /* DDR network side */ | ||
1293 | window = MN_WIN(addr); | ||
1294 | adapter->ahw.ddr_mn_window = window; | ||
1295 | adapter->hw_write_wx(adapter, | ||
1296 | adapter->ahw.mn_win_crb | NETXEN_PCI_CRBSPACE, | ||
1297 | &window, 4); | ||
1298 | adapter->hw_read_wx(adapter, | ||
1299 | adapter->ahw.mn_win_crb | NETXEN_PCI_CRBSPACE, | ||
1300 | &win_read, 4); | ||
1301 | if ((win_read << 17) != window) { | ||
1302 | printk(KERN_INFO "Written MNwin (0x%x) != " | ||
1303 | "Read MNwin (0x%x)\n", window, win_read); | ||
1304 | } | ||
1305 | addr = GET_MEM_OFFS_2M(addr) + NETXEN_PCI_DDR_NET; | ||
1306 | } else if (ADDR_IN_RANGE(addr, | ||
1307 | NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) { | ||
1308 | if ((addr & 0x00ff800) == 0xff800) { | ||
1309 | printk("%s: QM access not handled.\n", __func__); | ||
1310 | addr = -1UL; | ||
1311 | } | ||
1312 | |||
1313 | window = OCM_WIN(addr); | ||
1314 | adapter->ahw.ddr_mn_window = window; | ||
1315 | adapter->hw_write_wx(adapter, | ||
1316 | adapter->ahw.mn_win_crb | NETXEN_PCI_CRBSPACE, | ||
1317 | &window, 4); | ||
1318 | adapter->hw_read_wx(adapter, | ||
1319 | adapter->ahw.mn_win_crb | NETXEN_PCI_CRBSPACE, | ||
1320 | &win_read, 4); | ||
1321 | if ((win_read >> 7) != window) { | ||
1322 | printk(KERN_INFO "%s: Written OCMwin (0x%x) != " | ||
1323 | "Read OCMwin (0x%x)\n", | ||
1324 | __func__, window, win_read); | ||
1325 | } | ||
1326 | addr = GET_MEM_OFFS_2M(addr) + NETXEN_PCI_OCM0_2M; | ||
1327 | |||
1328 | } else if (ADDR_IN_RANGE(addr, | ||
1329 | NETXEN_ADDR_QDR_NET, NETXEN_ADDR_QDR_NET_MAX_P3)) { | ||
1330 | /* QDR network side */ | ||
1331 | window = MS_WIN(addr); | ||
1332 | adapter->ahw.qdr_sn_window = window; | ||
1333 | adapter->hw_write_wx(adapter, | ||
1334 | adapter->ahw.ms_win_crb | NETXEN_PCI_CRBSPACE, | ||
1335 | &window, 4); | ||
1336 | adapter->hw_read_wx(adapter, | ||
1337 | adapter->ahw.ms_win_crb | NETXEN_PCI_CRBSPACE, | ||
1338 | &win_read, 4); | ||
1339 | if (win_read != window) { | ||
1340 | printk(KERN_INFO "%s: Written MSwin (0x%x) != " | ||
1341 | "Read MSwin (0x%x)\n", | ||
1342 | __func__, window, win_read); | ||
1343 | } | ||
1344 | addr = GET_MEM_OFFS_2M(addr) + NETXEN_PCI_QDR_NET; | ||
946 | 1345 | ||
1346 | } else { | ||
1347 | /* | ||
1348 | * peg gdb frequently accesses memory that doesn't exist, | ||
1349 | * this limits the chit chat so debugging isn't slowed down. | ||
1350 | */ | ||
1351 | if ((netxen_pci_set_window_warning_count++ < 8) | ||
1352 | || (netxen_pci_set_window_warning_count%64 == 0)) { | ||
1353 | printk("%s: Warning:%s Unknown address range!\n", | ||
1354 | __func__, netxen_nic_driver_name); | ||
1355 | } | ||
1356 | addr = -1UL; | ||
947 | } | 1357 | } |
948 | return addr; | 1358 | return addr; |
949 | } | 1359 | } |
950 | 1360 | ||
1361 | static int netxen_nic_pci_is_same_window(struct netxen_adapter *adapter, | ||
1362 | unsigned long long addr) | ||
1363 | { | ||
1364 | int window; | ||
1365 | unsigned long long qdr_max; | ||
1366 | |||
1367 | if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) | ||
1368 | qdr_max = NETXEN_ADDR_QDR_NET_MAX_P2; | ||
1369 | else | ||
1370 | qdr_max = NETXEN_ADDR_QDR_NET_MAX_P3; | ||
1371 | |||
1372 | if (ADDR_IN_RANGE(addr, | ||
1373 | NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { | ||
1374 | /* DDR network side */ | ||
1375 | BUG(); /* MN access can not come here */ | ||
1376 | } else if (ADDR_IN_RANGE(addr, | ||
1377 | NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) { | ||
1378 | return 1; | ||
1379 | } else if (ADDR_IN_RANGE(addr, | ||
1380 | NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) { | ||
1381 | return 1; | ||
1382 | } else if (ADDR_IN_RANGE(addr, NETXEN_ADDR_QDR_NET, qdr_max)) { | ||
1383 | /* QDR network side */ | ||
1384 | window = ((addr - NETXEN_ADDR_QDR_NET) >> 22) & 0x3f; | ||
1385 | if (adapter->ahw.qdr_sn_window == window) | ||
1386 | return 1; | ||
1387 | } | ||
1388 | |||
1389 | return 0; | ||
1390 | } | ||
1391 | |||
1392 | static int netxen_nic_pci_mem_read_direct(struct netxen_adapter *adapter, | ||
1393 | u64 off, void *data, int size) | ||
1394 | { | ||
1395 | unsigned long flags; | ||
1396 | void *addr; | ||
1397 | int ret = 0; | ||
1398 | u64 start; | ||
1399 | uint8_t *mem_ptr = NULL; | ||
1400 | unsigned long mem_base; | ||
1401 | unsigned long mem_page; | ||
1402 | |||
1403 | write_lock_irqsave(&adapter->adapter_lock, flags); | ||
1404 | |||
1405 | /* | ||
1406 | * If attempting to access unknown address or straddle hw windows, | ||
1407 | * do not access. | ||
1408 | */ | ||
1409 | start = adapter->pci_set_window(adapter, off); | ||
1410 | if ((start == -1UL) || | ||
1411 | (netxen_nic_pci_is_same_window(adapter, off+size-1) == 0)) { | ||
1412 | write_unlock_irqrestore(&adapter->adapter_lock, flags); | ||
1413 | printk(KERN_ERR "%s out of bound pci memory access. " | ||
1414 | "offset is 0x%llx\n", netxen_nic_driver_name, off); | ||
1415 | return -1; | ||
1416 | } | ||
1417 | |||
1418 | addr = (void *)(pci_base_offset(adapter, start)); | ||
1419 | if (!addr) { | ||
1420 | write_unlock_irqrestore(&adapter->adapter_lock, flags); | ||
1421 | mem_base = pci_resource_start(adapter->pdev, 0); | ||
1422 | mem_page = start & PAGE_MASK; | ||
1423 | /* Map two pages whenever user tries to access addresses in two | ||
1424 | consecutive pages. | ||
1425 | */ | ||
1426 | if (mem_page != ((start + size - 1) & PAGE_MASK)) | ||
1427 | mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2); | ||
1428 | else | ||
1429 | mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE); | ||
1430 | if (mem_ptr == 0UL) { | ||
1431 | *(uint8_t *)data = 0; | ||
1432 | return -1; | ||
1433 | } | ||
1434 | addr = mem_ptr; | ||
1435 | addr += start & (PAGE_SIZE - 1); | ||
1436 | write_lock_irqsave(&adapter->adapter_lock, flags); | ||
1437 | } | ||
1438 | |||
1439 | switch (size) { | ||
1440 | case 1: | ||
1441 | *(uint8_t *)data = readb(addr); | ||
1442 | break; | ||
1443 | case 2: | ||
1444 | *(uint16_t *)data = readw(addr); | ||
1445 | break; | ||
1446 | case 4: | ||
1447 | *(uint32_t *)data = readl(addr); | ||
1448 | break; | ||
1449 | case 8: | ||
1450 | *(uint64_t *)data = readq(addr); | ||
1451 | break; | ||
1452 | default: | ||
1453 | ret = -1; | ||
1454 | break; | ||
1455 | } | ||
1456 | write_unlock_irqrestore(&adapter->adapter_lock, flags); | ||
1457 | DPRINTK(1, INFO, "read %llx\n", *(unsigned long long *)data); | ||
1458 | |||
1459 | if (mem_ptr) | ||
1460 | iounmap(mem_ptr); | ||
1461 | return ret; | ||
1462 | } | ||
1463 | |||
1464 | static int | ||
1465 | netxen_nic_pci_mem_write_direct(struct netxen_adapter *adapter, u64 off, | ||
1466 | void *data, int size) | ||
1467 | { | ||
1468 | unsigned long flags; | ||
1469 | void *addr; | ||
1470 | int ret = 0; | ||
1471 | u64 start; | ||
1472 | uint8_t *mem_ptr = NULL; | ||
1473 | unsigned long mem_base; | ||
1474 | unsigned long mem_page; | ||
1475 | |||
1476 | write_lock_irqsave(&adapter->adapter_lock, flags); | ||
1477 | |||
1478 | /* | ||
1479 | * If attempting to access unknown address or straddle hw windows, | ||
1480 | * do not access. | ||
1481 | */ | ||
1482 | start = adapter->pci_set_window(adapter, off); | ||
1483 | if ((start == -1UL) || | ||
1484 | (netxen_nic_pci_is_same_window(adapter, off+size-1) == 0)) { | ||
1485 | write_unlock_irqrestore(&adapter->adapter_lock, flags); | ||
1486 | printk(KERN_ERR "%s out of bound pci memory access. " | ||
1487 | "offset is 0x%llx\n", netxen_nic_driver_name, off); | ||
1488 | return -1; | ||
1489 | } | ||
1490 | |||
1491 | addr = (void *)(pci_base_offset(adapter, start)); | ||
1492 | if (!addr) { | ||
1493 | write_unlock_irqrestore(&adapter->adapter_lock, flags); | ||
1494 | mem_base = pci_resource_start(adapter->pdev, 0); | ||
1495 | mem_page = start & PAGE_MASK; | ||
1496 | /* Map two pages whenever user tries to access addresses in two | ||
1497 | * consecutive pages. | ||
1498 | */ | ||
1499 | if (mem_page != ((start + size - 1) & PAGE_MASK)) | ||
1500 | mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2); | ||
1501 | else | ||
1502 | mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE); | ||
1503 | if (mem_ptr == 0UL) | ||
1504 | return -1; | ||
1505 | addr = mem_ptr; | ||
1506 | addr += start & (PAGE_SIZE - 1); | ||
1507 | write_lock_irqsave(&adapter->adapter_lock, flags); | ||
1508 | } | ||
1509 | |||
1510 | switch (size) { | ||
1511 | case 1: | ||
1512 | writeb(*(uint8_t *)data, addr); | ||
1513 | break; | ||
1514 | case 2: | ||
1515 | writew(*(uint16_t *)data, addr); | ||
1516 | break; | ||
1517 | case 4: | ||
1518 | writel(*(uint32_t *)data, addr); | ||
1519 | break; | ||
1520 | case 8: | ||
1521 | writeq(*(uint64_t *)data, addr); | ||
1522 | break; | ||
1523 | default: | ||
1524 | ret = -1; | ||
1525 | break; | ||
1526 | } | ||
1527 | write_unlock_irqrestore(&adapter->adapter_lock, flags); | ||
1528 | DPRINTK(1, INFO, "writing data %llx to offset %llx\n", | ||
1529 | *(unsigned long long *)data, start); | ||
1530 | if (mem_ptr) | ||
1531 | iounmap(mem_ptr); | ||
1532 | return ret; | ||
1533 | } | ||
1534 | |||
1535 | #define MAX_CTL_CHECK 1000 | ||
1536 | |||
1537 | int | ||
1538 | netxen_nic_pci_mem_write_128M(struct netxen_adapter *adapter, | ||
1539 | u64 off, void *data, int size) | ||
1540 | { | ||
1541 | unsigned long flags, mem_crb; | ||
1542 | int i, j, ret = 0, loop, sz[2], off0; | ||
1543 | uint32_t temp; | ||
1544 | uint64_t off8, tmpw, word[2] = {0, 0}; | ||
1545 | |||
1546 | /* | ||
1547 | * If not MN, go check for MS or invalid. | ||
1548 | */ | ||
1549 | if (netxen_nic_pci_mem_bound_check(adapter, off, size) == 0) | ||
1550 | return netxen_nic_pci_mem_write_direct(adapter, | ||
1551 | off, data, size); | ||
1552 | |||
1553 | off8 = off & 0xfffffff8; | ||
1554 | off0 = off & 0x7; | ||
1555 | sz[0] = (size < (8 - off0)) ? size : (8 - off0); | ||
1556 | sz[1] = size - sz[0]; | ||
1557 | loop = ((off0 + size - 1) >> 3) + 1; | ||
1558 | mem_crb = (unsigned long)pci_base_offset(adapter, NETXEN_CRB_DDR_NET); | ||
1559 | |||
1560 | if ((size != 8) || (off0 != 0)) { | ||
1561 | for (i = 0; i < loop; i++) { | ||
1562 | if (adapter->pci_mem_read(adapter, | ||
1563 | off8 + (i << 3), &word[i], 8)) | ||
1564 | return -1; | ||
1565 | } | ||
1566 | } | ||
1567 | |||
1568 | switch (size) { | ||
1569 | case 1: | ||
1570 | tmpw = *((uint8_t *)data); | ||
1571 | break; | ||
1572 | case 2: | ||
1573 | tmpw = *((uint16_t *)data); | ||
1574 | break; | ||
1575 | case 4: | ||
1576 | tmpw = *((uint32_t *)data); | ||
1577 | break; | ||
1578 | case 8: | ||
1579 | default: | ||
1580 | tmpw = *((uint64_t *)data); | ||
1581 | break; | ||
1582 | } | ||
1583 | word[0] &= ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8)); | ||
1584 | word[0] |= tmpw << (off0 * 8); | ||
1585 | |||
1586 | if (loop == 2) { | ||
1587 | word[1] &= ~(~0ULL << (sz[1] * 8)); | ||
1588 | word[1] |= tmpw >> (sz[0] * 8); | ||
1589 | } | ||
1590 | |||
1591 | write_lock_irqsave(&adapter->adapter_lock, flags); | ||
1592 | netxen_nic_pci_change_crbwindow_128M(adapter, 0); | ||
1593 | |||
1594 | for (i = 0; i < loop; i++) { | ||
1595 | writel((uint32_t)(off8 + (i << 3)), | ||
1596 | (void *)(mem_crb+MIU_TEST_AGT_ADDR_LO)); | ||
1597 | writel(0, | ||
1598 | (void *)(mem_crb+MIU_TEST_AGT_ADDR_HI)); | ||
1599 | writel(word[i] & 0xffffffff, | ||
1600 | (void *)(mem_crb+MIU_TEST_AGT_WRDATA_LO)); | ||
1601 | writel((word[i] >> 32) & 0xffffffff, | ||
1602 | (void *)(mem_crb+MIU_TEST_AGT_WRDATA_HI)); | ||
1603 | writel(MIU_TA_CTL_ENABLE|MIU_TA_CTL_WRITE, | ||
1604 | (void *)(mem_crb+MIU_TEST_AGT_CTRL)); | ||
1605 | writel(MIU_TA_CTL_START|MIU_TA_CTL_ENABLE|MIU_TA_CTL_WRITE, | ||
1606 | (void *)(mem_crb+MIU_TEST_AGT_CTRL)); | ||
1607 | |||
1608 | for (j = 0; j < MAX_CTL_CHECK; j++) { | ||
1609 | temp = readl( | ||
1610 | (void *)(mem_crb+MIU_TEST_AGT_CTRL)); | ||
1611 | if ((temp & MIU_TA_CTL_BUSY) == 0) | ||
1612 | break; | ||
1613 | } | ||
1614 | |||
1615 | if (j >= MAX_CTL_CHECK) { | ||
1616 | printk("%s: %s Fail to write through agent\n", | ||
1617 | __func__, netxen_nic_driver_name); | ||
1618 | ret = -1; | ||
1619 | break; | ||
1620 | } | ||
1621 | } | ||
1622 | |||
1623 | netxen_nic_pci_change_crbwindow_128M(adapter, 1); | ||
1624 | write_unlock_irqrestore(&adapter->adapter_lock, flags); | ||
1625 | return ret; | ||
1626 | } | ||
1627 | |||
1628 | int | ||
1629 | netxen_nic_pci_mem_read_128M(struct netxen_adapter *adapter, | ||
1630 | u64 off, void *data, int size) | ||
1631 | { | ||
1632 | unsigned long flags, mem_crb; | ||
1633 | int i, j = 0, k, start, end, loop, sz[2], off0[2]; | ||
1634 | uint32_t temp; | ||
1635 | uint64_t off8, val, word[2] = {0, 0}; | ||
1636 | |||
1637 | |||
1638 | /* | ||
1639 | * If not MN, go check for MS or invalid. | ||
1640 | */ | ||
1641 | if (netxen_nic_pci_mem_bound_check(adapter, off, size) == 0) | ||
1642 | return netxen_nic_pci_mem_read_direct(adapter, off, data, size); | ||
1643 | |||
1644 | off8 = off & 0xfffffff8; | ||
1645 | off0[0] = off & 0x7; | ||
1646 | off0[1] = 0; | ||
1647 | sz[0] = (size < (8 - off0[0])) ? size : (8 - off0[0]); | ||
1648 | sz[1] = size - sz[0]; | ||
1649 | loop = ((off0[0] + size - 1) >> 3) + 1; | ||
1650 | mem_crb = (unsigned long)pci_base_offset(adapter, NETXEN_CRB_DDR_NET); | ||
1651 | |||
1652 | write_lock_irqsave(&adapter->adapter_lock, flags); | ||
1653 | netxen_nic_pci_change_crbwindow_128M(adapter, 0); | ||
1654 | |||
1655 | for (i = 0; i < loop; i++) { | ||
1656 | writel((uint32_t)(off8 + (i << 3)), | ||
1657 | (void *)(mem_crb+MIU_TEST_AGT_ADDR_LO)); | ||
1658 | writel(0, | ||
1659 | (void *)(mem_crb+MIU_TEST_AGT_ADDR_HI)); | ||
1660 | writel(MIU_TA_CTL_ENABLE, | ||
1661 | (void *)(mem_crb+MIU_TEST_AGT_CTRL)); | ||
1662 | writel(MIU_TA_CTL_START|MIU_TA_CTL_ENABLE, | ||
1663 | (void *)(mem_crb+MIU_TEST_AGT_CTRL)); | ||
1664 | |||
1665 | for (j = 0; j < MAX_CTL_CHECK; j++) { | ||
1666 | temp = readl( | ||
1667 | (void *)(mem_crb+MIU_TEST_AGT_CTRL)); | ||
1668 | if ((temp & MIU_TA_CTL_BUSY) == 0) | ||
1669 | break; | ||
1670 | } | ||
1671 | |||
1672 | if (j >= MAX_CTL_CHECK) { | ||
1673 | printk(KERN_ERR "%s: %s Fail to read through agent\n", | ||
1674 | __func__, netxen_nic_driver_name); | ||
1675 | break; | ||
1676 | } | ||
1677 | |||
1678 | start = off0[i] >> 2; | ||
1679 | end = (off0[i] + sz[i] - 1) >> 2; | ||
1680 | for (k = start; k <= end; k++) { | ||
1681 | word[i] |= ((uint64_t) readl( | ||
1682 | (void *)(mem_crb + | ||
1683 | MIU_TEST_AGT_RDDATA(k))) << (32*k)); | ||
1684 | } | ||
1685 | } | ||
1686 | |||
1687 | netxen_nic_pci_change_crbwindow_128M(adapter, 1); | ||
1688 | write_unlock_irqrestore(&adapter->adapter_lock, flags); | ||
1689 | |||
1690 | if (j >= MAX_CTL_CHECK) | ||
1691 | return -1; | ||
1692 | |||
1693 | if (sz[0] == 8) { | ||
1694 | val = word[0]; | ||
1695 | } else { | ||
1696 | val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) | | ||
1697 | ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8)); | ||
1698 | } | ||
1699 | |||
1700 | switch (size) { | ||
1701 | case 1: | ||
1702 | *(uint8_t *)data = val; | ||
1703 | break; | ||
1704 | case 2: | ||
1705 | *(uint16_t *)data = val; | ||
1706 | break; | ||
1707 | case 4: | ||
1708 | *(uint32_t *)data = val; | ||
1709 | break; | ||
1710 | case 8: | ||
1711 | *(uint64_t *)data = val; | ||
1712 | break; | ||
1713 | } | ||
1714 | DPRINTK(1, INFO, "read %llx\n", *(unsigned long long *)data); | ||
1715 | return 0; | ||
1716 | } | ||
1717 | |||
1718 | int | ||
1719 | netxen_nic_pci_mem_write_2M(struct netxen_adapter *adapter, | ||
1720 | u64 off, void *data, int size) | ||
1721 | { | ||
1722 | int i, j, ret = 0, loop, sz[2], off0; | ||
1723 | uint32_t temp; | ||
1724 | uint64_t off8, mem_crb, tmpw, word[2] = {0, 0}; | ||
1725 | |||
1726 | /* | ||
1727 | * If not MN, go check for MS or invalid. | ||
1728 | */ | ||
1729 | if (off >= NETXEN_ADDR_QDR_NET && off <= NETXEN_ADDR_QDR_NET_MAX_P3) | ||
1730 | mem_crb = NETXEN_CRB_QDR_NET; | ||
1731 | else { | ||
1732 | mem_crb = NETXEN_CRB_DDR_NET; | ||
1733 | if (netxen_nic_pci_mem_bound_check(adapter, off, size) == 0) | ||
1734 | return netxen_nic_pci_mem_write_direct(adapter, | ||
1735 | off, data, size); | ||
1736 | } | ||
1737 | |||
1738 | off8 = off & 0xfffffff8; | ||
1739 | off0 = off & 0x7; | ||
1740 | sz[0] = (size < (8 - off0)) ? size : (8 - off0); | ||
1741 | sz[1] = size - sz[0]; | ||
1742 | loop = ((off0 + size - 1) >> 3) + 1; | ||
1743 | |||
1744 | if ((size != 8) || (off0 != 0)) { | ||
1745 | for (i = 0; i < loop; i++) { | ||
1746 | if (adapter->pci_mem_read(adapter, off8 + (i << 3), | ||
1747 | &word[i], 8)) | ||
1748 | return -1; | ||
1749 | } | ||
1750 | } | ||
1751 | |||
1752 | switch (size) { | ||
1753 | case 1: | ||
1754 | tmpw = *((uint8_t *)data); | ||
1755 | break; | ||
1756 | case 2: | ||
1757 | tmpw = *((uint16_t *)data); | ||
1758 | break; | ||
1759 | case 4: | ||
1760 | tmpw = *((uint32_t *)data); | ||
1761 | break; | ||
1762 | case 8: | ||
1763 | default: | ||
1764 | tmpw = *((uint64_t *)data); | ||
1765 | break; | ||
1766 | } | ||
1767 | |||
1768 | word[0] &= ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8)); | ||
1769 | word[0] |= tmpw << (off0 * 8); | ||
1770 | |||
1771 | if (loop == 2) { | ||
1772 | word[1] &= ~(~0ULL << (sz[1] * 8)); | ||
1773 | word[1] |= tmpw >> (sz[0] * 8); | ||
1774 | } | ||
1775 | |||
1776 | /* | ||
1777 | * don't lock here - write_wx gets the lock if each time | ||
1778 | * write_lock_irqsave(&adapter->adapter_lock, flags); | ||
1779 | * netxen_nic_pci_change_crbwindow_128M(adapter, 0); | ||
1780 | */ | ||
1781 | |||
1782 | for (i = 0; i < loop; i++) { | ||
1783 | temp = off8 + (i << 3); | ||
1784 | adapter->hw_write_wx(adapter, | ||
1785 | mem_crb+MIU_TEST_AGT_ADDR_LO, &temp, 4); | ||
1786 | temp = 0; | ||
1787 | adapter->hw_write_wx(adapter, | ||
1788 | mem_crb+MIU_TEST_AGT_ADDR_HI, &temp, 4); | ||
1789 | temp = word[i] & 0xffffffff; | ||
1790 | adapter->hw_write_wx(adapter, | ||
1791 | mem_crb+MIU_TEST_AGT_WRDATA_LO, &temp, 4); | ||
1792 | temp = (word[i] >> 32) & 0xffffffff; | ||
1793 | adapter->hw_write_wx(adapter, | ||
1794 | mem_crb+MIU_TEST_AGT_WRDATA_HI, &temp, 4); | ||
1795 | temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE; | ||
1796 | adapter->hw_write_wx(adapter, | ||
1797 | mem_crb+MIU_TEST_AGT_CTRL, &temp, 4); | ||
1798 | temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE; | ||
1799 | adapter->hw_write_wx(adapter, | ||
1800 | mem_crb+MIU_TEST_AGT_CTRL, &temp, 4); | ||
1801 | |||
1802 | for (j = 0; j < MAX_CTL_CHECK; j++) { | ||
1803 | adapter->hw_read_wx(adapter, | ||
1804 | mem_crb + MIU_TEST_AGT_CTRL, &temp, 4); | ||
1805 | if ((temp & MIU_TA_CTL_BUSY) == 0) | ||
1806 | break; | ||
1807 | } | ||
1808 | |||
1809 | if (j >= MAX_CTL_CHECK) { | ||
1810 | printk(KERN_ERR "%s: Fail to write through agent\n", | ||
1811 | netxen_nic_driver_name); | ||
1812 | ret = -1; | ||
1813 | break; | ||
1814 | } | ||
1815 | } | ||
1816 | |||
1817 | /* | ||
1818 | * netxen_nic_pci_change_crbwindow_128M(adapter, 1); | ||
1819 | * write_unlock_irqrestore(&adapter->adapter_lock, flags); | ||
1820 | */ | ||
1821 | return ret; | ||
1822 | } | ||
1823 | |||
1824 | int | ||
1825 | netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter, | ||
1826 | u64 off, void *data, int size) | ||
1827 | { | ||
1828 | int i, j = 0, k, start, end, loop, sz[2], off0[2]; | ||
1829 | uint32_t temp; | ||
1830 | uint64_t off8, val, mem_crb, word[2] = {0, 0}; | ||
1831 | |||
1832 | /* | ||
1833 | * If not MN, go check for MS or invalid. | ||
1834 | */ | ||
1835 | |||
1836 | if (off >= NETXEN_ADDR_QDR_NET && off <= NETXEN_ADDR_QDR_NET_MAX_P3) | ||
1837 | mem_crb = NETXEN_CRB_QDR_NET; | ||
1838 | else { | ||
1839 | mem_crb = NETXEN_CRB_DDR_NET; | ||
1840 | if (netxen_nic_pci_mem_bound_check(adapter, off, size) == 0) | ||
1841 | return netxen_nic_pci_mem_read_direct(adapter, | ||
1842 | off, data, size); | ||
1843 | } | ||
1844 | |||
1845 | off8 = off & 0xfffffff8; | ||
1846 | off0[0] = off & 0x7; | ||
1847 | off0[1] = 0; | ||
1848 | sz[0] = (size < (8 - off0[0])) ? size : (8 - off0[0]); | ||
1849 | sz[1] = size - sz[0]; | ||
1850 | loop = ((off0[0] + size - 1) >> 3) + 1; | ||
1851 | |||
1852 | /* | ||
1853 | * don't lock here - write_wx gets the lock if each time | ||
1854 | * write_lock_irqsave(&adapter->adapter_lock, flags); | ||
1855 | * netxen_nic_pci_change_crbwindow_128M(adapter, 0); | ||
1856 | */ | ||
1857 | |||
1858 | for (i = 0; i < loop; i++) { | ||
1859 | temp = off8 + (i << 3); | ||
1860 | adapter->hw_write_wx(adapter, | ||
1861 | mem_crb + MIU_TEST_AGT_ADDR_LO, &temp, 4); | ||
1862 | temp = 0; | ||
1863 | adapter->hw_write_wx(adapter, | ||
1864 | mem_crb + MIU_TEST_AGT_ADDR_HI, &temp, 4); | ||
1865 | temp = MIU_TA_CTL_ENABLE; | ||
1866 | adapter->hw_write_wx(adapter, | ||
1867 | mem_crb + MIU_TEST_AGT_CTRL, &temp, 4); | ||
1868 | temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE; | ||
1869 | adapter->hw_write_wx(adapter, | ||
1870 | mem_crb + MIU_TEST_AGT_CTRL, &temp, 4); | ||
1871 | |||
1872 | for (j = 0; j < MAX_CTL_CHECK; j++) { | ||
1873 | adapter->hw_read_wx(adapter, | ||
1874 | mem_crb + MIU_TEST_AGT_CTRL, &temp, 4); | ||
1875 | if ((temp & MIU_TA_CTL_BUSY) == 0) | ||
1876 | break; | ||
1877 | } | ||
1878 | |||
1879 | if (j >= MAX_CTL_CHECK) { | ||
1880 | printk(KERN_ERR "%s: Fail to read through agent\n", | ||
1881 | netxen_nic_driver_name); | ||
1882 | break; | ||
1883 | } | ||
1884 | |||
1885 | start = off0[i] >> 2; | ||
1886 | end = (off0[i] + sz[i] - 1) >> 2; | ||
1887 | for (k = start; k <= end; k++) { | ||
1888 | adapter->hw_read_wx(adapter, | ||
1889 | mem_crb + MIU_TEST_AGT_RDDATA(k), &temp, 4); | ||
1890 | word[i] |= ((uint64_t)temp << (32 * k)); | ||
1891 | } | ||
1892 | } | ||
1893 | |||
1894 | /* | ||
1895 | * netxen_nic_pci_change_crbwindow_128M(adapter, 1); | ||
1896 | * write_unlock_irqrestore(&adapter->adapter_lock, flags); | ||
1897 | */ | ||
1898 | |||
1899 | if (j >= MAX_CTL_CHECK) | ||
1900 | return -1; | ||
1901 | |||
1902 | if (sz[0] == 8) { | ||
1903 | val = word[0]; | ||
1904 | } else { | ||
1905 | val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) | | ||
1906 | ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8)); | ||
1907 | } | ||
1908 | |||
1909 | switch (size) { | ||
1910 | case 1: | ||
1911 | *(uint8_t *)data = val; | ||
1912 | break; | ||
1913 | case 2: | ||
1914 | *(uint16_t *)data = val; | ||
1915 | break; | ||
1916 | case 4: | ||
1917 | *(uint32_t *)data = val; | ||
1918 | break; | ||
1919 | case 8: | ||
1920 | *(uint64_t *)data = val; | ||
1921 | break; | ||
1922 | } | ||
1923 | DPRINTK(1, INFO, "read %llx\n", *(unsigned long long *)data); | ||
1924 | return 0; | ||
1925 | } | ||
1926 | |||
1927 | /* | ||
1928 | * Note : only 32-bit writes! | ||
1929 | */ | ||
1930 | int netxen_nic_pci_write_immediate_2M(struct netxen_adapter *adapter, | ||
1931 | u64 off, u32 data) | ||
1932 | { | ||
1933 | adapter->hw_write_wx(adapter, off, &data, 4); | ||
1934 | |||
1935 | return 0; | ||
1936 | } | ||
1937 | |||
1938 | u32 netxen_nic_pci_read_immediate_2M(struct netxen_adapter *adapter, u64 off) | ||
1939 | { | ||
1940 | u32 temp; | ||
1941 | adapter->hw_read_wx(adapter, off, &temp, 4); | ||
1942 | return temp; | ||
1943 | } | ||
1944 | |||
1945 | void netxen_nic_pci_write_normalize_2M(struct netxen_adapter *adapter, | ||
1946 | u64 off, u32 data) | ||
1947 | { | ||
1948 | adapter->hw_write_wx(adapter, off, &data, 4); | ||
1949 | } | ||
1950 | |||
1951 | u32 netxen_nic_pci_read_normalize_2M(struct netxen_adapter *adapter, u64 off) | ||
1952 | { | ||
1953 | u32 temp; | ||
1954 | adapter->hw_read_wx(adapter, off, &temp, 4); | ||
1955 | return temp; | ||
1956 | } | ||
1957 | |||
951 | #if 0 | 1958 | #if 0 |
952 | int | 1959 | int |
953 | netxen_nic_erase_pxe(struct netxen_adapter *adapter) | 1960 | netxen_nic_erase_pxe(struct netxen_adapter *adapter) |
@@ -1003,12 +2010,25 @@ int netxen_nic_get_board_info(struct netxen_adapter *adapter) | |||
1003 | case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ: | 2010 | case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ: |
1004 | case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ: | 2011 | case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ: |
1005 | case NETXEN_BRDTYPE_P2_SB31_10G_CX4: | 2012 | case NETXEN_BRDTYPE_P2_SB31_10G_CX4: |
2013 | case NETXEN_BRDTYPE_P3_HMEZ: | ||
2014 | case NETXEN_BRDTYPE_P3_XG_LOM: | ||
2015 | case NETXEN_BRDTYPE_P3_10G_CX4: | ||
2016 | case NETXEN_BRDTYPE_P3_10G_CX4_LP: | ||
2017 | case NETXEN_BRDTYPE_P3_IMEZ: | ||
2018 | case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: | ||
2019 | case NETXEN_BRDTYPE_P3_10G_XFP: | ||
2020 | case NETXEN_BRDTYPE_P3_10000_BASE_T: | ||
2021 | |||
1006 | adapter->ahw.board_type = NETXEN_NIC_XGBE; | 2022 | adapter->ahw.board_type = NETXEN_NIC_XGBE; |
1007 | break; | 2023 | break; |
1008 | case NETXEN_BRDTYPE_P1_BD: | 2024 | case NETXEN_BRDTYPE_P1_BD: |
1009 | case NETXEN_BRDTYPE_P1_SB: | 2025 | case NETXEN_BRDTYPE_P1_SB: |
1010 | case NETXEN_BRDTYPE_P1_SMAX: | 2026 | case NETXEN_BRDTYPE_P1_SMAX: |
1011 | case NETXEN_BRDTYPE_P1_SOCK: | 2027 | case NETXEN_BRDTYPE_P1_SOCK: |
2028 | case NETXEN_BRDTYPE_P3_REF_QG: | ||
2029 | case NETXEN_BRDTYPE_P3_4_GB: | ||
2030 | case NETXEN_BRDTYPE_P3_4_GB_MM: | ||
2031 | |||
1012 | adapter->ahw.board_type = NETXEN_NIC_GBE; | 2032 | adapter->ahw.board_type = NETXEN_NIC_GBE; |
1013 | break; | 2033 | break; |
1014 | default: | 2034 | default: |
@@ -1042,25 +2062,11 @@ int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu) | |||
1042 | return 0; | 2062 | return 0; |
1043 | } | 2063 | } |
1044 | 2064 | ||
1045 | void netxen_nic_init_niu_gb(struct netxen_adapter *adapter) | ||
1046 | { | ||
1047 | netxen_niu_gbe_init_port(adapter, adapter->physical_port); | ||
1048 | } | ||
1049 | |||
1050 | void | 2065 | void |
1051 | netxen_crb_writelit_adapter(struct netxen_adapter *adapter, unsigned long off, | 2066 | netxen_crb_writelit_adapter(struct netxen_adapter *adapter, |
1052 | int data) | 2067 | unsigned long off, int data) |
1053 | { | 2068 | { |
1054 | void __iomem *addr; | 2069 | adapter->hw_write_wx(adapter, off, &data, 4); |
1055 | |||
1056 | if (ADDR_IN_WINDOW1(off)) { | ||
1057 | writel(data, NETXEN_CRB_NORMALIZE(adapter, off)); | ||
1058 | } else { | ||
1059 | netxen_nic_pci_change_crbwindow(adapter, 0); | ||
1060 | addr = pci_base_offset(adapter, off); | ||
1061 | writel(data, addr); | ||
1062 | netxen_nic_pci_change_crbwindow(adapter, 1); | ||
1063 | } | ||
1064 | } | 2070 | } |
1065 | 2071 | ||
1066 | void netxen_nic_set_link_parameters(struct netxen_adapter *adapter) | 2072 | void netxen_nic_set_link_parameters(struct netxen_adapter *adapter) |
@@ -1147,12 +2153,11 @@ void netxen_nic_flash_print(struct netxen_adapter *adapter) | |||
1147 | addr += sizeof(u32); | 2153 | addr += sizeof(u32); |
1148 | } | 2154 | } |
1149 | 2155 | ||
1150 | fw_major = readl(NETXEN_CRB_NORMALIZE(adapter, | 2156 | adapter->hw_read_wx(adapter, NETXEN_FW_VERSION_MAJOR, &fw_major, 4); |
1151 | NETXEN_FW_VERSION_MAJOR)); | 2157 | adapter->hw_read_wx(adapter, NETXEN_FW_VERSION_MINOR, &fw_minor, 4); |
1152 | fw_minor = readl(NETXEN_CRB_NORMALIZE(adapter, | 2158 | adapter->hw_read_wx(adapter, NETXEN_FW_VERSION_SUB, &fw_build, 4); |
1153 | NETXEN_FW_VERSION_MINOR)); | 2159 | |
1154 | fw_build = | 2160 | adapter->fw_major = fw_major; |
1155 | readl(NETXEN_CRB_NORMALIZE(adapter, NETXEN_FW_VERSION_SUB)); | ||
1156 | 2161 | ||
1157 | if (adapter->portnum == 0) { | 2162 | if (adapter->portnum == 0) { |
1158 | get_brd_name_by_type(board_info->board_type, brd_name); | 2163 | get_brd_name_by_type(board_info->board_type, brd_name); |
@@ -1163,28 +2168,13 @@ void netxen_nic_flash_print(struct netxen_adapter *adapter) | |||
1163 | fw_minor, fw_build); | 2168 | fw_minor, fw_build); |
1164 | } | 2169 | } |
1165 | 2170 | ||
1166 | if (fw_major != _NETXEN_NIC_LINUX_MAJOR) { | 2171 | if (NETXEN_VERSION_CODE(fw_major, fw_minor, fw_build) < |
1167 | adapter->driver_mismatch = 1; | 2172 | NETXEN_VERSION_CODE(3, 4, 216)) { |
1168 | } | ||
1169 | if (fw_minor != _NETXEN_NIC_LINUX_MINOR && | ||
1170 | fw_minor != (_NETXEN_NIC_LINUX_MINOR + 1)) { | ||
1171 | adapter->driver_mismatch = 1; | 2173 | adapter->driver_mismatch = 1; |
1172 | } | 2174 | printk(KERN_ERR "%s: firmware version %d.%d.%d unsupported\n", |
1173 | if (adapter->driver_mismatch) { | 2175 | netxen_nic_driver_name, |
1174 | printk(KERN_ERR "%s: driver and firmware version mismatch\n", | 2176 | fw_major, fw_minor, fw_build); |
1175 | adapter->netdev->name); | ||
1176 | return; | 2177 | return; |
1177 | } | 2178 | } |
1178 | |||
1179 | switch (adapter->ahw.board_type) { | ||
1180 | case NETXEN_NIC_GBE: | ||
1181 | dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n", | ||
1182 | adapter->netdev->name); | ||
1183 | break; | ||
1184 | case NETXEN_NIC_XGBE: | ||
1185 | dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n", | ||
1186 | adapter->netdev->name); | ||
1187 | break; | ||
1188 | } | ||
1189 | } | 2179 | } |
1190 | 2180 | ||
diff --git a/drivers/net/netxen/netxen_nic_hw.h b/drivers/net/netxen/netxen_nic_hw.h index a3ea1dd98c41..b8e0030f03d7 100644 --- a/drivers/net/netxen/netxen_nic_hw.h +++ b/drivers/net/netxen/netxen_nic_hw.h | |||
@@ -82,19 +82,9 @@ struct netxen_adapter; | |||
82 | 82 | ||
83 | #define NETXEN_PCI_MAPSIZE_BYTES (NETXEN_PCI_MAPSIZE << 20) | 83 | #define NETXEN_PCI_MAPSIZE_BYTES (NETXEN_PCI_MAPSIZE << 20) |
84 | 84 | ||
85 | #define NETXEN_NIC_LOCKED_READ_REG(X, Y) \ | ||
86 | addr = pci_base_offset(adapter, X); \ | ||
87 | *(u32 *)Y = readl((void __iomem*) addr); | ||
88 | |||
89 | struct netxen_port; | 85 | struct netxen_port; |
90 | void netxen_nic_set_link_parameters(struct netxen_adapter *adapter); | 86 | void netxen_nic_set_link_parameters(struct netxen_adapter *adapter); |
91 | void netxen_nic_flash_print(struct netxen_adapter *adapter); | 87 | void netxen_nic_flash_print(struct netxen_adapter *adapter); |
92 | int netxen_nic_hw_write_wx(struct netxen_adapter *adapter, u64 off, | ||
93 | void *data, int len); | ||
94 | void netxen_crb_writelit_adapter(struct netxen_adapter *adapter, | ||
95 | unsigned long off, int data); | ||
96 | int netxen_nic_hw_read_wx(struct netxen_adapter *adapter, u64 off, | ||
97 | void *data, int len); | ||
98 | 88 | ||
99 | typedef u8 netxen_ethernet_macaddr_t[6]; | 89 | typedef u8 netxen_ethernet_macaddr_t[6]; |
100 | 90 | ||
@@ -432,7 +422,8 @@ typedef enum { | |||
432 | /* Promiscous mode options (GbE mode only) */ | 422 | /* Promiscous mode options (GbE mode only) */ |
433 | typedef enum { | 423 | typedef enum { |
434 | NETXEN_NIU_PROMISC_MODE = 0, | 424 | NETXEN_NIU_PROMISC_MODE = 0, |
435 | NETXEN_NIU_NON_PROMISC_MODE | 425 | NETXEN_NIU_NON_PROMISC_MODE, |
426 | NETXEN_NIU_ALLMULTI_MODE | ||
436 | } netxen_niu_prom_mode_t; | 427 | } netxen_niu_prom_mode_t; |
437 | 428 | ||
438 | /* | 429 | /* |
@@ -478,42 +469,6 @@ typedef enum { | |||
478 | #define netxen_xg_soft_reset(config_word) \ | 469 | #define netxen_xg_soft_reset(config_word) \ |
479 | ((config_word) |= 1 << 4) | 470 | ((config_word) |= 1 << 4) |
480 | 471 | ||
481 | /* | ||
482 | * MAC Control Register | ||
483 | * | ||
484 | * Bit 0-1 : id_pool0 | ||
485 | * Bit 2 : enable_xtnd0 | ||
486 | * Bit 4-5 : id_pool1 | ||
487 | * Bit 6 : enable_xtnd1 | ||
488 | * Bit 8-9 : id_pool2 | ||
489 | * Bit 10 : enable_xtnd2 | ||
490 | * Bit 12-13 : id_pool3 | ||
491 | * Bit 14 : enable_xtnd3 | ||
492 | * Bit 24-25 : mode_select | ||
493 | * Bit 28-31 : enable_pool | ||
494 | */ | ||
495 | |||
496 | #define netxen_nic_mcr_set_id_pool0(config, val) \ | ||
497 | ((config) |= ((val) &0x03)) | ||
498 | #define netxen_nic_mcr_set_enable_xtnd0(config) \ | ||
499 | ((config) |= 1 << 3) | ||
500 | #define netxen_nic_mcr_set_id_pool1(config, val) \ | ||
501 | ((config) |= (((val) & 0x03) << 4)) | ||
502 | #define netxen_nic_mcr_set_enable_xtnd1(config) \ | ||
503 | ((config) |= 1 << 6) | ||
504 | #define netxen_nic_mcr_set_id_pool2(config, val) \ | ||
505 | ((config) |= (((val) & 0x03) << 8)) | ||
506 | #define netxen_nic_mcr_set_enable_xtnd2(config) \ | ||
507 | ((config) |= 1 << 10) | ||
508 | #define netxen_nic_mcr_set_id_pool3(config, val) \ | ||
509 | ((config) |= (((val) & 0x03) << 12)) | ||
510 | #define netxen_nic_mcr_set_enable_xtnd3(config) \ | ||
511 | ((config) |= 1 << 14) | ||
512 | #define netxen_nic_mcr_set_mode_select(config, val) \ | ||
513 | ((config) |= (((val) & 0x03) << 24)) | ||
514 | #define netxen_nic_mcr_set_enable_pool(config, val) \ | ||
515 | ((config) |= (((val) & 0x0f) << 28)) | ||
516 | |||
517 | /* Set promiscuous mode for a GbE interface */ | 472 | /* Set promiscuous mode for a GbE interface */ |
518 | int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter, | 473 | int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter, |
519 | netxen_niu_prom_mode_t mode); | 474 | netxen_niu_prom_mode_t mode); |
@@ -538,4 +493,15 @@ int netxen_niu_disable_gbe_port(struct netxen_adapter *adapter); | |||
538 | 493 | ||
539 | int netxen_niu_disable_xg_port(struct netxen_adapter *adapter); | 494 | int netxen_niu_disable_xg_port(struct netxen_adapter *adapter); |
540 | 495 | ||
496 | typedef struct { | ||
497 | unsigned valid; | ||
498 | unsigned start_128M; | ||
499 | unsigned end_128M; | ||
500 | unsigned start_2M; | ||
501 | } crb_128M_2M_sub_block_map_t; | ||
502 | |||
503 | typedef struct { | ||
504 | crb_128M_2M_sub_block_map_t sub_block[16]; | ||
505 | } crb_128M_2M_block_map_t; | ||
506 | |||
541 | #endif /* __NETXEN_NIC_HW_H_ */ | 507 | #endif /* __NETXEN_NIC_HW_H_ */ |
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index 70d1b22ced22..01ab31b34a85 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c | |||
@@ -42,8 +42,6 @@ struct crb_addr_pair { | |||
42 | u32 data; | 42 | u32 data; |
43 | }; | 43 | }; |
44 | 44 | ||
45 | unsigned long last_schedule_time; | ||
46 | |||
47 | #define NETXEN_MAX_CRB_XFORM 60 | 45 | #define NETXEN_MAX_CRB_XFORM 60 |
48 | static unsigned int crb_addr_xform[NETXEN_MAX_CRB_XFORM]; | 46 | static unsigned int crb_addr_xform[NETXEN_MAX_CRB_XFORM]; |
49 | #define NETXEN_ADDR_ERROR (0xffffffff) | 47 | #define NETXEN_ADDR_ERROR (0xffffffff) |
@@ -117,6 +115,8 @@ static void crb_addr_transform_setup(void) | |||
117 | crb_addr_transform(C2C1); | 115 | crb_addr_transform(C2C1); |
118 | crb_addr_transform(C2C0); | 116 | crb_addr_transform(C2C0); |
119 | crb_addr_transform(SMB); | 117 | crb_addr_transform(SMB); |
118 | crb_addr_transform(OCM0); | ||
119 | crb_addr_transform(I2C0); | ||
120 | } | 120 | } |
121 | 121 | ||
122 | int netxen_init_firmware(struct netxen_adapter *adapter) | 122 | int netxen_init_firmware(struct netxen_adapter *adapter) |
@@ -124,15 +124,15 @@ int netxen_init_firmware(struct netxen_adapter *adapter) | |||
124 | u32 state = 0, loops = 0, err = 0; | 124 | u32 state = 0, loops = 0, err = 0; |
125 | 125 | ||
126 | /* Window 1 call */ | 126 | /* Window 1 call */ |
127 | state = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE)); | 127 | state = adapter->pci_read_normalize(adapter, CRB_CMDPEG_STATE); |
128 | 128 | ||
129 | if (state == PHAN_INITIALIZE_ACK) | 129 | if (state == PHAN_INITIALIZE_ACK) |
130 | return 0; | 130 | return 0; |
131 | 131 | ||
132 | while (state != PHAN_INITIALIZE_COMPLETE && loops < 2000) { | 132 | while (state != PHAN_INITIALIZE_COMPLETE && loops < 2000) { |
133 | udelay(100); | 133 | msleep(1); |
134 | /* Window 1 call */ | 134 | /* Window 1 call */ |
135 | state = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE)); | 135 | state = adapter->pci_read_normalize(adapter, CRB_CMDPEG_STATE); |
136 | 136 | ||
137 | loops++; | 137 | loops++; |
138 | } | 138 | } |
@@ -143,64 +143,193 @@ int netxen_init_firmware(struct netxen_adapter *adapter) | |||
143 | return err; | 143 | return err; |
144 | } | 144 | } |
145 | /* Window 1 call */ | 145 | /* Window 1 call */ |
146 | writel(INTR_SCHEME_PERPORT, | 146 | adapter->pci_write_normalize(adapter, |
147 | NETXEN_CRB_NORMALIZE(adapter, CRB_NIC_CAPABILITIES_HOST)); | 147 | CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT); |
148 | writel(MSI_MODE_MULTIFUNC, | 148 | adapter->pci_write_normalize(adapter, |
149 | NETXEN_CRB_NORMALIZE(adapter, CRB_NIC_MSI_MODE_HOST)); | 149 | CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC); |
150 | writel(MPORT_MULTI_FUNCTION_MODE, | 150 | adapter->pci_write_normalize(adapter, |
151 | NETXEN_CRB_NORMALIZE(adapter, CRB_MPORT_MODE)); | 151 | CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE); |
152 | writel(PHAN_INITIALIZE_ACK, | 152 | adapter->pci_write_normalize(adapter, |
153 | NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE)); | 153 | CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK); |
154 | 154 | ||
155 | return err; | 155 | return err; |
156 | } | 156 | } |
157 | 157 | ||
158 | #define NETXEN_ADDR_LIMIT 0xffffffffULL | 158 | void netxen_release_rx_buffers(struct netxen_adapter *adapter) |
159 | { | ||
160 | struct netxen_recv_context *recv_ctx; | ||
161 | struct nx_host_rds_ring *rds_ring; | ||
162 | struct netxen_rx_buffer *rx_buf; | ||
163 | int i, ctxid, ring; | ||
164 | |||
165 | for (ctxid = 0; ctxid < MAX_RCV_CTX; ++ctxid) { | ||
166 | recv_ctx = &adapter->recv_ctx[ctxid]; | ||
167 | for (ring = 0; ring < adapter->max_rds_rings; ring++) { | ||
168 | rds_ring = &recv_ctx->rds_rings[ring]; | ||
169 | for (i = 0; i < rds_ring->max_rx_desc_count; ++i) { | ||
170 | rx_buf = &(rds_ring->rx_buf_arr[i]); | ||
171 | if (rx_buf->state == NETXEN_BUFFER_FREE) | ||
172 | continue; | ||
173 | pci_unmap_single(adapter->pdev, | ||
174 | rx_buf->dma, | ||
175 | rds_ring->dma_size, | ||
176 | PCI_DMA_FROMDEVICE); | ||
177 | if (rx_buf->skb != NULL) | ||
178 | dev_kfree_skb_any(rx_buf->skb); | ||
179 | } | ||
180 | } | ||
181 | } | ||
182 | } | ||
159 | 183 | ||
160 | void *netxen_alloc(struct pci_dev *pdev, size_t sz, dma_addr_t * ptr, | 184 | void netxen_release_tx_buffers(struct netxen_adapter *adapter) |
161 | struct pci_dev **used_dev) | ||
162 | { | 185 | { |
163 | void *addr; | 186 | struct netxen_cmd_buffer *cmd_buf; |
187 | struct netxen_skb_frag *buffrag; | ||
188 | int i, j; | ||
189 | |||
190 | cmd_buf = adapter->cmd_buf_arr; | ||
191 | for (i = 0; i < adapter->max_tx_desc_count; i++) { | ||
192 | buffrag = cmd_buf->frag_array; | ||
193 | if (buffrag->dma) { | ||
194 | pci_unmap_single(adapter->pdev, buffrag->dma, | ||
195 | buffrag->length, PCI_DMA_TODEVICE); | ||
196 | buffrag->dma = 0ULL; | ||
197 | } | ||
198 | for (j = 0; j < cmd_buf->frag_count; j++) { | ||
199 | buffrag++; | ||
200 | if (buffrag->dma) { | ||
201 | pci_unmap_page(adapter->pdev, buffrag->dma, | ||
202 | buffrag->length, | ||
203 | PCI_DMA_TODEVICE); | ||
204 | buffrag->dma = 0ULL; | ||
205 | } | ||
206 | } | ||
207 | /* Free the skb we received in netxen_nic_xmit_frame */ | ||
208 | if (cmd_buf->skb) { | ||
209 | dev_kfree_skb_any(cmd_buf->skb); | ||
210 | cmd_buf->skb = NULL; | ||
211 | } | ||
212 | cmd_buf++; | ||
213 | } | ||
214 | } | ||
164 | 215 | ||
165 | addr = pci_alloc_consistent(pdev, sz, ptr); | 216 | void netxen_free_sw_resources(struct netxen_adapter *adapter) |
166 | if ((unsigned long long)(*ptr) < NETXEN_ADDR_LIMIT) { | 217 | { |
167 | *used_dev = pdev; | 218 | struct netxen_recv_context *recv_ctx; |
168 | return addr; | 219 | struct nx_host_rds_ring *rds_ring; |
220 | int ctx, ring; | ||
221 | |||
222 | for (ctx = 0; ctx < MAX_RCV_CTX; ctx++) { | ||
223 | recv_ctx = &adapter->recv_ctx[ctx]; | ||
224 | for (ring = 0; ring < adapter->max_rds_rings; ring++) { | ||
225 | rds_ring = &recv_ctx->rds_rings[ring]; | ||
226 | if (rds_ring->rx_buf_arr) { | ||
227 | vfree(rds_ring->rx_buf_arr); | ||
228 | rds_ring->rx_buf_arr = NULL; | ||
229 | } | ||
230 | } | ||
169 | } | 231 | } |
170 | pci_free_consistent(pdev, sz, addr, *ptr); | 232 | if (adapter->cmd_buf_arr) |
171 | addr = pci_alloc_consistent(NULL, sz, ptr); | 233 | vfree(adapter->cmd_buf_arr); |
172 | *used_dev = NULL; | 234 | return; |
173 | return addr; | ||
174 | } | 235 | } |
175 | 236 | ||
176 | void netxen_initialize_adapter_sw(struct netxen_adapter *adapter) | 237 | int netxen_alloc_sw_resources(struct netxen_adapter *adapter) |
177 | { | 238 | { |
178 | int ctxid, ring; | 239 | struct netxen_recv_context *recv_ctx; |
179 | u32 i; | 240 | struct nx_host_rds_ring *rds_ring; |
180 | u32 num_rx_bufs = 0; | 241 | struct netxen_rx_buffer *rx_buf; |
181 | struct netxen_rcv_desc_ctx *rcv_desc; | 242 | int ctx, ring, i, num_rx_bufs; |
182 | 243 | ||
183 | DPRINTK(INFO, "initializing some queues: %p\n", adapter); | 244 | struct netxen_cmd_buffer *cmd_buf_arr; |
184 | for (ctxid = 0; ctxid < MAX_RCV_CTX; ++ctxid) { | 245 | struct net_device *netdev = adapter->netdev; |
185 | for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) { | 246 | |
186 | struct netxen_rx_buffer *rx_buf; | 247 | cmd_buf_arr = (struct netxen_cmd_buffer *)vmalloc(TX_RINGSIZE); |
187 | rcv_desc = &adapter->recv_ctx[ctxid].rcv_desc[ring]; | 248 | if (cmd_buf_arr == NULL) { |
188 | rcv_desc->begin_alloc = 0; | 249 | printk(KERN_ERR "%s: Failed to allocate cmd buffer ring\n", |
189 | rx_buf = rcv_desc->rx_buf_arr; | 250 | netdev->name); |
190 | num_rx_bufs = rcv_desc->max_rx_desc_count; | 251 | return -ENOMEM; |
252 | } | ||
253 | memset(cmd_buf_arr, 0, TX_RINGSIZE); | ||
254 | adapter->cmd_buf_arr = cmd_buf_arr; | ||
255 | |||
256 | for (ctx = 0; ctx < MAX_RCV_CTX; ctx++) { | ||
257 | recv_ctx = &adapter->recv_ctx[ctx]; | ||
258 | for (ring = 0; ring < adapter->max_rds_rings; ring++) { | ||
259 | rds_ring = &recv_ctx->rds_rings[ring]; | ||
260 | switch (RCV_DESC_TYPE(ring)) { | ||
261 | case RCV_DESC_NORMAL: | ||
262 | rds_ring->max_rx_desc_count = | ||
263 | adapter->max_rx_desc_count; | ||
264 | rds_ring->flags = RCV_DESC_NORMAL; | ||
265 | if (adapter->ahw.cut_through) { | ||
266 | rds_ring->dma_size = | ||
267 | NX_CT_DEFAULT_RX_BUF_LEN; | ||
268 | rds_ring->skb_size = | ||
269 | NX_CT_DEFAULT_RX_BUF_LEN; | ||
270 | } else { | ||
271 | rds_ring->dma_size = RX_DMA_MAP_LEN; | ||
272 | rds_ring->skb_size = | ||
273 | MAX_RX_BUFFER_LENGTH; | ||
274 | } | ||
275 | break; | ||
276 | |||
277 | case RCV_DESC_JUMBO: | ||
278 | rds_ring->max_rx_desc_count = | ||
279 | adapter->max_jumbo_rx_desc_count; | ||
280 | rds_ring->flags = RCV_DESC_JUMBO; | ||
281 | if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) | ||
282 | rds_ring->dma_size = | ||
283 | NX_P3_RX_JUMBO_BUF_MAX_LEN; | ||
284 | else | ||
285 | rds_ring->dma_size = | ||
286 | NX_P2_RX_JUMBO_BUF_MAX_LEN; | ||
287 | rds_ring->skb_size = | ||
288 | rds_ring->dma_size + NET_IP_ALIGN; | ||
289 | break; | ||
290 | |||
291 | case RCV_RING_LRO: | ||
292 | rds_ring->max_rx_desc_count = | ||
293 | adapter->max_lro_rx_desc_count; | ||
294 | rds_ring->flags = RCV_DESC_LRO; | ||
295 | rds_ring->dma_size = RX_LRO_DMA_MAP_LEN; | ||
296 | rds_ring->skb_size = MAX_RX_LRO_BUFFER_LENGTH; | ||
297 | break; | ||
298 | |||
299 | } | ||
300 | rds_ring->rx_buf_arr = (struct netxen_rx_buffer *) | ||
301 | vmalloc(RCV_BUFFSIZE); | ||
302 | if (rds_ring->rx_buf_arr == NULL) { | ||
303 | printk(KERN_ERR "%s: Failed to allocate " | ||
304 | "rx buffer ring %d\n", | ||
305 | netdev->name, ring); | ||
306 | /* free whatever was already allocated */ | ||
307 | goto err_out; | ||
308 | } | ||
309 | memset(rds_ring->rx_buf_arr, 0, RCV_BUFFSIZE); | ||
310 | INIT_LIST_HEAD(&rds_ring->free_list); | ||
311 | rds_ring->begin_alloc = 0; | ||
191 | /* | 312 | /* |
192 | * Now go through all of them, set reference handles | 313 | * Now go through all of them, set reference handles |
193 | * and put them in the queues. | 314 | * and put them in the queues. |
194 | */ | 315 | */ |
316 | num_rx_bufs = rds_ring->max_rx_desc_count; | ||
317 | rx_buf = rds_ring->rx_buf_arr; | ||
195 | for (i = 0; i < num_rx_bufs; i++) { | 318 | for (i = 0; i < num_rx_bufs; i++) { |
319 | list_add_tail(&rx_buf->list, | ||
320 | &rds_ring->free_list); | ||
196 | rx_buf->ref_handle = i; | 321 | rx_buf->ref_handle = i; |
197 | rx_buf->state = NETXEN_BUFFER_FREE; | 322 | rx_buf->state = NETXEN_BUFFER_FREE; |
198 | DPRINTK(INFO, "Rx buf:ctx%d i(%d) rx_buf:" | ||
199 | "%p\n", ctxid, i, rx_buf); | ||
200 | rx_buf++; | 323 | rx_buf++; |
201 | } | 324 | } |
202 | } | 325 | } |
203 | } | 326 | } |
327 | |||
328 | return 0; | ||
329 | |||
330 | err_out: | ||
331 | netxen_free_sw_resources(adapter); | ||
332 | return -ENOMEM; | ||
204 | } | 333 | } |
205 | 334 | ||
206 | void netxen_initialize_adapter_ops(struct netxen_adapter *adapter) | 335 | void netxen_initialize_adapter_ops(struct netxen_adapter *adapter) |
@@ -211,14 +340,12 @@ void netxen_initialize_adapter_ops(struct netxen_adapter *adapter) | |||
211 | netxen_niu_gbe_enable_phy_interrupts; | 340 | netxen_niu_gbe_enable_phy_interrupts; |
212 | adapter->disable_phy_interrupts = | 341 | adapter->disable_phy_interrupts = |
213 | netxen_niu_gbe_disable_phy_interrupts; | 342 | netxen_niu_gbe_disable_phy_interrupts; |
214 | adapter->handle_phy_intr = netxen_nic_gbe_handle_phy_intr; | ||
215 | adapter->macaddr_set = netxen_niu_macaddr_set; | 343 | adapter->macaddr_set = netxen_niu_macaddr_set; |
216 | adapter->set_mtu = netxen_nic_set_mtu_gb; | 344 | adapter->set_mtu = netxen_nic_set_mtu_gb; |
217 | adapter->set_promisc = netxen_niu_set_promiscuous_mode; | 345 | adapter->set_promisc = netxen_niu_set_promiscuous_mode; |
218 | adapter->unset_promisc = netxen_niu_set_promiscuous_mode; | ||
219 | adapter->phy_read = netxen_niu_gbe_phy_read; | 346 | adapter->phy_read = netxen_niu_gbe_phy_read; |
220 | adapter->phy_write = netxen_niu_gbe_phy_write; | 347 | adapter->phy_write = netxen_niu_gbe_phy_write; |
221 | adapter->init_niu = netxen_nic_init_niu_gb; | 348 | adapter->init_port = netxen_niu_gbe_init_port; |
222 | adapter->stop_port = netxen_niu_disable_gbe_port; | 349 | adapter->stop_port = netxen_niu_disable_gbe_port; |
223 | break; | 350 | break; |
224 | 351 | ||
@@ -227,12 +354,10 @@ void netxen_initialize_adapter_ops(struct netxen_adapter *adapter) | |||
227 | netxen_niu_xgbe_enable_phy_interrupts; | 354 | netxen_niu_xgbe_enable_phy_interrupts; |
228 | adapter->disable_phy_interrupts = | 355 | adapter->disable_phy_interrupts = |
229 | netxen_niu_xgbe_disable_phy_interrupts; | 356 | netxen_niu_xgbe_disable_phy_interrupts; |
230 | adapter->handle_phy_intr = netxen_nic_xgbe_handle_phy_intr; | ||
231 | adapter->macaddr_set = netxen_niu_xg_macaddr_set; | 357 | adapter->macaddr_set = netxen_niu_xg_macaddr_set; |
232 | adapter->set_mtu = netxen_nic_set_mtu_xgb; | 358 | adapter->set_mtu = netxen_nic_set_mtu_xgb; |
233 | adapter->init_port = netxen_niu_xg_init_port; | 359 | adapter->init_port = netxen_niu_xg_init_port; |
234 | adapter->set_promisc = netxen_niu_xg_set_promiscuous_mode; | 360 | adapter->set_promisc = netxen_niu_xg_set_promiscuous_mode; |
235 | adapter->unset_promisc = netxen_niu_xg_set_promiscuous_mode; | ||
236 | adapter->stop_port = netxen_niu_disable_xg_port; | 361 | adapter->stop_port = netxen_niu_disable_xg_port; |
237 | break; | 362 | break; |
238 | 363 | ||
@@ -270,7 +395,9 @@ static u32 netxen_decode_crb_addr(u32 addr) | |||
270 | 395 | ||
271 | static long rom_max_timeout = 100; | 396 | static long rom_max_timeout = 100; |
272 | static long rom_lock_timeout = 10000; | 397 | static long rom_lock_timeout = 10000; |
398 | #if 0 | ||
273 | static long rom_write_timeout = 700; | 399 | static long rom_write_timeout = 700; |
400 | #endif | ||
274 | 401 | ||
275 | static int rom_lock(struct netxen_adapter *adapter) | 402 | static int rom_lock(struct netxen_adapter *adapter) |
276 | { | 403 | { |
@@ -319,6 +446,7 @@ static int netxen_wait_rom_done(struct netxen_adapter *adapter) | |||
319 | return 0; | 446 | return 0; |
320 | } | 447 | } |
321 | 448 | ||
449 | #if 0 | ||
322 | static int netxen_rom_wren(struct netxen_adapter *adapter) | 450 | static int netxen_rom_wren(struct netxen_adapter *adapter) |
323 | { | 451 | { |
324 | /* Set write enable latch in ROM status register */ | 452 | /* Set write enable latch in ROM status register */ |
@@ -348,6 +476,7 @@ static int netxen_do_rom_rdsr(struct netxen_adapter *adapter) | |||
348 | } | 476 | } |
349 | return netxen_rdcrbreg(adapter, NETXEN_ROMUSB_ROM_RDATA); | 477 | return netxen_rdcrbreg(adapter, NETXEN_ROMUSB_ROM_RDATA); |
350 | } | 478 | } |
479 | #endif | ||
351 | 480 | ||
352 | static void netxen_rom_unlock(struct netxen_adapter *adapter) | 481 | static void netxen_rom_unlock(struct netxen_adapter *adapter) |
353 | { | 482 | { |
@@ -358,6 +487,7 @@ static void netxen_rom_unlock(struct netxen_adapter *adapter) | |||
358 | 487 | ||
359 | } | 488 | } |
360 | 489 | ||
490 | #if 0 | ||
361 | static int netxen_rom_wip_poll(struct netxen_adapter *adapter) | 491 | static int netxen_rom_wip_poll(struct netxen_adapter *adapter) |
362 | { | 492 | { |
363 | long timeout = 0; | 493 | long timeout = 0; |
@@ -393,6 +523,7 @@ static int do_rom_fast_write(struct netxen_adapter *adapter, int addr, | |||
393 | 523 | ||
394 | return netxen_rom_wip_poll(adapter); | 524 | return netxen_rom_wip_poll(adapter); |
395 | } | 525 | } |
526 | #endif | ||
396 | 527 | ||
397 | static int do_rom_fast_read(struct netxen_adapter *adapter, | 528 | static int do_rom_fast_read(struct netxen_adapter *adapter, |
398 | int addr, int *valp) | 529 | int addr, int *valp) |
@@ -475,7 +606,6 @@ int netxen_rom_fast_write(struct netxen_adapter *adapter, int addr, int data) | |||
475 | netxen_rom_unlock(adapter); | 606 | netxen_rom_unlock(adapter); |
476 | return ret; | 607 | return ret; |
477 | } | 608 | } |
478 | #endif /* 0 */ | ||
479 | 609 | ||
480 | static int do_rom_fast_write_words(struct netxen_adapter *adapter, | 610 | static int do_rom_fast_write_words(struct netxen_adapter *adapter, |
481 | int addr, u8 *bytes, size_t size) | 611 | int addr, u8 *bytes, size_t size) |
@@ -740,28 +870,25 @@ int netxen_flash_unlock(struct netxen_adapter *adapter) | |||
740 | 870 | ||
741 | return ret; | 871 | return ret; |
742 | } | 872 | } |
873 | #endif /* 0 */ | ||
743 | 874 | ||
744 | #define NETXEN_BOARDTYPE 0x4008 | 875 | #define NETXEN_BOARDTYPE 0x4008 |
745 | #define NETXEN_BOARDNUM 0x400c | 876 | #define NETXEN_BOARDNUM 0x400c |
746 | #define NETXEN_CHIPNUM 0x4010 | 877 | #define NETXEN_CHIPNUM 0x4010 |
747 | #define NETXEN_ROMBUS_RESET 0xFFFFFFFF | ||
748 | #define NETXEN_ROM_FIRST_BARRIER 0x800000000ULL | ||
749 | #define NETXEN_ROM_FOUND_INIT 0x400 | ||
750 | 878 | ||
751 | int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose) | 879 | int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose) |
752 | { | 880 | { |
753 | int addr, val; | 881 | int addr, val; |
754 | int n, i; | 882 | int i, init_delay = 0; |
755 | int init_delay = 0; | ||
756 | struct crb_addr_pair *buf; | 883 | struct crb_addr_pair *buf; |
884 | unsigned offset, n; | ||
757 | u32 off; | 885 | u32 off; |
758 | 886 | ||
759 | /* resetall */ | 887 | /* resetall */ |
760 | netxen_crb_writelit_adapter(adapter, NETXEN_ROMUSB_GLB_SW_RESET, | 888 | netxen_crb_writelit_adapter(adapter, NETXEN_ROMUSB_GLB_SW_RESET, |
761 | NETXEN_ROMBUS_RESET); | 889 | 0xffffffff); |
762 | 890 | ||
763 | if (verbose) { | 891 | if (verbose) { |
764 | int val; | ||
765 | if (netxen_rom_fast_read(adapter, NETXEN_BOARDTYPE, &val) == 0) | 892 | if (netxen_rom_fast_read(adapter, NETXEN_BOARDTYPE, &val) == 0) |
766 | printk("P2 ROM board type: 0x%08x\n", val); | 893 | printk("P2 ROM board type: 0x%08x\n", val); |
767 | else | 894 | else |
@@ -776,117 +903,141 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose) | |||
776 | printk("Could not read chip number\n"); | 903 | printk("Could not read chip number\n"); |
777 | } | 904 | } |
778 | 905 | ||
779 | if (netxen_rom_fast_read(adapter, 0, &n) == 0 | 906 | if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { |
780 | && (n & NETXEN_ROM_FIRST_BARRIER)) { | 907 | if (netxen_rom_fast_read(adapter, 0, &n) != 0 || |
781 | n &= ~NETXEN_ROM_ROUNDUP; | 908 | (n != 0xcafecafeUL) || |
782 | if (n < NETXEN_ROM_FOUND_INIT) { | 909 | netxen_rom_fast_read(adapter, 4, &n) != 0) { |
783 | if (verbose) | 910 | printk(KERN_ERR "%s: ERROR Reading crb_init area: " |
784 | printk("%s: %d CRB init values found" | 911 | "n: %08x\n", netxen_nic_driver_name, n); |
785 | " in ROM.\n", netxen_nic_driver_name, n); | ||
786 | } else { | ||
787 | printk("%s:n=0x%x Error! NetXen card flash not" | ||
788 | " initialized.\n", __FUNCTION__, n); | ||
789 | return -EIO; | 912 | return -EIO; |
790 | } | 913 | } |
791 | buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL); | 914 | offset = n & 0xffffU; |
792 | if (buf == NULL) { | 915 | n = (n >> 16) & 0xffffU; |
793 | printk("%s: netxen_pinit_from_rom: Unable to calloc " | 916 | } else { |
794 | "memory.\n", netxen_nic_driver_name); | 917 | if (netxen_rom_fast_read(adapter, 0, &n) != 0 || |
795 | return -ENOMEM; | 918 | !(n & 0x80000000)) { |
796 | } | 919 | printk(KERN_ERR "%s: ERROR Reading crb_init area: " |
797 | for (i = 0; i < n; i++) { | 920 | "n: %08x\n", netxen_nic_driver_name, n); |
798 | if (netxen_rom_fast_read(adapter, 8 * i + 4, &val) != 0 | 921 | return -EIO; |
799 | || netxen_rom_fast_read(adapter, 8 * i + 8, | ||
800 | &addr) != 0) | ||
801 | return -EIO; | ||
802 | |||
803 | buf[i].addr = addr; | ||
804 | buf[i].data = val; | ||
805 | |||
806 | if (verbose) | ||
807 | printk("%s: PCI: 0x%08x == 0x%08x\n", | ||
808 | netxen_nic_driver_name, (unsigned int) | ||
809 | netxen_decode_crb_addr(addr), val); | ||
810 | } | 922 | } |
811 | for (i = 0; i < n; i++) { | 923 | offset = 1; |
924 | n &= ~0x80000000; | ||
925 | } | ||
926 | |||
927 | if (n < 1024) { | ||
928 | if (verbose) | ||
929 | printk(KERN_DEBUG "%s: %d CRB init values found" | ||
930 | " in ROM.\n", netxen_nic_driver_name, n); | ||
931 | } else { | ||
932 | printk(KERN_ERR "%s:n=0x%x Error! NetXen card flash not" | ||
933 | " initialized.\n", __func__, n); | ||
934 | return -EIO; | ||
935 | } | ||
936 | |||
937 | buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL); | ||
938 | if (buf == NULL) { | ||
939 | printk("%s: netxen_pinit_from_rom: Unable to calloc memory.\n", | ||
940 | netxen_nic_driver_name); | ||
941 | return -ENOMEM; | ||
942 | } | ||
943 | for (i = 0; i < n; i++) { | ||
944 | if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 || | ||
945 | netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) | ||
946 | return -EIO; | ||
947 | |||
948 | buf[i].addr = addr; | ||
949 | buf[i].data = val; | ||
812 | 950 | ||
813 | off = netxen_decode_crb_addr(buf[i].addr); | 951 | if (verbose) |
814 | if (off == NETXEN_ADDR_ERROR) { | 952 | printk(KERN_DEBUG "%s: PCI: 0x%08x == 0x%08x\n", |
815 | printk(KERN_ERR"CRB init value out of range %x\n", | 953 | netxen_nic_driver_name, |
954 | (u32)netxen_decode_crb_addr(addr), val); | ||
955 | } | ||
956 | for (i = 0; i < n; i++) { | ||
957 | |||
958 | off = netxen_decode_crb_addr(buf[i].addr); | ||
959 | if (off == NETXEN_ADDR_ERROR) { | ||
960 | printk(KERN_ERR"CRB init value out of range %x\n", | ||
816 | buf[i].addr); | 961 | buf[i].addr); |
962 | continue; | ||
963 | } | ||
964 | off += NETXEN_PCI_CRBSPACE; | ||
965 | /* skipping cold reboot MAGIC */ | ||
966 | if (off == NETXEN_CAM_RAM(0x1fc)) | ||
967 | continue; | ||
968 | |||
969 | if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { | ||
970 | /* do not reset PCI */ | ||
971 | if (off == (ROMUSB_GLB + 0xbc)) | ||
817 | continue; | 972 | continue; |
818 | } | 973 | if (off == (NETXEN_CRB_PEG_NET_1 + 0x18)) |
819 | off += NETXEN_PCI_CRBSPACE; | 974 | buf[i].data = 0x1020; |
820 | /* skipping cold reboot MAGIC */ | 975 | /* skip the function enable register */ |
821 | if (off == NETXEN_CAM_RAM(0x1fc)) | 976 | if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION)) |
977 | continue; | ||
978 | if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION2)) | ||
822 | continue; | 979 | continue; |
980 | if ((off & 0x0ff00000) == NETXEN_CRB_SMB) | ||
981 | continue; | ||
982 | } | ||
823 | 983 | ||
824 | /* After writing this register, HW needs time for CRB */ | 984 | if (off == NETXEN_ADDR_ERROR) { |
825 | /* to quiet down (else crb_window returns 0xffffffff) */ | 985 | printk(KERN_ERR "%s: Err: Unknown addr: 0x%08x\n", |
826 | if (off == NETXEN_ROMUSB_GLB_SW_RESET) { | 986 | netxen_nic_driver_name, buf[i].addr); |
827 | init_delay = 1; | 987 | continue; |
988 | } | ||
989 | |||
990 | /* After writing this register, HW needs time for CRB */ | ||
991 | /* to quiet down (else crb_window returns 0xffffffff) */ | ||
992 | if (off == NETXEN_ROMUSB_GLB_SW_RESET) { | ||
993 | init_delay = 1; | ||
994 | if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { | ||
828 | /* hold xdma in reset also */ | 995 | /* hold xdma in reset also */ |
829 | buf[i].data = NETXEN_NIC_XDMA_RESET; | 996 | buf[i].data = NETXEN_NIC_XDMA_RESET; |
830 | } | 997 | } |
998 | } | ||
831 | 999 | ||
832 | if (ADDR_IN_WINDOW1(off)) { | 1000 | adapter->hw_write_wx(adapter, off, &buf[i].data, 4); |
833 | writel(buf[i].data, | ||
834 | NETXEN_CRB_NORMALIZE(adapter, off)); | ||
835 | } else { | ||
836 | netxen_nic_pci_change_crbwindow(adapter, 0); | ||
837 | writel(buf[i].data, | ||
838 | pci_base_offset(adapter, off)); | ||
839 | 1001 | ||
840 | netxen_nic_pci_change_crbwindow(adapter, 1); | 1002 | if (init_delay == 1) { |
841 | } | 1003 | msleep(1000); |
842 | if (init_delay == 1) { | 1004 | init_delay = 0; |
843 | msleep(1000); | ||
844 | init_delay = 0; | ||
845 | } | ||
846 | msleep(1); | ||
847 | } | 1005 | } |
848 | kfree(buf); | 1006 | msleep(1); |
1007 | } | ||
1008 | kfree(buf); | ||
849 | 1009 | ||
850 | /* disable_peg_cache_all */ | 1010 | /* disable_peg_cache_all */ |
851 | 1011 | ||
852 | /* unreset_net_cache */ | 1012 | /* unreset_net_cache */ |
853 | netxen_nic_hw_read_wx(adapter, NETXEN_ROMUSB_GLB_SW_RESET, &val, | 1013 | if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { |
854 | 4); | 1014 | adapter->hw_read_wx(adapter, |
855 | netxen_crb_writelit_adapter(adapter, NETXEN_ROMUSB_GLB_SW_RESET, | 1015 | NETXEN_ROMUSB_GLB_SW_RESET, &val, 4); |
856 | (val & 0xffffff0f)); | ||
857 | /* p2dn replyCount */ | ||
858 | netxen_crb_writelit_adapter(adapter, | ||
859 | NETXEN_CRB_PEG_NET_D + 0xec, 0x1e); | ||
860 | /* disable_peg_cache 0 */ | ||
861 | netxen_crb_writelit_adapter(adapter, | 1016 | netxen_crb_writelit_adapter(adapter, |
862 | NETXEN_CRB_PEG_NET_D + 0x4c, 8); | 1017 | NETXEN_ROMUSB_GLB_SW_RESET, (val & 0xffffff0f)); |
863 | /* disable_peg_cache 1 */ | ||
864 | netxen_crb_writelit_adapter(adapter, | ||
865 | NETXEN_CRB_PEG_NET_I + 0x4c, 8); | ||
866 | |||
867 | /* peg_clr_all */ | ||
868 | |||
869 | /* peg_clr 0 */ | ||
870 | netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_0 + 0x8, | ||
871 | 0); | ||
872 | netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_0 + 0xc, | ||
873 | 0); | ||
874 | /* peg_clr 1 */ | ||
875 | netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_1 + 0x8, | ||
876 | 0); | ||
877 | netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_1 + 0xc, | ||
878 | 0); | ||
879 | /* peg_clr 2 */ | ||
880 | netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_2 + 0x8, | ||
881 | 0); | ||
882 | netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_2 + 0xc, | ||
883 | 0); | ||
884 | /* peg_clr 3 */ | ||
885 | netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_3 + 0x8, | ||
886 | 0); | ||
887 | netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_3 + 0xc, | ||
888 | 0); | ||
889 | } | 1018 | } |
1019 | |||
1020 | /* p2dn replyCount */ | ||
1021 | netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_D + 0xec, 0x1e); | ||
1022 | /* disable_peg_cache 0 */ | ||
1023 | netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_D + 0x4c, 8); | ||
1024 | /* disable_peg_cache 1 */ | ||
1025 | netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_I + 0x4c, 8); | ||
1026 | |||
1027 | /* peg_clr_all */ | ||
1028 | |||
1029 | /* peg_clr 0 */ | ||
1030 | netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_0 + 0x8, 0); | ||
1031 | netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_0 + 0xc, 0); | ||
1032 | /* peg_clr 1 */ | ||
1033 | netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_1 + 0x8, 0); | ||
1034 | netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_1 + 0xc, 0); | ||
1035 | /* peg_clr 2 */ | ||
1036 | netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_2 + 0x8, 0); | ||
1037 | netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_2 + 0xc, 0); | ||
1038 | /* peg_clr 3 */ | ||
1039 | netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_3 + 0x8, 0); | ||
1040 | netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_3 + 0xc, 0); | ||
890 | return 0; | 1041 | return 0; |
891 | } | 1042 | } |
892 | 1043 | ||
@@ -897,12 +1048,12 @@ int netxen_initialize_adapter_offload(struct netxen_adapter *adapter) | |||
897 | uint32_t lo; | 1048 | uint32_t lo; |
898 | 1049 | ||
899 | adapter->dummy_dma.addr = | 1050 | adapter->dummy_dma.addr = |
900 | pci_alloc_consistent(adapter->ahw.pdev, | 1051 | pci_alloc_consistent(adapter->pdev, |
901 | NETXEN_HOST_DUMMY_DMA_SIZE, | 1052 | NETXEN_HOST_DUMMY_DMA_SIZE, |
902 | &adapter->dummy_dma.phys_addr); | 1053 | &adapter->dummy_dma.phys_addr); |
903 | if (adapter->dummy_dma.addr == NULL) { | 1054 | if (adapter->dummy_dma.addr == NULL) { |
904 | printk("%s: ERROR: Could not allocate dummy DMA memory\n", | 1055 | printk("%s: ERROR: Could not allocate dummy DMA memory\n", |
905 | __FUNCTION__); | 1056 | __func__); |
906 | return -ENOMEM; | 1057 | return -ENOMEM; |
907 | } | 1058 | } |
908 | 1059 | ||
@@ -910,8 +1061,13 @@ int netxen_initialize_adapter_offload(struct netxen_adapter *adapter) | |||
910 | hi = (addr >> 32) & 0xffffffff; | 1061 | hi = (addr >> 32) & 0xffffffff; |
911 | lo = addr & 0xffffffff; | 1062 | lo = addr & 0xffffffff; |
912 | 1063 | ||
913 | writel(hi, NETXEN_CRB_NORMALIZE(adapter, CRB_HOST_DUMMY_BUF_ADDR_HI)); | 1064 | adapter->pci_write_normalize(adapter, CRB_HOST_DUMMY_BUF_ADDR_HI, hi); |
914 | writel(lo, NETXEN_CRB_NORMALIZE(adapter, CRB_HOST_DUMMY_BUF_ADDR_LO)); | 1065 | adapter->pci_write_normalize(adapter, CRB_HOST_DUMMY_BUF_ADDR_LO, lo); |
1066 | |||
1067 | if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { | ||
1068 | uint32_t temp = 0; | ||
1069 | adapter->hw_write_wx(adapter, CRB_HOST_DUMMY_BUF, &temp, 4); | ||
1070 | } | ||
915 | 1071 | ||
916 | return 0; | 1072 | return 0; |
917 | } | 1073 | } |
@@ -931,7 +1087,7 @@ void netxen_free_adapter_offload(struct netxen_adapter *adapter) | |||
931 | } while (--i); | 1087 | } while (--i); |
932 | 1088 | ||
933 | if (i) { | 1089 | if (i) { |
934 | pci_free_consistent(adapter->ahw.pdev, | 1090 | pci_free_consistent(adapter->pdev, |
935 | NETXEN_HOST_DUMMY_DMA_SIZE, | 1091 | NETXEN_HOST_DUMMY_DMA_SIZE, |
936 | adapter->dummy_dma.addr, | 1092 | adapter->dummy_dma.addr, |
937 | adapter->dummy_dma.phys_addr); | 1093 | adapter->dummy_dma.phys_addr); |
@@ -946,22 +1102,24 @@ void netxen_free_adapter_offload(struct netxen_adapter *adapter) | |||
946 | int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val) | 1102 | int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val) |
947 | { | 1103 | { |
948 | u32 val = 0; | 1104 | u32 val = 0; |
949 | int retries = 30; | 1105 | int retries = 60; |
950 | 1106 | ||
951 | if (!pegtune_val) { | 1107 | if (!pegtune_val) { |
952 | do { | 1108 | do { |
953 | val = readl(NETXEN_CRB_NORMALIZE | 1109 | val = adapter->pci_read_normalize(adapter, |
954 | (adapter, CRB_CMDPEG_STATE)); | 1110 | CRB_CMDPEG_STATE); |
955 | pegtune_val = readl(NETXEN_CRB_NORMALIZE | ||
956 | (adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE)); | ||
957 | 1111 | ||
958 | if (val == PHAN_INITIALIZE_COMPLETE || | 1112 | if (val == PHAN_INITIALIZE_COMPLETE || |
959 | val == PHAN_INITIALIZE_ACK) | 1113 | val == PHAN_INITIALIZE_ACK) |
960 | return 0; | 1114 | return 0; |
961 | 1115 | ||
962 | msleep(1000); | 1116 | msleep(500); |
1117 | |||
963 | } while (--retries); | 1118 | } while (--retries); |
1119 | |||
964 | if (!retries) { | 1120 | if (!retries) { |
1121 | pegtune_val = adapter->pci_read_normalize(adapter, | ||
1122 | NETXEN_ROMUSB_GLB_PEGTUNE_DONE); | ||
965 | printk(KERN_WARNING "netxen_phantom_init: init failed, " | 1123 | printk(KERN_WARNING "netxen_phantom_init: init failed, " |
966 | "pegtune_val=%x\n", pegtune_val); | 1124 | "pegtune_val=%x\n", pegtune_val); |
967 | return -1; | 1125 | return -1; |
@@ -971,58 +1129,61 @@ int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val) | |||
971 | return 0; | 1129 | return 0; |
972 | } | 1130 | } |
973 | 1131 | ||
974 | static int netxen_nic_check_temp(struct netxen_adapter *adapter) | 1132 | int netxen_receive_peg_ready(struct netxen_adapter *adapter) |
975 | { | 1133 | { |
976 | struct net_device *netdev = adapter->netdev; | 1134 | u32 val = 0; |
977 | uint32_t temp, temp_state, temp_val; | 1135 | int retries = 2000; |
978 | int rv = 0; | 1136 | |
979 | 1137 | do { | |
980 | temp = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_TEMP_STATE)); | 1138 | val = adapter->pci_read_normalize(adapter, CRB_RCVPEG_STATE); |
981 | 1139 | ||
982 | temp_state = nx_get_temp_state(temp); | 1140 | if (val == PHAN_PEG_RCV_INITIALIZED) |
983 | temp_val = nx_get_temp_val(temp); | 1141 | return 0; |
984 | 1142 | ||
985 | if (temp_state == NX_TEMP_PANIC) { | 1143 | msleep(10); |
986 | printk(KERN_ALERT | 1144 | |
987 | "%s: Device temperature %d degrees C exceeds" | 1145 | } while (--retries); |
988 | " maximum allowed. Hardware has been shut down.\n", | 1146 | |
989 | netxen_nic_driver_name, temp_val); | 1147 | if (!retries) { |
990 | 1148 | printk(KERN_ERR "Receive Peg initialization not " | |
991 | netif_carrier_off(netdev); | 1149 | "complete, state: 0x%x.\n", val); |
992 | netif_stop_queue(netdev); | 1150 | return -EIO; |
993 | rv = 1; | ||
994 | } else if (temp_state == NX_TEMP_WARN) { | ||
995 | if (adapter->temp == NX_TEMP_NORMAL) { | ||
996 | printk(KERN_ALERT | ||
997 | "%s: Device temperature %d degrees C " | ||
998 | "exceeds operating range." | ||
999 | " Immediate action needed.\n", | ||
1000 | netxen_nic_driver_name, temp_val); | ||
1001 | } | ||
1002 | } else { | ||
1003 | if (adapter->temp == NX_TEMP_WARN) { | ||
1004 | printk(KERN_INFO | ||
1005 | "%s: Device temperature is now %d degrees C" | ||
1006 | " in normal range.\n", netxen_nic_driver_name, | ||
1007 | temp_val); | ||
1008 | } | ||
1009 | } | 1151 | } |
1010 | adapter->temp = temp_state; | 1152 | |
1011 | return rv; | 1153 | return 0; |
1012 | } | 1154 | } |
1013 | 1155 | ||
1014 | void netxen_watchdog_task(struct work_struct *work) | 1156 | static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter, |
1157 | struct nx_host_rds_ring *rds_ring, u16 index, u16 cksum) | ||
1015 | { | 1158 | { |
1016 | struct netxen_adapter *adapter = | 1159 | struct netxen_rx_buffer *buffer; |
1017 | container_of(work, struct netxen_adapter, watchdog_task); | 1160 | struct sk_buff *skb; |
1018 | 1161 | ||
1019 | if ((adapter->portnum == 0) && netxen_nic_check_temp(adapter)) | 1162 | buffer = &rds_ring->rx_buf_arr[index]; |
1020 | return; | 1163 | |
1164 | pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size, | ||
1165 | PCI_DMA_FROMDEVICE); | ||
1021 | 1166 | ||
1022 | if (adapter->handle_phy_intr) | 1167 | skb = buffer->skb; |
1023 | adapter->handle_phy_intr(adapter); | 1168 | if (!skb) |
1169 | goto no_skb; | ||
1024 | 1170 | ||
1025 | mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); | 1171 | if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) { |
1172 | adapter->stats.csummed++; | ||
1173 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
1174 | } else | ||
1175 | skb->ip_summed = CHECKSUM_NONE; | ||
1176 | |||
1177 | skb->dev = adapter->netdev; | ||
1178 | |||
1179 | buffer->skb = NULL; | ||
1180 | |||
1181 | no_skb: | ||
1182 | buffer->state = NETXEN_BUFFER_FREE; | ||
1183 | buffer->lro_current_frags = 0; | ||
1184 | buffer->lro_expected_frags = 0; | ||
1185 | list_add_tail(&buffer->list, &rds_ring->free_list); | ||
1186 | return skb; | ||
1026 | } | 1187 | } |
1027 | 1188 | ||
1028 | /* | 1189 | /* |
@@ -1031,9 +1192,8 @@ void netxen_watchdog_task(struct work_struct *work) | |||
1031 | * invoke the routine to send more rx buffers to the Phantom... | 1192 | * invoke the routine to send more rx buffers to the Phantom... |
1032 | */ | 1193 | */ |
1033 | static void netxen_process_rcv(struct netxen_adapter *adapter, int ctxid, | 1194 | static void netxen_process_rcv(struct netxen_adapter *adapter, int ctxid, |
1034 | struct status_desc *desc) | 1195 | struct status_desc *desc, struct status_desc *frag_desc) |
1035 | { | 1196 | { |
1036 | struct pci_dev *pdev = adapter->pdev; | ||
1037 | struct net_device *netdev = adapter->netdev; | 1197 | struct net_device *netdev = adapter->netdev; |
1038 | u64 sts_data = le64_to_cpu(desc->status_desc_data); | 1198 | u64 sts_data = le64_to_cpu(desc->status_desc_data); |
1039 | int index = netxen_get_sts_refhandle(sts_data); | 1199 | int index = netxen_get_sts_refhandle(sts_data); |
@@ -1042,8 +1202,8 @@ static void netxen_process_rcv(struct netxen_adapter *adapter, int ctxid, | |||
1042 | struct sk_buff *skb; | 1202 | struct sk_buff *skb; |
1043 | u32 length = netxen_get_sts_totallength(sts_data); | 1203 | u32 length = netxen_get_sts_totallength(sts_data); |
1044 | u32 desc_ctx; | 1204 | u32 desc_ctx; |
1045 | struct netxen_rcv_desc_ctx *rcv_desc; | 1205 | u16 pkt_offset = 0, cksum; |
1046 | int ret; | 1206 | struct nx_host_rds_ring *rds_ring; |
1047 | 1207 | ||
1048 | desc_ctx = netxen_get_sts_type(sts_data); | 1208 | desc_ctx = netxen_get_sts_type(sts_data); |
1049 | if (unlikely(desc_ctx >= NUM_RCV_DESC_RINGS)) { | 1209 | if (unlikely(desc_ctx >= NUM_RCV_DESC_RINGS)) { |
@@ -1052,13 +1212,13 @@ static void netxen_process_rcv(struct netxen_adapter *adapter, int ctxid, | |||
1052 | return; | 1212 | return; |
1053 | } | 1213 | } |
1054 | 1214 | ||
1055 | rcv_desc = &recv_ctx->rcv_desc[desc_ctx]; | 1215 | rds_ring = &recv_ctx->rds_rings[desc_ctx]; |
1056 | if (unlikely(index > rcv_desc->max_rx_desc_count)) { | 1216 | if (unlikely(index > rds_ring->max_rx_desc_count)) { |
1057 | DPRINTK(ERR, "Got a buffer index:%x Max is %x\n", | 1217 | DPRINTK(ERR, "Got a buffer index:%x Max is %x\n", |
1058 | index, rcv_desc->max_rx_desc_count); | 1218 | index, rds_ring->max_rx_desc_count); |
1059 | return; | 1219 | return; |
1060 | } | 1220 | } |
1061 | buffer = &rcv_desc->rx_buf_arr[index]; | 1221 | buffer = &rds_ring->rx_buf_arr[index]; |
1062 | if (desc_ctx == RCV_DESC_LRO_CTXID) { | 1222 | if (desc_ctx == RCV_DESC_LRO_CTXID) { |
1063 | buffer->lro_current_frags++; | 1223 | buffer->lro_current_frags++; |
1064 | if (netxen_get_sts_desc_lro_last_frag(desc)) { | 1224 | if (netxen_get_sts_desc_lro_last_frag(desc)) { |
@@ -1079,43 +1239,52 @@ static void netxen_process_rcv(struct netxen_adapter *adapter, int ctxid, | |||
1079 | } | 1239 | } |
1080 | } | 1240 | } |
1081 | 1241 | ||
1082 | pci_unmap_single(pdev, buffer->dma, rcv_desc->dma_size, | 1242 | cksum = netxen_get_sts_status(sts_data); |
1083 | PCI_DMA_FROMDEVICE); | ||
1084 | 1243 | ||
1085 | skb = (struct sk_buff *)buffer->skb; | 1244 | skb = netxen_process_rxbuf(adapter, rds_ring, index, cksum); |
1086 | 1245 | if (!skb) | |
1087 | if (likely(adapter->rx_csum && | 1246 | return; |
1088 | netxen_get_sts_status(sts_data) == STATUS_CKSUM_OK)) { | ||
1089 | adapter->stats.csummed++; | ||
1090 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
1091 | } else | ||
1092 | skb->ip_summed = CHECKSUM_NONE; | ||
1093 | 1247 | ||
1094 | skb->dev = netdev; | ||
1095 | if (desc_ctx == RCV_DESC_LRO_CTXID) { | 1248 | if (desc_ctx == RCV_DESC_LRO_CTXID) { |
1096 | /* True length was only available on the last pkt */ | 1249 | /* True length was only available on the last pkt */ |
1097 | skb_put(skb, buffer->lro_length); | 1250 | skb_put(skb, buffer->lro_length); |
1098 | } else { | 1251 | } else { |
1099 | skb_put(skb, length); | 1252 | if (length > rds_ring->skb_size) |
1253 | skb_put(skb, rds_ring->skb_size); | ||
1254 | else | ||
1255 | skb_put(skb, length); | ||
1256 | |||
1257 | pkt_offset = netxen_get_sts_pkt_offset(sts_data); | ||
1258 | if (pkt_offset) | ||
1259 | skb_pull(skb, pkt_offset); | ||
1100 | } | 1260 | } |
1101 | 1261 | ||
1102 | skb->protocol = eth_type_trans(skb, netdev); | 1262 | skb->protocol = eth_type_trans(skb, netdev); |
1103 | 1263 | ||
1104 | ret = netif_receive_skb(skb); | ||
1105 | netdev->last_rx = jiffies; | ||
1106 | |||
1107 | rcv_desc->rcv_pending--; | ||
1108 | |||
1109 | /* | 1264 | /* |
1110 | * We just consumed one buffer so post a buffer. | 1265 | * rx buffer chaining is disabled, walk and free |
1266 | * any spurious rx buffer chain. | ||
1111 | */ | 1267 | */ |
1112 | buffer->skb = NULL; | 1268 | if (frag_desc) { |
1113 | buffer->state = NETXEN_BUFFER_FREE; | 1269 | u16 i, nr_frags = desc->nr_frags; |
1114 | buffer->lro_current_frags = 0; | 1270 | |
1115 | buffer->lro_expected_frags = 0; | 1271 | dev_kfree_skb_any(skb); |
1272 | for (i = 0; i < nr_frags; i++) { | ||
1273 | index = frag_desc->frag_handles[i]; | ||
1274 | skb = netxen_process_rxbuf(adapter, | ||
1275 | rds_ring, index, cksum); | ||
1276 | if (skb) | ||
1277 | dev_kfree_skb_any(skb); | ||
1278 | } | ||
1279 | adapter->stats.rxdropped++; | ||
1280 | } else { | ||
1116 | 1281 | ||
1117 | adapter->stats.no_rcv++; | 1282 | netif_receive_skb(skb); |
1118 | adapter->stats.rxbytes += length; | 1283 | netdev->last_rx = jiffies; |
1284 | |||
1285 | adapter->stats.no_rcv++; | ||
1286 | adapter->stats.rxbytes += length; | ||
1287 | } | ||
1119 | } | 1288 | } |
1120 | 1289 | ||
1121 | /* Process Receive status ring */ | 1290 | /* Process Receive status ring */ |
@@ -1123,10 +1292,11 @@ u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max) | |||
1123 | { | 1292 | { |
1124 | struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctxid]); | 1293 | struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctxid]); |
1125 | struct status_desc *desc_head = recv_ctx->rcv_status_desc_head; | 1294 | struct status_desc *desc_head = recv_ctx->rcv_status_desc_head; |
1126 | struct status_desc *desc; /* used to read status desc here */ | 1295 | struct status_desc *desc, *frag_desc; |
1127 | u32 consumer = recv_ctx->status_rx_consumer; | 1296 | u32 consumer = recv_ctx->status_rx_consumer; |
1128 | u32 producer = 0; | ||
1129 | int count = 0, ring; | 1297 | int count = 0, ring; |
1298 | u64 sts_data; | ||
1299 | u16 opcode; | ||
1130 | 1300 | ||
1131 | while (count < max) { | 1301 | while (count < max) { |
1132 | desc = &desc_head[consumer]; | 1302 | desc = &desc_head[consumer]; |
@@ -1135,24 +1305,38 @@ u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max) | |||
1135 | netxen_get_sts_owner(desc)); | 1305 | netxen_get_sts_owner(desc)); |
1136 | break; | 1306 | break; |
1137 | } | 1307 | } |
1138 | netxen_process_rcv(adapter, ctxid, desc); | 1308 | |
1309 | sts_data = le64_to_cpu(desc->status_desc_data); | ||
1310 | opcode = netxen_get_sts_opcode(sts_data); | ||
1311 | frag_desc = NULL; | ||
1312 | if (opcode == NETXEN_NIC_RXPKT_DESC) { | ||
1313 | if (desc->nr_frags) { | ||
1314 | consumer = get_next_index(consumer, | ||
1315 | adapter->max_rx_desc_count); | ||
1316 | frag_desc = &desc_head[consumer]; | ||
1317 | netxen_set_sts_owner(frag_desc, | ||
1318 | STATUS_OWNER_PHANTOM); | ||
1319 | } | ||
1320 | } | ||
1321 | |||
1322 | netxen_process_rcv(adapter, ctxid, desc, frag_desc); | ||
1323 | |||
1139 | netxen_set_sts_owner(desc, STATUS_OWNER_PHANTOM); | 1324 | netxen_set_sts_owner(desc, STATUS_OWNER_PHANTOM); |
1140 | consumer = (consumer + 1) & (adapter->max_rx_desc_count - 1); | 1325 | |
1326 | consumer = get_next_index(consumer, | ||
1327 | adapter->max_rx_desc_count); | ||
1141 | count++; | 1328 | count++; |
1142 | } | 1329 | } |
1143 | for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) | 1330 | for (ring = 0; ring < adapter->max_rds_rings; ring++) |
1144 | netxen_post_rx_buffers_nodb(adapter, ctxid, ring); | 1331 | netxen_post_rx_buffers_nodb(adapter, ctxid, ring); |
1145 | 1332 | ||
1146 | /* update the consumer index in phantom */ | 1333 | /* update the consumer index in phantom */ |
1147 | if (count) { | 1334 | if (count) { |
1148 | recv_ctx->status_rx_consumer = consumer; | 1335 | recv_ctx->status_rx_consumer = consumer; |
1149 | recv_ctx->status_rx_producer = producer; | ||
1150 | 1336 | ||
1151 | /* Window = 1 */ | 1337 | /* Window = 1 */ |
1152 | writel(consumer, | 1338 | adapter->pci_write_normalize(adapter, |
1153 | NETXEN_CRB_NORMALIZE(adapter, | 1339 | recv_ctx->crb_sts_consumer, consumer); |
1154 | recv_crb_registers[adapter->portnum]. | ||
1155 | crb_rcv_status_consumer)); | ||
1156 | } | 1340 | } |
1157 | 1341 | ||
1158 | return count; | 1342 | return count; |
@@ -1231,10 +1415,10 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter) | |||
1231 | */ | 1415 | */ |
1232 | void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid) | 1416 | void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid) |
1233 | { | 1417 | { |
1234 | struct pci_dev *pdev = adapter->ahw.pdev; | 1418 | struct pci_dev *pdev = adapter->pdev; |
1235 | struct sk_buff *skb; | 1419 | struct sk_buff *skb; |
1236 | struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctx]); | 1420 | struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctx]); |
1237 | struct netxen_rcv_desc_ctx *rcv_desc = NULL; | 1421 | struct nx_host_rds_ring *rds_ring = NULL; |
1238 | uint producer; | 1422 | uint producer; |
1239 | struct rcv_desc *pdesc; | 1423 | struct rcv_desc *pdesc; |
1240 | struct netxen_rx_buffer *buffer; | 1424 | struct netxen_rx_buffer *buffer; |
@@ -1242,41 +1426,36 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid) | |||
1242 | int index = 0; | 1426 | int index = 0; |
1243 | netxen_ctx_msg msg = 0; | 1427 | netxen_ctx_msg msg = 0; |
1244 | dma_addr_t dma; | 1428 | dma_addr_t dma; |
1429 | struct list_head *head; | ||
1245 | 1430 | ||
1246 | rcv_desc = &recv_ctx->rcv_desc[ringid]; | 1431 | rds_ring = &recv_ctx->rds_rings[ringid]; |
1432 | |||
1433 | producer = rds_ring->producer; | ||
1434 | index = rds_ring->begin_alloc; | ||
1435 | head = &rds_ring->free_list; | ||
1247 | 1436 | ||
1248 | producer = rcv_desc->producer; | ||
1249 | index = rcv_desc->begin_alloc; | ||
1250 | buffer = &rcv_desc->rx_buf_arr[index]; | ||
1251 | /* We can start writing rx descriptors into the phantom memory. */ | 1437 | /* We can start writing rx descriptors into the phantom memory. */ |
1252 | while (buffer->state == NETXEN_BUFFER_FREE) { | 1438 | while (!list_empty(head)) { |
1253 | skb = dev_alloc_skb(rcv_desc->skb_size); | 1439 | |
1440 | skb = dev_alloc_skb(rds_ring->skb_size); | ||
1254 | if (unlikely(!skb)) { | 1441 | if (unlikely(!skb)) { |
1255 | /* | 1442 | rds_ring->begin_alloc = index; |
1256 | * TODO | ||
1257 | * We need to schedule the posting of buffers to the pegs. | ||
1258 | */ | ||
1259 | rcv_desc->begin_alloc = index; | ||
1260 | DPRINTK(ERR, "netxen_post_rx_buffers: " | ||
1261 | " allocated only %d buffers\n", count); | ||
1262 | break; | 1443 | break; |
1263 | } | 1444 | } |
1264 | 1445 | ||
1446 | buffer = list_entry(head->next, struct netxen_rx_buffer, list); | ||
1447 | list_del(&buffer->list); | ||
1448 | |||
1265 | count++; /* now there should be no failure */ | 1449 | count++; /* now there should be no failure */ |
1266 | pdesc = &rcv_desc->desc_head[producer]; | 1450 | pdesc = &rds_ring->desc_head[producer]; |
1267 | 1451 | ||
1268 | #if defined(XGB_DEBUG) | 1452 | if (!adapter->ahw.cut_through) |
1269 | *(unsigned long *)(skb->head) = 0xc0debabe; | 1453 | skb_reserve(skb, 2); |
1270 | if (skb_is_nonlinear(skb)) { | ||
1271 | printk("Allocated SKB @%p is nonlinear\n"); | ||
1272 | } | ||
1273 | #endif | ||
1274 | skb_reserve(skb, 2); | ||
1275 | /* This will be setup when we receive the | 1454 | /* This will be setup when we receive the |
1276 | * buffer after it has been filled FSL TBD TBD | 1455 | * buffer after it has been filled FSL TBD TBD |
1277 | * skb->dev = netdev; | 1456 | * skb->dev = netdev; |
1278 | */ | 1457 | */ |
1279 | dma = pci_map_single(pdev, skb->data, rcv_desc->dma_size, | 1458 | dma = pci_map_single(pdev, skb->data, rds_ring->dma_size, |
1280 | PCI_DMA_FROMDEVICE); | 1459 | PCI_DMA_FROMDEVICE); |
1281 | pdesc->addr_buffer = cpu_to_le64(dma); | 1460 | pdesc->addr_buffer = cpu_to_le64(dma); |
1282 | buffer->skb = skb; | 1461 | buffer->skb = skb; |
@@ -1284,112 +1463,101 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid) | |||
1284 | buffer->dma = dma; | 1463 | buffer->dma = dma; |
1285 | /* make a rcv descriptor */ | 1464 | /* make a rcv descriptor */ |
1286 | pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); | 1465 | pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); |
1287 | pdesc->buffer_length = cpu_to_le32(rcv_desc->dma_size); | 1466 | pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); |
1288 | DPRINTK(INFO, "done writing descripter\n"); | 1467 | DPRINTK(INFO, "done writing descripter\n"); |
1289 | producer = | 1468 | producer = |
1290 | get_next_index(producer, rcv_desc->max_rx_desc_count); | 1469 | get_next_index(producer, rds_ring->max_rx_desc_count); |
1291 | index = get_next_index(index, rcv_desc->max_rx_desc_count); | 1470 | index = get_next_index(index, rds_ring->max_rx_desc_count); |
1292 | buffer = &rcv_desc->rx_buf_arr[index]; | ||
1293 | } | 1471 | } |
1294 | /* if we did allocate buffers, then write the count to Phantom */ | 1472 | /* if we did allocate buffers, then write the count to Phantom */ |
1295 | if (count) { | 1473 | if (count) { |
1296 | rcv_desc->begin_alloc = index; | 1474 | rds_ring->begin_alloc = index; |
1297 | rcv_desc->rcv_pending += count; | 1475 | rds_ring->producer = producer; |
1298 | rcv_desc->producer = producer; | ||
1299 | /* Window = 1 */ | 1476 | /* Window = 1 */ |
1300 | writel((producer - 1) & | 1477 | adapter->pci_write_normalize(adapter, |
1301 | (rcv_desc->max_rx_desc_count - 1), | 1478 | rds_ring->crb_rcv_producer, |
1302 | NETXEN_CRB_NORMALIZE(adapter, | 1479 | (producer-1) & (rds_ring->max_rx_desc_count-1)); |
1303 | recv_crb_registers[ | 1480 | |
1304 | adapter->portnum]. | 1481 | if (adapter->fw_major < 4) { |
1305 | rcv_desc_crb[ringid]. | ||
1306 | crb_rcv_producer_offset)); | ||
1307 | /* | 1482 | /* |
1308 | * Write a doorbell msg to tell phanmon of change in | 1483 | * Write a doorbell msg to tell phanmon of change in |
1309 | * receive ring producer | 1484 | * receive ring producer |
1485 | * Only for firmware version < 4.0.0 | ||
1310 | */ | 1486 | */ |
1311 | netxen_set_msg_peg_id(msg, NETXEN_RCV_PEG_DB_ID); | 1487 | netxen_set_msg_peg_id(msg, NETXEN_RCV_PEG_DB_ID); |
1312 | netxen_set_msg_privid(msg); | 1488 | netxen_set_msg_privid(msg); |
1313 | netxen_set_msg_count(msg, | 1489 | netxen_set_msg_count(msg, |
1314 | ((producer - | 1490 | ((producer - |
1315 | 1) & (rcv_desc-> | 1491 | 1) & (rds_ring-> |
1316 | max_rx_desc_count - 1))); | 1492 | max_rx_desc_count - 1))); |
1317 | netxen_set_msg_ctxid(msg, adapter->portnum); | 1493 | netxen_set_msg_ctxid(msg, adapter->portnum); |
1318 | netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid)); | 1494 | netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid)); |
1319 | writel(msg, | 1495 | writel(msg, |
1320 | DB_NORMALIZE(adapter, | 1496 | DB_NORMALIZE(adapter, |
1321 | NETXEN_RCV_PRODUCER_OFFSET)); | 1497 | NETXEN_RCV_PRODUCER_OFFSET)); |
1498 | } | ||
1322 | } | 1499 | } |
1323 | } | 1500 | } |
1324 | 1501 | ||
1325 | static void netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, | 1502 | static void netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, |
1326 | uint32_t ctx, uint32_t ringid) | 1503 | uint32_t ctx, uint32_t ringid) |
1327 | { | 1504 | { |
1328 | struct pci_dev *pdev = adapter->ahw.pdev; | 1505 | struct pci_dev *pdev = adapter->pdev; |
1329 | struct sk_buff *skb; | 1506 | struct sk_buff *skb; |
1330 | struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctx]); | 1507 | struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctx]); |
1331 | struct netxen_rcv_desc_ctx *rcv_desc = NULL; | 1508 | struct nx_host_rds_ring *rds_ring = NULL; |
1332 | u32 producer; | 1509 | u32 producer; |
1333 | struct rcv_desc *pdesc; | 1510 | struct rcv_desc *pdesc; |
1334 | struct netxen_rx_buffer *buffer; | 1511 | struct netxen_rx_buffer *buffer; |
1335 | int count = 0; | 1512 | int count = 0; |
1336 | int index = 0; | 1513 | int index = 0; |
1514 | struct list_head *head; | ||
1337 | 1515 | ||
1338 | rcv_desc = &recv_ctx->rcv_desc[ringid]; | 1516 | rds_ring = &recv_ctx->rds_rings[ringid]; |
1339 | 1517 | ||
1340 | producer = rcv_desc->producer; | 1518 | producer = rds_ring->producer; |
1341 | index = rcv_desc->begin_alloc; | 1519 | index = rds_ring->begin_alloc; |
1342 | buffer = &rcv_desc->rx_buf_arr[index]; | 1520 | head = &rds_ring->free_list; |
1343 | /* We can start writing rx descriptors into the phantom memory. */ | 1521 | /* We can start writing rx descriptors into the phantom memory. */ |
1344 | while (buffer->state == NETXEN_BUFFER_FREE) { | 1522 | while (!list_empty(head)) { |
1345 | skb = dev_alloc_skb(rcv_desc->skb_size); | 1523 | |
1524 | skb = dev_alloc_skb(rds_ring->skb_size); | ||
1346 | if (unlikely(!skb)) { | 1525 | if (unlikely(!skb)) { |
1347 | /* | 1526 | rds_ring->begin_alloc = index; |
1348 | * We need to schedule the posting of buffers to the pegs. | ||
1349 | */ | ||
1350 | rcv_desc->begin_alloc = index; | ||
1351 | DPRINTK(ERR, "netxen_post_rx_buffers_nodb: " | ||
1352 | " allocated only %d buffers\n", count); | ||
1353 | break; | 1527 | break; |
1354 | } | 1528 | } |
1529 | |||
1530 | buffer = list_entry(head->next, struct netxen_rx_buffer, list); | ||
1531 | list_del(&buffer->list); | ||
1532 | |||
1355 | count++; /* now there should be no failure */ | 1533 | count++; /* now there should be no failure */ |
1356 | pdesc = &rcv_desc->desc_head[producer]; | 1534 | pdesc = &rds_ring->desc_head[producer]; |
1357 | skb_reserve(skb, 2); | 1535 | if (!adapter->ahw.cut_through) |
1358 | /* | 1536 | skb_reserve(skb, 2); |
1359 | * This will be setup when we receive the | ||
1360 | * buffer after it has been filled | ||
1361 | * skb->dev = netdev; | ||
1362 | */ | ||
1363 | buffer->skb = skb; | 1537 | buffer->skb = skb; |
1364 | buffer->state = NETXEN_BUFFER_BUSY; | 1538 | buffer->state = NETXEN_BUFFER_BUSY; |
1365 | buffer->dma = pci_map_single(pdev, skb->data, | 1539 | buffer->dma = pci_map_single(pdev, skb->data, |
1366 | rcv_desc->dma_size, | 1540 | rds_ring->dma_size, |
1367 | PCI_DMA_FROMDEVICE); | 1541 | PCI_DMA_FROMDEVICE); |
1368 | 1542 | ||
1369 | /* make a rcv descriptor */ | 1543 | /* make a rcv descriptor */ |
1370 | pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); | 1544 | pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); |
1371 | pdesc->buffer_length = cpu_to_le32(rcv_desc->dma_size); | 1545 | pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); |
1372 | pdesc->addr_buffer = cpu_to_le64(buffer->dma); | 1546 | pdesc->addr_buffer = cpu_to_le64(buffer->dma); |
1373 | DPRINTK(INFO, "done writing descripter\n"); | ||
1374 | producer = | 1547 | producer = |
1375 | get_next_index(producer, rcv_desc->max_rx_desc_count); | 1548 | get_next_index(producer, rds_ring->max_rx_desc_count); |
1376 | index = get_next_index(index, rcv_desc->max_rx_desc_count); | 1549 | index = get_next_index(index, rds_ring->max_rx_desc_count); |
1377 | buffer = &rcv_desc->rx_buf_arr[index]; | 1550 | buffer = &rds_ring->rx_buf_arr[index]; |
1378 | } | 1551 | } |
1379 | 1552 | ||
1380 | /* if we did allocate buffers, then write the count to Phantom */ | 1553 | /* if we did allocate buffers, then write the count to Phantom */ |
1381 | if (count) { | 1554 | if (count) { |
1382 | rcv_desc->begin_alloc = index; | 1555 | rds_ring->begin_alloc = index; |
1383 | rcv_desc->rcv_pending += count; | 1556 | rds_ring->producer = producer; |
1384 | rcv_desc->producer = producer; | ||
1385 | /* Window = 1 */ | 1557 | /* Window = 1 */ |
1386 | writel((producer - 1) & | 1558 | adapter->pci_write_normalize(adapter, |
1387 | (rcv_desc->max_rx_desc_count - 1), | 1559 | rds_ring->crb_rcv_producer, |
1388 | NETXEN_CRB_NORMALIZE(adapter, | 1560 | (producer-1) & (rds_ring->max_rx_desc_count-1)); |
1389 | recv_crb_registers[ | ||
1390 | adapter->portnum]. | ||
1391 | rcv_desc_crb[ringid]. | ||
1392 | crb_rcv_producer_offset)); | ||
1393 | wmb(); | 1561 | wmb(); |
1394 | } | 1562 | } |
1395 | } | 1563 | } |
diff --git a/drivers/net/netxen/netxen_nic_isr.c b/drivers/net/netxen/netxen_nic_isr.c deleted file mode 100644 index 96cec41f9019..000000000000 --- a/drivers/net/netxen/netxen_nic_isr.c +++ /dev/null | |||
@@ -1,220 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2003 - 2006 NetXen, Inc. | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation; either version 2 | ||
8 | * of the License, or (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, | ||
18 | * MA 02111-1307, USA. | ||
19 | * | ||
20 | * The full GNU General Public License is included in this distribution | ||
21 | * in the file called LICENSE. | ||
22 | * | ||
23 | * Contact Information: | ||
24 | * info@netxen.com | ||
25 | * NetXen, | ||
26 | * 3965 Freedom Circle, Fourth floor, | ||
27 | * Santa Clara, CA 95054 | ||
28 | */ | ||
29 | |||
30 | #include <linux/netdevice.h> | ||
31 | #include <linux/delay.h> | ||
32 | |||
33 | #include "netxen_nic.h" | ||
34 | #include "netxen_nic_hw.h" | ||
35 | #include "netxen_nic_phan_reg.h" | ||
36 | |||
37 | /* | ||
38 | * netxen_nic_get_stats - Get System Network Statistics | ||
39 | * @netdev: network interface device structure | ||
40 | */ | ||
41 | struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev) | ||
42 | { | ||
43 | struct netxen_adapter *adapter = netdev_priv(netdev); | ||
44 | struct net_device_stats *stats = &adapter->net_stats; | ||
45 | |||
46 | memset(stats, 0, sizeof(*stats)); | ||
47 | |||
48 | /* total packets received */ | ||
49 | stats->rx_packets = adapter->stats.no_rcv; | ||
50 | /* total packets transmitted */ | ||
51 | stats->tx_packets = adapter->stats.xmitedframes + | ||
52 | adapter->stats.xmitfinished; | ||
53 | /* total bytes received */ | ||
54 | stats->rx_bytes = adapter->stats.rxbytes; | ||
55 | /* total bytes transmitted */ | ||
56 | stats->tx_bytes = adapter->stats.txbytes; | ||
57 | /* bad packets received */ | ||
58 | stats->rx_errors = adapter->stats.rcvdbadskb; | ||
59 | /* packet transmit problems */ | ||
60 | stats->tx_errors = adapter->stats.nocmddescriptor; | ||
61 | /* no space in linux buffers */ | ||
62 | stats->rx_dropped = adapter->stats.rxdropped; | ||
63 | /* no space available in linux */ | ||
64 | stats->tx_dropped = adapter->stats.txdropped; | ||
65 | |||
66 | return stats; | ||
67 | } | ||
68 | |||
69 | static void netxen_indicate_link_status(struct netxen_adapter *adapter, | ||
70 | u32 link) | ||
71 | { | ||
72 | struct net_device *netdev = adapter->netdev; | ||
73 | |||
74 | if (link) | ||
75 | netif_carrier_on(netdev); | ||
76 | else | ||
77 | netif_carrier_off(netdev); | ||
78 | } | ||
79 | |||
80 | #if 0 | ||
81 | void netxen_handle_port_int(struct netxen_adapter *adapter, u32 enable) | ||
82 | { | ||
83 | __u32 int_src; | ||
84 | |||
85 | /* This should clear the interrupt source */ | ||
86 | if (adapter->phy_read) | ||
87 | adapter->phy_read(adapter, | ||
88 | NETXEN_NIU_GB_MII_MGMT_ADDR_INT_STATUS, | ||
89 | &int_src); | ||
90 | if (int_src == 0) { | ||
91 | DPRINTK(INFO, "No phy interrupts for port #%d\n", portno); | ||
92 | return; | ||
93 | } | ||
94 | if (adapter->disable_phy_interrupts) | ||
95 | adapter->disable_phy_interrupts(adapter); | ||
96 | |||
97 | if (netxen_get_phy_int_jabber(int_src)) | ||
98 | DPRINTK(INFO, "Jabber interrupt \n"); | ||
99 | |||
100 | if (netxen_get_phy_int_polarity_changed(int_src)) | ||
101 | DPRINTK(INFO, "POLARITY CHANGED int \n"); | ||
102 | |||
103 | if (netxen_get_phy_int_energy_detect(int_src)) | ||
104 | DPRINTK(INFO, "ENERGY DETECT INT \n"); | ||
105 | |||
106 | if (netxen_get_phy_int_downshift(int_src)) | ||
107 | DPRINTK(INFO, "DOWNSHIFT INT \n"); | ||
108 | /* write it down later.. */ | ||
109 | if ((netxen_get_phy_int_speed_changed(int_src)) | ||
110 | || (netxen_get_phy_int_link_status_changed(int_src))) { | ||
111 | __u32 status; | ||
112 | |||
113 | DPRINTK(INFO, "SPEED CHANGED OR LINK STATUS CHANGED \n"); | ||
114 | |||
115 | if (adapter->phy_read | ||
116 | && adapter->phy_read(adapter, | ||
117 | NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS, | ||
118 | &status) == 0) { | ||
119 | if (netxen_get_phy_int_link_status_changed(int_src)) { | ||
120 | if (netxen_get_phy_link(status)) { | ||
121 | printk(KERN_INFO "%s: %s Link UP\n", | ||
122 | netxen_nic_driver_name, | ||
123 | adapter->netdev->name); | ||
124 | |||
125 | } else { | ||
126 | printk(KERN_INFO "%s: %s Link DOWN\n", | ||
127 | netxen_nic_driver_name, | ||
128 | adapter->netdev->name); | ||
129 | } | ||
130 | netxen_indicate_link_status(adapter, | ||
131 | netxen_get_phy_link | ||
132 | (status)); | ||
133 | } | ||
134 | } | ||
135 | } | ||
136 | if (adapter->enable_phy_interrupts) | ||
137 | adapter->enable_phy_interrupts(adapter); | ||
138 | } | ||
139 | #endif /* 0 */ | ||
140 | |||
141 | static void netxen_nic_isr_other(struct netxen_adapter *adapter) | ||
142 | { | ||
143 | int portno = adapter->portnum; | ||
144 | u32 val, linkup, qg_linksup; | ||
145 | |||
146 | /* verify the offset */ | ||
147 | val = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_XG_STATE)); | ||
148 | val = val >> adapter->physical_port; | ||
149 | if (val == adapter->ahw.qg_linksup) | ||
150 | return; | ||
151 | |||
152 | qg_linksup = adapter->ahw.qg_linksup; | ||
153 | adapter->ahw.qg_linksup = val; | ||
154 | DPRINTK(INFO, "link update 0x%08x\n", val); | ||
155 | |||
156 | linkup = val & 1; | ||
157 | |||
158 | if (linkup != (qg_linksup & 1)) { | ||
159 | printk(KERN_INFO "%s: %s PORT %d link %s\n", | ||
160 | adapter->netdev->name, | ||
161 | netxen_nic_driver_name, portno, | ||
162 | ((linkup == 0) ? "down" : "up")); | ||
163 | netxen_indicate_link_status(adapter, linkup); | ||
164 | if (linkup) | ||
165 | netxen_nic_set_link_parameters(adapter); | ||
166 | |||
167 | } | ||
168 | } | ||
169 | |||
170 | void netxen_nic_gbe_handle_phy_intr(struct netxen_adapter *adapter) | ||
171 | { | ||
172 | netxen_nic_isr_other(adapter); | ||
173 | } | ||
174 | |||
175 | #if 0 | ||
176 | int netxen_nic_link_ok(struct netxen_adapter *adapter) | ||
177 | { | ||
178 | switch (adapter->ahw.board_type) { | ||
179 | case NETXEN_NIC_GBE: | ||
180 | return ((adapter->ahw.qg_linksup) & 1); | ||
181 | |||
182 | case NETXEN_NIC_XGBE: | ||
183 | return ((adapter->ahw.xg_linkup) & 1); | ||
184 | |||
185 | default: | ||
186 | printk(KERN_ERR"%s: Function: %s, Unknown board type\n", | ||
187 | netxen_nic_driver_name, __FUNCTION__); | ||
188 | break; | ||
189 | } | ||
190 | |||
191 | return 0; | ||
192 | } | ||
193 | #endif /* 0 */ | ||
194 | |||
195 | void netxen_nic_xgbe_handle_phy_intr(struct netxen_adapter *adapter) | ||
196 | { | ||
197 | struct net_device *netdev = adapter->netdev; | ||
198 | u32 val; | ||
199 | |||
200 | /* WINDOW = 1 */ | ||
201 | val = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_XG_STATE)); | ||
202 | val >>= (adapter->physical_port * 8); | ||
203 | val &= 0xff; | ||
204 | |||
205 | if (adapter->ahw.xg_linkup == 1 && val != XG_LINK_UP) { | ||
206 | printk(KERN_INFO "%s: %s NIC Link is down\n", | ||
207 | netxen_nic_driver_name, netdev->name); | ||
208 | adapter->ahw.xg_linkup = 0; | ||
209 | if (netif_running(netdev)) { | ||
210 | netif_carrier_off(netdev); | ||
211 | netif_stop_queue(netdev); | ||
212 | } | ||
213 | } else if (adapter->ahw.xg_linkup == 0 && val == XG_LINK_UP) { | ||
214 | printk(KERN_INFO "%s: %s NIC Link is up\n", | ||
215 | netxen_nic_driver_name, netdev->name); | ||
216 | adapter->ahw.xg_linkup = 1; | ||
217 | netif_carrier_on(netdev); | ||
218 | netif_wake_queue(netdev); | ||
219 | } | ||
220 | } | ||
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 63cd67b931e7..91d209a8f6cb 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c | |||
@@ -49,13 +49,18 @@ char netxen_nic_driver_name[] = "netxen_nic"; | |||
49 | static char netxen_nic_driver_string[] = "NetXen Network Driver version " | 49 | static char netxen_nic_driver_string[] = "NetXen Network Driver version " |
50 | NETXEN_NIC_LINUX_VERSIONID; | 50 | NETXEN_NIC_LINUX_VERSIONID; |
51 | 51 | ||
52 | #define NETXEN_NETDEV_WEIGHT 120 | 52 | static int port_mode = NETXEN_PORT_MODE_AUTO_NEG; |
53 | #define NETXEN_ADAPTER_UP_MAGIC 777 | 53 | |
54 | #define NETXEN_NIC_PEG_TUNE 0 | 54 | /* Default to restricted 1G auto-neg mode */ |
55 | static int wol_port_mode = 5; | ||
56 | |||
57 | static int use_msi = 1; | ||
58 | |||
59 | static int use_msi_x = 1; | ||
55 | 60 | ||
56 | /* Local functions to NetXen NIC driver */ | 61 | /* Local functions to NetXen NIC driver */ |
57 | static int __devinit netxen_nic_probe(struct pci_dev *pdev, | 62 | static int __devinit netxen_nic_probe(struct pci_dev *pdev, |
58 | const struct pci_device_id *ent); | 63 | const struct pci_device_id *ent); |
59 | static void __devexit netxen_nic_remove(struct pci_dev *pdev); | 64 | static void __devexit netxen_nic_remove(struct pci_dev *pdev); |
60 | static int netxen_nic_open(struct net_device *netdev); | 65 | static int netxen_nic_open(struct net_device *netdev); |
61 | static int netxen_nic_close(struct net_device *netdev); | 66 | static int netxen_nic_close(struct net_device *netdev); |
@@ -83,6 +88,7 @@ static struct pci_device_id netxen_pci_tbl[] __devinitdata = { | |||
83 | ENTRY(0x0005), | 88 | ENTRY(0x0005), |
84 | ENTRY(0x0024), | 89 | ENTRY(0x0024), |
85 | ENTRY(0x0025), | 90 | ENTRY(0x0025), |
91 | ENTRY(0x0100), | ||
86 | {0,} | 92 | {0,} |
87 | }; | 93 | }; |
88 | 94 | ||
@@ -108,95 +114,61 @@ static struct workqueue_struct *netxen_workq; | |||
108 | 114 | ||
109 | static void netxen_watchdog(unsigned long); | 115 | static void netxen_watchdog(unsigned long); |
110 | 116 | ||
111 | static void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter, | 117 | static uint32_t crb_cmd_producer[4] = { |
112 | uint32_t crb_producer) | 118 | CRB_CMD_PRODUCER_OFFSET, CRB_CMD_PRODUCER_OFFSET_1, |
119 | CRB_CMD_PRODUCER_OFFSET_2, CRB_CMD_PRODUCER_OFFSET_3 | ||
120 | }; | ||
121 | |||
122 | void | ||
123 | netxen_nic_update_cmd_producer(struct netxen_adapter *adapter, | ||
124 | uint32_t crb_producer) | ||
113 | { | 125 | { |
114 | switch (adapter->portnum) { | 126 | adapter->pci_write_normalize(adapter, |
115 | case 0: | 127 | adapter->crb_addr_cmd_producer, crb_producer); |
116 | writel(crb_producer, NETXEN_CRB_NORMALIZE | ||
117 | (adapter, CRB_CMD_PRODUCER_OFFSET)); | ||
118 | return; | ||
119 | case 1: | ||
120 | writel(crb_producer, NETXEN_CRB_NORMALIZE | ||
121 | (adapter, CRB_CMD_PRODUCER_OFFSET_1)); | ||
122 | return; | ||
123 | case 2: | ||
124 | writel(crb_producer, NETXEN_CRB_NORMALIZE | ||
125 | (adapter, CRB_CMD_PRODUCER_OFFSET_2)); | ||
126 | return; | ||
127 | case 3: | ||
128 | writel(crb_producer, NETXEN_CRB_NORMALIZE | ||
129 | (adapter, CRB_CMD_PRODUCER_OFFSET_3)); | ||
130 | return; | ||
131 | default: | ||
132 | printk(KERN_WARNING "We tried to update " | ||
133 | "CRB_CMD_PRODUCER_OFFSET for invalid " | ||
134 | "PCI function id %d\n", | ||
135 | adapter->portnum); | ||
136 | return; | ||
137 | } | ||
138 | } | 128 | } |
139 | 129 | ||
140 | static void netxen_nic_update_cmd_consumer(struct netxen_adapter *adapter, | 130 | static uint32_t crb_cmd_consumer[4] = { |
141 | u32 crb_consumer) | 131 | CRB_CMD_CONSUMER_OFFSET, CRB_CMD_CONSUMER_OFFSET_1, |
132 | CRB_CMD_CONSUMER_OFFSET_2, CRB_CMD_CONSUMER_OFFSET_3 | ||
133 | }; | ||
134 | |||
135 | static inline void | ||
136 | netxen_nic_update_cmd_consumer(struct netxen_adapter *adapter, | ||
137 | u32 crb_consumer) | ||
142 | { | 138 | { |
143 | switch (adapter->portnum) { | 139 | adapter->pci_write_normalize(adapter, |
144 | case 0: | 140 | adapter->crb_addr_cmd_consumer, crb_consumer); |
145 | writel(crb_consumer, NETXEN_CRB_NORMALIZE | ||
146 | (adapter, CRB_CMD_CONSUMER_OFFSET)); | ||
147 | return; | ||
148 | case 1: | ||
149 | writel(crb_consumer, NETXEN_CRB_NORMALIZE | ||
150 | (adapter, CRB_CMD_CONSUMER_OFFSET_1)); | ||
151 | return; | ||
152 | case 2: | ||
153 | writel(crb_consumer, NETXEN_CRB_NORMALIZE | ||
154 | (adapter, CRB_CMD_CONSUMER_OFFSET_2)); | ||
155 | return; | ||
156 | case 3: | ||
157 | writel(crb_consumer, NETXEN_CRB_NORMALIZE | ||
158 | (adapter, CRB_CMD_CONSUMER_OFFSET_3)); | ||
159 | return; | ||
160 | default: | ||
161 | printk(KERN_WARNING "We tried to update " | ||
162 | "CRB_CMD_PRODUCER_OFFSET for invalid " | ||
163 | "PCI function id %d\n", | ||
164 | adapter->portnum); | ||
165 | return; | ||
166 | } | ||
167 | } | 141 | } |
168 | 142 | ||
169 | #define ADAPTER_LIST_SIZE 12 | 143 | static uint32_t msi_tgt_status[8] = { |
170 | |||
171 | static uint32_t msi_tgt_status[4] = { | ||
172 | ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1, | 144 | ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1, |
173 | ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3 | 145 | ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3, |
146 | ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5, | ||
147 | ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7 | ||
174 | }; | 148 | }; |
175 | 149 | ||
176 | static uint32_t sw_int_mask[4] = { | 150 | static struct netxen_legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG; |
177 | CRB_SW_INT_MASK_0, CRB_SW_INT_MASK_1, | ||
178 | CRB_SW_INT_MASK_2, CRB_SW_INT_MASK_3 | ||
179 | }; | ||
180 | 151 | ||
181 | static void netxen_nic_disable_int(struct netxen_adapter *adapter) | 152 | static void netxen_nic_disable_int(struct netxen_adapter *adapter) |
182 | { | 153 | { |
183 | u32 mask = 0x7ff; | 154 | u32 mask = 0x7ff; |
184 | int retries = 32; | 155 | int retries = 32; |
185 | int port = adapter->portnum; | ||
186 | int pci_fn = adapter->ahw.pci_func; | 156 | int pci_fn = adapter->ahw.pci_func; |
187 | 157 | ||
188 | if (adapter->msi_mode != MSI_MODE_MULTIFUNC) | 158 | if (adapter->msi_mode != MSI_MODE_MULTIFUNC) |
189 | writel(0x0, NETXEN_CRB_NORMALIZE(adapter, sw_int_mask[port])); | 159 | adapter->pci_write_normalize(adapter, |
160 | adapter->crb_intr_mask, 0); | ||
190 | 161 | ||
191 | if (adapter->intr_scheme != -1 && | 162 | if (adapter->intr_scheme != -1 && |
192 | adapter->intr_scheme != INTR_SCHEME_PERPORT) | 163 | adapter->intr_scheme != INTR_SCHEME_PERPORT) |
193 | writel(mask,PCI_OFFSET_SECOND_RANGE(adapter, ISR_INT_MASK)); | 164 | adapter->pci_write_immediate(adapter, ISR_INT_MASK, mask); |
194 | 165 | ||
195 | if (!(adapter->flags & NETXEN_NIC_MSI_ENABLED)) { | 166 | if (!NETXEN_IS_MSI_FAMILY(adapter)) { |
196 | do { | 167 | do { |
197 | writel(0xffffffff, | 168 | adapter->pci_write_immediate(adapter, |
198 | PCI_OFFSET_SECOND_RANGE(adapter, ISR_INT_TARGET_STATUS)); | 169 | ISR_INT_TARGET_STATUS, 0xffffffff); |
199 | mask = readl(pci_base_offset(adapter, ISR_INT_VECTOR)); | 170 | mask = adapter->pci_read_immediate(adapter, |
171 | ISR_INT_VECTOR); | ||
200 | if (!(mask & 0x80)) | 172 | if (!(mask & 0x80)) |
201 | break; | 173 | break; |
202 | udelay(10); | 174 | udelay(10); |
@@ -208,8 +180,8 @@ static void netxen_nic_disable_int(struct netxen_adapter *adapter) | |||
208 | } | 180 | } |
209 | } else { | 181 | } else { |
210 | if (adapter->msi_mode == MSI_MODE_MULTIFUNC) { | 182 | if (adapter->msi_mode == MSI_MODE_MULTIFUNC) { |
211 | writel(0xffffffff, PCI_OFFSET_SECOND_RANGE(adapter, | 183 | adapter->pci_write_immediate(adapter, |
212 | msi_tgt_status[pci_fn])); | 184 | msi_tgt_status[pci_fn], 0xffffffff); |
213 | } | 185 | } |
214 | } | 186 | } |
215 | } | 187 | } |
@@ -217,7 +189,6 @@ static void netxen_nic_disable_int(struct netxen_adapter *adapter) | |||
217 | static void netxen_nic_enable_int(struct netxen_adapter *adapter) | 189 | static void netxen_nic_enable_int(struct netxen_adapter *adapter) |
218 | { | 190 | { |
219 | u32 mask; | 191 | u32 mask; |
220 | int port = adapter->portnum; | ||
221 | 192 | ||
222 | DPRINTK(1, INFO, "Entered ISR Enable \n"); | 193 | DPRINTK(1, INFO, "Entered ISR Enable \n"); |
223 | 194 | ||
@@ -235,24 +206,299 @@ static void netxen_nic_enable_int(struct netxen_adapter *adapter) | |||
235 | break; | 206 | break; |
236 | } | 207 | } |
237 | 208 | ||
238 | writel(mask, PCI_OFFSET_SECOND_RANGE(adapter, ISR_INT_MASK)); | 209 | adapter->pci_write_immediate(adapter, ISR_INT_MASK, mask); |
239 | } | 210 | } |
240 | 211 | ||
241 | writel(0x1, NETXEN_CRB_NORMALIZE(adapter, sw_int_mask[port])); | 212 | adapter->pci_write_normalize(adapter, adapter->crb_intr_mask, 0x1); |
242 | 213 | ||
243 | if (!(adapter->flags & NETXEN_NIC_MSI_ENABLED)) { | 214 | if (!NETXEN_IS_MSI_FAMILY(adapter)) { |
244 | mask = 0xbff; | 215 | mask = 0xbff; |
245 | if (adapter->intr_scheme != -1 && | 216 | if (adapter->intr_scheme != -1 && |
246 | adapter->intr_scheme != INTR_SCHEME_PERPORT) { | 217 | adapter->intr_scheme != INTR_SCHEME_PERPORT) { |
247 | writel(0X0, NETXEN_CRB_NORMALIZE(adapter, CRB_INT_VECTOR)); | 218 | adapter->pci_write_normalize(adapter, |
219 | CRB_INT_VECTOR, 0); | ||
248 | } | 220 | } |
249 | writel(mask, | 221 | adapter->pci_write_immediate(adapter, |
250 | PCI_OFFSET_SECOND_RANGE(adapter, ISR_INT_TARGET_MASK)); | 222 | ISR_INT_TARGET_MASK, mask); |
251 | } | 223 | } |
252 | 224 | ||
253 | DPRINTK(1, INFO, "Done with enable Int\n"); | 225 | DPRINTK(1, INFO, "Done with enable Int\n"); |
254 | } | 226 | } |
255 | 227 | ||
228 | static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id) | ||
229 | { | ||
230 | struct pci_dev *pdev = adapter->pdev; | ||
231 | int err; | ||
232 | uint64_t mask; | ||
233 | |||
234 | #ifdef CONFIG_IA64 | ||
235 | adapter->dma_mask = DMA_32BIT_MASK; | ||
236 | #else | ||
237 | if (revision_id >= NX_P3_B0) { | ||
238 | /* should go to DMA_64BIT_MASK */ | ||
239 | adapter->dma_mask = DMA_39BIT_MASK; | ||
240 | mask = DMA_39BIT_MASK; | ||
241 | } else if (revision_id == NX_P3_A2) { | ||
242 | adapter->dma_mask = DMA_39BIT_MASK; | ||
243 | mask = DMA_39BIT_MASK; | ||
244 | } else if (revision_id == NX_P2_C1) { | ||
245 | adapter->dma_mask = DMA_35BIT_MASK; | ||
246 | mask = DMA_35BIT_MASK; | ||
247 | } else { | ||
248 | adapter->dma_mask = DMA_32BIT_MASK; | ||
249 | mask = DMA_32BIT_MASK; | ||
250 | goto set_32_bit_mask; | ||
251 | } | ||
252 | |||
253 | /* | ||
254 | * Consistent DMA mask is set to 32 bit because it cannot be set to | ||
255 | * 35 bits. For P3 also leave it at 32 bits for now. Only the rings | ||
256 | * come off this pool. | ||
257 | */ | ||
258 | if (pci_set_dma_mask(pdev, mask) == 0 && | ||
259 | pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK) == 0) { | ||
260 | adapter->pci_using_dac = 1; | ||
261 | return 0; | ||
262 | } | ||
263 | #endif /* CONFIG_IA64 */ | ||
264 | |||
265 | set_32_bit_mask: | ||
266 | err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | ||
267 | if (!err) | ||
268 | err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); | ||
269 | if (err) { | ||
270 | DPRINTK(ERR, "No usable DMA configuration, aborting:%d\n", err); | ||
271 | return err; | ||
272 | } | ||
273 | |||
274 | adapter->pci_using_dac = 0; | ||
275 | return 0; | ||
276 | } | ||
277 | |||
278 | static void netxen_check_options(struct netxen_adapter *adapter) | ||
279 | { | ||
280 | switch (adapter->ahw.boardcfg.board_type) { | ||
281 | case NETXEN_BRDTYPE_P3_HMEZ: | ||
282 | case NETXEN_BRDTYPE_P3_XG_LOM: | ||
283 | case NETXEN_BRDTYPE_P3_10G_CX4: | ||
284 | case NETXEN_BRDTYPE_P3_10G_CX4_LP: | ||
285 | case NETXEN_BRDTYPE_P3_IMEZ: | ||
286 | case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: | ||
287 | case NETXEN_BRDTYPE_P3_10G_XFP: | ||
288 | case NETXEN_BRDTYPE_P3_10000_BASE_T: | ||
289 | adapter->msix_supported = !!use_msi_x; | ||
290 | adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_10G; | ||
291 | break; | ||
292 | |||
293 | case NETXEN_BRDTYPE_P2_SB31_10G: | ||
294 | case NETXEN_BRDTYPE_P2_SB31_10G_CX4: | ||
295 | case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ: | ||
296 | case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ: | ||
297 | adapter->msix_supported = 0; | ||
298 | adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_10G; | ||
299 | break; | ||
300 | |||
301 | case NETXEN_BRDTYPE_P3_REF_QG: | ||
302 | case NETXEN_BRDTYPE_P3_4_GB: | ||
303 | case NETXEN_BRDTYPE_P3_4_GB_MM: | ||
304 | case NETXEN_BRDTYPE_P2_SB35_4G: | ||
305 | case NETXEN_BRDTYPE_P2_SB31_2G: | ||
306 | adapter->msix_supported = 0; | ||
307 | adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_1G; | ||
308 | break; | ||
309 | |||
310 | default: | ||
311 | adapter->msix_supported = 0; | ||
312 | adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_1G; | ||
313 | |||
314 | printk(KERN_WARNING "Unknown board type(0x%x)\n", | ||
315 | adapter->ahw.boardcfg.board_type); | ||
316 | break; | ||
317 | } | ||
318 | |||
319 | adapter->max_tx_desc_count = MAX_CMD_DESCRIPTORS_HOST; | ||
320 | adapter->max_jumbo_rx_desc_count = MAX_JUMBO_RCV_DESCRIPTORS; | ||
321 | adapter->max_lro_rx_desc_count = MAX_LRO_RCV_DESCRIPTORS; | ||
322 | |||
323 | adapter->max_possible_rss_rings = 1; | ||
324 | return; | ||
325 | } | ||
326 | |||
327 | static int | ||
328 | netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot) | ||
329 | { | ||
330 | int ret = 0; | ||
331 | |||
332 | if (first_boot == 0x55555555) { | ||
333 | /* This is the first boot after power up */ | ||
334 | |||
335 | /* PCI bus master workaround */ | ||
336 | adapter->hw_read_wx(adapter, | ||
337 | NETXEN_PCIE_REG(0x4), &first_boot, 4); | ||
338 | if (!(first_boot & 0x4)) { | ||
339 | first_boot |= 0x4; | ||
340 | adapter->hw_write_wx(adapter, | ||
341 | NETXEN_PCIE_REG(0x4), &first_boot, 4); | ||
342 | adapter->hw_read_wx(adapter, | ||
343 | NETXEN_PCIE_REG(0x4), &first_boot, 4); | ||
344 | } | ||
345 | |||
346 | /* This is the first boot after power up */ | ||
347 | adapter->hw_read_wx(adapter, | ||
348 | NETXEN_ROMUSB_GLB_SW_RESET, &first_boot, 4); | ||
349 | if (first_boot != 0x80000f) { | ||
350 | /* clear the register for future unloads/loads */ | ||
351 | adapter->pci_write_normalize(adapter, | ||
352 | NETXEN_CAM_RAM(0x1fc), 0); | ||
353 | ret = -1; | ||
354 | } | ||
355 | |||
356 | if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { | ||
357 | /* Start P2 boot loader */ | ||
358 | adapter->pci_write_normalize(adapter, | ||
359 | NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC); | ||
360 | adapter->pci_write_normalize(adapter, | ||
361 | NETXEN_ROMUSB_GLB_PEGTUNE_DONE, 1); | ||
362 | } | ||
363 | } | ||
364 | return ret; | ||
365 | } | ||
366 | |||
367 | static void netxen_set_port_mode(struct netxen_adapter *adapter) | ||
368 | { | ||
369 | u32 val, data; | ||
370 | |||
371 | val = adapter->ahw.boardcfg.board_type; | ||
372 | if ((val == NETXEN_BRDTYPE_P3_HMEZ) || | ||
373 | (val == NETXEN_BRDTYPE_P3_XG_LOM)) { | ||
374 | if (port_mode == NETXEN_PORT_MODE_802_3_AP) { | ||
375 | data = NETXEN_PORT_MODE_802_3_AP; | ||
376 | adapter->hw_write_wx(adapter, | ||
377 | NETXEN_PORT_MODE_ADDR, &data, 4); | ||
378 | } else if (port_mode == NETXEN_PORT_MODE_XG) { | ||
379 | data = NETXEN_PORT_MODE_XG; | ||
380 | adapter->hw_write_wx(adapter, | ||
381 | NETXEN_PORT_MODE_ADDR, &data, 4); | ||
382 | } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_1G) { | ||
383 | data = NETXEN_PORT_MODE_AUTO_NEG_1G; | ||
384 | adapter->hw_write_wx(adapter, | ||
385 | NETXEN_PORT_MODE_ADDR, &data, 4); | ||
386 | } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_XG) { | ||
387 | data = NETXEN_PORT_MODE_AUTO_NEG_XG; | ||
388 | adapter->hw_write_wx(adapter, | ||
389 | NETXEN_PORT_MODE_ADDR, &data, 4); | ||
390 | } else { | ||
391 | data = NETXEN_PORT_MODE_AUTO_NEG; | ||
392 | adapter->hw_write_wx(adapter, | ||
393 | NETXEN_PORT_MODE_ADDR, &data, 4); | ||
394 | } | ||
395 | |||
396 | if ((wol_port_mode != NETXEN_PORT_MODE_802_3_AP) && | ||
397 | (wol_port_mode != NETXEN_PORT_MODE_XG) && | ||
398 | (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_1G) && | ||
399 | (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_XG)) { | ||
400 | wol_port_mode = NETXEN_PORT_MODE_AUTO_NEG; | ||
401 | } | ||
402 | adapter->hw_write_wx(adapter, NETXEN_WOL_PORT_MODE, | ||
403 | &wol_port_mode, 4); | ||
404 | } | ||
405 | } | ||
406 | |||
407 | #define PCI_CAP_ID_GEN 0x10 | ||
408 | |||
409 | static void netxen_pcie_strap_init(struct netxen_adapter *adapter) | ||
410 | { | ||
411 | u32 pdevfuncsave; | ||
412 | u32 c8c9value = 0; | ||
413 | u32 chicken = 0; | ||
414 | u32 control = 0; | ||
415 | int i, pos; | ||
416 | struct pci_dev *pdev; | ||
417 | |||
418 | pdev = pci_get_device(0x1166, 0x0140, NULL); | ||
419 | if (pdev) { | ||
420 | pci_dev_put(pdev); | ||
421 | adapter->hw_read_wx(adapter, | ||
422 | NETXEN_PCIE_REG(PCIE_TGT_SPLIT_CHICKEN), &chicken, 4); | ||
423 | chicken |= 0x4000; | ||
424 | adapter->hw_write_wx(adapter, | ||
425 | NETXEN_PCIE_REG(PCIE_TGT_SPLIT_CHICKEN), &chicken, 4); | ||
426 | } | ||
427 | |||
428 | pdev = adapter->pdev; | ||
429 | |||
430 | adapter->hw_read_wx(adapter, | ||
431 | NETXEN_PCIE_REG(PCIE_CHICKEN3), &chicken, 4); | ||
432 | /* clear chicken3.25:24 */ | ||
433 | chicken &= 0xFCFFFFFF; | ||
434 | /* | ||
435 | * if gen1 and B0, set F1020 - if gen 2, do nothing | ||
436 | * if gen2 set to F1000 | ||
437 | */ | ||
438 | pos = pci_find_capability(pdev, PCI_CAP_ID_GEN); | ||
439 | if (pos == 0xC0) { | ||
440 | pci_read_config_dword(pdev, pos + 0x10, &control); | ||
441 | if ((control & 0x000F0000) != 0x00020000) { | ||
442 | /* set chicken3.24 if gen1 */ | ||
443 | chicken |= 0x01000000; | ||
444 | } | ||
445 | printk(KERN_INFO "%s Gen2 strapping detected\n", | ||
446 | netxen_nic_driver_name); | ||
447 | c8c9value = 0xF1000; | ||
448 | } else { | ||
449 | /* set chicken3.24 if gen1 */ | ||
450 | chicken |= 0x01000000; | ||
451 | printk(KERN_INFO "%s Gen1 strapping detected\n", | ||
452 | netxen_nic_driver_name); | ||
453 | if (adapter->ahw.revision_id == NX_P3_B0) | ||
454 | c8c9value = 0xF1020; | ||
455 | else | ||
456 | c8c9value = 0; | ||
457 | |||
458 | } | ||
459 | adapter->hw_write_wx(adapter, | ||
460 | NETXEN_PCIE_REG(PCIE_CHICKEN3), &chicken, 4); | ||
461 | |||
462 | if (!c8c9value) | ||
463 | return; | ||
464 | |||
465 | pdevfuncsave = pdev->devfn; | ||
466 | if (pdevfuncsave & 0x07) | ||
467 | return; | ||
468 | |||
469 | for (i = 0; i < 8; i++) { | ||
470 | pci_read_config_dword(pdev, pos + 8, &control); | ||
471 | pci_read_config_dword(pdev, pos + 8, &control); | ||
472 | pci_write_config_dword(pdev, pos + 8, c8c9value); | ||
473 | pdev->devfn++; | ||
474 | } | ||
475 | pdev->devfn = pdevfuncsave; | ||
476 | } | ||
477 | |||
478 | static void netxen_set_msix_bit(struct pci_dev *pdev, int enable) | ||
479 | { | ||
480 | u32 control; | ||
481 | int pos; | ||
482 | |||
483 | pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); | ||
484 | if (pos) { | ||
485 | pci_read_config_dword(pdev, pos, &control); | ||
486 | if (enable) | ||
487 | control |= PCI_MSIX_FLAGS_ENABLE; | ||
488 | else | ||
489 | control = 0; | ||
490 | pci_write_config_dword(pdev, pos, control); | ||
491 | } | ||
492 | } | ||
493 | |||
494 | static void netxen_init_msix_entries(struct netxen_adapter *adapter) | ||
495 | { | ||
496 | int i; | ||
497 | |||
498 | for (i = 0; i < MSIX_ENTRIES_PER_ADAPTER; i++) | ||
499 | adapter->msix_entries[i].entry = i; | ||
500 | } | ||
501 | |||
256 | /* | 502 | /* |
257 | * netxen_nic_probe() | 503 | * netxen_nic_probe() |
258 | * | 504 | * |
@@ -278,28 +524,28 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
278 | 524 | ||
279 | 525 | ||
280 | u8 __iomem *db_ptr = NULL; | 526 | u8 __iomem *db_ptr = NULL; |
281 | unsigned long mem_base, mem_len, db_base, db_len; | 527 | unsigned long mem_base, mem_len, db_base, db_len, pci_len0 = 0; |
282 | int pci_using_dac, i = 0, err; | 528 | int i = 0, err; |
283 | int ring; | 529 | int first_driver, first_boot; |
284 | struct netxen_recv_context *recv_ctx = NULL; | ||
285 | struct netxen_rcv_desc_ctx *rcv_desc = NULL; | ||
286 | struct netxen_cmd_buffer *cmd_buf_arr = NULL; | ||
287 | __le64 mac_addr[FLASH_NUM_PORTS + 1]; | 530 | __le64 mac_addr[FLASH_NUM_PORTS + 1]; |
288 | int valid_mac = 0; | ||
289 | u32 val; | 531 | u32 val; |
290 | int pci_func_id = PCI_FUNC(pdev->devfn); | 532 | int pci_func_id = PCI_FUNC(pdev->devfn); |
291 | DECLARE_MAC_BUF(mac); | 533 | DECLARE_MAC_BUF(mac); |
534 | struct netxen_legacy_intr_set *legacy_intrp; | ||
535 | uint8_t revision_id; | ||
292 | 536 | ||
293 | if (pci_func_id == 0) | 537 | if (pci_func_id == 0) |
294 | printk(KERN_INFO "%s \n", netxen_nic_driver_string); | 538 | printk(KERN_INFO "%s\n", netxen_nic_driver_string); |
295 | 539 | ||
296 | if (pdev->class != 0x020000) { | 540 | if (pdev->class != 0x020000) { |
297 | printk(KERN_DEBUG "NetXen function %d, class %x will not " | 541 | printk(KERN_DEBUG "NetXen function %d, class %x will not " |
298 | "be enabled.\n",pci_func_id, pdev->class); | 542 | "be enabled.\n",pci_func_id, pdev->class); |
299 | return -ENODEV; | 543 | return -ENODEV; |
300 | } | 544 | } |
545 | |||
301 | if ((err = pci_enable_device(pdev))) | 546 | if ((err = pci_enable_device(pdev))) |
302 | return err; | 547 | return err; |
548 | |||
303 | if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { | 549 | if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { |
304 | err = -ENODEV; | 550 | err = -ENODEV; |
305 | goto err_out_disable_pdev; | 551 | goto err_out_disable_pdev; |
@@ -309,18 +555,6 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
309 | goto err_out_disable_pdev; | 555 | goto err_out_disable_pdev; |
310 | 556 | ||
311 | pci_set_master(pdev); | 557 | pci_set_master(pdev); |
312 | if (pdev->revision == NX_P2_C1 && | ||
313 | (pci_set_dma_mask(pdev, DMA_35BIT_MASK) == 0) && | ||
314 | (pci_set_consistent_dma_mask(pdev, DMA_35BIT_MASK) == 0)) { | ||
315 | pci_using_dac = 1; | ||
316 | } else { | ||
317 | if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) || | ||
318 | (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) | ||
319 | goto err_out_free_res; | ||
320 | |||
321 | pci_using_dac = 0; | ||
322 | } | ||
323 | |||
324 | 558 | ||
325 | netdev = alloc_etherdev(sizeof(struct netxen_adapter)); | 559 | netdev = alloc_etherdev(sizeof(struct netxen_adapter)); |
326 | if(!netdev) { | 560 | if(!netdev) { |
@@ -333,13 +567,35 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
333 | SET_NETDEV_DEV(netdev, &pdev->dev); | 567 | SET_NETDEV_DEV(netdev, &pdev->dev); |
334 | 568 | ||
335 | adapter = netdev->priv; | 569 | adapter = netdev->priv; |
336 | 570 | adapter->netdev = netdev; | |
337 | adapter->ahw.pdev = pdev; | 571 | adapter->pdev = pdev; |
338 | adapter->ahw.pci_func = pci_func_id; | 572 | adapter->ahw.pci_func = pci_func_id; |
339 | 573 | ||
574 | revision_id = pdev->revision; | ||
575 | adapter->ahw.revision_id = revision_id; | ||
576 | |||
577 | err = nx_set_dma_mask(adapter, revision_id); | ||
578 | if (err) | ||
579 | goto err_out_free_netdev; | ||
580 | |||
581 | rwlock_init(&adapter->adapter_lock); | ||
582 | adapter->ahw.qdr_sn_window = -1; | ||
583 | adapter->ahw.ddr_mn_window = -1; | ||
584 | |||
340 | /* remap phys address */ | 585 | /* remap phys address */ |
341 | mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ | 586 | mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ |
342 | mem_len = pci_resource_len(pdev, 0); | 587 | mem_len = pci_resource_len(pdev, 0); |
588 | pci_len0 = 0; | ||
589 | |||
590 | adapter->hw_write_wx = netxen_nic_hw_write_wx_128M; | ||
591 | adapter->hw_read_wx = netxen_nic_hw_read_wx_128M; | ||
592 | adapter->pci_read_immediate = netxen_nic_pci_read_immediate_128M; | ||
593 | adapter->pci_write_immediate = netxen_nic_pci_write_immediate_128M; | ||
594 | adapter->pci_read_normalize = netxen_nic_pci_read_normalize_128M; | ||
595 | adapter->pci_write_normalize = netxen_nic_pci_write_normalize_128M; | ||
596 | adapter->pci_set_window = netxen_nic_pci_set_window_128M; | ||
597 | adapter->pci_mem_read = netxen_nic_pci_mem_read_128M; | ||
598 | adapter->pci_mem_write = netxen_nic_pci_mem_write_128M; | ||
343 | 599 | ||
344 | /* 128 Meg of memory */ | 600 | /* 128 Meg of memory */ |
345 | if (mem_len == NETXEN_PCI_128MB_SIZE) { | 601 | if (mem_len == NETXEN_PCI_128MB_SIZE) { |
@@ -356,27 +612,48 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
356 | SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE); | 612 | SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE); |
357 | first_page_group_start = 0; | 613 | first_page_group_start = 0; |
358 | first_page_group_end = 0; | 614 | first_page_group_end = 0; |
615 | } else if (mem_len == NETXEN_PCI_2MB_SIZE) { | ||
616 | adapter->hw_write_wx = netxen_nic_hw_write_wx_2M; | ||
617 | adapter->hw_read_wx = netxen_nic_hw_read_wx_2M; | ||
618 | adapter->pci_read_immediate = netxen_nic_pci_read_immediate_2M; | ||
619 | adapter->pci_write_immediate = | ||
620 | netxen_nic_pci_write_immediate_2M; | ||
621 | adapter->pci_read_normalize = netxen_nic_pci_read_normalize_2M; | ||
622 | adapter->pci_write_normalize = | ||
623 | netxen_nic_pci_write_normalize_2M; | ||
624 | adapter->pci_set_window = netxen_nic_pci_set_window_2M; | ||
625 | adapter->pci_mem_read = netxen_nic_pci_mem_read_2M; | ||
626 | adapter->pci_mem_write = netxen_nic_pci_mem_write_2M; | ||
627 | |||
628 | mem_ptr0 = ioremap(mem_base, mem_len); | ||
629 | pci_len0 = mem_len; | ||
630 | first_page_group_start = 0; | ||
631 | first_page_group_end = 0; | ||
632 | |||
633 | adapter->ahw.ddr_mn_window = 0; | ||
634 | adapter->ahw.qdr_sn_window = 0; | ||
635 | |||
636 | adapter->ahw.mn_win_crb = 0x100000 + PCIX_MN_WINDOW + | ||
637 | (pci_func_id * 0x20); | ||
638 | adapter->ahw.ms_win_crb = 0x100000 + PCIX_SN_WINDOW; | ||
639 | if (pci_func_id < 4) | ||
640 | adapter->ahw.ms_win_crb += (pci_func_id * 0x20); | ||
641 | else | ||
642 | adapter->ahw.ms_win_crb += | ||
643 | 0xA0 + ((pci_func_id - 4) * 0x10); | ||
359 | } else { | 644 | } else { |
360 | err = -EIO; | 645 | err = -EIO; |
361 | goto err_out_free_netdev; | 646 | goto err_out_free_netdev; |
362 | } | 647 | } |
363 | 648 | ||
364 | if ((!mem_ptr0 && mem_len == NETXEN_PCI_128MB_SIZE) || | 649 | dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20)); |
365 | !mem_ptr1 || !mem_ptr2) { | ||
366 | DPRINTK(ERR, | ||
367 | "Cannot remap adapter memory aborting.:" | ||
368 | "0 -> %p, 1 -> %p, 2 -> %p\n", | ||
369 | mem_ptr0, mem_ptr1, mem_ptr2); | ||
370 | 650 | ||
371 | err = -EIO; | ||
372 | goto err_out_iounmap; | ||
373 | } | ||
374 | db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */ | 651 | db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */ |
375 | db_len = pci_resource_len(pdev, 4); | 652 | db_len = pci_resource_len(pdev, 4); |
376 | 653 | ||
377 | if (db_len == 0) { | 654 | if (db_len == 0) { |
378 | printk(KERN_ERR "%s: doorbell is disabled\n", | 655 | printk(KERN_ERR "%s: doorbell is disabled\n", |
379 | netxen_nic_driver_name); | 656 | netxen_nic_driver_name); |
380 | err = -EIO; | 657 | err = -EIO; |
381 | goto err_out_iounmap; | 658 | goto err_out_iounmap; |
382 | } | 659 | } |
@@ -386,13 +663,14 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
386 | db_ptr = ioremap(db_base, NETXEN_DB_MAPSIZE_BYTES); | 663 | db_ptr = ioremap(db_base, NETXEN_DB_MAPSIZE_BYTES); |
387 | if (!db_ptr) { | 664 | if (!db_ptr) { |
388 | printk(KERN_ERR "%s: Failed to allocate doorbell map.", | 665 | printk(KERN_ERR "%s: Failed to allocate doorbell map.", |
389 | netxen_nic_driver_name); | 666 | netxen_nic_driver_name); |
390 | err = -EIO; | 667 | err = -EIO; |
391 | goto err_out_iounmap; | 668 | goto err_out_iounmap; |
392 | } | 669 | } |
393 | DPRINTK(INFO, "doorbell ioremaped at %p\n", db_ptr); | 670 | DPRINTK(INFO, "doorbell ioremaped at %p\n", db_ptr); |
394 | 671 | ||
395 | adapter->ahw.pci_base0 = mem_ptr0; | 672 | adapter->ahw.pci_base0 = mem_ptr0; |
673 | adapter->ahw.pci_len0 = pci_len0; | ||
396 | adapter->ahw.first_page_group_start = first_page_group_start; | 674 | adapter->ahw.first_page_group_start = first_page_group_start; |
397 | adapter->ahw.first_page_group_end = first_page_group_end; | 675 | adapter->ahw.first_page_group_end = first_page_group_end; |
398 | adapter->ahw.pci_base1 = mem_ptr1; | 676 | adapter->ahw.pci_base1 = mem_ptr1; |
@@ -400,11 +678,18 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
400 | adapter->ahw.db_base = db_ptr; | 678 | adapter->ahw.db_base = db_ptr; |
401 | adapter->ahw.db_len = db_len; | 679 | adapter->ahw.db_len = db_len; |
402 | 680 | ||
403 | adapter->netdev = netdev; | ||
404 | adapter->pdev = pdev; | ||
405 | |||
406 | netif_napi_add(netdev, &adapter->napi, | 681 | netif_napi_add(netdev, &adapter->napi, |
407 | netxen_nic_poll, NETXEN_NETDEV_WEIGHT); | 682 | netxen_nic_poll, NETXEN_NETDEV_WEIGHT); |
683 | |||
684 | if (revision_id >= NX_P3_B0) | ||
685 | legacy_intrp = &legacy_intr[pci_func_id]; | ||
686 | else | ||
687 | legacy_intrp = &legacy_intr[0]; | ||
688 | |||
689 | adapter->legacy_intr.int_vec_bit = legacy_intrp->int_vec_bit; | ||
690 | adapter->legacy_intr.tgt_status_reg = legacy_intrp->tgt_status_reg; | ||
691 | adapter->legacy_intr.tgt_mask_reg = legacy_intrp->tgt_mask_reg; | ||
692 | adapter->legacy_intr.pci_int_reg = legacy_intrp->pci_int_reg; | ||
408 | 693 | ||
409 | /* this will be read from FW later */ | 694 | /* this will be read from FW later */ |
410 | adapter->intr_scheme = -1; | 695 | adapter->intr_scheme = -1; |
@@ -414,12 +699,23 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
414 | adapter->portnum = pci_func_id; | 699 | adapter->portnum = pci_func_id; |
415 | adapter->status &= ~NETXEN_NETDEV_STATUS; | 700 | adapter->status &= ~NETXEN_NETDEV_STATUS; |
416 | adapter->rx_csum = 1; | 701 | adapter->rx_csum = 1; |
702 | adapter->mc_enabled = 0; | ||
703 | if (NX_IS_REVISION_P3(revision_id)) { | ||
704 | adapter->max_mc_count = 38; | ||
705 | adapter->max_rds_rings = 2; | ||
706 | } else { | ||
707 | adapter->max_mc_count = 16; | ||
708 | adapter->max_rds_rings = 3; | ||
709 | } | ||
417 | 710 | ||
418 | netdev->open = netxen_nic_open; | 711 | netdev->open = netxen_nic_open; |
419 | netdev->stop = netxen_nic_close; | 712 | netdev->stop = netxen_nic_close; |
420 | netdev->hard_start_xmit = netxen_nic_xmit_frame; | 713 | netdev->hard_start_xmit = netxen_nic_xmit_frame; |
421 | netdev->get_stats = netxen_nic_get_stats; | 714 | netdev->get_stats = netxen_nic_get_stats; |
422 | netdev->set_multicast_list = netxen_nic_set_multi; | 715 | if (NX_IS_REVISION_P3(revision_id)) |
716 | netdev->set_multicast_list = netxen_p3_nic_set_multi; | ||
717 | else | ||
718 | netdev->set_multicast_list = netxen_p2_nic_set_multi; | ||
423 | netdev->set_mac_address = netxen_nic_set_mac; | 719 | netdev->set_mac_address = netxen_nic_set_mac; |
424 | netdev->change_mtu = netxen_nic_change_mtu; | 720 | netdev->change_mtu = netxen_nic_change_mtu; |
425 | netdev->tx_timeout = netxen_tx_timeout; | 721 | netdev->tx_timeout = netxen_tx_timeout; |
@@ -435,18 +731,14 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
435 | netdev->features = NETIF_F_SG; | 731 | netdev->features = NETIF_F_SG; |
436 | netdev->features |= NETIF_F_IP_CSUM; | 732 | netdev->features |= NETIF_F_IP_CSUM; |
437 | netdev->features |= NETIF_F_TSO; | 733 | netdev->features |= NETIF_F_TSO; |
734 | if (NX_IS_REVISION_P3(revision_id)) { | ||
735 | netdev->features |= NETIF_F_IPV6_CSUM; | ||
736 | netdev->features |= NETIF_F_TSO6; | ||
737 | } | ||
438 | 738 | ||
439 | if (pci_using_dac) | 739 | if (adapter->pci_using_dac) |
440 | netdev->features |= NETIF_F_HIGHDMA; | 740 | netdev->features |= NETIF_F_HIGHDMA; |
441 | 741 | ||
442 | if (pci_enable_msi(pdev)) | ||
443 | adapter->flags &= ~NETXEN_NIC_MSI_ENABLED; | ||
444 | else | ||
445 | adapter->flags |= NETXEN_NIC_MSI_ENABLED; | ||
446 | |||
447 | netdev->irq = pdev->irq; | ||
448 | INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task); | ||
449 | |||
450 | /* | 742 | /* |
451 | * Set the CRB window to invalid. If any register in window 0 is | 743 | * Set the CRB window to invalid. If any register in window 0 is |
452 | * accessed it should set the window to 0 and then reset it to 1. | 744 | * accessed it should set the window to 0 and then reset it to 1. |
@@ -455,87 +747,13 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
455 | 747 | ||
456 | if (netxen_nic_get_board_info(adapter) != 0) { | 748 | if (netxen_nic_get_board_info(adapter) != 0) { |
457 | printk("%s: Error getting board config info.\n", | 749 | printk("%s: Error getting board config info.\n", |
458 | netxen_nic_driver_name); | 750 | netxen_nic_driver_name); |
459 | err = -EIO; | 751 | err = -EIO; |
460 | goto err_out_iounmap; | 752 | goto err_out_iounmap; |
461 | } | 753 | } |
462 | 754 | ||
463 | /* | ||
464 | * Adapter in our case is quad port so initialize it before | ||
465 | * initializing the ports | ||
466 | */ | ||
467 | |||
468 | netxen_initialize_adapter_ops(adapter); | 755 | netxen_initialize_adapter_ops(adapter); |
469 | 756 | ||
470 | adapter->max_tx_desc_count = MAX_CMD_DESCRIPTORS_HOST; | ||
471 | if ((adapter->ahw.boardcfg.board_type == NETXEN_BRDTYPE_P2_SB35_4G) || | ||
472 | (adapter->ahw.boardcfg.board_type == | ||
473 | NETXEN_BRDTYPE_P2_SB31_2G)) | ||
474 | adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_1G; | ||
475 | else | ||
476 | adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS; | ||
477 | adapter->max_jumbo_rx_desc_count = MAX_JUMBO_RCV_DESCRIPTORS; | ||
478 | adapter->max_lro_rx_desc_count = MAX_LRO_RCV_DESCRIPTORS; | ||
479 | |||
480 | cmd_buf_arr = (struct netxen_cmd_buffer *)vmalloc(TX_RINGSIZE); | ||
481 | if (cmd_buf_arr == NULL) { | ||
482 | printk(KERN_ERR | ||
483 | "%s: Could not allocate cmd_buf_arr memory:%d\n", | ||
484 | netxen_nic_driver_name, (int)TX_RINGSIZE); | ||
485 | err = -ENOMEM; | ||
486 | goto err_out_free_adapter; | ||
487 | } | ||
488 | memset(cmd_buf_arr, 0, TX_RINGSIZE); | ||
489 | adapter->cmd_buf_arr = cmd_buf_arr; | ||
490 | |||
491 | for (i = 0; i < MAX_RCV_CTX; ++i) { | ||
492 | recv_ctx = &adapter->recv_ctx[i]; | ||
493 | for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) { | ||
494 | rcv_desc = &recv_ctx->rcv_desc[ring]; | ||
495 | switch (RCV_DESC_TYPE(ring)) { | ||
496 | case RCV_DESC_NORMAL: | ||
497 | rcv_desc->max_rx_desc_count = | ||
498 | adapter->max_rx_desc_count; | ||
499 | rcv_desc->flags = RCV_DESC_NORMAL; | ||
500 | rcv_desc->dma_size = RX_DMA_MAP_LEN; | ||
501 | rcv_desc->skb_size = MAX_RX_BUFFER_LENGTH; | ||
502 | break; | ||
503 | |||
504 | case RCV_DESC_JUMBO: | ||
505 | rcv_desc->max_rx_desc_count = | ||
506 | adapter->max_jumbo_rx_desc_count; | ||
507 | rcv_desc->flags = RCV_DESC_JUMBO; | ||
508 | rcv_desc->dma_size = RX_JUMBO_DMA_MAP_LEN; | ||
509 | rcv_desc->skb_size = MAX_RX_JUMBO_BUFFER_LENGTH; | ||
510 | break; | ||
511 | |||
512 | case RCV_RING_LRO: | ||
513 | rcv_desc->max_rx_desc_count = | ||
514 | adapter->max_lro_rx_desc_count; | ||
515 | rcv_desc->flags = RCV_DESC_LRO; | ||
516 | rcv_desc->dma_size = RX_LRO_DMA_MAP_LEN; | ||
517 | rcv_desc->skb_size = MAX_RX_LRO_BUFFER_LENGTH; | ||
518 | break; | ||
519 | |||
520 | } | ||
521 | rcv_desc->rx_buf_arr = (struct netxen_rx_buffer *) | ||
522 | vmalloc(RCV_BUFFSIZE); | ||
523 | |||
524 | if (rcv_desc->rx_buf_arr == NULL) { | ||
525 | printk(KERN_ERR "%s: Could not allocate " | ||
526 | "rcv_desc->rx_buf_arr memory:%d\n", | ||
527 | netxen_nic_driver_name, | ||
528 | (int)RCV_BUFFSIZE); | ||
529 | err = -ENOMEM; | ||
530 | goto err_out_free_rx_buffer; | ||
531 | } | ||
532 | memset(rcv_desc->rx_buf_arr, 0, RCV_BUFFSIZE); | ||
533 | } | ||
534 | |||
535 | } | ||
536 | |||
537 | netxen_initialize_adapter_sw(adapter); /* initialize the buffers in adapter */ | ||
538 | |||
539 | /* Mezz cards have PCI function 0,2,3 enabled */ | 757 | /* Mezz cards have PCI function 0,2,3 enabled */ |
540 | switch (adapter->ahw.boardcfg.board_type) { | 758 | switch (adapter->ahw.boardcfg.board_type) { |
541 | case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ: | 759 | case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ: |
@@ -547,90 +765,71 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
547 | break; | 765 | break; |
548 | } | 766 | } |
549 | 767 | ||
550 | init_timer(&adapter->watchdog_timer); | 768 | /* |
551 | adapter->ahw.xg_linkup = 0; | 769 | * This call will setup various max rx/tx counts. |
552 | adapter->watchdog_timer.function = &netxen_watchdog; | 770 | * It must be done before any buffer/ring allocations. |
553 | adapter->watchdog_timer.data = (unsigned long)adapter; | 771 | */ |
554 | INIT_WORK(&adapter->watchdog_task, netxen_watchdog_task); | 772 | netxen_check_options(adapter); |
555 | adapter->ahw.pdev = pdev; | ||
556 | adapter->ahw.revision_id = pdev->revision; | ||
557 | |||
558 | /* make sure Window == 1 */ | ||
559 | netxen_nic_pci_change_crbwindow(adapter, 1); | ||
560 | 773 | ||
774 | first_driver = 0; | ||
775 | if (NX_IS_REVISION_P3(revision_id)) { | ||
776 | if (adapter->ahw.pci_func == 0) | ||
777 | first_driver = 1; | ||
778 | } else { | ||
779 | if (adapter->portnum == 0) | ||
780 | first_driver = 1; | ||
781 | } | ||
782 | adapter->crb_addr_cmd_producer = crb_cmd_producer[adapter->portnum]; | ||
783 | adapter->crb_addr_cmd_consumer = crb_cmd_consumer[adapter->portnum]; | ||
561 | netxen_nic_update_cmd_producer(adapter, 0); | 784 | netxen_nic_update_cmd_producer(adapter, 0); |
562 | netxen_nic_update_cmd_consumer(adapter, 0); | 785 | netxen_nic_update_cmd_consumer(adapter, 0); |
563 | writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_HOST_CMD_ADDR_LO)); | ||
564 | 786 | ||
565 | if (netxen_is_flash_supported(adapter) == 0 && | 787 | if (first_driver) { |
566 | netxen_get_flash_mac_addr(adapter, mac_addr) == 0) | 788 | first_boot = adapter->pci_read_normalize(adapter, |
567 | valid_mac = 1; | 789 | NETXEN_CAM_RAM(0x1fc)); |
568 | else | ||
569 | valid_mac = 0; | ||
570 | |||
571 | if (valid_mac) { | ||
572 | unsigned char *p = (unsigned char *)&mac_addr[adapter->portnum]; | ||
573 | netdev->dev_addr[0] = *(p + 5); | ||
574 | netdev->dev_addr[1] = *(p + 4); | ||
575 | netdev->dev_addr[2] = *(p + 3); | ||
576 | netdev->dev_addr[3] = *(p + 2); | ||
577 | netdev->dev_addr[4] = *(p + 1); | ||
578 | netdev->dev_addr[5] = *(p + 0); | ||
579 | 790 | ||
580 | memcpy(netdev->perm_addr, netdev->dev_addr, | 791 | err = netxen_check_hw_init(adapter, first_boot); |
581 | netdev->addr_len); | 792 | if (err) { |
582 | if (!is_valid_ether_addr(netdev->perm_addr)) { | 793 | printk(KERN_ERR "%s: error in init HW init sequence\n", |
583 | printk(KERN_ERR "%s: Bad MAC address %s.\n", | 794 | netxen_nic_driver_name); |
584 | netxen_nic_driver_name, | 795 | goto err_out_iounmap; |
585 | print_mac(mac, netdev->dev_addr)); | ||
586 | } else { | ||
587 | if (adapter->macaddr_set) | ||
588 | adapter->macaddr_set(adapter, | ||
589 | netdev->dev_addr); | ||
590 | } | 796 | } |
591 | } | ||
592 | 797 | ||
593 | if (adapter->portnum == 0) { | 798 | if (NX_IS_REVISION_P3(revision_id)) |
594 | err = netxen_initialize_adapter_offload(adapter); | 799 | netxen_set_port_mode(adapter); |
595 | if (err) | 800 | |
596 | goto err_out_free_rx_buffer; | 801 | if (first_boot != 0x55555555) { |
597 | val = readl(NETXEN_CRB_NORMALIZE(adapter, | 802 | adapter->pci_write_normalize(adapter, |
598 | NETXEN_CAM_RAM(0x1fc))); | 803 | CRB_CMDPEG_STATE, 0); |
599 | if (val == 0x55555555) { | ||
600 | /* This is the first boot after power up */ | ||
601 | netxen_nic_read_w0(adapter, NETXEN_PCIE_REG(0x4), &val); | ||
602 | if (!(val & 0x4)) { | ||
603 | val |= 0x4; | ||
604 | netxen_nic_write_w0(adapter, NETXEN_PCIE_REG(0x4), val); | ||
605 | netxen_nic_read_w0(adapter, NETXEN_PCIE_REG(0x4), &val); | ||
606 | if (!(val & 0x4)) | ||
607 | printk(KERN_ERR "%s: failed to set MSI bit in PCI-e reg\n", | ||
608 | netxen_nic_driver_name); | ||
609 | } | ||
610 | val = readl(NETXEN_CRB_NORMALIZE(adapter, | ||
611 | NETXEN_ROMUSB_GLB_SW_RESET)); | ||
612 | printk(KERN_INFO"NetXen: read 0x%08x for reset reg.\n",val); | ||
613 | if (val != 0x80000f) { | ||
614 | /* clear the register for future unloads/loads */ | ||
615 | writel(0, NETXEN_CRB_NORMALIZE(adapter, | ||
616 | NETXEN_CAM_RAM(0x1fc))); | ||
617 | printk(KERN_ERR "ERROR in NetXen HW init sequence.\n"); | ||
618 | err = -ENODEV; | ||
619 | goto err_out_free_dev; | ||
620 | } | ||
621 | } else { | ||
622 | writel(0, NETXEN_CRB_NORMALIZE(adapter, | ||
623 | CRB_CMDPEG_STATE)); | ||
624 | netxen_pinit_from_rom(adapter, 0); | 804 | netxen_pinit_from_rom(adapter, 0); |
625 | msleep(1); | 805 | msleep(1); |
626 | netxen_load_firmware(adapter); | 806 | netxen_load_firmware(adapter); |
627 | netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE); | ||
628 | } | 807 | } |
629 | 808 | ||
630 | /* clear the register for future unloads/loads */ | 809 | if (NX_IS_REVISION_P3(revision_id)) |
631 | writel(0, NETXEN_CRB_NORMALIZE(adapter, NETXEN_CAM_RAM(0x1fc))); | 810 | netxen_pcie_strap_init(adapter); |
632 | dev_info(&pdev->dev, "cmdpeg state: 0x%0x\n", | 811 | |
633 | readl(NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE))); | 812 | if (NX_IS_REVISION_P2(revision_id)) { |
813 | |||
814 | /* Initialize multicast addr pool owners */ | ||
815 | val = 0x7654; | ||
816 | if (adapter->ahw.board_type == NETXEN_NIC_XGBE) | ||
817 | val |= 0x0f000000; | ||
818 | netxen_crb_writelit_adapter(adapter, | ||
819 | NETXEN_MAC_ADDR_CNTL_REG, val); | ||
820 | |||
821 | } | ||
822 | |||
823 | if ((first_boot == 0x55555555) && | ||
824 | (NX_IS_REVISION_P2(revision_id))) { | ||
825 | /* Unlock the HW, prompting the boot sequence */ | ||
826 | adapter->pci_write_normalize(adapter, | ||
827 | NETXEN_ROMUSB_GLB_PEGTUNE_DONE, 1); | ||
828 | } | ||
829 | |||
830 | err = netxen_initialize_adapter_offload(adapter); | ||
831 | if (err) | ||
832 | goto err_out_iounmap; | ||
634 | 833 | ||
635 | /* | 834 | /* |
636 | * Tell the hardware our version number. | 835 | * Tell the hardware our version number. |
@@ -638,24 +837,101 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
638 | i = (_NETXEN_NIC_LINUX_MAJOR << 16) | 837 | i = (_NETXEN_NIC_LINUX_MAJOR << 16) |
639 | | ((_NETXEN_NIC_LINUX_MINOR << 8)) | 838 | | ((_NETXEN_NIC_LINUX_MINOR << 8)) |
640 | | (_NETXEN_NIC_LINUX_SUBVERSION); | 839 | | (_NETXEN_NIC_LINUX_SUBVERSION); |
641 | writel(i, NETXEN_CRB_NORMALIZE(adapter, CRB_DRIVER_VERSION)); | 840 | adapter->pci_write_normalize(adapter, CRB_DRIVER_VERSION, i); |
642 | 841 | ||
643 | /* Unlock the HW, prompting the boot sequence */ | ||
644 | writel(1, | ||
645 | NETXEN_CRB_NORMALIZE(adapter, | ||
646 | NETXEN_ROMUSB_GLB_PEGTUNE_DONE)); | ||
647 | /* Handshake with the card before we register the devices. */ | 842 | /* Handshake with the card before we register the devices. */ |
648 | netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE); | 843 | netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE); |
844 | |||
845 | } /* first_driver */ | ||
846 | |||
847 | netxen_nic_flash_print(adapter); | ||
848 | |||
849 | if (NX_IS_REVISION_P3(revision_id)) { | ||
850 | adapter->hw_read_wx(adapter, | ||
851 | NETXEN_MIU_MN_CONTROL, &val, 4); | ||
852 | adapter->ahw.cut_through = (val & 0x4) ? 1 : 0; | ||
853 | dev_info(&pdev->dev, "firmware running in %s mode\n", | ||
854 | adapter->ahw.cut_through ? "cut through" : "legacy"); | ||
649 | } | 855 | } |
650 | 856 | ||
651 | /* | 857 | /* |
652 | * See if the firmware gave us a virtual-physical port mapping. | 858 | * See if the firmware gave us a virtual-physical port mapping. |
653 | */ | 859 | */ |
654 | adapter->physical_port = adapter->portnum; | 860 | adapter->physical_port = adapter->portnum; |
655 | i = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_V2P(adapter->portnum))); | 861 | i = adapter->pci_read_normalize(adapter, CRB_V2P(adapter->portnum)); |
656 | if (i != 0x55555555) | 862 | if (i != 0x55555555) |
657 | adapter->physical_port = i; | 863 | adapter->physical_port = i; |
658 | 864 | ||
865 | adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED); | ||
866 | |||
867 | netxen_set_msix_bit(pdev, 0); | ||
868 | |||
869 | if (NX_IS_REVISION_P3(revision_id)) { | ||
870 | if ((mem_len != NETXEN_PCI_128MB_SIZE) && | ||
871 | mem_len != NETXEN_PCI_2MB_SIZE) | ||
872 | adapter->msix_supported = 0; | ||
873 | } | ||
874 | |||
875 | if (adapter->msix_supported) { | ||
876 | |||
877 | netxen_init_msix_entries(adapter); | ||
878 | |||
879 | if (pci_enable_msix(pdev, adapter->msix_entries, | ||
880 | MSIX_ENTRIES_PER_ADAPTER)) | ||
881 | goto request_msi; | ||
882 | |||
883 | adapter->flags |= NETXEN_NIC_MSIX_ENABLED; | ||
884 | netxen_set_msix_bit(pdev, 1); | ||
885 | dev_info(&pdev->dev, "using msi-x interrupts\n"); | ||
886 | |||
887 | } else { | ||
888 | request_msi: | ||
889 | if (use_msi && !pci_enable_msi(pdev)) { | ||
890 | adapter->flags |= NETXEN_NIC_MSI_ENABLED; | ||
891 | dev_info(&pdev->dev, "using msi interrupts\n"); | ||
892 | } else | ||
893 | dev_info(&pdev->dev, "using legacy interrupts\n"); | ||
894 | } | ||
895 | |||
896 | if (adapter->flags & NETXEN_NIC_MSIX_ENABLED) | ||
897 | netdev->irq = adapter->msix_entries[0].vector; | ||
898 | else | ||
899 | netdev->irq = pdev->irq; | ||
900 | |||
901 | err = netxen_receive_peg_ready(adapter); | ||
902 | if (err) | ||
903 | goto err_out_disable_msi; | ||
904 | |||
905 | init_timer(&adapter->watchdog_timer); | ||
906 | adapter->ahw.linkup = 0; | ||
907 | adapter->watchdog_timer.function = &netxen_watchdog; | ||
908 | adapter->watchdog_timer.data = (unsigned long)adapter; | ||
909 | INIT_WORK(&adapter->watchdog_task, netxen_watchdog_task); | ||
910 | INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task); | ||
911 | |||
912 | if (netxen_is_flash_supported(adapter) == 0 && | ||
913 | netxen_get_flash_mac_addr(adapter, mac_addr) == 0) { | ||
914 | unsigned char *p; | ||
915 | |||
916 | p = (unsigned char *)&mac_addr[adapter->portnum]; | ||
917 | netdev->dev_addr[0] = *(p + 5); | ||
918 | netdev->dev_addr[1] = *(p + 4); | ||
919 | netdev->dev_addr[2] = *(p + 3); | ||
920 | netdev->dev_addr[3] = *(p + 2); | ||
921 | netdev->dev_addr[4] = *(p + 1); | ||
922 | netdev->dev_addr[5] = *(p + 0); | ||
923 | |||
924 | memcpy(netdev->perm_addr, netdev->dev_addr, | ||
925 | netdev->addr_len); | ||
926 | if (!is_valid_ether_addr(netdev->perm_addr)) { | ||
927 | printk(KERN_ERR "%s: Bad MAC address %s.\n", | ||
928 | netxen_nic_driver_name, | ||
929 | print_mac(mac, netdev->dev_addr)); | ||
930 | } else { | ||
931 | adapter->macaddr_set(adapter, netdev->dev_addr); | ||
932 | } | ||
933 | } | ||
934 | |||
659 | netif_carrier_off(netdev); | 935 | netif_carrier_off(netdev); |
660 | netif_stop_queue(netdev); | 936 | netif_stop_queue(netdev); |
661 | 937 | ||
@@ -664,41 +940,37 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
664 | " aborting\n", netxen_nic_driver_name, | 940 | " aborting\n", netxen_nic_driver_name, |
665 | adapter->portnum); | 941 | adapter->portnum); |
666 | err = -EIO; | 942 | err = -EIO; |
667 | goto err_out_free_dev; | 943 | goto err_out_disable_msi; |
668 | } | 944 | } |
669 | 945 | ||
670 | netxen_nic_flash_print(adapter); | ||
671 | pci_set_drvdata(pdev, adapter); | 946 | pci_set_drvdata(pdev, adapter); |
672 | 947 | ||
673 | return 0; | 948 | switch (adapter->ahw.board_type) { |
674 | 949 | case NETXEN_NIC_GBE: | |
675 | err_out_free_dev: | 950 | dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n", |
676 | if (adapter->portnum == 0) | 951 | adapter->netdev->name); |
677 | netxen_free_adapter_offload(adapter); | 952 | break; |
678 | 953 | case NETXEN_NIC_XGBE: | |
679 | err_out_free_rx_buffer: | 954 | dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n", |
680 | for (i = 0; i < MAX_RCV_CTX; ++i) { | 955 | adapter->netdev->name); |
681 | recv_ctx = &adapter->recv_ctx[i]; | 956 | break; |
682 | for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) { | ||
683 | rcv_desc = &recv_ctx->rcv_desc[ring]; | ||
684 | if (rcv_desc->rx_buf_arr != NULL) { | ||
685 | vfree(rcv_desc->rx_buf_arr); | ||
686 | rcv_desc->rx_buf_arr = NULL; | ||
687 | } | ||
688 | } | ||
689 | } | 957 | } |
690 | vfree(cmd_buf_arr); | ||
691 | 958 | ||
692 | err_out_free_adapter: | 959 | return 0; |
960 | |||
961 | err_out_disable_msi: | ||
962 | if (adapter->flags & NETXEN_NIC_MSIX_ENABLED) | ||
963 | pci_disable_msix(pdev); | ||
693 | if (adapter->flags & NETXEN_NIC_MSI_ENABLED) | 964 | if (adapter->flags & NETXEN_NIC_MSI_ENABLED) |
694 | pci_disable_msi(pdev); | 965 | pci_disable_msi(pdev); |
695 | 966 | ||
696 | pci_set_drvdata(pdev, NULL); | 967 | if (first_driver) |
968 | netxen_free_adapter_offload(adapter); | ||
697 | 969 | ||
970 | err_out_iounmap: | ||
698 | if (db_ptr) | 971 | if (db_ptr) |
699 | iounmap(db_ptr); | 972 | iounmap(db_ptr); |
700 | 973 | ||
701 | err_out_iounmap: | ||
702 | if (mem_ptr0) | 974 | if (mem_ptr0) |
703 | iounmap(mem_ptr0); | 975 | iounmap(mem_ptr0); |
704 | if (mem_ptr1) | 976 | if (mem_ptr1) |
@@ -713,6 +985,7 @@ err_out_free_res: | |||
713 | pci_release_regions(pdev); | 985 | pci_release_regions(pdev); |
714 | 986 | ||
715 | err_out_disable_pdev: | 987 | err_out_disable_pdev: |
988 | pci_set_drvdata(pdev, NULL); | ||
716 | pci_disable_device(pdev); | 989 | pci_disable_device(pdev); |
717 | return err; | 990 | return err; |
718 | } | 991 | } |
@@ -721,11 +994,6 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev) | |||
721 | { | 994 | { |
722 | struct netxen_adapter *adapter; | 995 | struct netxen_adapter *adapter; |
723 | struct net_device *netdev; | 996 | struct net_device *netdev; |
724 | struct netxen_rx_buffer *buffer; | ||
725 | struct netxen_recv_context *recv_ctx; | ||
726 | struct netxen_rcv_desc_ctx *rcv_desc; | ||
727 | int i, ctxid, ring; | ||
728 | static int init_firmware_done = 0; | ||
729 | 997 | ||
730 | adapter = pci_get_drvdata(pdev); | 998 | adapter = pci_get_drvdata(pdev); |
731 | if (adapter == NULL) | 999 | if (adapter == NULL) |
@@ -736,36 +1004,18 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev) | |||
736 | unregister_netdev(netdev); | 1004 | unregister_netdev(netdev); |
737 | 1005 | ||
738 | if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) { | 1006 | if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) { |
739 | init_firmware_done++; | ||
740 | netxen_free_hw_resources(adapter); | 1007 | netxen_free_hw_resources(adapter); |
1008 | netxen_free_sw_resources(adapter); | ||
741 | } | 1009 | } |
742 | 1010 | ||
743 | for (ctxid = 0; ctxid < MAX_RCV_CTX; ++ctxid) { | ||
744 | recv_ctx = &adapter->recv_ctx[ctxid]; | ||
745 | for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) { | ||
746 | rcv_desc = &recv_ctx->rcv_desc[ring]; | ||
747 | for (i = 0; i < rcv_desc->max_rx_desc_count; ++i) { | ||
748 | buffer = &(rcv_desc->rx_buf_arr[i]); | ||
749 | if (buffer->state == NETXEN_BUFFER_FREE) | ||
750 | continue; | ||
751 | pci_unmap_single(pdev, buffer->dma, | ||
752 | rcv_desc->dma_size, | ||
753 | PCI_DMA_FROMDEVICE); | ||
754 | if (buffer->skb != NULL) | ||
755 | dev_kfree_skb_any(buffer->skb); | ||
756 | } | ||
757 | vfree(rcv_desc->rx_buf_arr); | ||
758 | } | ||
759 | } | ||
760 | |||
761 | vfree(adapter->cmd_buf_arr); | ||
762 | |||
763 | if (adapter->portnum == 0) | 1011 | if (adapter->portnum == 0) |
764 | netxen_free_adapter_offload(adapter); | 1012 | netxen_free_adapter_offload(adapter); |
765 | 1013 | ||
766 | if (adapter->irq) | 1014 | if (adapter->irq) |
767 | free_irq(adapter->irq, adapter); | 1015 | free_irq(adapter->irq, adapter); |
768 | 1016 | ||
1017 | if (adapter->flags & NETXEN_NIC_MSIX_ENABLED) | ||
1018 | pci_disable_msix(pdev); | ||
769 | if (adapter->flags & NETXEN_NIC_MSI_ENABLED) | 1019 | if (adapter->flags & NETXEN_NIC_MSI_ENABLED) |
770 | pci_disable_msi(pdev); | 1020 | pci_disable_msi(pdev); |
771 | 1021 | ||
@@ -803,51 +1053,69 @@ static int netxen_nic_open(struct net_device *netdev) | |||
803 | return -EIO; | 1053 | return -EIO; |
804 | } | 1054 | } |
805 | 1055 | ||
806 | /* setup all the resources for the Phantom... */ | 1056 | err = netxen_alloc_sw_resources(adapter); |
807 | /* this include the descriptors for rcv, tx, and status */ | ||
808 | netxen_nic_clear_stats(adapter); | ||
809 | err = netxen_nic_hw_resources(adapter); | ||
810 | if (err) { | 1057 | if (err) { |
811 | printk(KERN_ERR "Error in setting hw resources:%d\n", | 1058 | printk(KERN_ERR "%s: Error in setting sw resources\n", |
812 | err); | 1059 | netdev->name); |
813 | return err; | 1060 | return err; |
814 | } | 1061 | } |
1062 | |||
1063 | netxen_nic_clear_stats(adapter); | ||
1064 | |||
1065 | err = netxen_alloc_hw_resources(adapter); | ||
1066 | if (err) { | ||
1067 | printk(KERN_ERR "%s: Error in setting hw resources\n", | ||
1068 | netdev->name); | ||
1069 | goto err_out_free_sw; | ||
1070 | } | ||
1071 | |||
1072 | if (adapter->fw_major < 4) { | ||
1073 | adapter->crb_addr_cmd_producer = | ||
1074 | crb_cmd_producer[adapter->portnum]; | ||
1075 | adapter->crb_addr_cmd_consumer = | ||
1076 | crb_cmd_consumer[adapter->portnum]; | ||
1077 | } | ||
1078 | |||
1079 | netxen_nic_update_cmd_producer(adapter, 0); | ||
1080 | netxen_nic_update_cmd_consumer(adapter, 0); | ||
1081 | |||
815 | for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) { | 1082 | for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) { |
816 | for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) | 1083 | for (ring = 0; ring < adapter->max_rds_rings; ring++) |
817 | netxen_post_rx_buffers(adapter, ctx, ring); | 1084 | netxen_post_rx_buffers(adapter, ctx, ring); |
818 | } | 1085 | } |
819 | adapter->irq = adapter->ahw.pdev->irq; | 1086 | if (NETXEN_IS_MSI_FAMILY(adapter)) |
820 | if (adapter->flags & NETXEN_NIC_MSI_ENABLED) | ||
821 | handler = netxen_msi_intr; | 1087 | handler = netxen_msi_intr; |
822 | else { | 1088 | else { |
823 | flags |= IRQF_SHARED; | 1089 | flags |= IRQF_SHARED; |
824 | handler = netxen_intr; | 1090 | handler = netxen_intr; |
825 | } | 1091 | } |
1092 | adapter->irq = netdev->irq; | ||
826 | err = request_irq(adapter->irq, handler, | 1093 | err = request_irq(adapter->irq, handler, |
827 | flags, netdev->name, adapter); | 1094 | flags, netdev->name, adapter); |
828 | if (err) { | 1095 | if (err) { |
829 | printk(KERN_ERR "request_irq failed with: %d\n", err); | 1096 | printk(KERN_ERR "request_irq failed with: %d\n", err); |
830 | netxen_free_hw_resources(adapter); | 1097 | goto err_out_free_hw; |
831 | return err; | ||
832 | } | 1098 | } |
833 | 1099 | ||
834 | adapter->is_up = NETXEN_ADAPTER_UP_MAGIC; | 1100 | adapter->is_up = NETXEN_ADAPTER_UP_MAGIC; |
835 | } | 1101 | } |
1102 | |||
836 | /* Done here again so that even if phantom sw overwrote it, | 1103 | /* Done here again so that even if phantom sw overwrote it, |
837 | * we set it */ | 1104 | * we set it */ |
838 | if (adapter->init_port | 1105 | err = adapter->init_port(adapter, adapter->physical_port); |
839 | && adapter->init_port(adapter, adapter->portnum) != 0) { | 1106 | if (err) { |
840 | printk(KERN_ERR "%s: Failed to initialize port %d\n", | 1107 | printk(KERN_ERR "%s: Failed to initialize port %d\n", |
841 | netxen_nic_driver_name, adapter->portnum); | 1108 | netxen_nic_driver_name, adapter->portnum); |
842 | return -EIO; | 1109 | goto err_out_free_irq; |
843 | } | 1110 | } |
844 | if (adapter->macaddr_set) | 1111 | adapter->macaddr_set(adapter, netdev->dev_addr); |
845 | adapter->macaddr_set(adapter, netdev->dev_addr); | ||
846 | 1112 | ||
847 | netxen_nic_set_link_parameters(adapter); | 1113 | netxen_nic_set_link_parameters(adapter); |
848 | 1114 | ||
849 | netxen_nic_set_multi(netdev); | 1115 | netdev->set_multicast_list(netdev); |
850 | if (adapter->set_mtu) | 1116 | if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) |
1117 | nx_fw_cmd_set_mtu(adapter, netdev->mtu); | ||
1118 | else | ||
851 | adapter->set_mtu(adapter, netdev->mtu); | 1119 | adapter->set_mtu(adapter, netdev->mtu); |
852 | 1120 | ||
853 | mod_timer(&adapter->watchdog_timer, jiffies); | 1121 | mod_timer(&adapter->watchdog_timer, jiffies); |
@@ -858,6 +1126,14 @@ static int netxen_nic_open(struct net_device *netdev) | |||
858 | netif_start_queue(netdev); | 1126 | netif_start_queue(netdev); |
859 | 1127 | ||
860 | return 0; | 1128 | return 0; |
1129 | |||
1130 | err_out_free_irq: | ||
1131 | free_irq(adapter->irq, adapter); | ||
1132 | err_out_free_hw: | ||
1133 | netxen_free_hw_resources(adapter); | ||
1134 | err_out_free_sw: | ||
1135 | netxen_free_sw_resources(adapter); | ||
1136 | return err; | ||
861 | } | 1137 | } |
862 | 1138 | ||
863 | /* | 1139 | /* |
@@ -866,9 +1142,6 @@ static int netxen_nic_open(struct net_device *netdev) | |||
866 | static int netxen_nic_close(struct net_device *netdev) | 1142 | static int netxen_nic_close(struct net_device *netdev) |
867 | { | 1143 | { |
868 | struct netxen_adapter *adapter = netdev_priv(netdev); | 1144 | struct netxen_adapter *adapter = netdev_priv(netdev); |
869 | int i, j; | ||
870 | struct netxen_cmd_buffer *cmd_buff; | ||
871 | struct netxen_skb_frag *buffrag; | ||
872 | 1145 | ||
873 | netif_carrier_off(netdev); | 1146 | netif_carrier_off(netdev); |
874 | netif_stop_queue(netdev); | 1147 | netif_stop_queue(netdev); |
@@ -879,30 +1152,8 @@ static int netxen_nic_close(struct net_device *netdev) | |||
879 | 1152 | ||
880 | netxen_nic_disable_int(adapter); | 1153 | netxen_nic_disable_int(adapter); |
881 | 1154 | ||
882 | cmd_buff = adapter->cmd_buf_arr; | 1155 | netxen_release_tx_buffers(adapter); |
883 | for (i = 0; i < adapter->max_tx_desc_count; i++) { | 1156 | |
884 | buffrag = cmd_buff->frag_array; | ||
885 | if (buffrag->dma) { | ||
886 | pci_unmap_single(adapter->pdev, buffrag->dma, | ||
887 | buffrag->length, PCI_DMA_TODEVICE); | ||
888 | buffrag->dma = 0ULL; | ||
889 | } | ||
890 | for (j = 0; j < cmd_buff->frag_count; j++) { | ||
891 | buffrag++; | ||
892 | if (buffrag->dma) { | ||
893 | pci_unmap_page(adapter->pdev, buffrag->dma, | ||
894 | buffrag->length, | ||
895 | PCI_DMA_TODEVICE); | ||
896 | buffrag->dma = 0ULL; | ||
897 | } | ||
898 | } | ||
899 | /* Free the skb we received in netxen_nic_xmit_frame */ | ||
900 | if (cmd_buff->skb) { | ||
901 | dev_kfree_skb_any(cmd_buff->skb); | ||
902 | cmd_buff->skb = NULL; | ||
903 | } | ||
904 | cmd_buff++; | ||
905 | } | ||
906 | if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) { | 1157 | if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) { |
907 | FLUSH_SCHEDULED_WORK(); | 1158 | FLUSH_SCHEDULED_WORK(); |
908 | del_timer_sync(&adapter->watchdog_timer); | 1159 | del_timer_sync(&adapter->watchdog_timer); |
@@ -911,6 +1162,31 @@ static int netxen_nic_close(struct net_device *netdev) | |||
911 | return 0; | 1162 | return 0; |
912 | } | 1163 | } |
913 | 1164 | ||
1165 | void netxen_tso_check(struct netxen_adapter *adapter, | ||
1166 | struct cmd_desc_type0 *desc, struct sk_buff *skb) | ||
1167 | { | ||
1168 | if (desc->mss) { | ||
1169 | desc->total_hdr_length = (sizeof(struct ethhdr) + | ||
1170 | ip_hdrlen(skb) + tcp_hdrlen(skb)); | ||
1171 | |||
1172 | if ((NX_IS_REVISION_P3(adapter->ahw.revision_id)) && | ||
1173 | (skb->protocol == htons(ETH_P_IPV6))) | ||
1174 | netxen_set_cmd_desc_opcode(desc, TX_TCP_LSO6); | ||
1175 | else | ||
1176 | netxen_set_cmd_desc_opcode(desc, TX_TCP_LSO); | ||
1177 | |||
1178 | } else if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
1179 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) | ||
1180 | netxen_set_cmd_desc_opcode(desc, TX_TCP_PKT); | ||
1181 | else if (ip_hdr(skb)->protocol == IPPROTO_UDP) | ||
1182 | netxen_set_cmd_desc_opcode(desc, TX_UDP_PKT); | ||
1183 | else | ||
1184 | return; | ||
1185 | } | ||
1186 | desc->tcp_hdr_offset = skb_transport_offset(skb); | ||
1187 | desc->ip_hdr_offset = skb_network_offset(skb); | ||
1188 | } | ||
1189 | |||
914 | static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | 1190 | static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) |
915 | { | 1191 | { |
916 | struct netxen_adapter *adapter = netdev_priv(netdev); | 1192 | struct netxen_adapter *adapter = netdev_priv(netdev); |
@@ -932,7 +1208,7 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
932 | 1208 | ||
933 | /* There 4 fragments per descriptor */ | 1209 | /* There 4 fragments per descriptor */ |
934 | no_of_desc = (frag_count + 3) >> 2; | 1210 | no_of_desc = (frag_count + 3) >> 2; |
935 | if (netdev->features & NETIF_F_TSO) { | 1211 | if (netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) { |
936 | if (skb_shinfo(skb)->gso_size > 0) { | 1212 | if (skb_shinfo(skb)->gso_size > 0) { |
937 | 1213 | ||
938 | no_of_desc++; | 1214 | no_of_desc++; |
@@ -959,7 +1235,8 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
959 | memset(hwdesc, 0, sizeof(struct cmd_desc_type0)); | 1235 | memset(hwdesc, 0, sizeof(struct cmd_desc_type0)); |
960 | /* Take skb->data itself */ | 1236 | /* Take skb->data itself */ |
961 | pbuf = &adapter->cmd_buf_arr[producer]; | 1237 | pbuf = &adapter->cmd_buf_arr[producer]; |
962 | if ((netdev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size > 0) { | 1238 | if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) && |
1239 | skb_shinfo(skb)->gso_size > 0) { | ||
963 | pbuf->mss = skb_shinfo(skb)->gso_size; | 1240 | pbuf->mss = skb_shinfo(skb)->gso_size; |
964 | hwdesc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); | 1241 | hwdesc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); |
965 | } else { | 1242 | } else { |
@@ -1086,6 +1363,89 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
1086 | return NETDEV_TX_OK; | 1363 | return NETDEV_TX_OK; |
1087 | } | 1364 | } |
1088 | 1365 | ||
1366 | static int netxen_nic_check_temp(struct netxen_adapter *adapter) | ||
1367 | { | ||
1368 | struct net_device *netdev = adapter->netdev; | ||
1369 | uint32_t temp, temp_state, temp_val; | ||
1370 | int rv = 0; | ||
1371 | |||
1372 | temp = adapter->pci_read_normalize(adapter, CRB_TEMP_STATE); | ||
1373 | |||
1374 | temp_state = nx_get_temp_state(temp); | ||
1375 | temp_val = nx_get_temp_val(temp); | ||
1376 | |||
1377 | if (temp_state == NX_TEMP_PANIC) { | ||
1378 | printk(KERN_ALERT | ||
1379 | "%s: Device temperature %d degrees C exceeds" | ||
1380 | " maximum allowed. Hardware has been shut down.\n", | ||
1381 | netxen_nic_driver_name, temp_val); | ||
1382 | |||
1383 | netif_carrier_off(netdev); | ||
1384 | netif_stop_queue(netdev); | ||
1385 | rv = 1; | ||
1386 | } else if (temp_state == NX_TEMP_WARN) { | ||
1387 | if (adapter->temp == NX_TEMP_NORMAL) { | ||
1388 | printk(KERN_ALERT | ||
1389 | "%s: Device temperature %d degrees C " | ||
1390 | "exceeds operating range." | ||
1391 | " Immediate action needed.\n", | ||
1392 | netxen_nic_driver_name, temp_val); | ||
1393 | } | ||
1394 | } else { | ||
1395 | if (adapter->temp == NX_TEMP_WARN) { | ||
1396 | printk(KERN_INFO | ||
1397 | "%s: Device temperature is now %d degrees C" | ||
1398 | " in normal range.\n", netxen_nic_driver_name, | ||
1399 | temp_val); | ||
1400 | } | ||
1401 | } | ||
1402 | adapter->temp = temp_state; | ||
1403 | return rv; | ||
1404 | } | ||
1405 | |||
1406 | static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter) | ||
1407 | { | ||
1408 | struct net_device *netdev = adapter->netdev; | ||
1409 | u32 val, port, linkup; | ||
1410 | |||
1411 | port = adapter->physical_port; | ||
1412 | |||
1413 | if (adapter->ahw.board_type == NETXEN_NIC_GBE) { | ||
1414 | val = adapter->pci_read_normalize(adapter, CRB_XG_STATE); | ||
1415 | linkup = (val >> port) & 1; | ||
1416 | } else { | ||
1417 | if (adapter->fw_major < 4) { | ||
1418 | val = adapter->pci_read_normalize(adapter, | ||
1419 | CRB_XG_STATE); | ||
1420 | val = (val >> port*8) & 0xff; | ||
1421 | linkup = (val == XG_LINK_UP); | ||
1422 | } else { | ||
1423 | val = adapter->pci_read_normalize(adapter, | ||
1424 | CRB_XG_STATE_P3); | ||
1425 | val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val); | ||
1426 | linkup = (val == XG_LINK_UP_P3); | ||
1427 | } | ||
1428 | } | ||
1429 | |||
1430 | if (adapter->ahw.linkup && !linkup) { | ||
1431 | printk(KERN_INFO "%s: %s NIC Link is down\n", | ||
1432 | netxen_nic_driver_name, netdev->name); | ||
1433 | adapter->ahw.linkup = 0; | ||
1434 | if (netif_running(netdev)) { | ||
1435 | netif_carrier_off(netdev); | ||
1436 | netif_stop_queue(netdev); | ||
1437 | } | ||
1438 | } else if (!adapter->ahw.linkup && linkup) { | ||
1439 | printk(KERN_INFO "%s: %s NIC Link is up\n", | ||
1440 | netxen_nic_driver_name, netdev->name); | ||
1441 | adapter->ahw.linkup = 1; | ||
1442 | if (netif_running(netdev)) { | ||
1443 | netif_carrier_on(netdev); | ||
1444 | netif_wake_queue(netdev); | ||
1445 | } | ||
1446 | } | ||
1447 | } | ||
1448 | |||
1089 | static void netxen_watchdog(unsigned long v) | 1449 | static void netxen_watchdog(unsigned long v) |
1090 | { | 1450 | { |
1091 | struct netxen_adapter *adapter = (struct netxen_adapter *)v; | 1451 | struct netxen_adapter *adapter = (struct netxen_adapter *)v; |
@@ -1093,6 +1453,19 @@ static void netxen_watchdog(unsigned long v) | |||
1093 | SCHEDULE_WORK(&adapter->watchdog_task); | 1453 | SCHEDULE_WORK(&adapter->watchdog_task); |
1094 | } | 1454 | } |
1095 | 1455 | ||
1456 | void netxen_watchdog_task(struct work_struct *work) | ||
1457 | { | ||
1458 | struct netxen_adapter *adapter = | ||
1459 | container_of(work, struct netxen_adapter, watchdog_task); | ||
1460 | |||
1461 | if ((adapter->portnum == 0) && netxen_nic_check_temp(adapter)) | ||
1462 | return; | ||
1463 | |||
1464 | netxen_nic_handle_phy_intr(adapter); | ||
1465 | |||
1466 | mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); | ||
1467 | } | ||
1468 | |||
1096 | static void netxen_tx_timeout(struct net_device *netdev) | 1469 | static void netxen_tx_timeout(struct net_device *netdev) |
1097 | { | 1470 | { |
1098 | struct netxen_adapter *adapter = (struct netxen_adapter *) | 1471 | struct netxen_adapter *adapter = (struct netxen_adapter *) |
@@ -1118,6 +1491,38 @@ static void netxen_tx_timeout_task(struct work_struct *work) | |||
1118 | netif_wake_queue(adapter->netdev); | 1491 | netif_wake_queue(adapter->netdev); |
1119 | } | 1492 | } |
1120 | 1493 | ||
1494 | /* | ||
1495 | * netxen_nic_get_stats - Get System Network Statistics | ||
1496 | * @netdev: network interface device structure | ||
1497 | */ | ||
1498 | struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev) | ||
1499 | { | ||
1500 | struct netxen_adapter *adapter = netdev_priv(netdev); | ||
1501 | struct net_device_stats *stats = &adapter->net_stats; | ||
1502 | |||
1503 | memset(stats, 0, sizeof(*stats)); | ||
1504 | |||
1505 | /* total packets received */ | ||
1506 | stats->rx_packets = adapter->stats.no_rcv; | ||
1507 | /* total packets transmitted */ | ||
1508 | stats->tx_packets = adapter->stats.xmitedframes + | ||
1509 | adapter->stats.xmitfinished; | ||
1510 | /* total bytes received */ | ||
1511 | stats->rx_bytes = adapter->stats.rxbytes; | ||
1512 | /* total bytes transmitted */ | ||
1513 | stats->tx_bytes = adapter->stats.txbytes; | ||
1514 | /* bad packets received */ | ||
1515 | stats->rx_errors = adapter->stats.rcvdbadskb; | ||
1516 | /* packet transmit problems */ | ||
1517 | stats->tx_errors = adapter->stats.nocmddescriptor; | ||
1518 | /* no space in linux buffers */ | ||
1519 | stats->rx_dropped = adapter->stats.rxdropped; | ||
1520 | /* no space available in linux */ | ||
1521 | stats->tx_dropped = adapter->stats.txdropped; | ||
1522 | |||
1523 | return stats; | ||
1524 | } | ||
1525 | |||
1121 | static inline void | 1526 | static inline void |
1122 | netxen_handle_int(struct netxen_adapter *adapter) | 1527 | netxen_handle_int(struct netxen_adapter *adapter) |
1123 | { | 1528 | { |
@@ -1125,20 +1530,20 @@ netxen_handle_int(struct netxen_adapter *adapter) | |||
1125 | napi_schedule(&adapter->napi); | 1530 | napi_schedule(&adapter->napi); |
1126 | } | 1531 | } |
1127 | 1532 | ||
1128 | irqreturn_t netxen_intr(int irq, void *data) | 1533 | static irqreturn_t netxen_intr(int irq, void *data) |
1129 | { | 1534 | { |
1130 | struct netxen_adapter *adapter = data; | 1535 | struct netxen_adapter *adapter = data; |
1131 | u32 our_int = 0; | 1536 | u32 our_int = 0; |
1132 | 1537 | ||
1133 | our_int = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_INT_VECTOR)); | 1538 | our_int = adapter->pci_read_normalize(adapter, CRB_INT_VECTOR); |
1134 | /* not our interrupt */ | 1539 | /* not our interrupt */ |
1135 | if ((our_int & (0x80 << adapter->portnum)) == 0) | 1540 | if ((our_int & (0x80 << adapter->portnum)) == 0) |
1136 | return IRQ_NONE; | 1541 | return IRQ_NONE; |
1137 | 1542 | ||
1138 | if (adapter->intr_scheme == INTR_SCHEME_PERPORT) { | 1543 | if (adapter->intr_scheme == INTR_SCHEME_PERPORT) { |
1139 | /* claim interrupt */ | 1544 | /* claim interrupt */ |
1140 | writel(our_int & ~((u32)(0x80 << adapter->portnum)), | 1545 | adapter->pci_write_normalize(adapter, CRB_INT_VECTOR, |
1141 | NETXEN_CRB_NORMALIZE(adapter, CRB_INT_VECTOR)); | 1546 | our_int & ~((u32)(0x80 << adapter->portnum))); |
1142 | } | 1547 | } |
1143 | 1548 | ||
1144 | netxen_handle_int(adapter); | 1549 | netxen_handle_int(adapter); |
@@ -1146,7 +1551,7 @@ irqreturn_t netxen_intr(int irq, void *data) | |||
1146 | return IRQ_HANDLED; | 1551 | return IRQ_HANDLED; |
1147 | } | 1552 | } |
1148 | 1553 | ||
1149 | irqreturn_t netxen_msi_intr(int irq, void *data) | 1554 | static irqreturn_t netxen_msi_intr(int irq, void *data) |
1150 | { | 1555 | { |
1151 | struct netxen_adapter *adapter = data; | 1556 | struct netxen_adapter *adapter = data; |
1152 | 1557 | ||
@@ -1220,10 +1625,6 @@ module_init(netxen_init_module); | |||
1220 | 1625 | ||
1221 | static void __exit netxen_exit_module(void) | 1626 | static void __exit netxen_exit_module(void) |
1222 | { | 1627 | { |
1223 | /* | ||
1224 | * Wait for some time to allow the dma to drain, if any. | ||
1225 | */ | ||
1226 | msleep(100); | ||
1227 | pci_unregister_driver(&netxen_driver); | 1628 | pci_unregister_driver(&netxen_driver); |
1228 | destroy_workqueue(netxen_workq); | 1629 | destroy_workqueue(netxen_workq); |
1229 | } | 1630 | } |
diff --git a/drivers/net/netxen/netxen_nic_niu.c b/drivers/net/netxen/netxen_nic_niu.c index a3bc7cc67a6f..4cb8f4a1cf4b 100644 --- a/drivers/net/netxen/netxen_nic_niu.c +++ b/drivers/net/netxen/netxen_nic_niu.c | |||
@@ -46,9 +46,8 @@ static int phy_lock(struct netxen_adapter *adapter) | |||
46 | int done = 0, timeout = 0; | 46 | int done = 0, timeout = 0; |
47 | 47 | ||
48 | while (!done) { | 48 | while (!done) { |
49 | done = | 49 | done = netxen_nic_reg_read(adapter, |
50 | readl(pci_base_offset | 50 | NETXEN_PCIE_REG(PCIE_SEM3_LOCK)); |
51 | (adapter, NETXEN_PCIE_REG(PCIE_SEM3_LOCK))); | ||
52 | if (done == 1) | 51 | if (done == 1) |
53 | break; | 52 | break; |
54 | if (timeout >= phy_lock_timeout) { | 53 | if (timeout >= phy_lock_timeout) { |
@@ -63,14 +62,14 @@ static int phy_lock(struct netxen_adapter *adapter) | |||
63 | } | 62 | } |
64 | } | 63 | } |
65 | 64 | ||
66 | writel(PHY_LOCK_DRIVER, | 65 | netxen_crb_writelit_adapter(adapter, |
67 | NETXEN_CRB_NORMALIZE(adapter, NETXEN_PHY_LOCK_ID)); | 66 | NETXEN_PHY_LOCK_ID, PHY_LOCK_DRIVER); |
68 | return 0; | 67 | return 0; |
69 | } | 68 | } |
70 | 69 | ||
71 | static int phy_unlock(struct netxen_adapter *adapter) | 70 | static int phy_unlock(struct netxen_adapter *adapter) |
72 | { | 71 | { |
73 | readl(pci_base_offset(adapter, NETXEN_PCIE_REG(PCIE_SEM3_UNLOCK))); | 72 | adapter->pci_read_immediate(adapter, NETXEN_PCIE_REG(PCIE_SEM3_UNLOCK)); |
74 | 73 | ||
75 | return 0; | 74 | return 0; |
76 | } | 75 | } |
@@ -109,7 +108,7 @@ int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long reg, | |||
109 | * so it cannot be in reset | 108 | * so it cannot be in reset |
110 | */ | 109 | */ |
111 | 110 | ||
112 | if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(0), | 111 | if (adapter->hw_read_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(0), |
113 | &mac_cfg0, 4)) | 112 | &mac_cfg0, 4)) |
114 | return -EIO; | 113 | return -EIO; |
115 | if (netxen_gb_get_soft_reset(mac_cfg0)) { | 114 | if (netxen_gb_get_soft_reset(mac_cfg0)) { |
@@ -119,7 +118,7 @@ int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long reg, | |||
119 | netxen_gb_rx_reset_pb(temp); | 118 | netxen_gb_rx_reset_pb(temp); |
120 | netxen_gb_tx_reset_mac(temp); | 119 | netxen_gb_tx_reset_mac(temp); |
121 | netxen_gb_rx_reset_mac(temp); | 120 | netxen_gb_rx_reset_mac(temp); |
122 | if (netxen_nic_hw_write_wx(adapter, | 121 | if (adapter->hw_write_wx(adapter, |
123 | NETXEN_NIU_GB_MAC_CONFIG_0(0), | 122 | NETXEN_NIU_GB_MAC_CONFIG_0(0), |
124 | &temp, 4)) | 123 | &temp, 4)) |
125 | return -EIO; | 124 | return -EIO; |
@@ -129,22 +128,22 @@ int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long reg, | |||
129 | address = 0; | 128 | address = 0; |
130 | netxen_gb_mii_mgmt_reg_addr(address, reg); | 129 | netxen_gb_mii_mgmt_reg_addr(address, reg); |
131 | netxen_gb_mii_mgmt_phy_addr(address, phy); | 130 | netxen_gb_mii_mgmt_phy_addr(address, phy); |
132 | if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_ADDR(0), | 131 | if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_ADDR(0), |
133 | &address, 4)) | 132 | &address, 4)) |
134 | return -EIO; | 133 | return -EIO; |
135 | command = 0; /* turn off any prior activity */ | 134 | command = 0; /* turn off any prior activity */ |
136 | if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0), | 135 | if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0), |
137 | &command, 4)) | 136 | &command, 4)) |
138 | return -EIO; | 137 | return -EIO; |
139 | /* send read command */ | 138 | /* send read command */ |
140 | netxen_gb_mii_mgmt_set_read_cycle(command); | 139 | netxen_gb_mii_mgmt_set_read_cycle(command); |
141 | if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0), | 140 | if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0), |
142 | &command, 4)) | 141 | &command, 4)) |
143 | return -EIO; | 142 | return -EIO; |
144 | 143 | ||
145 | status = 0; | 144 | status = 0; |
146 | do { | 145 | do { |
147 | if (netxen_nic_hw_read_wx(adapter, | 146 | if (adapter->hw_read_wx(adapter, |
148 | NETXEN_NIU_GB_MII_MGMT_INDICATE(0), | 147 | NETXEN_NIU_GB_MII_MGMT_INDICATE(0), |
149 | &status, 4)) | 148 | &status, 4)) |
150 | return -EIO; | 149 | return -EIO; |
@@ -154,7 +153,7 @@ int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long reg, | |||
154 | && (timeout++ < NETXEN_NIU_PHY_WAITMAX)); | 153 | && (timeout++ < NETXEN_NIU_PHY_WAITMAX)); |
155 | 154 | ||
156 | if (timeout < NETXEN_NIU_PHY_WAITMAX) { | 155 | if (timeout < NETXEN_NIU_PHY_WAITMAX) { |
157 | if (netxen_nic_hw_read_wx(adapter, | 156 | if (adapter->hw_read_wx(adapter, |
158 | NETXEN_NIU_GB_MII_MGMT_STATUS(0), | 157 | NETXEN_NIU_GB_MII_MGMT_STATUS(0), |
159 | readval, 4)) | 158 | readval, 4)) |
160 | return -EIO; | 159 | return -EIO; |
@@ -163,7 +162,7 @@ int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long reg, | |||
163 | result = -1; | 162 | result = -1; |
164 | 163 | ||
165 | if (restore) | 164 | if (restore) |
166 | if (netxen_nic_hw_write_wx(adapter, | 165 | if (adapter->hw_write_wx(adapter, |
167 | NETXEN_NIU_GB_MAC_CONFIG_0(0), | 166 | NETXEN_NIU_GB_MAC_CONFIG_0(0), |
168 | &mac_cfg0, 4)) | 167 | &mac_cfg0, 4)) |
169 | return -EIO; | 168 | return -EIO; |
@@ -201,7 +200,7 @@ int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter, long reg, | |||
201 | * cannot be in reset | 200 | * cannot be in reset |
202 | */ | 201 | */ |
203 | 202 | ||
204 | if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(0), | 203 | if (adapter->hw_read_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(0), |
205 | &mac_cfg0, 4)) | 204 | &mac_cfg0, 4)) |
206 | return -EIO; | 205 | return -EIO; |
207 | if (netxen_gb_get_soft_reset(mac_cfg0)) { | 206 | if (netxen_gb_get_soft_reset(mac_cfg0)) { |
@@ -212,7 +211,7 @@ int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter, long reg, | |||
212 | netxen_gb_tx_reset_mac(temp); | 211 | netxen_gb_tx_reset_mac(temp); |
213 | netxen_gb_rx_reset_mac(temp); | 212 | netxen_gb_rx_reset_mac(temp); |
214 | 213 | ||
215 | if (netxen_nic_hw_write_wx(adapter, | 214 | if (adapter->hw_write_wx(adapter, |
216 | NETXEN_NIU_GB_MAC_CONFIG_0(0), | 215 | NETXEN_NIU_GB_MAC_CONFIG_0(0), |
217 | &temp, 4)) | 216 | &temp, 4)) |
218 | return -EIO; | 217 | return -EIO; |
@@ -220,24 +219,24 @@ int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter, long reg, | |||
220 | } | 219 | } |
221 | 220 | ||
222 | command = 0; /* turn off any prior activity */ | 221 | command = 0; /* turn off any prior activity */ |
223 | if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0), | 222 | if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_COMMAND(0), |
224 | &command, 4)) | 223 | &command, 4)) |
225 | return -EIO; | 224 | return -EIO; |
226 | 225 | ||
227 | address = 0; | 226 | address = 0; |
228 | netxen_gb_mii_mgmt_reg_addr(address, reg); | 227 | netxen_gb_mii_mgmt_reg_addr(address, reg); |
229 | netxen_gb_mii_mgmt_phy_addr(address, phy); | 228 | netxen_gb_mii_mgmt_phy_addr(address, phy); |
230 | if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_ADDR(0), | 229 | if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_ADDR(0), |
231 | &address, 4)) | 230 | &address, 4)) |
232 | return -EIO; | 231 | return -EIO; |
233 | 232 | ||
234 | if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_CTRL(0), | 233 | if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_CTRL(0), |
235 | &val, 4)) | 234 | &val, 4)) |
236 | return -EIO; | 235 | return -EIO; |
237 | 236 | ||
238 | status = 0; | 237 | status = 0; |
239 | do { | 238 | do { |
240 | if (netxen_nic_hw_read_wx(adapter, | 239 | if (adapter->hw_read_wx(adapter, |
241 | NETXEN_NIU_GB_MII_MGMT_INDICATE(0), | 240 | NETXEN_NIU_GB_MII_MGMT_INDICATE(0), |
242 | &status, 4)) | 241 | &status, 4)) |
243 | return -EIO; | 242 | return -EIO; |
@@ -252,7 +251,7 @@ int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter, long reg, | |||
252 | 251 | ||
253 | /* restore the state of port 0 MAC in case we tampered with it */ | 252 | /* restore the state of port 0 MAC in case we tampered with it */ |
254 | if (restore) | 253 | if (restore) |
255 | if (netxen_nic_hw_write_wx(adapter, | 254 | if (adapter->hw_write_wx(adapter, |
256 | NETXEN_NIU_GB_MAC_CONFIG_0(0), | 255 | NETXEN_NIU_GB_MAC_CONFIG_0(0), |
257 | &mac_cfg0, 4)) | 256 | &mac_cfg0, 4)) |
258 | return -EIO; | 257 | return -EIO; |
@@ -401,14 +400,16 @@ int netxen_niu_gbe_init_port(struct netxen_adapter *adapter, int port) | |||
401 | { | 400 | { |
402 | int result = 0; | 401 | int result = 0; |
403 | __u32 status; | 402 | __u32 status; |
403 | |||
404 | if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) | ||
405 | return 0; | ||
406 | |||
404 | if (adapter->disable_phy_interrupts) | 407 | if (adapter->disable_phy_interrupts) |
405 | adapter->disable_phy_interrupts(adapter); | 408 | adapter->disable_phy_interrupts(adapter); |
406 | mdelay(2); | 409 | mdelay(2); |
407 | 410 | ||
408 | if (0 == | 411 | if (0 == netxen_niu_gbe_phy_read(adapter, |
409 | netxen_niu_gbe_phy_read(adapter, | 412 | NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS, &status)) { |
410 | NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS, | ||
411 | &status)) { | ||
412 | if (netxen_get_phy_link(status)) { | 413 | if (netxen_get_phy_link(status)) { |
413 | if (netxen_get_phy_speed(status) == 2) { | 414 | if (netxen_get_phy_speed(status) == 2) { |
414 | netxen_niu_gbe_set_gmii_mode(adapter, port, 1); | 415 | netxen_niu_gbe_set_gmii_mode(adapter, port, 1); |
@@ -456,12 +457,12 @@ int netxen_niu_gbe_init_port(struct netxen_adapter *adapter, int port) | |||
456 | 457 | ||
457 | int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port) | 458 | int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port) |
458 | { | 459 | { |
459 | u32 portnum = adapter->physical_port; | 460 | if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { |
460 | 461 | netxen_crb_writelit_adapter(adapter, | |
461 | netxen_crb_writelit_adapter(adapter, | 462 | NETXEN_NIU_XGE_CONFIG_1+(0x10000*port), 0x1447); |
462 | NETXEN_NIU_XGE_CONFIG_1+(0x10000*portnum), 0x1447); | 463 | netxen_crb_writelit_adapter(adapter, |
463 | netxen_crb_writelit_adapter(adapter, | 464 | NETXEN_NIU_XGE_CONFIG_0+(0x10000*port), 0x5); |
464 | NETXEN_NIU_XGE_CONFIG_0+(0x10000*portnum), 0x5); | 465 | } |
465 | 466 | ||
466 | return 0; | 467 | return 0; |
467 | } | 468 | } |
@@ -581,10 +582,10 @@ static int netxen_niu_macaddr_get(struct netxen_adapter *adapter, | |||
581 | if ((phy < 0) || (phy > 3)) | 582 | if ((phy < 0) || (phy > 3)) |
582 | return -EINVAL; | 583 | return -EINVAL; |
583 | 584 | ||
584 | if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_GB_STATION_ADDR_0(phy), | 585 | if (adapter->hw_read_wx(adapter, NETXEN_NIU_GB_STATION_ADDR_0(phy), |
585 | &stationhigh, 4)) | 586 | &stationhigh, 4)) |
586 | return -EIO; | 587 | return -EIO; |
587 | if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_GB_STATION_ADDR_1(phy), | 588 | if (adapter->hw_read_wx(adapter, NETXEN_NIU_GB_STATION_ADDR_1(phy), |
588 | &stationlow, 4)) | 589 | &stationlow, 4)) |
589 | return -EIO; | 590 | return -EIO; |
590 | ((__le32 *)val)[1] = cpu_to_le32(stationhigh); | 591 | ((__le32 *)val)[1] = cpu_to_le32(stationhigh); |
@@ -613,14 +614,14 @@ int netxen_niu_macaddr_set(struct netxen_adapter *adapter, | |||
613 | temp[0] = temp[1] = 0; | 614 | temp[0] = temp[1] = 0; |
614 | memcpy(temp + 2, addr, 2); | 615 | memcpy(temp + 2, addr, 2); |
615 | val = le32_to_cpu(*(__le32 *)temp); | 616 | val = le32_to_cpu(*(__le32 *)temp); |
616 | if (netxen_nic_hw_write_wx | 617 | if (adapter->hw_write_wx(adapter, |
617 | (adapter, NETXEN_NIU_GB_STATION_ADDR_1(phy), &val, 4)) | 618 | NETXEN_NIU_GB_STATION_ADDR_1(phy), &val, 4)) |
618 | return -EIO; | 619 | return -EIO; |
619 | 620 | ||
620 | memcpy(temp, ((u8 *) addr) + 2, sizeof(__le32)); | 621 | memcpy(temp, ((u8 *) addr) + 2, sizeof(__le32)); |
621 | val = le32_to_cpu(*(__le32 *)temp); | 622 | val = le32_to_cpu(*(__le32 *)temp); |
622 | if (netxen_nic_hw_write_wx | 623 | if (adapter->hw_write_wx(adapter, |
623 | (adapter, NETXEN_NIU_GB_STATION_ADDR_0(phy), &val, 4)) | 624 | NETXEN_NIU_GB_STATION_ADDR_0(phy), &val, 4)) |
624 | return -2; | 625 | return -2; |
625 | 626 | ||
626 | netxen_niu_macaddr_get(adapter, | 627 | netxen_niu_macaddr_get(adapter, |
@@ -654,7 +655,7 @@ int netxen_niu_enable_gbe_port(struct netxen_adapter *adapter, | |||
654 | 655 | ||
655 | mac_cfg0 = 0; | 656 | mac_cfg0 = 0; |
656 | netxen_gb_soft_reset(mac_cfg0); | 657 | netxen_gb_soft_reset(mac_cfg0); |
657 | if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), | 658 | if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), |
658 | &mac_cfg0, 4)) | 659 | &mac_cfg0, 4)) |
659 | return -EIO; | 660 | return -EIO; |
660 | mac_cfg0 = 0; | 661 | mac_cfg0 = 0; |
@@ -666,7 +667,7 @@ int netxen_niu_enable_gbe_port(struct netxen_adapter *adapter, | |||
666 | netxen_gb_tx_reset_mac(mac_cfg0); | 667 | netxen_gb_tx_reset_mac(mac_cfg0); |
667 | netxen_gb_rx_reset_mac(mac_cfg0); | 668 | netxen_gb_rx_reset_mac(mac_cfg0); |
668 | 669 | ||
669 | if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), | 670 | if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), |
670 | &mac_cfg0, 4)) | 671 | &mac_cfg0, 4)) |
671 | return -EIO; | 672 | return -EIO; |
672 | mac_cfg1 = 0; | 673 | mac_cfg1 = 0; |
@@ -679,7 +680,7 @@ int netxen_niu_enable_gbe_port(struct netxen_adapter *adapter, | |||
679 | 680 | ||
680 | if (mode == NETXEN_NIU_10_100_MB) { | 681 | if (mode == NETXEN_NIU_10_100_MB) { |
681 | netxen_gb_set_intfmode(mac_cfg1, 1); | 682 | netxen_gb_set_intfmode(mac_cfg1, 1); |
682 | if (netxen_nic_hw_write_wx(adapter, | 683 | if (adapter->hw_write_wx(adapter, |
683 | NETXEN_NIU_GB_MAC_CONFIG_1(port), | 684 | NETXEN_NIU_GB_MAC_CONFIG_1(port), |
684 | &mac_cfg1, 4)) | 685 | &mac_cfg1, 4)) |
685 | return -EIO; | 686 | return -EIO; |
@@ -692,7 +693,7 @@ int netxen_niu_enable_gbe_port(struct netxen_adapter *adapter, | |||
692 | 693 | ||
693 | } else if (mode == NETXEN_NIU_1000_MB) { | 694 | } else if (mode == NETXEN_NIU_1000_MB) { |
694 | netxen_gb_set_intfmode(mac_cfg1, 2); | 695 | netxen_gb_set_intfmode(mac_cfg1, 2); |
695 | if (netxen_nic_hw_write_wx(adapter, | 696 | if (adapter->hw_write_wx(adapter, |
696 | NETXEN_NIU_GB_MAC_CONFIG_1(port), | 697 | NETXEN_NIU_GB_MAC_CONFIG_1(port), |
697 | &mac_cfg1, 4)) | 698 | &mac_cfg1, 4)) |
698 | return -EIO; | 699 | return -EIO; |
@@ -704,7 +705,7 @@ int netxen_niu_enable_gbe_port(struct netxen_adapter *adapter, | |||
704 | } | 705 | } |
705 | mii_cfg = 0; | 706 | mii_cfg = 0; |
706 | netxen_gb_set_mii_mgmt_clockselect(mii_cfg, 7); | 707 | netxen_gb_set_mii_mgmt_clockselect(mii_cfg, 7); |
707 | if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_CONFIG(port), | 708 | if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MII_MGMT_CONFIG(port), |
708 | &mii_cfg, 4)) | 709 | &mii_cfg, 4)) |
709 | return -EIO; | 710 | return -EIO; |
710 | mac_cfg0 = 0; | 711 | mac_cfg0 = 0; |
@@ -713,7 +714,7 @@ int netxen_niu_enable_gbe_port(struct netxen_adapter *adapter, | |||
713 | netxen_gb_unset_rx_flowctl(mac_cfg0); | 714 | netxen_gb_unset_rx_flowctl(mac_cfg0); |
714 | netxen_gb_unset_tx_flowctl(mac_cfg0); | 715 | netxen_gb_unset_tx_flowctl(mac_cfg0); |
715 | 716 | ||
716 | if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), | 717 | if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), |
717 | &mac_cfg0, 4)) | 718 | &mac_cfg0, 4)) |
718 | return -EIO; | 719 | return -EIO; |
719 | return 0; | 720 | return 0; |
@@ -730,7 +731,7 @@ int netxen_niu_disable_gbe_port(struct netxen_adapter *adapter) | |||
730 | return -EINVAL; | 731 | return -EINVAL; |
731 | mac_cfg0 = 0; | 732 | mac_cfg0 = 0; |
732 | netxen_gb_soft_reset(mac_cfg0); | 733 | netxen_gb_soft_reset(mac_cfg0); |
733 | if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), | 734 | if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port), |
734 | &mac_cfg0, 4)) | 735 | &mac_cfg0, 4)) |
735 | return -EIO; | 736 | return -EIO; |
736 | return 0; | 737 | return 0; |
@@ -746,7 +747,7 @@ int netxen_niu_disable_xg_port(struct netxen_adapter *adapter) | |||
746 | return -EINVAL; | 747 | return -EINVAL; |
747 | 748 | ||
748 | mac_cfg = 0; | 749 | mac_cfg = 0; |
749 | if (netxen_nic_hw_write_wx(adapter, | 750 | if (adapter->hw_write_wx(adapter, |
750 | NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), &mac_cfg, 4)) | 751 | NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), &mac_cfg, 4)) |
751 | return -EIO; | 752 | return -EIO; |
752 | return 0; | 753 | return 0; |
@@ -763,7 +764,7 @@ int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter, | |||
763 | return -EINVAL; | 764 | return -EINVAL; |
764 | 765 | ||
765 | /* save previous contents */ | 766 | /* save previous contents */ |
766 | if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_GB_DROP_WRONGADDR, | 767 | if (adapter->hw_read_wx(adapter, NETXEN_NIU_GB_DROP_WRONGADDR, |
767 | ®, 4)) | 768 | ®, 4)) |
768 | return -EIO; | 769 | return -EIO; |
769 | if (mode == NETXEN_NIU_PROMISC_MODE) { | 770 | if (mode == NETXEN_NIU_PROMISC_MODE) { |
@@ -801,7 +802,7 @@ int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter, | |||
801 | return -EIO; | 802 | return -EIO; |
802 | } | 803 | } |
803 | } | 804 | } |
804 | if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_GB_DROP_WRONGADDR, | 805 | if (adapter->hw_write_wx(adapter, NETXEN_NIU_GB_DROP_WRONGADDR, |
805 | ®, 4)) | 806 | ®, 4)) |
806 | return -EIO; | 807 | return -EIO; |
807 | return 0; | 808 | return 0; |
@@ -826,13 +827,13 @@ int netxen_niu_xg_macaddr_set(struct netxen_adapter *adapter, | |||
826 | case 0: | 827 | case 0: |
827 | memcpy(temp + 2, addr, 2); | 828 | memcpy(temp + 2, addr, 2); |
828 | val = le32_to_cpu(*(__le32 *)temp); | 829 | val = le32_to_cpu(*(__le32 *)temp); |
829 | if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_1, | 830 | if (adapter->hw_write_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_1, |
830 | &val, 4)) | 831 | &val, 4)) |
831 | return -EIO; | 832 | return -EIO; |
832 | 833 | ||
833 | memcpy(&temp, ((u8 *) addr) + 2, sizeof(__le32)); | 834 | memcpy(&temp, ((u8 *) addr) + 2, sizeof(__le32)); |
834 | val = le32_to_cpu(*(__le32 *)temp); | 835 | val = le32_to_cpu(*(__le32 *)temp); |
835 | if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_HI, | 836 | if (adapter->hw_write_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_HI, |
836 | &val, 4)) | 837 | &val, 4)) |
837 | return -EIO; | 838 | return -EIO; |
838 | break; | 839 | break; |
@@ -840,13 +841,13 @@ int netxen_niu_xg_macaddr_set(struct netxen_adapter *adapter, | |||
840 | case 1: | 841 | case 1: |
841 | memcpy(temp + 2, addr, 2); | 842 | memcpy(temp + 2, addr, 2); |
842 | val = le32_to_cpu(*(__le32 *)temp); | 843 | val = le32_to_cpu(*(__le32 *)temp); |
843 | if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_XG1_STATION_ADDR_0_1, | 844 | if (adapter->hw_write_wx(adapter, NETXEN_NIU_XG1_STATION_ADDR_0_1, |
844 | &val, 4)) | 845 | &val, 4)) |
845 | return -EIO; | 846 | return -EIO; |
846 | 847 | ||
847 | memcpy(&temp, ((u8 *) addr) + 2, sizeof(__le32)); | 848 | memcpy(&temp, ((u8 *) addr) + 2, sizeof(__le32)); |
848 | val = le32_to_cpu(*(__le32 *)temp); | 849 | val = le32_to_cpu(*(__le32 *)temp); |
849 | if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_XG1_STATION_ADDR_0_HI, | 850 | if (adapter->hw_write_wx(adapter, NETXEN_NIU_XG1_STATION_ADDR_0_HI, |
850 | &val, 4)) | 851 | &val, 4)) |
851 | return -EIO; | 852 | return -EIO; |
852 | break; | 853 | break; |
@@ -877,10 +878,10 @@ int netxen_niu_xg_macaddr_get(struct netxen_adapter *adapter, | |||
877 | if (phy != 0) | 878 | if (phy != 0) |
878 | return -EINVAL; | 879 | return -EINVAL; |
879 | 880 | ||
880 | if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_HI, | 881 | if (adapter->hw_read_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_HI, |
881 | &stationhigh, 4)) | 882 | &stationhigh, 4)) |
882 | return -EIO; | 883 | return -EIO; |
883 | if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_1, | 884 | if (adapter->hw_read_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_1, |
884 | &stationlow, 4)) | 885 | &stationlow, 4)) |
885 | return -EIO; | 886 | return -EIO; |
886 | ((__le32 *)val)[1] = cpu_to_le32(stationhigh); | 887 | ((__le32 *)val)[1] = cpu_to_le32(stationhigh); |
@@ -901,7 +902,7 @@ int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter, | |||
901 | if (port > NETXEN_NIU_MAX_XG_PORTS) | 902 | if (port > NETXEN_NIU_MAX_XG_PORTS) |
902 | return -EINVAL; | 903 | return -EINVAL; |
903 | 904 | ||
904 | if (netxen_nic_hw_read_wx(adapter, | 905 | if (adapter->hw_read_wx(adapter, |
905 | NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port), ®, 4)) | 906 | NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port), ®, 4)) |
906 | return -EIO; | 907 | return -EIO; |
907 | if (mode == NETXEN_NIU_PROMISC_MODE) | 908 | if (mode == NETXEN_NIU_PROMISC_MODE) |
@@ -909,6 +910,11 @@ int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter, | |||
909 | else | 910 | else |
910 | reg = (reg & ~0x2000UL); | 911 | reg = (reg & ~0x2000UL); |
911 | 912 | ||
913 | if (mode == NETXEN_NIU_ALLMULTI_MODE) | ||
914 | reg = (reg | 0x1000UL); | ||
915 | else | ||
916 | reg = (reg & ~0x1000UL); | ||
917 | |||
912 | netxen_crb_writelit_adapter(adapter, | 918 | netxen_crb_writelit_adapter(adapter, |
913 | NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port), reg); | 919 | NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port), reg); |
914 | 920 | ||
diff --git a/drivers/net/netxen/netxen_nic_phan_reg.h b/drivers/net/netxen/netxen_nic_phan_reg.h index a566b50f36f5..3bfa51b62a4f 100644 --- a/drivers/net/netxen/netxen_nic_phan_reg.h +++ b/drivers/net/netxen/netxen_nic_phan_reg.h | |||
@@ -42,8 +42,11 @@ | |||
42 | #define CRB_CMD_CONSUMER_OFFSET NETXEN_NIC_REG(0x0c) | 42 | #define CRB_CMD_CONSUMER_OFFSET NETXEN_NIC_REG(0x0c) |
43 | #define CRB_PAUSE_ADDR_LO NETXEN_NIC_REG(0x10) /* C0 EPG BUG */ | 43 | #define CRB_PAUSE_ADDR_LO NETXEN_NIC_REG(0x10) /* C0 EPG BUG */ |
44 | #define CRB_PAUSE_ADDR_HI NETXEN_NIC_REG(0x14) | 44 | #define CRB_PAUSE_ADDR_HI NETXEN_NIC_REG(0x14) |
45 | #define CRB_HOST_CMD_ADDR_HI NETXEN_NIC_REG(0x18) /* host add:cmd ring */ | 45 | #define NX_CDRP_CRB_OFFSET NETXEN_NIC_REG(0x18) |
46 | #define CRB_HOST_CMD_ADDR_LO NETXEN_NIC_REG(0x1c) | 46 | #define NX_ARG1_CRB_OFFSET NETXEN_NIC_REG(0x1c) |
47 | #define NX_ARG2_CRB_OFFSET NETXEN_NIC_REG(0x20) | ||
48 | #define NX_ARG3_CRB_OFFSET NETXEN_NIC_REG(0x24) | ||
49 | #define NX_SIGN_CRB_OFFSET NETXEN_NIC_REG(0x28) | ||
47 | #define CRB_CMD_INTR_LOOP NETXEN_NIC_REG(0x20) /* 4 regs for perf */ | 50 | #define CRB_CMD_INTR_LOOP NETXEN_NIC_REG(0x20) /* 4 regs for perf */ |
48 | #define CRB_CMD_DMA_LOOP NETXEN_NIC_REG(0x24) | 51 | #define CRB_CMD_DMA_LOOP NETXEN_NIC_REG(0x24) |
49 | #define CRB_RCV_INTR_LOOP NETXEN_NIC_REG(0x28) | 52 | #define CRB_RCV_INTR_LOOP NETXEN_NIC_REG(0x28) |
@@ -73,8 +76,8 @@ | |||
73 | #define CRB_RX_LRO_MID_TIMER NETXEN_NIC_REG(0x88) | 76 | #define CRB_RX_LRO_MID_TIMER NETXEN_NIC_REG(0x88) |
74 | #define CRB_DMA_MAX_RCV_BUFS NETXEN_NIC_REG(0x8c) | 77 | #define CRB_DMA_MAX_RCV_BUFS NETXEN_NIC_REG(0x8c) |
75 | #define CRB_MAX_DMA_ENTRIES NETXEN_NIC_REG(0x90) | 78 | #define CRB_MAX_DMA_ENTRIES NETXEN_NIC_REG(0x90) |
76 | #define CRB_XG_STATE NETXEN_NIC_REG(0x94) /* XG Link status */ | 79 | #define CRB_XG_STATE NETXEN_NIC_REG(0x94) /* XG Link status */ |
77 | #define CRB_AGENT_GO NETXEN_NIC_REG(0x98) /* NIC pkt gen agent */ | 80 | #define CRB_XG_STATE_P3 NETXEN_NIC_REG(0x98) /* XG PF Link status */ |
78 | #define CRB_AGENT_TX_SIZE NETXEN_NIC_REG(0x9c) | 81 | #define CRB_AGENT_TX_SIZE NETXEN_NIC_REG(0x9c) |
79 | #define CRB_AGENT_TX_TYPE NETXEN_NIC_REG(0xa0) | 82 | #define CRB_AGENT_TX_TYPE NETXEN_NIC_REG(0xa0) |
80 | #define CRB_AGENT_TX_ADDR NETXEN_NIC_REG(0xa4) | 83 | #define CRB_AGENT_TX_ADDR NETXEN_NIC_REG(0xa4) |
@@ -97,7 +100,9 @@ | |||
97 | #define CRB_HOST_BUFFER_CONS NETXEN_NIC_REG(0xf0) | 100 | #define CRB_HOST_BUFFER_CONS NETXEN_NIC_REG(0xf0) |
98 | #define CRB_JUMBO_BUFFER_PROD NETXEN_NIC_REG(0xf4) | 101 | #define CRB_JUMBO_BUFFER_PROD NETXEN_NIC_REG(0xf4) |
99 | #define CRB_JUMBO_BUFFER_CONS NETXEN_NIC_REG(0xf8) | 102 | #define CRB_JUMBO_BUFFER_CONS NETXEN_NIC_REG(0xf8) |
103 | #define CRB_HOST_DUMMY_BUF NETXEN_NIC_REG(0xfc) | ||
100 | 104 | ||
105 | #define CRB_RCVPEG_STATE NETXEN_NIC_REG(0x13c) | ||
101 | #define CRB_CMD_PRODUCER_OFFSET_1 NETXEN_NIC_REG(0x1ac) | 106 | #define CRB_CMD_PRODUCER_OFFSET_1 NETXEN_NIC_REG(0x1ac) |
102 | #define CRB_CMD_CONSUMER_OFFSET_1 NETXEN_NIC_REG(0x1b0) | 107 | #define CRB_CMD_CONSUMER_OFFSET_1 NETXEN_NIC_REG(0x1b0) |
103 | #define CRB_CMD_PRODUCER_OFFSET_2 NETXEN_NIC_REG(0x1b8) | 108 | #define CRB_CMD_PRODUCER_OFFSET_2 NETXEN_NIC_REG(0x1b8) |
@@ -147,29 +152,15 @@ | |||
147 | #define nx_get_temp_state(x) ((x) & 0xffff) | 152 | #define nx_get_temp_state(x) ((x) & 0xffff) |
148 | #define nx_encode_temp(val, state) (((val) << 16) | (state)) | 153 | #define nx_encode_temp(val, state) (((val) << 16) | (state)) |
149 | 154 | ||
150 | /* CRB registers per Rcv Descriptor ring */ | ||
151 | struct netxen_rcv_desc_crb { | ||
152 | u32 crb_rcv_producer_offset __attribute__ ((aligned(512))); | ||
153 | u32 crb_rcv_consumer_offset; | ||
154 | u32 crb_globalrcv_ring; | ||
155 | u32 crb_rcv_ring_size; | ||
156 | }; | ||
157 | |||
158 | /* | 155 | /* |
159 | * CRB registers used by the receive peg logic. | 156 | * CRB registers used by the receive peg logic. |
160 | */ | 157 | */ |
161 | 158 | ||
162 | struct netxen_recv_crb { | 159 | struct netxen_recv_crb { |
163 | struct netxen_rcv_desc_crb rcv_desc_crb[NUM_RCV_DESC_RINGS]; | 160 | u32 crb_rcv_producer[NUM_RCV_DESC_RINGS]; |
164 | u32 crb_rcvstatus_ring; | 161 | u32 crb_sts_consumer; |
165 | u32 crb_rcv_status_producer; | ||
166 | u32 crb_rcv_status_consumer; | ||
167 | u32 crb_rcvpeg_state; | ||
168 | u32 crb_status_ring_size; | ||
169 | }; | 162 | }; |
170 | 163 | ||
171 | extern struct netxen_recv_crb recv_crb_registers[]; | ||
172 | |||
173 | /* | 164 | /* |
174 | * Temperature control. | 165 | * Temperature control. |
175 | */ | 166 | */ |
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 32a8503a7acd..4aa547947040 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c | |||
@@ -158,11 +158,10 @@ static int m88e1111_config_init(struct phy_device *phydev) | |||
158 | { | 158 | { |
159 | int err; | 159 | int err; |
160 | int temp; | 160 | int temp; |
161 | int mode; | ||
162 | 161 | ||
163 | /* Enable Fiber/Copper auto selection */ | 162 | /* Enable Fiber/Copper auto selection */ |
164 | temp = phy_read(phydev, MII_M1111_PHY_EXT_SR); | 163 | temp = phy_read(phydev, MII_M1111_PHY_EXT_SR); |
165 | temp |= MII_M1111_HWCFG_FIBER_COPPER_AUTO; | 164 | temp &= ~MII_M1111_HWCFG_FIBER_COPPER_AUTO; |
166 | phy_write(phydev, MII_M1111_PHY_EXT_SR, temp); | 165 | phy_write(phydev, MII_M1111_PHY_EXT_SR, temp); |
167 | 166 | ||
168 | temp = phy_read(phydev, MII_BMCR); | 167 | temp = phy_read(phydev, MII_BMCR); |
@@ -198,9 +197,7 @@ static int m88e1111_config_init(struct phy_device *phydev) | |||
198 | 197 | ||
199 | temp &= ~(MII_M1111_HWCFG_MODE_MASK); | 198 | temp &= ~(MII_M1111_HWCFG_MODE_MASK); |
200 | 199 | ||
201 | mode = phy_read(phydev, MII_M1111_PHY_EXT_CR); | 200 | if (temp & MII_M1111_HWCFG_FIBER_COPPER_RES) |
202 | |||
203 | if (mode & MII_M1111_HWCFG_FIBER_COPPER_RES) | ||
204 | temp |= MII_M1111_HWCFG_MODE_FIBER_RGMII; | 201 | temp |= MII_M1111_HWCFG_MODE_FIBER_RGMII; |
205 | else | 202 | else |
206 | temp |= MII_M1111_HWCFG_MODE_COPPER_RGMII; | 203 | temp |= MII_M1111_HWCFG_MODE_COPPER_RGMII; |
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c index 504a48ff73c8..6531ff565c54 100644 --- a/drivers/net/r6040.c +++ b/drivers/net/r6040.c | |||
@@ -50,8 +50,8 @@ | |||
50 | #include <asm/processor.h> | 50 | #include <asm/processor.h> |
51 | 51 | ||
52 | #define DRV_NAME "r6040" | 52 | #define DRV_NAME "r6040" |
53 | #define DRV_VERSION "0.16" | 53 | #define DRV_VERSION "0.18" |
54 | #define DRV_RELDATE "10Nov2007" | 54 | #define DRV_RELDATE "13Jul2008" |
55 | 55 | ||
56 | /* PHY CHIP Address */ | 56 | /* PHY CHIP Address */ |
57 | #define PHY1_ADDR 1 /* For MAC1 */ | 57 | #define PHY1_ADDR 1 /* For MAC1 */ |
@@ -91,6 +91,14 @@ | |||
91 | #define MISR 0x3C /* Status register */ | 91 | #define MISR 0x3C /* Status register */ |
92 | #define MIER 0x40 /* INT enable register */ | 92 | #define MIER 0x40 /* INT enable register */ |
93 | #define MSK_INT 0x0000 /* Mask off interrupts */ | 93 | #define MSK_INT 0x0000 /* Mask off interrupts */ |
94 | #define RX_FINISH 0x0001 /* RX finished */ | ||
95 | #define RX_NO_DESC 0x0002 /* No RX descriptor available */ | ||
96 | #define RX_FIFO_FULL 0x0004 /* RX FIFO full */ | ||
97 | #define RX_EARLY 0x0008 /* RX early */ | ||
98 | #define TX_FINISH 0x0010 /* TX finished */ | ||
99 | #define TX_EARLY 0x0080 /* TX early */ | ||
100 | #define EVENT_OVRFL 0x0100 /* Event counter overflow */ | ||
101 | #define LINK_CHANGED 0x0200 /* PHY link changed */ | ||
94 | #define ME_CISR 0x44 /* Event counter INT status */ | 102 | #define ME_CISR 0x44 /* Event counter INT status */ |
95 | #define ME_CIER 0x48 /* Event counter INT enable */ | 103 | #define ME_CIER 0x48 /* Event counter INT enable */ |
96 | #define MR_CNT 0x50 /* Successfully received packet counter */ | 104 | #define MR_CNT 0x50 /* Successfully received packet counter */ |
@@ -130,6 +138,21 @@ | |||
130 | #define MBCR_DEFAULT 0x012A /* MAC Bus Control Register */ | 138 | #define MBCR_DEFAULT 0x012A /* MAC Bus Control Register */ |
131 | #define MCAST_MAX 4 /* Max number multicast addresses to filter */ | 139 | #define MCAST_MAX 4 /* Max number multicast addresses to filter */ |
132 | 140 | ||
141 | /* Descriptor status */ | ||
142 | #define DSC_OWNER_MAC 0x8000 /* MAC is the owner of this descriptor */ | ||
143 | #define DSC_RX_OK 0x4000 /* RX was successful */ | ||
144 | #define DSC_RX_ERR 0x0800 /* RX PHY error */ | ||
145 | #define DSC_RX_ERR_DRI 0x0400 /* RX dribble packet */ | ||
146 | #define DSC_RX_ERR_BUF 0x0200 /* RX length exceeds buffer size */ | ||
147 | #define DSC_RX_ERR_LONG 0x0100 /* RX length > maximum packet length */ | ||
148 | #define DSC_RX_ERR_RUNT 0x0080 /* RX packet length < 64 byte */ | ||
149 | #define DSC_RX_ERR_CRC 0x0040 /* RX CRC error */ | ||
150 | #define DSC_RX_BCAST 0x0020 /* RX broadcast (no error) */ | ||
151 | #define DSC_RX_MCAST 0x0010 /* RX multicast (no error) */ | ||
152 | #define DSC_RX_MCH_HIT 0x0008 /* RX multicast hit in hash table (no error) */ | ||
153 | #define DSC_RX_MIDH_HIT 0x0004 /* RX MID table hit (no error) */ | ||
154 | #define DSC_RX_IDX_MID_MASK 3 /* RX mask for the index of matched MIDx */ | ||
155 | |||
133 | /* PHY settings */ | 156 | /* PHY settings */ |
134 | #define ICPLUS_PHY_ID 0x0243 | 157 | #define ICPLUS_PHY_ID 0x0243 |
135 | 158 | ||
@@ -139,10 +162,10 @@ MODULE_AUTHOR("Sten Wang <sten.wang@rdc.com.tw>," | |||
139 | MODULE_LICENSE("GPL"); | 162 | MODULE_LICENSE("GPL"); |
140 | MODULE_DESCRIPTION("RDC R6040 NAPI PCI FastEthernet driver"); | 163 | MODULE_DESCRIPTION("RDC R6040 NAPI PCI FastEthernet driver"); |
141 | 164 | ||
142 | #define RX_INT 0x0001 | 165 | /* RX and TX interrupts that we handle */ |
143 | #define TX_INT 0x0010 | 166 | #define RX_INTS (RX_FIFO_FULL | RX_NO_DESC | RX_FINISH) |
144 | #define RX_NO_DESC_INT 0x0002 | 167 | #define TX_INTS (TX_FINISH) |
145 | #define INT_MASK (RX_INT | TX_INT) | 168 | #define INT_MASK (RX_INTS | TX_INTS) |
146 | 169 | ||
147 | struct r6040_descriptor { | 170 | struct r6040_descriptor { |
148 | u16 status, len; /* 0-3 */ | 171 | u16 status, len; /* 0-3 */ |
@@ -167,7 +190,7 @@ struct r6040_private { | |||
167 | struct r6040_descriptor *tx_ring; | 190 | struct r6040_descriptor *tx_ring; |
168 | dma_addr_t rx_ring_dma; | 191 | dma_addr_t rx_ring_dma; |
169 | dma_addr_t tx_ring_dma; | 192 | dma_addr_t tx_ring_dma; |
170 | u16 tx_free_desc, rx_free_desc, phy_addr, phy_mode; | 193 | u16 tx_free_desc, phy_addr, phy_mode; |
171 | u16 mcr0, mcr1; | 194 | u16 mcr0, mcr1; |
172 | u16 switch_sig; | 195 | u16 switch_sig; |
173 | struct net_device *dev; | 196 | struct net_device *dev; |
@@ -183,7 +206,7 @@ static char version[] __devinitdata = KERN_INFO DRV_NAME | |||
183 | static int phy_table[] = { PHY1_ADDR, PHY2_ADDR }; | 206 | static int phy_table[] = { PHY1_ADDR, PHY2_ADDR }; |
184 | 207 | ||
185 | /* Read a word data from PHY Chip */ | 208 | /* Read a word data from PHY Chip */ |
186 | static int phy_read(void __iomem *ioaddr, int phy_addr, int reg) | 209 | static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg) |
187 | { | 210 | { |
188 | int limit = 2048; | 211 | int limit = 2048; |
189 | u16 cmd; | 212 | u16 cmd; |
@@ -200,7 +223,7 @@ static int phy_read(void __iomem *ioaddr, int phy_addr, int reg) | |||
200 | } | 223 | } |
201 | 224 | ||
202 | /* Write a word data from PHY Chip */ | 225 | /* Write a word data from PHY Chip */ |
203 | static void phy_write(void __iomem *ioaddr, int phy_addr, int reg, u16 val) | 226 | static void r6040_phy_write(void __iomem *ioaddr, int phy_addr, int reg, u16 val) |
204 | { | 227 | { |
205 | int limit = 2048; | 228 | int limit = 2048; |
206 | u16 cmd; | 229 | u16 cmd; |
@@ -216,20 +239,20 @@ static void phy_write(void __iomem *ioaddr, int phy_addr, int reg, u16 val) | |||
216 | } | 239 | } |
217 | } | 240 | } |
218 | 241 | ||
219 | static int mdio_read(struct net_device *dev, int mii_id, int reg) | 242 | static int r6040_mdio_read(struct net_device *dev, int mii_id, int reg) |
220 | { | 243 | { |
221 | struct r6040_private *lp = netdev_priv(dev); | 244 | struct r6040_private *lp = netdev_priv(dev); |
222 | void __iomem *ioaddr = lp->base; | 245 | void __iomem *ioaddr = lp->base; |
223 | 246 | ||
224 | return (phy_read(ioaddr, lp->phy_addr, reg)); | 247 | return (r6040_phy_read(ioaddr, lp->phy_addr, reg)); |
225 | } | 248 | } |
226 | 249 | ||
227 | static void mdio_write(struct net_device *dev, int mii_id, int reg, int val) | 250 | static void r6040_mdio_write(struct net_device *dev, int mii_id, int reg, int val) |
228 | { | 251 | { |
229 | struct r6040_private *lp = netdev_priv(dev); | 252 | struct r6040_private *lp = netdev_priv(dev); |
230 | void __iomem *ioaddr = lp->base; | 253 | void __iomem *ioaddr = lp->base; |
231 | 254 | ||
232 | phy_write(ioaddr, lp->phy_addr, reg, val); | 255 | r6040_phy_write(ioaddr, lp->phy_addr, reg, val); |
233 | } | 256 | } |
234 | 257 | ||
235 | static void r6040_free_txbufs(struct net_device *dev) | 258 | static void r6040_free_txbufs(struct net_device *dev) |
@@ -283,58 +306,101 @@ static void r6040_init_ring_desc(struct r6040_descriptor *desc_ring, | |||
283 | desc->vndescp = desc_ring; | 306 | desc->vndescp = desc_ring; |
284 | } | 307 | } |
285 | 308 | ||
286 | /* Allocate skb buffer for rx descriptor */ | 309 | static void r6040_init_txbufs(struct net_device *dev) |
287 | static void rx_buf_alloc(struct r6040_private *lp, struct net_device *dev) | ||
288 | { | 310 | { |
289 | struct r6040_descriptor *descptr; | 311 | struct r6040_private *lp = netdev_priv(dev); |
290 | void __iomem *ioaddr = lp->base; | ||
291 | 312 | ||
292 | descptr = lp->rx_insert_ptr; | 313 | lp->tx_free_desc = TX_DCNT; |
293 | while (lp->rx_free_desc < RX_DCNT) { | ||
294 | descptr->skb_ptr = netdev_alloc_skb(dev, MAX_BUF_SIZE); | ||
295 | 314 | ||
296 | if (!descptr->skb_ptr) | 315 | lp->tx_remove_ptr = lp->tx_insert_ptr = lp->tx_ring; |
297 | break; | 316 | r6040_init_ring_desc(lp->tx_ring, lp->tx_ring_dma, TX_DCNT); |
298 | descptr->buf = cpu_to_le32(pci_map_single(lp->pdev, | ||
299 | descptr->skb_ptr->data, | ||
300 | MAX_BUF_SIZE, PCI_DMA_FROMDEVICE)); | ||
301 | descptr->status = 0x8000; | ||
302 | descptr = descptr->vndescp; | ||
303 | lp->rx_free_desc++; | ||
304 | /* Trigger RX DMA */ | ||
305 | iowrite16(lp->mcr0 | 0x0002, ioaddr); | ||
306 | } | ||
307 | lp->rx_insert_ptr = descptr; | ||
308 | } | 317 | } |
309 | 318 | ||
310 | static void r6040_alloc_txbufs(struct net_device *dev) | 319 | static int r6040_alloc_rxbufs(struct net_device *dev) |
311 | { | 320 | { |
312 | struct r6040_private *lp = netdev_priv(dev); | 321 | struct r6040_private *lp = netdev_priv(dev); |
313 | void __iomem *ioaddr = lp->base; | 322 | struct r6040_descriptor *desc; |
323 | struct sk_buff *skb; | ||
324 | int rc; | ||
314 | 325 | ||
315 | lp->tx_free_desc = TX_DCNT; | 326 | lp->rx_remove_ptr = lp->rx_insert_ptr = lp->rx_ring; |
327 | r6040_init_ring_desc(lp->rx_ring, lp->rx_ring_dma, RX_DCNT); | ||
316 | 328 | ||
317 | lp->tx_remove_ptr = lp->tx_insert_ptr = lp->tx_ring; | 329 | /* Allocate skbs for the rx descriptors */ |
318 | r6040_init_ring_desc(lp->tx_ring, lp->tx_ring_dma, TX_DCNT); | 330 | desc = lp->rx_ring; |
331 | do { | ||
332 | skb = netdev_alloc_skb(dev, MAX_BUF_SIZE); | ||
333 | if (!skb) { | ||
334 | printk(KERN_ERR "%s: failed to alloc skb for rx\n", dev->name); | ||
335 | rc = -ENOMEM; | ||
336 | goto err_exit; | ||
337 | } | ||
338 | desc->skb_ptr = skb; | ||
339 | desc->buf = cpu_to_le32(pci_map_single(lp->pdev, | ||
340 | desc->skb_ptr->data, | ||
341 | MAX_BUF_SIZE, PCI_DMA_FROMDEVICE)); | ||
342 | desc->status = DSC_OWNER_MAC; | ||
343 | desc = desc->vndescp; | ||
344 | } while (desc != lp->rx_ring); | ||
319 | 345 | ||
320 | iowrite16(lp->tx_ring_dma, ioaddr + MTD_SA0); | 346 | return 0; |
321 | iowrite16(lp->tx_ring_dma >> 16, ioaddr + MTD_SA1); | 347 | |
348 | err_exit: | ||
349 | /* Deallocate all previously allocated skbs */ | ||
350 | r6040_free_rxbufs(dev); | ||
351 | return rc; | ||
322 | } | 352 | } |
323 | 353 | ||
324 | static void r6040_alloc_rxbufs(struct net_device *dev) | 354 | static void r6040_init_mac_regs(struct net_device *dev) |
325 | { | 355 | { |
326 | struct r6040_private *lp = netdev_priv(dev); | 356 | struct r6040_private *lp = netdev_priv(dev); |
327 | void __iomem *ioaddr = lp->base; | 357 | void __iomem *ioaddr = lp->base; |
358 | int limit = 2048; | ||
359 | u16 cmd; | ||
328 | 360 | ||
329 | lp->rx_free_desc = 0; | 361 | /* Mask Off Interrupt */ |
362 | iowrite16(MSK_INT, ioaddr + MIER); | ||
330 | 363 | ||
331 | lp->rx_remove_ptr = lp->rx_insert_ptr = lp->rx_ring; | 364 | /* Reset RDC MAC */ |
332 | r6040_init_ring_desc(lp->rx_ring, lp->rx_ring_dma, RX_DCNT); | 365 | iowrite16(MAC_RST, ioaddr + MCR1); |
366 | while (limit--) { | ||
367 | cmd = ioread16(ioaddr + MCR1); | ||
368 | if (cmd & 0x1) | ||
369 | break; | ||
370 | } | ||
371 | /* Reset internal state machine */ | ||
372 | iowrite16(2, ioaddr + MAC_SM); | ||
373 | iowrite16(0, ioaddr + MAC_SM); | ||
374 | udelay(5000); | ||
333 | 375 | ||
334 | rx_buf_alloc(lp, dev); | 376 | /* MAC Bus Control Register */ |
377 | iowrite16(MBCR_DEFAULT, ioaddr + MBCR); | ||
378 | |||
379 | /* Buffer Size Register */ | ||
380 | iowrite16(MAX_BUF_SIZE, ioaddr + MR_BSR); | ||
381 | |||
382 | /* Write TX ring start address */ | ||
383 | iowrite16(lp->tx_ring_dma, ioaddr + MTD_SA0); | ||
384 | iowrite16(lp->tx_ring_dma >> 16, ioaddr + MTD_SA1); | ||
335 | 385 | ||
386 | /* Write RX ring start address */ | ||
336 | iowrite16(lp->rx_ring_dma, ioaddr + MRD_SA0); | 387 | iowrite16(lp->rx_ring_dma, ioaddr + MRD_SA0); |
337 | iowrite16(lp->rx_ring_dma >> 16, ioaddr + MRD_SA1); | 388 | iowrite16(lp->rx_ring_dma >> 16, ioaddr + MRD_SA1); |
389 | |||
390 | /* Set interrupt waiting time and packet numbers */ | ||
391 | iowrite16(0, ioaddr + MT_ICR); | ||
392 | iowrite16(0, ioaddr + MR_ICR); | ||
393 | |||
394 | /* Enable interrupts */ | ||
395 | iowrite16(INT_MASK, ioaddr + MIER); | ||
396 | |||
397 | /* Enable TX and RX */ | ||
398 | iowrite16(lp->mcr0 | 0x0002, ioaddr); | ||
399 | |||
400 | /* Let TX poll the descriptors | ||
401 | * we may got called by r6040_tx_timeout which has left | ||
402 | * some unsent tx buffers */ | ||
403 | iowrite16(0x01, ioaddr + MTPR); | ||
338 | } | 404 | } |
339 | 405 | ||
340 | static void r6040_tx_timeout(struct net_device *dev) | 406 | static void r6040_tx_timeout(struct net_device *dev) |
@@ -342,27 +408,16 @@ static void r6040_tx_timeout(struct net_device *dev) | |||
342 | struct r6040_private *priv = netdev_priv(dev); | 408 | struct r6040_private *priv = netdev_priv(dev); |
343 | void __iomem *ioaddr = priv->base; | 409 | void __iomem *ioaddr = priv->base; |
344 | 410 | ||
345 | printk(KERN_WARNING "%s: transmit timed out, status %4.4x, PHY status " | 411 | printk(KERN_WARNING "%s: transmit timed out, int enable %4.4x " |
346 | "%4.4x\n", | 412 | "status %4.4x, PHY status %4.4x\n", |
347 | dev->name, ioread16(ioaddr + MIER), | 413 | dev->name, ioread16(ioaddr + MIER), |
348 | mdio_read(dev, priv->mii_if.phy_id, MII_BMSR)); | 414 | ioread16(ioaddr + MISR), |
349 | 415 | r6040_mdio_read(dev, priv->mii_if.phy_id, MII_BMSR)); | |
350 | disable_irq(dev->irq); | ||
351 | napi_disable(&priv->napi); | ||
352 | spin_lock(&priv->lock); | ||
353 | /* Clear all descriptors */ | ||
354 | r6040_free_txbufs(dev); | ||
355 | r6040_free_rxbufs(dev); | ||
356 | r6040_alloc_txbufs(dev); | ||
357 | r6040_alloc_rxbufs(dev); | ||
358 | |||
359 | /* Reset MAC */ | ||
360 | iowrite16(MAC_RST, ioaddr + MCR1); | ||
361 | spin_unlock(&priv->lock); | ||
362 | enable_irq(dev->irq); | ||
363 | 416 | ||
364 | dev->stats.tx_errors++; | 417 | dev->stats.tx_errors++; |
365 | netif_wake_queue(dev); | 418 | |
419 | /* Reset MAC and re-init all registers */ | ||
420 | r6040_init_mac_regs(dev); | ||
366 | } | 421 | } |
367 | 422 | ||
368 | static struct net_device_stats *r6040_get_stats(struct net_device *dev) | 423 | static struct net_device_stats *r6040_get_stats(struct net_device *dev) |
@@ -424,6 +479,7 @@ static int r6040_close(struct net_device *dev) | |||
424 | del_timer_sync(&lp->timer); | 479 | del_timer_sync(&lp->timer); |
425 | 480 | ||
426 | spin_lock_irq(&lp->lock); | 481 | spin_lock_irq(&lp->lock); |
482 | napi_disable(&lp->napi); | ||
427 | netif_stop_queue(dev); | 483 | netif_stop_queue(dev); |
428 | r6040_down(dev); | 484 | r6040_down(dev); |
429 | spin_unlock_irq(&lp->lock); | 485 | spin_unlock_irq(&lp->lock); |
@@ -432,23 +488,23 @@ static int r6040_close(struct net_device *dev) | |||
432 | } | 488 | } |
433 | 489 | ||
434 | /* Status of PHY CHIP */ | 490 | /* Status of PHY CHIP */ |
435 | static int phy_mode_chk(struct net_device *dev) | 491 | static int r6040_phy_mode_chk(struct net_device *dev) |
436 | { | 492 | { |
437 | struct r6040_private *lp = netdev_priv(dev); | 493 | struct r6040_private *lp = netdev_priv(dev); |
438 | void __iomem *ioaddr = lp->base; | 494 | void __iomem *ioaddr = lp->base; |
439 | int phy_dat; | 495 | int phy_dat; |
440 | 496 | ||
441 | /* PHY Link Status Check */ | 497 | /* PHY Link Status Check */ |
442 | phy_dat = phy_read(ioaddr, lp->phy_addr, 1); | 498 | phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 1); |
443 | if (!(phy_dat & 0x4)) | 499 | if (!(phy_dat & 0x4)) |
444 | phy_dat = 0x8000; /* Link Failed, full duplex */ | 500 | phy_dat = 0x8000; /* Link Failed, full duplex */ |
445 | 501 | ||
446 | /* PHY Chip Auto-Negotiation Status */ | 502 | /* PHY Chip Auto-Negotiation Status */ |
447 | phy_dat = phy_read(ioaddr, lp->phy_addr, 1); | 503 | phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 1); |
448 | if (phy_dat & 0x0020) { | 504 | if (phy_dat & 0x0020) { |
449 | /* Auto Negotiation Mode */ | 505 | /* Auto Negotiation Mode */ |
450 | phy_dat = phy_read(ioaddr, lp->phy_addr, 5); | 506 | phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 5); |
451 | phy_dat &= phy_read(ioaddr, lp->phy_addr, 4); | 507 | phy_dat &= r6040_phy_read(ioaddr, lp->phy_addr, 4); |
452 | if (phy_dat & 0x140) | 508 | if (phy_dat & 0x140) |
453 | /* Force full duplex */ | 509 | /* Force full duplex */ |
454 | phy_dat = 0x8000; | 510 | phy_dat = 0x8000; |
@@ -456,7 +512,7 @@ static int phy_mode_chk(struct net_device *dev) | |||
456 | phy_dat = 0; | 512 | phy_dat = 0; |
457 | } else { | 513 | } else { |
458 | /* Force Mode */ | 514 | /* Force Mode */ |
459 | phy_dat = phy_read(ioaddr, lp->phy_addr, 0); | 515 | phy_dat = r6040_phy_read(ioaddr, lp->phy_addr, 0); |
460 | if (phy_dat & 0x100) | 516 | if (phy_dat & 0x100) |
461 | phy_dat = 0x8000; | 517 | phy_dat = 0x8000; |
462 | else | 518 | else |
@@ -468,12 +524,12 @@ static int phy_mode_chk(struct net_device *dev) | |||
468 | 524 | ||
469 | static void r6040_set_carrier(struct mii_if_info *mii) | 525 | static void r6040_set_carrier(struct mii_if_info *mii) |
470 | { | 526 | { |
471 | if (phy_mode_chk(mii->dev)) { | 527 | if (r6040_phy_mode_chk(mii->dev)) { |
472 | /* autoneg is off: Link is always assumed to be up */ | 528 | /* autoneg is off: Link is always assumed to be up */ |
473 | if (!netif_carrier_ok(mii->dev)) | 529 | if (!netif_carrier_ok(mii->dev)) |
474 | netif_carrier_on(mii->dev); | 530 | netif_carrier_on(mii->dev); |
475 | } else | 531 | } else |
476 | phy_mode_chk(mii->dev); | 532 | r6040_phy_mode_chk(mii->dev); |
477 | } | 533 | } |
478 | 534 | ||
479 | static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | 535 | static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
@@ -494,73 +550,72 @@ static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
494 | static int r6040_rx(struct net_device *dev, int limit) | 550 | static int r6040_rx(struct net_device *dev, int limit) |
495 | { | 551 | { |
496 | struct r6040_private *priv = netdev_priv(dev); | 552 | struct r6040_private *priv = netdev_priv(dev); |
497 | int count; | 553 | struct r6040_descriptor *descptr = priv->rx_remove_ptr; |
498 | void __iomem *ioaddr = priv->base; | 554 | struct sk_buff *skb_ptr, *new_skb; |
555 | int count = 0; | ||
499 | u16 err; | 556 | u16 err; |
500 | 557 | ||
501 | for (count = 0; count < limit; ++count) { | 558 | /* Limit not reached and the descriptor belongs to the CPU */ |
502 | struct r6040_descriptor *descptr = priv->rx_remove_ptr; | 559 | while (count < limit && !(descptr->status & DSC_OWNER_MAC)) { |
503 | struct sk_buff *skb_ptr; | 560 | /* Read the descriptor status */ |
504 | 561 | err = descptr->status; | |
505 | /* Disable RX interrupt */ | 562 | /* Global error status set */ |
506 | iowrite16(ioread16(ioaddr + MIER) & (~RX_INT), ioaddr + MIER); | 563 | if (err & DSC_RX_ERR) { |
507 | descptr = priv->rx_remove_ptr; | 564 | /* RX dribble */ |
508 | 565 | if (err & DSC_RX_ERR_DRI) | |
509 | /* Check for errors */ | 566 | dev->stats.rx_frame_errors++; |
510 | err = ioread16(ioaddr + MLSR); | 567 | /* Buffer lenght exceeded */ |
511 | if (err & 0x0400) | 568 | if (err & DSC_RX_ERR_BUF) |
512 | dev->stats.rx_errors++; | 569 | dev->stats.rx_length_errors++; |
513 | /* RX FIFO over-run */ | 570 | /* Packet too long */ |
514 | if (err & 0x8000) | 571 | if (err & DSC_RX_ERR_LONG) |
515 | dev->stats.rx_fifo_errors++; | 572 | dev->stats.rx_length_errors++; |
516 | /* RX descriptor unavailable */ | 573 | /* Packet < 64 bytes */ |
517 | if (err & 0x0080) | 574 | if (err & DSC_RX_ERR_RUNT) |
518 | dev->stats.rx_frame_errors++; | 575 | dev->stats.rx_length_errors++; |
519 | /* Received packet with length over buffer lenght */ | 576 | /* CRC error */ |
520 | if (err & 0x0020) | 577 | if (err & DSC_RX_ERR_CRC) { |
521 | dev->stats.rx_over_errors++; | 578 | spin_lock(&priv->lock); |
522 | /* Received packet with too long or short */ | 579 | dev->stats.rx_crc_errors++; |
523 | if (err & (0x0010 | 0x0008)) | 580 | spin_unlock(&priv->lock); |
524 | dev->stats.rx_length_errors++; | ||
525 | /* Received packet with CRC errors */ | ||
526 | if (err & 0x0004) { | ||
527 | spin_lock(&priv->lock); | ||
528 | dev->stats.rx_crc_errors++; | ||
529 | spin_unlock(&priv->lock); | ||
530 | } | ||
531 | |||
532 | while (priv->rx_free_desc) { | ||
533 | /* No RX packet */ | ||
534 | if (descptr->status & 0x8000) | ||
535 | break; | ||
536 | skb_ptr = descptr->skb_ptr; | ||
537 | if (!skb_ptr) { | ||
538 | printk(KERN_ERR "%s: Inconsistent RX" | ||
539 | "descriptor chain\n", | ||
540 | dev->name); | ||
541 | break; | ||
542 | } | 581 | } |
543 | descptr->skb_ptr = NULL; | 582 | goto next_descr; |
544 | skb_ptr->dev = priv->dev; | 583 | } |
545 | /* Do not count the CRC */ | 584 | |
546 | skb_put(skb_ptr, descptr->len - 4); | 585 | /* Packet successfully received */ |
547 | pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf), | 586 | new_skb = netdev_alloc_skb(dev, MAX_BUF_SIZE); |
548 | MAX_BUF_SIZE, PCI_DMA_FROMDEVICE); | 587 | if (!new_skb) { |
549 | skb_ptr->protocol = eth_type_trans(skb_ptr, priv->dev); | 588 | dev->stats.rx_dropped++; |
550 | /* Send to upper layer */ | 589 | goto next_descr; |
551 | netif_receive_skb(skb_ptr); | ||
552 | dev->last_rx = jiffies; | ||
553 | dev->stats.rx_packets++; | ||
554 | dev->stats.rx_bytes += descptr->len; | ||
555 | /* To next descriptor */ | ||
556 | descptr = descptr->vndescp; | ||
557 | priv->rx_free_desc--; | ||
558 | } | 590 | } |
559 | priv->rx_remove_ptr = descptr; | 591 | skb_ptr = descptr->skb_ptr; |
592 | skb_ptr->dev = priv->dev; | ||
593 | |||
594 | /* Do not count the CRC */ | ||
595 | skb_put(skb_ptr, descptr->len - 4); | ||
596 | pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf), | ||
597 | MAX_BUF_SIZE, PCI_DMA_FROMDEVICE); | ||
598 | skb_ptr->protocol = eth_type_trans(skb_ptr, priv->dev); | ||
599 | |||
600 | /* Send to upper layer */ | ||
601 | netif_receive_skb(skb_ptr); | ||
602 | dev->last_rx = jiffies; | ||
603 | dev->stats.rx_packets++; | ||
604 | dev->stats.rx_bytes += descptr->len - 4; | ||
605 | |||
606 | /* put new skb into descriptor */ | ||
607 | descptr->skb_ptr = new_skb; | ||
608 | descptr->buf = cpu_to_le32(pci_map_single(priv->pdev, | ||
609 | descptr->skb_ptr->data, | ||
610 | MAX_BUF_SIZE, PCI_DMA_FROMDEVICE)); | ||
611 | |||
612 | next_descr: | ||
613 | /* put the descriptor back to the MAC */ | ||
614 | descptr->status = DSC_OWNER_MAC; | ||
615 | descptr = descptr->vndescp; | ||
616 | count++; | ||
560 | } | 617 | } |
561 | /* Allocate new RX buffer */ | 618 | priv->rx_remove_ptr = descptr; |
562 | if (priv->rx_free_desc < RX_DCNT) | ||
563 | rx_buf_alloc(priv, priv->dev); | ||
564 | 619 | ||
565 | return count; | 620 | return count; |
566 | } | 621 | } |
@@ -584,7 +639,7 @@ static void r6040_tx(struct net_device *dev) | |||
584 | if (err & (0x2000 | 0x4000)) | 639 | if (err & (0x2000 | 0x4000)) |
585 | dev->stats.tx_carrier_errors++; | 640 | dev->stats.tx_carrier_errors++; |
586 | 641 | ||
587 | if (descptr->status & 0x8000) | 642 | if (descptr->status & DSC_OWNER_MAC) |
588 | break; /* Not complete */ | 643 | break; /* Not complete */ |
589 | skb_ptr = descptr->skb_ptr; | 644 | skb_ptr = descptr->skb_ptr; |
590 | pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf), | 645 | pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf), |
@@ -616,7 +671,7 @@ static int r6040_poll(struct napi_struct *napi, int budget) | |||
616 | if (work_done < budget) { | 671 | if (work_done < budget) { |
617 | netif_rx_complete(dev, napi); | 672 | netif_rx_complete(dev, napi); |
618 | /* Enable RX interrupt */ | 673 | /* Enable RX interrupt */ |
619 | iowrite16(ioread16(ioaddr + MIER) | RX_INT, ioaddr + MIER); | 674 | iowrite16(ioread16(ioaddr + MIER) | RX_INTS, ioaddr + MIER); |
620 | } | 675 | } |
621 | return work_done; | 676 | return work_done; |
622 | } | 677 | } |
@@ -638,13 +693,22 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id) | |||
638 | return IRQ_NONE; | 693 | return IRQ_NONE; |
639 | 694 | ||
640 | /* RX interrupt request */ | 695 | /* RX interrupt request */ |
641 | if (status & 0x01) { | 696 | if (status & RX_INTS) { |
697 | if (status & RX_NO_DESC) { | ||
698 | /* RX descriptor unavailable */ | ||
699 | dev->stats.rx_dropped++; | ||
700 | dev->stats.rx_missed_errors++; | ||
701 | } | ||
702 | if (status & RX_FIFO_FULL) | ||
703 | dev->stats.rx_fifo_errors++; | ||
704 | |||
705 | /* Mask off RX interrupt */ | ||
706 | iowrite16(ioread16(ioaddr + MIER) & ~RX_INTS, ioaddr + MIER); | ||
642 | netif_rx_schedule(dev, &lp->napi); | 707 | netif_rx_schedule(dev, &lp->napi); |
643 | iowrite16(TX_INT, ioaddr + MIER); | ||
644 | } | 708 | } |
645 | 709 | ||
646 | /* TX interrupt request */ | 710 | /* TX interrupt request */ |
647 | if (status & 0x10) | 711 | if (status & TX_INTS) |
648 | r6040_tx(dev); | 712 | r6040_tx(dev); |
649 | 713 | ||
650 | return IRQ_HANDLED; | 714 | return IRQ_HANDLED; |
@@ -660,52 +724,48 @@ static void r6040_poll_controller(struct net_device *dev) | |||
660 | #endif | 724 | #endif |
661 | 725 | ||
662 | /* Init RDC MAC */ | 726 | /* Init RDC MAC */ |
663 | static void r6040_up(struct net_device *dev) | 727 | static int r6040_up(struct net_device *dev) |
664 | { | 728 | { |
665 | struct r6040_private *lp = netdev_priv(dev); | 729 | struct r6040_private *lp = netdev_priv(dev); |
666 | void __iomem *ioaddr = lp->base; | 730 | void __iomem *ioaddr = lp->base; |
731 | int ret; | ||
667 | 732 | ||
668 | /* Initialise and alloc RX/TX buffers */ | 733 | /* Initialise and alloc RX/TX buffers */ |
669 | r6040_alloc_txbufs(dev); | 734 | r6040_init_txbufs(dev); |
670 | r6040_alloc_rxbufs(dev); | 735 | ret = r6040_alloc_rxbufs(dev); |
736 | if (ret) | ||
737 | return ret; | ||
671 | 738 | ||
672 | /* Buffer Size Register */ | ||
673 | iowrite16(MAX_BUF_SIZE, ioaddr + MR_BSR); | ||
674 | /* Read the PHY ID */ | 739 | /* Read the PHY ID */ |
675 | lp->switch_sig = phy_read(ioaddr, 0, 2); | 740 | lp->switch_sig = r6040_phy_read(ioaddr, 0, 2); |
676 | 741 | ||
677 | if (lp->switch_sig == ICPLUS_PHY_ID) { | 742 | if (lp->switch_sig == ICPLUS_PHY_ID) { |
678 | phy_write(ioaddr, 29, 31, 0x175C); /* Enable registers */ | 743 | r6040_phy_write(ioaddr, 29, 31, 0x175C); /* Enable registers */ |
679 | lp->phy_mode = 0x8000; | 744 | lp->phy_mode = 0x8000; |
680 | } else { | 745 | } else { |
681 | /* PHY Mode Check */ | 746 | /* PHY Mode Check */ |
682 | phy_write(ioaddr, lp->phy_addr, 4, PHY_CAP); | 747 | r6040_phy_write(ioaddr, lp->phy_addr, 4, PHY_CAP); |
683 | phy_write(ioaddr, lp->phy_addr, 0, PHY_MODE); | 748 | r6040_phy_write(ioaddr, lp->phy_addr, 0, PHY_MODE); |
684 | 749 | ||
685 | if (PHY_MODE == 0x3100) | 750 | if (PHY_MODE == 0x3100) |
686 | lp->phy_mode = phy_mode_chk(dev); | 751 | lp->phy_mode = r6040_phy_mode_chk(dev); |
687 | else | 752 | else |
688 | lp->phy_mode = (PHY_MODE & 0x0100) ? 0x8000:0x0; | 753 | lp->phy_mode = (PHY_MODE & 0x0100) ? 0x8000:0x0; |
689 | } | 754 | } |
690 | /* MAC Bus Control Register */ | ||
691 | iowrite16(MBCR_DEFAULT, ioaddr + MBCR); | ||
692 | 755 | ||
693 | /* MAC TX/RX Enable */ | 756 | /* Set duplex mode */ |
694 | lp->mcr0 |= lp->phy_mode; | 757 | lp->mcr0 |= lp->phy_mode; |
695 | iowrite16(lp->mcr0, ioaddr); | ||
696 | |||
697 | /* set interrupt waiting time and packet numbers */ | ||
698 | iowrite16(0x0F06, ioaddr + MT_ICR); | ||
699 | iowrite16(0x0F06, ioaddr + MR_ICR); | ||
700 | 758 | ||
701 | /* improve performance (by RDC guys) */ | 759 | /* improve performance (by RDC guys) */ |
702 | phy_write(ioaddr, 30, 17, (phy_read(ioaddr, 30, 17) | 0x4000)); | 760 | r6040_phy_write(ioaddr, 30, 17, (r6040_phy_read(ioaddr, 30, 17) | 0x4000)); |
703 | phy_write(ioaddr, 30, 17, ~((~phy_read(ioaddr, 30, 17)) | 0x2000)); | 761 | r6040_phy_write(ioaddr, 30, 17, ~((~r6040_phy_read(ioaddr, 30, 17)) | 0x2000)); |
704 | phy_write(ioaddr, 0, 19, 0x0000); | 762 | r6040_phy_write(ioaddr, 0, 19, 0x0000); |
705 | phy_write(ioaddr, 0, 30, 0x01F0); | 763 | r6040_phy_write(ioaddr, 0, 30, 0x01F0); |
706 | 764 | ||
707 | /* Interrupt Mask Register */ | 765 | /* Initialize all MAC registers */ |
708 | iowrite16(INT_MASK, ioaddr + MIER); | 766 | r6040_init_mac_regs(dev); |
767 | |||
768 | return 0; | ||
709 | } | 769 | } |
710 | 770 | ||
711 | /* | 771 | /* |
@@ -721,7 +781,7 @@ static void r6040_timer(unsigned long data) | |||
721 | 781 | ||
722 | /* Polling PHY Chip Status */ | 782 | /* Polling PHY Chip Status */ |
723 | if (PHY_MODE == 0x3100) | 783 | if (PHY_MODE == 0x3100) |
724 | phy_mode = phy_mode_chk(dev); | 784 | phy_mode = r6040_phy_mode_chk(dev); |
725 | else | 785 | else |
726 | phy_mode = (PHY_MODE & 0x0100) ? 0x8000:0x0; | 786 | phy_mode = (PHY_MODE & 0x0100) ? 0x8000:0x0; |
727 | 787 | ||
@@ -784,7 +844,14 @@ static int r6040_open(struct net_device *dev) | |||
784 | return -ENOMEM; | 844 | return -ENOMEM; |
785 | } | 845 | } |
786 | 846 | ||
787 | r6040_up(dev); | 847 | ret = r6040_up(dev); |
848 | if (ret) { | ||
849 | pci_free_consistent(lp->pdev, TX_DESC_SIZE, lp->tx_ring, | ||
850 | lp->tx_ring_dma); | ||
851 | pci_free_consistent(lp->pdev, RX_DESC_SIZE, lp->rx_ring, | ||
852 | lp->rx_ring_dma); | ||
853 | return ret; | ||
854 | } | ||
788 | 855 | ||
789 | napi_enable(&lp->napi); | 856 | napi_enable(&lp->napi); |
790 | netif_start_queue(dev); | 857 | netif_start_queue(dev); |
@@ -830,7 +897,7 @@ static int r6040_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
830 | descptr->skb_ptr = skb; | 897 | descptr->skb_ptr = skb; |
831 | descptr->buf = cpu_to_le32(pci_map_single(lp->pdev, | 898 | descptr->buf = cpu_to_le32(pci_map_single(lp->pdev, |
832 | skb->data, skb->len, PCI_DMA_TODEVICE)); | 899 | skb->data, skb->len, PCI_DMA_TODEVICE)); |
833 | descptr->status = 0x8000; | 900 | descptr->status = DSC_OWNER_MAC; |
834 | /* Trigger the MAC to check the TX descriptor */ | 901 | /* Trigger the MAC to check the TX descriptor */ |
835 | iowrite16(0x01, ioaddr + MTPR); | 902 | iowrite16(0x01, ioaddr + MTPR); |
836 | lp->tx_insert_ptr = descptr->vndescp; | 903 | lp->tx_insert_ptr = descptr->vndescp; |
@@ -987,24 +1054,27 @@ static int __devinit r6040_init_one(struct pci_dev *pdev, | |||
987 | 1054 | ||
988 | err = pci_enable_device(pdev); | 1055 | err = pci_enable_device(pdev); |
989 | if (err) | 1056 | if (err) |
990 | return err; | 1057 | goto err_out; |
991 | 1058 | ||
992 | /* this should always be supported */ | 1059 | /* this should always be supported */ |
993 | if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { | 1060 | err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); |
1061 | if (err) { | ||
994 | printk(KERN_ERR DRV_NAME "32-bit PCI DMA addresses" | 1062 | printk(KERN_ERR DRV_NAME "32-bit PCI DMA addresses" |
995 | "not supported by the card\n"); | 1063 | "not supported by the card\n"); |
996 | return -ENODEV; | 1064 | goto err_out; |
997 | } | 1065 | } |
998 | if (pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) { | 1066 | err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); |
1067 | if (err) { | ||
999 | printk(KERN_ERR DRV_NAME "32-bit PCI DMA addresses" | 1068 | printk(KERN_ERR DRV_NAME "32-bit PCI DMA addresses" |
1000 | "not supported by the card\n"); | 1069 | "not supported by the card\n"); |
1001 | return -ENODEV; | 1070 | goto err_out; |
1002 | } | 1071 | } |
1003 | 1072 | ||
1004 | /* IO Size check */ | 1073 | /* IO Size check */ |
1005 | if (pci_resource_len(pdev, 0) < io_size) { | 1074 | if (pci_resource_len(pdev, 0) < io_size) { |
1006 | printk(KERN_ERR "Insufficient PCI resources, aborting\n"); | 1075 | printk(KERN_ERR DRV_NAME "Insufficient PCI resources, aborting\n"); |
1007 | return -EIO; | 1076 | err = -EIO; |
1077 | goto err_out; | ||
1008 | } | 1078 | } |
1009 | 1079 | ||
1010 | pioaddr = pci_resource_start(pdev, 0); /* IO map base address */ | 1080 | pioaddr = pci_resource_start(pdev, 0); /* IO map base address */ |
@@ -1012,24 +1082,26 @@ static int __devinit r6040_init_one(struct pci_dev *pdev, | |||
1012 | 1082 | ||
1013 | dev = alloc_etherdev(sizeof(struct r6040_private)); | 1083 | dev = alloc_etherdev(sizeof(struct r6040_private)); |
1014 | if (!dev) { | 1084 | if (!dev) { |
1015 | printk(KERN_ERR "Failed to allocate etherdev\n"); | 1085 | printk(KERN_ERR DRV_NAME "Failed to allocate etherdev\n"); |
1016 | return -ENOMEM; | 1086 | err = -ENOMEM; |
1087 | goto err_out; | ||
1017 | } | 1088 | } |
1018 | SET_NETDEV_DEV(dev, &pdev->dev); | 1089 | SET_NETDEV_DEV(dev, &pdev->dev); |
1019 | lp = netdev_priv(dev); | 1090 | lp = netdev_priv(dev); |
1020 | lp->pdev = pdev; | ||
1021 | 1091 | ||
1022 | if (pci_request_regions(pdev, DRV_NAME)) { | 1092 | err = pci_request_regions(pdev, DRV_NAME); |
1093 | |||
1094 | if (err) { | ||
1023 | printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n"); | 1095 | printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n"); |
1024 | err = -ENODEV; | 1096 | goto err_out_free_dev; |
1025 | goto err_out_disable; | ||
1026 | } | 1097 | } |
1027 | 1098 | ||
1028 | ioaddr = pci_iomap(pdev, bar, io_size); | 1099 | ioaddr = pci_iomap(pdev, bar, io_size); |
1029 | if (!ioaddr) { | 1100 | if (!ioaddr) { |
1030 | printk(KERN_ERR "ioremap failed for device %s\n", | 1101 | printk(KERN_ERR "ioremap failed for device %s\n", |
1031 | pci_name(pdev)); | 1102 | pci_name(pdev)); |
1032 | return -EIO; | 1103 | err = -EIO; |
1104 | goto err_out_free_res; | ||
1033 | } | 1105 | } |
1034 | 1106 | ||
1035 | /* Init system & device */ | 1107 | /* Init system & device */ |
@@ -1049,6 +1121,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev, | |||
1049 | 1121 | ||
1050 | /* Link new device into r6040_root_dev */ | 1122 | /* Link new device into r6040_root_dev */ |
1051 | lp->pdev = pdev; | 1123 | lp->pdev = pdev; |
1124 | lp->dev = dev; | ||
1052 | 1125 | ||
1053 | /* Init RDC private data */ | 1126 | /* Init RDC private data */ |
1054 | lp->mcr0 = 0x1002; | 1127 | lp->mcr0 = 0x1002; |
@@ -1070,8 +1143,8 @@ static int __devinit r6040_init_one(struct pci_dev *pdev, | |||
1070 | #endif | 1143 | #endif |
1071 | netif_napi_add(dev, &lp->napi, r6040_poll, 64); | 1144 | netif_napi_add(dev, &lp->napi, r6040_poll, 64); |
1072 | lp->mii_if.dev = dev; | 1145 | lp->mii_if.dev = dev; |
1073 | lp->mii_if.mdio_read = mdio_read; | 1146 | lp->mii_if.mdio_read = r6040_mdio_read; |
1074 | lp->mii_if.mdio_write = mdio_write; | 1147 | lp->mii_if.mdio_write = r6040_mdio_write; |
1075 | lp->mii_if.phy_id = lp->phy_addr; | 1148 | lp->mii_if.phy_id = lp->phy_addr; |
1076 | lp->mii_if.phy_id_mask = 0x1f; | 1149 | lp->mii_if.phy_id_mask = 0x1f; |
1077 | lp->mii_if.reg_num_mask = 0x1f; | 1150 | lp->mii_if.reg_num_mask = 0x1f; |
@@ -1080,17 +1153,17 @@ static int __devinit r6040_init_one(struct pci_dev *pdev, | |||
1080 | err = register_netdev(dev); | 1153 | err = register_netdev(dev); |
1081 | if (err) { | 1154 | if (err) { |
1082 | printk(KERN_ERR DRV_NAME ": Failed to register net device\n"); | 1155 | printk(KERN_ERR DRV_NAME ": Failed to register net device\n"); |
1083 | goto err_out_res; | 1156 | goto err_out_unmap; |
1084 | } | 1157 | } |
1085 | return 0; | 1158 | return 0; |
1086 | 1159 | ||
1087 | err_out_res: | 1160 | err_out_unmap: |
1161 | pci_iounmap(pdev, ioaddr); | ||
1162 | err_out_free_res: | ||
1088 | pci_release_regions(pdev); | 1163 | pci_release_regions(pdev); |
1089 | err_out_disable: | 1164 | err_out_free_dev: |
1090 | pci_disable_device(pdev); | ||
1091 | pci_set_drvdata(pdev, NULL); | ||
1092 | free_netdev(dev); | 1165 | free_netdev(dev); |
1093 | 1166 | err_out: | |
1094 | return err; | 1167 | return err; |
1095 | } | 1168 | } |
1096 | 1169 | ||
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index cfe8829ed31f..a3e3895e5032 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -1418,8 +1418,10 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) | |||
1418 | 1418 | ||
1419 | rtl_hw_phy_config(dev); | 1419 | rtl_hw_phy_config(dev); |
1420 | 1420 | ||
1421 | dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); | 1421 | if (tp->mac_version <= RTL_GIGA_MAC_VER_06) { |
1422 | RTL_W8(0x82, 0x01); | 1422 | dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); |
1423 | RTL_W8(0x82, 0x01); | ||
1424 | } | ||
1423 | 1425 | ||
1424 | pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40); | 1426 | pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40); |
1425 | 1427 | ||
@@ -3032,13 +3034,7 @@ static void rtl_set_rx_mode(struct net_device *dev) | |||
3032 | tmp = rtl8169_rx_config | rx_mode | | 3034 | tmp = rtl8169_rx_config | rx_mode | |
3033 | (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask); | 3035 | (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask); |
3034 | 3036 | ||
3035 | if ((tp->mac_version == RTL_GIGA_MAC_VER_11) || | 3037 | if (tp->mac_version > RTL_GIGA_MAC_VER_06) { |
3036 | (tp->mac_version == RTL_GIGA_MAC_VER_12) || | ||
3037 | (tp->mac_version == RTL_GIGA_MAC_VER_13) || | ||
3038 | (tp->mac_version == RTL_GIGA_MAC_VER_14) || | ||
3039 | (tp->mac_version == RTL_GIGA_MAC_VER_15) || | ||
3040 | (tp->mac_version == RTL_GIGA_MAC_VER_16) || | ||
3041 | (tp->mac_version == RTL_GIGA_MAC_VER_17)) { | ||
3042 | u32 data = mc_filter[0]; | 3038 | u32 data = mc_filter[0]; |
3043 | 3039 | ||
3044 | mc_filter[0] = swab32(mc_filter[1]); | 3040 | mc_filter[0] = swab32(mc_filter[1]); |
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index 7b2015f9e469..45c72eebb3a7 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/in.h> | 19 | #include <linux/in.h> |
20 | #include <linux/crc32.h> | 20 | #include <linux/crc32.h> |
21 | #include <linux/ethtool.h> | 21 | #include <linux/ethtool.h> |
22 | #include <linux/topology.h> | ||
22 | #include "net_driver.h" | 23 | #include "net_driver.h" |
23 | #include "gmii.h" | 24 | #include "gmii.h" |
24 | #include "ethtool.h" | 25 | #include "ethtool.h" |
@@ -832,7 +833,23 @@ static void efx_probe_interrupts(struct efx_nic *efx) | |||
832 | if (efx->interrupt_mode == EFX_INT_MODE_MSIX) { | 833 | if (efx->interrupt_mode == EFX_INT_MODE_MSIX) { |
833 | BUG_ON(!pci_find_capability(efx->pci_dev, PCI_CAP_ID_MSIX)); | 834 | BUG_ON(!pci_find_capability(efx->pci_dev, PCI_CAP_ID_MSIX)); |
834 | 835 | ||
835 | efx->rss_queues = rss_cpus ? rss_cpus : num_online_cpus(); | 836 | if (rss_cpus == 0) { |
837 | cpumask_t core_mask; | ||
838 | int cpu; | ||
839 | |||
840 | cpus_clear(core_mask); | ||
841 | efx->rss_queues = 0; | ||
842 | for_each_online_cpu(cpu) { | ||
843 | if (!cpu_isset(cpu, core_mask)) { | ||
844 | ++efx->rss_queues; | ||
845 | cpus_or(core_mask, core_mask, | ||
846 | topology_core_siblings(cpu)); | ||
847 | } | ||
848 | } | ||
849 | } else { | ||
850 | efx->rss_queues = rss_cpus; | ||
851 | } | ||
852 | |||
836 | efx->rss_queues = min(efx->rss_queues, max_channel + 1); | 853 | efx->rss_queues = min(efx->rss_queues, max_channel + 1); |
837 | efx->rss_queues = min(efx->rss_queues, EFX_MAX_CHANNELS); | 854 | efx->rss_queues = min(efx->rss_queues, EFX_MAX_CHANNELS); |
838 | 855 | ||
@@ -1762,7 +1779,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) | |||
1762 | 1779 | ||
1763 | efx->reset_pending = method; | 1780 | efx->reset_pending = method; |
1764 | 1781 | ||
1765 | queue_work(efx->workqueue, &efx->reset_work); | 1782 | queue_work(efx->reset_workqueue, &efx->reset_work); |
1766 | } | 1783 | } |
1767 | 1784 | ||
1768 | /************************************************************************** | 1785 | /************************************************************************** |
@@ -1907,14 +1924,28 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type, | |||
1907 | goto fail1; | 1924 | goto fail1; |
1908 | } | 1925 | } |
1909 | 1926 | ||
1927 | efx->reset_workqueue = create_singlethread_workqueue("sfc_reset"); | ||
1928 | if (!efx->reset_workqueue) { | ||
1929 | rc = -ENOMEM; | ||
1930 | goto fail2; | ||
1931 | } | ||
1932 | |||
1910 | return 0; | 1933 | return 0; |
1911 | 1934 | ||
1935 | fail2: | ||
1936 | destroy_workqueue(efx->workqueue); | ||
1937 | efx->workqueue = NULL; | ||
1938 | |||
1912 | fail1: | 1939 | fail1: |
1913 | return rc; | 1940 | return rc; |
1914 | } | 1941 | } |
1915 | 1942 | ||
1916 | static void efx_fini_struct(struct efx_nic *efx) | 1943 | static void efx_fini_struct(struct efx_nic *efx) |
1917 | { | 1944 | { |
1945 | if (efx->reset_workqueue) { | ||
1946 | destroy_workqueue(efx->reset_workqueue); | ||
1947 | efx->reset_workqueue = NULL; | ||
1948 | } | ||
1918 | if (efx->workqueue) { | 1949 | if (efx->workqueue) { |
1919 | destroy_workqueue(efx->workqueue); | 1950 | destroy_workqueue(efx->workqueue); |
1920 | efx->workqueue = NULL; | 1951 | efx->workqueue = NULL; |
@@ -1977,7 +2008,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev) | |||
1977 | * scheduled from this point because efx_stop_all() has been | 2008 | * scheduled from this point because efx_stop_all() has been |
1978 | * called, we are no longer registered with driverlink, and | 2009 | * called, we are no longer registered with driverlink, and |
1979 | * the net_device's have been removed. */ | 2010 | * the net_device's have been removed. */ |
1980 | flush_workqueue(efx->workqueue); | 2011 | flush_workqueue(efx->reset_workqueue); |
1981 | 2012 | ||
1982 | efx_pci_remove_main(efx); | 2013 | efx_pci_remove_main(efx); |
1983 | 2014 | ||
@@ -2098,7 +2129,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev, | |||
2098 | * scheduled since efx_stop_all() has been called, and we | 2129 | * scheduled since efx_stop_all() has been called, and we |
2099 | * have not and never have been registered with either | 2130 | * have not and never have been registered with either |
2100 | * the rtnetlink or driverlink layers. */ | 2131 | * the rtnetlink or driverlink layers. */ |
2101 | cancel_work_sync(&efx->reset_work); | 2132 | flush_workqueue(efx->reset_workqueue); |
2102 | 2133 | ||
2103 | /* Retry if a recoverably reset event has been scheduled */ | 2134 | /* Retry if a recoverably reset event has been scheduled */ |
2104 | if ((efx->reset_pending != RESET_TYPE_INVISIBLE) && | 2135 | if ((efx->reset_pending != RESET_TYPE_INVISIBLE) && |
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c index 630406e142e5..9138ee5b7b7b 100644 --- a/drivers/net/sfc/falcon.c +++ b/drivers/net/sfc/falcon.c | |||
@@ -223,13 +223,8 @@ static struct i2c_algo_bit_data falcon_i2c_bit_operations = { | |||
223 | .getsda = falcon_getsda, | 223 | .getsda = falcon_getsda, |
224 | .getscl = falcon_getscl, | 224 | .getscl = falcon_getscl, |
225 | .udelay = 5, | 225 | .udelay = 5, |
226 | /* | 226 | /* Wait up to 50 ms for slave to let us pull SCL high */ |
227 | * This is the number of system clock ticks after which | 227 | .timeout = DIV_ROUND_UP(HZ, 20), |
228 | * i2c-algo-bit gives up waiting for SCL to become high. | ||
229 | * It must be at least 2 since the first tick can happen | ||
230 | * immediately after it starts waiting. | ||
231 | */ | ||
232 | .timeout = 2, | ||
233 | }; | 228 | }; |
234 | 229 | ||
235 | /************************************************************************** | 230 | /************************************************************************** |
@@ -2479,12 +2474,11 @@ int falcon_probe_nic(struct efx_nic *efx) | |||
2479 | 2474 | ||
2480 | /* Initialise I2C adapter */ | 2475 | /* Initialise I2C adapter */ |
2481 | efx->i2c_adap.owner = THIS_MODULE; | 2476 | efx->i2c_adap.owner = THIS_MODULE; |
2482 | efx->i2c_adap.class = I2C_CLASS_HWMON; | ||
2483 | nic_data->i2c_data = falcon_i2c_bit_operations; | 2477 | nic_data->i2c_data = falcon_i2c_bit_operations; |
2484 | nic_data->i2c_data.data = efx; | 2478 | nic_data->i2c_data.data = efx; |
2485 | efx->i2c_adap.algo_data = &nic_data->i2c_data; | 2479 | efx->i2c_adap.algo_data = &nic_data->i2c_data; |
2486 | efx->i2c_adap.dev.parent = &efx->pci_dev->dev; | 2480 | efx->i2c_adap.dev.parent = &efx->pci_dev->dev; |
2487 | strcpy(efx->i2c_adap.name, "SFC4000 GPIO"); | 2481 | strlcpy(efx->i2c_adap.name, "SFC4000 GPIO", sizeof(efx->i2c_adap.name)); |
2488 | rc = i2c_bit_add_bus(&efx->i2c_adap); | 2482 | rc = i2c_bit_add_bus(&efx->i2c_adap); |
2489 | if (rc) | 2483 | if (rc) |
2490 | goto fail5; | 2484 | goto fail5; |
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h index d803b86c647c..219c74a772c3 100644 --- a/drivers/net/sfc/net_driver.h +++ b/drivers/net/sfc/net_driver.h | |||
@@ -616,7 +616,9 @@ union efx_multicast_hash { | |||
616 | * @pci_dev: The PCI device | 616 | * @pci_dev: The PCI device |
617 | * @type: Controller type attributes | 617 | * @type: Controller type attributes |
618 | * @legacy_irq: IRQ number | 618 | * @legacy_irq: IRQ number |
619 | * @workqueue: Workqueue for resets, port reconfigures and the HW monitor | 619 | * @workqueue: Workqueue for port reconfigures and the HW monitor. |
620 | * Work items do not hold and must not acquire RTNL. | ||
621 | * @reset_workqueue: Workqueue for resets. Work item will acquire RTNL. | ||
620 | * @reset_work: Scheduled reset workitem | 622 | * @reset_work: Scheduled reset workitem |
621 | * @monitor_work: Hardware monitor workitem | 623 | * @monitor_work: Hardware monitor workitem |
622 | * @membase_phys: Memory BAR value as physical address | 624 | * @membase_phys: Memory BAR value as physical address |
@@ -684,6 +686,7 @@ struct efx_nic { | |||
684 | const struct efx_nic_type *type; | 686 | const struct efx_nic_type *type; |
685 | int legacy_irq; | 687 | int legacy_irq; |
686 | struct workqueue_struct *workqueue; | 688 | struct workqueue_struct *workqueue; |
689 | struct workqueue_struct *reset_workqueue; | ||
687 | struct work_struct reset_work; | 690 | struct work_struct reset_work; |
688 | struct delayed_work monitor_work; | 691 | struct delayed_work monitor_work; |
689 | resource_size_t membase_phys; | 692 | resource_size_t membase_phys; |
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c index a4bc812aa999..c69ba1395fa9 100644 --- a/drivers/net/sh_eth.c +++ b/drivers/net/sh_eth.c | |||
@@ -642,17 +642,12 @@ static void sh_eth_adjust_link(struct net_device *ndev) | |||
642 | | ECMR_DM, ioaddr + ECMR); | 642 | | ECMR_DM, ioaddr + ECMR); |
643 | new_state = 1; | 643 | new_state = 1; |
644 | mdp->link = phydev->link; | 644 | mdp->link = phydev->link; |
645 | netif_tx_schedule_all(ndev); | ||
646 | netif_carrier_on(ndev); | ||
647 | netif_start_queue(ndev); | ||
648 | } | 645 | } |
649 | } else if (mdp->link) { | 646 | } else if (mdp->link) { |
650 | new_state = 1; | 647 | new_state = 1; |
651 | mdp->link = PHY_DOWN; | 648 | mdp->link = PHY_DOWN; |
652 | mdp->speed = 0; | 649 | mdp->speed = 0; |
653 | mdp->duplex = -1; | 650 | mdp->duplex = -1; |
654 | netif_stop_queue(ndev); | ||
655 | netif_carrier_off(ndev); | ||
656 | } | 651 | } |
657 | 652 | ||
658 | if (new_state) | 653 | if (new_state) |
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c index 41d3ac45685f..a645e5028c14 100644 --- a/drivers/net/tc35815.c +++ b/drivers/net/tc35815.c | |||
@@ -672,7 +672,6 @@ static void tc_handle_link_change(struct net_device *dev) | |||
672 | if (dev->flags & IFF_PROMISC) | 672 | if (dev->flags & IFF_PROMISC) |
673 | tc35815_set_multicast_list(dev); | 673 | tc35815_set_multicast_list(dev); |
674 | #endif | 674 | #endif |
675 | netif_tx_schedule_all(dev); | ||
676 | } else { | 675 | } else { |
677 | lp->speed = 0; | 676 | lp->speed = 0; |
678 | lp->duplex = -1; | 677 | lp->duplex = -1; |
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c index bc30c6e8fea2..617ef41bdfea 100644 --- a/drivers/net/tulip/de4x5.c +++ b/drivers/net/tulip/de4x5.c | |||
@@ -5514,22 +5514,6 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
5514 | netif_wake_queue(dev); /* Unlock the TX ring */ | 5514 | netif_wake_queue(dev); /* Unlock the TX ring */ |
5515 | break; | 5515 | break; |
5516 | 5516 | ||
5517 | case DE4X5_SET_PROM: /* Set Promiscuous Mode */ | ||
5518 | if (!capable(CAP_NET_ADMIN)) return -EPERM; | ||
5519 | omr = inl(DE4X5_OMR); | ||
5520 | omr |= OMR_PR; | ||
5521 | outl(omr, DE4X5_OMR); | ||
5522 | dev->flags |= IFF_PROMISC; | ||
5523 | break; | ||
5524 | |||
5525 | case DE4X5_CLR_PROM: /* Clear Promiscuous Mode */ | ||
5526 | if (!capable(CAP_NET_ADMIN)) return -EPERM; | ||
5527 | omr = inl(DE4X5_OMR); | ||
5528 | omr &= ~OMR_PR; | ||
5529 | outl(omr, DE4X5_OMR); | ||
5530 | dev->flags &= ~IFF_PROMISC; | ||
5531 | break; | ||
5532 | |||
5533 | case DE4X5_SAY_BOO: /* Say "Boo!" to the kernel log file */ | 5517 | case DE4X5_SAY_BOO: /* Say "Boo!" to the kernel log file */ |
5534 | if (!capable(CAP_NET_ADMIN)) return -EPERM; | 5518 | if (!capable(CAP_NET_ADMIN)) return -EPERM; |
5535 | printk("%s: Boo!\n", dev->name); | 5519 | printk("%s: Boo!\n", dev->name); |
diff --git a/drivers/net/tulip/de4x5.h b/drivers/net/tulip/de4x5.h index f5f33b3eb067..9f2877438fb0 100644 --- a/drivers/net/tulip/de4x5.h +++ b/drivers/net/tulip/de4x5.h | |||
@@ -1004,8 +1004,7 @@ struct de4x5_ioctl { | |||
1004 | */ | 1004 | */ |
1005 | #define DE4X5_GET_HWADDR 0x01 /* Get the hardware address */ | 1005 | #define DE4X5_GET_HWADDR 0x01 /* Get the hardware address */ |
1006 | #define DE4X5_SET_HWADDR 0x02 /* Set the hardware address */ | 1006 | #define DE4X5_SET_HWADDR 0x02 /* Set the hardware address */ |
1007 | #define DE4X5_SET_PROM 0x03 /* Set Promiscuous Mode */ | 1007 | /* 0x03 and 0x04 were used before and are obsoleted now. Don't use them. */ |
1008 | #define DE4X5_CLR_PROM 0x04 /* Clear Promiscuous Mode */ | ||
1009 | #define DE4X5_SAY_BOO 0x05 /* Say "Boo!" to the kernel log file */ | 1008 | #define DE4X5_SAY_BOO 0x05 /* Say "Boo!" to the kernel log file */ |
1010 | #define DE4X5_GET_MCA 0x06 /* Get a multicast address */ | 1009 | #define DE4X5_GET_MCA 0x06 /* Get a multicast address */ |
1011 | #define DE4X5_SET_MCA 0x07 /* Set a multicast address */ | 1010 | #define DE4X5_SET_MCA 0x07 /* Set a multicast address */ |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index a82b32b40131..e6bbc639c2d0 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -900,7 +900,7 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, | |||
900 | if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV) | 900 | if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV) |
901 | return -EINVAL; | 901 | return -EINVAL; |
902 | rtnl_lock(); | 902 | rtnl_lock(); |
903 | ret = update_filter(&tun->txflt, (void *) __user arg); | 903 | ret = update_filter(&tun->txflt, (void __user *)arg); |
904 | rtnl_unlock(); | 904 | rtnl_unlock(); |
905 | return ret; | 905 | return ret; |
906 | 906 | ||
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index a934428a5890..0e061dfea78d 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c | |||
@@ -50,10 +50,18 @@ static int is_activesync(struct usb_interface_descriptor *desc) | |||
50 | && desc->bInterfaceProtocol == 1; | 50 | && desc->bInterfaceProtocol == 1; |
51 | } | 51 | } |
52 | 52 | ||
53 | static int is_wireless_rndis(struct usb_interface_descriptor *desc) | ||
54 | { | ||
55 | return desc->bInterfaceClass == USB_CLASS_WIRELESS_CONTROLLER | ||
56 | && desc->bInterfaceSubClass == 1 | ||
57 | && desc->bInterfaceProtocol == 3; | ||
58 | } | ||
59 | |||
53 | #else | 60 | #else |
54 | 61 | ||
55 | #define is_rndis(desc) 0 | 62 | #define is_rndis(desc) 0 |
56 | #define is_activesync(desc) 0 | 63 | #define is_activesync(desc) 0 |
64 | #define is_wireless_rndis(desc) 0 | ||
57 | 65 | ||
58 | #endif | 66 | #endif |
59 | 67 | ||
@@ -110,7 +118,8 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf) | |||
110 | * of cdc-acm, it'll fail RNDIS requests cleanly. | 118 | * of cdc-acm, it'll fail RNDIS requests cleanly. |
111 | */ | 119 | */ |
112 | rndis = is_rndis(&intf->cur_altsetting->desc) | 120 | rndis = is_rndis(&intf->cur_altsetting->desc) |
113 | || is_activesync(&intf->cur_altsetting->desc); | 121 | || is_activesync(&intf->cur_altsetting->desc) |
122 | || is_wireless_rndis(&intf->cur_altsetting->desc); | ||
114 | 123 | ||
115 | memset(info, 0, sizeof *info); | 124 | memset(info, 0, sizeof *info); |
116 | info->control = intf; | 125 | info->control = intf; |
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index 61c98beb4d17..bcd858c567e0 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c | |||
@@ -576,6 +576,10 @@ static const struct usb_device_id products [] = { | |||
576 | /* "ActiveSync" is an undocumented variant of RNDIS, used in WM5 */ | 576 | /* "ActiveSync" is an undocumented variant of RNDIS, used in WM5 */ |
577 | USB_INTERFACE_INFO(USB_CLASS_MISC, 1, 1), | 577 | USB_INTERFACE_INFO(USB_CLASS_MISC, 1, 1), |
578 | .driver_info = (unsigned long) &rndis_info, | 578 | .driver_info = (unsigned long) &rndis_info, |
579 | }, { | ||
580 | /* RNDIS for tethering */ | ||
581 | USB_INTERFACE_INFO(USB_CLASS_WIRELESS_CONTROLLER, 1, 3), | ||
582 | .driver_info = (unsigned long) &rndis_info, | ||
579 | }, | 583 | }, |
580 | { }, // END | 584 | { }, // END |
581 | }; | 585 | }; |
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c index 13d5882f1f21..3153fe9d7ce0 100644 --- a/drivers/net/wireless/hostap/hostap_hw.c +++ b/drivers/net/wireless/hostap/hostap_hw.c | |||
@@ -3101,6 +3101,7 @@ static void prism2_clear_set_tim_queue(local_info_t *local) | |||
3101 | * This is a natural nesting, which needs a split lock type. | 3101 | * This is a natural nesting, which needs a split lock type. |
3102 | */ | 3102 | */ |
3103 | static struct lock_class_key hostap_netdev_xmit_lock_key; | 3103 | static struct lock_class_key hostap_netdev_xmit_lock_key; |
3104 | static struct lock_class_key hostap_netdev_addr_lock_key; | ||
3104 | 3105 | ||
3105 | static void prism2_set_lockdep_class_one(struct net_device *dev, | 3106 | static void prism2_set_lockdep_class_one(struct net_device *dev, |
3106 | struct netdev_queue *txq, | 3107 | struct netdev_queue *txq, |
@@ -3112,6 +3113,8 @@ static void prism2_set_lockdep_class_one(struct net_device *dev, | |||
3112 | 3113 | ||
3113 | static void prism2_set_lockdep_class(struct net_device *dev) | 3114 | static void prism2_set_lockdep_class(struct net_device *dev) |
3114 | { | 3115 | { |
3116 | lockdep_set_class(&dev->addr_list_lock, | ||
3117 | &hostap_netdev_addr_lock_key); | ||
3115 | netdev_for_each_tx_queue(dev, prism2_set_lockdep_class_one, NULL); | 3118 | netdev_for_each_tx_queue(dev, prism2_set_lockdep_class_one, NULL); |
3116 | } | 3119 | } |
3117 | 3120 | ||
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c index 6e704608947c..1acfbcd3703c 100644 --- a/drivers/net/wireless/ipw2200.c +++ b/drivers/net/wireless/ipw2200.c | |||
@@ -4972,8 +4972,7 @@ static int ipw_queue_tx_reclaim(struct ipw_priv *priv, | |||
4972 | } | 4972 | } |
4973 | done: | 4973 | done: |
4974 | if ((ipw_tx_queue_space(q) > q->low_mark) && | 4974 | if ((ipw_tx_queue_space(q) > q->low_mark) && |
4975 | (qindex >= 0) && | 4975 | (qindex >= 0)) |
4976 | (priv->status & STATUS_ASSOCIATED) && netif_running(priv->net_dev)) | ||
4977 | netif_wake_queue(priv->net_dev); | 4976 | netif_wake_queue(priv->net_dev); |
4978 | used = q->first_empty - q->last_used; | 4977 | used = q->first_empty - q->last_used; |
4979 | if (used < 0) | 4978 | if (used < 0) |
@@ -10154,14 +10153,8 @@ static void init_sys_config(struct ipw_sys_config *sys_config) | |||
10154 | 10153 | ||
10155 | static int ipw_net_open(struct net_device *dev) | 10154 | static int ipw_net_open(struct net_device *dev) |
10156 | { | 10155 | { |
10157 | struct ipw_priv *priv = ieee80211_priv(dev); | ||
10158 | IPW_DEBUG_INFO("dev->open\n"); | 10156 | IPW_DEBUG_INFO("dev->open\n"); |
10159 | /* we should be verifying the device is ready to be opened */ | 10157 | netif_start_queue(dev); |
10160 | mutex_lock(&priv->mutex); | ||
10161 | if (!(priv->status & STATUS_RF_KILL_MASK) && | ||
10162 | (priv->status & STATUS_ASSOCIATED)) | ||
10163 | netif_start_queue(dev); | ||
10164 | mutex_unlock(&priv->mutex); | ||
10165 | return 0; | 10158 | return 0; |
10166 | } | 10159 | } |
10167 | 10160 | ||
@@ -10481,13 +10474,6 @@ static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb, | |||
10481 | IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size); | 10474 | IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size); |
10482 | spin_lock_irqsave(&priv->lock, flags); | 10475 | spin_lock_irqsave(&priv->lock, flags); |
10483 | 10476 | ||
10484 | if (!(priv->status & STATUS_ASSOCIATED)) { | ||
10485 | IPW_DEBUG_INFO("Tx attempt while not associated.\n"); | ||
10486 | priv->ieee->stats.tx_carrier_errors++; | ||
10487 | netif_stop_queue(dev); | ||
10488 | goto fail_unlock; | ||
10489 | } | ||
10490 | |||
10491 | #ifdef CONFIG_IPW2200_PROMISCUOUS | 10477 | #ifdef CONFIG_IPW2200_PROMISCUOUS |
10492 | if (rtap_iface && netif_running(priv->prom_net_dev)) | 10478 | if (rtap_iface && netif_running(priv->prom_net_dev)) |
10493 | ipw_handle_promiscuous_tx(priv, txb); | 10479 | ipw_handle_promiscuous_tx(priv, txb); |
@@ -10499,10 +10485,6 @@ static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb, | |||
10499 | spin_unlock_irqrestore(&priv->lock, flags); | 10485 | spin_unlock_irqrestore(&priv->lock, flags); |
10500 | 10486 | ||
10501 | return ret; | 10487 | return ret; |
10502 | |||
10503 | fail_unlock: | ||
10504 | spin_unlock_irqrestore(&priv->lock, flags); | ||
10505 | return 1; | ||
10506 | } | 10488 | } |
10507 | 10489 | ||
10508 | static struct net_device_stats *ipw_net_get_stats(struct net_device *dev) | 10490 | static struct net_device_stats *ipw_net_get_stats(struct net_device *dev) |
@@ -10703,13 +10685,6 @@ static void ipw_link_up(struct ipw_priv *priv) | |||
10703 | priv->last_packet_time = 0; | 10685 | priv->last_packet_time = 0; |
10704 | 10686 | ||
10705 | netif_carrier_on(priv->net_dev); | 10687 | netif_carrier_on(priv->net_dev); |
10706 | if (netif_queue_stopped(priv->net_dev)) { | ||
10707 | IPW_DEBUG_NOTIF("waking queue\n"); | ||
10708 | netif_wake_queue(priv->net_dev); | ||
10709 | } else { | ||
10710 | IPW_DEBUG_NOTIF("starting queue\n"); | ||
10711 | netif_start_queue(priv->net_dev); | ||
10712 | } | ||
10713 | 10688 | ||
10714 | cancel_delayed_work(&priv->request_scan); | 10689 | cancel_delayed_work(&priv->request_scan); |
10715 | cancel_delayed_work(&priv->request_direct_scan); | 10690 | cancel_delayed_work(&priv->request_direct_scan); |
@@ -10739,7 +10714,6 @@ static void ipw_link_down(struct ipw_priv *priv) | |||
10739 | { | 10714 | { |
10740 | ipw_led_link_down(priv); | 10715 | ipw_led_link_down(priv); |
10741 | netif_carrier_off(priv->net_dev); | 10716 | netif_carrier_off(priv->net_dev); |
10742 | netif_stop_queue(priv->net_dev); | ||
10743 | notify_wx_assoc_event(priv); | 10717 | notify_wx_assoc_event(priv); |
10744 | 10718 | ||
10745 | /* Cancel any queued work ... */ | 10719 | /* Cancel any queued work ... */ |
@@ -11419,7 +11393,6 @@ static void ipw_down(struct ipw_priv *priv) | |||
11419 | /* Clear all bits but the RF Kill */ | 11393 | /* Clear all bits but the RF Kill */ |
11420 | priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING; | 11394 | priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING; |
11421 | netif_carrier_off(priv->net_dev); | 11395 | netif_carrier_off(priv->net_dev); |
11422 | netif_stop_queue(priv->net_dev); | ||
11423 | 11396 | ||
11424 | ipw_stop_nic(priv); | 11397 | ipw_stop_nic(priv); |
11425 | 11398 | ||
@@ -11522,7 +11495,6 @@ static int ipw_prom_open(struct net_device *dev) | |||
11522 | 11495 | ||
11523 | IPW_DEBUG_INFO("prom dev->open\n"); | 11496 | IPW_DEBUG_INFO("prom dev->open\n"); |
11524 | netif_carrier_off(dev); | 11497 | netif_carrier_off(dev); |
11525 | netif_stop_queue(dev); | ||
11526 | 11498 | ||
11527 | if (priv->ieee->iw_mode != IW_MODE_MONITOR) { | 11499 | if (priv->ieee->iw_mode != IW_MODE_MONITOR) { |
11528 | priv->sys_config.accept_all_data_frames = 1; | 11500 | priv->sys_config.accept_all_data_frames = 1; |
@@ -11558,7 +11530,6 @@ static int ipw_prom_stop(struct net_device *dev) | |||
11558 | static int ipw_prom_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | 11530 | static int ipw_prom_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) |
11559 | { | 11531 | { |
11560 | IPW_DEBUG_INFO("prom dev->xmit\n"); | 11532 | IPW_DEBUG_INFO("prom dev->xmit\n"); |
11561 | netif_stop_queue(dev); | ||
11562 | return -EOPNOTSUPP; | 11533 | return -EOPNOTSUPP; |
11563 | } | 11534 | } |
11564 | 11535 | ||
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 913dc9fe08f9..5816230d58f8 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
@@ -364,8 +364,7 @@ static void mac80211_hwsim_free(void) | |||
364 | struct mac80211_hwsim_data *data; | 364 | struct mac80211_hwsim_data *data; |
365 | data = hwsim_radios[i]->priv; | 365 | data = hwsim_radios[i]->priv; |
366 | ieee80211_unregister_hw(hwsim_radios[i]); | 366 | ieee80211_unregister_hw(hwsim_radios[i]); |
367 | if (!IS_ERR(data->dev)) | 367 | device_unregister(data->dev); |
368 | device_unregister(data->dev); | ||
369 | ieee80211_free_hw(hwsim_radios[i]); | 368 | ieee80211_free_hw(hwsim_radios[i]); |
370 | } | 369 | } |
371 | } | 370 | } |
@@ -437,7 +436,7 @@ static int __init init_mac80211_hwsim(void) | |||
437 | "mac80211_hwsim: device_create_drvdata " | 436 | "mac80211_hwsim: device_create_drvdata " |
438 | "failed (%ld)\n", PTR_ERR(data->dev)); | 437 | "failed (%ld)\n", PTR_ERR(data->dev)); |
439 | err = -ENOMEM; | 438 | err = -ENOMEM; |
440 | goto failed; | 439 | goto failed_drvdata; |
441 | } | 440 | } |
442 | data->dev->driver = &mac80211_hwsim_driver; | 441 | data->dev->driver = &mac80211_hwsim_driver; |
443 | 442 | ||
@@ -461,7 +460,7 @@ static int __init init_mac80211_hwsim(void) | |||
461 | if (err < 0) { | 460 | if (err < 0) { |
462 | printk(KERN_DEBUG "mac80211_hwsim: " | 461 | printk(KERN_DEBUG "mac80211_hwsim: " |
463 | "ieee80211_register_hw failed (%d)\n", err); | 462 | "ieee80211_register_hw failed (%d)\n", err); |
464 | goto failed; | 463 | goto failed_hw; |
465 | } | 464 | } |
466 | 465 | ||
467 | printk(KERN_DEBUG "%s: hwaddr %s registered\n", | 466 | printk(KERN_DEBUG "%s: hwaddr %s registered\n", |
@@ -479,9 +478,9 @@ static int __init init_mac80211_hwsim(void) | |||
479 | rtnl_lock(); | 478 | rtnl_lock(); |
480 | 479 | ||
481 | err = dev_alloc_name(hwsim_mon, hwsim_mon->name); | 480 | err = dev_alloc_name(hwsim_mon, hwsim_mon->name); |
482 | if (err < 0) { | 481 | if (err < 0) |
483 | goto failed_mon; | 482 | goto failed_mon; |
484 | } | 483 | |
485 | 484 | ||
486 | err = register_netdevice(hwsim_mon); | 485 | err = register_netdevice(hwsim_mon); |
487 | if (err < 0) | 486 | if (err < 0) |
@@ -494,7 +493,14 @@ static int __init init_mac80211_hwsim(void) | |||
494 | failed_mon: | 493 | failed_mon: |
495 | rtnl_unlock(); | 494 | rtnl_unlock(); |
496 | free_netdev(hwsim_mon); | 495 | free_netdev(hwsim_mon); |
496 | mac80211_hwsim_free(); | ||
497 | return err; | ||
497 | 498 | ||
499 | failed_hw: | ||
500 | device_unregister(data->dev); | ||
501 | failed_drvdata: | ||
502 | ieee80211_free_hw(hw); | ||
503 | hwsim_radios[i] = 0; | ||
498 | failed: | 504 | failed: |
499 | mac80211_hwsim_free(); | 505 | mac80211_hwsim_free(); |
500 | return err; | 506 | return err; |
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c index c644669a75c2..a08b1682c8e8 100644 --- a/drivers/s390/net/claw.c +++ b/drivers/s390/net/claw.c | |||
@@ -58,13 +58,13 @@ | |||
58 | * 1.10 Changes for Buffer allocation | 58 | * 1.10 Changes for Buffer allocation |
59 | * 1.15 Changed for 2.6 Kernel No longer compiles on 2.4 or lower | 59 | * 1.15 Changed for 2.6 Kernel No longer compiles on 2.4 or lower |
60 | * 1.25 Added Packing support | 60 | * 1.25 Added Packing support |
61 | * 1.5 | ||
61 | */ | 62 | */ |
62 | #include <asm/ccwdev.h> | 63 | #include <asm/ccwdev.h> |
63 | #include <asm/ccwgroup.h> | 64 | #include <asm/ccwgroup.h> |
64 | #include <asm/debug.h> | 65 | #include <asm/debug.h> |
65 | #include <asm/idals.h> | 66 | #include <asm/idals.h> |
66 | #include <asm/io.h> | 67 | #include <asm/io.h> |
67 | |||
68 | #include <linux/bitops.h> | 68 | #include <linux/bitops.h> |
69 | #include <linux/ctype.h> | 69 | #include <linux/ctype.h> |
70 | #include <linux/delay.h> | 70 | #include <linux/delay.h> |
@@ -90,36 +90,10 @@ | |||
90 | #include "cu3088.h" | 90 | #include "cu3088.h" |
91 | #include "claw.h" | 91 | #include "claw.h" |
92 | 92 | ||
93 | MODULE_AUTHOR("Andy Richter <richtera@us.ibm.com>"); | 93 | /* |
94 | MODULE_DESCRIPTION("Linux for zSeries CLAW Driver\n" \ | 94 | CLAW uses the s390dbf file system see claw_trace and claw_setup |
95 | "Copyright 2000,2005 IBM Corporation\n"); | ||
96 | MODULE_LICENSE("GPL"); | ||
97 | |||
98 | /* Debugging is based on DEBUGMSG, IOTRACE, or FUNCTRACE options: | ||
99 | DEBUGMSG - Enables output of various debug messages in the code | ||
100 | IOTRACE - Enables output of CCW and other IO related traces | ||
101 | FUNCTRACE - Enables output of function entry/exit trace | ||
102 | Define any combination of above options to enable tracing | ||
103 | |||
104 | CLAW also uses the s390dbf file system see claw_trace and claw_setup | ||
105 | */ | 95 | */ |
106 | 96 | ||
107 | /* following enables tracing */ | ||
108 | //#define DEBUGMSG | ||
109 | //#define IOTRACE | ||
110 | //#define FUNCTRACE | ||
111 | |||
112 | #ifdef DEBUGMSG | ||
113 | #define DEBUG | ||
114 | #endif | ||
115 | |||
116 | #ifdef IOTRACE | ||
117 | #define DEBUG | ||
118 | #endif | ||
119 | |||
120 | #ifdef FUNCTRACE | ||
121 | #define DEBUG | ||
122 | #endif | ||
123 | 97 | ||
124 | static char debug_buffer[255]; | 98 | static char debug_buffer[255]; |
125 | /** | 99 | /** |
@@ -146,7 +120,6 @@ claw_register_debug_facility(void) | |||
146 | claw_dbf_setup = debug_register("claw_setup", 2, 1, 8); | 120 | claw_dbf_setup = debug_register("claw_setup", 2, 1, 8); |
147 | claw_dbf_trace = debug_register("claw_trace", 2, 2, 8); | 121 | claw_dbf_trace = debug_register("claw_trace", 2, 2, 8); |
148 | if (claw_dbf_setup == NULL || claw_dbf_trace == NULL) { | 122 | if (claw_dbf_setup == NULL || claw_dbf_trace == NULL) { |
149 | printk(KERN_WARNING "Not enough memory for debug facility.\n"); | ||
150 | claw_unregister_debug_facility(); | 123 | claw_unregister_debug_facility(); |
151 | return -ENOMEM; | 124 | return -ENOMEM; |
152 | } | 125 | } |
@@ -232,9 +205,6 @@ static void probe_error( struct ccwgroup_device *cgdev); | |||
232 | static struct net_device_stats *claw_stats(struct net_device *dev); | 205 | static struct net_device_stats *claw_stats(struct net_device *dev); |
233 | static int pages_to_order_of_mag(int num_of_pages); | 206 | static int pages_to_order_of_mag(int num_of_pages); |
234 | static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr); | 207 | static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr); |
235 | #ifdef DEBUG | ||
236 | static void dumpit (char *buf, int len); | ||
237 | #endif | ||
238 | /* sysfs Functions */ | 208 | /* sysfs Functions */ |
239 | static ssize_t claw_hname_show(struct device *dev, struct device_attribute *attr, char *buf); | 209 | static ssize_t claw_hname_show(struct device *dev, struct device_attribute *attr, char *buf); |
240 | static ssize_t claw_hname_write(struct device *dev, struct device_attribute *attr, | 210 | static ssize_t claw_hname_write(struct device *dev, struct device_attribute *attr, |
@@ -263,12 +233,12 @@ static int claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl); | |||
263 | static int claw_snd_sys_validate_rsp(struct net_device *dev, | 233 | static int claw_snd_sys_validate_rsp(struct net_device *dev, |
264 | struct clawctl * p_ctl, __u32 return_code); | 234 | struct clawctl * p_ctl, __u32 return_code); |
265 | static int claw_strt_conn_req(struct net_device *dev ); | 235 | static int claw_strt_conn_req(struct net_device *dev ); |
266 | static void claw_strt_read ( struct net_device *dev, int lock ); | 236 | static void claw_strt_read(struct net_device *dev, int lock); |
267 | static void claw_strt_out_IO( struct net_device *dev ); | 237 | static void claw_strt_out_IO(struct net_device *dev); |
268 | static void claw_free_wrt_buf( struct net_device *dev ); | 238 | static void claw_free_wrt_buf(struct net_device *dev); |
269 | 239 | ||
270 | /* Functions for unpack reads */ | 240 | /* Functions for unpack reads */ |
271 | static void unpack_read (struct net_device *dev ); | 241 | static void unpack_read(struct net_device *dev); |
272 | 242 | ||
273 | /* ccwgroup table */ | 243 | /* ccwgroup table */ |
274 | 244 | ||
@@ -284,7 +254,6 @@ static struct ccwgroup_driver claw_group_driver = { | |||
284 | }; | 254 | }; |
285 | 255 | ||
286 | /* | 256 | /* |
287 | * | ||
288 | * Key functions | 257 | * Key functions |
289 | */ | 258 | */ |
290 | 259 | ||
@@ -298,23 +267,14 @@ claw_probe(struct ccwgroup_device *cgdev) | |||
298 | int rc; | 267 | int rc; |
299 | struct claw_privbk *privptr=NULL; | 268 | struct claw_privbk *privptr=NULL; |
300 | 269 | ||
301 | #ifdef FUNCTRACE | 270 | CLAW_DBF_TEXT(2, setup, "probe"); |
302 | printk(KERN_INFO "%s Enter\n",__func__); | ||
303 | #endif | ||
304 | CLAW_DBF_TEXT(2,setup,"probe"); | ||
305 | if (!get_device(&cgdev->dev)) | 271 | if (!get_device(&cgdev->dev)) |
306 | return -ENODEV; | 272 | return -ENODEV; |
307 | #ifdef DEBUGMSG | ||
308 | printk(KERN_INFO "claw: variable cgdev =\n"); | ||
309 | dumpit((char *)cgdev, sizeof(struct ccwgroup_device)); | ||
310 | #endif | ||
311 | privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL); | 273 | privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL); |
312 | if (privptr == NULL) { | 274 | if (privptr == NULL) { |
313 | probe_error(cgdev); | 275 | probe_error(cgdev); |
314 | put_device(&cgdev->dev); | 276 | put_device(&cgdev->dev); |
315 | printk(KERN_WARNING "Out of memory %s %s Exit Line %d \n", | 277 | CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM); |
316 | cgdev->cdev[0]->dev.bus_id,__func__,__LINE__); | ||
317 | CLAW_DBF_TEXT_(2,setup,"probex%d",-ENOMEM); | ||
318 | return -ENOMEM; | 278 | return -ENOMEM; |
319 | } | 279 | } |
320 | privptr->p_mtc_envelope= kzalloc( MAX_ENVELOPE_SIZE, GFP_KERNEL); | 280 | privptr->p_mtc_envelope= kzalloc( MAX_ENVELOPE_SIZE, GFP_KERNEL); |
@@ -322,9 +282,7 @@ claw_probe(struct ccwgroup_device *cgdev) | |||
322 | if ((privptr->p_mtc_envelope==NULL) || (privptr->p_env==NULL)) { | 282 | if ((privptr->p_mtc_envelope==NULL) || (privptr->p_env==NULL)) { |
323 | probe_error(cgdev); | 283 | probe_error(cgdev); |
324 | put_device(&cgdev->dev); | 284 | put_device(&cgdev->dev); |
325 | printk(KERN_WARNING "Out of memory %s %s Exit Line %d \n", | 285 | CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM); |
326 | cgdev->cdev[0]->dev.bus_id,__func__,__LINE__); | ||
327 | CLAW_DBF_TEXT_(2,setup,"probex%d",-ENOMEM); | ||
328 | return -ENOMEM; | 286 | return -ENOMEM; |
329 | } | 287 | } |
330 | memcpy(privptr->p_env->adapter_name,WS_NAME_NOT_DEF,8); | 288 | memcpy(privptr->p_env->adapter_name,WS_NAME_NOT_DEF,8); |
@@ -341,19 +299,14 @@ claw_probe(struct ccwgroup_device *cgdev) | |||
341 | put_device(&cgdev->dev); | 299 | put_device(&cgdev->dev); |
342 | printk(KERN_WARNING "add_files failed %s %s Exit Line %d \n", | 300 | printk(KERN_WARNING "add_files failed %s %s Exit Line %d \n", |
343 | cgdev->cdev[0]->dev.bus_id,__func__,__LINE__); | 301 | cgdev->cdev[0]->dev.bus_id,__func__,__LINE__); |
344 | CLAW_DBF_TEXT_(2,setup,"probex%d",rc); | 302 | CLAW_DBF_TEXT_(2, setup, "probex%d", rc); |
345 | return rc; | 303 | return rc; |
346 | } | 304 | } |
347 | printk(KERN_INFO "claw: sysfs files added for %s\n",cgdev->cdev[0]->dev.bus_id); | ||
348 | privptr->p_env->p_priv = privptr; | 305 | privptr->p_env->p_priv = privptr; |
349 | cgdev->cdev[0]->handler = claw_irq_handler; | 306 | cgdev->cdev[0]->handler = claw_irq_handler; |
350 | cgdev->cdev[1]->handler = claw_irq_handler; | 307 | cgdev->cdev[1]->handler = claw_irq_handler; |
351 | cgdev->dev.driver_data = privptr; | 308 | cgdev->dev.driver_data = privptr; |
352 | #ifdef FUNCTRACE | 309 | CLAW_DBF_TEXT(2, setup, "prbext 0"); |
353 | printk(KERN_INFO "claw:%s exit on line %d, " | ||
354 | "rc = 0\n",__func__,__LINE__); | ||
355 | #endif | ||
356 | CLAW_DBF_TEXT(2,setup,"prbext 0"); | ||
357 | 310 | ||
358 | return 0; | 311 | return 0; |
359 | } /* end of claw_probe */ | 312 | } /* end of claw_probe */ |
@@ -370,37 +323,18 @@ claw_tx(struct sk_buff *skb, struct net_device *dev) | |||
370 | unsigned long saveflags; | 323 | unsigned long saveflags; |
371 | struct chbk *p_ch; | 324 | struct chbk *p_ch; |
372 | 325 | ||
373 | #ifdef FUNCTRACE | 326 | CLAW_DBF_TEXT(4, trace, "claw_tx"); |
374 | printk(KERN_INFO "%s:%s enter\n",dev->name,__func__); | ||
375 | #endif | ||
376 | CLAW_DBF_TEXT(4,trace,"claw_tx"); | ||
377 | p_ch=&privptr->channel[WRITE]; | 327 | p_ch=&privptr->channel[WRITE]; |
378 | if (skb == NULL) { | 328 | if (skb == NULL) { |
379 | printk(KERN_WARNING "%s: null pointer passed as sk_buffer\n", | ||
380 | dev->name); | ||
381 | privptr->stats.tx_dropped++; | 329 | privptr->stats.tx_dropped++; |
382 | #ifdef FUNCTRACE | 330 | privptr->stats.tx_errors++; |
383 | printk(KERN_INFO "%s: %s() exit on line %d, rc = EIO\n", | 331 | CLAW_DBF_TEXT_(2, trace, "clawtx%d", -EIO); |
384 | dev->name,__func__, __LINE__); | ||
385 | #endif | ||
386 | CLAW_DBF_TEXT_(2,trace,"clawtx%d",-EIO); | ||
387 | return -EIO; | 332 | return -EIO; |
388 | } | 333 | } |
389 | |||
390 | #ifdef IOTRACE | ||
391 | printk(KERN_INFO "%s: variable sk_buff=\n",dev->name); | ||
392 | dumpit((char *) skb, sizeof(struct sk_buff)); | ||
393 | printk(KERN_INFO "%s: variable dev=\n",dev->name); | ||
394 | dumpit((char *) dev, sizeof(struct net_device)); | ||
395 | #endif | ||
396 | spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags); | 334 | spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags); |
397 | rc=claw_hw_tx( skb, dev, 1 ); | 335 | rc=claw_hw_tx( skb, dev, 1 ); |
398 | spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags); | 336 | spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags); |
399 | #ifdef FUNCTRACE | 337 | CLAW_DBF_TEXT_(4, trace, "clawtx%d", rc); |
400 | printk(KERN_INFO "%s:%s exit on line %d, rc = %d\n", | ||
401 | dev->name, __func__, __LINE__, rc); | ||
402 | #endif | ||
403 | CLAW_DBF_TEXT_(4,trace,"clawtx%d",rc); | ||
404 | return rc; | 338 | return rc; |
405 | } /* end of claw_tx */ | 339 | } /* end of claw_tx */ |
406 | 340 | ||
@@ -419,7 +353,7 @@ claw_pack_skb(struct claw_privbk *privptr) | |||
419 | 353 | ||
420 | new_skb = NULL; /* assume no dice */ | 354 | new_skb = NULL; /* assume no dice */ |
421 | pkt_cnt = 0; | 355 | pkt_cnt = 0; |
422 | CLAW_DBF_TEXT(4,trace,"PackSKBe"); | 356 | CLAW_DBF_TEXT(4, trace, "PackSKBe"); |
423 | if (!skb_queue_empty(&p_ch->collect_queue)) { | 357 | if (!skb_queue_empty(&p_ch->collect_queue)) { |
424 | /* some data */ | 358 | /* some data */ |
425 | held_skb = skb_dequeue(&p_ch->collect_queue); | 359 | held_skb = skb_dequeue(&p_ch->collect_queue); |
@@ -457,13 +391,8 @@ claw_pack_skb(struct claw_privbk *privptr) | |||
457 | skb_queue_head(&p_ch->collect_queue,held_skb); | 391 | skb_queue_head(&p_ch->collect_queue,held_skb); |
458 | } | 392 | } |
459 | } | 393 | } |
460 | #ifdef IOTRACE | ||
461 | printk(KERN_INFO "%s: %s() Packed %d len %d\n", | ||
462 | p_env->ndev->name, | ||
463 | __func__,pkt_cnt,new_skb->len); | ||
464 | #endif | ||
465 | } | 394 | } |
466 | CLAW_DBF_TEXT(4,trace,"PackSKBx"); | 395 | CLAW_DBF_TEXT(4, trace, "PackSKBx"); |
467 | return new_skb; | 396 | return new_skb; |
468 | } | 397 | } |
469 | 398 | ||
@@ -477,29 +406,12 @@ claw_change_mtu(struct net_device *dev, int new_mtu) | |||
477 | { | 406 | { |
478 | struct claw_privbk *privptr=dev->priv; | 407 | struct claw_privbk *privptr=dev->priv; |
479 | int buff_size; | 408 | int buff_size; |
480 | #ifdef FUNCTRACE | 409 | CLAW_DBF_TEXT(4, trace, "setmtu"); |
481 | printk(KERN_INFO "%s:%s Enter \n",dev->name,__func__); | ||
482 | #endif | ||
483 | #ifdef DEBUGMSG | ||
484 | printk(KERN_INFO "variable dev =\n"); | ||
485 | dumpit((char *) dev, sizeof(struct net_device)); | ||
486 | printk(KERN_INFO "variable new_mtu = %d\n", new_mtu); | ||
487 | #endif | ||
488 | CLAW_DBF_TEXT(4,trace,"setmtu"); | ||
489 | buff_size = privptr->p_env->write_size; | 410 | buff_size = privptr->p_env->write_size; |
490 | if ((new_mtu < 60) || (new_mtu > buff_size)) { | 411 | if ((new_mtu < 60) || (new_mtu > buff_size)) { |
491 | #ifdef FUNCTRACE | ||
492 | printk(KERN_INFO "%s:%s Exit on line %d, rc=EINVAL\n", | ||
493 | dev->name, | ||
494 | __func__, __LINE__); | ||
495 | #endif | ||
496 | return -EINVAL; | 412 | return -EINVAL; |
497 | } | 413 | } |
498 | dev->mtu = new_mtu; | 414 | dev->mtu = new_mtu; |
499 | #ifdef FUNCTRACE | ||
500 | printk(KERN_INFO "%s:%s Exit on line %d\n",dev->name, | ||
501 | __func__, __LINE__); | ||
502 | #endif | ||
503 | return 0; | 415 | return 0; |
504 | } /* end of claw_change_mtu */ | 416 | } /* end of claw_change_mtu */ |
505 | 417 | ||
@@ -521,24 +433,13 @@ claw_open(struct net_device *dev) | |||
521 | struct timer_list timer; | 433 | struct timer_list timer; |
522 | struct ccwbk *p_buf; | 434 | struct ccwbk *p_buf; |
523 | 435 | ||
524 | #ifdef FUNCTRACE | 436 | CLAW_DBF_TEXT(4, trace, "open"); |
525 | printk(KERN_INFO "%s:%s Enter \n",dev->name,__func__); | ||
526 | #endif | ||
527 | CLAW_DBF_TEXT(4,trace,"open"); | ||
528 | if (!dev || (dev->name[0] == 0x00)) { | ||
529 | CLAW_DBF_TEXT(2,trace,"BadDev"); | ||
530 | printk(KERN_WARNING "claw: Bad device at open failing \n"); | ||
531 | return -ENODEV; | ||
532 | } | ||
533 | privptr = (struct claw_privbk *)dev->priv; | 437 | privptr = (struct claw_privbk *)dev->priv; |
534 | /* allocate and initialize CCW blocks */ | 438 | /* allocate and initialize CCW blocks */ |
535 | if (privptr->buffs_alloc == 0) { | 439 | if (privptr->buffs_alloc == 0) { |
536 | rc=init_ccw_bk(dev); | 440 | rc=init_ccw_bk(dev); |
537 | if (rc) { | 441 | if (rc) { |
538 | printk(KERN_INFO "%s:%s Exit on line %d, rc=ENOMEM\n", | 442 | CLAW_DBF_TEXT(2, trace, "openmem"); |
539 | dev->name, | ||
540 | __func__, __LINE__); | ||
541 | CLAW_DBF_TEXT(2,trace,"openmem"); | ||
542 | return -ENOMEM; | 443 | return -ENOMEM; |
543 | } | 444 | } |
544 | } | 445 | } |
@@ -557,7 +458,7 @@ claw_open(struct net_device *dev) | |||
557 | tasklet_init(&privptr->channel[READ].tasklet, claw_irq_tasklet, | 458 | tasklet_init(&privptr->channel[READ].tasklet, claw_irq_tasklet, |
558 | (unsigned long) &privptr->channel[READ]); | 459 | (unsigned long) &privptr->channel[READ]); |
559 | for ( i = 0; i < 2; i++) { | 460 | for ( i = 0; i < 2; i++) { |
560 | CLAW_DBF_TEXT_(2,trace,"opn_ch%d",i); | 461 | CLAW_DBF_TEXT_(2, trace, "opn_ch%d", i); |
561 | init_waitqueue_head(&privptr->channel[i].wait); | 462 | init_waitqueue_head(&privptr->channel[i].wait); |
562 | /* skb_queue_head_init(&p_ch->io_queue); */ | 463 | /* skb_queue_head_init(&p_ch->io_queue); */ |
563 | if (i == WRITE) | 464 | if (i == WRITE) |
@@ -595,15 +496,8 @@ claw_open(struct net_device *dev) | |||
595 | ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) || | 496 | ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) || |
596 | (((privptr->channel[READ].flag | | 497 | (((privptr->channel[READ].flag | |
597 | privptr->channel[WRITE].flag) & CLAW_TIMER) != 0x00)) { | 498 | privptr->channel[WRITE].flag) & CLAW_TIMER) != 0x00)) { |
598 | #ifdef DEBUGMSG | ||
599 | printk(KERN_INFO "%s: channel problems during open - read:" | ||
600 | " %02x - write: %02x\n", | ||
601 | dev->name, | ||
602 | privptr->channel[READ].last_dstat, | ||
603 | privptr->channel[WRITE].last_dstat); | ||
604 | #endif | ||
605 | printk(KERN_INFO "%s: remote side is not ready\n", dev->name); | 499 | printk(KERN_INFO "%s: remote side is not ready\n", dev->name); |
606 | CLAW_DBF_TEXT(2,trace,"notrdy"); | 500 | CLAW_DBF_TEXT(2, trace, "notrdy"); |
607 | 501 | ||
608 | for ( i = 0; i < 2; i++) { | 502 | for ( i = 0; i < 2; i++) { |
609 | spin_lock_irqsave( | 503 | spin_lock_irqsave( |
@@ -659,23 +553,14 @@ claw_open(struct net_device *dev) | |||
659 | privptr->p_buff_read=NULL; | 553 | privptr->p_buff_read=NULL; |
660 | privptr->p_buff_write=NULL; | 554 | privptr->p_buff_write=NULL; |
661 | claw_clear_busy(dev); | 555 | claw_clear_busy(dev); |
662 | #ifdef FUNCTRACE | 556 | CLAW_DBF_TEXT(2, trace, "open EIO"); |
663 | printk(KERN_INFO "%s:%s Exit on line %d, rc=EIO\n", | ||
664 | dev->name,__func__,__LINE__); | ||
665 | #endif | ||
666 | CLAW_DBF_TEXT(2,trace,"open EIO"); | ||
667 | return -EIO; | 557 | return -EIO; |
668 | } | 558 | } |
669 | 559 | ||
670 | /* Send SystemValidate command */ | 560 | /* Send SystemValidate command */ |
671 | 561 | ||
672 | claw_clear_busy(dev); | 562 | claw_clear_busy(dev); |
673 | 563 | CLAW_DBF_TEXT(4, trace, "openok"); | |
674 | #ifdef FUNCTRACE | ||
675 | printk(KERN_INFO "%s:%s Exit on line %d, rc=0\n", | ||
676 | dev->name,__func__,__LINE__); | ||
677 | #endif | ||
678 | CLAW_DBF_TEXT(4,trace,"openok"); | ||
679 | return 0; | 564 | return 0; |
680 | } /* end of claw_open */ | 565 | } /* end of claw_open */ |
681 | 566 | ||
@@ -694,22 +579,14 @@ claw_irq_handler(struct ccw_device *cdev, | |||
694 | struct claw_env *p_env; | 579 | struct claw_env *p_env; |
695 | struct chbk *p_ch_r=NULL; | 580 | struct chbk *p_ch_r=NULL; |
696 | 581 | ||
697 | 582 | CLAW_DBF_TEXT(4, trace, "clawirq"); | |
698 | #ifdef FUNCTRACE | ||
699 | printk(KERN_INFO "%s enter \n",__func__); | ||
700 | #endif | ||
701 | CLAW_DBF_TEXT(4,trace,"clawirq"); | ||
702 | /* Bypass all 'unsolicited interrupts' */ | 583 | /* Bypass all 'unsolicited interrupts' */ |
703 | if (!cdev->dev.driver_data) { | 584 | if (!cdev->dev.driver_data) { |
704 | printk(KERN_WARNING "claw: unsolicited interrupt for device:" | 585 | printk(KERN_WARNING "claw: unsolicited interrupt for device:" |
705 | "%s received c-%02x d-%02x\n", | 586 | "%s received c-%02x d-%02x\n", |
706 | cdev->dev.bus_id, irb->scsw.cmd.cstat, | 587 | cdev->dev.bus_id, irb->scsw.cmd.cstat, |
707 | irb->scsw.cmd.dstat); | 588 | irb->scsw.cmd.dstat); |
708 | #ifdef FUNCTRACE | 589 | CLAW_DBF_TEXT(2, trace, "badirq"); |
709 | printk(KERN_INFO "claw: %s() " | ||
710 | "exit on line %d\n",__func__,__LINE__); | ||
711 | #endif | ||
712 | CLAW_DBF_TEXT(2,trace,"badirq"); | ||
713 | return; | 590 | return; |
714 | } | 591 | } |
715 | privptr = (struct claw_privbk *)cdev->dev.driver_data; | 592 | privptr = (struct claw_privbk *)cdev->dev.driver_data; |
@@ -722,41 +599,25 @@ claw_irq_handler(struct ccw_device *cdev, | |||
722 | else { | 599 | else { |
723 | printk(KERN_WARNING "claw: Can't determine channel for " | 600 | printk(KERN_WARNING "claw: Can't determine channel for " |
724 | "interrupt, device %s\n", cdev->dev.bus_id); | 601 | "interrupt, device %s\n", cdev->dev.bus_id); |
725 | CLAW_DBF_TEXT(2,trace,"badchan"); | 602 | CLAW_DBF_TEXT(2, trace, "badchan"); |
726 | return; | 603 | return; |
727 | } | 604 | } |
728 | CLAW_DBF_TEXT_(4,trace,"IRQCH=%d",p_ch->flag); | 605 | CLAW_DBF_TEXT_(4, trace, "IRQCH=%d", p_ch->flag); |
729 | 606 | ||
730 | dev = (struct net_device *) (p_ch->ndev); | 607 | dev = (struct net_device *) (p_ch->ndev); |
731 | p_env=privptr->p_env; | 608 | p_env=privptr->p_env; |
732 | 609 | ||
733 | #ifdef IOTRACE | ||
734 | printk(KERN_INFO "%s: interrupt for device: %04x " | ||
735 | "received c-%02x d-%02x state-%02x\n", | ||
736 | dev->name, p_ch->devno, irb->scsw.cmd.cstat, | ||
737 | irb->scsw.cmd.dstat, p_ch->claw_state); | ||
738 | #endif | ||
739 | |||
740 | /* Copy interruption response block. */ | 610 | /* Copy interruption response block. */ |
741 | memcpy(p_ch->irb, irb, sizeof(struct irb)); | 611 | memcpy(p_ch->irb, irb, sizeof(struct irb)); |
742 | 612 | ||
743 | /* Check for good subchannel return code, otherwise error message */ | 613 | /* Check for good subchannel return code, otherwise info message */ |
744 | if (irb->scsw.cmd.cstat && !(irb->scsw.cmd.cstat & SCHN_STAT_PCI)) { | 614 | if (irb->scsw.cmd.cstat && !(irb->scsw.cmd.cstat & SCHN_STAT_PCI)) { |
745 | printk(KERN_INFO "%s: subchannel check for device: %04x -" | 615 | printk(KERN_INFO "%s: subchannel check for device: %04x -" |
746 | " Sch Stat %02x Dev Stat %02x CPA - %04x\n", | 616 | " Sch Stat %02x Dev Stat %02x CPA - %04x\n", |
747 | dev->name, p_ch->devno, | 617 | dev->name, p_ch->devno, |
748 | irb->scsw.cmd.cstat, irb->scsw.cmd.dstat, | 618 | irb->scsw.cmd.cstat, irb->scsw.cmd.dstat, |
749 | irb->scsw.cmd.cpa); | 619 | irb->scsw.cmd.cpa); |
750 | #ifdef IOTRACE | 620 | CLAW_DBF_TEXT(2, trace, "chanchk"); |
751 | dumpit((char *)irb,sizeof(struct irb)); | ||
752 | dumpit((char *)(unsigned long)irb->scsw.cmd.cpa, | ||
753 | sizeof(struct ccw1)); | ||
754 | #endif | ||
755 | #ifdef FUNCTRACE | ||
756 | printk(KERN_INFO "%s:%s Exit on line %d\n", | ||
757 | dev->name,__func__,__LINE__); | ||
758 | #endif | ||
759 | CLAW_DBF_TEXT(2,trace,"chanchk"); | ||
760 | /* return; */ | 621 | /* return; */ |
761 | } | 622 | } |
762 | 623 | ||
@@ -768,233 +629,138 @@ claw_irq_handler(struct ccw_device *cdev, | |||
768 | p_ch->last_dstat = irb->scsw.cmd.dstat; | 629 | p_ch->last_dstat = irb->scsw.cmd.dstat; |
769 | 630 | ||
770 | switch (p_ch->claw_state) { | 631 | switch (p_ch->claw_state) { |
771 | case CLAW_STOP:/* HALT_IO by claw_release (halt sequence) */ | 632 | case CLAW_STOP:/* HALT_IO by claw_release (halt sequence) */ |
772 | #ifdef DEBUGMSG | 633 | if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) || |
773 | printk(KERN_INFO "%s: CLAW_STOP enter\n", dev->name); | 634 | (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) || |
774 | #endif | 635 | (p_ch->irb->scsw.cmd.stctl == |
775 | if (!((p_ch->irb->scsw.cmd.stctl & | 636 | (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) |
776 | SCSW_STCTL_SEC_STATUS) || | 637 | return; |
777 | (p_ch->irb->scsw.cmd.stctl == | 638 | wake_up(&p_ch->wait); /* wake up claw_release */ |
778 | SCSW_STCTL_STATUS_PEND) || | 639 | CLAW_DBF_TEXT(4, trace, "stop"); |
779 | (p_ch->irb->scsw.cmd.stctl == | 640 | return; |
780 | (SCSW_STCTL_ALERT_STATUS | | 641 | case CLAW_START_HALT_IO: /* HALT_IO issued by claw_open */ |
781 | SCSW_STCTL_STATUS_PEND)))) { | 642 | if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) || |
782 | #ifdef FUNCTRACE | 643 | (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) || |
783 | printk(KERN_INFO "%s:%s Exit on line %d\n", | 644 | (p_ch->irb->scsw.cmd.stctl == |
784 | dev->name,__func__,__LINE__); | 645 | (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { |
785 | #endif | 646 | CLAW_DBF_TEXT(4, trace, "haltio"); |
786 | return; | 647 | return; |
787 | } | 648 | } |
788 | wake_up(&p_ch->wait); /* wake up claw_release */ | 649 | if (p_ch->flag == CLAW_READ) { |
789 | 650 | p_ch->claw_state = CLAW_START_READ; | |
790 | #ifdef DEBUGMSG | 651 | wake_up(&p_ch->wait); /* wake claw_open (READ)*/ |
791 | printk(KERN_INFO "%s: CLAW_STOP exit\n", dev->name); | 652 | } else if (p_ch->flag == CLAW_WRITE) { |
792 | #endif | 653 | p_ch->claw_state = CLAW_START_WRITE; |
793 | #ifdef FUNCTRACE | 654 | /* send SYSTEM_VALIDATE */ |
794 | printk(KERN_INFO "%s:%s Exit on line %d\n", | 655 | claw_strt_read(dev, LOCK_NO); |
795 | dev->name,__func__,__LINE__); | 656 | claw_send_control(dev, |
796 | #endif | 657 | SYSTEM_VALIDATE_REQUEST, |
797 | CLAW_DBF_TEXT(4,trace,"stop"); | 658 | 0, 0, 0, |
798 | return; | 659 | p_env->host_name, |
799 | 660 | p_env->adapter_name); | |
800 | case CLAW_START_HALT_IO: /* HALT_IO issued by claw_open */ | 661 | } else { |
801 | #ifdef DEBUGMSG | 662 | printk(KERN_WARNING "claw: unsolicited " |
802 | printk(KERN_INFO "%s: process CLAW_STAT_HALT_IO\n", | 663 | "interrupt for device:" |
803 | dev->name); | 664 | "%s received c-%02x d-%02x\n", |
804 | #endif | 665 | cdev->dev.bus_id, |
805 | if (!((p_ch->irb->scsw.cmd.stctl & | 666 | irb->scsw.cmd.cstat, |
806 | SCSW_STCTL_SEC_STATUS) || | 667 | irb->scsw.cmd.dstat); |
807 | (p_ch->irb->scsw.cmd.stctl == | 668 | return; |
808 | SCSW_STCTL_STATUS_PEND) || | 669 | } |
809 | (p_ch->irb->scsw.cmd.stctl == | 670 | CLAW_DBF_TEXT(4, trace, "haltio"); |
810 | (SCSW_STCTL_ALERT_STATUS | | 671 | return; |
811 | SCSW_STCTL_STATUS_PEND)))) { | 672 | case CLAW_START_READ: |
812 | #ifdef FUNCTRACE | 673 | CLAW_DBF_TEXT(4, trace, "ReadIRQ"); |
813 | printk(KERN_INFO "%s:%s Exit on line %d\n", | 674 | if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { |
814 | dev->name,__func__,__LINE__); | 675 | clear_bit(0, (void *)&p_ch->IO_active); |
815 | #endif | 676 | if ((p_ch->irb->ecw[0] & 0x41) == 0x41 || |
816 | CLAW_DBF_TEXT(4,trace,"haltio"); | 677 | (p_ch->irb->ecw[0] & 0x40) == 0x40 || |
817 | return; | 678 | (p_ch->irb->ecw[0]) == 0) { |
818 | } | 679 | privptr->stats.rx_errors++; |
819 | if (p_ch->flag == CLAW_READ) { | 680 | printk(KERN_INFO "%s: Restart is " |
820 | p_ch->claw_state = CLAW_START_READ; | 681 | "required after remote " |
821 | wake_up(&p_ch->wait); /* wake claw_open (READ)*/ | 682 | "side recovers \n", |
822 | } | 683 | dev->name); |
684 | } | ||
685 | CLAW_DBF_TEXT(4, trace, "notrdy"); | ||
686 | return; | ||
687 | } | ||
688 | if ((p_ch->irb->scsw.cmd.cstat & SCHN_STAT_PCI) && | ||
689 | (p_ch->irb->scsw.cmd.dstat == 0)) { | ||
690 | if (test_and_set_bit(CLAW_BH_ACTIVE, | ||
691 | (void *)&p_ch->flag_a) == 0) | ||
692 | tasklet_schedule(&p_ch->tasklet); | ||
823 | else | 693 | else |
824 | if (p_ch->flag == CLAW_WRITE) { | 694 | CLAW_DBF_TEXT(4, trace, "PCINoBH"); |
825 | p_ch->claw_state = CLAW_START_WRITE; | 695 | CLAW_DBF_TEXT(4, trace, "PCI_read"); |
826 | /* send SYSTEM_VALIDATE */ | 696 | return; |
827 | claw_strt_read(dev, LOCK_NO); | 697 | } |
828 | claw_send_control(dev, | 698 | if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) || |
829 | SYSTEM_VALIDATE_REQUEST, | 699 | (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) || |
830 | 0, 0, 0, | 700 | (p_ch->irb->scsw.cmd.stctl == |
831 | p_env->host_name, | 701 | (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { |
832 | p_env->adapter_name ); | 702 | CLAW_DBF_TEXT(4, trace, "SPend_rd"); |
833 | } else { | 703 | return; |
834 | printk(KERN_WARNING "claw: unsolicited " | 704 | } |
835 | "interrupt for device:" | 705 | clear_bit(0, (void *)&p_ch->IO_active); |
836 | "%s received c-%02x d-%02x\n", | 706 | claw_clearbit_busy(TB_RETRY, dev); |
837 | cdev->dev.bus_id, | 707 | if (test_and_set_bit(CLAW_BH_ACTIVE, |
838 | irb->scsw.cmd.cstat, | 708 | (void *)&p_ch->flag_a) == 0) |
839 | irb->scsw.cmd.dstat); | 709 | tasklet_schedule(&p_ch->tasklet); |
840 | return; | 710 | else |
841 | } | 711 | CLAW_DBF_TEXT(4, trace, "RdBHAct"); |
842 | #ifdef DEBUGMSG | 712 | CLAW_DBF_TEXT(4, trace, "RdIRQXit"); |
843 | printk(KERN_INFO "%s: process CLAW_STAT_HALT_IO exit\n", | 713 | return; |
844 | dev->name); | 714 | case CLAW_START_WRITE: |
845 | #endif | 715 | if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { |
846 | #ifdef FUNCTRACE | 716 | printk(KERN_INFO "%s: Unit Check Occured in " |
847 | printk(KERN_INFO "%s:%s Exit on line %d\n", | 717 | "write channel\n", dev->name); |
848 | dev->name,__func__,__LINE__); | 718 | clear_bit(0, (void *)&p_ch->IO_active); |
849 | #endif | 719 | if (p_ch->irb->ecw[0] & 0x80) { |
850 | CLAW_DBF_TEXT(4,trace,"haltio"); | 720 | printk(KERN_INFO "%s: Resetting Event " |
851 | return; | 721 | "occurred:\n", dev->name); |
852 | case CLAW_START_READ: | 722 | init_timer(&p_ch->timer); |
853 | CLAW_DBF_TEXT(4,trace,"ReadIRQ"); | 723 | p_ch->timer.function = |
854 | if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { | 724 | (void *)claw_write_retry; |
855 | clear_bit(0, (void *)&p_ch->IO_active); | 725 | p_ch->timer.data = (unsigned long)p_ch; |
856 | if ((p_ch->irb->ecw[0] & 0x41) == 0x41 || | 726 | p_ch->timer.expires = jiffies + 10*HZ; |
857 | (p_ch->irb->ecw[0] & 0x40) == 0x40 || | 727 | add_timer(&p_ch->timer); |
858 | (p_ch->irb->ecw[0]) == 0) | 728 | printk(KERN_INFO "%s: write connection " |
859 | { | 729 | "restarting\n", dev->name); |
860 | privptr->stats.rx_errors++; | 730 | } |
861 | printk(KERN_INFO "%s: Restart is " | 731 | CLAW_DBF_TEXT(4, trace, "rstrtwrt"); |
862 | "required after remote " | 732 | return; |
863 | "side recovers \n", | 733 | } |
864 | dev->name); | 734 | if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) { |
865 | } | 735 | clear_bit(0, (void *)&p_ch->IO_active); |
866 | #ifdef FUNCTRACE | 736 | printk(KERN_INFO "%s: Unit Exception " |
867 | printk(KERN_INFO "%s:%s Exit on line %d\n", | 737 | "Occured in write channel\n", |
868 | dev->name,__func__,__LINE__); | 738 | dev->name); |
869 | #endif | 739 | } |
870 | CLAW_DBF_TEXT(4,trace,"notrdy"); | 740 | if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) || |
871 | return; | 741 | (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) || |
872 | } | 742 | (p_ch->irb->scsw.cmd.stctl == |
873 | if ((p_ch->irb->scsw.cmd.cstat & SCHN_STAT_PCI) && | 743 | (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { |
874 | (p_ch->irb->scsw.cmd.dstat == 0)) { | 744 | CLAW_DBF_TEXT(4, trace, "writeUE"); |
875 | if (test_and_set_bit(CLAW_BH_ACTIVE, | 745 | return; |
876 | (void *)&p_ch->flag_a) == 0) { | 746 | } |
877 | tasklet_schedule(&p_ch->tasklet); | 747 | clear_bit(0, (void *)&p_ch->IO_active); |
878 | } | 748 | if (claw_test_and_setbit_busy(TB_TX, dev) == 0) { |
879 | else { | 749 | claw_write_next(p_ch); |
880 | CLAW_DBF_TEXT(4,trace,"PCINoBH"); | 750 | claw_clearbit_busy(TB_TX, dev); |
881 | } | 751 | claw_clear_busy(dev); |
882 | #ifdef FUNCTRACE | 752 | } |
883 | printk(KERN_INFO "%s:%s Exit on line %d\n", | 753 | p_ch_r = (struct chbk *)&privptr->channel[READ]; |
884 | dev->name,__func__,__LINE__); | 754 | if (test_and_set_bit(CLAW_BH_ACTIVE, |
885 | #endif | 755 | (void *)&p_ch_r->flag_a) == 0) |
886 | CLAW_DBF_TEXT(4,trace,"PCI_read"); | 756 | tasklet_schedule(&p_ch_r->tasklet); |
887 | return; | 757 | CLAW_DBF_TEXT(4, trace, "StWtExit"); |
888 | } | 758 | return; |
889 | if (!((p_ch->irb->scsw.cmd.stctl & | 759 | default: |
890 | SCSW_STCTL_SEC_STATUS) || | 760 | printk(KERN_WARNING "%s: wrong selection code - irq " |
891 | (p_ch->irb->scsw.cmd.stctl == | 761 | "state=%d\n", dev->name, p_ch->claw_state); |
892 | SCSW_STCTL_STATUS_PEND) || | 762 | CLAW_DBF_TEXT(2, trace, "badIRQ"); |
893 | (p_ch->irb->scsw.cmd.stctl == | 763 | return; |
894 | (SCSW_STCTL_ALERT_STATUS | | ||
895 | SCSW_STCTL_STATUS_PEND)))) { | ||
896 | #ifdef FUNCTRACE | ||
897 | printk(KERN_INFO "%s:%s Exit on line %d\n", | ||
898 | dev->name,__func__,__LINE__); | ||
899 | #endif | ||
900 | CLAW_DBF_TEXT(4,trace,"SPend_rd"); | ||
901 | return; | ||
902 | } | ||
903 | clear_bit(0, (void *)&p_ch->IO_active); | ||
904 | claw_clearbit_busy(TB_RETRY,dev); | ||
905 | if (test_and_set_bit(CLAW_BH_ACTIVE, | ||
906 | (void *)&p_ch->flag_a) == 0) { | ||
907 | tasklet_schedule(&p_ch->tasklet); | ||
908 | } | ||
909 | else { | ||
910 | CLAW_DBF_TEXT(4,trace,"RdBHAct"); | ||
911 | } | ||
912 | |||
913 | #ifdef DEBUGMSG | ||
914 | printk(KERN_INFO "%s: process CLAW_START_READ exit\n", | ||
915 | dev->name); | ||
916 | #endif | ||
917 | #ifdef FUNCTRACE | ||
918 | printk(KERN_INFO "%s:%s Exit on line %d\n", | ||
919 | dev->name,__func__,__LINE__); | ||
920 | #endif | ||
921 | CLAW_DBF_TEXT(4,trace,"RdIRQXit"); | ||
922 | return; | ||
923 | case CLAW_START_WRITE: | ||
924 | if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { | ||
925 | printk(KERN_INFO "%s: Unit Check Occured in " | ||
926 | "write channel\n",dev->name); | ||
927 | clear_bit(0, (void *)&p_ch->IO_active); | ||
928 | if (p_ch->irb->ecw[0] & 0x80 ) { | ||
929 | printk(KERN_INFO "%s: Resetting Event " | ||
930 | "occurred:\n",dev->name); | ||
931 | init_timer(&p_ch->timer); | ||
932 | p_ch->timer.function = | ||
933 | (void *)claw_write_retry; | ||
934 | p_ch->timer.data = (unsigned long)p_ch; | ||
935 | p_ch->timer.expires = jiffies + 10*HZ; | ||
936 | add_timer(&p_ch->timer); | ||
937 | printk(KERN_INFO "%s: write connection " | ||
938 | "restarting\n",dev->name); | ||
939 | } | ||
940 | #ifdef FUNCTRACE | ||
941 | printk(KERN_INFO "%s:%s Exit on line %d\n", | ||
942 | dev->name,__func__,__LINE__); | ||
943 | #endif | ||
944 | CLAW_DBF_TEXT(4,trace,"rstrtwrt"); | ||
945 | return; | ||
946 | } | ||
947 | if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) { | ||
948 | clear_bit(0, (void *)&p_ch->IO_active); | ||
949 | printk(KERN_INFO "%s: Unit Exception " | ||
950 | "Occured in write channel\n", | ||
951 | dev->name); | ||
952 | } | ||
953 | if (!((p_ch->irb->scsw.cmd.stctl & | ||
954 | SCSW_STCTL_SEC_STATUS) || | ||
955 | (p_ch->irb->scsw.cmd.stctl == | ||
956 | SCSW_STCTL_STATUS_PEND) || | ||
957 | (p_ch->irb->scsw.cmd.stctl == | ||
958 | (SCSW_STCTL_ALERT_STATUS | | ||
959 | SCSW_STCTL_STATUS_PEND)))) { | ||
960 | #ifdef FUNCTRACE | ||
961 | printk(KERN_INFO "%s:%s Exit on line %d\n", | ||
962 | dev->name,__func__,__LINE__); | ||
963 | #endif | ||
964 | CLAW_DBF_TEXT(4,trace,"writeUE"); | ||
965 | return; | ||
966 | } | ||
967 | clear_bit(0, (void *)&p_ch->IO_active); | ||
968 | if (claw_test_and_setbit_busy(TB_TX,dev)==0) { | ||
969 | claw_write_next(p_ch); | ||
970 | claw_clearbit_busy(TB_TX,dev); | ||
971 | claw_clear_busy(dev); | ||
972 | } | ||
973 | p_ch_r=(struct chbk *)&privptr->channel[READ]; | ||
974 | if (test_and_set_bit(CLAW_BH_ACTIVE, | ||
975 | (void *)&p_ch_r->flag_a) == 0) { | ||
976 | tasklet_schedule(&p_ch_r->tasklet); | ||
977 | } | ||
978 | |||
979 | #ifdef DEBUGMSG | ||
980 | printk(KERN_INFO "%s: process CLAW_START_WRITE exit\n", | ||
981 | dev->name); | ||
982 | #endif | ||
983 | #ifdef FUNCTRACE | ||
984 | printk(KERN_INFO "%s:%s Exit on line %d\n", | ||
985 | dev->name,__func__,__LINE__); | ||
986 | #endif | ||
987 | CLAW_DBF_TEXT(4,trace,"StWtExit"); | ||
988 | return; | ||
989 | default: | ||
990 | printk(KERN_WARNING "%s: wrong selection code - irq " | ||
991 | "state=%d\n",dev->name,p_ch->claw_state); | ||
992 | #ifdef FUNCTRACE | ||
993 | printk(KERN_INFO "%s:%s Exit on line %d\n", | ||
994 | dev->name,__func__,__LINE__); | ||
995 | #endif | ||
996 | CLAW_DBF_TEXT(2,trace,"badIRQ"); | ||
997 | return; | ||
998 | } | 764 | } |
999 | 765 | ||
1000 | } /* end of claw_irq_handler */ | 766 | } /* end of claw_irq_handler */ |
@@ -1013,29 +779,11 @@ claw_irq_tasklet ( unsigned long data ) | |||
1013 | 779 | ||
1014 | p_ch = (struct chbk *) data; | 780 | p_ch = (struct chbk *) data; |
1015 | dev = (struct net_device *)p_ch->ndev; | 781 | dev = (struct net_device *)p_ch->ndev; |
1016 | #ifdef FUNCTRACE | 782 | CLAW_DBF_TEXT(4, trace, "IRQtask"); |
1017 | printk(KERN_INFO "%s:%s Enter \n",dev->name,__func__); | ||
1018 | #endif | ||
1019 | #ifdef DEBUGMSG | ||
1020 | printk(KERN_INFO "%s: variable p_ch =\n",dev->name); | ||
1021 | dumpit((char *) p_ch, sizeof(struct chbk)); | ||
1022 | #endif | ||
1023 | CLAW_DBF_TEXT(4,trace,"IRQtask"); | ||
1024 | |||
1025 | privptr = (struct claw_privbk *) dev->priv; | 783 | privptr = (struct claw_privbk *) dev->priv; |
1026 | |||
1027 | #ifdef DEBUGMSG | ||
1028 | printk(KERN_INFO "%s: bh routine - state-%02x\n" , | ||
1029 | dev->name, p_ch->claw_state); | ||
1030 | #endif | ||
1031 | |||
1032 | unpack_read(dev); | 784 | unpack_read(dev); |
1033 | clear_bit(CLAW_BH_ACTIVE, (void *)&p_ch->flag_a); | 785 | clear_bit(CLAW_BH_ACTIVE, (void *)&p_ch->flag_a); |
1034 | CLAW_DBF_TEXT(4,trace,"TskletXt"); | 786 | CLAW_DBF_TEXT(4, trace, "TskletXt"); |
1035 | #ifdef FUNCTRACE | ||
1036 | printk(KERN_INFO "%s:%s Exit on line %d\n", | ||
1037 | dev->name,__func__,__LINE__); | ||
1038 | #endif | ||
1039 | return; | 787 | return; |
1040 | } /* end of claw_irq_bh */ | 788 | } /* end of claw_irq_bh */ |
1041 | 789 | ||
@@ -1060,16 +808,7 @@ claw_release(struct net_device *dev) | |||
1060 | privptr = (struct claw_privbk *) dev->priv; | 808 | privptr = (struct claw_privbk *) dev->priv; |
1061 | if (!privptr) | 809 | if (!privptr) |
1062 | return 0; | 810 | return 0; |
1063 | #ifdef FUNCTRACE | 811 | CLAW_DBF_TEXT(4, trace, "release"); |
1064 | printk(KERN_INFO "%s:%s Enter \n",dev->name,__func__); | ||
1065 | #endif | ||
1066 | CLAW_DBF_TEXT(4,trace,"release"); | ||
1067 | #ifdef DEBUGMSG | ||
1068 | printk(KERN_INFO "%s: variable dev =\n",dev->name); | ||
1069 | dumpit((char *) dev, sizeof(struct net_device)); | ||
1070 | printk(KERN_INFO "Priv Buffalloc %d\n",privptr->buffs_alloc); | ||
1071 | printk(KERN_INFO "Priv p_buff_ccw = %p\n",&privptr->p_buff_ccw); | ||
1072 | #endif | ||
1073 | privptr->release_pend=1; | 812 | privptr->release_pend=1; |
1074 | claw_setbit_busy(TB_STOP,dev); | 813 | claw_setbit_busy(TB_STOP,dev); |
1075 | for ( i = 1; i >=0 ; i--) { | 814 | for ( i = 1; i >=0 ; i--) { |
@@ -1101,19 +840,15 @@ claw_release(struct net_device *dev) | |||
1101 | privptr->pk_skb = NULL; | 840 | privptr->pk_skb = NULL; |
1102 | } | 841 | } |
1103 | if(privptr->buffs_alloc != 1) { | 842 | if(privptr->buffs_alloc != 1) { |
1104 | #ifdef FUNCTRACE | 843 | CLAW_DBF_TEXT(4, trace, "none2fre"); |
1105 | printk(KERN_INFO "%s:%s Exit on line %d\n", | ||
1106 | dev->name,__func__,__LINE__); | ||
1107 | #endif | ||
1108 | CLAW_DBF_TEXT(4,trace,"none2fre"); | ||
1109 | return 0; | 844 | return 0; |
1110 | } | 845 | } |
1111 | CLAW_DBF_TEXT(4,trace,"freebufs"); | 846 | CLAW_DBF_TEXT(4, trace, "freebufs"); |
1112 | if (privptr->p_buff_ccw != NULL) { | 847 | if (privptr->p_buff_ccw != NULL) { |
1113 | free_pages((unsigned long)privptr->p_buff_ccw, | 848 | free_pages((unsigned long)privptr->p_buff_ccw, |
1114 | (int)pages_to_order_of_mag(privptr->p_buff_ccw_num)); | 849 | (int)pages_to_order_of_mag(privptr->p_buff_ccw_num)); |
1115 | } | 850 | } |
1116 | CLAW_DBF_TEXT(4,trace,"freeread"); | 851 | CLAW_DBF_TEXT(4, trace, "freeread"); |
1117 | if (privptr->p_env->read_size < PAGE_SIZE) { | 852 | if (privptr->p_env->read_size < PAGE_SIZE) { |
1118 | if (privptr->p_buff_read != NULL) { | 853 | if (privptr->p_buff_read != NULL) { |
1119 | free_pages((unsigned long)privptr->p_buff_read, | 854 | free_pages((unsigned long)privptr->p_buff_read, |
@@ -1129,7 +864,7 @@ claw_release(struct net_device *dev) | |||
1129 | p_buf=p_buf->next; | 864 | p_buf=p_buf->next; |
1130 | } | 865 | } |
1131 | } | 866 | } |
1132 | CLAW_DBF_TEXT(4,trace,"freewrit"); | 867 | CLAW_DBF_TEXT(4, trace, "freewrit"); |
1133 | if (privptr->p_env->write_size < PAGE_SIZE ) { | 868 | if (privptr->p_env->write_size < PAGE_SIZE ) { |
1134 | free_pages((unsigned long)privptr->p_buff_write, | 869 | free_pages((unsigned long)privptr->p_buff_write, |
1135 | (int)pages_to_order_of_mag(privptr->p_buff_write_num)); | 870 | (int)pages_to_order_of_mag(privptr->p_buff_write_num)); |
@@ -1143,7 +878,7 @@ claw_release(struct net_device *dev) | |||
1143 | p_buf=p_buf->next; | 878 | p_buf=p_buf->next; |
1144 | } | 879 | } |
1145 | } | 880 | } |
1146 | CLAW_DBF_TEXT(4,trace,"clearptr"); | 881 | CLAW_DBF_TEXT(4, trace, "clearptr"); |
1147 | privptr->buffs_alloc = 0; | 882 | privptr->buffs_alloc = 0; |
1148 | privptr->p_buff_ccw=NULL; | 883 | privptr->p_buff_ccw=NULL; |
1149 | privptr->p_buff_read=NULL; | 884 | privptr->p_buff_read=NULL; |
@@ -1180,18 +915,12 @@ claw_release(struct net_device *dev) | |||
1180 | dev->name, | 915 | dev->name, |
1181 | privptr->channel[READ].last_dstat, | 916 | privptr->channel[READ].last_dstat, |
1182 | privptr->channel[WRITE].last_dstat); | 917 | privptr->channel[WRITE].last_dstat); |
1183 | CLAW_DBF_TEXT(2,trace,"badclose"); | 918 | CLAW_DBF_TEXT(2, trace, "badclose"); |
1184 | } | 919 | } |
1185 | #ifdef FUNCTRACE | 920 | CLAW_DBF_TEXT(4, trace, "rlsexit"); |
1186 | printk(KERN_INFO "%s:%s Exit on line %d\n", | ||
1187 | dev->name,__func__,__LINE__); | ||
1188 | #endif | ||
1189 | CLAW_DBF_TEXT(4,trace,"rlsexit"); | ||
1190 | return 0; | 921 | return 0; |
1191 | } /* end of claw_release */ | 922 | } /* end of claw_release */ |
1192 | 923 | ||
1193 | |||
1194 | |||
1195 | /*-------------------------------------------------------------------* | 924 | /*-------------------------------------------------------------------* |
1196 | * claw_write_retry * | 925 | * claw_write_retry * |
1197 | * * | 926 | * * |
@@ -1203,32 +932,12 @@ claw_write_retry ( struct chbk *p_ch ) | |||
1203 | 932 | ||
1204 | struct net_device *dev=p_ch->ndev; | 933 | struct net_device *dev=p_ch->ndev; |
1205 | 934 | ||
1206 | 935 | CLAW_DBF_TEXT(4, trace, "w_retry"); | |
1207 | #ifdef FUNCTRACE | ||
1208 | printk(KERN_INFO "%s:%s Enter\n",dev->name,__func__); | ||
1209 | printk(KERN_INFO "claw: variable p_ch =\n"); | ||
1210 | dumpit((char *) p_ch, sizeof(struct chbk)); | ||
1211 | #endif | ||
1212 | CLAW_DBF_TEXT(4,trace,"w_retry"); | ||
1213 | if (p_ch->claw_state == CLAW_STOP) { | 936 | if (p_ch->claw_state == CLAW_STOP) { |
1214 | #ifdef FUNCTRACE | ||
1215 | printk(KERN_INFO "%s:%s Exit on line %d\n", | ||
1216 | dev->name,__func__,__LINE__); | ||
1217 | #endif | ||
1218 | return; | 937 | return; |
1219 | } | 938 | } |
1220 | #ifdef DEBUGMSG | ||
1221 | printk( KERN_INFO "%s:%s state-%02x\n" , | ||
1222 | dev->name, | ||
1223 | __func__, | ||
1224 | p_ch->claw_state); | ||
1225 | #endif | ||
1226 | claw_strt_out_IO( dev ); | 939 | claw_strt_out_IO( dev ); |
1227 | #ifdef FUNCTRACE | 940 | CLAW_DBF_TEXT(4, trace, "rtry_xit"); |
1228 | printk(KERN_INFO "%s:%s Exit on line %d\n", | ||
1229 | dev->name,__func__,__LINE__); | ||
1230 | #endif | ||
1231 | CLAW_DBF_TEXT(4,trace,"rtry_xit"); | ||
1232 | return; | 941 | return; |
1233 | } /* end of claw_write_retry */ | 942 | } /* end of claw_write_retry */ |
1234 | 943 | ||
@@ -1247,12 +956,7 @@ claw_write_next ( struct chbk * p_ch ) | |||
1247 | struct sk_buff *pk_skb; | 956 | struct sk_buff *pk_skb; |
1248 | int rc; | 957 | int rc; |
1249 | 958 | ||
1250 | #ifdef FUNCTRACE | 959 | CLAW_DBF_TEXT(4, trace, "claw_wrt"); |
1251 | printk(KERN_INFO "%s:%s Enter \n",p_ch->ndev->name,__func__); | ||
1252 | printk(KERN_INFO "%s: variable p_ch =\n",p_ch->ndev->name); | ||
1253 | dumpit((char *) p_ch, sizeof(struct chbk)); | ||
1254 | #endif | ||
1255 | CLAW_DBF_TEXT(4,trace,"claw_wrt"); | ||
1256 | if (p_ch->claw_state == CLAW_STOP) | 960 | if (p_ch->claw_state == CLAW_STOP) |
1257 | return; | 961 | return; |
1258 | dev = (struct net_device *) p_ch->ndev; | 962 | dev = (struct net_device *) p_ch->ndev; |
@@ -1272,11 +976,6 @@ claw_write_next ( struct chbk * p_ch ) | |||
1272 | if (privptr->p_write_active_first!=NULL) { | 976 | if (privptr->p_write_active_first!=NULL) { |
1273 | claw_strt_out_IO(dev); | 977 | claw_strt_out_IO(dev); |
1274 | } | 978 | } |
1275 | |||
1276 | #ifdef FUNCTRACE | ||
1277 | printk(KERN_INFO "%s:%s Exit on line %d\n", | ||
1278 | dev->name,__func__,__LINE__); | ||
1279 | #endif | ||
1280 | return; | 979 | return; |
1281 | } /* end of claw_write_next */ | 980 | } /* end of claw_write_next */ |
1282 | 981 | ||
@@ -1288,22 +987,12 @@ claw_write_next ( struct chbk * p_ch ) | |||
1288 | static void | 987 | static void |
1289 | claw_timer ( struct chbk * p_ch ) | 988 | claw_timer ( struct chbk * p_ch ) |
1290 | { | 989 | { |
1291 | #ifdef FUNCTRACE | 990 | CLAW_DBF_TEXT(4, trace, "timer"); |
1292 | printk(KERN_INFO "%s:%s Entry\n",p_ch->ndev->name,__func__); | ||
1293 | printk(KERN_INFO "%s: variable p_ch =\n",p_ch->ndev->name); | ||
1294 | dumpit((char *) p_ch, sizeof(struct chbk)); | ||
1295 | #endif | ||
1296 | CLAW_DBF_TEXT(4,trace,"timer"); | ||
1297 | p_ch->flag |= CLAW_TIMER; | 991 | p_ch->flag |= CLAW_TIMER; |
1298 | wake_up(&p_ch->wait); | 992 | wake_up(&p_ch->wait); |
1299 | #ifdef FUNCTRACE | ||
1300 | printk(KERN_INFO "%s:%s Exit on line %d\n", | ||
1301 | p_ch->ndev->name,__func__,__LINE__); | ||
1302 | #endif | ||
1303 | return; | 993 | return; |
1304 | } /* end of claw_timer */ | 994 | } /* end of claw_timer */ |
1305 | 995 | ||
1306 | |||
1307 | /* | 996 | /* |
1308 | * | 997 | * |
1309 | * functions | 998 | * functions |
@@ -1324,10 +1013,8 @@ pages_to_order_of_mag(int num_of_pages) | |||
1324 | { | 1013 | { |
1325 | int order_of_mag=1; /* assume 2 pages */ | 1014 | int order_of_mag=1; /* assume 2 pages */ |
1326 | int nump=2; | 1015 | int nump=2; |
1327 | #ifdef FUNCTRACE | 1016 | |
1328 | printk(KERN_INFO "%s Enter pages = %d \n",__func__,num_of_pages); | 1017 | CLAW_DBF_TEXT_(5, trace, "pages%d", num_of_pages); |
1329 | #endif | ||
1330 | CLAW_DBF_TEXT_(5,trace,"pages%d",num_of_pages); | ||
1331 | if (num_of_pages == 1) {return 0; } /* magnitude of 0 = 1 page */ | 1018 | if (num_of_pages == 1) {return 0; } /* magnitude of 0 = 1 page */ |
1332 | /* 512 pages = 2Meg on 4k page systems */ | 1019 | /* 512 pages = 2Meg on 4k page systems */ |
1333 | if (num_of_pages >= 512) {return 9; } | 1020 | if (num_of_pages >= 512) {return 9; } |
@@ -1338,11 +1025,7 @@ pages_to_order_of_mag(int num_of_pages) | |||
1338 | order_of_mag +=1; | 1025 | order_of_mag +=1; |
1339 | } | 1026 | } |
1340 | if (order_of_mag > 9) { order_of_mag = 9; } /* I know it's paranoid */ | 1027 | if (order_of_mag > 9) { order_of_mag = 9; } /* I know it's paranoid */ |
1341 | #ifdef FUNCTRACE | 1028 | CLAW_DBF_TEXT_(5, trace, "mag%d", order_of_mag); |
1342 | printk(KERN_INFO "%s Exit on line %d, order = %d\n", | ||
1343 | __func__,__LINE__, order_of_mag); | ||
1344 | #endif | ||
1345 | CLAW_DBF_TEXT_(5,trace,"mag%d",order_of_mag); | ||
1346 | return order_of_mag; | 1029 | return order_of_mag; |
1347 | } | 1030 | } |
1348 | 1031 | ||
@@ -1358,21 +1041,7 @@ add_claw_reads(struct net_device *dev, struct ccwbk* p_first, | |||
1358 | struct claw_privbk *privptr; | 1041 | struct claw_privbk *privptr; |
1359 | struct ccw1 temp_ccw; | 1042 | struct ccw1 temp_ccw; |
1360 | struct endccw * p_end; | 1043 | struct endccw * p_end; |
1361 | #ifdef IOTRACE | 1044 | CLAW_DBF_TEXT(4, trace, "addreads"); |
1362 | struct ccwbk* p_buf; | ||
1363 | #endif | ||
1364 | #ifdef FUNCTRACE | ||
1365 | printk(KERN_INFO "%s:%s Enter \n",dev->name,__func__); | ||
1366 | #endif | ||
1367 | #ifdef DEBUGMSG | ||
1368 | printk(KERN_INFO "dev\n"); | ||
1369 | dumpit((char *) dev, sizeof(struct net_device)); | ||
1370 | printk(KERN_INFO "p_first\n"); | ||
1371 | dumpit((char *) p_first, sizeof(struct ccwbk)); | ||
1372 | printk(KERN_INFO "p_last\n"); | ||
1373 | dumpit((char *) p_last, sizeof(struct ccwbk)); | ||
1374 | #endif | ||
1375 | CLAW_DBF_TEXT(4,trace,"addreads"); | ||
1376 | privptr = dev->priv; | 1045 | privptr = dev->priv; |
1377 | p_end = privptr->p_end_ccw; | 1046 | p_end = privptr->p_end_ccw; |
1378 | 1047 | ||
@@ -1380,11 +1049,7 @@ add_claw_reads(struct net_device *dev, struct ccwbk* p_first, | |||
1380 | * to apend the running channel programs | 1049 | * to apend the running channel programs |
1381 | */ | 1050 | */ |
1382 | if ( p_first==NULL) { | 1051 | if ( p_first==NULL) { |
1383 | #ifdef FUNCTRACE | 1052 | CLAW_DBF_TEXT(4, trace, "addexit"); |
1384 | printk(KERN_INFO "%s:%s Exit on line %d\n", | ||
1385 | dev->name,__func__,__LINE__); | ||
1386 | #endif | ||
1387 | CLAW_DBF_TEXT(4,trace,"addexit"); | ||
1388 | return 0; | 1053 | return 0; |
1389 | } | 1054 | } |
1390 | 1055 | ||
@@ -1411,21 +1076,11 @@ add_claw_reads(struct net_device *dev, struct ccwbk* p_first, | |||
1411 | } | 1076 | } |
1412 | 1077 | ||
1413 | if ( privptr-> p_read_active_first ==NULL ) { | 1078 | if ( privptr-> p_read_active_first ==NULL ) { |
1414 | #ifdef DEBUGMSG | ||
1415 | printk(KERN_INFO "%s:%s p_read_active_first == NULL \n", | ||
1416 | dev->name,__func__); | ||
1417 | printk(KERN_INFO "%s:%s Read active first/last changed \n", | ||
1418 | dev->name,__func__); | ||
1419 | #endif | ||
1420 | privptr-> p_read_active_first= p_first; /* set new first */ | 1079 | privptr-> p_read_active_first= p_first; /* set new first */ |
1421 | privptr-> p_read_active_last = p_last; /* set new last */ | 1080 | privptr-> p_read_active_last = p_last; /* set new last */ |
1422 | } | 1081 | } |
1423 | else { | 1082 | else { |
1424 | 1083 | ||
1425 | #ifdef DEBUGMSG | ||
1426 | printk(KERN_INFO "%s:%s Read in progress \n", | ||
1427 | dev->name,__func__); | ||
1428 | #endif | ||
1429 | /* set up TIC ccw */ | 1084 | /* set up TIC ccw */ |
1430 | temp_ccw.cda= (__u32)__pa(&p_first->read); | 1085 | temp_ccw.cda= (__u32)__pa(&p_first->read); |
1431 | temp_ccw.count=0; | 1086 | temp_ccw.count=0; |
@@ -1462,27 +1117,7 @@ add_claw_reads(struct net_device *dev, struct ccwbk* p_first, | |||
1462 | privptr->p_read_active_last->next = p_first; | 1117 | privptr->p_read_active_last->next = p_first; |
1463 | privptr->p_read_active_last=p_last; | 1118 | privptr->p_read_active_last=p_last; |
1464 | } /* end of if ( privptr-> p_read_active_first ==NULL) */ | 1119 | } /* end of if ( privptr-> p_read_active_first ==NULL) */ |
1465 | #ifdef IOTRACE | 1120 | CLAW_DBF_TEXT(4, trace, "addexit"); |
1466 | printk(KERN_INFO "%s:%s dump p_last CCW BK \n",dev->name,__func__); | ||
1467 | dumpit((char *)p_last, sizeof(struct ccwbk)); | ||
1468 | printk(KERN_INFO "%s:%s dump p_end CCW BK \n",dev->name,__func__); | ||
1469 | dumpit((char *)p_end, sizeof(struct endccw)); | ||
1470 | |||
1471 | printk(KERN_INFO "%s:%s dump p_first CCW BK \n",dev->name,__func__); | ||
1472 | dumpit((char *)p_first, sizeof(struct ccwbk)); | ||
1473 | printk(KERN_INFO "%s:%s Dump Active CCW chain \n", | ||
1474 | dev->name,__func__); | ||
1475 | p_buf=privptr->p_read_active_first; | ||
1476 | while (p_buf!=NULL) { | ||
1477 | dumpit((char *)p_buf, sizeof(struct ccwbk)); | ||
1478 | p_buf=p_buf->next; | ||
1479 | } | ||
1480 | #endif | ||
1481 | #ifdef FUNCTRACE | ||
1482 | printk(KERN_INFO "%s:%s Exit on line %d\n", | ||
1483 | dev->name,__func__,__LINE__); | ||
1484 | #endif | ||
1485 | CLAW_DBF_TEXT(4,trace,"addexit"); | ||
1486 | return 0; | 1121 | return 0; |
1487 | } /* end of add_claw_reads */ | 1122 | } /* end of add_claw_reads */ |
1488 | 1123 | ||
@@ -1494,44 +1129,29 @@ add_claw_reads(struct net_device *dev, struct ccwbk* p_first, | |||
1494 | static void | 1129 | static void |
1495 | ccw_check_return_code(struct ccw_device *cdev, int return_code) | 1130 | ccw_check_return_code(struct ccw_device *cdev, int return_code) |
1496 | { | 1131 | { |
1497 | #ifdef FUNCTRACE | 1132 | CLAW_DBF_TEXT(4, trace, "ccwret"); |
1498 | printk(KERN_INFO "%s: %s() > enter \n", | ||
1499 | cdev->dev.bus_id,__func__); | ||
1500 | #endif | ||
1501 | CLAW_DBF_TEXT(4,trace,"ccwret"); | ||
1502 | #ifdef DEBUGMSG | ||
1503 | printk(KERN_INFO "variable cdev =\n"); | ||
1504 | dumpit((char *) cdev, sizeof(struct ccw_device)); | ||
1505 | printk(KERN_INFO "variable return_code = %d\n",return_code); | ||
1506 | #endif | ||
1507 | if (return_code != 0) { | 1133 | if (return_code != 0) { |
1508 | switch (return_code) { | 1134 | switch (return_code) { |
1509 | case -EBUSY: | 1135 | case -EBUSY: /* BUSY is a transient state no action needed */ |
1510 | printk(KERN_INFO "%s: Busy !\n", | 1136 | break; |
1511 | cdev->dev.bus_id); | 1137 | case -ENODEV: |
1512 | break; | 1138 | printk(KERN_EMERG "%s: Missing device called " |
1513 | case -ENODEV: | 1139 | "for IO ENODEV\n", cdev->dev.bus_id); |
1514 | printk(KERN_EMERG "%s: Missing device called " | 1140 | break; |
1515 | "for IO ENODEV\n", cdev->dev.bus_id); | 1141 | case -EIO: |
1516 | break; | 1142 | printk(KERN_EMERG "%s: Status pending... EIO \n", |
1517 | case -EIO: | 1143 | cdev->dev.bus_id); |
1518 | printk(KERN_EMERG "%s: Status pending... EIO \n", | 1144 | break; |
1519 | cdev->dev.bus_id); | 1145 | case -EINVAL: |
1520 | break; | 1146 | printk(KERN_EMERG "%s: Invalid Dev State EINVAL \n", |
1521 | case -EINVAL: | 1147 | cdev->dev.bus_id); |
1522 | printk(KERN_EMERG "%s: Invalid Dev State EINVAL \n", | 1148 | break; |
1523 | cdev->dev.bus_id); | 1149 | default: |
1524 | break; | 1150 | printk(KERN_EMERG "%s: Unknown error in " |
1525 | default: | ||
1526 | printk(KERN_EMERG "%s: Unknown error in " | ||
1527 | "Do_IO %d\n",cdev->dev.bus_id, return_code); | 1151 | "Do_IO %d\n",cdev->dev.bus_id, return_code); |
1528 | } | 1152 | } |
1529 | } | 1153 | } |
1530 | #ifdef FUNCTRACE | 1154 | CLAW_DBF_TEXT(4, trace, "ccwret"); |
1531 | printk(KERN_INFO "%s: %s() > exit on line %d\n", | ||
1532 | cdev->dev.bus_id,__func__,__LINE__); | ||
1533 | #endif | ||
1534 | CLAW_DBF_TEXT(4,trace,"ccwret"); | ||
1535 | } /* end of ccw_check_return_code */ | 1155 | } /* end of ccw_check_return_code */ |
1536 | 1156 | ||
1537 | /*-------------------------------------------------------------------* | 1157 | /*-------------------------------------------------------------------* |
@@ -1541,173 +1161,46 @@ ccw_check_return_code(struct ccw_device *cdev, int return_code) | |||
1541 | static void | 1161 | static void |
1542 | ccw_check_unit_check(struct chbk * p_ch, unsigned char sense ) | 1162 | ccw_check_unit_check(struct chbk * p_ch, unsigned char sense ) |
1543 | { | 1163 | { |
1544 | struct net_device *dev = p_ch->ndev; | 1164 | struct net_device *ndev = p_ch->ndev; |
1545 | |||
1546 | #ifdef FUNCTRACE | ||
1547 | printk(KERN_INFO "%s: %s() > enter\n",dev->name,__func__); | ||
1548 | #endif | ||
1549 | #ifdef DEBUGMSG | ||
1550 | printk(KERN_INFO "%s: variable dev =\n",dev->name); | ||
1551 | dumpit((char *)dev, sizeof(struct net_device)); | ||
1552 | printk(KERN_INFO "%s: variable sense =\n",dev->name); | ||
1553 | dumpit((char *)&sense, 2); | ||
1554 | #endif | ||
1555 | CLAW_DBF_TEXT(4,trace,"unitchek"); | ||
1556 | 1165 | ||
1166 | CLAW_DBF_TEXT(4, trace, "unitchek"); | ||
1557 | printk(KERN_INFO "%s: Unit Check with sense byte:0x%04x\n", | 1167 | printk(KERN_INFO "%s: Unit Check with sense byte:0x%04x\n", |
1558 | dev->name, sense); | 1168 | ndev->name, sense); |
1559 | 1169 | ||
1560 | if (sense & 0x40) { | 1170 | if (sense & 0x40) { |
1561 | if (sense & 0x01) { | 1171 | if (sense & 0x01) { |
1562 | printk(KERN_WARNING "%s: Interface disconnect or " | 1172 | printk(KERN_WARNING "%s: Interface disconnect or " |
1563 | "Selective reset " | 1173 | "Selective reset " |
1564 | "occurred (remote side)\n", dev->name); | 1174 | "occurred (remote side)\n", ndev->name); |
1565 | } | 1175 | } |
1566 | else { | 1176 | else { |
1567 | printk(KERN_WARNING "%s: System reset occured" | 1177 | printk(KERN_WARNING "%s: System reset occured" |
1568 | " (remote side)\n", dev->name); | 1178 | " (remote side)\n", ndev->name); |
1569 | } | 1179 | } |
1570 | } | 1180 | } |
1571 | else if (sense & 0x20) { | 1181 | else if (sense & 0x20) { |
1572 | if (sense & 0x04) { | 1182 | if (sense & 0x04) { |
1573 | printk(KERN_WARNING "%s: Data-streaming " | 1183 | printk(KERN_WARNING "%s: Data-streaming " |
1574 | "timeout)\n", dev->name); | 1184 | "timeout)\n", ndev->name); |
1575 | } | 1185 | } |
1576 | else { | 1186 | else { |
1577 | printk(KERN_WARNING "%s: Data-transfer parity" | 1187 | printk(KERN_WARNING "%s: Data-transfer parity" |
1578 | " error\n", dev->name); | 1188 | " error\n", ndev->name); |
1579 | } | 1189 | } |
1580 | } | 1190 | } |
1581 | else if (sense & 0x10) { | 1191 | else if (sense & 0x10) { |
1582 | if (sense & 0x20) { | 1192 | if (sense & 0x20) { |
1583 | printk(KERN_WARNING "%s: Hardware malfunction " | 1193 | printk(KERN_WARNING "%s: Hardware malfunction " |
1584 | "(remote side)\n", dev->name); | 1194 | "(remote side)\n", ndev->name); |
1585 | } | 1195 | } |
1586 | else { | 1196 | else { |
1587 | printk(KERN_WARNING "%s: read-data parity error " | 1197 | printk(KERN_WARNING "%s: read-data parity error " |
1588 | "(remote side)\n", dev->name); | 1198 | "(remote side)\n", ndev->name); |
1589 | } | 1199 | } |
1590 | } | 1200 | } |
1591 | 1201 | ||
1592 | #ifdef FUNCTRACE | ||
1593 | printk(KERN_INFO "%s: %s() exit on line %d\n", | ||
1594 | dev->name,__func__,__LINE__); | ||
1595 | #endif | ||
1596 | } /* end of ccw_check_unit_check */ | 1202 | } /* end of ccw_check_unit_check */ |
1597 | 1203 | ||
1598 | |||
1599 | |||
1600 | /*-------------------------------------------------------------------* | ||
1601 | * Dump buffer format * | ||
1602 | * * | ||
1603 | *--------------------------------------------------------------------*/ | ||
1604 | #ifdef DEBUG | ||
1605 | static void | ||
1606 | dumpit(char* buf, int len) | ||
1607 | { | ||
1608 | |||
1609 | __u32 ct, sw, rm, dup; | ||
1610 | char *ptr, *rptr; | ||
1611 | char tbuf[82], tdup[82]; | ||
1612 | #if (CONFIG_64BIT) | ||
1613 | char addr[22]; | ||
1614 | #else | ||
1615 | char addr[12]; | ||
1616 | #endif | ||
1617 | char boff[12]; | ||
1618 | char bhex[82], duphex[82]; | ||
1619 | char basc[40]; | ||
1620 | |||
1621 | sw = 0; | ||
1622 | rptr =ptr=buf; | ||
1623 | rm = 16; | ||
1624 | duphex[0] = 0x00; | ||
1625 | dup = 0; | ||
1626 | for ( ct=0; ct < len; ct++, ptr++, rptr++ ) { | ||
1627 | if (sw == 0) { | ||
1628 | #if (CONFIG_64BIT) | ||
1629 | sprintf(addr, "%16.16lX",(unsigned long)rptr); | ||
1630 | #else | ||
1631 | sprintf(addr, "%8.8X",(__u32)rptr); | ||
1632 | #endif | ||
1633 | sprintf(boff, "%4.4X", (__u32)ct); | ||
1634 | bhex[0] = '\0'; | ||
1635 | basc[0] = '\0'; | ||
1636 | } | ||
1637 | if ((sw == 4) || (sw == 12)) { | ||
1638 | strcat(bhex, " "); | ||
1639 | } | ||
1640 | if (sw == 8) { | ||
1641 | strcat(bhex, " "); | ||
1642 | } | ||
1643 | #if (CONFIG_64BIT) | ||
1644 | sprintf(tbuf,"%2.2lX", (unsigned long)*ptr); | ||
1645 | #else | ||
1646 | sprintf(tbuf,"%2.2X", (__u32)*ptr); | ||
1647 | #endif | ||
1648 | tbuf[2] = '\0'; | ||
1649 | strcat(bhex, tbuf); | ||
1650 | if ((0!=isprint(*ptr)) && (*ptr >= 0x20)) { | ||
1651 | basc[sw] = *ptr; | ||
1652 | } | ||
1653 | else { | ||
1654 | basc[sw] = '.'; | ||
1655 | } | ||
1656 | basc[sw+1] = '\0'; | ||
1657 | sw++; | ||
1658 | rm--; | ||
1659 | if (sw==16) { | ||
1660 | if ((strcmp(duphex, bhex)) !=0) { | ||
1661 | if (dup !=0) { | ||
1662 | sprintf(tdup,"Duplicate as above to" | ||
1663 | " %s", addr); | ||
1664 | printk( KERN_INFO " " | ||
1665 | " --- %s ---\n",tdup); | ||
1666 | } | ||
1667 | printk( KERN_INFO " %s (+%s) : %s [%s]\n", | ||
1668 | addr, boff, bhex, basc); | ||
1669 | dup = 0; | ||
1670 | strcpy(duphex, bhex); | ||
1671 | } | ||
1672 | else { | ||
1673 | dup++; | ||
1674 | } | ||
1675 | sw = 0; | ||
1676 | rm = 16; | ||
1677 | } | ||
1678 | } /* endfor */ | ||
1679 | |||
1680 | if (sw != 0) { | ||
1681 | for ( ; rm > 0; rm--, sw++ ) { | ||
1682 | if ((sw==4) || (sw==12)) strcat(bhex, " "); | ||
1683 | if (sw==8) strcat(bhex, " "); | ||
1684 | strcat(bhex, " "); | ||
1685 | strcat(basc, " "); | ||
1686 | } | ||
1687 | if (dup !=0) { | ||
1688 | sprintf(tdup,"Duplicate as above to %s", addr); | ||
1689 | printk( KERN_INFO " --- %s ---\n", | ||
1690 | tdup); | ||
1691 | } | ||
1692 | printk( KERN_INFO " %s (+%s) : %s [%s]\n", | ||
1693 | addr, boff, bhex, basc); | ||
1694 | } | ||
1695 | else { | ||
1696 | if (dup >=1) { | ||
1697 | sprintf(tdup,"Duplicate as above to %s", addr); | ||
1698 | printk( KERN_INFO " --- %s ---\n", | ||
1699 | tdup); | ||
1700 | } | ||
1701 | if (dup !=0) { | ||
1702 | printk( KERN_INFO " %s (+%s) : %s [%s]\n", | ||
1703 | addr, boff, bhex, basc); | ||
1704 | } | ||
1705 | } | ||
1706 | return; | ||
1707 | |||
1708 | } /* end of dumpit */ | ||
1709 | #endif | ||
1710 | |||
1711 | /*-------------------------------------------------------------------* | 1204 | /*-------------------------------------------------------------------* |
1712 | * find_link * | 1205 | * find_link * |
1713 | *--------------------------------------------------------------------*/ | 1206 | *--------------------------------------------------------------------*/ |
@@ -1718,16 +1211,7 @@ find_link(struct net_device *dev, char *host_name, char *ws_name ) | |||
1718 | struct claw_env *p_env; | 1211 | struct claw_env *p_env; |
1719 | int rc=0; | 1212 | int rc=0; |
1720 | 1213 | ||
1721 | #ifdef FUNCTRACE | 1214 | CLAW_DBF_TEXT(2, setup, "findlink"); |
1722 | printk(KERN_INFO "%s:%s > enter \n",dev->name,__func__); | ||
1723 | #endif | ||
1724 | CLAW_DBF_TEXT(2,setup,"findlink"); | ||
1725 | #ifdef DEBUGMSG | ||
1726 | printk(KERN_INFO "%s: variable dev = \n",dev->name); | ||
1727 | dumpit((char *) dev, sizeof(struct net_device)); | ||
1728 | printk(KERN_INFO "%s: variable host_name = %s\n",dev->name, host_name); | ||
1729 | printk(KERN_INFO "%s: variable ws_name = %s\n",dev->name, ws_name); | ||
1730 | #endif | ||
1731 | privptr=dev->priv; | 1215 | privptr=dev->priv; |
1732 | p_env=privptr->p_env; | 1216 | p_env=privptr->p_env; |
1733 | switch (p_env->packing) | 1217 | switch (p_env->packing) |
@@ -1750,10 +1234,6 @@ find_link(struct net_device *dev, char *host_name, char *ws_name ) | |||
1750 | break; | 1234 | break; |
1751 | } | 1235 | } |
1752 | 1236 | ||
1753 | #ifdef FUNCTRACE | ||
1754 | printk(KERN_INFO "%s:%s Exit on line %d\n", | ||
1755 | dev->name,__func__,__LINE__); | ||
1756 | #endif | ||
1757 | return 0; | 1237 | return 0; |
1758 | } /* end of find_link */ | 1238 | } /* end of find_link */ |
1759 | 1239 | ||
@@ -1782,27 +1262,11 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid) | |||
1782 | int lock; | 1262 | int lock; |
1783 | struct clawph *pk_head; | 1263 | struct clawph *pk_head; |
1784 | struct chbk *ch; | 1264 | struct chbk *ch; |
1785 | #ifdef IOTRACE | 1265 | |
1786 | struct ccwbk *p_buf; | 1266 | CLAW_DBF_TEXT(4, trace, "hw_tx"); |
1787 | #endif | ||
1788 | #ifdef FUNCTRACE | ||
1789 | printk(KERN_INFO "%s: %s() > enter\n",dev->name,__func__); | ||
1790 | #endif | ||
1791 | CLAW_DBF_TEXT(4,trace,"hw_tx"); | ||
1792 | #ifdef DEBUGMSG | ||
1793 | printk(KERN_INFO "%s: variable dev skb =\n",dev->name); | ||
1794 | dumpit((char *) skb, sizeof(struct sk_buff)); | ||
1795 | printk(KERN_INFO "%s: variable dev =\n",dev->name); | ||
1796 | dumpit((char *) dev, sizeof(struct net_device)); | ||
1797 | printk(KERN_INFO "%s: variable linkid = %ld\n",dev->name,linkid); | ||
1798 | #endif | ||
1799 | privptr = (struct claw_privbk *) (dev->priv); | 1267 | privptr = (struct claw_privbk *) (dev->priv); |
1800 | p_ch=(struct chbk *)&privptr->channel[WRITE]; | 1268 | p_ch=(struct chbk *)&privptr->channel[WRITE]; |
1801 | p_env =privptr->p_env; | 1269 | p_env =privptr->p_env; |
1802 | #ifdef IOTRACE | ||
1803 | printk(KERN_INFO "%s: %s() dump sk_buff \n",dev->name,__func__); | ||
1804 | dumpit((char *)skb ,sizeof(struct sk_buff)); | ||
1805 | #endif | ||
1806 | claw_free_wrt_buf(dev); /* Clean up free chain if posible */ | 1270 | claw_free_wrt_buf(dev); /* Clean up free chain if posible */ |
1807 | /* scan the write queue to free any completed write packets */ | 1271 | /* scan the write queue to free any completed write packets */ |
1808 | p_first_ccw=NULL; | 1272 | p_first_ccw=NULL; |
@@ -1834,11 +1298,6 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid) | |||
1834 | claw_strt_out_IO(dev ); | 1298 | claw_strt_out_IO(dev ); |
1835 | claw_free_wrt_buf( dev ); | 1299 | claw_free_wrt_buf( dev ); |
1836 | if (privptr->write_free_count==0) { | 1300 | if (privptr->write_free_count==0) { |
1837 | #ifdef IOTRACE | ||
1838 | printk(KERN_INFO "%s: " | ||
1839 | "(claw_check_busy) no free write " | ||
1840 | "buffers\n", dev->name); | ||
1841 | #endif | ||
1842 | ch = &privptr->channel[WRITE]; | 1301 | ch = &privptr->channel[WRITE]; |
1843 | atomic_inc(&skb->users); | 1302 | atomic_inc(&skb->users); |
1844 | skb_queue_tail(&ch->collect_queue, skb); | 1303 | skb_queue_tail(&ch->collect_queue, skb); |
@@ -1851,10 +1310,6 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid) | |||
1851 | } | 1310 | } |
1852 | /* tx lock */ | 1311 | /* tx lock */ |
1853 | if (claw_test_and_setbit_busy(TB_TX,dev)) { /* set to busy */ | 1312 | if (claw_test_and_setbit_busy(TB_TX,dev)) { /* set to busy */ |
1854 | #ifdef DEBUGMSG | ||
1855 | printk(KERN_INFO "%s: busy (claw_test_and_setbit_" | ||
1856 | "busy)\n", dev->name); | ||
1857 | #endif | ||
1858 | ch = &privptr->channel[WRITE]; | 1313 | ch = &privptr->channel[WRITE]; |
1859 | atomic_inc(&skb->users); | 1314 | atomic_inc(&skb->users); |
1860 | skb_queue_tail(&ch->collect_queue, skb); | 1315 | skb_queue_tail(&ch->collect_queue, skb); |
@@ -1871,28 +1326,16 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid) | |||
1871 | privptr->p_write_free_chain == NULL ) { | 1326 | privptr->p_write_free_chain == NULL ) { |
1872 | 1327 | ||
1873 | claw_setbit_busy(TB_NOBUFFER,dev); | 1328 | claw_setbit_busy(TB_NOBUFFER,dev); |
1874 | |||
1875 | #ifdef DEBUGMSG | ||
1876 | printk(KERN_INFO "%s: busy (claw_setbit_busy" | ||
1877 | "(TB_NOBUFFER))\n", dev->name); | ||
1878 | printk(KERN_INFO " free_count: %d, numBuffers : %d\n", | ||
1879 | (int)privptr->write_free_count,(int) numBuffers ); | ||
1880 | #endif | ||
1881 | ch = &privptr->channel[WRITE]; | 1329 | ch = &privptr->channel[WRITE]; |
1882 | atomic_inc(&skb->users); | 1330 | atomic_inc(&skb->users); |
1883 | skb_queue_tail(&ch->collect_queue, skb); | 1331 | skb_queue_tail(&ch->collect_queue, skb); |
1884 | CLAW_DBF_TEXT(2,trace,"clawbusy"); | 1332 | CLAW_DBF_TEXT(2, trace, "clawbusy"); |
1885 | goto Done2; | 1333 | goto Done2; |
1886 | } | 1334 | } |
1887 | pDataAddress=skb->data; | 1335 | pDataAddress=skb->data; |
1888 | len_of_data=skb->len; | 1336 | len_of_data=skb->len; |
1889 | 1337 | ||
1890 | while (len_of_data > 0) { | 1338 | while (len_of_data > 0) { |
1891 | #ifdef DEBUGMSG | ||
1892 | printk(KERN_INFO "%s: %s() length-of-data is %ld \n", | ||
1893 | dev->name ,__func__,len_of_data); | ||
1894 | dumpit((char *)pDataAddress ,64); | ||
1895 | #endif | ||
1896 | p_this_ccw=privptr->p_write_free_chain; /* get a block */ | 1339 | p_this_ccw=privptr->p_write_free_chain; /* get a block */ |
1897 | if (p_this_ccw == NULL) { /* lost the race */ | 1340 | if (p_this_ccw == NULL) { /* lost the race */ |
1898 | ch = &privptr->channel[WRITE]; | 1341 | ch = &privptr->channel[WRITE]; |
@@ -1924,12 +1367,6 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid) | |||
1924 | (__u32)__pa(&p_this_ccw->write); | 1367 | (__u32)__pa(&p_this_ccw->write); |
1925 | } | 1368 | } |
1926 | p_last_ccw=p_this_ccw; /* save new last block */ | 1369 | p_last_ccw=p_this_ccw; /* save new last block */ |
1927 | #ifdef IOTRACE | ||
1928 | printk(KERN_INFO "%s: %s() > CCW and Buffer %ld bytes long \n", | ||
1929 | dev->name,__func__,bytesInThisBuffer); | ||
1930 | dumpit((char *)p_this_ccw, sizeof(struct ccwbk)); | ||
1931 | dumpit((char *)p_this_ccw->p_buffer, 64); | ||
1932 | #endif | ||
1933 | } | 1370 | } |
1934 | 1371 | ||
1935 | /* FirstCCW and LastCCW now contain a new set of write channel | 1372 | /* FirstCCW and LastCCW now contain a new set of write channel |
@@ -1962,13 +1399,11 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid) | |||
1962 | pEnd->write1_nop2.count=1; | 1399 | pEnd->write1_nop2.count=1; |
1963 | } /* end if if (pEnd->write1) */ | 1400 | } /* end if if (pEnd->write1) */ |
1964 | 1401 | ||
1965 | |||
1966 | if (privptr->p_write_active_first==NULL ) { | 1402 | if (privptr->p_write_active_first==NULL ) { |
1967 | privptr->p_write_active_first=p_first_ccw; | 1403 | privptr->p_write_active_first=p_first_ccw; |
1968 | privptr->p_write_active_last=p_last_ccw; | 1404 | privptr->p_write_active_last=p_last_ccw; |
1969 | } | 1405 | } |
1970 | else { | 1406 | else { |
1971 | |||
1972 | /* set up Tic CCWs */ | 1407 | /* set up Tic CCWs */ |
1973 | 1408 | ||
1974 | tempCCW.cda=(__u32)__pa(&p_first_ccw->write); | 1409 | tempCCW.cda=(__u32)__pa(&p_first_ccw->write); |
@@ -2007,19 +1442,6 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid) | |||
2007 | } | 1442 | } |
2008 | 1443 | ||
2009 | } /* endif (p_first_ccw!=NULL) */ | 1444 | } /* endif (p_first_ccw!=NULL) */ |
2010 | |||
2011 | |||
2012 | #ifdef IOTRACE | ||
2013 | printk(KERN_INFO "%s: %s() > Dump Active CCW chain \n", | ||
2014 | dev->name,__func__); | ||
2015 | p_buf=privptr->p_write_active_first; | ||
2016 | while (p_buf!=NULL) { | ||
2017 | dumpit((char *)p_buf, sizeof(struct ccwbk)); | ||
2018 | p_buf=p_buf->next; | ||
2019 | } | ||
2020 | p_buf=(struct ccwbk*)privptr->p_end_ccw; | ||
2021 | dumpit((char *)p_buf, sizeof(struct endccw)); | ||
2022 | #endif | ||
2023 | dev_kfree_skb_any(skb); | 1445 | dev_kfree_skb_any(skb); |
2024 | if (linkid==0) { | 1446 | if (linkid==0) { |
2025 | lock=LOCK_NO; | 1447 | lock=LOCK_NO; |
@@ -2029,21 +1451,12 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid) | |||
2029 | } | 1451 | } |
2030 | claw_strt_out_IO(dev ); | 1452 | claw_strt_out_IO(dev ); |
2031 | /* if write free count is zero , set NOBUFFER */ | 1453 | /* if write free count is zero , set NOBUFFER */ |
2032 | #ifdef DEBUGMSG | ||
2033 | printk(KERN_INFO "%s: %s() > free_count is %d\n", | ||
2034 | dev->name,__func__, | ||
2035 | (int) privptr->write_free_count ); | ||
2036 | #endif | ||
2037 | if (privptr->write_free_count==0) { | 1454 | if (privptr->write_free_count==0) { |
2038 | claw_setbit_busy(TB_NOBUFFER,dev); | 1455 | claw_setbit_busy(TB_NOBUFFER,dev); |
2039 | } | 1456 | } |
2040 | Done2: | 1457 | Done2: |
2041 | claw_clearbit_busy(TB_TX,dev); | 1458 | claw_clearbit_busy(TB_TX,dev); |
2042 | Done: | 1459 | Done: |
2043 | #ifdef FUNCTRACE | ||
2044 | printk(KERN_INFO "%s: %s() > exit on line %d, rc = %d \n", | ||
2045 | dev->name,__func__,__LINE__, rc); | ||
2046 | #endif | ||
2047 | return(rc); | 1460 | return(rc); |
2048 | } /* end of claw_hw_tx */ | 1461 | } /* end of claw_hw_tx */ |
2049 | 1462 | ||
@@ -2075,14 +1488,7 @@ init_ccw_bk(struct net_device *dev) | |||
2075 | struct clawh *pClawH=NULL; | 1488 | struct clawh *pClawH=NULL; |
2076 | addr_t real_TIC_address; | 1489 | addr_t real_TIC_address; |
2077 | int i,j; | 1490 | int i,j; |
2078 | #ifdef FUNCTRACE | 1491 | CLAW_DBF_TEXT(4, trace, "init_ccw"); |
2079 | printk(KERN_INFO "%s: %s() enter \n",dev->name,__func__); | ||
2080 | #endif | ||
2081 | CLAW_DBF_TEXT(4,trace,"init_ccw"); | ||
2082 | #ifdef DEBUGMSG | ||
2083 | printk(KERN_INFO "%s: variable dev =\n",dev->name); | ||
2084 | dumpit((char *) dev, sizeof(struct net_device)); | ||
2085 | #endif | ||
2086 | 1492 | ||
2087 | /* initialize statistics field */ | 1493 | /* initialize statistics field */ |
2088 | privptr->active_link_ID=0; | 1494 | privptr->active_link_ID=0; |
@@ -2107,20 +1513,6 @@ init_ccw_bk(struct net_device *dev) | |||
2107 | */ | 1513 | */ |
2108 | ccw_blocks_required = | 1514 | ccw_blocks_required = |
2109 | privptr->p_env->read_buffers+privptr->p_env->write_buffers+1; | 1515 | privptr->p_env->read_buffers+privptr->p_env->write_buffers+1; |
2110 | #ifdef DEBUGMSG | ||
2111 | printk(KERN_INFO "%s: %s() " | ||
2112 | "ccw_blocks_required=%d\n", | ||
2113 | dev->name,__func__, | ||
2114 | ccw_blocks_required); | ||
2115 | printk(KERN_INFO "%s: %s() " | ||
2116 | "PAGE_SIZE=0x%x\n", | ||
2117 | dev->name,__func__, | ||
2118 | (unsigned int)PAGE_SIZE); | ||
2119 | printk(KERN_INFO "%s: %s() > " | ||
2120 | "PAGE_MASK=0x%x\n", | ||
2121 | dev->name,__func__, | ||
2122 | (unsigned int)PAGE_MASK); | ||
2123 | #endif | ||
2124 | /* | 1516 | /* |
2125 | * compute number of CCW blocks that will fit in a page | 1517 | * compute number of CCW blocks that will fit in a page |
2126 | */ | 1518 | */ |
@@ -2128,14 +1520,6 @@ init_ccw_bk(struct net_device *dev) | |||
2128 | ccw_pages_required= | 1520 | ccw_pages_required= |
2129 | DIV_ROUND_UP(ccw_blocks_required, ccw_blocks_perpage); | 1521 | DIV_ROUND_UP(ccw_blocks_required, ccw_blocks_perpage); |
2130 | 1522 | ||
2131 | #ifdef DEBUGMSG | ||
2132 | printk(KERN_INFO "%s: %s() > ccw_blocks_perpage=%d\n", | ||
2133 | dev->name,__func__, | ||
2134 | ccw_blocks_perpage); | ||
2135 | printk(KERN_INFO "%s: %s() > ccw_pages_required=%d\n", | ||
2136 | dev->name,__func__, | ||
2137 | ccw_pages_required); | ||
2138 | #endif | ||
2139 | /* | 1523 | /* |
2140 | * read and write sizes are set by 2 constants in claw.h | 1524 | * read and write sizes are set by 2 constants in claw.h |
2141 | * 4k and 32k. Unpacked values other than 4k are not going to | 1525 | * 4k and 32k. Unpacked values other than 4k are not going to |
@@ -2166,36 +1550,6 @@ init_ccw_bk(struct net_device *dev) | |||
2166 | claw_write_pages = privptr->p_env->write_buffers * | 1550 | claw_write_pages = privptr->p_env->write_buffers * |
2167 | privptr->p_buff_pages_perwrite; | 1551 | privptr->p_buff_pages_perwrite; |
2168 | } | 1552 | } |
2169 | #ifdef DEBUGMSG | ||
2170 | if (privptr->p_env->read_size < PAGE_SIZE) { | ||
2171 | printk(KERN_INFO "%s: %s() reads_perpage=%d\n", | ||
2172 | dev->name,__func__, | ||
2173 | claw_reads_perpage); | ||
2174 | } | ||
2175 | else { | ||
2176 | printk(KERN_INFO "%s: %s() pages_perread=%d\n", | ||
2177 | dev->name,__func__, | ||
2178 | privptr->p_buff_pages_perread); | ||
2179 | } | ||
2180 | printk(KERN_INFO "%s: %s() read_pages=%d\n", | ||
2181 | dev->name,__func__, | ||
2182 | claw_read_pages); | ||
2183 | if (privptr->p_env->write_size < PAGE_SIZE) { | ||
2184 | printk(KERN_INFO "%s: %s() writes_perpage=%d\n", | ||
2185 | dev->name,__func__, | ||
2186 | claw_writes_perpage); | ||
2187 | } | ||
2188 | else { | ||
2189 | printk(KERN_INFO "%s: %s() pages_perwrite=%d\n", | ||
2190 | dev->name,__func__, | ||
2191 | privptr->p_buff_pages_perwrite); | ||
2192 | } | ||
2193 | printk(KERN_INFO "%s: %s() write_pages=%d\n", | ||
2194 | dev->name,__func__, | ||
2195 | claw_write_pages); | ||
2196 | #endif | ||
2197 | |||
2198 | |||
2199 | /* | 1553 | /* |
2200 | * allocate ccw_pages_required | 1554 | * allocate ccw_pages_required |
2201 | */ | 1555 | */ |
@@ -2204,17 +1558,6 @@ init_ccw_bk(struct net_device *dev) | |||
2204 | (void *)__get_free_pages(__GFP_DMA, | 1558 | (void *)__get_free_pages(__GFP_DMA, |
2205 | (int)pages_to_order_of_mag(ccw_pages_required )); | 1559 | (int)pages_to_order_of_mag(ccw_pages_required )); |
2206 | if (privptr->p_buff_ccw==NULL) { | 1560 | if (privptr->p_buff_ccw==NULL) { |
2207 | printk(KERN_INFO "%s: %s() " | ||
2208 | "__get_free_pages for CCWs failed : " | ||
2209 | "pages is %d\n", | ||
2210 | dev->name,__func__, | ||
2211 | ccw_pages_required ); | ||
2212 | #ifdef FUNCTRACE | ||
2213 | printk(KERN_INFO "%s: %s() > " | ||
2214 | "exit on line %d, rc = ENOMEM\n", | ||
2215 | dev->name,__func__, | ||
2216 | __LINE__); | ||
2217 | #endif | ||
2218 | return -ENOMEM; | 1561 | return -ENOMEM; |
2219 | } | 1562 | } |
2220 | privptr->p_buff_ccw_num=ccw_pages_required; | 1563 | privptr->p_buff_ccw_num=ccw_pages_required; |
@@ -2229,11 +1572,6 @@ init_ccw_bk(struct net_device *dev) | |||
2229 | privptr->p_end_ccw = (struct endccw *)&privptr->end_ccw; | 1572 | privptr->p_end_ccw = (struct endccw *)&privptr->end_ccw; |
2230 | real_address = (__u32)__pa(privptr->p_end_ccw); | 1573 | real_address = (__u32)__pa(privptr->p_end_ccw); |
2231 | /* Initialize ending CCW block */ | 1574 | /* Initialize ending CCW block */ |
2232 | #ifdef DEBUGMSG | ||
2233 | printk(KERN_INFO "%s: %s() begin initialize ending CCW blocks\n", | ||
2234 | dev->name,__func__); | ||
2235 | #endif | ||
2236 | |||
2237 | p_endccw=privptr->p_end_ccw; | 1575 | p_endccw=privptr->p_end_ccw; |
2238 | p_endccw->real=real_address; | 1576 | p_endccw->real=real_address; |
2239 | p_endccw->write1=0x00; | 1577 | p_endccw->write1=0x00; |
@@ -2287,21 +1625,10 @@ init_ccw_bk(struct net_device *dev) | |||
2287 | p_endccw->read2_nop2.count = 1; | 1625 | p_endccw->read2_nop2.count = 1; |
2288 | p_endccw->read2_nop2.cda = 0; | 1626 | p_endccw->read2_nop2.cda = 0; |
2289 | 1627 | ||
2290 | #ifdef IOTRACE | ||
2291 | printk(KERN_INFO "%s: %s() dump claw ending CCW BK \n", | ||
2292 | dev->name,__func__); | ||
2293 | dumpit((char *)p_endccw, sizeof(struct endccw)); | ||
2294 | #endif | ||
2295 | |||
2296 | /* | 1628 | /* |
2297 | * Build a chain of CCWs | 1629 | * Build a chain of CCWs |
2298 | * | 1630 | * |
2299 | */ | 1631 | */ |
2300 | |||
2301 | #ifdef DEBUGMSG | ||
2302 | printk(KERN_INFO "%s: %s() Begin build a chain of CCW buffer \n", | ||
2303 | dev->name,__func__); | ||
2304 | #endif | ||
2305 | p_buff=privptr->p_buff_ccw; | 1632 | p_buff=privptr->p_buff_ccw; |
2306 | 1633 | ||
2307 | p_free_chain=NULL; | 1634 | p_free_chain=NULL; |
@@ -2316,26 +1643,10 @@ init_ccw_bk(struct net_device *dev) | |||
2316 | } | 1643 | } |
2317 | p_buff+=PAGE_SIZE; | 1644 | p_buff+=PAGE_SIZE; |
2318 | } | 1645 | } |
2319 | #ifdef DEBUGMSG | ||
2320 | printk(KERN_INFO "%s: %s() " | ||
2321 | "End build a chain of CCW buffer \n", | ||
2322 | dev->name,__func__); | ||
2323 | p_buf=p_free_chain; | ||
2324 | while (p_buf!=NULL) { | ||
2325 | dumpit((char *)p_buf, sizeof(struct ccwbk)); | ||
2326 | p_buf=p_buf->next; | ||
2327 | } | ||
2328 | #endif | ||
2329 | |||
2330 | /* | 1646 | /* |
2331 | * Initialize ClawSignalBlock | 1647 | * Initialize ClawSignalBlock |
2332 | * | 1648 | * |
2333 | */ | 1649 | */ |
2334 | #ifdef DEBUGMSG | ||
2335 | printk(KERN_INFO "%s: %s() " | ||
2336 | "Begin initialize ClawSignalBlock \n", | ||
2337 | dev->name,__func__); | ||
2338 | #endif | ||
2339 | if (privptr->p_claw_signal_blk==NULL) { | 1650 | if (privptr->p_claw_signal_blk==NULL) { |
2340 | privptr->p_claw_signal_blk=p_free_chain; | 1651 | privptr->p_claw_signal_blk=p_free_chain; |
2341 | p_free_chain=p_free_chain->next; | 1652 | p_free_chain=p_free_chain->next; |
@@ -2344,12 +1655,6 @@ init_ccw_bk(struct net_device *dev) | |||
2344 | pClawH->opcode=0xff; | 1655 | pClawH->opcode=0xff; |
2345 | pClawH->flag=CLAW_BUSY; | 1656 | pClawH->flag=CLAW_BUSY; |
2346 | } | 1657 | } |
2347 | #ifdef DEBUGMSG | ||
2348 | printk(KERN_INFO "%s: %s() > End initialize " | ||
2349 | "ClawSignalBlock\n", | ||
2350 | dev->name,__func__); | ||
2351 | dumpit((char *)privptr->p_claw_signal_blk, sizeof(struct ccwbk)); | ||
2352 | #endif | ||
2353 | 1658 | ||
2354 | /* | 1659 | /* |
2355 | * allocate write_pages_required and add to free chain | 1660 | * allocate write_pages_required and add to free chain |
@@ -2360,17 +1665,7 @@ init_ccw_bk(struct net_device *dev) | |||
2360 | (void *)__get_free_pages(__GFP_DMA, | 1665 | (void *)__get_free_pages(__GFP_DMA, |
2361 | (int)pages_to_order_of_mag(claw_write_pages )); | 1666 | (int)pages_to_order_of_mag(claw_write_pages )); |
2362 | if (privptr->p_buff_write==NULL) { | 1667 | if (privptr->p_buff_write==NULL) { |
2363 | printk(KERN_INFO "%s: %s() __get_free_pages for write" | ||
2364 | " bufs failed : get is for %d pages\n", | ||
2365 | dev->name,__func__,claw_write_pages ); | ||
2366 | free_pages((unsigned long)privptr->p_buff_ccw, | ||
2367 | (int)pages_to_order_of_mag(privptr->p_buff_ccw_num)); | ||
2368 | privptr->p_buff_ccw=NULL; | 1668 | privptr->p_buff_ccw=NULL; |
2369 | #ifdef FUNCTRACE | ||
2370 | printk(KERN_INFO "%s: %s() > exit on line %d," | ||
2371 | "rc = ENOMEM\n", | ||
2372 | dev->name,__func__,__LINE__); | ||
2373 | #endif | ||
2374 | return -ENOMEM; | 1669 | return -ENOMEM; |
2375 | } | 1670 | } |
2376 | /* | 1671 | /* |
@@ -2380,10 +1675,6 @@ init_ccw_bk(struct net_device *dev) | |||
2380 | 1675 | ||
2381 | memset(privptr->p_buff_write, 0x00, | 1676 | memset(privptr->p_buff_write, 0x00, |
2382 | ccw_pages_required * PAGE_SIZE); | 1677 | ccw_pages_required * PAGE_SIZE); |
2383 | #ifdef DEBUGMSG | ||
2384 | printk(KERN_INFO "%s: %s() Begin build claw write free " | ||
2385 | "chain \n",dev->name,__func__); | ||
2386 | #endif | ||
2387 | privptr->p_write_free_chain=NULL; | 1678 | privptr->p_write_free_chain=NULL; |
2388 | 1679 | ||
2389 | p_buff=privptr->p_buff_write; | 1680 | p_buff=privptr->p_buff_write; |
@@ -2419,18 +1710,7 @@ init_ccw_bk(struct net_device *dev) | |||
2419 | p_buff=(void *)__get_free_pages(__GFP_DMA, | 1710 | p_buff=(void *)__get_free_pages(__GFP_DMA, |
2420 | (int)pages_to_order_of_mag( | 1711 | (int)pages_to_order_of_mag( |
2421 | privptr->p_buff_pages_perwrite) ); | 1712 | privptr->p_buff_pages_perwrite) ); |
2422 | #ifdef IOTRACE | ||
2423 | printk(KERN_INFO "%s:%s __get_free_pages " | ||
2424 | "for writes buf: get for %d pages\n", | ||
2425 | dev->name,__func__, | ||
2426 | privptr->p_buff_pages_perwrite); | ||
2427 | #endif | ||
2428 | if (p_buff==NULL) { | 1713 | if (p_buff==NULL) { |
2429 | printk(KERN_INFO "%s:%s __get_free_pages " | ||
2430 | "for writes buf failed : get is for %d pages\n", | ||
2431 | dev->name, | ||
2432 | __func__, | ||
2433 | privptr->p_buff_pages_perwrite ); | ||
2434 | free_pages((unsigned long)privptr->p_buff_ccw, | 1714 | free_pages((unsigned long)privptr->p_buff_ccw, |
2435 | (int)pages_to_order_of_mag( | 1715 | (int)pages_to_order_of_mag( |
2436 | privptr->p_buff_ccw_num)); | 1716 | privptr->p_buff_ccw_num)); |
@@ -2443,12 +1723,6 @@ init_ccw_bk(struct net_device *dev) | |||
2443 | privptr->p_buff_pages_perwrite)); | 1723 | privptr->p_buff_pages_perwrite)); |
2444 | p_buf=p_buf->next; | 1724 | p_buf=p_buf->next; |
2445 | } | 1725 | } |
2446 | #ifdef FUNCTRACE | ||
2447 | printk(KERN_INFO "%s: %s exit on line %d, rc = ENOMEM\n", | ||
2448 | dev->name, | ||
2449 | __func__, | ||
2450 | __LINE__); | ||
2451 | #endif | ||
2452 | return -ENOMEM; | 1726 | return -ENOMEM; |
2453 | } /* Error on get_pages */ | 1727 | } /* Error on get_pages */ |
2454 | memset(p_buff, 0x00, privptr->p_env->write_size ); | 1728 | memset(p_buff, 0x00, privptr->p_env->write_size ); |
@@ -2477,15 +1751,6 @@ init_ccw_bk(struct net_device *dev) | |||
2477 | privptr->write_free_count=privptr->p_env->write_buffers; | 1751 | privptr->write_free_count=privptr->p_env->write_buffers; |
2478 | 1752 | ||
2479 | 1753 | ||
2480 | #ifdef DEBUGMSG | ||
2481 | printk(KERN_INFO "%s:%s End build claw write free chain \n", | ||
2482 | dev->name,__func__); | ||
2483 | p_buf=privptr->p_write_free_chain; | ||
2484 | while (p_buf!=NULL) { | ||
2485 | dumpit((char *)p_buf, sizeof(struct ccwbk)); | ||
2486 | p_buf=p_buf->next; | ||
2487 | } | ||
2488 | #endif | ||
2489 | /* | 1754 | /* |
2490 | * allocate read_pages_required and chain to free chain | 1755 | * allocate read_pages_required and chain to free chain |
2491 | */ | 1756 | */ |
@@ -2495,10 +1760,6 @@ init_ccw_bk(struct net_device *dev) | |||
2495 | (void *)__get_free_pages(__GFP_DMA, | 1760 | (void *)__get_free_pages(__GFP_DMA, |
2496 | (int)pages_to_order_of_mag(claw_read_pages) ); | 1761 | (int)pages_to_order_of_mag(claw_read_pages) ); |
2497 | if (privptr->p_buff_read==NULL) { | 1762 | if (privptr->p_buff_read==NULL) { |
2498 | printk(KERN_INFO "%s: %s() " | ||
2499 | "__get_free_pages for read buf failed : " | ||
2500 | "get is for %d pages\n", | ||
2501 | dev->name,__func__,claw_read_pages ); | ||
2502 | free_pages((unsigned long)privptr->p_buff_ccw, | 1763 | free_pages((unsigned long)privptr->p_buff_ccw, |
2503 | (int)pages_to_order_of_mag( | 1764 | (int)pages_to_order_of_mag( |
2504 | privptr->p_buff_ccw_num)); | 1765 | privptr->p_buff_ccw_num)); |
@@ -2508,10 +1769,6 @@ init_ccw_bk(struct net_device *dev) | |||
2508 | privptr->p_buff_write_num)); | 1769 | privptr->p_buff_write_num)); |
2509 | privptr->p_buff_ccw=NULL; | 1770 | privptr->p_buff_ccw=NULL; |
2510 | privptr->p_buff_write=NULL; | 1771 | privptr->p_buff_write=NULL; |
2511 | #ifdef FUNCTRACE | ||
2512 | printk(KERN_INFO "%s: %s() > exit on line %d, rc =" | ||
2513 | " ENOMEM\n",dev->name,__func__,__LINE__); | ||
2514 | #endif | ||
2515 | return -ENOMEM; | 1772 | return -ENOMEM; |
2516 | } | 1773 | } |
2517 | memset(privptr->p_buff_read, 0x00, claw_read_pages * PAGE_SIZE); | 1774 | memset(privptr->p_buff_read, 0x00, claw_read_pages * PAGE_SIZE); |
@@ -2520,10 +1777,6 @@ init_ccw_bk(struct net_device *dev) | |||
2520 | * Build CLAW read free chain | 1777 | * Build CLAW read free chain |
2521 | * | 1778 | * |
2522 | */ | 1779 | */ |
2523 | #ifdef DEBUGMSG | ||
2524 | printk(KERN_INFO "%s: %s() Begin build claw read free chain \n", | ||
2525 | dev->name,__func__); | ||
2526 | #endif | ||
2527 | p_buff=privptr->p_buff_read; | 1780 | p_buff=privptr->p_buff_read; |
2528 | for (i=0 ; i< privptr->p_env->read_buffers ; i++) { | 1781 | for (i=0 ; i< privptr->p_env->read_buffers ; i++) { |
2529 | p_buf = p_free_chain; | 1782 | p_buf = p_free_chain; |
@@ -2600,19 +1853,10 @@ init_ccw_bk(struct net_device *dev) | |||
2600 | } /* for read_buffers */ | 1853 | } /* for read_buffers */ |
2601 | } /* read_size < PAGE_SIZE */ | 1854 | } /* read_size < PAGE_SIZE */ |
2602 | else { /* read Size >= PAGE_SIZE */ | 1855 | else { /* read Size >= PAGE_SIZE */ |
2603 | |||
2604 | #ifdef DEBUGMSG | ||
2605 | printk(KERN_INFO "%s: %s() Begin build claw read free chain \n", | ||
2606 | dev->name,__func__); | ||
2607 | #endif | ||
2608 | for (i=0 ; i< privptr->p_env->read_buffers ; i++) { | 1856 | for (i=0 ; i< privptr->p_env->read_buffers ; i++) { |
2609 | p_buff = (void *)__get_free_pages(__GFP_DMA, | 1857 | p_buff = (void *)__get_free_pages(__GFP_DMA, |
2610 | (int)pages_to_order_of_mag(privptr->p_buff_pages_perread) ); | 1858 | (int)pages_to_order_of_mag(privptr->p_buff_pages_perread) ); |
2611 | if (p_buff==NULL) { | 1859 | if (p_buff==NULL) { |
2612 | printk(KERN_INFO "%s: %s() __get_free_pages for read " | ||
2613 | "buf failed : get is for %d pages\n", | ||
2614 | dev->name,__func__, | ||
2615 | privptr->p_buff_pages_perread ); | ||
2616 | free_pages((unsigned long)privptr->p_buff_ccw, | 1860 | free_pages((unsigned long)privptr->p_buff_ccw, |
2617 | (int)pages_to_order_of_mag(privptr->p_buff_ccw_num)); | 1861 | (int)pages_to_order_of_mag(privptr->p_buff_ccw_num)); |
2618 | /* free the write pages */ | 1862 | /* free the write pages */ |
@@ -2633,11 +1877,6 @@ init_ccw_bk(struct net_device *dev) | |||
2633 | } | 1877 | } |
2634 | privptr->p_buff_ccw=NULL; | 1878 | privptr->p_buff_ccw=NULL; |
2635 | privptr->p_buff_write=NULL; | 1879 | privptr->p_buff_write=NULL; |
2636 | #ifdef FUNCTRACE | ||
2637 | printk(KERN_INFO "%s: %s() exit on line %d, rc = ENOMEM\n", | ||
2638 | dev->name,__func__, | ||
2639 | __LINE__); | ||
2640 | #endif | ||
2641 | return -ENOMEM; | 1880 | return -ENOMEM; |
2642 | } | 1881 | } |
2643 | memset(p_buff, 0x00, privptr->p_env->read_size); | 1882 | memset(p_buff, 0x00, privptr->p_env->read_size); |
@@ -2706,22 +1945,9 @@ init_ccw_bk(struct net_device *dev) | |||
2706 | } /* For read_buffers */ | 1945 | } /* For read_buffers */ |
2707 | } /* read_size >= PAGE_SIZE */ | 1946 | } /* read_size >= PAGE_SIZE */ |
2708 | } /* pBuffread = NULL */ | 1947 | } /* pBuffread = NULL */ |
2709 | #ifdef DEBUGMSG | ||
2710 | printk(KERN_INFO "%s: %s() > End build claw read free chain \n", | ||
2711 | dev->name,__func__); | ||
2712 | p_buf=p_first_CCWB; | ||
2713 | while (p_buf!=NULL) { | ||
2714 | dumpit((char *)p_buf, sizeof(struct ccwbk)); | ||
2715 | p_buf=p_buf->next; | ||
2716 | } | ||
2717 | |||
2718 | #endif | ||
2719 | add_claw_reads( dev ,p_first_CCWB , p_last_CCWB); | 1948 | add_claw_reads( dev ,p_first_CCWB , p_last_CCWB); |
2720 | privptr->buffs_alloc = 1; | 1949 | privptr->buffs_alloc = 1; |
2721 | #ifdef FUNCTRACE | 1950 | |
2722 | printk(KERN_INFO "%s: %s() exit on line %d\n", | ||
2723 | dev->name,__func__,__LINE__); | ||
2724 | #endif | ||
2725 | return 0; | 1951 | return 0; |
2726 | } /* end of init_ccw_bk */ | 1952 | } /* end of init_ccw_bk */ |
2727 | 1953 | ||
@@ -2735,14 +1961,8 @@ static void | |||
2735 | probe_error( struct ccwgroup_device *cgdev) | 1961 | probe_error( struct ccwgroup_device *cgdev) |
2736 | { | 1962 | { |
2737 | struct claw_privbk *privptr; | 1963 | struct claw_privbk *privptr; |
2738 | #ifdef FUNCTRACE | 1964 | |
2739 | printk(KERN_INFO "%s enter \n",__func__); | 1965 | CLAW_DBF_TEXT(4, trace, "proberr"); |
2740 | #endif | ||
2741 | CLAW_DBF_TEXT(4,trace,"proberr"); | ||
2742 | #ifdef DEBUGMSG | ||
2743 | printk(KERN_INFO "%s variable cgdev =\n",__func__); | ||
2744 | dumpit((char *) cgdev, sizeof(struct ccwgroup_device)); | ||
2745 | #endif | ||
2746 | privptr=(struct claw_privbk *)cgdev->dev.driver_data; | 1966 | privptr=(struct claw_privbk *)cgdev->dev.driver_data; |
2747 | if (privptr!=NULL) { | 1967 | if (privptr!=NULL) { |
2748 | kfree(privptr->p_env); | 1968 | kfree(privptr->p_env); |
@@ -2752,16 +1972,9 @@ probe_error( struct ccwgroup_device *cgdev) | |||
2752 | kfree(privptr); | 1972 | kfree(privptr); |
2753 | privptr=NULL; | 1973 | privptr=NULL; |
2754 | } | 1974 | } |
2755 | #ifdef FUNCTRACE | ||
2756 | printk(KERN_INFO "%s > exit on line %d\n", | ||
2757 | __func__,__LINE__); | ||
2758 | #endif | ||
2759 | |||
2760 | return; | 1975 | return; |
2761 | } /* probe_error */ | 1976 | } /* probe_error */ |
2762 | 1977 | ||
2763 | |||
2764 | |||
2765 | /*-------------------------------------------------------------------* | 1978 | /*-------------------------------------------------------------------* |
2766 | * claw_process_control * | 1979 | * claw_process_control * |
2767 | * * | 1980 | * * |
@@ -2783,32 +1996,19 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw) | |||
2783 | struct conncmd *p_connect=NULL; | 1996 | struct conncmd *p_connect=NULL; |
2784 | int rc; | 1997 | int rc; |
2785 | struct chbk *p_ch = NULL; | 1998 | struct chbk *p_ch = NULL; |
2786 | #ifdef FUNCTRACE | 1999 | struct device *tdev; |
2787 | printk(KERN_INFO "%s: %s() > enter \n", | 2000 | CLAW_DBF_TEXT(2, setup, "clw_cntl"); |
2788 | dev->name,__func__); | ||
2789 | #endif | ||
2790 | CLAW_DBF_TEXT(2,setup,"clw_cntl"); | ||
2791 | #ifdef DEBUGMSG | ||
2792 | printk(KERN_INFO "%s: variable dev =\n",dev->name); | ||
2793 | dumpit((char *) dev, sizeof(struct net_device)); | ||
2794 | printk(KERN_INFO "%s: variable p_ccw =\n",dev->name); | ||
2795 | dumpit((char *) p_ccw, sizeof(struct ccwbk *)); | ||
2796 | #endif | ||
2797 | udelay(1000); /* Wait a ms for the control packets to | 2001 | udelay(1000); /* Wait a ms for the control packets to |
2798 | *catch up to each other */ | 2002 | *catch up to each other */ |
2799 | privptr=dev->priv; | 2003 | privptr=dev->priv; |
2800 | p_env=privptr->p_env; | 2004 | p_env=privptr->p_env; |
2005 | tdev = &privptr->channel[READ].cdev->dev; | ||
2801 | memcpy( &temp_host_name, p_env->host_name, 8); | 2006 | memcpy( &temp_host_name, p_env->host_name, 8); |
2802 | memcpy( &temp_ws_name, p_env->adapter_name , 8); | 2007 | memcpy( &temp_ws_name, p_env->adapter_name , 8); |
2803 | printk(KERN_INFO "%s: CLAW device %.8s: " | 2008 | printk(KERN_INFO "%s: CLAW device %.8s: " |
2804 | "Received Control Packet\n", | 2009 | "Received Control Packet\n", |
2805 | dev->name, temp_ws_name); | 2010 | dev->name, temp_ws_name); |
2806 | if (privptr->release_pend==1) { | 2011 | if (privptr->release_pend==1) { |
2807 | #ifdef FUNCTRACE | ||
2808 | printk(KERN_INFO "%s: %s() > " | ||
2809 | "exit on line %d, rc=0\n", | ||
2810 | dev->name,__func__,__LINE__); | ||
2811 | #endif | ||
2812 | return 0; | 2012 | return 0; |
2813 | } | 2013 | } |
2814 | p_buf=p_ccw->p_buffer; | 2014 | p_buf=p_ccw->p_buffer; |
@@ -2818,261 +2018,246 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw) | |||
2818 | } else { | 2018 | } else { |
2819 | memcpy(p_ctlbk, p_buf, sizeof(struct clawctl)); | 2019 | memcpy(p_ctlbk, p_buf, sizeof(struct clawctl)); |
2820 | } | 2020 | } |
2821 | #ifdef IOTRACE | ||
2822 | printk(KERN_INFO "%s: dump claw control data inbound\n",dev->name); | ||
2823 | dumpit((char *)p_ctlbk, sizeof(struct clawctl)); | ||
2824 | #endif | ||
2825 | switch (p_ctlbk->command) | 2021 | switch (p_ctlbk->command) |
2826 | { | 2022 | { |
2827 | case SYSTEM_VALIDATE_REQUEST: | 2023 | case SYSTEM_VALIDATE_REQUEST: |
2828 | if (p_ctlbk->version!=CLAW_VERSION_ID) { | 2024 | if (p_ctlbk->version != CLAW_VERSION_ID) { |
2829 | claw_snd_sys_validate_rsp(dev, p_ctlbk, | 2025 | claw_snd_sys_validate_rsp(dev, p_ctlbk, |
2830 | CLAW_RC_WRONG_VERSION ); | 2026 | CLAW_RC_WRONG_VERSION); |
2831 | printk("%s: %d is wrong version id. " | 2027 | printk("%s: %d is wrong version id. " |
2832 | "Expected %d\n", | 2028 | "Expected %d\n", |
2833 | dev->name, p_ctlbk->version, | 2029 | dev->name, p_ctlbk->version, |
2834 | CLAW_VERSION_ID); | 2030 | CLAW_VERSION_ID); |
2835 | } | 2031 | } |
2836 | p_sysval=(struct sysval *)&(p_ctlbk->data); | 2032 | p_sysval = (struct sysval *)&(p_ctlbk->data); |
2837 | printk( "%s: Recv Sys Validate Request: " | 2033 | printk("%s: Recv Sys Validate Request: " |
2838 | "Vers=%d,link_id=%d,Corr=%d,WS name=%." | 2034 | "Vers=%d,link_id=%d,Corr=%d,WS name=%." |
2839 | "8s,Host name=%.8s\n", | 2035 | "8s,Host name=%.8s\n", |
2840 | dev->name, p_ctlbk->version, | 2036 | dev->name, p_ctlbk->version, |
2841 | p_ctlbk->linkid, | 2037 | p_ctlbk->linkid, |
2842 | p_ctlbk->correlator, | 2038 | p_ctlbk->correlator, |
2843 | p_sysval->WS_name, | 2039 | p_sysval->WS_name, |
2844 | p_sysval->host_name); | 2040 | p_sysval->host_name); |
2845 | if (0!=memcmp(temp_host_name,p_sysval->host_name,8)) { | 2041 | if (memcmp(temp_host_name, p_sysval->host_name, 8)) { |
2846 | claw_snd_sys_validate_rsp(dev, p_ctlbk, | 2042 | claw_snd_sys_validate_rsp(dev, p_ctlbk, |
2847 | CLAW_RC_NAME_MISMATCH ); | 2043 | CLAW_RC_NAME_MISMATCH); |
2848 | CLAW_DBF_TEXT(2,setup,"HSTBAD"); | 2044 | CLAW_DBF_TEXT(2, setup, "HSTBAD"); |
2849 | CLAW_DBF_TEXT_(2,setup,"%s",p_sysval->host_name); | 2045 | CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->host_name); |
2850 | CLAW_DBF_TEXT_(2,setup,"%s",temp_host_name); | 2046 | CLAW_DBF_TEXT_(2, setup, "%s", temp_host_name); |
2851 | printk(KERN_INFO "%s: Host name mismatch\n", | 2047 | printk(KERN_INFO "%s: Host name mismatch\n", |
2852 | dev->name); | 2048 | dev->name); |
2853 | printk(KERN_INFO "%s: Received :%s: " | 2049 | printk(KERN_INFO "%s: Received :%s: " |
2854 | "expected :%s: \n", | 2050 | "expected :%s: \n", |
2855 | dev->name, | 2051 | dev->name, |
2856 | p_sysval->host_name, | 2052 | p_sysval->host_name, |
2857 | temp_host_name); | 2053 | temp_host_name); |
2858 | } | 2054 | } |
2859 | if (0!=memcmp(temp_ws_name,p_sysval->WS_name,8)) { | 2055 | if (memcmp(temp_ws_name, p_sysval->WS_name, 8)) { |
2860 | claw_snd_sys_validate_rsp(dev, p_ctlbk, | 2056 | claw_snd_sys_validate_rsp(dev, p_ctlbk, |
2861 | CLAW_RC_NAME_MISMATCH ); | 2057 | CLAW_RC_NAME_MISMATCH); |
2862 | CLAW_DBF_TEXT(2,setup,"WSNBAD"); | 2058 | CLAW_DBF_TEXT(2, setup, "WSNBAD"); |
2863 | CLAW_DBF_TEXT_(2,setup,"%s",p_sysval->WS_name); | 2059 | CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->WS_name); |
2864 | CLAW_DBF_TEXT_(2,setup,"%s",temp_ws_name); | 2060 | CLAW_DBF_TEXT_(2, setup, "%s", temp_ws_name); |
2865 | printk(KERN_INFO "%s: WS name mismatch\n", | 2061 | printk(KERN_INFO "%s: WS name mismatch\n", |
2866 | dev->name); | 2062 | dev->name); |
2867 | printk(KERN_INFO "%s: Received :%s: " | 2063 | printk(KERN_INFO "%s: Received :%s: " |
2868 | "expected :%s: \n", | 2064 | "expected :%s: \n", |
2869 | dev->name, | 2065 | dev->name, |
2870 | p_sysval->WS_name, | 2066 | p_sysval->WS_name, |
2871 | temp_ws_name); | 2067 | temp_ws_name); |
2872 | } | 2068 | } |
2873 | if (( p_sysval->write_frame_size < p_env->write_size) && | 2069 | if ((p_sysval->write_frame_size < p_env->write_size) && |
2874 | ( p_env->packing == 0)) { | 2070 | (p_env->packing == 0)) { |
2875 | claw_snd_sys_validate_rsp(dev, p_ctlbk, | 2071 | claw_snd_sys_validate_rsp(dev, p_ctlbk, |
2876 | CLAW_RC_HOST_RCV_TOO_SMALL ); | 2072 | CLAW_RC_HOST_RCV_TOO_SMALL); |
2877 | printk(KERN_INFO "%s: host write size is too " | 2073 | printk(KERN_INFO "%s: host write size is too " |
2878 | "small\n", dev->name); | 2074 | "small\n", dev->name); |
2879 | CLAW_DBF_TEXT(2,setup,"wrtszbad"); | 2075 | CLAW_DBF_TEXT(2, setup, "wrtszbad"); |
2880 | } | 2076 | } |
2881 | if (( p_sysval->read_frame_size < p_env->read_size) && | 2077 | if ((p_sysval->read_frame_size < p_env->read_size) && |
2882 | ( p_env->packing == 0)) { | 2078 | (p_env->packing == 0)) { |
2883 | claw_snd_sys_validate_rsp(dev, p_ctlbk, | 2079 | claw_snd_sys_validate_rsp(dev, p_ctlbk, |
2884 | CLAW_RC_HOST_RCV_TOO_SMALL ); | 2080 | CLAW_RC_HOST_RCV_TOO_SMALL); |
2885 | printk(KERN_INFO "%s: host read size is too " | 2081 | printk(KERN_INFO "%s: host read size is too " |
2886 | "small\n", dev->name); | 2082 | "small\n", dev->name); |
2887 | CLAW_DBF_TEXT(2,setup,"rdsizbad"); | 2083 | CLAW_DBF_TEXT(2, setup, "rdsizbad"); |
2888 | } | 2084 | } |
2889 | claw_snd_sys_validate_rsp(dev, p_ctlbk, 0 ); | 2085 | claw_snd_sys_validate_rsp(dev, p_ctlbk, 0); |
2890 | printk("%s: CLAW device %.8s: System validate" | 2086 | printk(KERN_INFO "%s: CLAW device %.8s: System validate " |
2891 | " completed.\n",dev->name, temp_ws_name); | 2087 | "completed.\n", dev->name, temp_ws_name); |
2892 | printk("%s: sys Validate Rsize:%d Wsize:%d\n",dev->name, | 2088 | printk("%s: sys Validate Rsize:%d Wsize:%d\n", dev->name, |
2893 | p_sysval->read_frame_size,p_sysval->write_frame_size); | 2089 | p_sysval->read_frame_size, p_sysval->write_frame_size); |
2894 | privptr->system_validate_comp=1; | 2090 | privptr->system_validate_comp = 1; |
2895 | if(strncmp(p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) { | 2091 | if (strncmp(p_env->api_type, WS_APPL_NAME_PACKED, 6) == 0) |
2896 | p_env->packing = PACKING_ASK; | 2092 | p_env->packing = PACKING_ASK; |
2897 | } | 2093 | claw_strt_conn_req(dev); |
2898 | claw_strt_conn_req(dev); | 2094 | break; |
2899 | break; | 2095 | case SYSTEM_VALIDATE_RESPONSE: |
2900 | 2096 | p_sysval = (struct sysval *)&(p_ctlbk->data); | |
2901 | case SYSTEM_VALIDATE_RESPONSE: | 2097 | printk("%s: Recv Sys Validate Resp: Vers=%d,Corr=%d,RC=%d," |
2902 | p_sysval=(struct sysval *)&(p_ctlbk->data); | 2098 | "WS name=%.8s,Host name=%.8s\n", |
2903 | printk("%s: Recv Sys Validate Resp: Vers=%d,Corr=%d,RC=%d," | 2099 | dev->name, |
2904 | "WS name=%.8s,Host name=%.8s\n", | 2100 | p_ctlbk->version, |
2905 | dev->name, | 2101 | p_ctlbk->correlator, |
2906 | p_ctlbk->version, | 2102 | p_ctlbk->rc, |
2907 | p_ctlbk->correlator, | 2103 | p_sysval->WS_name, |
2908 | p_ctlbk->rc, | 2104 | p_sysval->host_name); |
2909 | p_sysval->WS_name, | 2105 | switch (p_ctlbk->rc) { |
2910 | p_sysval->host_name); | 2106 | case 0: |
2911 | switch (p_ctlbk->rc) | 2107 | printk(KERN_INFO "%s: CLAW device " |
2912 | { | 2108 | "%.8s: System validate " |
2913 | case 0: | 2109 | "completed.\n", |
2914 | printk(KERN_INFO "%s: CLAW device " | 2110 | dev->name, temp_ws_name); |
2915 | "%.8s: System validate " | 2111 | if (privptr->system_validate_comp == 0) |
2916 | "completed.\n", | 2112 | claw_strt_conn_req(dev); |
2917 | dev->name, temp_ws_name); | 2113 | privptr->system_validate_comp = 1; |
2918 | if (privptr->system_validate_comp == 0) | 2114 | break; |
2919 | claw_strt_conn_req(dev); | 2115 | case CLAW_RC_NAME_MISMATCH: |
2920 | privptr->system_validate_comp=1; | 2116 | printk(KERN_INFO "%s: Sys Validate " |
2921 | break; | 2117 | "Resp : Host, WS name is " |
2922 | case CLAW_RC_NAME_MISMATCH: | 2118 | "mismatch\n", |
2923 | printk(KERN_INFO "%s: Sys Validate " | 2119 | dev->name); |
2924 | "Resp : Host, WS name is " | 2120 | break; |
2925 | "mismatch\n", | 2121 | case CLAW_RC_WRONG_VERSION: |
2926 | dev->name); | 2122 | printk(KERN_INFO "%s: Sys Validate " |
2927 | break; | 2123 | "Resp : Wrong version\n", |
2928 | case CLAW_RC_WRONG_VERSION: | 2124 | dev->name); |
2929 | printk(KERN_INFO "%s: Sys Validate " | 2125 | break; |
2930 | "Resp : Wrong version\n", | 2126 | case CLAW_RC_HOST_RCV_TOO_SMALL: |
2931 | dev->name); | 2127 | printk(KERN_INFO "%s: Sys Validate " |
2932 | break; | 2128 | "Resp : bad frame size\n", |
2933 | case CLAW_RC_HOST_RCV_TOO_SMALL: | 2129 | dev->name); |
2934 | printk(KERN_INFO "%s: Sys Validate " | 2130 | break; |
2935 | "Resp : bad frame size\n", | 2131 | default: |
2936 | dev->name); | 2132 | printk(KERN_INFO "%s: Sys Validate " |
2937 | break; | 2133 | "error code=%d \n", |
2938 | default: | 2134 | dev->name, p_ctlbk->rc); |
2939 | printk(KERN_INFO "%s: Sys Validate " | 2135 | break; |
2940 | "error code=%d \n", | 2136 | } |
2941 | dev->name, p_ctlbk->rc ); | 2137 | break; |
2942 | break; | ||
2943 | } | ||
2944 | break; | ||
2945 | 2138 | ||
2946 | case CONNECTION_REQUEST: | 2139 | case CONNECTION_REQUEST: |
2947 | p_connect=(struct conncmd *)&(p_ctlbk->data); | 2140 | p_connect = (struct conncmd *)&(p_ctlbk->data); |
2948 | printk(KERN_INFO "%s: Recv Conn Req: Vers=%d,link_id=%d," | 2141 | printk(KERN_INFO "%s: Recv Conn Req: Vers=%d,link_id=%d," |
2949 | "Corr=%d,HOST appl=%.8s,WS appl=%.8s\n", | 2142 | "Corr=%d,HOST appl=%.8s,WS appl=%.8s\n", |
2950 | dev->name, | 2143 | dev->name, |
2951 | p_ctlbk->version, | 2144 | p_ctlbk->version, |
2952 | p_ctlbk->linkid, | 2145 | p_ctlbk->linkid, |
2953 | p_ctlbk->correlator, | 2146 | p_ctlbk->correlator, |
2954 | p_connect->host_name, | 2147 | p_connect->host_name, |
2955 | p_connect->WS_name); | 2148 | p_connect->WS_name); |
2956 | if (privptr->active_link_ID!=0 ) { | 2149 | if (privptr->active_link_ID != 0) { |
2957 | claw_snd_disc(dev, p_ctlbk); | 2150 | claw_snd_disc(dev, p_ctlbk); |
2958 | printk(KERN_INFO "%s: Conn Req error : " | 2151 | printk(KERN_INFO "%s: Conn Req error : " |
2959 | "already logical link is active \n", | 2152 | "already logical link is active \n", |
2960 | dev->name); | 2153 | dev->name); |
2961 | } | 2154 | } |
2962 | if (p_ctlbk->linkid!=1 ) { | 2155 | if (p_ctlbk->linkid != 1) { |
2963 | claw_snd_disc(dev, p_ctlbk); | 2156 | claw_snd_disc(dev, p_ctlbk); |
2964 | printk(KERN_INFO "%s: Conn Req error : " | 2157 | printk(KERN_INFO "%s: Conn Req error : " |
2965 | "req logical link id is not 1\n", | 2158 | "req logical link id is not 1\n", |
2159 | dev->name); | ||
2160 | } | ||
2161 | rc = find_link(dev, p_connect->host_name, p_connect->WS_name); | ||
2162 | if (rc != 0) { | ||
2163 | claw_snd_disc(dev, p_ctlbk); | ||
2164 | printk(KERN_INFO "%s: Conn Resp error: " | ||
2165 | "req appl name does not match\n", | ||
2166 | dev->name); | ||
2167 | } | ||
2168 | claw_send_control(dev, | ||
2169 | CONNECTION_CONFIRM, p_ctlbk->linkid, | ||
2170 | p_ctlbk->correlator, | ||
2171 | 0, p_connect->host_name, | ||
2172 | p_connect->WS_name); | ||
2173 | if (p_env->packing == PACKING_ASK) { | ||
2174 | p_env->packing = PACK_SEND; | ||
2175 | claw_snd_conn_req(dev, 0); | ||
2176 | } | ||
2177 | printk(KERN_INFO "%s: CLAW device %.8s: Connection " | ||
2178 | "completed link_id=%d.\n", | ||
2179 | dev->name, temp_ws_name, | ||
2180 | p_ctlbk->linkid); | ||
2181 | privptr->active_link_ID = p_ctlbk->linkid; | ||
2182 | p_ch = &privptr->channel[WRITE]; | ||
2183 | wake_up(&p_ch->wait); /* wake up claw_open ( WRITE) */ | ||
2184 | break; | ||
2185 | case CONNECTION_RESPONSE: | ||
2186 | p_connect = (struct conncmd *)&(p_ctlbk->data); | ||
2187 | printk(KERN_INFO "%s: Revc Conn Resp: Vers=%d,link_id=%d," | ||
2188 | "Corr=%d,RC=%d,Host appl=%.8s, WS appl=%.8s\n", | ||
2189 | dev->name, | ||
2190 | p_ctlbk->version, | ||
2191 | p_ctlbk->linkid, | ||
2192 | p_ctlbk->correlator, | ||
2193 | p_ctlbk->rc, | ||
2194 | p_connect->host_name, | ||
2195 | p_connect->WS_name); | ||
2196 | |||
2197 | if (p_ctlbk->rc != 0) { | ||
2198 | printk(KERN_INFO "%s: Conn Resp error: rc=%d \n", | ||
2199 | dev->name, p_ctlbk->rc); | ||
2200 | return 1; | ||
2201 | } | ||
2202 | rc = find_link(dev, | ||
2203 | p_connect->host_name, p_connect->WS_name); | ||
2204 | if (rc != 0) { | ||
2205 | claw_snd_disc(dev, p_ctlbk); | ||
2206 | printk(KERN_INFO "%s: Conn Resp error: " | ||
2207 | "req appl name does not match\n", | ||
2208 | dev->name); | ||
2209 | } | ||
2210 | /* should be until CONNECTION_CONFIRM */ | ||
2211 | privptr->active_link_ID = -(p_ctlbk->linkid); | ||
2212 | break; | ||
2213 | case CONNECTION_CONFIRM: | ||
2214 | p_connect = (struct conncmd *)&(p_ctlbk->data); | ||
2215 | printk(KERN_INFO "%s: Recv Conn Confirm:Vers=%d,link_id=%d," | ||
2216 | "Corr=%d,Host appl=%.8s,WS appl=%.8s\n", | ||
2217 | dev->name, | ||
2218 | p_ctlbk->version, | ||
2219 | p_ctlbk->linkid, | ||
2220 | p_ctlbk->correlator, | ||
2221 | p_connect->host_name, | ||
2222 | p_connect->WS_name); | ||
2223 | if (p_ctlbk->linkid == -(privptr->active_link_ID)) { | ||
2224 | privptr->active_link_ID = p_ctlbk->linkid; | ||
2225 | if (p_env->packing > PACKING_ASK) { | ||
2226 | printk(KERN_INFO "%s: Confirmed Now packing\n", | ||
2966 | dev->name); | 2227 | dev->name); |
2967 | } | ||
2968 | rc=find_link(dev, | ||
2969 | p_connect->host_name, p_connect->WS_name); | ||
2970 | if (rc!=0) { | ||
2971 | claw_snd_disc(dev, p_ctlbk); | ||
2972 | printk(KERN_INFO "%s: Conn Req error : " | ||
2973 | "req appl name does not match\n", | ||
2974 | dev->name); | ||
2975 | } | ||
2976 | claw_send_control(dev, | ||
2977 | CONNECTION_CONFIRM, p_ctlbk->linkid, | ||
2978 | p_ctlbk->correlator, | ||
2979 | 0, p_connect->host_name, | ||
2980 | p_connect->WS_name); | ||
2981 | if (p_env->packing == PACKING_ASK) { | ||
2982 | printk("%s: Now Pack ask\n",dev->name); | ||
2983 | p_env->packing = PACK_SEND; | ||
2984 | claw_snd_conn_req(dev,0); | ||
2985 | } | ||
2986 | printk(KERN_INFO "%s: CLAW device %.8s: Connection " | ||
2987 | "completed link_id=%d.\n", | ||
2988 | dev->name, temp_ws_name, | ||
2989 | p_ctlbk->linkid); | ||
2990 | privptr->active_link_ID=p_ctlbk->linkid; | ||
2991 | p_ch=&privptr->channel[WRITE]; | ||
2992 | wake_up(&p_ch->wait); /* wake up claw_open ( WRITE) */ | ||
2993 | break; | ||
2994 | case CONNECTION_RESPONSE: | ||
2995 | p_connect=(struct conncmd *)&(p_ctlbk->data); | ||
2996 | printk(KERN_INFO "%s: Revc Conn Resp: Vers=%d,link_id=%d," | ||
2997 | "Corr=%d,RC=%d,Host appl=%.8s, WS appl=%.8s\n", | ||
2998 | dev->name, | ||
2999 | p_ctlbk->version, | ||
3000 | p_ctlbk->linkid, | ||
3001 | p_ctlbk->correlator, | ||
3002 | p_ctlbk->rc, | ||
3003 | p_connect->host_name, | ||
3004 | p_connect->WS_name); | ||
3005 | |||
3006 | if (p_ctlbk->rc !=0 ) { | ||
3007 | printk(KERN_INFO "%s: Conn Resp error: rc=%d \n", | ||
3008 | dev->name, p_ctlbk->rc); | ||
3009 | return 1; | ||
3010 | } | ||
3011 | rc=find_link(dev, | ||
3012 | p_connect->host_name, p_connect->WS_name); | ||
3013 | if (rc!=0) { | ||
3014 | claw_snd_disc(dev, p_ctlbk); | ||
3015 | printk(KERN_INFO "%s: Conn Resp error: " | ||
3016 | "req appl name does not match\n", | ||
3017 | dev->name); | ||
3018 | } | ||
3019 | /* should be until CONNECTION_CONFIRM */ | ||
3020 | privptr->active_link_ID = - (p_ctlbk->linkid); | ||
3021 | break; | ||
3022 | case CONNECTION_CONFIRM: | ||
3023 | p_connect=(struct conncmd *)&(p_ctlbk->data); | ||
3024 | printk(KERN_INFO "%s: Recv Conn Confirm:Vers=%d,link_id=%d," | ||
3025 | "Corr=%d,Host appl=%.8s,WS appl=%.8s\n", | ||
3026 | dev->name, | ||
3027 | p_ctlbk->version, | ||
3028 | p_ctlbk->linkid, | ||
3029 | p_ctlbk->correlator, | ||
3030 | p_connect->host_name, | ||
3031 | p_connect->WS_name); | ||
3032 | if (p_ctlbk->linkid== -(privptr->active_link_ID)) { | ||
3033 | privptr->active_link_ID=p_ctlbk->linkid; | ||
3034 | if (p_env->packing > PACKING_ASK) { | ||
3035 | printk(KERN_INFO "%s: Confirmed Now packing\n",dev->name); | ||
3036 | p_env->packing = DO_PACKED; | ||
3037 | } | ||
3038 | p_ch=&privptr->channel[WRITE]; | ||
3039 | wake_up(&p_ch->wait); | ||
3040 | } | ||
3041 | else { | ||
3042 | printk(KERN_INFO "%s: Conn confirm: " | ||
3043 | "unexpected linkid=%d \n", | ||
3044 | dev->name, p_ctlbk->linkid); | ||
3045 | claw_snd_disc(dev, p_ctlbk); | ||
3046 | } | ||
3047 | break; | ||
3048 | case DISCONNECT: | ||
3049 | printk(KERN_INFO "%s: Disconnect: " | ||
3050 | "Vers=%d,link_id=%d,Corr=%d\n", | ||
3051 | dev->name, p_ctlbk->version, | ||
3052 | p_ctlbk->linkid, p_ctlbk->correlator); | ||
3053 | if ((p_ctlbk->linkid == 2) && | ||
3054 | (p_env->packing == PACK_SEND)) { | ||
3055 | privptr->active_link_ID = 1; | ||
3056 | p_env->packing = DO_PACKED; | 2228 | p_env->packing = DO_PACKED; |
3057 | } | 2229 | } |
3058 | else | 2230 | p_ch = &privptr->channel[WRITE]; |
3059 | privptr->active_link_ID=0; | 2231 | wake_up(&p_ch->wait); |
3060 | break; | 2232 | } else { |
3061 | case CLAW_ERROR: | 2233 | printk(KERN_INFO "%s: Conn confirm: " |
3062 | printk(KERN_INFO "%s: CLAW ERROR detected\n", | 2234 | "unexpected linkid=%d \n", |
3063 | dev->name); | 2235 | dev->name, p_ctlbk->linkid); |
3064 | break; | 2236 | claw_snd_disc(dev, p_ctlbk); |
3065 | default: | 2237 | } |
3066 | printk(KERN_INFO "%s: Unexpected command code=%d \n", | 2238 | break; |
3067 | dev->name, p_ctlbk->command); | 2239 | case DISCONNECT: |
3068 | break; | 2240 | printk(KERN_INFO "%s: Disconnect: " |
2241 | "Vers=%d,link_id=%d,Corr=%d\n", | ||
2242 | dev->name, p_ctlbk->version, | ||
2243 | p_ctlbk->linkid, p_ctlbk->correlator); | ||
2244 | if ((p_ctlbk->linkid == 2) && | ||
2245 | (p_env->packing == PACK_SEND)) { | ||
2246 | privptr->active_link_ID = 1; | ||
2247 | p_env->packing = DO_PACKED; | ||
2248 | } else | ||
2249 | privptr->active_link_ID = 0; | ||
2250 | break; | ||
2251 | case CLAW_ERROR: | ||
2252 | printk(KERN_INFO "%s: CLAW ERROR detected\n", | ||
2253 | dev->name); | ||
2254 | break; | ||
2255 | default: | ||
2256 | printk(KERN_INFO "%s: Unexpected command code=%d \n", | ||
2257 | dev->name, p_ctlbk->command); | ||
2258 | break; | ||
3069 | } | 2259 | } |
3070 | 2260 | ||
3071 | #ifdef FUNCTRACE | ||
3072 | printk(KERN_INFO "%s: %s() exit on line %d, rc = 0\n", | ||
3073 | dev->name,__func__,__LINE__); | ||
3074 | #endif | ||
3075 | |||
3076 | return 0; | 2261 | return 0; |
3077 | } /* end of claw_process_control */ | 2262 | } /* end of claw_process_control */ |
3078 | 2263 | ||
@@ -3092,18 +2277,7 @@ claw_send_control(struct net_device *dev, __u8 type, __u8 link, | |||
3092 | struct conncmd *p_connect; | 2277 | struct conncmd *p_connect; |
3093 | struct sk_buff *skb; | 2278 | struct sk_buff *skb; |
3094 | 2279 | ||
3095 | #ifdef FUNCTRACE | 2280 | CLAW_DBF_TEXT(2, setup, "sndcntl"); |
3096 | printk(KERN_INFO "%s:%s > enter \n",dev->name,__func__); | ||
3097 | #endif | ||
3098 | CLAW_DBF_TEXT(2,setup,"sndcntl"); | ||
3099 | #ifdef DEBUGMSG | ||
3100 | printk(KERN_INFO "%s: Sending Control Packet \n",dev->name); | ||
3101 | printk(KERN_INFO "%s: variable type = 0x%X, link = " | ||
3102 | "%d, correlator = %d, rc = %d\n", | ||
3103 | dev->name,type, link, correlator, rc); | ||
3104 | printk(KERN_INFO "%s: variable local_name = %s, " | ||
3105 | "remote_name = %s\n",dev->name, local_name, remote_name); | ||
3106 | #endif | ||
3107 | privptr=dev->priv; | 2281 | privptr=dev->priv; |
3108 | p_ctl=(struct clawctl *)&privptr->ctl_bk; | 2282 | p_ctl=(struct clawctl *)&privptr->ctl_bk; |
3109 | 2283 | ||
@@ -3125,7 +2299,7 @@ claw_send_control(struct net_device *dev, __u8 type, __u8 link, | |||
3125 | p_sysval->read_frame_size=DEF_PACK_BUFSIZE; | 2299 | p_sysval->read_frame_size=DEF_PACK_BUFSIZE; |
3126 | p_sysval->write_frame_size=DEF_PACK_BUFSIZE; | 2300 | p_sysval->write_frame_size=DEF_PACK_BUFSIZE; |
3127 | } else { | 2301 | } else { |
3128 | /* how big is the piggest group of packets */ | 2302 | /* how big is the biggest group of packets */ |
3129 | p_sysval->read_frame_size=privptr->p_env->read_size; | 2303 | p_sysval->read_frame_size=privptr->p_env->read_size; |
3130 | p_sysval->write_frame_size=privptr->p_env->write_size; | 2304 | p_sysval->write_frame_size=privptr->p_env->write_size; |
3131 | } | 2305 | } |
@@ -3155,29 +2329,14 @@ claw_send_control(struct net_device *dev, __u8 type, __u8 link, | |||
3155 | 2329 | ||
3156 | skb = dev_alloc_skb(sizeof(struct clawctl)); | 2330 | skb = dev_alloc_skb(sizeof(struct clawctl)); |
3157 | if (!skb) { | 2331 | if (!skb) { |
3158 | printk( "%s:%s low on mem, returning...\n", | ||
3159 | dev->name,__func__); | ||
3160 | #ifdef DEBUG | ||
3161 | printk(KERN_INFO "%s:%s Exit, rc = ENOMEM\n", | ||
3162 | dev->name,__func__); | ||
3163 | #endif | ||
3164 | return -ENOMEM; | 2332 | return -ENOMEM; |
3165 | } | 2333 | } |
3166 | memcpy(skb_put(skb, sizeof(struct clawctl)), | 2334 | memcpy(skb_put(skb, sizeof(struct clawctl)), |
3167 | p_ctl, sizeof(struct clawctl)); | 2335 | p_ctl, sizeof(struct clawctl)); |
3168 | #ifdef IOTRACE | ||
3169 | printk(KERN_INFO "%s: outbnd claw cntl data \n",dev->name); | ||
3170 | dumpit((char *)p_ctl,sizeof(struct clawctl)); | ||
3171 | #endif | ||
3172 | if (privptr->p_env->packing >= PACK_SEND) | 2336 | if (privptr->p_env->packing >= PACK_SEND) |
3173 | claw_hw_tx(skb, dev, 1); | 2337 | claw_hw_tx(skb, dev, 1); |
3174 | else | 2338 | else |
3175 | claw_hw_tx(skb, dev, 0); | 2339 | claw_hw_tx(skb, dev, 0); |
3176 | #ifdef FUNCTRACE | ||
3177 | printk(KERN_INFO "%s:%s Exit on line %d\n", | ||
3178 | dev->name,__func__,__LINE__); | ||
3179 | #endif | ||
3180 | |||
3181 | return 0; | 2340 | return 0; |
3182 | } /* end of claw_send_control */ | 2341 | } /* end of claw_send_control */ |
3183 | 2342 | ||
@@ -3192,22 +2351,11 @@ claw_snd_conn_req(struct net_device *dev, __u8 link) | |||
3192 | struct claw_privbk *privptr=dev->priv; | 2351 | struct claw_privbk *privptr=dev->priv; |
3193 | struct clawctl *p_ctl; | 2352 | struct clawctl *p_ctl; |
3194 | 2353 | ||
3195 | #ifdef FUNCTRACE | 2354 | CLAW_DBF_TEXT(2, setup, "snd_conn"); |
3196 | printk(KERN_INFO "%s:%s Enter \n",dev->name,__func__); | ||
3197 | #endif | ||
3198 | CLAW_DBF_TEXT(2,setup,"snd_conn"); | ||
3199 | #ifdef DEBUGMSG | ||
3200 | printk(KERN_INFO "%s: variable link = %X, dev =\n",dev->name, link); | ||
3201 | dumpit((char *) dev, sizeof(struct net_device)); | ||
3202 | #endif | ||
3203 | rc = 1; | 2355 | rc = 1; |
3204 | p_ctl=(struct clawctl *)&privptr->ctl_bk; | 2356 | p_ctl=(struct clawctl *)&privptr->ctl_bk; |
3205 | p_ctl->linkid = link; | 2357 | p_ctl->linkid = link; |
3206 | if ( privptr->system_validate_comp==0x00 ) { | 2358 | if ( privptr->system_validate_comp==0x00 ) { |
3207 | #ifdef FUNCTRACE | ||
3208 | printk(KERN_INFO "%s:%s Exit on line %d, rc = 1\n", | ||
3209 | dev->name,__func__,__LINE__); | ||
3210 | #endif | ||
3211 | return rc; | 2359 | return rc; |
3212 | } | 2360 | } |
3213 | if (privptr->p_env->packing == PACKING_ASK ) | 2361 | if (privptr->p_env->packing == PACKING_ASK ) |
@@ -3220,10 +2368,6 @@ claw_snd_conn_req(struct net_device *dev, __u8 link) | |||
3220 | if (privptr->p_env->packing == 0) | 2368 | if (privptr->p_env->packing == 0) |
3221 | rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0, | 2369 | rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0, |
3222 | HOST_APPL_NAME, privptr->p_env->api_type); | 2370 | HOST_APPL_NAME, privptr->p_env->api_type); |
3223 | #ifdef FUNCTRACE | ||
3224 | printk(KERN_INFO "%s:%s Exit on line %d, rc = %d\n", | ||
3225 | dev->name,__func__,__LINE__, rc); | ||
3226 | #endif | ||
3227 | return rc; | 2371 | return rc; |
3228 | 2372 | ||
3229 | } /* end of claw_snd_conn_req */ | 2373 | } /* end of claw_snd_conn_req */ |
@@ -3240,25 +2384,12 @@ claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl) | |||
3240 | int rc; | 2384 | int rc; |
3241 | struct conncmd * p_connect; | 2385 | struct conncmd * p_connect; |
3242 | 2386 | ||
3243 | #ifdef FUNCTRACE | 2387 | CLAW_DBF_TEXT(2, setup, "snd_dsc"); |
3244 | printk(KERN_INFO "%s:%s Enter\n",dev->name,__func__); | ||
3245 | #endif | ||
3246 | CLAW_DBF_TEXT(2,setup,"snd_dsc"); | ||
3247 | #ifdef DEBUGMSG | ||
3248 | printk(KERN_INFO "%s: variable dev =\n",dev->name); | ||
3249 | dumpit((char *) dev, sizeof(struct net_device)); | ||
3250 | printk(KERN_INFO "%s: variable p_ctl",dev->name); | ||
3251 | dumpit((char *) p_ctl, sizeof(struct clawctl)); | ||
3252 | #endif | ||
3253 | p_connect=(struct conncmd *)&p_ctl->data; | 2388 | p_connect=(struct conncmd *)&p_ctl->data; |
3254 | 2389 | ||
3255 | rc=claw_send_control(dev, DISCONNECT, p_ctl->linkid, | 2390 | rc=claw_send_control(dev, DISCONNECT, p_ctl->linkid, |
3256 | p_ctl->correlator, 0, | 2391 | p_ctl->correlator, 0, |
3257 | p_connect->host_name, p_connect->WS_name); | 2392 | p_connect->host_name, p_connect->WS_name); |
3258 | #ifdef FUNCTRACE | ||
3259 | printk(KERN_INFO "%s:%s Exit on line %d, rc = %d\n", | ||
3260 | dev->name,__func__, __LINE__, rc); | ||
3261 | #endif | ||
3262 | return rc; | 2393 | return rc; |
3263 | } /* end of claw_snd_disc */ | 2394 | } /* end of claw_snd_disc */ |
3264 | 2395 | ||
@@ -3276,18 +2407,7 @@ claw_snd_sys_validate_rsp(struct net_device *dev, | |||
3276 | struct claw_privbk *privptr; | 2407 | struct claw_privbk *privptr; |
3277 | int rc; | 2408 | int rc; |
3278 | 2409 | ||
3279 | #ifdef FUNCTRACE | 2410 | CLAW_DBF_TEXT(2, setup, "chkresp"); |
3280 | printk(KERN_INFO "%s:%s Enter\n", | ||
3281 | dev->name,__func__); | ||
3282 | #endif | ||
3283 | CLAW_DBF_TEXT(2,setup,"chkresp"); | ||
3284 | #ifdef DEBUGMSG | ||
3285 | printk(KERN_INFO "%s: variable return_code = %d, dev =\n", | ||
3286 | dev->name, return_code); | ||
3287 | dumpit((char *) dev, sizeof(struct net_device)); | ||
3288 | printk(KERN_INFO "%s: variable p_ctl =\n",dev->name); | ||
3289 | dumpit((char *) p_ctl, sizeof(struct clawctl)); | ||
3290 | #endif | ||
3291 | privptr = dev->priv; | 2411 | privptr = dev->priv; |
3292 | p_env=privptr->p_env; | 2412 | p_env=privptr->p_env; |
3293 | rc=claw_send_control(dev, SYSTEM_VALIDATE_RESPONSE, | 2413 | rc=claw_send_control(dev, SYSTEM_VALIDATE_RESPONSE, |
@@ -3296,10 +2416,6 @@ claw_snd_sys_validate_rsp(struct net_device *dev, | |||
3296 | return_code, | 2416 | return_code, |
3297 | p_env->host_name, | 2417 | p_env->host_name, |
3298 | p_env->adapter_name ); | 2418 | p_env->adapter_name ); |
3299 | #ifdef FUNCTRACE | ||
3300 | printk(KERN_INFO "%s:%s Exit on line %d, rc = %d\n", | ||
3301 | dev->name,__func__,__LINE__, rc); | ||
3302 | #endif | ||
3303 | return rc; | 2419 | return rc; |
3304 | } /* end of claw_snd_sys_validate_rsp */ | 2420 | } /* end of claw_snd_sys_validate_rsp */ |
3305 | 2421 | ||
@@ -3313,19 +2429,8 @@ claw_strt_conn_req(struct net_device *dev ) | |||
3313 | { | 2429 | { |
3314 | int rc; | 2430 | int rc; |
3315 | 2431 | ||
3316 | #ifdef FUNCTRACE | 2432 | CLAW_DBF_TEXT(2, setup, "conn_req"); |
3317 | printk(KERN_INFO "%s:%s Enter\n",dev->name,__func__); | ||
3318 | #endif | ||
3319 | CLAW_DBF_TEXT(2,setup,"conn_req"); | ||
3320 | #ifdef DEBUGMSG | ||
3321 | printk(KERN_INFO "%s: variable dev =\n",dev->name); | ||
3322 | dumpit((char *) dev, sizeof(struct net_device)); | ||
3323 | #endif | ||
3324 | rc=claw_snd_conn_req(dev, 1); | 2433 | rc=claw_snd_conn_req(dev, 1); |
3325 | #ifdef FUNCTRACE | ||
3326 | printk(KERN_INFO "%s:%s Exit on line %d, rc = %d\n", | ||
3327 | dev->name,__func__,__LINE__, rc); | ||
3328 | #endif | ||
3329 | return rc; | 2434 | return rc; |
3330 | } /* end of claw_strt_conn_req */ | 2435 | } /* end of claw_strt_conn_req */ |
3331 | 2436 | ||
@@ -3339,15 +2444,9 @@ static struct | |||
3339 | net_device_stats *claw_stats(struct net_device *dev) | 2444 | net_device_stats *claw_stats(struct net_device *dev) |
3340 | { | 2445 | { |
3341 | struct claw_privbk *privptr; | 2446 | struct claw_privbk *privptr; |
3342 | #ifdef FUNCTRACE | 2447 | |
3343 | printk(KERN_INFO "%s:%s Enter\n",dev->name,__func__); | 2448 | CLAW_DBF_TEXT(4, trace, "stats"); |
3344 | #endif | ||
3345 | CLAW_DBF_TEXT(4,trace,"stats"); | ||
3346 | privptr = dev->priv; | 2449 | privptr = dev->priv; |
3347 | #ifdef FUNCTRACE | ||
3348 | printk(KERN_INFO "%s:%s Exit on line %d\n", | ||
3349 | dev->name,__func__,__LINE__); | ||
3350 | #endif | ||
3351 | return &privptr->stats; | 2450 | return &privptr->stats; |
3352 | } /* end of claw_stats */ | 2451 | } /* end of claw_stats */ |
3353 | 2452 | ||
@@ -3368,36 +2467,28 @@ unpack_read(struct net_device *dev ) | |||
3368 | struct clawph *p_packh; | 2467 | struct clawph *p_packh; |
3369 | void *p_packd; | 2468 | void *p_packd; |
3370 | struct clawctl *p_ctlrec=NULL; | 2469 | struct clawctl *p_ctlrec=NULL; |
2470 | struct device *p_dev; | ||
3371 | 2471 | ||
3372 | __u32 len_of_data; | 2472 | __u32 len_of_data; |
3373 | __u32 pack_off; | 2473 | __u32 pack_off; |
3374 | __u8 link_num; | 2474 | __u8 link_num; |
3375 | __u8 mtc_this_frm=0; | 2475 | __u8 mtc_this_frm=0; |
3376 | __u32 bytes_to_mov; | 2476 | __u32 bytes_to_mov; |
3377 | struct chbk *p_ch = NULL; | ||
3378 | int i=0; | 2477 | int i=0; |
3379 | int p=0; | 2478 | int p=0; |
3380 | 2479 | ||
3381 | #ifdef FUNCTRACE | 2480 | CLAW_DBF_TEXT(4, trace, "unpkread"); |
3382 | printk(KERN_INFO "%s:%s enter \n",dev->name,__func__); | ||
3383 | #endif | ||
3384 | CLAW_DBF_TEXT(4,trace,"unpkread"); | ||
3385 | p_first_ccw=NULL; | 2481 | p_first_ccw=NULL; |
3386 | p_last_ccw=NULL; | 2482 | p_last_ccw=NULL; |
3387 | p_packh=NULL; | 2483 | p_packh=NULL; |
3388 | p_packd=NULL; | 2484 | p_packd=NULL; |
3389 | privptr=dev->priv; | 2485 | privptr=dev->priv; |
2486 | |||
2487 | p_dev = &privptr->channel[READ].cdev->dev; | ||
3390 | p_env = privptr->p_env; | 2488 | p_env = privptr->p_env; |
3391 | p_this_ccw=privptr->p_read_active_first; | 2489 | p_this_ccw=privptr->p_read_active_first; |
3392 | i=0; | 2490 | i=0; |
3393 | while (p_this_ccw!=NULL && p_this_ccw->header.flag!=CLAW_PENDING) { | 2491 | while (p_this_ccw!=NULL && p_this_ccw->header.flag!=CLAW_PENDING) { |
3394 | #ifdef IOTRACE | ||
3395 | printk(KERN_INFO "%s p_this_ccw \n",dev->name); | ||
3396 | dumpit((char*)p_this_ccw, sizeof(struct ccwbk)); | ||
3397 | printk(KERN_INFO "%s Inbound p_this_ccw->p_buffer(64)" | ||
3398 | " pk=%d \n",dev->name,p_env->packing); | ||
3399 | dumpit((char *)p_this_ccw->p_buffer, 64 ); | ||
3400 | #endif | ||
3401 | pack_off = 0; | 2492 | pack_off = 0; |
3402 | p = 0; | 2493 | p = 0; |
3403 | p_this_ccw->header.flag=CLAW_PENDING; | 2494 | p_this_ccw->header.flag=CLAW_PENDING; |
@@ -3419,10 +2510,6 @@ unpack_read(struct net_device *dev ) | |||
3419 | else | 2510 | else |
3420 | link_num=p_this_ccw->header.opcode / 8; | 2511 | link_num=p_this_ccw->header.opcode / 8; |
3421 | if ((p_this_ccw->header.opcode & MORE_to_COME_FLAG)!=0) { | 2512 | if ((p_this_ccw->header.opcode & MORE_to_COME_FLAG)!=0) { |
3422 | #ifdef DEBUGMSG | ||
3423 | printk(KERN_INFO "%s: %s > More_to_come is ON\n", | ||
3424 | dev->name,__func__); | ||
3425 | #endif | ||
3426 | mtc_this_frm=1; | 2513 | mtc_this_frm=1; |
3427 | if (p_this_ccw->header.length!= | 2514 | if (p_this_ccw->header.length!= |
3428 | privptr->p_env->read_size ) { | 2515 | privptr->p_env->read_size ) { |
@@ -3445,22 +2532,12 @@ unpack_read(struct net_device *dev ) | |||
3445 | privptr->mtc_skipping=0; /* Ok, the end */ | 2532 | privptr->mtc_skipping=0; /* Ok, the end */ |
3446 | privptr->mtc_logical_link=-1; | 2533 | privptr->mtc_logical_link=-1; |
3447 | } | 2534 | } |
3448 | #ifdef DEBUGMSG | ||
3449 | printk(KERN_INFO "%s:%s goto next " | ||
3450 | "frame from MoretoComeSkip \n", | ||
3451 | dev->name,__func__); | ||
3452 | #endif | ||
3453 | goto NextFrame; | 2535 | goto NextFrame; |
3454 | } | 2536 | } |
3455 | 2537 | ||
3456 | if (link_num==0) { | 2538 | if (link_num==0) { |
3457 | claw_process_control(dev, p_this_ccw); | 2539 | claw_process_control(dev, p_this_ccw); |
3458 | #ifdef DEBUGMSG | 2540 | CLAW_DBF_TEXT(4, trace, "UnpkCntl"); |
3459 | printk(KERN_INFO "%s:%s goto next " | ||
3460 | "frame from claw_process_control \n", | ||
3461 | dev->name,__func__); | ||
3462 | #endif | ||
3463 | CLAW_DBF_TEXT(4,trace,"UnpkCntl"); | ||
3464 | goto NextFrame; | 2541 | goto NextFrame; |
3465 | } | 2542 | } |
3466 | unpack_next: | 2543 | unpack_next: |
@@ -3479,10 +2556,6 @@ unpack_next: | |||
3479 | bytes_to_mov=p_this_ccw->header.length; | 2556 | bytes_to_mov=p_this_ccw->header.length; |
3480 | } | 2557 | } |
3481 | if (privptr->mtc_logical_link<0) { | 2558 | if (privptr->mtc_logical_link<0) { |
3482 | #ifdef DEBUGMSG | ||
3483 | printk(KERN_INFO "%s: %s mtc_logical_link < 0 \n", | ||
3484 | dev->name,__func__); | ||
3485 | #endif | ||
3486 | 2559 | ||
3487 | /* | 2560 | /* |
3488 | * if More-To-Come is set in this frame then we don't know | 2561 | * if More-To-Come is set in this frame then we don't know |
@@ -3496,15 +2569,6 @@ unpack_next: | |||
3496 | 2569 | ||
3497 | if (bytes_to_mov > (MAX_ENVELOPE_SIZE- privptr->mtc_offset) ) { | 2570 | if (bytes_to_mov > (MAX_ENVELOPE_SIZE- privptr->mtc_offset) ) { |
3498 | /* error */ | 2571 | /* error */ |
3499 | #ifdef DEBUGMSG | ||
3500 | printk(KERN_INFO "%s: %s > goto next " | ||
3501 | "frame from MoretoComeSkip \n", | ||
3502 | dev->name, | ||
3503 | __func__); | ||
3504 | printk(KERN_INFO " bytes_to_mov %d > (MAX_ENVELOPE_" | ||
3505 | "SIZE-privptr->mtc_offset %d)\n", | ||
3506 | bytes_to_mov,(MAX_ENVELOPE_SIZE- privptr->mtc_offset)); | ||
3507 | #endif | ||
3508 | privptr->stats.rx_frame_errors++; | 2572 | privptr->stats.rx_frame_errors++; |
3509 | goto NextFrame; | 2573 | goto NextFrame; |
3510 | } | 2574 | } |
@@ -3516,16 +2580,6 @@ unpack_next: | |||
3516 | memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset, | 2580 | memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset, |
3517 | p_this_ccw->p_buffer, bytes_to_mov); | 2581 | p_this_ccw->p_buffer, bytes_to_mov); |
3518 | } | 2582 | } |
3519 | #ifdef DEBUGMSG | ||
3520 | printk(KERN_INFO "%s: %s() received data \n", | ||
3521 | dev->name,__func__); | ||
3522 | if (p_env->packing == DO_PACKED) | ||
3523 | dumpit((char *)p_packd+sizeof(struct clawph),32); | ||
3524 | else | ||
3525 | dumpit((char *)p_this_ccw->p_buffer, 32); | ||
3526 | printk(KERN_INFO "%s: %s() bytelength %d \n", | ||
3527 | dev->name,__func__,bytes_to_mov); | ||
3528 | #endif | ||
3529 | if (mtc_this_frm==0) { | 2583 | if (mtc_this_frm==0) { |
3530 | len_of_data=privptr->mtc_offset+bytes_to_mov; | 2584 | len_of_data=privptr->mtc_offset+bytes_to_mov; |
3531 | skb=dev_alloc_skb(len_of_data); | 2585 | skb=dev_alloc_skb(len_of_data); |
@@ -3540,11 +2594,6 @@ unpack_next: | |||
3540 | privptr->stats.rx_packets++; | 2594 | privptr->stats.rx_packets++; |
3541 | privptr->stats.rx_bytes+=len_of_data; | 2595 | privptr->stats.rx_bytes+=len_of_data; |
3542 | netif_rx(skb); | 2596 | netif_rx(skb); |
3543 | #ifdef DEBUGMSG | ||
3544 | printk(KERN_INFO "%s: %s() netif_" | ||
3545 | "rx(skb) completed \n", | ||
3546 | dev->name,__func__); | ||
3547 | #endif | ||
3548 | } | 2597 | } |
3549 | else { | 2598 | else { |
3550 | privptr->stats.rx_dropped++; | 2599 | privptr->stats.rx_dropped++; |
@@ -3581,28 +2630,14 @@ NextFrame: | |||
3581 | * chain to next block on active read queue | 2630 | * chain to next block on active read queue |
3582 | */ | 2631 | */ |
3583 | p_this_ccw = privptr->p_read_active_first; | 2632 | p_this_ccw = privptr->p_read_active_first; |
3584 | CLAW_DBF_TEXT_(4,trace,"rxpkt %d",p); | 2633 | CLAW_DBF_TEXT_(4, trace, "rxpkt %d", p); |
3585 | } /* end of while */ | 2634 | } /* end of while */ |
3586 | 2635 | ||
3587 | /* check validity */ | 2636 | /* check validity */ |
3588 | 2637 | ||
3589 | #ifdef IOTRACE | 2638 | CLAW_DBF_TEXT_(4, trace, "rxfrm %d", i); |
3590 | printk(KERN_INFO "%s:%s processed frame is %d \n", | ||
3591 | dev->name,__func__,i); | ||
3592 | printk(KERN_INFO "%s:%s F:%lx L:%lx\n", | ||
3593 | dev->name, | ||
3594 | __func__, | ||
3595 | (unsigned long)p_first_ccw, | ||
3596 | (unsigned long)p_last_ccw); | ||
3597 | #endif | ||
3598 | CLAW_DBF_TEXT_(4,trace,"rxfrm %d",i); | ||
3599 | add_claw_reads(dev, p_first_ccw, p_last_ccw); | 2639 | add_claw_reads(dev, p_first_ccw, p_last_ccw); |
3600 | p_ch=&privptr->channel[READ]; | ||
3601 | claw_strt_read(dev, LOCK_YES); | 2640 | claw_strt_read(dev, LOCK_YES); |
3602 | #ifdef FUNCTRACE | ||
3603 | printk(KERN_INFO "%s: %s exit on line %d\n", | ||
3604 | dev->name, __func__, __LINE__); | ||
3605 | #endif | ||
3606 | return; | 2641 | return; |
3607 | } /* end of unpack_read */ | 2642 | } /* end of unpack_read */ |
3608 | 2643 | ||
@@ -3622,12 +2657,7 @@ claw_strt_read (struct net_device *dev, int lock ) | |||
3622 | struct clawh *p_clawh; | 2657 | struct clawh *p_clawh; |
3623 | p_ch=&privptr->channel[READ]; | 2658 | p_ch=&privptr->channel[READ]; |
3624 | 2659 | ||
3625 | #ifdef FUNCTRACE | 2660 | CLAW_DBF_TEXT(4, trace, "StRdNter"); |
3626 | printk(KERN_INFO "%s:%s Enter \n",dev->name,__func__); | ||
3627 | printk(KERN_INFO "%s: variable lock = %d, dev =\n",dev->name, lock); | ||
3628 | dumpit((char *) dev, sizeof(struct net_device)); | ||
3629 | #endif | ||
3630 | CLAW_DBF_TEXT(4,trace,"StRdNter"); | ||
3631 | p_clawh=(struct clawh *)privptr->p_claw_signal_blk; | 2661 | p_clawh=(struct clawh *)privptr->p_claw_signal_blk; |
3632 | p_clawh->flag=CLAW_IDLE; /* 0x00 */ | 2662 | p_clawh->flag=CLAW_IDLE; /* 0x00 */ |
3633 | 2663 | ||
@@ -3637,21 +2667,11 @@ claw_strt_read (struct net_device *dev, int lock ) | |||
3637 | privptr->p_read_active_first->header.flag!=CLAW_PENDING )) { | 2667 | privptr->p_read_active_first->header.flag!=CLAW_PENDING )) { |
3638 | p_clawh->flag=CLAW_BUSY; /* 0xff */ | 2668 | p_clawh->flag=CLAW_BUSY; /* 0xff */ |
3639 | } | 2669 | } |
3640 | #ifdef DEBUGMSG | ||
3641 | printk(KERN_INFO "%s:%s state-%02x\n" , | ||
3642 | dev->name,__func__, p_ch->claw_state); | ||
3643 | #endif | ||
3644 | if (lock==LOCK_YES) { | 2670 | if (lock==LOCK_YES) { |
3645 | spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags); | 2671 | spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags); |
3646 | } | 2672 | } |
3647 | if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) { | 2673 | if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) { |
3648 | #ifdef DEBUGMSG | 2674 | CLAW_DBF_TEXT(4, trace, "HotRead"); |
3649 | printk(KERN_INFO "%s: HOT READ started in %s\n" , | ||
3650 | dev->name,__func__); | ||
3651 | p_clawh=(struct clawh *)privptr->p_claw_signal_blk; | ||
3652 | dumpit((char *)&p_clawh->flag , 1); | ||
3653 | #endif | ||
3654 | CLAW_DBF_TEXT(4,trace,"HotRead"); | ||
3655 | p_ccwbk=privptr->p_read_active_first; | 2675 | p_ccwbk=privptr->p_read_active_first; |
3656 | parm = (unsigned long) p_ch; | 2676 | parm = (unsigned long) p_ch; |
3657 | rc = ccw_device_start (p_ch->cdev, &p_ccwbk->read, parm, | 2677 | rc = ccw_device_start (p_ch->cdev, &p_ccwbk->read, parm, |
@@ -3661,21 +2681,13 @@ claw_strt_read (struct net_device *dev, int lock ) | |||
3661 | } | 2681 | } |
3662 | } | 2682 | } |
3663 | else { | 2683 | else { |
3664 | #ifdef DEBUGMSG | 2684 | CLAW_DBF_TEXT(2, trace, "ReadAct"); |
3665 | printk(KERN_INFO "%s: No READ started by %s() In progress\n" , | ||
3666 | dev->name,__func__); | ||
3667 | #endif | ||
3668 | CLAW_DBF_TEXT(2,trace,"ReadAct"); | ||
3669 | } | 2685 | } |
3670 | 2686 | ||
3671 | if (lock==LOCK_YES) { | 2687 | if (lock==LOCK_YES) { |
3672 | spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags); | 2688 | spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags); |
3673 | } | 2689 | } |
3674 | #ifdef FUNCTRACE | 2690 | CLAW_DBF_TEXT(4, trace, "StRdExit"); |
3675 | printk(KERN_INFO "%s:%s Exit on line %d\n", | ||
3676 | dev->name,__func__,__LINE__); | ||
3677 | #endif | ||
3678 | CLAW_DBF_TEXT(4,trace,"StRdExit"); | ||
3679 | return; | 2691 | return; |
3680 | } /* end of claw_strt_read */ | 2692 | } /* end of claw_strt_read */ |
3681 | 2693 | ||
@@ -3693,38 +2705,23 @@ claw_strt_out_IO( struct net_device *dev ) | |||
3693 | struct chbk *p_ch; | 2705 | struct chbk *p_ch; |
3694 | struct ccwbk *p_first_ccw; | 2706 | struct ccwbk *p_first_ccw; |
3695 | 2707 | ||
3696 | #ifdef FUNCTRACE | ||
3697 | printk(KERN_INFO "%s:%s Enter\n",dev->name,__func__); | ||
3698 | #endif | ||
3699 | if (!dev) { | 2708 | if (!dev) { |
3700 | return; | 2709 | return; |
3701 | } | 2710 | } |
3702 | privptr=(struct claw_privbk *)dev->priv; | 2711 | privptr=(struct claw_privbk *)dev->priv; |
3703 | p_ch=&privptr->channel[WRITE]; | 2712 | p_ch=&privptr->channel[WRITE]; |
3704 | 2713 | ||
3705 | #ifdef DEBUGMSG | 2714 | CLAW_DBF_TEXT(4, trace, "strt_io"); |
3706 | printk(KERN_INFO "%s:%s state-%02x\n" , | ||
3707 | dev->name,__func__,p_ch->claw_state); | ||
3708 | #endif | ||
3709 | CLAW_DBF_TEXT(4,trace,"strt_io"); | ||
3710 | p_first_ccw=privptr->p_write_active_first; | 2715 | p_first_ccw=privptr->p_write_active_first; |
3711 | 2716 | ||
3712 | if (p_ch->claw_state == CLAW_STOP) | 2717 | if (p_ch->claw_state == CLAW_STOP) |
3713 | return; | 2718 | return; |
3714 | if (p_first_ccw == NULL) { | 2719 | if (p_first_ccw == NULL) { |
3715 | #ifdef FUNCTRACE | ||
3716 | printk(KERN_INFO "%s:%s Exit on line %d\n", | ||
3717 | dev->name,__func__,__LINE__); | ||
3718 | #endif | ||
3719 | return; | 2720 | return; |
3720 | } | 2721 | } |
3721 | if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) { | 2722 | if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) { |
3722 | parm = (unsigned long) p_ch; | 2723 | parm = (unsigned long) p_ch; |
3723 | #ifdef DEBUGMSG | 2724 | CLAW_DBF_TEXT(2, trace, "StWrtIO"); |
3724 | printk(KERN_INFO "%s:%s do_io \n" ,dev->name,__func__); | ||
3725 | dumpit((char *)p_first_ccw, sizeof(struct ccwbk)); | ||
3726 | #endif | ||
3727 | CLAW_DBF_TEXT(2,trace,"StWrtIO"); | ||
3728 | rc = ccw_device_start (p_ch->cdev,&p_first_ccw->write, parm, | 2725 | rc = ccw_device_start (p_ch->cdev,&p_first_ccw->write, parm, |
3729 | 0xff, 0); | 2726 | 0xff, 0); |
3730 | if (rc != 0) { | 2727 | if (rc != 0) { |
@@ -3732,11 +2729,6 @@ claw_strt_out_IO( struct net_device *dev ) | |||
3732 | } | 2729 | } |
3733 | } | 2730 | } |
3734 | dev->trans_start = jiffies; | 2731 | dev->trans_start = jiffies; |
3735 | #ifdef FUNCTRACE | ||
3736 | printk(KERN_INFO "%s:%s Exit on line %d\n", | ||
3737 | dev->name,__func__,__LINE__); | ||
3738 | #endif | ||
3739 | |||
3740 | return; | 2732 | return; |
3741 | } /* end of claw_strt_out_IO */ | 2733 | } /* end of claw_strt_out_IO */ |
3742 | 2734 | ||
@@ -3754,32 +2746,11 @@ claw_free_wrt_buf( struct net_device *dev ) | |||
3754 | struct ccwbk*p_last_ccw; | 2746 | struct ccwbk*p_last_ccw; |
3755 | struct ccwbk*p_this_ccw; | 2747 | struct ccwbk*p_this_ccw; |
3756 | struct ccwbk*p_next_ccw; | 2748 | struct ccwbk*p_next_ccw; |
3757 | #ifdef IOTRACE | 2749 | |
3758 | struct ccwbk*p_buf; | 2750 | CLAW_DBF_TEXT(4, trace, "freewrtb"); |
3759 | #endif | ||
3760 | #ifdef FUNCTRACE | ||
3761 | printk(KERN_INFO "%s:%s Enter\n",dev->name,__func__); | ||
3762 | printk(KERN_INFO "%s: free count = %d variable dev =\n", | ||
3763 | dev->name,privptr->write_free_count); | ||
3764 | #endif | ||
3765 | CLAW_DBF_TEXT(4,trace,"freewrtb"); | ||
3766 | /* scan the write queue to free any completed write packets */ | 2751 | /* scan the write queue to free any completed write packets */ |
3767 | p_first_ccw=NULL; | 2752 | p_first_ccw=NULL; |
3768 | p_last_ccw=NULL; | 2753 | p_last_ccw=NULL; |
3769 | #ifdef IOTRACE | ||
3770 | printk(KERN_INFO "%s: Dump current CCW chain \n",dev->name ); | ||
3771 | p_buf=privptr->p_write_active_first; | ||
3772 | while (p_buf!=NULL) { | ||
3773 | dumpit((char *)p_buf, sizeof(struct ccwbk)); | ||
3774 | p_buf=p_buf->next; | ||
3775 | } | ||
3776 | if (p_buf==NULL) { | ||
3777 | printk(KERN_INFO "%s: privptr->p_write_" | ||
3778 | "active_first==NULL\n",dev->name ); | ||
3779 | } | ||
3780 | p_buf=(struct ccwbk*)privptr->p_end_ccw; | ||
3781 | dumpit((char *)p_buf, sizeof(struct endccw)); | ||
3782 | #endif | ||
3783 | p_this_ccw=privptr->p_write_active_first; | 2754 | p_this_ccw=privptr->p_write_active_first; |
3784 | while ( (p_this_ccw!=NULL) && (p_this_ccw->header.flag!=CLAW_PENDING)) | 2755 | while ( (p_this_ccw!=NULL) && (p_this_ccw->header.flag!=CLAW_PENDING)) |
3785 | { | 2756 | { |
@@ -3809,31 +2780,8 @@ claw_free_wrt_buf( struct net_device *dev ) | |||
3809 | /* whole chain removed? */ | 2780 | /* whole chain removed? */ |
3810 | if (privptr->p_write_active_first==NULL) { | 2781 | if (privptr->p_write_active_first==NULL) { |
3811 | privptr->p_write_active_last=NULL; | 2782 | privptr->p_write_active_last=NULL; |
3812 | #ifdef DEBUGMSG | ||
3813 | printk(KERN_INFO "%s:%s p_write_" | ||
3814 | "active_first==NULL\n",dev->name,__func__); | ||
3815 | #endif | ||
3816 | } | ||
3817 | #ifdef IOTRACE | ||
3818 | printk(KERN_INFO "%s: Dump arranged CCW chain \n",dev->name ); | ||
3819 | p_buf=privptr->p_write_active_first; | ||
3820 | while (p_buf!=NULL) { | ||
3821 | dumpit((char *)p_buf, sizeof(struct ccwbk)); | ||
3822 | p_buf=p_buf->next; | ||
3823 | } | 2783 | } |
3824 | if (p_buf==NULL) { | 2784 | CLAW_DBF_TEXT_(4, trace, "FWC=%d", privptr->write_free_count); |
3825 | printk(KERN_INFO "%s: privptr->p_write_active_" | ||
3826 | "first==NULL\n",dev->name ); | ||
3827 | } | ||
3828 | p_buf=(struct ccwbk*)privptr->p_end_ccw; | ||
3829 | dumpit((char *)p_buf, sizeof(struct endccw)); | ||
3830 | #endif | ||
3831 | |||
3832 | CLAW_DBF_TEXT_(4,trace,"FWC=%d",privptr->write_free_count); | ||
3833 | #ifdef FUNCTRACE | ||
3834 | printk(KERN_INFO "%s:%s Exit on line %d free_count =%d\n", | ||
3835 | dev->name,__func__, __LINE__,privptr->write_free_count); | ||
3836 | #endif | ||
3837 | return; | 2785 | return; |
3838 | } | 2786 | } |
3839 | 2787 | ||
@@ -3845,14 +2793,11 @@ static void | |||
3845 | claw_free_netdevice(struct net_device * dev, int free_dev) | 2793 | claw_free_netdevice(struct net_device * dev, int free_dev) |
3846 | { | 2794 | { |
3847 | struct claw_privbk *privptr; | 2795 | struct claw_privbk *privptr; |
3848 | #ifdef FUNCTRACE | ||
3849 | printk(KERN_INFO "%s:%s Enter\n",dev->name,__func__); | ||
3850 | #endif | ||
3851 | CLAW_DBF_TEXT(2,setup,"free_dev"); | ||
3852 | 2796 | ||
2797 | CLAW_DBF_TEXT(2, setup, "free_dev"); | ||
3853 | if (!dev) | 2798 | if (!dev) |
3854 | return; | 2799 | return; |
3855 | CLAW_DBF_TEXT_(2,setup,"%s",dev->name); | 2800 | CLAW_DBF_TEXT_(2, setup, "%s", dev->name); |
3856 | privptr = dev->priv; | 2801 | privptr = dev->priv; |
3857 | if (dev->flags & IFF_RUNNING) | 2802 | if (dev->flags & IFF_RUNNING) |
3858 | claw_release(dev); | 2803 | claw_release(dev); |
@@ -3865,10 +2810,7 @@ claw_free_netdevice(struct net_device * dev, int free_dev) | |||
3865 | free_netdev(dev); | 2810 | free_netdev(dev); |
3866 | } | 2811 | } |
3867 | #endif | 2812 | #endif |
3868 | CLAW_DBF_TEXT(2,setup,"feee_ok"); | 2813 | CLAW_DBF_TEXT(2, setup, "free_ok"); |
3869 | #ifdef FUNCTRACE | ||
3870 | printk(KERN_INFO "%s:%s Exit\n",dev->name,__func__); | ||
3871 | #endif | ||
3872 | } | 2814 | } |
3873 | 2815 | ||
3874 | /** | 2816 | /** |
@@ -3879,17 +2821,8 @@ claw_free_netdevice(struct net_device * dev, int free_dev) | |||
3879 | static void | 2821 | static void |
3880 | claw_init_netdevice(struct net_device * dev) | 2822 | claw_init_netdevice(struct net_device * dev) |
3881 | { | 2823 | { |
3882 | #ifdef FUNCTRACE | 2824 | CLAW_DBF_TEXT(2, setup, "init_dev"); |
3883 | printk(KERN_INFO "%s:%s Enter\n",dev->name,__func__); | 2825 | CLAW_DBF_TEXT_(2, setup, "%s", dev->name); |
3884 | #endif | ||
3885 | CLAW_DBF_TEXT(2,setup,"init_dev"); | ||
3886 | CLAW_DBF_TEXT_(2,setup,"%s",dev->name); | ||
3887 | if (!dev) { | ||
3888 | printk(KERN_WARNING "claw:%s BAD Device exit line %d\n", | ||
3889 | __func__,__LINE__); | ||
3890 | CLAW_DBF_TEXT(2,setup,"baddev"); | ||
3891 | return; | ||
3892 | } | ||
3893 | dev->mtu = CLAW_DEFAULT_MTU_SIZE; | 2826 | dev->mtu = CLAW_DEFAULT_MTU_SIZE; |
3894 | dev->hard_start_xmit = claw_tx; | 2827 | dev->hard_start_xmit = claw_tx; |
3895 | dev->open = claw_open; | 2828 | dev->open = claw_open; |
@@ -3901,10 +2834,7 @@ claw_init_netdevice(struct net_device * dev) | |||
3901 | dev->type = ARPHRD_SLIP; | 2834 | dev->type = ARPHRD_SLIP; |
3902 | dev->tx_queue_len = 1300; | 2835 | dev->tx_queue_len = 1300; |
3903 | dev->flags = IFF_POINTOPOINT | IFF_NOARP; | 2836 | dev->flags = IFF_POINTOPOINT | IFF_NOARP; |
3904 | #ifdef FUNCTRACE | 2837 | CLAW_DBF_TEXT(2, setup, "initok"); |
3905 | printk(KERN_INFO "%s:%s Exit\n",dev->name,__func__); | ||
3906 | #endif | ||
3907 | CLAW_DBF_TEXT(2,setup,"initok"); | ||
3908 | return; | 2838 | return; |
3909 | } | 2839 | } |
3910 | 2840 | ||
@@ -3921,10 +2851,7 @@ add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr) | |||
3921 | struct chbk *p_ch; | 2851 | struct chbk *p_ch; |
3922 | struct ccw_dev_id dev_id; | 2852 | struct ccw_dev_id dev_id; |
3923 | 2853 | ||
3924 | #ifdef FUNCTRACE | 2854 | CLAW_DBF_TEXT_(2, setup, "%s", cdev->dev.bus_id); |
3925 | printk(KERN_INFO "%s:%s Enter\n",cdev->dev.bus_id,__func__); | ||
3926 | #endif | ||
3927 | CLAW_DBF_TEXT_(2,setup,"%s",cdev->dev.bus_id); | ||
3928 | privptr->channel[i].flag = i+1; /* Read is 1 Write is 2 */ | 2855 | privptr->channel[i].flag = i+1; /* Read is 1 Write is 2 */ |
3929 | p_ch = &privptr->channel[i]; | 2856 | p_ch = &privptr->channel[i]; |
3930 | p_ch->cdev = cdev; | 2857 | p_ch->cdev = cdev; |
@@ -3932,18 +2859,8 @@ add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr) | |||
3932 | ccw_device_get_id(cdev, &dev_id); | 2859 | ccw_device_get_id(cdev, &dev_id); |
3933 | p_ch->devno = dev_id.devno; | 2860 | p_ch->devno = dev_id.devno; |
3934 | if ((p_ch->irb = kzalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) { | 2861 | if ((p_ch->irb = kzalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) { |
3935 | printk(KERN_WARNING "%s Out of memory in %s for irb\n", | ||
3936 | p_ch->id,__func__); | ||
3937 | #ifdef FUNCTRACE | ||
3938 | printk(KERN_INFO "%s:%s Exit on line %d\n", | ||
3939 | p_ch->id,__func__,__LINE__); | ||
3940 | #endif | ||
3941 | return -ENOMEM; | 2862 | return -ENOMEM; |
3942 | } | 2863 | } |
3943 | #ifdef FUNCTRACE | ||
3944 | printk(KERN_INFO "%s:%s Exit on line %d\n", | ||
3945 | cdev->dev.bus_id,__func__,__LINE__); | ||
3946 | #endif | ||
3947 | return 0; | 2864 | return 0; |
3948 | } | 2865 | } |
3949 | 2866 | ||
@@ -3965,9 +2882,8 @@ claw_new_device(struct ccwgroup_device *cgdev) | |||
3965 | int ret; | 2882 | int ret; |
3966 | struct ccw_dev_id dev_id; | 2883 | struct ccw_dev_id dev_id; |
3967 | 2884 | ||
3968 | pr_debug("%s() called\n", __func__); | ||
3969 | printk(KERN_INFO "claw: add for %s\n",cgdev->cdev[READ]->dev.bus_id); | 2885 | printk(KERN_INFO "claw: add for %s\n",cgdev->cdev[READ]->dev.bus_id); |
3970 | CLAW_DBF_TEXT(2,setup,"new_dev"); | 2886 | CLAW_DBF_TEXT(2, setup, "new_dev"); |
3971 | privptr = cgdev->dev.driver_data; | 2887 | privptr = cgdev->dev.driver_data; |
3972 | cgdev->cdev[READ]->dev.driver_data = privptr; | 2888 | cgdev->cdev[READ]->dev.driver_data = privptr; |
3973 | cgdev->cdev[WRITE]->dev.driver_data = privptr; | 2889 | cgdev->cdev[WRITE]->dev.driver_data = privptr; |
@@ -3982,22 +2898,21 @@ claw_new_device(struct ccwgroup_device *cgdev) | |||
3982 | if (ret == 0) | 2898 | if (ret == 0) |
3983 | ret = add_channel(cgdev->cdev[1],1,privptr); | 2899 | ret = add_channel(cgdev->cdev[1],1,privptr); |
3984 | if (ret != 0) { | 2900 | if (ret != 0) { |
3985 | printk(KERN_WARNING | 2901 | printk(KERN_WARNING |
3986 | "add channel failed " | 2902 | "add channel failed with ret = %d\n", ret); |
3987 | "with ret = %d\n", ret); | 2903 | goto out; |
3988 | goto out; | ||
3989 | } | 2904 | } |
3990 | ret = ccw_device_set_online(cgdev->cdev[READ]); | 2905 | ret = ccw_device_set_online(cgdev->cdev[READ]); |
3991 | if (ret != 0) { | 2906 | if (ret != 0) { |
3992 | printk(KERN_WARNING | 2907 | printk(KERN_WARNING |
3993 | "claw: ccw_device_set_online %s READ failed " | 2908 | "claw: ccw_device_set_online %s READ failed " |
3994 | "with ret = %d\n",cgdev->cdev[READ]->dev.bus_id,ret); | 2909 | "with ret = %d\n",cgdev->cdev[READ]->dev.bus_id,ret); |
3995 | goto out; | 2910 | goto out; |
3996 | } | 2911 | } |
3997 | ret = ccw_device_set_online(cgdev->cdev[WRITE]); | 2912 | ret = ccw_device_set_online(cgdev->cdev[WRITE]); |
3998 | if (ret != 0) { | 2913 | if (ret != 0) { |
3999 | printk(KERN_WARNING | 2914 | printk(KERN_WARNING |
4000 | "claw: ccw_device_set_online %s WRITE failed " | 2915 | "claw: ccw_device_set_online %s WRITE failed " |
4001 | "with ret = %d\n",cgdev->cdev[WRITE]->dev.bus_id, ret); | 2916 | "with ret = %d\n",cgdev->cdev[WRITE]->dev.bus_id, ret); |
4002 | goto out; | 2917 | goto out; |
4003 | } | 2918 | } |
@@ -4014,18 +2929,16 @@ claw_new_device(struct ccwgroup_device *cgdev) | |||
4014 | SET_NETDEV_DEV(dev, &cgdev->dev); | 2929 | SET_NETDEV_DEV(dev, &cgdev->dev); |
4015 | if (register_netdev(dev) != 0) { | 2930 | if (register_netdev(dev) != 0) { |
4016 | claw_free_netdevice(dev, 1); | 2931 | claw_free_netdevice(dev, 1); |
4017 | CLAW_DBF_TEXT(2,trace,"regfail"); | 2932 | CLAW_DBF_TEXT(2, trace, "regfail"); |
4018 | goto out; | 2933 | goto out; |
4019 | } | 2934 | } |
4020 | dev->flags &=~IFF_RUNNING; | 2935 | dev->flags &=~IFF_RUNNING; |
4021 | if (privptr->buffs_alloc == 0) { | 2936 | if (privptr->buffs_alloc == 0) { |
4022 | ret=init_ccw_bk(dev); | 2937 | ret=init_ccw_bk(dev); |
4023 | if (ret !=0) { | 2938 | if (ret !=0) { |
4024 | printk(KERN_WARNING | ||
4025 | "claw: init_ccw_bk failed with ret=%d\n", ret); | ||
4026 | unregister_netdev(dev); | 2939 | unregister_netdev(dev); |
4027 | claw_free_netdevice(dev,1); | 2940 | claw_free_netdevice(dev,1); |
4028 | CLAW_DBF_TEXT(2,trace,"ccwmem"); | 2941 | CLAW_DBF_TEXT(2, trace, "ccwmem"); |
4029 | goto out; | 2942 | goto out; |
4030 | } | 2943 | } |
4031 | } | 2944 | } |
@@ -4047,7 +2960,6 @@ claw_new_device(struct ccwgroup_device *cgdev) | |||
4047 | out: | 2960 | out: |
4048 | ccw_device_set_offline(cgdev->cdev[1]); | 2961 | ccw_device_set_offline(cgdev->cdev[1]); |
4049 | ccw_device_set_offline(cgdev->cdev[0]); | 2962 | ccw_device_set_offline(cgdev->cdev[0]); |
4050 | |||
4051 | return -ENODEV; | 2963 | return -ENODEV; |
4052 | } | 2964 | } |
4053 | 2965 | ||
@@ -4056,8 +2968,7 @@ claw_purge_skb_queue(struct sk_buff_head *q) | |||
4056 | { | 2968 | { |
4057 | struct sk_buff *skb; | 2969 | struct sk_buff *skb; |
4058 | 2970 | ||
4059 | CLAW_DBF_TEXT(4,trace,"purgque"); | 2971 | CLAW_DBF_TEXT(4, trace, "purgque"); |
4060 | |||
4061 | while ((skb = skb_dequeue(q))) { | 2972 | while ((skb = skb_dequeue(q))) { |
4062 | atomic_dec(&skb->users); | 2973 | atomic_dec(&skb->users); |
4063 | dev_kfree_skb_any(skb); | 2974 | dev_kfree_skb_any(skb); |
@@ -4078,8 +2989,7 @@ claw_shutdown_device(struct ccwgroup_device *cgdev) | |||
4078 | struct net_device *ndev; | 2989 | struct net_device *ndev; |
4079 | int ret; | 2990 | int ret; |
4080 | 2991 | ||
4081 | pr_debug("%s() called\n", __func__); | 2992 | CLAW_DBF_TEXT_(2, setup, "%s", cgdev->dev.bus_id); |
4082 | CLAW_DBF_TEXT_(2,setup,"%s",cgdev->dev.bus_id); | ||
4083 | priv = cgdev->dev.driver_data; | 2993 | priv = cgdev->dev.driver_data; |
4084 | if (!priv) | 2994 | if (!priv) |
4085 | return -ENODEV; | 2995 | return -ENODEV; |
@@ -4108,13 +3018,10 @@ claw_remove_device(struct ccwgroup_device *cgdev) | |||
4108 | { | 3018 | { |
4109 | struct claw_privbk *priv; | 3019 | struct claw_privbk *priv; |
4110 | 3020 | ||
4111 | pr_debug("%s() called\n", __func__); | 3021 | BUG_ON(!cgdev); |
4112 | CLAW_DBF_TEXT_(2,setup,"%s",cgdev->dev.bus_id); | 3022 | CLAW_DBF_TEXT_(2, setup, "%s", cgdev->dev.bus_id); |
4113 | priv = cgdev->dev.driver_data; | 3023 | priv = cgdev->dev.driver_data; |
4114 | if (!priv) { | 3024 | BUG_ON(!priv); |
4115 | printk(KERN_WARNING "claw: %s() no Priv exiting\n",__func__); | ||
4116 | return; | ||
4117 | } | ||
4118 | printk(KERN_INFO "claw: %s() called %s will be removed.\n", | 3025 | printk(KERN_INFO "claw: %s() called %s will be removed.\n", |
4119 | __func__,cgdev->cdev[0]->dev.bus_id); | 3026 | __func__,cgdev->cdev[0]->dev.bus_id); |
4120 | if (cgdev->state == CCWGROUP_ONLINE) | 3027 | if (cgdev->state == CCWGROUP_ONLINE) |
@@ -4133,6 +3040,8 @@ claw_remove_device(struct ccwgroup_device *cgdev) | |||
4133 | cgdev->cdev[READ]->dev.driver_data = NULL; | 3040 | cgdev->cdev[READ]->dev.driver_data = NULL; |
4134 | cgdev->cdev[WRITE]->dev.driver_data = NULL; | 3041 | cgdev->cdev[WRITE]->dev.driver_data = NULL; |
4135 | put_device(&cgdev->dev); | 3042 | put_device(&cgdev->dev); |
3043 | |||
3044 | return; | ||
4136 | } | 3045 | } |
4137 | 3046 | ||
4138 | 3047 | ||
@@ -4168,8 +3077,8 @@ claw_hname_write(struct device *dev, struct device_attribute *attr, const char * | |||
4168 | strncpy(p_env->host_name,buf, count); | 3077 | strncpy(p_env->host_name,buf, count); |
4169 | p_env->host_name[count-1] = 0x20; /* clear extra 0x0a */ | 3078 | p_env->host_name[count-1] = 0x20; /* clear extra 0x0a */ |
4170 | p_env->host_name[MAX_NAME_LEN] = 0x00; | 3079 | p_env->host_name[MAX_NAME_LEN] = 0x00; |
4171 | CLAW_DBF_TEXT(2,setup,"HstnSet"); | 3080 | CLAW_DBF_TEXT(2, setup, "HstnSet"); |
4172 | CLAW_DBF_TEXT_(2,setup,"%s",p_env->host_name); | 3081 | CLAW_DBF_TEXT_(2, setup, "%s", p_env->host_name); |
4173 | 3082 | ||
4174 | return count; | 3083 | return count; |
4175 | } | 3084 | } |
@@ -4186,7 +3095,7 @@ claw_adname_show(struct device *dev, struct device_attribute *attr, char *buf) | |||
4186 | if (!priv) | 3095 | if (!priv) |
4187 | return -ENODEV; | 3096 | return -ENODEV; |
4188 | p_env = priv->p_env; | 3097 | p_env = priv->p_env; |
4189 | return sprintf(buf, "%s\n",p_env->adapter_name); | 3098 | return sprintf(buf, "%s\n", p_env->adapter_name); |
4190 | } | 3099 | } |
4191 | 3100 | ||
4192 | static ssize_t | 3101 | static ssize_t |
@@ -4205,8 +3114,8 @@ claw_adname_write(struct device *dev, struct device_attribute *attr, const char | |||
4205 | strncpy(p_env->adapter_name,buf, count); | 3114 | strncpy(p_env->adapter_name,buf, count); |
4206 | p_env->adapter_name[count-1] = 0x20; /* clear extra 0x0a */ | 3115 | p_env->adapter_name[count-1] = 0x20; /* clear extra 0x0a */ |
4207 | p_env->adapter_name[MAX_NAME_LEN] = 0x00; | 3116 | p_env->adapter_name[MAX_NAME_LEN] = 0x00; |
4208 | CLAW_DBF_TEXT(2,setup,"AdnSet"); | 3117 | CLAW_DBF_TEXT(2, setup, "AdnSet"); |
4209 | CLAW_DBF_TEXT_(2,setup,"%s",p_env->adapter_name); | 3118 | CLAW_DBF_TEXT_(2, setup, "%s", p_env->adapter_name); |
4210 | 3119 | ||
4211 | return count; | 3120 | return count; |
4212 | } | 3121 | } |
@@ -4247,15 +3156,15 @@ claw_apname_write(struct device *dev, struct device_attribute *attr, const char | |||
4247 | p_env->read_size=DEF_PACK_BUFSIZE; | 3156 | p_env->read_size=DEF_PACK_BUFSIZE; |
4248 | p_env->write_size=DEF_PACK_BUFSIZE; | 3157 | p_env->write_size=DEF_PACK_BUFSIZE; |
4249 | p_env->packing=PACKING_ASK; | 3158 | p_env->packing=PACKING_ASK; |
4250 | CLAW_DBF_TEXT(2,setup,"PACKING"); | 3159 | CLAW_DBF_TEXT(2, setup, "PACKING"); |
4251 | } | 3160 | } |
4252 | else { | 3161 | else { |
4253 | p_env->packing=0; | 3162 | p_env->packing=0; |
4254 | p_env->read_size=CLAW_FRAME_SIZE; | 3163 | p_env->read_size=CLAW_FRAME_SIZE; |
4255 | p_env->write_size=CLAW_FRAME_SIZE; | 3164 | p_env->write_size=CLAW_FRAME_SIZE; |
4256 | CLAW_DBF_TEXT(2,setup,"ApiSet"); | 3165 | CLAW_DBF_TEXT(2, setup, "ApiSet"); |
4257 | } | 3166 | } |
4258 | CLAW_DBF_TEXT_(2,setup,"%s",p_env->api_type); | 3167 | CLAW_DBF_TEXT_(2, setup, "%s", p_env->api_type); |
4259 | return count; | 3168 | return count; |
4260 | } | 3169 | } |
4261 | 3170 | ||
@@ -4295,8 +3204,8 @@ claw_wbuff_write(struct device *dev, struct device_attribute *attr, const char * | |||
4295 | if ((nnn > max ) || (nnn < 2)) | 3204 | if ((nnn > max ) || (nnn < 2)) |
4296 | return -EINVAL; | 3205 | return -EINVAL; |
4297 | p_env->write_buffers = nnn; | 3206 | p_env->write_buffers = nnn; |
4298 | CLAW_DBF_TEXT(2,setup,"Wbufset"); | 3207 | CLAW_DBF_TEXT(2, setup, "Wbufset"); |
4299 | CLAW_DBF_TEXT_(2,setup,"WB=%d",p_env->write_buffers); | 3208 | CLAW_DBF_TEXT_(2, setup, "WB=%d", p_env->write_buffers); |
4300 | return count; | 3209 | return count; |
4301 | } | 3210 | } |
4302 | 3211 | ||
@@ -4336,8 +3245,8 @@ claw_rbuff_write(struct device *dev, struct device_attribute *attr, const char * | |||
4336 | if ((nnn > max ) || (nnn < 2)) | 3245 | if ((nnn > max ) || (nnn < 2)) |
4337 | return -EINVAL; | 3246 | return -EINVAL; |
4338 | p_env->read_buffers = nnn; | 3247 | p_env->read_buffers = nnn; |
4339 | CLAW_DBF_TEXT(2,setup,"Rbufset"); | 3248 | CLAW_DBF_TEXT(2, setup, "Rbufset"); |
4340 | CLAW_DBF_TEXT_(2,setup,"RB=%d",p_env->read_buffers); | 3249 | CLAW_DBF_TEXT_(2, setup, "RB=%d", p_env->read_buffers); |
4341 | return count; | 3250 | return count; |
4342 | } | 3251 | } |
4343 | 3252 | ||
@@ -4359,16 +3268,14 @@ static struct attribute_group claw_attr_group = { | |||
4359 | static int | 3268 | static int |
4360 | claw_add_files(struct device *dev) | 3269 | claw_add_files(struct device *dev) |
4361 | { | 3270 | { |
4362 | pr_debug("%s() called\n", __func__); | 3271 | CLAW_DBF_TEXT(2, setup, "add_file"); |
4363 | CLAW_DBF_TEXT(2,setup,"add_file"); | ||
4364 | return sysfs_create_group(&dev->kobj, &claw_attr_group); | 3272 | return sysfs_create_group(&dev->kobj, &claw_attr_group); |
4365 | } | 3273 | } |
4366 | 3274 | ||
4367 | static void | 3275 | static void |
4368 | claw_remove_files(struct device *dev) | 3276 | claw_remove_files(struct device *dev) |
4369 | { | 3277 | { |
4370 | pr_debug("%s() called\n", __func__); | 3278 | CLAW_DBF_TEXT(2, setup, "rem_file"); |
4371 | CLAW_DBF_TEXT(2,setup,"rem_file"); | ||
4372 | sysfs_remove_group(&dev->kobj, &claw_attr_group); | 3279 | sysfs_remove_group(&dev->kobj, &claw_attr_group); |
4373 | } | 3280 | } |
4374 | 3281 | ||
@@ -4397,35 +3304,27 @@ claw_init(void) | |||
4397 | int ret = 0; | 3304 | int ret = 0; |
4398 | printk(KERN_INFO "claw: starting driver\n"); | 3305 | printk(KERN_INFO "claw: starting driver\n"); |
4399 | 3306 | ||
4400 | #ifdef FUNCTRACE | ||
4401 | printk(KERN_INFO "claw: %s() enter \n",__func__); | ||
4402 | #endif | ||
4403 | ret = claw_register_debug_facility(); | 3307 | ret = claw_register_debug_facility(); |
4404 | if (ret) { | 3308 | if (ret) { |
4405 | printk(KERN_WARNING "claw: %s() debug_register failed %d\n", | 3309 | printk(KERN_WARNING "claw: %s() debug_register failed %d\n", |
4406 | __func__,ret); | 3310 | __func__,ret); |
4407 | return ret; | 3311 | return ret; |
4408 | } | 3312 | } |
4409 | CLAW_DBF_TEXT(2,setup,"init_mod"); | 3313 | CLAW_DBF_TEXT(2, setup, "init_mod"); |
4410 | ret = register_cu3088_discipline(&claw_group_driver); | 3314 | ret = register_cu3088_discipline(&claw_group_driver); |
4411 | if (ret) { | 3315 | if (ret) { |
3316 | CLAW_DBF_TEXT(2, setup, "init_bad"); | ||
4412 | claw_unregister_debug_facility(); | 3317 | claw_unregister_debug_facility(); |
4413 | printk(KERN_WARNING "claw; %s() cu3088 register failed %d\n", | 3318 | printk(KERN_WARNING "claw; %s() cu3088 register failed %d\n", |
4414 | __func__,ret); | 3319 | __func__,ret); |
4415 | } | 3320 | } |
4416 | #ifdef FUNCTRACE | ||
4417 | printk(KERN_INFO "claw: %s() exit \n",__func__); | ||
4418 | #endif | ||
4419 | return ret; | 3321 | return ret; |
4420 | } | 3322 | } |
4421 | 3323 | ||
4422 | module_init(claw_init); | 3324 | module_init(claw_init); |
4423 | module_exit(claw_cleanup); | 3325 | module_exit(claw_cleanup); |
4424 | 3326 | ||
4425 | 3327 | MODULE_AUTHOR("Andy Richter <richtera@us.ibm.com>"); | |
4426 | 3328 | MODULE_DESCRIPTION("Linux for System z CLAW Driver\n" \ | |
4427 | /*--------------------------------------------------------------------* | 3329 | "Copyright 2000,2008 IBM Corporation\n"); |
4428 | * End of File * | 3330 | MODULE_LICENSE("GPL"); |
4429 | *---------------------------------------------------------------------*/ | ||
4430 | |||
4431 | |||
diff --git a/drivers/s390/net/ctcm_dbug.c b/drivers/s390/net/ctcm_dbug.c index 8eb25d00b2e7..1ca58f153470 100644 --- a/drivers/s390/net/ctcm_dbug.c +++ b/drivers/s390/net/ctcm_dbug.c | |||
@@ -7,6 +7,7 @@ | |||
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/stddef.h> | 9 | #include <linux/stddef.h> |
10 | #include <linux/string.h> | ||
10 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
11 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
12 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
@@ -22,15 +23,13 @@ | |||
22 | * Debug Facility Stuff | 23 | * Debug Facility Stuff |
23 | */ | 24 | */ |
24 | 25 | ||
25 | DEFINE_PER_CPU(char[256], ctcm_dbf_txt_buf); | ||
26 | |||
27 | struct ctcm_dbf_info ctcm_dbf[CTCM_DBF_INFOS] = { | 26 | struct ctcm_dbf_info ctcm_dbf[CTCM_DBF_INFOS] = { |
28 | [CTCM_DBF_SETUP] = {"ctc_setup", 8, 1, 64, 5, NULL}, | 27 | [CTCM_DBF_SETUP] = {"ctc_setup", 8, 1, 64, CTC_DBF_INFO, NULL}, |
29 | [CTCM_DBF_ERROR] = {"ctc_error", 8, 1, 64, 3, NULL}, | 28 | [CTCM_DBF_ERROR] = {"ctc_error", 8, 1, 64, CTC_DBF_ERROR, NULL}, |
30 | [CTCM_DBF_TRACE] = {"ctc_trace", 8, 1, 64, 3, NULL}, | 29 | [CTCM_DBF_TRACE] = {"ctc_trace", 8, 1, 64, CTC_DBF_ERROR, NULL}, |
31 | [CTCM_DBF_MPC_SETUP] = {"mpc_setup", 8, 1, 64, 5, NULL}, | 30 | [CTCM_DBF_MPC_SETUP] = {"mpc_setup", 8, 1, 80, CTC_DBF_INFO, NULL}, |
32 | [CTCM_DBF_MPC_ERROR] = {"mpc_error", 8, 1, 64, 3, NULL}, | 31 | [CTCM_DBF_MPC_ERROR] = {"mpc_error", 8, 1, 80, CTC_DBF_ERROR, NULL}, |
33 | [CTCM_DBF_MPC_TRACE] = {"mpc_trace", 8, 1, 64, 3, NULL}, | 32 | [CTCM_DBF_MPC_TRACE] = {"mpc_trace", 8, 1, 80, CTC_DBF_ERROR, NULL}, |
34 | }; | 33 | }; |
35 | 34 | ||
36 | void ctcm_unregister_dbf_views(void) | 35 | void ctcm_unregister_dbf_views(void) |
@@ -65,3 +64,17 @@ int ctcm_register_dbf_views(void) | |||
65 | return 0; | 64 | return 0; |
66 | } | 65 | } |
67 | 66 | ||
67 | void ctcm_dbf_longtext(enum ctcm_dbf_names dbf_nix, int level, char *fmt, ...) | ||
68 | { | ||
69 | char dbf_txt_buf[64]; | ||
70 | va_list args; | ||
71 | |||
72 | if (level > (ctcm_dbf[dbf_nix].id)->level) | ||
73 | return; | ||
74 | va_start(args, fmt); | ||
75 | vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args); | ||
76 | va_end(args); | ||
77 | |||
78 | debug_text_event(ctcm_dbf[dbf_nix].id, level, dbf_txt_buf); | ||
79 | } | ||
80 | |||
diff --git a/drivers/s390/net/ctcm_dbug.h b/drivers/s390/net/ctcm_dbug.h index fdff34fe59a2..26966d0b9abd 100644 --- a/drivers/s390/net/ctcm_dbug.h +++ b/drivers/s390/net/ctcm_dbug.h | |||
@@ -20,16 +20,17 @@ | |||
20 | #else | 20 | #else |
21 | #define do_debug 0 | 21 | #define do_debug 0 |
22 | #endif | 22 | #endif |
23 | #ifdef DEBUGDATA | ||
24 | #define do_debug_data 1 | ||
25 | #else | ||
26 | #define do_debug_data 0 | ||
27 | #endif | ||
28 | #ifdef DEBUGCCW | 23 | #ifdef DEBUGCCW |
29 | #define do_debug_ccw 1 | 24 | #define do_debug_ccw 1 |
25 | #define DEBUGDATA 1 | ||
30 | #else | 26 | #else |
31 | #define do_debug_ccw 0 | 27 | #define do_debug_ccw 0 |
32 | #endif | 28 | #endif |
29 | #ifdef DEBUGDATA | ||
30 | #define do_debug_data 1 | ||
31 | #else | ||
32 | #define do_debug_data 0 | ||
33 | #endif | ||
33 | 34 | ||
34 | /* define dbf debug levels similar to kernel msg levels */ | 35 | /* define dbf debug levels similar to kernel msg levels */ |
35 | #define CTC_DBF_ALWAYS 0 /* always print this */ | 36 | #define CTC_DBF_ALWAYS 0 /* always print this */ |
@@ -42,8 +43,6 @@ | |||
42 | #define CTC_DBF_INFO 5 /* informational */ | 43 | #define CTC_DBF_INFO 5 /* informational */ |
43 | #define CTC_DBF_DEBUG 6 /* debug-level messages */ | 44 | #define CTC_DBF_DEBUG 6 /* debug-level messages */ |
44 | 45 | ||
45 | DECLARE_PER_CPU(char[256], ctcm_dbf_txt_buf); | ||
46 | |||
47 | enum ctcm_dbf_names { | 46 | enum ctcm_dbf_names { |
48 | CTCM_DBF_SETUP, | 47 | CTCM_DBF_SETUP, |
49 | CTCM_DBF_ERROR, | 48 | CTCM_DBF_ERROR, |
@@ -67,6 +66,7 @@ extern struct ctcm_dbf_info ctcm_dbf[CTCM_DBF_INFOS]; | |||
67 | 66 | ||
68 | int ctcm_register_dbf_views(void); | 67 | int ctcm_register_dbf_views(void); |
69 | void ctcm_unregister_dbf_views(void); | 68 | void ctcm_unregister_dbf_views(void); |
69 | void ctcm_dbf_longtext(enum ctcm_dbf_names dbf_nix, int level, char *text, ...); | ||
70 | 70 | ||
71 | static inline const char *strtail(const char *s, int n) | 71 | static inline const char *strtail(const char *s, int n) |
72 | { | 72 | { |
@@ -74,12 +74,6 @@ static inline const char *strtail(const char *s, int n) | |||
74 | return (l > n) ? s + (l - n) : s; | 74 | return (l > n) ? s + (l - n) : s; |
75 | } | 75 | } |
76 | 76 | ||
77 | /* sort out levels early to avoid unnecessary sprintfs */ | ||
78 | static inline int ctcm_dbf_passes(debug_info_t *dbf_grp, int level) | ||
79 | { | ||
80 | return (dbf_grp->level >= level); | ||
81 | } | ||
82 | |||
83 | #define CTCM_FUNTAIL strtail((char *)__func__, 16) | 77 | #define CTCM_FUNTAIL strtail((char *)__func__, 16) |
84 | 78 | ||
85 | #define CTCM_DBF_TEXT(name, level, text) \ | 79 | #define CTCM_DBF_TEXT(name, level, text) \ |
@@ -94,16 +88,7 @@ static inline int ctcm_dbf_passes(debug_info_t *dbf_grp, int level) | |||
94 | } while (0) | 88 | } while (0) |
95 | 89 | ||
96 | #define CTCM_DBF_TEXT_(name, level, text...) \ | 90 | #define CTCM_DBF_TEXT_(name, level, text...) \ |
97 | do { \ | 91 | ctcm_dbf_longtext(CTCM_DBF_##name, level, text) |
98 | if (ctcm_dbf_passes(ctcm_dbf[CTCM_DBF_##name].id, level)) { \ | ||
99 | char *ctcm_dbf_txt_buf = \ | ||
100 | get_cpu_var(ctcm_dbf_txt_buf); \ | ||
101 | sprintf(ctcm_dbf_txt_buf, text); \ | ||
102 | debug_text_event(ctcm_dbf[CTCM_DBF_##name].id, \ | ||
103 | level, ctcm_dbf_txt_buf); \ | ||
104 | put_cpu_var(ctcm_dbf_txt_buf); \ | ||
105 | } \ | ||
106 | } while (0) | ||
107 | 92 | ||
108 | /* | 93 | /* |
109 | * cat : one of {setup, mpc_setup, trace, mpc_trace, error, mpc_error}. | 94 | * cat : one of {setup, mpc_setup, trace, mpc_trace, error, mpc_error}. |
@@ -112,13 +97,13 @@ static inline int ctcm_dbf_passes(debug_info_t *dbf_grp, int level) | |||
112 | */ | 97 | */ |
113 | #define CTCM_DBF_DEV_NAME(cat, dev, text) \ | 98 | #define CTCM_DBF_DEV_NAME(cat, dev, text) \ |
114 | do { \ | 99 | do { \ |
115 | CTCM_DBF_TEXT_(cat, CTC_DBF_INFO, "%s(%s) : %s", \ | 100 | CTCM_DBF_TEXT_(cat, CTC_DBF_INFO, "%s(%s) :- %s", \ |
116 | CTCM_FUNTAIL, dev->name, text); \ | 101 | CTCM_FUNTAIL, dev->name, text); \ |
117 | } while (0) | 102 | } while (0) |
118 | 103 | ||
119 | #define MPC_DBF_DEV_NAME(cat, dev, text) \ | 104 | #define MPC_DBF_DEV_NAME(cat, dev, text) \ |
120 | do { \ | 105 | do { \ |
121 | CTCM_DBF_TEXT_(MPC_##cat, CTC_DBF_INFO, "%s(%s) : %s", \ | 106 | CTCM_DBF_TEXT_(MPC_##cat, CTC_DBF_INFO, "%s(%s) := %s", \ |
122 | CTCM_FUNTAIL, dev->name, text); \ | 107 | CTCM_FUNTAIL, dev->name, text); \ |
123 | } while (0) | 108 | } while (0) |
124 | 109 | ||
@@ -137,13 +122,13 @@ static inline int ctcm_dbf_passes(debug_info_t *dbf_grp, int level) | |||
137 | */ | 122 | */ |
138 | #define CTCM_DBF_DEV(cat, dev, text) \ | 123 | #define CTCM_DBF_DEV(cat, dev, text) \ |
139 | do { \ | 124 | do { \ |
140 | CTCM_DBF_TEXT_(cat, CTC_DBF_INFO, "%s(%p) : %s", \ | 125 | CTCM_DBF_TEXT_(cat, CTC_DBF_INFO, "%s(%p) :-: %s", \ |
141 | CTCM_FUNTAIL, dev, text); \ | 126 | CTCM_FUNTAIL, dev, text); \ |
142 | } while (0) | 127 | } while (0) |
143 | 128 | ||
144 | #define MPC_DBF_DEV(cat, dev, text) \ | 129 | #define MPC_DBF_DEV(cat, dev, text) \ |
145 | do { \ | 130 | do { \ |
146 | CTCM_DBF_TEXT_(MPC_##cat, CTC_DBF_INFO, "%s(%p) : %s", \ | 131 | CTCM_DBF_TEXT_(MPC_##cat, CTC_DBF_INFO, "%s(%p) :=: %s", \ |
147 | CTCM_FUNTAIL, dev, text); \ | 132 | CTCM_FUNTAIL, dev, text); \ |
148 | } while (0) | 133 | } while (0) |
149 | 134 | ||
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c index 7e6bd387f4d8..0b4e6253abe4 100644 --- a/drivers/s390/net/ctcm_fsms.c +++ b/drivers/s390/net/ctcm_fsms.c | |||
@@ -190,7 +190,8 @@ static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg); | |||
190 | void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg) | 190 | void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg) |
191 | { | 191 | { |
192 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, | 192 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, |
193 | "ccw error %s (%s): %04x\n", ch->id, msg, rc); | 193 | "%s(%s): %s: %04x\n", |
194 | CTCM_FUNTAIL, ch->id, msg, rc); | ||
194 | switch (rc) { | 195 | switch (rc) { |
195 | case -EBUSY: | 196 | case -EBUSY: |
196 | ctcm_pr_warn("%s (%s): Busy !\n", ch->id, msg); | 197 | ctcm_pr_warn("%s (%s): Busy !\n", ch->id, msg); |
@@ -212,7 +213,7 @@ void ctcm_purge_skb_queue(struct sk_buff_head *q) | |||
212 | { | 213 | { |
213 | struct sk_buff *skb; | 214 | struct sk_buff *skb; |
214 | 215 | ||
215 | CTCM_DBF_TEXT(TRACE, 3, __FUNCTION__); | 216 | CTCM_DBF_TEXT(TRACE, CTC_DBF_DEBUG, __func__); |
216 | 217 | ||
217 | while ((skb = skb_dequeue(q))) { | 218 | while ((skb = skb_dequeue(q))) { |
218 | atomic_dec(&skb->users); | 219 | atomic_dec(&skb->users); |
@@ -251,6 +252,8 @@ static void chx_txdone(fsm_instance *fi, int event, void *arg) | |||
251 | unsigned long duration; | 252 | unsigned long duration; |
252 | struct timespec done_stamp = current_kernel_time(); /* xtime */ | 253 | struct timespec done_stamp = current_kernel_time(); /* xtime */ |
253 | 254 | ||
255 | CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name); | ||
256 | |||
254 | duration = | 257 | duration = |
255 | (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 + | 258 | (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 + |
256 | (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000; | 259 | (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000; |
@@ -258,8 +261,9 @@ static void chx_txdone(fsm_instance *fi, int event, void *arg) | |||
258 | ch->prof.tx_time = duration; | 261 | ch->prof.tx_time = duration; |
259 | 262 | ||
260 | if (ch->irb->scsw.cmd.count != 0) | 263 | if (ch->irb->scsw.cmd.count != 0) |
261 | ctcm_pr_debug("%s: TX not complete, remaining %d bytes\n", | 264 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, |
262 | dev->name, ch->irb->scsw.cmd.count); | 265 | "%s(%s): TX not complete, remaining %d bytes", |
266 | CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count); | ||
263 | fsm_deltimer(&ch->timer); | 267 | fsm_deltimer(&ch->timer); |
264 | while ((skb = skb_dequeue(&ch->io_queue))) { | 268 | while ((skb = skb_dequeue(&ch->io_queue))) { |
265 | priv->stats.tx_packets++; | 269 | priv->stats.tx_packets++; |
@@ -334,7 +338,8 @@ void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg) | |||
334 | struct net_device *dev = ch->netdev; | 338 | struct net_device *dev = ch->netdev; |
335 | struct ctcm_priv *priv = dev->priv; | 339 | struct ctcm_priv *priv = dev->priv; |
336 | 340 | ||
337 | CTCM_DBF_TEXT(TRACE, 6, __FUNCTION__); | 341 | CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name); |
342 | |||
338 | fsm_deltimer(&ch->timer); | 343 | fsm_deltimer(&ch->timer); |
339 | fsm_newstate(fi, CTC_STATE_TXIDLE); | 344 | fsm_newstate(fi, CTC_STATE_TXIDLE); |
340 | fsm_event(priv->fsm, DEV_EVENT_TXUP, ch->netdev); | 345 | fsm_event(priv->fsm, DEV_EVENT_TXUP, ch->netdev); |
@@ -361,15 +366,17 @@ static void chx_rx(fsm_instance *fi, int event, void *arg) | |||
361 | 366 | ||
362 | fsm_deltimer(&ch->timer); | 367 | fsm_deltimer(&ch->timer); |
363 | if (len < 8) { | 368 | if (len < 8) { |
364 | ctcm_pr_debug("%s: got packet with length %d < 8\n", | 369 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE, |
365 | dev->name, len); | 370 | "%s(%s): got packet with length %d < 8\n", |
371 | CTCM_FUNTAIL, dev->name, len); | ||
366 | priv->stats.rx_dropped++; | 372 | priv->stats.rx_dropped++; |
367 | priv->stats.rx_length_errors++; | 373 | priv->stats.rx_length_errors++; |
368 | goto again; | 374 | goto again; |
369 | } | 375 | } |
370 | if (len > ch->max_bufsize) { | 376 | if (len > ch->max_bufsize) { |
371 | ctcm_pr_debug("%s: got packet with length %d > %d\n", | 377 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE, |
372 | dev->name, len, ch->max_bufsize); | 378 | "%s(%s): got packet with length %d > %d\n", |
379 | CTCM_FUNTAIL, dev->name, len, ch->max_bufsize); | ||
373 | priv->stats.rx_dropped++; | 380 | priv->stats.rx_dropped++; |
374 | priv->stats.rx_length_errors++; | 381 | priv->stats.rx_length_errors++; |
375 | goto again; | 382 | goto again; |
@@ -388,8 +395,9 @@ static void chx_rx(fsm_instance *fi, int event, void *arg) | |||
388 | break; | 395 | break; |
389 | } | 396 | } |
390 | if ((len < block_len) || (len > check_len)) { | 397 | if ((len < block_len) || (len > check_len)) { |
391 | ctcm_pr_debug("%s: got block length %d != rx length %d\n", | 398 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE, |
392 | dev->name, block_len, len); | 399 | "%s(%s): got block length %d != rx length %d\n", |
400 | CTCM_FUNTAIL, dev->name, block_len, len); | ||
393 | if (do_debug) | 401 | if (do_debug) |
394 | ctcmpc_dump_skb(skb, 0); | 402 | ctcmpc_dump_skb(skb, 0); |
395 | 403 | ||
@@ -425,17 +433,23 @@ static void chx_rx(fsm_instance *fi, int event, void *arg) | |||
425 | */ | 433 | */ |
426 | static void chx_firstio(fsm_instance *fi, int event, void *arg) | 434 | static void chx_firstio(fsm_instance *fi, int event, void *arg) |
427 | { | 435 | { |
428 | struct channel *ch = arg; | ||
429 | int rc; | 436 | int rc; |
437 | struct channel *ch = arg; | ||
438 | int fsmstate = fsm_getstate(fi); | ||
430 | 439 | ||
431 | CTCM_DBF_TEXT(TRACE, 6, __FUNCTION__); | 440 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE, |
441 | "%s(%s) : %02x", | ||
442 | CTCM_FUNTAIL, ch->id, fsmstate); | ||
432 | 443 | ||
433 | if (fsm_getstate(fi) == CTC_STATE_TXIDLE) | 444 | ch->sense_rc = 0; /* reset unit check report control */ |
434 | ctcm_pr_debug("%s: remote side issued READ?, init.\n", ch->id); | 445 | if (fsmstate == CTC_STATE_TXIDLE) |
446 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, | ||
447 | "%s(%s): remote side issued READ?, init.\n", | ||
448 | CTCM_FUNTAIL, ch->id); | ||
435 | fsm_deltimer(&ch->timer); | 449 | fsm_deltimer(&ch->timer); |
436 | if (ctcm_checkalloc_buffer(ch)) | 450 | if (ctcm_checkalloc_buffer(ch)) |
437 | return; | 451 | return; |
438 | if ((fsm_getstate(fi) == CTC_STATE_SETUPWAIT) && | 452 | if ((fsmstate == CTC_STATE_SETUPWAIT) && |
439 | (ch->protocol == CTCM_PROTO_OS390)) { | 453 | (ch->protocol == CTCM_PROTO_OS390)) { |
440 | /* OS/390 resp. z/OS */ | 454 | /* OS/390 resp. z/OS */ |
441 | if (CHANNEL_DIRECTION(ch->flags) == READ) { | 455 | if (CHANNEL_DIRECTION(ch->flags) == READ) { |
@@ -451,7 +465,6 @@ static void chx_firstio(fsm_instance *fi, int event, void *arg) | |||
451 | } | 465 | } |
452 | return; | 466 | return; |
453 | } | 467 | } |
454 | |||
455 | /* | 468 | /* |
456 | * Don't setup a timer for receiving the initial RX frame | 469 | * Don't setup a timer for receiving the initial RX frame |
457 | * if in compatibility mode, since VM TCP delays the initial | 470 | * if in compatibility mode, since VM TCP delays the initial |
@@ -505,11 +518,10 @@ static void chx_rxidle(fsm_instance *fi, int event, void *arg) | |||
505 | __u16 buflen; | 518 | __u16 buflen; |
506 | int rc; | 519 | int rc; |
507 | 520 | ||
508 | CTCM_DBF_TEXT(TRACE, 6, __FUNCTION__); | ||
509 | fsm_deltimer(&ch->timer); | 521 | fsm_deltimer(&ch->timer); |
510 | buflen = *((__u16 *)ch->trans_skb->data); | 522 | buflen = *((__u16 *)ch->trans_skb->data); |
511 | if (do_debug) | 523 | CTCM_PR_DEBUG("%s: %s: Initial RX count = %d\n", |
512 | ctcm_pr_debug("%s: Initial RX count %d\n", dev->name, buflen); | 524 | __func__, dev->name, buflen); |
513 | 525 | ||
514 | if (buflen >= CTCM_INITIAL_BLOCKLEN) { | 526 | if (buflen >= CTCM_INITIAL_BLOCKLEN) { |
515 | if (ctcm_checkalloc_buffer(ch)) | 527 | if (ctcm_checkalloc_buffer(ch)) |
@@ -524,9 +536,9 @@ static void chx_rxidle(fsm_instance *fi, int event, void *arg) | |||
524 | } else | 536 | } else |
525 | fsm_event(priv->fsm, DEV_EVENT_RXUP, dev); | 537 | fsm_event(priv->fsm, DEV_EVENT_RXUP, dev); |
526 | } else { | 538 | } else { |
527 | if (do_debug) | 539 | CTCM_PR_DEBUG("%s: %s: Initial RX count %d not %d\n", |
528 | ctcm_pr_debug("%s: Initial RX count %d not %d\n", | 540 | __func__, dev->name, |
529 | dev->name, buflen, CTCM_INITIAL_BLOCKLEN); | 541 | buflen, CTCM_INITIAL_BLOCKLEN); |
530 | chx_firstio(fi, event, arg); | 542 | chx_firstio(fi, event, arg); |
531 | } | 543 | } |
532 | } | 544 | } |
@@ -548,14 +560,12 @@ static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg) | |||
548 | fsm_deltimer(&ch->timer); | 560 | fsm_deltimer(&ch->timer); |
549 | if (IS_MPC(ch)) { | 561 | if (IS_MPC(ch)) { |
550 | timeout = 1500; | 562 | timeout = 1500; |
551 | if (do_debug) | 563 | CTCM_PR_DEBUG("enter %s: cp=%i ch=0x%p id=%s\n", |
552 | ctcm_pr_debug("ctcm enter: %s(): cp=%i ch=0x%p id=%s\n", | 564 | __func__, smp_processor_id(), ch, ch->id); |
553 | __FUNCTION__, smp_processor_id(), ch, ch->id); | ||
554 | } | 565 | } |
555 | fsm_addtimer(&ch->timer, timeout, CTC_EVENT_TIMER, ch); | 566 | fsm_addtimer(&ch->timer, timeout, CTC_EVENT_TIMER, ch); |
556 | fsm_newstate(fi, CTC_STATE_SETUPWAIT); | 567 | fsm_newstate(fi, CTC_STATE_SETUPWAIT); |
557 | if (do_debug_ccw && IS_MPC(ch)) | 568 | CTCM_CCW_DUMP((char *)&ch->ccw[6], sizeof(struct ccw1) * 2); |
558 | ctcmpc_dumpit((char *)&ch->ccw[6], sizeof(struct ccw1) * 2); | ||
559 | 569 | ||
560 | if (event == CTC_EVENT_TIMER) /* only for timer not yet locked */ | 570 | if (event == CTC_EVENT_TIMER) /* only for timer not yet locked */ |
561 | spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); | 571 | spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); |
@@ -583,24 +593,12 @@ static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg) | |||
583 | */ | 593 | */ |
584 | static void ctcm_chx_start(fsm_instance *fi, int event, void *arg) | 594 | static void ctcm_chx_start(fsm_instance *fi, int event, void *arg) |
585 | { | 595 | { |
586 | struct channel *ch = arg; | 596 | struct channel *ch = arg; |
587 | int rc; | ||
588 | struct net_device *dev; | ||
589 | unsigned long saveflags; | 597 | unsigned long saveflags; |
598 | int rc; | ||
590 | 599 | ||
591 | CTCM_DBF_TEXT(TRACE, 5, __FUNCTION__); | 600 | CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s): %s", |
592 | if (ch == NULL) { | 601 | CTCM_FUNTAIL, ch->id, |
593 | ctcm_pr_warn("chx_start ch=NULL\n"); | ||
594 | return; | ||
595 | } | ||
596 | if (ch->netdev == NULL) { | ||
597 | ctcm_pr_warn("chx_start dev=NULL, id=%s\n", ch->id); | ||
598 | return; | ||
599 | } | ||
600 | dev = ch->netdev; | ||
601 | |||
602 | if (do_debug) | ||
603 | ctcm_pr_debug("%s: %s channel start\n", dev->name, | ||
604 | (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX"); | 602 | (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX"); |
605 | 603 | ||
606 | if (ch->trans_skb != NULL) { | 604 | if (ch->trans_skb != NULL) { |
@@ -618,11 +616,12 @@ static void ctcm_chx_start(fsm_instance *fi, int event, void *arg) | |||
618 | ch->ccw[1].count = 0; | 616 | ch->ccw[1].count = 0; |
619 | } | 617 | } |
620 | if (ctcm_checkalloc_buffer(ch)) { | 618 | if (ctcm_checkalloc_buffer(ch)) { |
621 | ctcm_pr_notice("%s: %s trans_skb allocation delayed " | 619 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, |
622 | "until first transfer\n", dev->name, | 620 | "%s(%s): %s trans_skb alloc delayed " |
621 | "until first transfer", | ||
622 | CTCM_FUNTAIL, ch->id, | ||
623 | (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX"); | 623 | (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX"); |
624 | } | 624 | } |
625 | |||
626 | ch->ccw[0].cmd_code = CCW_CMD_PREPARE; | 625 | ch->ccw[0].cmd_code = CCW_CMD_PREPARE; |
627 | ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC; | 626 | ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC; |
628 | ch->ccw[0].count = 0; | 627 | ch->ccw[0].count = 0; |
@@ -661,7 +660,6 @@ static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg) | |||
661 | int rc; | 660 | int rc; |
662 | int oldstate; | 661 | int oldstate; |
663 | 662 | ||
664 | CTCM_DBF_TEXT(TRACE, 2, __FUNCTION__); | ||
665 | fsm_deltimer(&ch->timer); | 663 | fsm_deltimer(&ch->timer); |
666 | if (IS_MPC(ch)) | 664 | if (IS_MPC(ch)) |
667 | fsm_deltimer(&ch->sweep_timer); | 665 | fsm_deltimer(&ch->sweep_timer); |
@@ -684,7 +682,7 @@ static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg) | |||
684 | fsm_deltimer(&ch->timer); | 682 | fsm_deltimer(&ch->timer); |
685 | if (event != CTC_EVENT_STOP) { | 683 | if (event != CTC_EVENT_STOP) { |
686 | fsm_newstate(fi, oldstate); | 684 | fsm_newstate(fi, oldstate); |
687 | ctcm_ccw_check_rc(ch, rc, (char *)__FUNCTION__); | 685 | ctcm_ccw_check_rc(ch, rc, (char *)__func__); |
688 | } | 686 | } |
689 | } | 687 | } |
690 | } | 688 | } |
@@ -703,7 +701,9 @@ static void ctcm_chx_cleanup(fsm_instance *fi, int state, | |||
703 | struct net_device *dev = ch->netdev; | 701 | struct net_device *dev = ch->netdev; |
704 | struct ctcm_priv *priv = dev->priv; | 702 | struct ctcm_priv *priv = dev->priv; |
705 | 703 | ||
706 | CTCM_DBF_TEXT(TRACE, 3, __FUNCTION__); | 704 | CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE, |
705 | "%s(%s): %s[%d]\n", | ||
706 | CTCM_FUNTAIL, dev->name, ch->id, state); | ||
707 | 707 | ||
708 | fsm_deltimer(&ch->timer); | 708 | fsm_deltimer(&ch->timer); |
709 | if (IS_MPC(ch)) | 709 | if (IS_MPC(ch)) |
@@ -743,7 +743,6 @@ static void ctcm_chx_cleanup(fsm_instance *fi, int state, | |||
743 | */ | 743 | */ |
744 | static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg) | 744 | static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg) |
745 | { | 745 | { |
746 | CTCM_DBF_TEXT(TRACE, 3, __FUNCTION__); | ||
747 | ctcm_chx_cleanup(fi, CTC_STATE_STOPPED, arg); | 746 | ctcm_chx_cleanup(fi, CTC_STATE_STOPPED, arg); |
748 | } | 747 | } |
749 | 748 | ||
@@ -771,7 +770,6 @@ static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg) | |||
771 | */ | 770 | */ |
772 | static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg) | 771 | static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg) |
773 | { | 772 | { |
774 | CTCM_DBF_TEXT(TRACE, 3, __FUNCTION__); | ||
775 | ctcm_chx_cleanup(fi, CTC_STATE_NOTOP, arg); | 773 | ctcm_chx_cleanup(fi, CTC_STATE_NOTOP, arg); |
776 | } | 774 | } |
777 | 775 | ||
@@ -809,8 +807,8 @@ static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg) | |||
809 | } | 807 | } |
810 | 808 | ||
811 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT, | 809 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT, |
812 | "%s : %s error during %s channel setup state=%s\n", | 810 | "%s(%s) : %s error during %s channel setup state=%s\n", |
813 | dev->name, ctc_ch_event_names[event], | 811 | CTCM_FUNTAIL, dev->name, ctc_ch_event_names[event], |
814 | (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX", | 812 | (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX", |
815 | fsm_getstate_str(fi)); | 813 | fsm_getstate_str(fi)); |
816 | 814 | ||
@@ -838,10 +836,12 @@ static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg) | |||
838 | int oldstate; | 836 | int oldstate; |
839 | int rc; | 837 | int rc; |
840 | 838 | ||
841 | CTCM_DBF_TEXT(TRACE, CTC_DBF_NOTICE, __FUNCTION__); | 839 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE, |
840 | "%s: %s[%d] of %s\n", | ||
841 | CTCM_FUNTAIL, ch->id, event, dev->name); | ||
842 | |||
842 | fsm_deltimer(&ch->timer); | 843 | fsm_deltimer(&ch->timer); |
843 | ctcm_pr_debug("%s: %s channel restart\n", dev->name, | 844 | |
844 | (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX"); | ||
845 | fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); | 845 | fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); |
846 | oldstate = fsm_getstate(fi); | 846 | oldstate = fsm_getstate(fi); |
847 | fsm_newstate(fi, CTC_STATE_STARTWAIT); | 847 | fsm_newstate(fi, CTC_STATE_STARTWAIT); |
@@ -876,13 +876,10 @@ static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg) | |||
876 | struct net_device *dev = ch->netdev; | 876 | struct net_device *dev = ch->netdev; |
877 | struct ctcm_priv *priv = dev->priv; | 877 | struct ctcm_priv *priv = dev->priv; |
878 | 878 | ||
879 | CTCM_DBF_TEXT(SETUP, 3, __FUNCTION__); | ||
880 | if (event == CTC_EVENT_TIMER) { | 879 | if (event == CTC_EVENT_TIMER) { |
881 | if (!IS_MPCDEV(dev)) | 880 | if (!IS_MPCDEV(dev)) |
882 | /* TODO : check if MPC deletes timer somewhere */ | 881 | /* TODO : check if MPC deletes timer somewhere */ |
883 | fsm_deltimer(&ch->timer); | 882 | fsm_deltimer(&ch->timer); |
884 | ctcm_pr_debug("%s: Timeout during RX init handshake\n", | ||
885 | dev->name); | ||
886 | if (ch->retry++ < 3) | 883 | if (ch->retry++ < 3) |
887 | ctcm_chx_restart(fi, event, arg); | 884 | ctcm_chx_restart(fi, event, arg); |
888 | else { | 885 | else { |
@@ -907,9 +904,10 @@ static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg) | |||
907 | struct net_device *dev = ch->netdev; | 904 | struct net_device *dev = ch->netdev; |
908 | struct ctcm_priv *priv = dev->priv; | 905 | struct ctcm_priv *priv = dev->priv; |
909 | 906 | ||
910 | CTCM_DBF_TEXT(SETUP, 3, __FUNCTION__); | 907 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, |
908 | "%s(%s): RX %s busy, init. fail", | ||
909 | CTCM_FUNTAIL, dev->name, ch->id); | ||
911 | fsm_newstate(fi, CTC_STATE_RXERR); | 910 | fsm_newstate(fi, CTC_STATE_RXERR); |
912 | ctcm_pr_warn("%s: RX busy. Initialization failed\n", dev->name); | ||
913 | fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); | 911 | fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); |
914 | } | 912 | } |
915 | 913 | ||
@@ -927,11 +925,10 @@ static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg) | |||
927 | struct net_device *dev = ch->netdev; | 925 | struct net_device *dev = ch->netdev; |
928 | struct ctcm_priv *priv = dev->priv; | 926 | struct ctcm_priv *priv = dev->priv; |
929 | 927 | ||
930 | CTCM_DBF_DEV_NAME(TRACE, dev, "Got remote disconnect, re-initializing"); | 928 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE, |
929 | "%s: %s: remote disconnect - re-init ...", | ||
930 | CTCM_FUNTAIL, dev->name); | ||
931 | fsm_deltimer(&ch->timer); | 931 | fsm_deltimer(&ch->timer); |
932 | if (do_debug) | ||
933 | ctcm_pr_debug("%s: Got remote disconnect, " | ||
934 | "re-initializing ...\n", dev->name); | ||
935 | /* | 932 | /* |
936 | * Notify device statemachine | 933 | * Notify device statemachine |
937 | */ | 934 | */ |
@@ -961,8 +958,6 @@ static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg) | |||
961 | 958 | ||
962 | if (event == CTC_EVENT_TIMER) { | 959 | if (event == CTC_EVENT_TIMER) { |
963 | fsm_deltimer(&ch->timer); | 960 | fsm_deltimer(&ch->timer); |
964 | CTCM_DBF_DEV_NAME(ERROR, dev, | ||
965 | "Timeout during TX init handshake"); | ||
966 | if (ch->retry++ < 3) | 961 | if (ch->retry++ < 3) |
967 | ctcm_chx_restart(fi, event, arg); | 962 | ctcm_chx_restart(fi, event, arg); |
968 | else { | 963 | else { |
@@ -971,9 +966,8 @@ static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg) | |||
971 | } | 966 | } |
972 | } else { | 967 | } else { |
973 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, | 968 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, |
974 | "%s : %s error during channel setup state=%s", | 969 | "%s(%s): %s in %s", CTCM_FUNTAIL, ch->id, |
975 | dev->name, ctc_ch_event_names[event], | 970 | ctc_ch_event_names[event], fsm_getstate_str(fi)); |
976 | fsm_getstate_str(fi)); | ||
977 | 971 | ||
978 | ctcm_pr_warn("%s: Error during TX init handshake\n", dev->name); | 972 | ctcm_pr_warn("%s: Error during TX init handshake\n", dev->name); |
979 | } | 973 | } |
@@ -993,15 +987,15 @@ static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg) | |||
993 | struct ctcm_priv *priv = dev->priv; | 987 | struct ctcm_priv *priv = dev->priv; |
994 | struct sk_buff *skb; | 988 | struct sk_buff *skb; |
995 | 989 | ||
996 | if (do_debug) | 990 | CTCM_PR_DEBUG("Enter: %s: cp=%i ch=0x%p id=%s\n", |
997 | ctcm_pr_debug("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n", | 991 | __func__, smp_processor_id(), ch, ch->id); |
998 | __FUNCTION__, smp_processor_id(), ch, ch->id); | ||
999 | 992 | ||
1000 | fsm_deltimer(&ch->timer); | 993 | fsm_deltimer(&ch->timer); |
1001 | if (ch->retry++ > 3) { | 994 | if (ch->retry++ > 3) { |
1002 | struct mpc_group *gptr = priv->mpcg; | 995 | struct mpc_group *gptr = priv->mpcg; |
1003 | ctcm_pr_debug("%s: TX retry failed, restarting channel\n", | 996 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO, |
1004 | dev->name); | 997 | "%s: %s: retries exceeded", |
998 | CTCM_FUNTAIL, ch->id); | ||
1005 | fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev); | 999 | fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev); |
1006 | /* call restart if not MPC or if MPC and mpcg fsm is ready. | 1000 | /* call restart if not MPC or if MPC and mpcg fsm is ready. |
1007 | use gptr as mpc indicator */ | 1001 | use gptr as mpc indicator */ |
@@ -1010,7 +1004,9 @@ static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg) | |||
1010 | goto done; | 1004 | goto done; |
1011 | } | 1005 | } |
1012 | 1006 | ||
1013 | ctcm_pr_debug("%s: TX retry %d\n", dev->name, ch->retry); | 1007 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, |
1008 | "%s : %s: retry %d", | ||
1009 | CTCM_FUNTAIL, ch->id, ch->retry); | ||
1014 | skb = skb_peek(&ch->io_queue); | 1010 | skb = skb_peek(&ch->io_queue); |
1015 | if (skb) { | 1011 | if (skb) { |
1016 | int rc = 0; | 1012 | int rc = 0; |
@@ -1018,8 +1014,9 @@ static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg) | |||
1018 | clear_normalized_cda(&ch->ccw[4]); | 1014 | clear_normalized_cda(&ch->ccw[4]); |
1019 | ch->ccw[4].count = skb->len; | 1015 | ch->ccw[4].count = skb->len; |
1020 | if (set_normalized_cda(&ch->ccw[4], skb->data)) { | 1016 | if (set_normalized_cda(&ch->ccw[4], skb->data)) { |
1021 | ctcm_pr_debug("%s: IDAL alloc failed, chan restart\n", | 1017 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO, |
1022 | dev->name); | 1018 | "%s: %s: IDAL alloc failed", |
1019 | CTCM_FUNTAIL, ch->id); | ||
1023 | fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev); | 1020 | fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev); |
1024 | ctcm_chx_restart(fi, event, arg); | 1021 | ctcm_chx_restart(fi, event, arg); |
1025 | goto done; | 1022 | goto done; |
@@ -1061,22 +1058,21 @@ static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg) | |||
1061 | struct channel *ch = arg; | 1058 | struct channel *ch = arg; |
1062 | struct net_device *dev = ch->netdev; | 1059 | struct net_device *dev = ch->netdev; |
1063 | struct ctcm_priv *priv = dev->priv; | 1060 | struct ctcm_priv *priv = dev->priv; |
1061 | int rd = CHANNEL_DIRECTION(ch->flags); | ||
1064 | 1062 | ||
1065 | CTCM_DBF_TEXT(TRACE, 3, __FUNCTION__); | ||
1066 | fsm_deltimer(&ch->timer); | 1063 | fsm_deltimer(&ch->timer); |
1067 | ctcm_pr_warn("%s %s : unrecoverable channel error\n", | 1064 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, |
1068 | CTC_DRIVER_NAME, dev->name); | 1065 | "%s: %s: %s unrecoverable channel error", |
1066 | CTCM_FUNTAIL, ch->id, rd == READ ? "RX" : "TX"); | ||
1067 | |||
1069 | if (IS_MPC(ch)) { | 1068 | if (IS_MPC(ch)) { |
1070 | priv->stats.tx_dropped++; | 1069 | priv->stats.tx_dropped++; |
1071 | priv->stats.tx_errors++; | 1070 | priv->stats.tx_errors++; |
1072 | } | 1071 | } |
1073 | 1072 | if (rd == READ) { | |
1074 | if (CHANNEL_DIRECTION(ch->flags) == READ) { | ||
1075 | ctcm_pr_debug("%s: RX I/O error\n", dev->name); | ||
1076 | fsm_newstate(fi, CTC_STATE_RXERR); | 1073 | fsm_newstate(fi, CTC_STATE_RXERR); |
1077 | fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); | 1074 | fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); |
1078 | } else { | 1075 | } else { |
1079 | ctcm_pr_debug("%s: TX I/O error\n", dev->name); | ||
1080 | fsm_newstate(fi, CTC_STATE_TXERR); | 1076 | fsm_newstate(fi, CTC_STATE_TXERR); |
1081 | fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev); | 1077 | fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev); |
1082 | } | 1078 | } |
@@ -1216,27 +1212,27 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg) | |||
1216 | struct sk_buff *skb; | 1212 | struct sk_buff *skb; |
1217 | int first = 1; | 1213 | int first = 1; |
1218 | int i; | 1214 | int i; |
1219 | struct timespec done_stamp; | ||
1220 | __u32 data_space; | 1215 | __u32 data_space; |
1221 | unsigned long duration; | 1216 | unsigned long duration; |
1222 | struct sk_buff *peekskb; | 1217 | struct sk_buff *peekskb; |
1223 | int rc; | 1218 | int rc; |
1224 | struct th_header *header; | 1219 | struct th_header *header; |
1225 | struct pdu *p_header; | 1220 | struct pdu *p_header; |
1221 | struct timespec done_stamp = current_kernel_time(); /* xtime */ | ||
1226 | 1222 | ||
1227 | if (do_debug) | 1223 | CTCM_PR_DEBUG("Enter %s: %s cp:%i\n", |
1228 | ctcm_pr_debug("%s cp:%i enter: %s()\n", | 1224 | __func__, dev->name, smp_processor_id()); |
1229 | dev->name, smp_processor_id(), __FUNCTION__); | ||
1230 | 1225 | ||
1231 | done_stamp = current_kernel_time(); /* xtime */ | 1226 | duration = |
1232 | duration = (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 | 1227 | (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 + |
1233 | + (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000; | 1228 | (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000; |
1234 | if (duration > ch->prof.tx_time) | 1229 | if (duration > ch->prof.tx_time) |
1235 | ch->prof.tx_time = duration; | 1230 | ch->prof.tx_time = duration; |
1236 | 1231 | ||
1237 | if (ch->irb->scsw.cmd.count != 0) | 1232 | if (ch->irb->scsw.cmd.count != 0) |
1238 | ctcm_pr_debug("%s: TX not complete, remaining %d bytes\n", | 1233 | CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG, |
1239 | dev->name, ch->irb->scsw.cmd.count); | 1234 | "%s(%s): TX not complete, remaining %d bytes", |
1235 | CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count); | ||
1240 | fsm_deltimer(&ch->timer); | 1236 | fsm_deltimer(&ch->timer); |
1241 | while ((skb = skb_dequeue(&ch->io_queue))) { | 1237 | while ((skb = skb_dequeue(&ch->io_queue))) { |
1242 | priv->stats.tx_packets++; | 1238 | priv->stats.tx_packets++; |
@@ -1250,7 +1246,6 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg) | |||
1250 | } | 1246 | } |
1251 | spin_lock(&ch->collect_lock); | 1247 | spin_lock(&ch->collect_lock); |
1252 | clear_normalized_cda(&ch->ccw[4]); | 1248 | clear_normalized_cda(&ch->ccw[4]); |
1253 | |||
1254 | if ((ch->collect_len <= 0) || (grp->in_sweep != 0)) { | 1249 | if ((ch->collect_len <= 0) || (grp->in_sweep != 0)) { |
1255 | spin_unlock(&ch->collect_lock); | 1250 | spin_unlock(&ch->collect_lock); |
1256 | fsm_newstate(fi, CTC_STATE_TXIDLE); | 1251 | fsm_newstate(fi, CTC_STATE_TXIDLE); |
@@ -1269,17 +1264,13 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg) | |||
1269 | if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue)) | 1264 | if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue)) |
1270 | ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue); | 1265 | ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue); |
1271 | i = 0; | 1266 | i = 0; |
1272 | 1267 | p_header = NULL; | |
1273 | if (do_debug_data) | ||
1274 | ctcm_pr_debug("ctcmpc: %s() building " | ||
1275 | "trans_skb from collect_q \n", __FUNCTION__); | ||
1276 | |||
1277 | data_space = grp->group_max_buflen - TH_HEADER_LENGTH; | 1268 | data_space = grp->group_max_buflen - TH_HEADER_LENGTH; |
1278 | 1269 | ||
1279 | if (do_debug_data) | 1270 | CTCM_PR_DBGDATA("%s: building trans_skb from collect_q" |
1280 | ctcm_pr_debug("ctcmpc: %s() building trans_skb from collect_q" | 1271 | " data_space:%04x\n", |
1281 | " data_space:%04x\n", __FUNCTION__, data_space); | 1272 | __func__, data_space); |
1282 | p_header = NULL; | 1273 | |
1283 | while ((skb = skb_dequeue(&ch->collect_queue))) { | 1274 | while ((skb = skb_dequeue(&ch->collect_queue))) { |
1284 | memcpy(skb_put(ch->trans_skb, skb->len), skb->data, skb->len); | 1275 | memcpy(skb_put(ch->trans_skb, skb->len), skb->data, skb->len); |
1285 | p_header = (struct pdu *) | 1276 | p_header = (struct pdu *) |
@@ -1290,15 +1281,12 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg) | |||
1290 | else | 1281 | else |
1291 | p_header->pdu_flag |= 0x20; | 1282 | p_header->pdu_flag |= 0x20; |
1292 | 1283 | ||
1293 | if (do_debug_data) { | 1284 | CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n", |
1294 | ctcm_pr_debug("ctcmpc: %s()trans_skb len:%04x \n", | 1285 | __func__, ch->trans_skb->len); |
1295 | __FUNCTION__, ch->trans_skb->len); | 1286 | CTCM_PR_DBGDATA("%s: pdu header and data for up" |
1296 | ctcm_pr_debug("ctcmpc: %s() pdu header and data" | 1287 | " to 32 bytes sent to vtam\n", __func__); |
1297 | " for up to 32 bytes sent to vtam\n", | 1288 | CTCM_D3_DUMP((char *)p_header, min_t(int, skb->len, 32)); |
1298 | __FUNCTION__); | 1289 | |
1299 | ctcmpc_dumpit((char *)p_header, | ||
1300 | min_t(int, skb->len, 32)); | ||
1301 | } | ||
1302 | ch->collect_len -= skb->len; | 1290 | ch->collect_len -= skb->len; |
1303 | data_space -= skb->len; | 1291 | data_space -= skb->len; |
1304 | priv->stats.tx_packets++; | 1292 | priv->stats.tx_packets++; |
@@ -1314,46 +1302,38 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg) | |||
1314 | if (p_header) | 1302 | if (p_header) |
1315 | p_header->pdu_flag |= PDU_LAST; /*Say it's the last one*/ | 1303 | p_header->pdu_flag |= PDU_LAST; /*Say it's the last one*/ |
1316 | header = kzalloc(TH_HEADER_LENGTH, gfp_type()); | 1304 | header = kzalloc(TH_HEADER_LENGTH, gfp_type()); |
1317 | |||
1318 | if (!header) { | 1305 | if (!header) { |
1319 | printk(KERN_WARNING "ctcmpc: OUT OF MEMORY IN %s()" | ||
1320 | ": Data Lost \n", __FUNCTION__); | ||
1321 | spin_unlock(&ch->collect_lock); | 1306 | spin_unlock(&ch->collect_lock); |
1322 | fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev); | 1307 | fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev); |
1323 | goto done; | 1308 | goto done; |
1324 | } | 1309 | } |
1325 | |||
1326 | header->th_ch_flag = TH_HAS_PDU; /* Normal data */ | 1310 | header->th_ch_flag = TH_HAS_PDU; /* Normal data */ |
1327 | ch->th_seq_num++; | 1311 | ch->th_seq_num++; |
1328 | header->th_seq_num = ch->th_seq_num; | 1312 | header->th_seq_num = ch->th_seq_num; |
1329 | 1313 | ||
1330 | if (do_debug_data) | 1314 | CTCM_PR_DBGDATA("%s: ToVTAM_th_seq= %08x\n" , |
1331 | ctcm_pr_debug("%s: ToVTAM_th_seq= %08x\n" , | 1315 | __func__, ch->th_seq_num); |
1332 | __FUNCTION__, ch->th_seq_num); | ||
1333 | 1316 | ||
1334 | memcpy(skb_push(ch->trans_skb, TH_HEADER_LENGTH), header, | 1317 | memcpy(skb_push(ch->trans_skb, TH_HEADER_LENGTH), header, |
1335 | TH_HEADER_LENGTH); /* put the TH on the packet */ | 1318 | TH_HEADER_LENGTH); /* put the TH on the packet */ |
1336 | 1319 | ||
1337 | kfree(header); | 1320 | kfree(header); |
1338 | 1321 | ||
1339 | if (do_debug_data) { | 1322 | CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n", |
1340 | ctcm_pr_debug("ctcmpc: %s()trans_skb len:%04x \n", | 1323 | __func__, ch->trans_skb->len); |
1341 | __FUNCTION__, ch->trans_skb->len); | 1324 | CTCM_PR_DBGDATA("%s: up-to-50 bytes of trans_skb " |
1342 | 1325 | "data to vtam from collect_q\n", __func__); | |
1343 | ctcm_pr_debug("ctcmpc: %s() up-to-50 bytes of trans_skb " | 1326 | CTCM_D3_DUMP((char *)ch->trans_skb->data, |
1344 | "data to vtam from collect_q\n", __FUNCTION__); | ||
1345 | ctcmpc_dumpit((char *)ch->trans_skb->data, | ||
1346 | min_t(int, ch->trans_skb->len, 50)); | 1327 | min_t(int, ch->trans_skb->len, 50)); |
1347 | } | ||
1348 | 1328 | ||
1349 | spin_unlock(&ch->collect_lock); | 1329 | spin_unlock(&ch->collect_lock); |
1350 | clear_normalized_cda(&ch->ccw[1]); | 1330 | clear_normalized_cda(&ch->ccw[1]); |
1351 | if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) { | 1331 | if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) { |
1352 | dev_kfree_skb_any(ch->trans_skb); | 1332 | dev_kfree_skb_any(ch->trans_skb); |
1353 | ch->trans_skb = NULL; | 1333 | ch->trans_skb = NULL; |
1354 | printk(KERN_WARNING | 1334 | CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR, |
1355 | "ctcmpc: %s()CCW failure - data lost\n", | 1335 | "%s: %s: IDAL alloc failed", |
1356 | __FUNCTION__); | 1336 | CTCM_FUNTAIL, ch->id); |
1357 | fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev); | 1337 | fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev); |
1358 | return; | 1338 | return; |
1359 | } | 1339 | } |
@@ -1373,7 +1353,6 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg) | |||
1373 | } | 1353 | } |
1374 | done: | 1354 | done: |
1375 | ctcm_clear_busy(dev); | 1355 | ctcm_clear_busy(dev); |
1376 | ctcm_pr_debug("ctcmpc exit: %s %s()\n", dev->name, __FUNCTION__); | ||
1377 | return; | 1356 | return; |
1378 | } | 1357 | } |
1379 | 1358 | ||
@@ -1393,26 +1372,25 @@ static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg) | |||
1393 | struct mpc_group *grp = priv->mpcg; | 1372 | struct mpc_group *grp = priv->mpcg; |
1394 | struct sk_buff *skb = ch->trans_skb; | 1373 | struct sk_buff *skb = ch->trans_skb; |
1395 | struct sk_buff *new_skb; | 1374 | struct sk_buff *new_skb; |
1396 | unsigned long saveflags = 0; /* avoids compiler warning */ | 1375 | unsigned long saveflags = 0; /* avoids compiler warning */ |
1397 | int len = ch->max_bufsize - ch->irb->scsw.cmd.count; | 1376 | int len = ch->max_bufsize - ch->irb->scsw.cmd.count; |
1398 | 1377 | ||
1399 | if (do_debug_data) { | 1378 | CTCM_PR_DEBUG("%s: %s: cp:%i %s maxbuf : %04x, len: %04x\n", |
1400 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, "mpc_ch_rx %s cp:%i %s\n", | 1379 | CTCM_FUNTAIL, dev->name, smp_processor_id(), |
1401 | dev->name, smp_processor_id(), ch->id); | 1380 | ch->id, ch->max_bufsize, len); |
1402 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, "mpc_ch_rx: maxbuf: %04x " | ||
1403 | "len: %04x\n", ch->max_bufsize, len); | ||
1404 | } | ||
1405 | fsm_deltimer(&ch->timer); | 1381 | fsm_deltimer(&ch->timer); |
1406 | 1382 | ||
1407 | if (skb == NULL) { | 1383 | if (skb == NULL) { |
1408 | ctcm_pr_debug("ctcmpc exit: %s() TRANS_SKB = NULL \n", | 1384 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, |
1409 | __FUNCTION__); | 1385 | "%s(%s): TRANS_SKB = NULL", |
1410 | goto again; | 1386 | CTCM_FUNTAIL, dev->name); |
1387 | goto again; | ||
1411 | } | 1388 | } |
1412 | 1389 | ||
1413 | if (len < TH_HEADER_LENGTH) { | 1390 | if (len < TH_HEADER_LENGTH) { |
1414 | ctcm_pr_info("%s: got packet with invalid length %d\n", | 1391 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, |
1415 | dev->name, len); | 1392 | "%s(%s): packet length %d to short", |
1393 | CTCM_FUNTAIL, dev->name, len); | ||
1416 | priv->stats.rx_dropped++; | 1394 | priv->stats.rx_dropped++; |
1417 | priv->stats.rx_length_errors++; | 1395 | priv->stats.rx_length_errors++; |
1418 | } else { | 1396 | } else { |
@@ -1422,11 +1400,9 @@ static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg) | |||
1422 | new_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC); | 1400 | new_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC); |
1423 | 1401 | ||
1424 | if (new_skb == NULL) { | 1402 | if (new_skb == NULL) { |
1425 | printk(KERN_INFO "ctcmpc:%s() NEW_SKB = NULL\n", | 1403 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, |
1426 | __FUNCTION__); | 1404 | "%s(%d): skb allocation failed", |
1427 | printk(KERN_WARNING "ctcmpc: %s() MEMORY ALLOC FAILED" | 1405 | CTCM_FUNTAIL, dev->name); |
1428 | " - DATA LOST - MPC FAILED\n", | ||
1429 | __FUNCTION__); | ||
1430 | fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev); | 1406 | fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev); |
1431 | goto again; | 1407 | goto again; |
1432 | } | 1408 | } |
@@ -1479,9 +1455,8 @@ again: | |||
1479 | break; | 1455 | break; |
1480 | } | 1456 | } |
1481 | 1457 | ||
1482 | if (do_debug) | 1458 | CTCM_PR_DEBUG("Exit %s: %s, ch=0x%p, id=%s\n", |
1483 | ctcm_pr_debug("ctcmpc exit : %s %s(): ch=0x%p id=%s\n", | 1459 | __func__, dev->name, ch, ch->id); |
1484 | dev->name, __FUNCTION__, ch, ch->id); | ||
1485 | 1460 | ||
1486 | } | 1461 | } |
1487 | 1462 | ||
@@ -1497,15 +1472,16 @@ static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg) | |||
1497 | struct channel *ch = arg; | 1472 | struct channel *ch = arg; |
1498 | struct net_device *dev = ch->netdev; | 1473 | struct net_device *dev = ch->netdev; |
1499 | struct ctcm_priv *priv = dev->priv; | 1474 | struct ctcm_priv *priv = dev->priv; |
1475 | struct mpc_group *gptr = priv->mpcg; | ||
1476 | |||
1477 | CTCM_PR_DEBUG("Enter %s: id=%s, ch=0x%p\n", | ||
1478 | __func__, ch->id, ch); | ||
1479 | |||
1480 | CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_INFO, | ||
1481 | "%s: %s: chstate:%i, grpstate:%i, prot:%i\n", | ||
1482 | CTCM_FUNTAIL, ch->id, fsm_getstate(fi), | ||
1483 | fsm_getstate(gptr->fsm), ch->protocol); | ||
1500 | 1484 | ||
1501 | if (do_debug) { | ||
1502 | struct mpc_group *gptr = priv->mpcg; | ||
1503 | ctcm_pr_debug("ctcmpc enter: %s(): ch=0x%p id=%s\n", | ||
1504 | __FUNCTION__, ch, ch->id); | ||
1505 | ctcm_pr_debug("%s() %s chstate:%i grpstate:%i chprotocol:%i\n", | ||
1506 | __FUNCTION__, ch->id, fsm_getstate(fi), | ||
1507 | fsm_getstate(gptr->fsm), ch->protocol); | ||
1508 | } | ||
1509 | if (fsm_getstate(fi) == CTC_STATE_TXIDLE) | 1485 | if (fsm_getstate(fi) == CTC_STATE_TXIDLE) |
1510 | MPC_DBF_DEV_NAME(TRACE, dev, "remote side issued READ? "); | 1486 | MPC_DBF_DEV_NAME(TRACE, dev, "remote side issued READ? "); |
1511 | 1487 | ||
@@ -1531,9 +1507,8 @@ static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg) | |||
1531 | ? CTC_STATE_RXINIT : CTC_STATE_TXINIT); | 1507 | ? CTC_STATE_RXINIT : CTC_STATE_TXINIT); |
1532 | 1508 | ||
1533 | done: | 1509 | done: |
1534 | if (do_debug) | 1510 | CTCM_PR_DEBUG("Exit %s: id=%s, ch=0x%p\n", |
1535 | ctcm_pr_debug("ctcmpc exit : %s(): ch=0x%p id=%s\n", | 1511 | __func__, ch->id, ch); |
1536 | __FUNCTION__, ch, ch->id); | ||
1537 | return; | 1512 | return; |
1538 | } | 1513 | } |
1539 | 1514 | ||
@@ -1556,12 +1531,9 @@ void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg) | |||
1556 | unsigned long saveflags = 0; /* avoids compiler warning */ | 1531 | unsigned long saveflags = 0; /* avoids compiler warning */ |
1557 | 1532 | ||
1558 | fsm_deltimer(&ch->timer); | 1533 | fsm_deltimer(&ch->timer); |
1559 | ctcm_pr_debug("%s cp:%i enter: %s()\n", | 1534 | CTCM_PR_DEBUG("%s: %s: %s: cp:%i, chstate:%i grpstate:%i\n", |
1560 | dev->name, smp_processor_id(), __FUNCTION__); | 1535 | __func__, ch->id, dev->name, smp_processor_id(), |
1561 | if (do_debug) | 1536 | fsm_getstate(fi), fsm_getstate(grp->fsm)); |
1562 | ctcm_pr_debug("%s() %s chstate:%i grpstate:%i\n", | ||
1563 | __FUNCTION__, ch->id, | ||
1564 | fsm_getstate(fi), fsm_getstate(grp->fsm)); | ||
1565 | 1537 | ||
1566 | fsm_newstate(fi, CTC_STATE_RXIDLE); | 1538 | fsm_newstate(fi, CTC_STATE_RXIDLE); |
1567 | /* XID processing complete */ | 1539 | /* XID processing complete */ |
@@ -1575,9 +1547,7 @@ void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg) | |||
1575 | skb_reset_tail_pointer(ch->trans_skb); | 1547 | skb_reset_tail_pointer(ch->trans_skb); |
1576 | ch->trans_skb->len = 0; | 1548 | ch->trans_skb->len = 0; |
1577 | ch->ccw[1].count = ch->max_bufsize; | 1549 | ch->ccw[1].count = ch->max_bufsize; |
1578 | if (do_debug_ccw) | 1550 | CTCM_CCW_DUMP((char *)&ch->ccw[0], sizeof(struct ccw1) * 3); |
1579 | ctcmpc_dumpit((char *)&ch->ccw[0], | ||
1580 | sizeof(struct ccw1) * 3); | ||
1581 | if (event == CTC_EVENT_START) | 1551 | if (event == CTC_EVENT_START) |
1582 | /* see remark about conditional locking */ | 1552 | /* see remark about conditional locking */ |
1583 | spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); | 1553 | spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); |
@@ -1598,9 +1568,6 @@ void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg) | |||
1598 | 1568 | ||
1599 | fsm_event(priv->fsm, DEV_EVENT_RXUP, dev); | 1569 | fsm_event(priv->fsm, DEV_EVENT_RXUP, dev); |
1600 | done: | 1570 | done: |
1601 | if (do_debug) | ||
1602 | ctcm_pr_debug("ctcmpc exit: %s %s()\n", | ||
1603 | dev->name, __FUNCTION__); | ||
1604 | return; | 1571 | return; |
1605 | } | 1572 | } |
1606 | 1573 | ||
@@ -1616,13 +1583,9 @@ static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg) | |||
1616 | struct ctcm_priv *priv = dev->priv; | 1583 | struct ctcm_priv *priv = dev->priv; |
1617 | struct mpc_group *grp = priv->mpcg; | 1584 | struct mpc_group *grp = priv->mpcg; |
1618 | 1585 | ||
1619 | if (do_debug) { | 1586 | CTCM_PR_DEBUG("%s(%s): %s(ch=0x%p), cp=%i, ChStat:%s, GrpStat:%s\n", |
1620 | ctcm_pr_debug("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s" | 1587 | __func__, dev->name, ch->id, ch, smp_processor_id(), |
1621 | "GrpState:%s ChState:%s\n", | 1588 | fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm)); |
1622 | __FUNCTION__, smp_processor_id(), ch, ch->id, | ||
1623 | fsm_getstate_str(grp->fsm), | ||
1624 | fsm_getstate_str(ch->fsm)); | ||
1625 | } | ||
1626 | 1589 | ||
1627 | switch (fsm_getstate(grp->fsm)) { | 1590 | switch (fsm_getstate(grp->fsm)) { |
1628 | case MPCG_STATE_XID2INITW: | 1591 | case MPCG_STATE_XID2INITW: |
@@ -1664,11 +1627,7 @@ static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg) | |||
1664 | break; | 1627 | break; |
1665 | } | 1628 | } |
1666 | 1629 | ||
1667 | if (do_debug) | ||
1668 | ctcm_pr_debug("ctcmpc exit : %s(): cp=%i ch=0x%p id=%s\n", | ||
1669 | __FUNCTION__, smp_processor_id(), ch, ch->id); | ||
1670 | return; | 1630 | return; |
1671 | |||
1672 | } | 1631 | } |
1673 | 1632 | ||
1674 | /* | 1633 | /* |
@@ -1683,11 +1642,9 @@ static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg) | |||
1683 | struct ctcm_priv *priv = dev->priv; | 1642 | struct ctcm_priv *priv = dev->priv; |
1684 | struct mpc_group *grp = priv->mpcg; | 1643 | struct mpc_group *grp = priv->mpcg; |
1685 | 1644 | ||
1686 | ctcm_pr_debug("ctcmpc enter: %s %s() %s \nGrpState:%s ChState:%s\n", | 1645 | CTCM_PR_DEBUG("%s(%s): %s\n ChState:%s GrpState:%s\n", |
1687 | dev->name, | 1646 | __func__, dev->name, ch->id, |
1688 | __FUNCTION__, ch->id, | 1647 | fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm)); |
1689 | fsm_getstate_str(grp->fsm), | ||
1690 | fsm_getstate_str(ch->fsm)); | ||
1691 | 1648 | ||
1692 | fsm_deltimer(&ch->timer); | 1649 | fsm_deltimer(&ch->timer); |
1693 | 1650 | ||
@@ -1750,16 +1707,12 @@ static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg) | |||
1750 | if (ch->in_mpcgroup) | 1707 | if (ch->in_mpcgroup) |
1751 | fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch); | 1708 | fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch); |
1752 | else | 1709 | else |
1753 | printk(KERN_WARNING "ctcmpc: %s() Not all channels have" | 1710 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, |
1754 | " been added to group\n", __FUNCTION__); | 1711 | "%s(%s): channel %s not added to group", |
1712 | CTCM_FUNTAIL, dev->name, ch->id); | ||
1755 | 1713 | ||
1756 | done: | 1714 | done: |
1757 | if (do_debug) | ||
1758 | ctcm_pr_debug("ctcmpc exit : %s()%s ch=0x%p id=%s\n", | ||
1759 | __FUNCTION__, dev->name, ch, ch->id); | ||
1760 | |||
1761 | return; | 1715 | return; |
1762 | |||
1763 | } | 1716 | } |
1764 | 1717 | ||
1765 | /* | 1718 | /* |
@@ -1774,13 +1727,7 @@ static void ctcmpc_chx_resend(fsm_instance *fsm, int event, void *arg) | |||
1774 | struct ctcm_priv *priv = dev->priv; | 1727 | struct ctcm_priv *priv = dev->priv; |
1775 | struct mpc_group *grp = priv->mpcg; | 1728 | struct mpc_group *grp = priv->mpcg; |
1776 | 1729 | ||
1777 | ctcm_pr_debug("ctcmpc enter: %s %s() %s \nGrpState:%s ChState:%s\n", | ||
1778 | dev->name, __FUNCTION__, ch->id, | ||
1779 | fsm_getstate_str(grp->fsm), | ||
1780 | fsm_getstate_str(ch->fsm)); | ||
1781 | |||
1782 | fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch); | 1730 | fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch); |
1783 | |||
1784 | return; | 1731 | return; |
1785 | } | 1732 | } |
1786 | 1733 | ||
@@ -1802,19 +1749,16 @@ static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg) | |||
1802 | int rc = 0; | 1749 | int rc = 0; |
1803 | unsigned long saveflags = 0; | 1750 | unsigned long saveflags = 0; |
1804 | 1751 | ||
1805 | if (do_debug) | 1752 | CTCM_PR_DEBUG("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n", |
1806 | ctcm_pr_debug("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n", | 1753 | __func__, smp_processor_id(), ach, ach->id); |
1807 | __FUNCTION__, smp_processor_id(), ach, ach->id); | ||
1808 | 1754 | ||
1809 | if (grp->in_sweep == 0) | 1755 | if (grp->in_sweep == 0) |
1810 | goto done; | 1756 | goto done; |
1811 | 1757 | ||
1812 | if (do_debug_data) { | 1758 | CTCM_PR_DBGDATA("%s: 1: ToVTAM_th_seq= %08x\n" , |
1813 | ctcm_pr_debug("ctcmpc: %s() 1: ToVTAM_th_seq= %08x\n" , | 1759 | __func__, wch->th_seq_num); |
1814 | __FUNCTION__, wch->th_seq_num); | 1760 | CTCM_PR_DBGDATA("%s: 1: FromVTAM_th_seq= %08x\n" , |
1815 | ctcm_pr_debug("ctcmpc: %s() 1: FromVTAM_th_seq= %08x\n" , | 1761 | __func__, rch->th_seq_num); |
1816 | __FUNCTION__, rch->th_seq_num); | ||
1817 | } | ||
1818 | 1762 | ||
1819 | if (fsm_getstate(wch->fsm) != CTC_STATE_TXIDLE) { | 1763 | if (fsm_getstate(wch->fsm) != CTC_STATE_TXIDLE) { |
1820 | /* give the previous IO time to complete */ | 1764 | /* give the previous IO time to complete */ |
@@ -1853,11 +1797,9 @@ static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg) | |||
1853 | 1797 | ||
1854 | header->sw.th_last_seq = wch->th_seq_num; | 1798 | header->sw.th_last_seq = wch->th_seq_num; |
1855 | 1799 | ||
1856 | if (do_debug_ccw) | 1800 | CTCM_CCW_DUMP((char *)&wch->ccw[3], sizeof(struct ccw1) * 3); |
1857 | ctcmpc_dumpit((char *)&wch->ccw[3], sizeof(struct ccw1) * 3); | 1801 | CTCM_PR_DBGDATA("%s: sweep packet\n", __func__); |
1858 | 1802 | CTCM_D3_DUMP((char *)header, TH_SWEEP_LENGTH); | |
1859 | ctcm_pr_debug("ctcmpc: %s() sweep packet\n", __FUNCTION__); | ||
1860 | ctcmpc_dumpit((char *)header, TH_SWEEP_LENGTH); | ||
1861 | 1803 | ||
1862 | fsm_addtimer(&wch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, wch); | 1804 | fsm_addtimer(&wch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, wch); |
1863 | fsm_newstate(wch->fsm, CTC_STATE_TX); | 1805 | fsm_newstate(wch->fsm, CTC_STATE_TX); |
@@ -1876,19 +1818,13 @@ static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg) | |||
1876 | ctcm_clear_busy_do(dev); | 1818 | ctcm_clear_busy_do(dev); |
1877 | } | 1819 | } |
1878 | 1820 | ||
1879 | if (do_debug_data) { | 1821 | CTCM_PR_DBGDATA("%s: To-/From-VTAM_th_seq = %08x/%08x\n" , |
1880 | ctcm_pr_debug("ctcmpc: %s()2: ToVTAM_th_seq= %08x\n" , | 1822 | __func__, wch->th_seq_num, rch->th_seq_num); |
1881 | __FUNCTION__, wch->th_seq_num); | ||
1882 | ctcm_pr_debug("ctcmpc: %s()2: FromVTAM_th_seq= %08x\n" , | ||
1883 | __FUNCTION__, rch->th_seq_num); | ||
1884 | } | ||
1885 | 1823 | ||
1886 | if (rc != 0) | 1824 | if (rc != 0) |
1887 | ctcm_ccw_check_rc(wch, rc, "send sweep"); | 1825 | ctcm_ccw_check_rc(wch, rc, "send sweep"); |
1888 | 1826 | ||
1889 | done: | 1827 | done: |
1890 | if (do_debug) | ||
1891 | ctcm_pr_debug("ctcmpc exit: %s() %s\n", __FUNCTION__, ach->id); | ||
1892 | return; | 1828 | return; |
1893 | } | 1829 | } |
1894 | 1830 | ||
@@ -2149,9 +2085,8 @@ static void dev_action_stop(fsm_instance *fi, int event, void *arg) | |||
2149 | struct channel *ch = priv->channel[direction]; | 2085 | struct channel *ch = priv->channel[direction]; |
2150 | fsm_event(ch->fsm, CTC_EVENT_STOP, ch); | 2086 | fsm_event(ch->fsm, CTC_EVENT_STOP, ch); |
2151 | ch->th_seq_num = 0x00; | 2087 | ch->th_seq_num = 0x00; |
2152 | if (do_debug) | 2088 | CTCM_PR_DEBUG("%s: CH_th_seq= %08x\n", |
2153 | ctcm_pr_debug("ctcm: %s() CH_th_seq= %08x\n", | 2089 | __func__, ch->th_seq_num); |
2154 | __FUNCTION__, ch->th_seq_num); | ||
2155 | } | 2090 | } |
2156 | if (IS_MPC(priv)) | 2091 | if (IS_MPC(priv)) |
2157 | fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET); | 2092 | fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET); |
@@ -2199,8 +2134,11 @@ static void dev_action_chup(fsm_instance *fi, int event, void *arg) | |||
2199 | { | 2134 | { |
2200 | struct net_device *dev = arg; | 2135 | struct net_device *dev = arg; |
2201 | struct ctcm_priv *priv = dev->priv; | 2136 | struct ctcm_priv *priv = dev->priv; |
2137 | int dev_stat = fsm_getstate(fi); | ||
2202 | 2138 | ||
2203 | CTCMY_DBF_DEV_NAME(SETUP, dev, ""); | 2139 | CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE, |
2140 | "%s(%s): priv = %p [%d,%d]\n ", CTCM_FUNTAIL, | ||
2141 | dev->name, dev->priv, dev_stat, event); | ||
2204 | 2142 | ||
2205 | switch (fsm_getstate(fi)) { | 2143 | switch (fsm_getstate(fi)) { |
2206 | case DEV_STATE_STARTWAIT_RXTX: | 2144 | case DEV_STATE_STARTWAIT_RXTX: |
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index 6b13c1c1beb8..126a3ebb8ab2 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c | |||
@@ -84,20 +84,19 @@ void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb) | |||
84 | skb_pull(pskb, LL_HEADER_LENGTH); | 84 | skb_pull(pskb, LL_HEADER_LENGTH); |
85 | if ((ch->protocol == CTCM_PROTO_S390) && | 85 | if ((ch->protocol == CTCM_PROTO_S390) && |
86 | (header->type != ETH_P_IP)) { | 86 | (header->type != ETH_P_IP)) { |
87 | |||
88 | if (!(ch->logflags & LOG_FLAG_ILLEGALPKT)) { | 87 | if (!(ch->logflags & LOG_FLAG_ILLEGALPKT)) { |
88 | ch->logflags |= LOG_FLAG_ILLEGALPKT; | ||
89 | /* | 89 | /* |
90 | * Check packet type only if we stick strictly | 90 | * Check packet type only if we stick strictly |
91 | * to S/390's protocol of OS390. This only | 91 | * to S/390's protocol of OS390. This only |
92 | * supports IP. Otherwise allow any packet | 92 | * supports IP. Otherwise allow any packet |
93 | * type. | 93 | * type. |
94 | */ | 94 | */ |
95 | ctcm_pr_warn("%s Illegal packet type 0x%04x " | 95 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, |
96 | "received, dropping\n", | 96 | "%s(%s): Illegal packet type 0x%04x" |
97 | dev->name, header->type); | 97 | " - dropping", |
98 | ch->logflags |= LOG_FLAG_ILLEGALPKT; | 98 | CTCM_FUNTAIL, dev->name, header->type); |
99 | } | 99 | } |
100 | |||
101 | priv->stats.rx_dropped++; | 100 | priv->stats.rx_dropped++; |
102 | priv->stats.rx_frame_errors++; | 101 | priv->stats.rx_frame_errors++; |
103 | return; | 102 | return; |
@@ -105,11 +104,11 @@ void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb) | |||
105 | pskb->protocol = ntohs(header->type); | 104 | pskb->protocol = ntohs(header->type); |
106 | if (header->length <= LL_HEADER_LENGTH) { | 105 | if (header->length <= LL_HEADER_LENGTH) { |
107 | if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) { | 106 | if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) { |
108 | ctcm_pr_warn( | 107 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, |
109 | "%s Illegal packet size %d " | 108 | "%s(%s): Illegal packet size %d(%d,%d)" |
110 | "received (MTU=%d blocklen=%d), " | 109 | "- dropping", |
111 | "dropping\n", dev->name, header->length, | 110 | CTCM_FUNTAIL, dev->name, |
112 | dev->mtu, len); | 111 | header->length, dev->mtu, len); |
113 | ch->logflags |= LOG_FLAG_ILLEGALSIZE; | 112 | ch->logflags |= LOG_FLAG_ILLEGALSIZE; |
114 | } | 113 | } |
115 | 114 | ||
@@ -122,10 +121,10 @@ void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb) | |||
122 | if ((header->length > skb_tailroom(pskb)) || | 121 | if ((header->length > skb_tailroom(pskb)) || |
123 | (header->length > len)) { | 122 | (header->length > len)) { |
124 | if (!(ch->logflags & LOG_FLAG_OVERRUN)) { | 123 | if (!(ch->logflags & LOG_FLAG_OVERRUN)) { |
125 | ctcm_pr_warn( | 124 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, |
126 | "%s Illegal packet size %d (beyond the" | 125 | "%s(%s): Packet size %d (overrun)" |
127 | " end of received data), dropping\n", | 126 | " - dropping", CTCM_FUNTAIL, |
128 | dev->name, header->length); | 127 | dev->name, header->length); |
129 | ch->logflags |= LOG_FLAG_OVERRUN; | 128 | ch->logflags |= LOG_FLAG_OVERRUN; |
130 | } | 129 | } |
131 | 130 | ||
@@ -139,9 +138,9 @@ void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb) | |||
139 | skb = dev_alloc_skb(pskb->len); | 138 | skb = dev_alloc_skb(pskb->len); |
140 | if (!skb) { | 139 | if (!skb) { |
141 | if (!(ch->logflags & LOG_FLAG_NOMEM)) { | 140 | if (!(ch->logflags & LOG_FLAG_NOMEM)) { |
142 | ctcm_pr_warn( | 141 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, |
143 | "%s Out of memory in ctcm_unpack_skb\n", | 142 | "%s(%s): MEMORY allocation error", |
144 | dev->name); | 143 | CTCM_FUNTAIL, dev->name); |
145 | ch->logflags |= LOG_FLAG_NOMEM; | 144 | ch->logflags |= LOG_FLAG_NOMEM; |
146 | } | 145 | } |
147 | priv->stats.rx_dropped++; | 146 | priv->stats.rx_dropped++; |
@@ -184,7 +183,7 @@ void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb) | |||
184 | */ | 183 | */ |
185 | static void channel_free(struct channel *ch) | 184 | static void channel_free(struct channel *ch) |
186 | { | 185 | { |
187 | CTCM_DBF_TEXT(TRACE, 2, __FUNCTION__); | 186 | CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s)", CTCM_FUNTAIL, ch->id); |
188 | ch->flags &= ~CHANNEL_FLAGS_INUSE; | 187 | ch->flags &= ~CHANNEL_FLAGS_INUSE; |
189 | fsm_newstate(ch->fsm, CTC_STATE_IDLE); | 188 | fsm_newstate(ch->fsm, CTC_STATE_IDLE); |
190 | } | 189 | } |
@@ -251,19 +250,12 @@ static struct channel *channel_get(enum channel_types type, | |||
251 | { | 250 | { |
252 | struct channel *ch = channels; | 251 | struct channel *ch = channels; |
253 | 252 | ||
254 | if (do_debug) { | ||
255 | char buf[64]; | ||
256 | sprintf(buf, "%s(%d, %s, %d)\n", | ||
257 | CTCM_FUNTAIL, type, id, direction); | ||
258 | CTCM_DBF_TEXT(TRACE, CTC_DBF_INFO, buf); | ||
259 | } | ||
260 | while (ch && (strncmp(ch->id, id, CTCM_ID_SIZE) || (ch->type != type))) | 253 | while (ch && (strncmp(ch->id, id, CTCM_ID_SIZE) || (ch->type != type))) |
261 | ch = ch->next; | 254 | ch = ch->next; |
262 | if (!ch) { | 255 | if (!ch) { |
263 | char buf[64]; | 256 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, |
264 | sprintf(buf, "%s(%d, %s, %d) not found in channel list\n", | 257 | "%s(%d, %s, %d) not found in channel list\n", |
265 | CTCM_FUNTAIL, type, id, direction); | 258 | CTCM_FUNTAIL, type, id, direction); |
266 | CTCM_DBF_TEXT(ERROR, CTC_DBF_ERROR, buf); | ||
267 | } else { | 259 | } else { |
268 | if (ch->flags & CHANNEL_FLAGS_INUSE) | 260 | if (ch->flags & CHANNEL_FLAGS_INUSE) |
269 | ch = NULL; | 261 | ch = NULL; |
@@ -283,8 +275,9 @@ static long ctcm_check_irb_error(struct ccw_device *cdev, struct irb *irb) | |||
283 | if (!IS_ERR(irb)) | 275 | if (!IS_ERR(irb)) |
284 | return 0; | 276 | return 0; |
285 | 277 | ||
286 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_WARN, "irb error %ld on device %s\n", | 278 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_WARN, |
287 | PTR_ERR(irb), cdev->dev.bus_id); | 279 | "irb error %ld on device %s\n", |
280 | PTR_ERR(irb), cdev->dev.bus_id); | ||
288 | 281 | ||
289 | switch (PTR_ERR(irb)) { | 282 | switch (PTR_ERR(irb)) { |
290 | case -EIO: | 283 | case -EIO: |
@@ -307,58 +300,85 @@ static long ctcm_check_irb_error(struct ccw_device *cdev, struct irb *irb) | |||
307 | * ch The channel, the sense code belongs to. | 300 | * ch The channel, the sense code belongs to. |
308 | * sense The sense code to inspect. | 301 | * sense The sense code to inspect. |
309 | */ | 302 | */ |
310 | static inline void ccw_unit_check(struct channel *ch, unsigned char sense) | 303 | static inline void ccw_unit_check(struct channel *ch, __u8 sense) |
311 | { | 304 | { |
312 | CTCM_DBF_TEXT(TRACE, 5, __FUNCTION__); | 305 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, |
306 | "%s(%s): %02x", | ||
307 | CTCM_FUNTAIL, ch->id, sense); | ||
308 | |||
313 | if (sense & SNS0_INTERVENTION_REQ) { | 309 | if (sense & SNS0_INTERVENTION_REQ) { |
314 | if (sense & 0x01) { | 310 | if (sense & 0x01) { |
315 | ctcm_pr_debug("%s: Interface disc. or Sel. reset " | 311 | if (ch->sense_rc != 0x01) { |
316 | "(remote)\n", ch->id); | 312 | ctcm_pr_debug("%s: Interface disc. or Sel. " |
313 | "reset (remote)\n", ch->id); | ||
314 | ch->sense_rc = 0x01; | ||
315 | } | ||
317 | fsm_event(ch->fsm, CTC_EVENT_UC_RCRESET, ch); | 316 | fsm_event(ch->fsm, CTC_EVENT_UC_RCRESET, ch); |
318 | } else { | 317 | } else { |
319 | ctcm_pr_debug("%s: System reset (remote)\n", ch->id); | 318 | if (ch->sense_rc != SNS0_INTERVENTION_REQ) { |
319 | ctcm_pr_debug("%s: System reset (remote)\n", | ||
320 | ch->id); | ||
321 | ch->sense_rc = SNS0_INTERVENTION_REQ; | ||
322 | } | ||
320 | fsm_event(ch->fsm, CTC_EVENT_UC_RSRESET, ch); | 323 | fsm_event(ch->fsm, CTC_EVENT_UC_RSRESET, ch); |
321 | } | 324 | } |
322 | } else if (sense & SNS0_EQUIPMENT_CHECK) { | 325 | } else if (sense & SNS0_EQUIPMENT_CHECK) { |
323 | if (sense & SNS0_BUS_OUT_CHECK) { | 326 | if (sense & SNS0_BUS_OUT_CHECK) { |
324 | ctcm_pr_warn("%s: Hardware malfunction (remote)\n", | 327 | if (ch->sense_rc != SNS0_BUS_OUT_CHECK) { |
325 | ch->id); | 328 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN, |
329 | "%s(%s): remote HW error %02x", | ||
330 | CTCM_FUNTAIL, ch->id, sense); | ||
331 | ch->sense_rc = SNS0_BUS_OUT_CHECK; | ||
332 | } | ||
326 | fsm_event(ch->fsm, CTC_EVENT_UC_HWFAIL, ch); | 333 | fsm_event(ch->fsm, CTC_EVENT_UC_HWFAIL, ch); |
327 | } else { | 334 | } else { |
328 | ctcm_pr_warn("%s: Read-data parity error (remote)\n", | 335 | if (ch->sense_rc != SNS0_EQUIPMENT_CHECK) { |
329 | ch->id); | 336 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN, |
337 | "%s(%s): remote read parity error %02x", | ||
338 | CTCM_FUNTAIL, ch->id, sense); | ||
339 | ch->sense_rc = SNS0_EQUIPMENT_CHECK; | ||
340 | } | ||
330 | fsm_event(ch->fsm, CTC_EVENT_UC_RXPARITY, ch); | 341 | fsm_event(ch->fsm, CTC_EVENT_UC_RXPARITY, ch); |
331 | } | 342 | } |
332 | } else if (sense & SNS0_BUS_OUT_CHECK) { | 343 | } else if (sense & SNS0_BUS_OUT_CHECK) { |
333 | if (sense & 0x04) { | 344 | if (ch->sense_rc != SNS0_BUS_OUT_CHECK) { |
334 | ctcm_pr_warn("%s: Data-streaming timeout)\n", ch->id); | 345 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN, |
346 | "%s(%s): BUS OUT error %02x", | ||
347 | CTCM_FUNTAIL, ch->id, sense); | ||
348 | ch->sense_rc = SNS0_BUS_OUT_CHECK; | ||
349 | } | ||
350 | if (sense & 0x04) /* data-streaming timeout */ | ||
335 | fsm_event(ch->fsm, CTC_EVENT_UC_TXTIMEOUT, ch); | 351 | fsm_event(ch->fsm, CTC_EVENT_UC_TXTIMEOUT, ch); |
336 | } else { | 352 | else /* Data-transfer parity error */ |
337 | ctcm_pr_warn("%s: Data-transfer parity error\n", | ||
338 | ch->id); | ||
339 | fsm_event(ch->fsm, CTC_EVENT_UC_TXPARITY, ch); | 353 | fsm_event(ch->fsm, CTC_EVENT_UC_TXPARITY, ch); |
340 | } | ||
341 | } else if (sense & SNS0_CMD_REJECT) { | 354 | } else if (sense & SNS0_CMD_REJECT) { |
342 | ctcm_pr_warn("%s: Command reject\n", ch->id); | 355 | if (ch->sense_rc != SNS0_CMD_REJECT) { |
356 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN, | ||
357 | "%s(%s): Command rejected", | ||
358 | CTCM_FUNTAIL, ch->id); | ||
359 | ch->sense_rc = SNS0_CMD_REJECT; | ||
360 | } | ||
343 | } else if (sense == 0) { | 361 | } else if (sense == 0) { |
344 | ctcm_pr_debug("%s: Unit check ZERO\n", ch->id); | 362 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN, |
363 | "%s(%s): Unit check ZERO", | ||
364 | CTCM_FUNTAIL, ch->id); | ||
345 | fsm_event(ch->fsm, CTC_EVENT_UC_ZERO, ch); | 365 | fsm_event(ch->fsm, CTC_EVENT_UC_ZERO, ch); |
346 | } else { | 366 | } else { |
347 | ctcm_pr_warn("%s: Unit Check with sense code: %02x\n", | 367 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN, |
348 | ch->id, sense); | 368 | "%s(%s): Unit check code %02x unknown", |
369 | CTCM_FUNTAIL, ch->id, sense); | ||
349 | fsm_event(ch->fsm, CTC_EVENT_UC_UNKNOWN, ch); | 370 | fsm_event(ch->fsm, CTC_EVENT_UC_UNKNOWN, ch); |
350 | } | 371 | } |
351 | } | 372 | } |
352 | 373 | ||
353 | int ctcm_ch_alloc_buffer(struct channel *ch) | 374 | int ctcm_ch_alloc_buffer(struct channel *ch) |
354 | { | 375 | { |
355 | CTCM_DBF_TEXT(TRACE, 5, __FUNCTION__); | ||
356 | |||
357 | clear_normalized_cda(&ch->ccw[1]); | 376 | clear_normalized_cda(&ch->ccw[1]); |
358 | ch->trans_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC | GFP_DMA); | 377 | ch->trans_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC | GFP_DMA); |
359 | if (ch->trans_skb == NULL) { | 378 | if (ch->trans_skb == NULL) { |
360 | ctcm_pr_warn("%s: Couldn't alloc %s trans_skb\n", | 379 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, |
361 | ch->id, | 380 | "%s(%s): %s trans_skb allocation error", |
381 | CTCM_FUNTAIL, ch->id, | ||
362 | (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX"); | 382 | (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX"); |
363 | return -ENOMEM; | 383 | return -ENOMEM; |
364 | } | 384 | } |
@@ -367,9 +387,9 @@ int ctcm_ch_alloc_buffer(struct channel *ch) | |||
367 | if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) { | 387 | if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) { |
368 | dev_kfree_skb(ch->trans_skb); | 388 | dev_kfree_skb(ch->trans_skb); |
369 | ch->trans_skb = NULL; | 389 | ch->trans_skb = NULL; |
370 | ctcm_pr_warn("%s: set_normalized_cda for %s " | 390 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, |
371 | "trans_skb failed, dropping packets\n", | 391 | "%s(%s): %s set norm_cda failed", |
372 | ch->id, | 392 | CTCM_FUNTAIL, ch->id, |
373 | (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX"); | 393 | (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX"); |
374 | return -ENOMEM; | 394 | return -ENOMEM; |
375 | } | 395 | } |
@@ -516,7 +536,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb) | |||
516 | atomic_dec(&skb->users); | 536 | atomic_dec(&skb->users); |
517 | skb_pull(skb, LL_HEADER_LENGTH + 2); | 537 | skb_pull(skb, LL_HEADER_LENGTH + 2); |
518 | ctcm_clear_busy(ch->netdev); | 538 | ctcm_clear_busy(ch->netdev); |
519 | return -EBUSY; | 539 | return -ENOMEM; |
520 | } | 540 | } |
521 | 541 | ||
522 | skb_reset_tail_pointer(ch->trans_skb); | 542 | skb_reset_tail_pointer(ch->trans_skb); |
@@ -570,15 +590,12 @@ static void ctcmpc_send_sweep_req(struct channel *rch) | |||
570 | struct th_sweep *header; | 590 | struct th_sweep *header; |
571 | struct sk_buff *sweep_skb; | 591 | struct sk_buff *sweep_skb; |
572 | struct channel *ch; | 592 | struct channel *ch; |
573 | int rc = 0; | 593 | /* int rc = 0; */ |
574 | 594 | ||
575 | priv = dev->priv; | 595 | priv = dev->priv; |
576 | grp = priv->mpcg; | 596 | grp = priv->mpcg; |
577 | ch = priv->channel[WRITE]; | 597 | ch = priv->channel[WRITE]; |
578 | 598 | ||
579 | if (do_debug) | ||
580 | MPC_DBF_DEV_NAME(TRACE, dev, ch->id); | ||
581 | |||
582 | /* sweep processing is not complete until response and request */ | 599 | /* sweep processing is not complete until response and request */ |
583 | /* has completed for all read channels in group */ | 600 | /* has completed for all read channels in group */ |
584 | if (grp->in_sweep == 0) { | 601 | if (grp->in_sweep == 0) { |
@@ -590,17 +607,16 @@ static void ctcmpc_send_sweep_req(struct channel *rch) | |||
590 | sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA); | 607 | sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA); |
591 | 608 | ||
592 | if (sweep_skb == NULL) { | 609 | if (sweep_skb == NULL) { |
593 | printk(KERN_INFO "Couldn't alloc sweep_skb\n"); | 610 | /* rc = -ENOMEM; */ |
594 | rc = -ENOMEM; | 611 | goto nomem; |
595 | goto done; | ||
596 | } | 612 | } |
597 | 613 | ||
598 | header = kmalloc(TH_SWEEP_LENGTH, gfp_type()); | 614 | header = kmalloc(TH_SWEEP_LENGTH, gfp_type()); |
599 | 615 | ||
600 | if (!header) { | 616 | if (!header) { |
601 | dev_kfree_skb_any(sweep_skb); | 617 | dev_kfree_skb_any(sweep_skb); |
602 | rc = -ENOMEM; | 618 | /* rc = -ENOMEM; */ |
603 | goto done; | 619 | goto nomem; |
604 | } | 620 | } |
605 | 621 | ||
606 | header->th.th_seg = 0x00 ; | 622 | header->th.th_seg = 0x00 ; |
@@ -621,12 +637,10 @@ static void ctcmpc_send_sweep_req(struct channel *rch) | |||
621 | 637 | ||
622 | return; | 638 | return; |
623 | 639 | ||
624 | done: | 640 | nomem: |
625 | if (rc != 0) { | 641 | grp->in_sweep = 0; |
626 | grp->in_sweep = 0; | 642 | ctcm_clear_busy(dev); |
627 | ctcm_clear_busy(dev); | 643 | fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); |
628 | fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); | ||
629 | } | ||
630 | 644 | ||
631 | return; | 645 | return; |
632 | } | 646 | } |
@@ -648,11 +662,9 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb) | |||
648 | unsigned long saveflags = 0; /* avoids compiler warning */ | 662 | unsigned long saveflags = 0; /* avoids compiler warning */ |
649 | __u16 block_len; | 663 | __u16 block_len; |
650 | 664 | ||
651 | if (do_debug) | 665 | CTCM_PR_DEBUG("Enter %s: %s, cp=%i ch=0x%p id=%s state=%s\n", |
652 | ctcm_pr_debug( | 666 | __func__, dev->name, smp_processor_id(), ch, |
653 | "ctcm enter: %s(): %s cp=%i ch=0x%p id=%s state=%s\n", | 667 | ch->id, fsm_getstate_str(ch->fsm)); |
654 | __FUNCTION__, dev->name, smp_processor_id(), ch, | ||
655 | ch->id, fsm_getstate_str(ch->fsm)); | ||
656 | 668 | ||
657 | if ((fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) || grp->in_sweep) { | 669 | if ((fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) || grp->in_sweep) { |
658 | spin_lock_irqsave(&ch->collect_lock, saveflags); | 670 | spin_lock_irqsave(&ch->collect_lock, saveflags); |
@@ -660,14 +672,8 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb) | |||
660 | p_header = kmalloc(PDU_HEADER_LENGTH, gfp_type()); | 672 | p_header = kmalloc(PDU_HEADER_LENGTH, gfp_type()); |
661 | 673 | ||
662 | if (!p_header) { | 674 | if (!p_header) { |
663 | printk(KERN_WARNING "ctcm: OUT OF MEMORY IN %s():" | ||
664 | " Data Lost \n", __FUNCTION__); | ||
665 | |||
666 | atomic_dec(&skb->users); | ||
667 | dev_kfree_skb_any(skb); | ||
668 | spin_unlock_irqrestore(&ch->collect_lock, saveflags); | 675 | spin_unlock_irqrestore(&ch->collect_lock, saveflags); |
669 | fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev); | 676 | goto nomem_exit; |
670 | goto done; | ||
671 | } | 677 | } |
672 | 678 | ||
673 | p_header->pdu_offset = skb->len; | 679 | p_header->pdu_offset = skb->len; |
@@ -682,13 +688,10 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb) | |||
682 | memcpy(skb_push(skb, PDU_HEADER_LENGTH), p_header, | 688 | memcpy(skb_push(skb, PDU_HEADER_LENGTH), p_header, |
683 | PDU_HEADER_LENGTH); | 689 | PDU_HEADER_LENGTH); |
684 | 690 | ||
685 | if (do_debug_data) { | 691 | CTCM_PR_DEBUG("%s(%s): Put on collect_q - skb len: %04x \n" |
686 | ctcm_pr_debug("ctcm: %s() Putting on collect_q" | 692 | "pdu header and data for up to 32 bytes:\n", |
687 | " - skb len: %04x \n", __FUNCTION__, skb->len); | 693 | __func__, dev->name, skb->len); |
688 | ctcm_pr_debug("ctcm: %s() pdu header and data" | 694 | CTCM_D3_DUMP((char *)skb->data, min_t(int, 32, skb->len)); |
689 | " for up to 32 bytes\n", __FUNCTION__); | ||
690 | ctcmpc_dump32((char *)skb->data, skb->len); | ||
691 | } | ||
692 | 695 | ||
693 | skb_queue_tail(&ch->collect_queue, skb); | 696 | skb_queue_tail(&ch->collect_queue, skb); |
694 | ch->collect_len += skb->len; | 697 | ch->collect_len += skb->len; |
@@ -713,12 +716,7 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb) | |||
713 | if (hi) { | 716 | if (hi) { |
714 | nskb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA); | 717 | nskb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA); |
715 | if (!nskb) { | 718 | if (!nskb) { |
716 | printk(KERN_WARNING "ctcm: %s() OUT OF MEMORY" | 719 | goto nomem_exit; |
717 | "- Data Lost \n", __FUNCTION__); | ||
718 | atomic_dec(&skb->users); | ||
719 | dev_kfree_skb_any(skb); | ||
720 | fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev); | ||
721 | goto done; | ||
722 | } else { | 720 | } else { |
723 | memcpy(skb_put(nskb, skb->len), skb->data, skb->len); | 721 | memcpy(skb_put(nskb, skb->len), skb->data, skb->len); |
724 | atomic_inc(&nskb->users); | 722 | atomic_inc(&nskb->users); |
@@ -730,15 +728,8 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb) | |||
730 | 728 | ||
731 | p_header = kmalloc(PDU_HEADER_LENGTH, gfp_type()); | 729 | p_header = kmalloc(PDU_HEADER_LENGTH, gfp_type()); |
732 | 730 | ||
733 | if (!p_header) { | 731 | if (!p_header) |
734 | printk(KERN_WARNING "ctcm: %s() OUT OF MEMORY" | 732 | goto nomem_exit; |
735 | ": Data Lost \n", __FUNCTION__); | ||
736 | |||
737 | atomic_dec(&skb->users); | ||
738 | dev_kfree_skb_any(skb); | ||
739 | fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev); | ||
740 | goto done; | ||
741 | } | ||
742 | 733 | ||
743 | p_header->pdu_offset = skb->len; | 734 | p_header->pdu_offset = skb->len; |
744 | p_header->pdu_proto = 0x01; | 735 | p_header->pdu_proto = 0x01; |
@@ -768,15 +759,8 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb) | |||
768 | ch->prof.txlen += skb->len - PDU_HEADER_LENGTH; | 759 | ch->prof.txlen += skb->len - PDU_HEADER_LENGTH; |
769 | 760 | ||
770 | header = kmalloc(TH_HEADER_LENGTH, gfp_type()); | 761 | header = kmalloc(TH_HEADER_LENGTH, gfp_type()); |
771 | 762 | if (!header) | |
772 | if (!header) { | 763 | goto nomem_exit; |
773 | printk(KERN_WARNING "ctcm: %s() OUT OF MEMORY: Data Lost \n", | ||
774 | __FUNCTION__); | ||
775 | atomic_dec(&skb->users); | ||
776 | dev_kfree_skb_any(skb); | ||
777 | fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev); | ||
778 | goto done; | ||
779 | } | ||
780 | 764 | ||
781 | header->th_seg = 0x00; | 765 | header->th_seg = 0x00; |
782 | header->th_ch_flag = TH_HAS_PDU; /* Normal data */ | 766 | header->th_ch_flag = TH_HAS_PDU; /* Normal data */ |
@@ -785,41 +769,31 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb) | |||
785 | ch->th_seq_num++; | 769 | ch->th_seq_num++; |
786 | header->th_seq_num = ch->th_seq_num; | 770 | header->th_seq_num = ch->th_seq_num; |
787 | 771 | ||
788 | if (do_debug_data) | 772 | CTCM_PR_DBGDATA("%s(%s) ToVTAM_th_seq= %08x\n" , |
789 | ctcm_pr_debug("ctcm: %s() ToVTAM_th_seq= %08x\n" , | 773 | __func__, dev->name, ch->th_seq_num); |
790 | __FUNCTION__, ch->th_seq_num); | ||
791 | 774 | ||
792 | /* put the TH on the packet */ | 775 | /* put the TH on the packet */ |
793 | memcpy(skb_push(skb, TH_HEADER_LENGTH), header, TH_HEADER_LENGTH); | 776 | memcpy(skb_push(skb, TH_HEADER_LENGTH), header, TH_HEADER_LENGTH); |
794 | 777 | ||
795 | kfree(header); | 778 | kfree(header); |
796 | 779 | ||
797 | if (do_debug_data) { | 780 | CTCM_PR_DBGDATA("%s(%s): skb len: %04x\n - pdu header and data for " |
798 | ctcm_pr_debug("ctcm: %s(): skb len: %04x \n", | 781 | "up to 32 bytes sent to vtam:\n", |
799 | __FUNCTION__, skb->len); | 782 | __func__, dev->name, skb->len); |
800 | ctcm_pr_debug("ctcm: %s(): pdu header and data for up to 32 " | 783 | CTCM_D3_DUMP((char *)skb->data, min_t(int, 32, skb->len)); |
801 | "bytes sent to vtam\n", __FUNCTION__); | ||
802 | ctcmpc_dump32((char *)skb->data, skb->len); | ||
803 | } | ||
804 | 784 | ||
805 | ch->ccw[4].count = skb->len; | 785 | ch->ccw[4].count = skb->len; |
806 | if (set_normalized_cda(&ch->ccw[4], skb->data)) { | 786 | if (set_normalized_cda(&ch->ccw[4], skb->data)) { |
807 | /* | 787 | /* |
808 | * idal allocation failed, try via copying to | 788 | * idal allocation failed, try via copying to trans_skb. |
809 | * trans_skb. trans_skb usually has a pre-allocated | 789 | * trans_skb usually has a pre-allocated idal. |
810 | * idal. | ||
811 | */ | 790 | */ |
812 | if (ctcm_checkalloc_buffer(ch)) { | 791 | if (ctcm_checkalloc_buffer(ch)) { |
813 | /* | 792 | /* |
814 | * Remove our header. It gets added | 793 | * Remove our header. |
815 | * again on retransmit. | 794 | * It gets added again on retransmit. |
816 | */ | 795 | */ |
817 | atomic_dec(&skb->users); | 796 | goto nomem_exit; |
818 | dev_kfree_skb_any(skb); | ||
819 | printk(KERN_WARNING "ctcm: %s()OUT OF MEMORY:" | ||
820 | " Data Lost \n", __FUNCTION__); | ||
821 | fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev); | ||
822 | goto done; | ||
823 | } | 797 | } |
824 | 798 | ||
825 | skb_reset_tail_pointer(ch->trans_skb); | 799 | skb_reset_tail_pointer(ch->trans_skb); |
@@ -829,14 +803,11 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb) | |||
829 | atomic_dec(&skb->users); | 803 | atomic_dec(&skb->users); |
830 | dev_kfree_skb_irq(skb); | 804 | dev_kfree_skb_irq(skb); |
831 | ccw_idx = 0; | 805 | ccw_idx = 0; |
832 | if (do_debug_data) { | 806 | CTCM_PR_DBGDATA("%s(%s): trans_skb len: %04x\n" |
833 | ctcm_pr_debug("ctcm: %s() TRANS skb len: %d \n", | 807 | "up to 32 bytes sent to vtam:\n", |
834 | __FUNCTION__, ch->trans_skb->len); | 808 | __func__, dev->name, ch->trans_skb->len); |
835 | ctcm_pr_debug("ctcm: %s up to 32 bytes of data" | 809 | CTCM_D3_DUMP((char *)ch->trans_skb->data, |
836 | " sent to vtam\n", __FUNCTION__); | 810 | min_t(int, 32, ch->trans_skb->len)); |
837 | ctcmpc_dump32((char *)ch->trans_skb->data, | ||
838 | ch->trans_skb->len); | ||
839 | } | ||
840 | } else { | 811 | } else { |
841 | skb_queue_tail(&ch->io_queue, skb); | 812 | skb_queue_tail(&ch->io_queue, skb); |
842 | ccw_idx = 3; | 813 | ccw_idx = 3; |
@@ -865,13 +836,21 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb) | |||
865 | priv->stats.tx_packets++; | 836 | priv->stats.tx_packets++; |
866 | priv->stats.tx_bytes += skb->len - TH_HEADER_LENGTH; | 837 | priv->stats.tx_bytes += skb->len - TH_HEADER_LENGTH; |
867 | } | 838 | } |
868 | if (ch->th_seq_num > 0xf0000000) /* Chose 4Billion at random. */ | 839 | if (ch->th_seq_num > 0xf0000000) /* Chose at random. */ |
869 | ctcmpc_send_sweep_req(ch); | 840 | ctcmpc_send_sweep_req(ch); |
870 | 841 | ||
842 | goto done; | ||
843 | nomem_exit: | ||
844 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_CRIT, | ||
845 | "%s(%s): MEMORY allocation ERROR\n", | ||
846 | CTCM_FUNTAIL, ch->id); | ||
847 | rc = -ENOMEM; | ||
848 | atomic_dec(&skb->users); | ||
849 | dev_kfree_skb_any(skb); | ||
850 | fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev); | ||
871 | done: | 851 | done: |
872 | if (do_debug) | 852 | CTCM_PR_DEBUG("Exit %s(%s)\n", __func__, dev->name); |
873 | ctcm_pr_debug("ctcm exit: %s %s()\n", dev->name, __FUNCTION__); | 853 | return rc; |
874 | return 0; | ||
875 | } | 854 | } |
876 | 855 | ||
877 | /** | 856 | /** |
@@ -888,20 +867,19 @@ done: | |||
888 | /* first merge version - leaving both functions separated */ | 867 | /* first merge version - leaving both functions separated */ |
889 | static int ctcm_tx(struct sk_buff *skb, struct net_device *dev) | 868 | static int ctcm_tx(struct sk_buff *skb, struct net_device *dev) |
890 | { | 869 | { |
891 | int rc = 0; | 870 | struct ctcm_priv *priv = dev->priv; |
892 | struct ctcm_priv *priv; | ||
893 | |||
894 | CTCM_DBF_TEXT(TRACE, 5, __FUNCTION__); | ||
895 | priv = dev->priv; | ||
896 | 871 | ||
897 | if (skb == NULL) { | 872 | if (skb == NULL) { |
898 | ctcm_pr_warn("%s: NULL sk_buff passed\n", dev->name); | 873 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, |
874 | "%s(%s): NULL sk_buff passed", | ||
875 | CTCM_FUNTAIL, dev->name); | ||
899 | priv->stats.tx_dropped++; | 876 | priv->stats.tx_dropped++; |
900 | return 0; | 877 | return 0; |
901 | } | 878 | } |
902 | if (skb_headroom(skb) < (LL_HEADER_LENGTH + 2)) { | 879 | if (skb_headroom(skb) < (LL_HEADER_LENGTH + 2)) { |
903 | ctcm_pr_warn("%s: Got sk_buff with head room < %ld bytes\n", | 880 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, |
904 | dev->name, LL_HEADER_LENGTH + 2); | 881 | "%s(%s): Got sk_buff with head room < %ld bytes", |
882 | CTCM_FUNTAIL, dev->name, LL_HEADER_LENGTH + 2); | ||
905 | dev_kfree_skb(skb); | 883 | dev_kfree_skb(skb); |
906 | priv->stats.tx_dropped++; | 884 | priv->stats.tx_dropped++; |
907 | return 0; | 885 | return 0; |
@@ -925,51 +903,43 @@ static int ctcm_tx(struct sk_buff *skb, struct net_device *dev) | |||
925 | 903 | ||
926 | dev->trans_start = jiffies; | 904 | dev->trans_start = jiffies; |
927 | if (ctcm_transmit_skb(priv->channel[WRITE], skb) != 0) | 905 | if (ctcm_transmit_skb(priv->channel[WRITE], skb) != 0) |
928 | rc = 1; | 906 | return 1; |
929 | return rc; | 907 | return 0; |
930 | } | 908 | } |
931 | 909 | ||
932 | /* unmerged MPC variant of ctcm_tx */ | 910 | /* unmerged MPC variant of ctcm_tx */ |
933 | static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev) | 911 | static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev) |
934 | { | 912 | { |
935 | int len = 0; | 913 | int len = 0; |
936 | struct ctcm_priv *priv = NULL; | 914 | struct ctcm_priv *priv = dev->priv; |
937 | struct mpc_group *grp = NULL; | 915 | struct mpc_group *grp = priv->mpcg; |
938 | struct sk_buff *newskb = NULL; | 916 | struct sk_buff *newskb = NULL; |
939 | 917 | ||
940 | if (do_debug) | ||
941 | ctcm_pr_debug("ctcmpc enter: %s(): skb:%0lx\n", | ||
942 | __FUNCTION__, (unsigned long)skb); | ||
943 | |||
944 | CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG, | ||
945 | "ctcmpc enter: %s(): skb:%0lx\n", | ||
946 | __FUNCTION__, (unsigned long)skb); | ||
947 | |||
948 | priv = dev->priv; | ||
949 | grp = priv->mpcg; | ||
950 | /* | 918 | /* |
951 | * Some sanity checks ... | 919 | * Some sanity checks ... |
952 | */ | 920 | */ |
953 | if (skb == NULL) { | 921 | if (skb == NULL) { |
954 | ctcm_pr_warn("ctcmpc: %s: NULL sk_buff passed\n", dev->name); | 922 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, |
923 | "%s(%s): NULL sk_buff passed", | ||
924 | CTCM_FUNTAIL, dev->name); | ||
955 | priv->stats.tx_dropped++; | 925 | priv->stats.tx_dropped++; |
956 | goto done; | 926 | goto done; |
957 | } | 927 | } |
958 | if (skb_headroom(skb) < (TH_HEADER_LENGTH + PDU_HEADER_LENGTH)) { | 928 | if (skb_headroom(skb) < (TH_HEADER_LENGTH + PDU_HEADER_LENGTH)) { |
959 | CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_WARN, | 929 | CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR, |
960 | "%s: Got sk_buff with head room < %ld bytes\n", | 930 | "%s(%s): Got sk_buff with head room < %ld bytes", |
961 | dev->name, TH_HEADER_LENGTH + PDU_HEADER_LENGTH); | 931 | CTCM_FUNTAIL, dev->name, |
932 | TH_HEADER_LENGTH + PDU_HEADER_LENGTH); | ||
962 | 933 | ||
963 | if (do_debug_data) | 934 | CTCM_D3_DUMP((char *)skb->data, min_t(int, 32, skb->len)); |
964 | ctcmpc_dump32((char *)skb->data, skb->len); | ||
965 | 935 | ||
966 | len = skb->len + TH_HEADER_LENGTH + PDU_HEADER_LENGTH; | 936 | len = skb->len + TH_HEADER_LENGTH + PDU_HEADER_LENGTH; |
967 | newskb = __dev_alloc_skb(len, gfp_type() | GFP_DMA); | 937 | newskb = __dev_alloc_skb(len, gfp_type() | GFP_DMA); |
968 | 938 | ||
969 | if (!newskb) { | 939 | if (!newskb) { |
970 | printk(KERN_WARNING "ctcmpc: %s() OUT OF MEMORY-" | 940 | CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR, |
971 | "Data Lost\n", | 941 | "%s: %s: __dev_alloc_skb failed", |
972 | __FUNCTION__); | 942 | __func__, dev->name); |
973 | 943 | ||
974 | dev_kfree_skb_any(skb); | 944 | dev_kfree_skb_any(skb); |
975 | priv->stats.tx_dropped++; | 945 | priv->stats.tx_dropped++; |
@@ -993,9 +963,9 @@ static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev) | |||
993 | if ((fsm_getstate(priv->fsm) != DEV_STATE_RUNNING) || | 963 | if ((fsm_getstate(priv->fsm) != DEV_STATE_RUNNING) || |
994 | (fsm_getstate(grp->fsm) < MPCG_STATE_XID2INITW)) { | 964 | (fsm_getstate(grp->fsm) < MPCG_STATE_XID2INITW)) { |
995 | dev_kfree_skb_any(skb); | 965 | dev_kfree_skb_any(skb); |
996 | printk(KERN_INFO "ctcmpc: %s() DATA RCVD - MPC GROUP " | 966 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, |
997 | "NOT ACTIVE - DROPPED\n", | 967 | "%s(%s): inactive MPCGROUP - dropped", |
998 | __FUNCTION__); | 968 | CTCM_FUNTAIL, dev->name); |
999 | priv->stats.tx_dropped++; | 969 | priv->stats.tx_dropped++; |
1000 | priv->stats.tx_errors++; | 970 | priv->stats.tx_errors++; |
1001 | priv->stats.tx_carrier_errors++; | 971 | priv->stats.tx_carrier_errors++; |
@@ -1003,8 +973,9 @@ static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev) | |||
1003 | } | 973 | } |
1004 | 974 | ||
1005 | if (ctcm_test_and_set_busy(dev)) { | 975 | if (ctcm_test_and_set_busy(dev)) { |
1006 | printk(KERN_WARNING "%s:DEVICE ERR - UNRECOVERABLE DATA LOSS\n", | 976 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, |
1007 | __FUNCTION__); | 977 | "%s(%s): device busy - dropped", |
978 | CTCM_FUNTAIL, dev->name); | ||
1008 | dev_kfree_skb_any(skb); | 979 | dev_kfree_skb_any(skb); |
1009 | priv->stats.tx_dropped++; | 980 | priv->stats.tx_dropped++; |
1010 | priv->stats.tx_errors++; | 981 | priv->stats.tx_errors++; |
@@ -1015,12 +986,9 @@ static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev) | |||
1015 | 986 | ||
1016 | dev->trans_start = jiffies; | 987 | dev->trans_start = jiffies; |
1017 | if (ctcmpc_transmit_skb(priv->channel[WRITE], skb) != 0) { | 988 | if (ctcmpc_transmit_skb(priv->channel[WRITE], skb) != 0) { |
1018 | printk(KERN_WARNING "ctcmpc: %s() DEVICE ERROR" | 989 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, |
1019 | ": Data Lost \n", | 990 | "%s(%s): device error - dropped", |
1020 | __FUNCTION__); | 991 | CTCM_FUNTAIL, dev->name); |
1021 | printk(KERN_WARNING "ctcmpc: %s() DEVICE ERROR" | ||
1022 | " - UNRECOVERABLE DATA LOSS\n", | ||
1023 | __FUNCTION__); | ||
1024 | dev_kfree_skb_any(skb); | 992 | dev_kfree_skb_any(skb); |
1025 | priv->stats.tx_dropped++; | 993 | priv->stats.tx_dropped++; |
1026 | priv->stats.tx_errors++; | 994 | priv->stats.tx_errors++; |
@@ -1054,8 +1022,6 @@ static int ctcm_change_mtu(struct net_device *dev, int new_mtu) | |||
1054 | struct ctcm_priv *priv; | 1022 | struct ctcm_priv *priv; |
1055 | int max_bufsize; | 1023 | int max_bufsize; |
1056 | 1024 | ||
1057 | CTCM_DBF_TEXT(SETUP, CTC_DBF_INFO, __FUNCTION__); | ||
1058 | |||
1059 | if (new_mtu < 576 || new_mtu > 65527) | 1025 | if (new_mtu < 576 || new_mtu > 65527) |
1060 | return -EINVAL; | 1026 | return -EINVAL; |
1061 | 1027 | ||
@@ -1087,30 +1053,13 @@ static struct net_device_stats *ctcm_stats(struct net_device *dev) | |||
1087 | return &((struct ctcm_priv *)dev->priv)->stats; | 1053 | return &((struct ctcm_priv *)dev->priv)->stats; |
1088 | } | 1054 | } |
1089 | 1055 | ||
1090 | |||
1091 | static void ctcm_netdev_unregister(struct net_device *dev) | ||
1092 | { | ||
1093 | CTCM_DBF_TEXT(SETUP, CTC_DBF_INFO, __FUNCTION__); | ||
1094 | if (!dev) | ||
1095 | return; | ||
1096 | unregister_netdev(dev); | ||
1097 | } | ||
1098 | |||
1099 | static int ctcm_netdev_register(struct net_device *dev) | ||
1100 | { | ||
1101 | CTCM_DBF_TEXT(SETUP, CTC_DBF_INFO, __FUNCTION__); | ||
1102 | return register_netdev(dev); | ||
1103 | } | ||
1104 | |||
1105 | static void ctcm_free_netdevice(struct net_device *dev) | 1056 | static void ctcm_free_netdevice(struct net_device *dev) |
1106 | { | 1057 | { |
1107 | struct ctcm_priv *priv; | 1058 | struct ctcm_priv *priv; |
1108 | struct mpc_group *grp; | 1059 | struct mpc_group *grp; |
1109 | 1060 | ||
1110 | CTCM_DBF_TEXT(SETUP, CTC_DBF_INFO, __FUNCTION__); | 1061 | CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, |
1111 | 1062 | "%s(%s)", CTCM_FUNTAIL, dev->name); | |
1112 | if (!dev) | ||
1113 | return; | ||
1114 | priv = dev->priv; | 1063 | priv = dev->priv; |
1115 | if (priv) { | 1064 | if (priv) { |
1116 | grp = priv->mpcg; | 1065 | grp = priv->mpcg; |
@@ -1171,7 +1120,9 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv) | |||
1171 | dev = alloc_netdev(0, CTC_DEVICE_GENE, ctcm_dev_setup); | 1120 | dev = alloc_netdev(0, CTC_DEVICE_GENE, ctcm_dev_setup); |
1172 | 1121 | ||
1173 | if (!dev) { | 1122 | if (!dev) { |
1174 | ctcm_pr_err("%s: Out of memory\n", __FUNCTION__); | 1123 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT, |
1124 | "%s: MEMORY allocation ERROR", | ||
1125 | CTCM_FUNTAIL); | ||
1175 | return NULL; | 1126 | return NULL; |
1176 | } | 1127 | } |
1177 | dev->priv = priv; | 1128 | dev->priv = priv; |
@@ -1209,6 +1160,7 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv) | |||
1209 | } | 1160 | } |
1210 | 1161 | ||
1211 | CTCMY_DBF_DEV(SETUP, dev, "finished"); | 1162 | CTCMY_DBF_DEV(SETUP, dev, "finished"); |
1163 | |||
1212 | return dev; | 1164 | return dev; |
1213 | } | 1165 | } |
1214 | 1166 | ||
@@ -1226,18 +1178,24 @@ static void ctcm_irq_handler(struct ccw_device *cdev, | |||
1226 | struct net_device *dev; | 1178 | struct net_device *dev; |
1227 | struct ctcm_priv *priv; | 1179 | struct ctcm_priv *priv; |
1228 | struct ccwgroup_device *cgdev; | 1180 | struct ccwgroup_device *cgdev; |
1181 | int cstat; | ||
1182 | int dstat; | ||
1183 | |||
1184 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, | ||
1185 | "Enter %s(%s)", CTCM_FUNTAIL, &cdev->dev.bus_id); | ||
1229 | 1186 | ||
1230 | CTCM_DBF_TEXT(TRACE, CTC_DBF_DEBUG, __FUNCTION__); | ||
1231 | if (ctcm_check_irb_error(cdev, irb)) | 1187 | if (ctcm_check_irb_error(cdev, irb)) |
1232 | return; | 1188 | return; |
1233 | 1189 | ||
1234 | cgdev = dev_get_drvdata(&cdev->dev); | 1190 | cgdev = dev_get_drvdata(&cdev->dev); |
1235 | 1191 | ||
1192 | cstat = irb->scsw.cmd.cstat; | ||
1193 | dstat = irb->scsw.cmd.dstat; | ||
1194 | |||
1236 | /* Check for unsolicited interrupts. */ | 1195 | /* Check for unsolicited interrupts. */ |
1237 | if (cgdev == NULL) { | 1196 | if (cgdev == NULL) { |
1238 | ctcm_pr_warn("ctcm: Got unsolicited irq: %s c-%02x d-%02x\n", | 1197 | ctcm_pr_warn("ctcm: Got unsolicited irq: c-%02x d-%02x\n", |
1239 | cdev->dev.bus_id, irb->scsw.cmd.cstat, | 1198 | cstat, dstat); |
1240 | irb->scsw.cmd.dstat); | ||
1241 | return; | 1199 | return; |
1242 | } | 1200 | } |
1243 | 1201 | ||
@@ -1254,26 +1212,22 @@ static void ctcm_irq_handler(struct ccw_device *cdev, | |||
1254 | return; | 1212 | return; |
1255 | } | 1213 | } |
1256 | 1214 | ||
1257 | dev = (struct net_device *)(ch->netdev); | 1215 | dev = ch->netdev; |
1258 | if (dev == NULL) { | 1216 | if (dev == NULL) { |
1259 | ctcm_pr_crit("ctcm: %s dev=NULL bus_id=%s, ch=0x%p\n", | 1217 | ctcm_pr_crit("ctcm: %s dev=NULL bus_id=%s, ch=0x%p\n", |
1260 | __FUNCTION__, cdev->dev.bus_id, ch); | 1218 | __func__, cdev->dev.bus_id, ch); |
1261 | return; | 1219 | return; |
1262 | } | 1220 | } |
1263 | 1221 | ||
1264 | if (do_debug) | 1222 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, |
1265 | ctcm_pr_debug("%s: interrupt for device: %s " | 1223 | "%s(%s): int. for %s: cstat=%02x dstat=%02x", |
1266 | "received c-%02x d-%02x\n", | 1224 | CTCM_FUNTAIL, dev->name, ch->id, cstat, dstat); |
1267 | dev->name, | ||
1268 | ch->id, | ||
1269 | irb->scsw.cmd.cstat, | ||
1270 | irb->scsw.cmd.dstat); | ||
1271 | 1225 | ||
1272 | /* Copy interruption response block. */ | 1226 | /* Copy interruption response block. */ |
1273 | memcpy(ch->irb, irb, sizeof(struct irb)); | 1227 | memcpy(ch->irb, irb, sizeof(struct irb)); |
1274 | 1228 | ||
1275 | /* Check for good subchannel return code, otherwise error message */ | ||
1276 | if (irb->scsw.cmd.cstat) { | 1229 | if (irb->scsw.cmd.cstat) { |
1230 | /* Check for good subchannel return code, otherwise error message */ | ||
1277 | fsm_event(ch->fsm, CTC_EVENT_SC_UNKNOWN, ch); | 1231 | fsm_event(ch->fsm, CTC_EVENT_SC_UNKNOWN, ch); |
1278 | ctcm_pr_warn("%s: subchannel check for dev: %s - %02x %02x\n", | 1232 | ctcm_pr_warn("%s: subchannel check for dev: %s - %02x %02x\n", |
1279 | dev->name, ch->id, irb->scsw.cmd.cstat, | 1233 | dev->name, ch->id, irb->scsw.cmd.cstat, |
@@ -1283,6 +1237,11 @@ static void ctcm_irq_handler(struct ccw_device *cdev, | |||
1283 | 1237 | ||
1284 | /* Check the reason-code of a unit check */ | 1238 | /* Check the reason-code of a unit check */ |
1285 | if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { | 1239 | if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { |
1240 | if ((irb->ecw[0] & ch->sense_rc) == 0) | ||
1241 | /* print it only once */ | ||
1242 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO, | ||
1243 | "%s(%s): sense=%02x, ds=%02x", | ||
1244 | CTCM_FUNTAIL, ch->id, irb->ecw[0], dstat); | ||
1286 | ccw_unit_check(ch, irb->ecw[0]); | 1245 | ccw_unit_check(ch, irb->ecw[0]); |
1287 | return; | 1246 | return; |
1288 | } | 1247 | } |
@@ -1320,14 +1279,18 @@ static int ctcm_probe_device(struct ccwgroup_device *cgdev) | |||
1320 | struct ctcm_priv *priv; | 1279 | struct ctcm_priv *priv; |
1321 | int rc; | 1280 | int rc; |
1322 | 1281 | ||
1323 | CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s %p", __FUNCTION__, cgdev); | 1282 | CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, |
1283 | "%s %p", | ||
1284 | __func__, cgdev); | ||
1324 | 1285 | ||
1325 | if (!get_device(&cgdev->dev)) | 1286 | if (!get_device(&cgdev->dev)) |
1326 | return -ENODEV; | 1287 | return -ENODEV; |
1327 | 1288 | ||
1328 | priv = kzalloc(sizeof(struct ctcm_priv), GFP_KERNEL); | 1289 | priv = kzalloc(sizeof(struct ctcm_priv), GFP_KERNEL); |
1329 | if (!priv) { | 1290 | if (!priv) { |
1330 | ctcm_pr_err("%s: Out of memory\n", __FUNCTION__); | 1291 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, |
1292 | "%s: memory allocation failure", | ||
1293 | CTCM_FUNTAIL); | ||
1331 | put_device(&cgdev->dev); | 1294 | put_device(&cgdev->dev); |
1332 | return -ENOMEM; | 1295 | return -ENOMEM; |
1333 | } | 1296 | } |
@@ -1364,10 +1327,13 @@ static int add_channel(struct ccw_device *cdev, enum channel_types type, | |||
1364 | int ccw_num; | 1327 | int ccw_num; |
1365 | int rc = 0; | 1328 | int rc = 0; |
1366 | 1329 | ||
1367 | CTCM_DBF_TEXT(TRACE, 2, __FUNCTION__); | 1330 | CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, |
1331 | "%s(%s), type %d, proto %d", | ||
1332 | __func__, cdev->dev.bus_id, type, priv->protocol); | ||
1333 | |||
1368 | ch = kzalloc(sizeof(struct channel), GFP_KERNEL); | 1334 | ch = kzalloc(sizeof(struct channel), GFP_KERNEL); |
1369 | if (ch == NULL) | 1335 | if (ch == NULL) |
1370 | goto nomem_return; | 1336 | return -ENOMEM; |
1371 | 1337 | ||
1372 | ch->protocol = priv->protocol; | 1338 | ch->protocol = priv->protocol; |
1373 | if (IS_MPC(priv)) { | 1339 | if (IS_MPC(priv)) { |
@@ -1478,7 +1444,7 @@ static int add_channel(struct ccw_device *cdev, enum channel_types type, | |||
1478 | if (*c && (!strncmp((*c)->id, ch->id, CTCM_ID_SIZE))) { | 1444 | if (*c && (!strncmp((*c)->id, ch->id, CTCM_ID_SIZE))) { |
1479 | CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, | 1445 | CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, |
1480 | "%s (%s) already in list, using old entry", | 1446 | "%s (%s) already in list, using old entry", |
1481 | __FUNCTION__, (*c)->id); | 1447 | __func__, (*c)->id); |
1482 | 1448 | ||
1483 | goto free_return; | 1449 | goto free_return; |
1484 | } | 1450 | } |
@@ -1498,11 +1464,10 @@ static int add_channel(struct ccw_device *cdev, enum channel_types type, | |||
1498 | return 0; | 1464 | return 0; |
1499 | 1465 | ||
1500 | nomem_return: | 1466 | nomem_return: |
1501 | ctcm_pr_warn("ctcm: Out of memory in %s\n", __FUNCTION__); | ||
1502 | rc = -ENOMEM; | 1467 | rc = -ENOMEM; |
1503 | 1468 | ||
1504 | free_return: /* note that all channel pointers are 0 or valid */ | 1469 | free_return: /* note that all channel pointers are 0 or valid */ |
1505 | kfree(ch->ccw); /* TODO: check that again */ | 1470 | kfree(ch->ccw); |
1506 | kfree(ch->discontact_th); | 1471 | kfree(ch->discontact_th); |
1507 | kfree_fsm(ch->fsm); | 1472 | kfree_fsm(ch->fsm); |
1508 | kfree(ch->irb); | 1473 | kfree(ch->irb); |
@@ -1540,48 +1505,48 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev) | |||
1540 | enum channel_types type; | 1505 | enum channel_types type; |
1541 | struct ctcm_priv *priv; | 1506 | struct ctcm_priv *priv; |
1542 | struct net_device *dev; | 1507 | struct net_device *dev; |
1508 | struct ccw_device *cdev0; | ||
1509 | struct ccw_device *cdev1; | ||
1543 | int ret; | 1510 | int ret; |
1544 | 1511 | ||
1545 | CTCM_DBF_TEXT(SETUP, CTC_DBF_INFO, __FUNCTION__); | ||
1546 | |||
1547 | priv = dev_get_drvdata(&cgdev->dev); | 1512 | priv = dev_get_drvdata(&cgdev->dev); |
1548 | if (!priv) | 1513 | if (!priv) |
1549 | return -ENODEV; | 1514 | return -ENODEV; |
1550 | 1515 | ||
1551 | type = get_channel_type(&cgdev->cdev[0]->id); | 1516 | cdev0 = cgdev->cdev[0]; |
1517 | cdev1 = cgdev->cdev[1]; | ||
1518 | |||
1519 | type = get_channel_type(&cdev0->id); | ||
1552 | 1520 | ||
1553 | snprintf(read_id, CTCM_ID_SIZE, "ch-%s", cgdev->cdev[0]->dev.bus_id); | 1521 | snprintf(read_id, CTCM_ID_SIZE, "ch-%s", cdev0->dev.bus_id); |
1554 | snprintf(write_id, CTCM_ID_SIZE, "ch-%s", cgdev->cdev[1]->dev.bus_id); | 1522 | snprintf(write_id, CTCM_ID_SIZE, "ch-%s", cdev1->dev.bus_id); |
1555 | 1523 | ||
1556 | ret = add_channel(cgdev->cdev[0], type, priv); | 1524 | ret = add_channel(cdev0, type, priv); |
1557 | if (ret) | 1525 | if (ret) |
1558 | return ret; | 1526 | return ret; |
1559 | ret = add_channel(cgdev->cdev[1], type, priv); | 1527 | ret = add_channel(cdev1, type, priv); |
1560 | if (ret) | 1528 | if (ret) |
1561 | return ret; | 1529 | return ret; |
1562 | 1530 | ||
1563 | ret = ccw_device_set_online(cgdev->cdev[0]); | 1531 | ret = ccw_device_set_online(cdev0); |
1564 | if (ret != 0) { | 1532 | if (ret != 0) { |
1565 | CTCM_DBF_TEXT(SETUP, CTC_DBF_WARN, | 1533 | /* may be ok to fail now - can be done later */ |
1566 | "ccw_device_set_online (cdev[0]) failed "); | 1534 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE, |
1567 | ctcm_pr_warn("ccw_device_set_online (cdev[0]) failed " | 1535 | "%s(%s) set_online rc=%d", |
1568 | "with ret = %d\n", ret); | 1536 | CTCM_FUNTAIL, read_id, ret); |
1569 | } | 1537 | } |
1570 | 1538 | ||
1571 | ret = ccw_device_set_online(cgdev->cdev[1]); | 1539 | ret = ccw_device_set_online(cdev1); |
1572 | if (ret != 0) { | 1540 | if (ret != 0) { |
1573 | CTCM_DBF_TEXT(SETUP, CTC_DBF_WARN, | 1541 | /* may be ok to fail now - can be done later */ |
1574 | "ccw_device_set_online (cdev[1]) failed "); | 1542 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE, |
1575 | ctcm_pr_warn("ccw_device_set_online (cdev[1]) failed " | 1543 | "%s(%s) set_online rc=%d", |
1576 | "with ret = %d\n", ret); | 1544 | CTCM_FUNTAIL, write_id, ret); |
1577 | } | 1545 | } |
1578 | 1546 | ||
1579 | dev = ctcm_init_netdevice(priv); | 1547 | dev = ctcm_init_netdevice(priv); |
1580 | 1548 | if (dev == NULL) | |
1581 | if (dev == NULL) { | 1549 | goto out; |
1582 | ctcm_pr_warn("ctcm_init_netdevice failed\n"); | ||
1583 | goto out; | ||
1584 | } | ||
1585 | 1550 | ||
1586 | for (direction = READ; direction <= WRITE; direction++) { | 1551 | for (direction = READ; direction <= WRITE; direction++) { |
1587 | priv->channel[direction] = | 1552 | priv->channel[direction] = |
@@ -1590,8 +1555,7 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev) | |||
1590 | if (priv->channel[direction] == NULL) { | 1555 | if (priv->channel[direction] == NULL) { |
1591 | if (direction == WRITE) | 1556 | if (direction == WRITE) |
1592 | channel_free(priv->channel[READ]); | 1557 | channel_free(priv->channel[READ]); |
1593 | ctcm_free_netdevice(dev); | 1558 | goto out_dev; |
1594 | goto out; | ||
1595 | } | 1559 | } |
1596 | priv->channel[direction]->netdev = dev; | 1560 | priv->channel[direction]->netdev = dev; |
1597 | priv->channel[direction]->protocol = priv->protocol; | 1561 | priv->channel[direction]->protocol = priv->protocol; |
@@ -1600,26 +1564,24 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev) | |||
1600 | /* sysfs magic */ | 1564 | /* sysfs magic */ |
1601 | SET_NETDEV_DEV(dev, &cgdev->dev); | 1565 | SET_NETDEV_DEV(dev, &cgdev->dev); |
1602 | 1566 | ||
1603 | if (ctcm_netdev_register(dev) != 0) { | 1567 | if (register_netdev(dev)) |
1604 | ctcm_free_netdevice(dev); | 1568 | goto out_dev; |
1605 | goto out; | ||
1606 | } | ||
1607 | 1569 | ||
1608 | if (ctcm_add_attributes(&cgdev->dev)) { | 1570 | if (ctcm_add_attributes(&cgdev->dev)) { |
1609 | ctcm_netdev_unregister(dev); | 1571 | unregister_netdev(dev); |
1610 | /* dev->priv = NULL; why that ???? */ | 1572 | goto out_dev; |
1611 | ctcm_free_netdevice(dev); | ||
1612 | goto out; | ||
1613 | } | 1573 | } |
1614 | 1574 | ||
1615 | strlcpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name)); | 1575 | strlcpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name)); |
1616 | 1576 | ||
1617 | CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, | 1577 | CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, |
1618 | "setup(%s) ok : r/w = %s / %s, proto : %d", | 1578 | "setup(%s) OK : r/w = %s/%s, protocol : %d", dev->name, |
1619 | dev->name, priv->channel[READ]->id, | 1579 | priv->channel[READ]->id, |
1620 | priv->channel[WRITE]->id, priv->protocol); | 1580 | priv->channel[WRITE]->id, priv->protocol); |
1621 | 1581 | ||
1622 | return 0; | 1582 | return 0; |
1583 | out_dev: | ||
1584 | ctcm_free_netdevice(dev); | ||
1623 | out: | 1585 | out: |
1624 | ccw_device_set_offline(cgdev->cdev[1]); | 1586 | ccw_device_set_offline(cgdev->cdev[1]); |
1625 | ccw_device_set_offline(cgdev->cdev[0]); | 1587 | ccw_device_set_offline(cgdev->cdev[0]); |
@@ -1658,8 +1620,7 @@ static int ctcm_shutdown_device(struct ccwgroup_device *cgdev) | |||
1658 | channel_free(priv->channel[WRITE]); | 1620 | channel_free(priv->channel[WRITE]); |
1659 | 1621 | ||
1660 | if (dev) { | 1622 | if (dev) { |
1661 | ctcm_netdev_unregister(dev); | 1623 | unregister_netdev(dev); |
1662 | /* dev->priv = NULL; why that ??? */ | ||
1663 | ctcm_free_netdevice(dev); | 1624 | ctcm_free_netdevice(dev); |
1664 | } | 1625 | } |
1665 | 1626 | ||
@@ -1682,13 +1643,16 @@ static int ctcm_shutdown_device(struct ccwgroup_device *cgdev) | |||
1682 | 1643 | ||
1683 | static void ctcm_remove_device(struct ccwgroup_device *cgdev) | 1644 | static void ctcm_remove_device(struct ccwgroup_device *cgdev) |
1684 | { | 1645 | { |
1685 | struct ctcm_priv *priv; | 1646 | struct ctcm_priv *priv = dev_get_drvdata(&cgdev->dev); |
1686 | 1647 | ||
1687 | CTCM_DBF_TEXT(SETUP, CTC_DBF_ERROR, __FUNCTION__); | 1648 | BUG_ON(priv == NULL); |
1649 | |||
1650 | CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, | ||
1651 | "removing device %s, r/w = %s/%s, proto : %d", | ||
1652 | priv->channel[READ]->netdev->name, | ||
1653 | priv->channel[READ]->id, priv->channel[WRITE]->id, | ||
1654 | priv->protocol); | ||
1688 | 1655 | ||
1689 | priv = dev_get_drvdata(&cgdev->dev); | ||
1690 | if (!priv) | ||
1691 | return; | ||
1692 | if (cgdev->state == CCWGROUP_ONLINE) | 1656 | if (cgdev->state == CCWGROUP_ONLINE) |
1693 | ctcm_shutdown_device(cgdev); | 1657 | ctcm_shutdown_device(cgdev); |
1694 | ctcm_remove_files(&cgdev->dev); | 1658 | ctcm_remove_files(&cgdev->dev); |
@@ -1748,8 +1712,6 @@ static int __init ctcm_init(void) | |||
1748 | 1712 | ||
1749 | ret = ctcm_register_dbf_views(); | 1713 | ret = ctcm_register_dbf_views(); |
1750 | if (ret) { | 1714 | if (ret) { |
1751 | ctcm_pr_crit("ctcm_init failed with ctcm_register_dbf_views " | ||
1752 | "rc = %d\n", ret); | ||
1753 | return ret; | 1715 | return ret; |
1754 | } | 1716 | } |
1755 | ret = register_cu3088_discipline(&ctcm_group_driver); | 1717 | ret = register_cu3088_discipline(&ctcm_group_driver); |
diff --git a/drivers/s390/net/ctcm_main.h b/drivers/s390/net/ctcm_main.h index 95b0c0b6ebc6..a72e0feeb27f 100644 --- a/drivers/s390/net/ctcm_main.h +++ b/drivers/s390/net/ctcm_main.h | |||
@@ -22,9 +22,9 @@ | |||
22 | 22 | ||
23 | #define CTC_DRIVER_NAME "ctcm" | 23 | #define CTC_DRIVER_NAME "ctcm" |
24 | #define CTC_DEVICE_NAME "ctc" | 24 | #define CTC_DEVICE_NAME "ctc" |
25 | #define CTC_DEVICE_GENE "ctc%d" | ||
26 | #define MPC_DEVICE_NAME "mpc" | 25 | #define MPC_DEVICE_NAME "mpc" |
27 | #define MPC_DEVICE_GENE "mpc%d" | 26 | #define CTC_DEVICE_GENE CTC_DEVICE_NAME "%d" |
27 | #define MPC_DEVICE_GENE MPC_DEVICE_NAME "%d" | ||
28 | 28 | ||
29 | #define CHANNEL_FLAGS_READ 0 | 29 | #define CHANNEL_FLAGS_READ 0 |
30 | #define CHANNEL_FLAGS_WRITE 1 | 30 | #define CHANNEL_FLAGS_WRITE 1 |
@@ -48,6 +48,30 @@ | |||
48 | #define ctcm_pr_err(fmt, arg...) printk(KERN_ERR fmt, ##arg) | 48 | #define ctcm_pr_err(fmt, arg...) printk(KERN_ERR fmt, ##arg) |
49 | #define ctcm_pr_crit(fmt, arg...) printk(KERN_CRIT fmt, ##arg) | 49 | #define ctcm_pr_crit(fmt, arg...) printk(KERN_CRIT fmt, ##arg) |
50 | 50 | ||
51 | #define CTCM_PR_DEBUG(fmt, arg...) \ | ||
52 | do { \ | ||
53 | if (do_debug) \ | ||
54 | printk(KERN_DEBUG fmt, ##arg); \ | ||
55 | } while (0) | ||
56 | |||
57 | #define CTCM_PR_DBGDATA(fmt, arg...) \ | ||
58 | do { \ | ||
59 | if (do_debug_data) \ | ||
60 | printk(KERN_DEBUG fmt, ##arg); \ | ||
61 | } while (0) | ||
62 | |||
63 | #define CTCM_D3_DUMP(buf, len) \ | ||
64 | do { \ | ||
65 | if (do_debug_data) \ | ||
66 | ctcmpc_dumpit(buf, len); \ | ||
67 | } while (0) | ||
68 | |||
69 | #define CTCM_CCW_DUMP(buf, len) \ | ||
70 | do { \ | ||
71 | if (do_debug_ccw) \ | ||
72 | ctcmpc_dumpit(buf, len); \ | ||
73 | } while (0) | ||
74 | |||
51 | /* | 75 | /* |
52 | * CCW commands, used in this driver. | 76 | * CCW commands, used in this driver. |
53 | */ | 77 | */ |
@@ -161,8 +185,9 @@ struct channel { | |||
161 | fsm_instance *fsm; /* finite state machine of this channel */ | 185 | fsm_instance *fsm; /* finite state machine of this channel */ |
162 | struct net_device *netdev; /* corresponding net_device */ | 186 | struct net_device *netdev; /* corresponding net_device */ |
163 | struct ctcm_profile prof; | 187 | struct ctcm_profile prof; |
164 | unsigned char *trans_skb_data; | 188 | __u8 *trans_skb_data; |
165 | __u16 logflags; | 189 | __u16 logflags; |
190 | __u8 sense_rc; /* last unit check sense code report control */ | ||
166 | }; | 191 | }; |
167 | 192 | ||
168 | struct ctcm_priv { | 193 | struct ctcm_priv { |
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c index 044addee64a2..49ae1cd25caa 100644 --- a/drivers/s390/net/ctcm_mpc.c +++ b/drivers/s390/net/ctcm_mpc.c | |||
@@ -149,7 +149,7 @@ void ctcmpc_dumpit(char *buf, int len) | |||
149 | for (ct = 0; ct < len; ct++, ptr++, rptr++) { | 149 | for (ct = 0; ct < len; ct++, ptr++, rptr++) { |
150 | if (sw == 0) { | 150 | if (sw == 0) { |
151 | #if (UTS_MACHINE == s390x) | 151 | #if (UTS_MACHINE == s390x) |
152 | sprintf(addr, "%16.16lx", (unsigned long)rptr); | 152 | sprintf(addr, "%16.16lx", (__u64)rptr); |
153 | #else | 153 | #else |
154 | sprintf(addr, "%8.8X", (__u32)rptr); | 154 | sprintf(addr, "%8.8X", (__u32)rptr); |
155 | #endif | 155 | #endif |
@@ -164,7 +164,7 @@ void ctcmpc_dumpit(char *buf, int len) | |||
164 | strcat(bhex, " "); | 164 | strcat(bhex, " "); |
165 | 165 | ||
166 | #if (UTS_MACHINE == s390x) | 166 | #if (UTS_MACHINE == s390x) |
167 | sprintf(tbuf, "%2.2lX", (unsigned long)*ptr); | 167 | sprintf(tbuf, "%2.2lX", (__u64)*ptr); |
168 | #else | 168 | #else |
169 | sprintf(tbuf, "%2.2X", (__u32)*ptr); | 169 | sprintf(tbuf, "%2.2X", (__u32)*ptr); |
170 | #endif | 170 | #endif |
@@ -179,24 +179,24 @@ void ctcmpc_dumpit(char *buf, int len) | |||
179 | basc[sw+1] = '\0'; | 179 | basc[sw+1] = '\0'; |
180 | sw++; | 180 | sw++; |
181 | rm--; | 181 | rm--; |
182 | if (sw == 16) { | 182 | if (sw != 16) |
183 | if ((strcmp(duphex, bhex)) != 0) { | 183 | continue; |
184 | if (dup != 0) { | 184 | if ((strcmp(duphex, bhex)) != 0) { |
185 | sprintf(tdup, "Duplicate as above " | 185 | if (dup != 0) { |
186 | "to %s", addr); | 186 | sprintf(tdup, |
187 | printk(KERN_INFO " " | 187 | "Duplicate as above to %s", addr); |
188 | " --- %s ---\n", tdup); | 188 | ctcm_pr_debug(" --- %s ---\n", |
189 | } | 189 | tdup); |
190 | printk(KERN_INFO " %s (+%s) : %s [%s]\n", | 190 | } |
191 | ctcm_pr_debug(" %s (+%s) : %s [%s]\n", | ||
191 | addr, boff, bhex, basc); | 192 | addr, boff, bhex, basc); |
192 | dup = 0; | 193 | dup = 0; |
193 | strcpy(duphex, bhex); | 194 | strcpy(duphex, bhex); |
194 | } else | 195 | } else |
195 | dup++; | 196 | dup++; |
196 | 197 | ||
197 | sw = 0; | 198 | sw = 0; |
198 | rm = 16; | 199 | rm = 16; |
199 | } | ||
200 | } /* endfor */ | 200 | } /* endfor */ |
201 | 201 | ||
202 | if (sw != 0) { | 202 | if (sw != 0) { |
@@ -210,19 +210,17 @@ void ctcmpc_dumpit(char *buf, int len) | |||
210 | } | 210 | } |
211 | if (dup != 0) { | 211 | if (dup != 0) { |
212 | sprintf(tdup, "Duplicate as above to %s", addr); | 212 | sprintf(tdup, "Duplicate as above to %s", addr); |
213 | printk(KERN_INFO " " | 213 | ctcm_pr_debug(" --- %s ---\n", tdup); |
214 | " --- %s ---\n", tdup); | ||
215 | } | 214 | } |
216 | printk(KERN_INFO " %s (+%s) : %s [%s]\n", | 215 | ctcm_pr_debug(" %s (+%s) : %s [%s]\n", |
217 | addr, boff, bhex, basc); | 216 | addr, boff, bhex, basc); |
218 | } else { | 217 | } else { |
219 | if (dup >= 1) { | 218 | if (dup >= 1) { |
220 | sprintf(tdup, "Duplicate as above to %s", addr); | 219 | sprintf(tdup, "Duplicate as above to %s", addr); |
221 | printk(KERN_INFO " " | 220 | ctcm_pr_debug(" --- %s ---\n", tdup); |
222 | " --- %s ---\n", tdup); | ||
223 | } | 221 | } |
224 | if (dup != 0) { | 222 | if (dup != 0) { |
225 | printk(KERN_INFO " %s (+%s) : %s [%s]\n", | 223 | ctcm_pr_debug(" %s (+%s) : %s [%s]\n", |
226 | addr, boff, bhex, basc); | 224 | addr, boff, bhex, basc); |
227 | } | 225 | } |
228 | } | 226 | } |
@@ -241,7 +239,7 @@ void ctcmpc_dumpit(char *buf, int len) | |||
241 | */ | 239 | */ |
242 | void ctcmpc_dump_skb(struct sk_buff *skb, int offset) | 240 | void ctcmpc_dump_skb(struct sk_buff *skb, int offset) |
243 | { | 241 | { |
244 | unsigned char *p = skb->data; | 242 | __u8 *p = skb->data; |
245 | struct th_header *header; | 243 | struct th_header *header; |
246 | struct pdu *pheader; | 244 | struct pdu *pheader; |
247 | int bl = skb->len; | 245 | int bl = skb->len; |
@@ -253,8 +251,8 @@ void ctcmpc_dump_skb(struct sk_buff *skb, int offset) | |||
253 | p += offset; | 251 | p += offset; |
254 | header = (struct th_header *)p; | 252 | header = (struct th_header *)p; |
255 | 253 | ||
256 | printk(KERN_INFO "dump:\n"); | 254 | ctcm_pr_debug("dump:\n"); |
257 | printk(KERN_INFO "skb len=%d \n", skb->len); | 255 | ctcm_pr_debug("skb len=%d \n", skb->len); |
258 | if (skb->len > 2) { | 256 | if (skb->len > 2) { |
259 | switch (header->th_ch_flag) { | 257 | switch (header->th_ch_flag) { |
260 | case TH_HAS_PDU: | 258 | case TH_HAS_PDU: |
@@ -273,32 +271,64 @@ void ctcmpc_dump_skb(struct sk_buff *skb, int offset) | |||
273 | } | 271 | } |
274 | 272 | ||
275 | pheader = (struct pdu *)p; | 273 | pheader = (struct pdu *)p; |
276 | printk(KERN_INFO "pdu->offset: %d hex: %04x\n", | 274 | ctcm_pr_debug("pdu->offset: %d hex: %04x\n", |
277 | pheader->pdu_offset, pheader->pdu_offset); | 275 | pheader->pdu_offset, pheader->pdu_offset); |
278 | printk(KERN_INFO "pdu->flag : %02x\n", pheader->pdu_flag); | 276 | ctcm_pr_debug("pdu->flag : %02x\n", pheader->pdu_flag); |
279 | printk(KERN_INFO "pdu->proto : %02x\n", pheader->pdu_proto); | 277 | ctcm_pr_debug("pdu->proto : %02x\n", pheader->pdu_proto); |
280 | printk(KERN_INFO "pdu->seq : %02x\n", pheader->pdu_seq); | 278 | ctcm_pr_debug("pdu->seq : %02x\n", pheader->pdu_seq); |
281 | goto dumpdata; | 279 | goto dumpdata; |
282 | 280 | ||
283 | dumpth: | 281 | dumpth: |
284 | printk(KERN_INFO "th->seg : %02x\n", header->th_seg); | 282 | ctcm_pr_debug("th->seg : %02x\n", header->th_seg); |
285 | printk(KERN_INFO "th->ch : %02x\n", header->th_ch_flag); | 283 | ctcm_pr_debug("th->ch : %02x\n", header->th_ch_flag); |
286 | printk(KERN_INFO "th->blk_flag: %02x\n", header->th_blk_flag); | 284 | ctcm_pr_debug("th->blk_flag: %02x\n", header->th_blk_flag); |
287 | printk(KERN_INFO "th->type : %s\n", | 285 | ctcm_pr_debug("th->type : %s\n", |
288 | (header->th_is_xid) ? "DATA" : "XID"); | 286 | (header->th_is_xid) ? "DATA" : "XID"); |
289 | printk(KERN_INFO "th->seqnum : %04x\n", header->th_seq_num); | 287 | ctcm_pr_debug("th->seqnum : %04x\n", header->th_seq_num); |
290 | 288 | ||
291 | } | 289 | } |
292 | dumpdata: | 290 | dumpdata: |
293 | if (bl > 32) | 291 | if (bl > 32) |
294 | bl = 32; | 292 | bl = 32; |
295 | printk(KERN_INFO "data: "); | 293 | ctcm_pr_debug("data: "); |
296 | for (i = 0; i < bl; i++) | 294 | for (i = 0; i < bl; i++) |
297 | printk(KERN_INFO "%02x%s", *p++, (i % 16) ? " " : "\n<7>"); | 295 | ctcm_pr_debug("%02x%s", *p++, (i % 16) ? " " : "\n"); |
298 | printk(KERN_INFO "\n"); | 296 | ctcm_pr_debug("\n"); |
299 | } | 297 | } |
300 | #endif | 298 | #endif |
301 | 299 | ||
300 | static struct net_device *ctcmpc_get_dev(int port_num) | ||
301 | { | ||
302 | char device[20]; | ||
303 | struct net_device *dev; | ||
304 | struct ctcm_priv *priv; | ||
305 | |||
306 | sprintf(device, "%s%i", MPC_DEVICE_NAME, port_num); | ||
307 | |||
308 | dev = __dev_get_by_name(&init_net, device); | ||
309 | |||
310 | if (dev == NULL) { | ||
311 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, | ||
312 | "%s: Device not found by name: %s", | ||
313 | CTCM_FUNTAIL, device); | ||
314 | return NULL; | ||
315 | } | ||
316 | priv = dev->priv; | ||
317 | if (priv == NULL) { | ||
318 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, | ||
319 | "%s(%s): dev->priv is NULL", | ||
320 | CTCM_FUNTAIL, device); | ||
321 | return NULL; | ||
322 | } | ||
323 | if (priv->mpcg == NULL) { | ||
324 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, | ||
325 | "%s(%s): priv->mpcg is NULL", | ||
326 | CTCM_FUNTAIL, device); | ||
327 | return NULL; | ||
328 | } | ||
329 | return dev; | ||
330 | } | ||
331 | |||
302 | /* | 332 | /* |
303 | * ctc_mpc_alloc_channel | 333 | * ctc_mpc_alloc_channel |
304 | * (exported interface) | 334 | * (exported interface) |
@@ -308,34 +338,23 @@ dumpdata: | |||
308 | */ | 338 | */ |
309 | int ctc_mpc_alloc_channel(int port_num, void (*callback)(int, int)) | 339 | int ctc_mpc_alloc_channel(int port_num, void (*callback)(int, int)) |
310 | { | 340 | { |
311 | char device[20]; | ||
312 | struct net_device *dev; | 341 | struct net_device *dev; |
313 | struct mpc_group *grp; | 342 | struct mpc_group *grp; |
314 | struct ctcm_priv *priv; | 343 | struct ctcm_priv *priv; |
315 | 344 | ||
316 | ctcm_pr_debug("ctcmpc enter: %s()\n", __FUNCTION__); | 345 | dev = ctcmpc_get_dev(port_num); |
317 | 346 | if (dev == NULL) | |
318 | sprintf(device, "%s%i", MPC_DEVICE_NAME, port_num); | ||
319 | dev = __dev_get_by_name(&init_net, device); | ||
320 | |||
321 | if (dev == NULL) { | ||
322 | printk(KERN_INFO "ctc_mpc_alloc_channel %s dev=NULL\n", device); | ||
323 | return 1; | 347 | return 1; |
324 | } | ||
325 | |||
326 | priv = dev->priv; | 348 | priv = dev->priv; |
327 | grp = priv->mpcg; | 349 | grp = priv->mpcg; |
328 | if (!grp) | ||
329 | return 1; | ||
330 | 350 | ||
331 | grp->allochanfunc = callback; | 351 | grp->allochanfunc = callback; |
332 | grp->port_num = port_num; | 352 | grp->port_num = port_num; |
333 | grp->port_persist = 1; | 353 | grp->port_persist = 1; |
334 | 354 | ||
335 | ctcm_pr_debug("ctcmpc: %s called for device %s state=%s\n", | 355 | CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO, |
336 | __FUNCTION__, | 356 | "%s(%s): state=%s", |
337 | dev->name, | 357 | CTCM_FUNTAIL, dev->name, fsm_getstate_str(grp->fsm)); |
338 | fsm_getstate_str(grp->fsm)); | ||
339 | 358 | ||
340 | switch (fsm_getstate(grp->fsm)) { | 359 | switch (fsm_getstate(grp->fsm)) { |
341 | case MPCG_STATE_INOP: | 360 | case MPCG_STATE_INOP: |
@@ -377,12 +396,8 @@ int ctc_mpc_alloc_channel(int port_num, void (*callback)(int, int)) | |||
377 | grp->allocchan_callback_retries = 0; | 396 | grp->allocchan_callback_retries = 0; |
378 | } | 397 | } |
379 | break; | 398 | break; |
380 | default: | ||
381 | return 0; | ||
382 | |||
383 | } | 399 | } |
384 | 400 | ||
385 | ctcm_pr_debug("ctcmpc exit: %s()\n", __FUNCTION__); | ||
386 | return 0; | 401 | return 0; |
387 | } | 402 | } |
388 | EXPORT_SYMBOL(ctc_mpc_alloc_channel); | 403 | EXPORT_SYMBOL(ctc_mpc_alloc_channel); |
@@ -394,31 +409,22 @@ EXPORT_SYMBOL(ctc_mpc_alloc_channel); | |||
394 | void ctc_mpc_establish_connectivity(int port_num, | 409 | void ctc_mpc_establish_connectivity(int port_num, |
395 | void (*callback)(int, int, int)) | 410 | void (*callback)(int, int, int)) |
396 | { | 411 | { |
397 | char device[20]; | ||
398 | struct net_device *dev; | 412 | struct net_device *dev; |
399 | struct mpc_group *grp; | 413 | struct mpc_group *grp; |
400 | struct ctcm_priv *priv; | 414 | struct ctcm_priv *priv; |
401 | struct channel *rch, *wch; | 415 | struct channel *rch, *wch; |
402 | 416 | ||
403 | ctcm_pr_debug("ctcmpc enter: %s()\n", __FUNCTION__); | 417 | dev = ctcmpc_get_dev(port_num); |
404 | 418 | if (dev == NULL) | |
405 | sprintf(device, "%s%i", MPC_DEVICE_NAME, port_num); | ||
406 | dev = __dev_get_by_name(&init_net, device); | ||
407 | |||
408 | if (dev == NULL) { | ||
409 | printk(KERN_INFO "ctc_mpc_establish_connectivity " | ||
410 | "%s dev=NULL\n", device); | ||
411 | return; | 419 | return; |
412 | } | ||
413 | priv = dev->priv; | 420 | priv = dev->priv; |
421 | grp = priv->mpcg; | ||
414 | rch = priv->channel[READ]; | 422 | rch = priv->channel[READ]; |
415 | wch = priv->channel[WRITE]; | 423 | wch = priv->channel[WRITE]; |
416 | 424 | ||
417 | grp = priv->mpcg; | 425 | CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO, |
418 | 426 | "%s(%s): state=%s", | |
419 | ctcm_pr_debug("ctcmpc: %s() called for device %s state=%s\n", | 427 | CTCM_FUNTAIL, dev->name, fsm_getstate_str(grp->fsm)); |
420 | __FUNCTION__, dev->name, | ||
421 | fsm_getstate_str(grp->fsm)); | ||
422 | 428 | ||
423 | grp->estconnfunc = callback; | 429 | grp->estconnfunc = callback; |
424 | grp->port_num = port_num; | 430 | grp->port_num = port_num; |
@@ -446,8 +452,10 @@ void ctc_mpc_establish_connectivity(int port_num, | |||
446 | case MPCG_STATE_RESET: | 452 | case MPCG_STATE_RESET: |
447 | /* MPC Group is not ready to start XID - min num of */ | 453 | /* MPC Group is not ready to start XID - min num of */ |
448 | /* 1 read and 1 write channel have not been acquired*/ | 454 | /* 1 read and 1 write channel have not been acquired*/ |
449 | printk(KERN_WARNING "ctcmpc: %s() REJECTED ACTIVE XID Req" | 455 | |
450 | "uest - Channel Pair is not Active\n", __FUNCTION__); | 456 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, |
457 | "%s(%s): REJECTED - inactive channels", | ||
458 | CTCM_FUNTAIL, dev->name); | ||
451 | if (grp->estconnfunc) { | 459 | if (grp->estconnfunc) { |
452 | grp->estconnfunc(grp->port_num, -1, 0); | 460 | grp->estconnfunc(grp->port_num, -1, 0); |
453 | grp->estconnfunc = NULL; | 461 | grp->estconnfunc = NULL; |
@@ -457,11 +465,12 @@ void ctc_mpc_establish_connectivity(int port_num, | |||
457 | /* alloc channel was called but no XID exchange */ | 465 | /* alloc channel was called but no XID exchange */ |
458 | /* has occurred. initiate xside XID exchange */ | 466 | /* has occurred. initiate xside XID exchange */ |
459 | /* make sure yside XID0 processing has not started */ | 467 | /* make sure yside XID0 processing has not started */ |
468 | |||
460 | if ((fsm_getstate(rch->fsm) > CH_XID0_PENDING) || | 469 | if ((fsm_getstate(rch->fsm) > CH_XID0_PENDING) || |
461 | (fsm_getstate(wch->fsm) > CH_XID0_PENDING)) { | 470 | (fsm_getstate(wch->fsm) > CH_XID0_PENDING)) { |
462 | printk(KERN_WARNING "mpc: %s() ABORT ACTIVE XID" | 471 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, |
463 | " Request- PASSIVE XID in process\n" | 472 | "%s(%s): ABORT - PASSIVE XID", |
464 | , __FUNCTION__); | 473 | CTCM_FUNTAIL, dev->name); |
465 | break; | 474 | break; |
466 | } | 475 | } |
467 | grp->send_qllc_disc = 1; | 476 | grp->send_qllc_disc = 1; |
@@ -476,9 +485,9 @@ void ctc_mpc_establish_connectivity(int port_num, | |||
476 | (fsm_getstate(rch->fsm) == CH_XID0_PENDING)) | 485 | (fsm_getstate(rch->fsm) == CH_XID0_PENDING)) |
477 | fsm_event(grp->fsm, MPCG_EVENT_XID0DO, rch); | 486 | fsm_event(grp->fsm, MPCG_EVENT_XID0DO, rch); |
478 | else { | 487 | else { |
479 | printk(KERN_WARNING "mpc: %s() Unable to start" | 488 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, |
480 | " ACTIVE XID0 on read channel\n", | 489 | "%s(%s): RX-%s not ready for ACTIVE XID0", |
481 | __FUNCTION__); | 490 | CTCM_FUNTAIL, dev->name, rch->id); |
482 | if (grp->estconnfunc) { | 491 | if (grp->estconnfunc) { |
483 | grp->estconnfunc(grp->port_num, -1, 0); | 492 | grp->estconnfunc(grp->port_num, -1, 0); |
484 | grp->estconnfunc = NULL; | 493 | grp->estconnfunc = NULL; |
@@ -490,9 +499,9 @@ void ctc_mpc_establish_connectivity(int port_num, | |||
490 | (fsm_getstate(wch->fsm) == CH_XID0_PENDING)) | 499 | (fsm_getstate(wch->fsm) == CH_XID0_PENDING)) |
491 | fsm_event(grp->fsm, MPCG_EVENT_XID0DO, wch); | 500 | fsm_event(grp->fsm, MPCG_EVENT_XID0DO, wch); |
492 | else { | 501 | else { |
493 | printk(KERN_WARNING "mpc: %s() Unable to start" | 502 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, |
494 | " ACTIVE XID0 on write channel\n", | 503 | "%s(%s): WX-%s not ready for ACTIVE XID0", |
495 | __FUNCTION__); | 504 | CTCM_FUNTAIL, dev->name, wch->id); |
496 | if (grp->estconnfunc) { | 505 | if (grp->estconnfunc) { |
497 | grp->estconnfunc(grp->port_num, -1, 0); | 506 | grp->estconnfunc(grp->port_num, -1, 0); |
498 | grp->estconnfunc = NULL; | 507 | grp->estconnfunc = NULL; |
@@ -508,7 +517,7 @@ void ctc_mpc_establish_connectivity(int port_num, | |||
508 | } | 517 | } |
509 | 518 | ||
510 | done: | 519 | done: |
511 | ctcm_pr_debug("ctcmpc exit: %s()\n", __FUNCTION__); | 520 | CTCM_PR_DEBUG("Exit %s()\n", __func__); |
512 | return; | 521 | return; |
513 | } | 522 | } |
514 | EXPORT_SYMBOL(ctc_mpc_establish_connectivity); | 523 | EXPORT_SYMBOL(ctc_mpc_establish_connectivity); |
@@ -520,40 +529,22 @@ EXPORT_SYMBOL(ctc_mpc_establish_connectivity); | |||
520 | void ctc_mpc_dealloc_ch(int port_num) | 529 | void ctc_mpc_dealloc_ch(int port_num) |
521 | { | 530 | { |
522 | struct net_device *dev; | 531 | struct net_device *dev; |
523 | char device[20]; | ||
524 | struct ctcm_priv *priv; | 532 | struct ctcm_priv *priv; |
525 | struct mpc_group *grp; | 533 | struct mpc_group *grp; |
526 | 534 | ||
527 | ctcm_pr_debug("ctcmpc enter: %s()\n", __FUNCTION__); | 535 | dev = ctcmpc_get_dev(port_num); |
528 | sprintf(device, "%s%i", MPC_DEVICE_NAME, port_num); | 536 | if (dev == NULL) |
529 | dev = __dev_get_by_name(&init_net, device); | 537 | return; |
530 | 538 | priv = dev->priv; | |
531 | if (dev == NULL) { | 539 | grp = priv->mpcg; |
532 | printk(KERN_INFO "%s() %s dev=NULL\n", __FUNCTION__, device); | ||
533 | goto done; | ||
534 | } | ||
535 | 540 | ||
536 | ctcm_pr_debug("ctcmpc:%s %s() called for device %s refcount=%d\n", | 541 | CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_DEBUG, |
537 | dev->name, __FUNCTION__, | 542 | "%s: %s: refcount = %d\n", |
538 | dev->name, atomic_read(&dev->refcnt)); | 543 | CTCM_FUNTAIL, dev->name, atomic_read(&dev->refcnt)); |
539 | 544 | ||
540 | priv = dev->priv; | ||
541 | if (priv == NULL) { | ||
542 | printk(KERN_INFO "%s() %s priv=NULL\n", | ||
543 | __FUNCTION__, device); | ||
544 | goto done; | ||
545 | } | ||
546 | fsm_deltimer(&priv->restart_timer); | 545 | fsm_deltimer(&priv->restart_timer); |
547 | |||
548 | grp = priv->mpcg; | ||
549 | if (grp == NULL) { | ||
550 | printk(KERN_INFO "%s() %s dev=NULL\n", __FUNCTION__, device); | ||
551 | goto done; | ||
552 | } | ||
553 | grp->channels_terminating = 0; | 546 | grp->channels_terminating = 0; |
554 | |||
555 | fsm_deltimer(&grp->timer); | 547 | fsm_deltimer(&grp->timer); |
556 | |||
557 | grp->allochanfunc = NULL; | 548 | grp->allochanfunc = NULL; |
558 | grp->estconnfunc = NULL; | 549 | grp->estconnfunc = NULL; |
559 | grp->port_persist = 0; | 550 | grp->port_persist = 0; |
@@ -561,8 +552,6 @@ void ctc_mpc_dealloc_ch(int port_num) | |||
561 | fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); | 552 | fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); |
562 | 553 | ||
563 | ctcm_close(dev); | 554 | ctcm_close(dev); |
564 | done: | ||
565 | ctcm_pr_debug("ctcmpc exit: %s()\n", __FUNCTION__); | ||
566 | return; | 555 | return; |
567 | } | 556 | } |
568 | EXPORT_SYMBOL(ctc_mpc_dealloc_ch); | 557 | EXPORT_SYMBOL(ctc_mpc_dealloc_ch); |
@@ -573,32 +562,22 @@ EXPORT_SYMBOL(ctc_mpc_dealloc_ch); | |||
573 | */ | 562 | */ |
574 | void ctc_mpc_flow_control(int port_num, int flowc) | 563 | void ctc_mpc_flow_control(int port_num, int flowc) |
575 | { | 564 | { |
576 | char device[20]; | ||
577 | struct ctcm_priv *priv; | 565 | struct ctcm_priv *priv; |
578 | struct mpc_group *grp; | 566 | struct mpc_group *grp; |
579 | struct net_device *dev; | 567 | struct net_device *dev; |
580 | struct channel *rch; | 568 | struct channel *rch; |
581 | int mpcg_state; | 569 | int mpcg_state; |
582 | 570 | ||
583 | ctcm_pr_debug("ctcmpc enter: %s() %i\n", __FUNCTION__, flowc); | 571 | dev = ctcmpc_get_dev(port_num); |
584 | 572 | if (dev == NULL) | |
585 | sprintf(device, "%s%i", MPC_DEVICE_NAME, port_num); | ||
586 | dev = __dev_get_by_name(&init_net, device); | ||
587 | |||
588 | if (dev == NULL) { | ||
589 | printk(KERN_INFO "ctc_mpc_flow_control %s dev=NULL\n", device); | ||
590 | return; | 573 | return; |
591 | } | 574 | priv = dev->priv; |
575 | grp = priv->mpcg; | ||
592 | 576 | ||
593 | ctcm_pr_debug("ctcmpc: %s %s called \n", dev->name, __FUNCTION__); | 577 | CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG, |
578 | "%s: %s: flowc = %d", | ||
579 | CTCM_FUNTAIL, dev->name, flowc); | ||
594 | 580 | ||
595 | priv = dev->priv; | ||
596 | if (priv == NULL) { | ||
597 | printk(KERN_INFO "ctcmpc:%s() %s priv=NULL\n", | ||
598 | __FUNCTION__, device); | ||
599 | return; | ||
600 | } | ||
601 | grp = priv->mpcg; | ||
602 | rch = priv->channel[READ]; | 581 | rch = priv->channel[READ]; |
603 | 582 | ||
604 | mpcg_state = fsm_getstate(grp->fsm); | 583 | mpcg_state = fsm_getstate(grp->fsm); |
@@ -629,7 +608,6 @@ void ctc_mpc_flow_control(int port_num, int flowc) | |||
629 | break; | 608 | break; |
630 | } | 609 | } |
631 | 610 | ||
632 | ctcm_pr_debug("ctcmpc exit: %s() %i\n", __FUNCTION__, flowc); | ||
633 | } | 611 | } |
634 | EXPORT_SYMBOL(ctc_mpc_flow_control); | 612 | EXPORT_SYMBOL(ctc_mpc_flow_control); |
635 | 613 | ||
@@ -646,12 +624,8 @@ static void mpc_rcvd_sweep_resp(struct mpcg_info *mpcginfo) | |||
646 | struct mpc_group *grp = priv->mpcg; | 624 | struct mpc_group *grp = priv->mpcg; |
647 | struct channel *ch = priv->channel[WRITE]; | 625 | struct channel *ch = priv->channel[WRITE]; |
648 | 626 | ||
649 | if (do_debug) | 627 | CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, ch, ch->id); |
650 | ctcm_pr_debug("ctcmpc enter: %s(): ch=0x%p id=%s\n", | 628 | CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH); |
651 | __FUNCTION__, ch, ch->id); | ||
652 | |||
653 | if (do_debug_data) | ||
654 | ctcmpc_dumpit((char *)mpcginfo->sweep, TH_SWEEP_LENGTH); | ||
655 | 629 | ||
656 | grp->sweep_rsp_pend_num--; | 630 | grp->sweep_rsp_pend_num--; |
657 | 631 | ||
@@ -684,14 +658,13 @@ static void ctcmpc_send_sweep_resp(struct channel *rch) | |||
684 | struct sk_buff *sweep_skb; | 658 | struct sk_buff *sweep_skb; |
685 | struct channel *ch = priv->channel[WRITE]; | 659 | struct channel *ch = priv->channel[WRITE]; |
686 | 660 | ||
687 | if (do_debug) | 661 | CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, rch, rch->id); |
688 | ctcm_pr_debug("ctcmpc exit : %s(): ch=0x%p id=%s\n", | ||
689 | __FUNCTION__, rch, rch->id); | ||
690 | 662 | ||
691 | sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, | 663 | sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC | GFP_DMA); |
692 | GFP_ATOMIC|GFP_DMA); | ||
693 | if (sweep_skb == NULL) { | 664 | if (sweep_skb == NULL) { |
694 | printk(KERN_INFO "Couldn't alloc sweep_skb\n"); | 665 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, |
666 | "%s(%s): sweep_skb allocation ERROR\n", | ||
667 | CTCM_FUNTAIL, rch->id); | ||
695 | rc = -ENOMEM; | 668 | rc = -ENOMEM; |
696 | goto done; | 669 | goto done; |
697 | } | 670 | } |
@@ -746,7 +719,7 @@ static void mpc_rcvd_sweep_req(struct mpcg_info *mpcginfo) | |||
746 | 719 | ||
747 | if (do_debug) | 720 | if (do_debug) |
748 | CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG, | 721 | CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG, |
749 | " %s(): ch=0x%p id=%s\n", __FUNCTION__, ch, ch->id); | 722 | " %s(): ch=0x%p id=%s\n", __func__, ch, ch->id); |
750 | 723 | ||
751 | if (grp->in_sweep == 0) { | 724 | if (grp->in_sweep == 0) { |
752 | grp->in_sweep = 1; | 725 | grp->in_sweep = 1; |
@@ -755,8 +728,7 @@ static void mpc_rcvd_sweep_req(struct mpcg_info *mpcginfo) | |||
755 | grp->sweep_rsp_pend_num = grp->active_channels[READ]; | 728 | grp->sweep_rsp_pend_num = grp->active_channels[READ]; |
756 | } | 729 | } |
757 | 730 | ||
758 | if (do_debug_data) | 731 | CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH); |
759 | ctcmpc_dumpit((char *)mpcginfo->sweep, TH_SWEEP_LENGTH); | ||
760 | 732 | ||
761 | grp->sweep_req_pend_num--; | 733 | grp->sweep_req_pend_num--; |
762 | ctcmpc_send_sweep_resp(ch); | 734 | ctcmpc_send_sweep_resp(ch); |
@@ -875,25 +847,13 @@ static int mpcg_fsm_len = ARRAY_SIZE(mpcg_fsm); | |||
875 | static void mpc_action_go_ready(fsm_instance *fsm, int event, void *arg) | 847 | static void mpc_action_go_ready(fsm_instance *fsm, int event, void *arg) |
876 | { | 848 | { |
877 | struct net_device *dev = arg; | 849 | struct net_device *dev = arg; |
878 | struct ctcm_priv *priv = NULL; | 850 | struct ctcm_priv *priv = dev->priv; |
879 | struct mpc_group *grp = NULL; | 851 | struct mpc_group *grp = priv->mpcg; |
880 | |||
881 | if (dev == NULL) { | ||
882 | printk(KERN_INFO "%s() dev=NULL\n", __FUNCTION__); | ||
883 | return; | ||
884 | } | ||
885 | |||
886 | ctcm_pr_debug("ctcmpc enter: %s %s()\n", dev->name, __FUNCTION__); | ||
887 | |||
888 | priv = dev->priv; | ||
889 | if (priv == NULL) { | ||
890 | printk(KERN_INFO "%s() priv=NULL\n", __FUNCTION__); | ||
891 | return; | ||
892 | } | ||
893 | 852 | ||
894 | grp = priv->mpcg; | ||
895 | if (grp == NULL) { | 853 | if (grp == NULL) { |
896 | printk(KERN_INFO "%s() grp=NULL\n", __FUNCTION__); | 854 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, |
855 | "%s(%s): No MPC group", | ||
856 | CTCM_FUNTAIL, dev->name); | ||
897 | return; | 857 | return; |
898 | } | 858 | } |
899 | 859 | ||
@@ -907,7 +867,12 @@ static void mpc_action_go_ready(fsm_instance *fsm, int event, void *arg) | |||
907 | grp->estconnfunc = NULL; | 867 | grp->estconnfunc = NULL; |
908 | } else if (grp->allochanfunc) | 868 | } else if (grp->allochanfunc) |
909 | grp->send_qllc_disc = 1; | 869 | grp->send_qllc_disc = 1; |
910 | goto done; | 870 | |
871 | fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); | ||
872 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, | ||
873 | "%s(%s): fails", | ||
874 | CTCM_FUNTAIL, dev->name); | ||
875 | return; | ||
911 | } | 876 | } |
912 | 877 | ||
913 | grp->port_persist = 1; | 878 | grp->port_persist = 1; |
@@ -916,14 +881,7 @@ static void mpc_action_go_ready(fsm_instance *fsm, int event, void *arg) | |||
916 | 881 | ||
917 | tasklet_hi_schedule(&grp->mpc_tasklet2); | 882 | tasklet_hi_schedule(&grp->mpc_tasklet2); |
918 | 883 | ||
919 | ctcm_pr_debug("ctcmpc exit: %s %s()\n", dev->name, __FUNCTION__); | ||
920 | return; | 884 | return; |
921 | |||
922 | done: | ||
923 | fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); | ||
924 | |||
925 | |||
926 | ctcm_pr_info("ctcmpc: %s()failure occurred\n", __FUNCTION__); | ||
927 | } | 885 | } |
928 | 886 | ||
929 | /* | 887 | /* |
@@ -933,42 +891,28 @@ done: | |||
933 | void mpc_group_ready(unsigned long adev) | 891 | void mpc_group_ready(unsigned long adev) |
934 | { | 892 | { |
935 | struct net_device *dev = (struct net_device *)adev; | 893 | struct net_device *dev = (struct net_device *)adev; |
936 | struct ctcm_priv *priv = NULL; | 894 | struct ctcm_priv *priv = dev->priv; |
937 | struct mpc_group *grp = NULL; | 895 | struct mpc_group *grp = priv->mpcg; |
938 | struct channel *ch = NULL; | 896 | struct channel *ch = NULL; |
939 | 897 | ||
940 | |||
941 | ctcm_pr_debug("ctcmpc enter: %s()\n", __FUNCTION__); | ||
942 | |||
943 | if (dev == NULL) { | ||
944 | printk(KERN_INFO "%s() dev=NULL\n", __FUNCTION__); | ||
945 | return; | ||
946 | } | ||
947 | |||
948 | priv = dev->priv; | ||
949 | if (priv == NULL) { | ||
950 | printk(KERN_INFO "%s() priv=NULL\n", __FUNCTION__); | ||
951 | return; | ||
952 | } | ||
953 | |||
954 | grp = priv->mpcg; | ||
955 | if (grp == NULL) { | 898 | if (grp == NULL) { |
956 | printk(KERN_INFO "ctcmpc:%s() grp=NULL\n", __FUNCTION__); | 899 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, |
900 | "%s(%s): No MPC group", | ||
901 | CTCM_FUNTAIL, dev->name); | ||
957 | return; | 902 | return; |
958 | } | 903 | } |
959 | 904 | ||
960 | printk(KERN_NOTICE "ctcmpc: %s GROUP TRANSITIONED TO READY" | 905 | CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_NOTICE, |
961 | " maxbuf:%d\n", | 906 | "%s: %s: GROUP TRANSITIONED TO READY, maxbuf = %d\n", |
962 | dev->name, grp->group_max_buflen); | 907 | CTCM_FUNTAIL, dev->name, grp->group_max_buflen); |
963 | 908 | ||
964 | fsm_newstate(grp->fsm, MPCG_STATE_READY); | 909 | fsm_newstate(grp->fsm, MPCG_STATE_READY); |
965 | 910 | ||
966 | /* Put up a read on the channel */ | 911 | /* Put up a read on the channel */ |
967 | ch = priv->channel[READ]; | 912 | ch = priv->channel[READ]; |
968 | ch->pdu_seq = 0; | 913 | ch->pdu_seq = 0; |
969 | if (do_debug_data) | 914 | CTCM_PR_DBGDATA("ctcmpc: %s() ToDCM_pdu_seq= %08x\n" , |
970 | ctcm_pr_debug("ctcmpc: %s() ToDCM_pdu_seq= %08x\n" , | 915 | __func__, ch->pdu_seq); |
971 | __FUNCTION__, ch->pdu_seq); | ||
972 | 916 | ||
973 | ctcmpc_chx_rxidle(ch->fsm, CTC_EVENT_START, ch); | 917 | ctcmpc_chx_rxidle(ch->fsm, CTC_EVENT_START, ch); |
974 | /* Put the write channel in idle state */ | 918 | /* Put the write channel in idle state */ |
@@ -980,22 +924,18 @@ void mpc_group_ready(unsigned long adev) | |||
980 | spin_unlock(&ch->collect_lock); | 924 | spin_unlock(&ch->collect_lock); |
981 | } | 925 | } |
982 | ctcm_chx_txidle(ch->fsm, CTC_EVENT_START, ch); | 926 | ctcm_chx_txidle(ch->fsm, CTC_EVENT_START, ch); |
983 | |||
984 | ctcm_clear_busy(dev); | 927 | ctcm_clear_busy(dev); |
985 | 928 | ||
986 | if (grp->estconnfunc) { | 929 | if (grp->estconnfunc) { |
987 | grp->estconnfunc(grp->port_num, 0, | 930 | grp->estconnfunc(grp->port_num, 0, |
988 | grp->group_max_buflen); | 931 | grp->group_max_buflen); |
989 | grp->estconnfunc = NULL; | 932 | grp->estconnfunc = NULL; |
990 | } else | 933 | } else if (grp->allochanfunc) |
991 | if (grp->allochanfunc) | 934 | grp->allochanfunc(grp->port_num, grp->group_max_buflen); |
992 | grp->allochanfunc(grp->port_num, | ||
993 | grp->group_max_buflen); | ||
994 | 935 | ||
995 | grp->send_qllc_disc = 1; | 936 | grp->send_qllc_disc = 1; |
996 | grp->changed_side = 0; | 937 | grp->changed_side = 0; |
997 | 938 | ||
998 | ctcm_pr_debug("ctcmpc exit: %s()\n", __FUNCTION__); | ||
999 | return; | 939 | return; |
1000 | 940 | ||
1001 | } | 941 | } |
@@ -1004,51 +944,26 @@ void mpc_group_ready(unsigned long adev) | |||
1004 | * Increment the MPC Group Active Channel Counts | 944 | * Increment the MPC Group Active Channel Counts |
1005 | * helper of dev_action (called from channel fsm) | 945 | * helper of dev_action (called from channel fsm) |
1006 | */ | 946 | */ |
1007 | int mpc_channel_action(struct channel *ch, int direction, int action) | 947 | void mpc_channel_action(struct channel *ch, int direction, int action) |
1008 | { | 948 | { |
1009 | struct net_device *dev = ch->netdev; | 949 | struct net_device *dev = ch->netdev; |
1010 | struct ctcm_priv *priv; | 950 | struct ctcm_priv *priv = dev->priv; |
1011 | struct mpc_group *grp = NULL; | 951 | struct mpc_group *grp = priv->mpcg; |
1012 | int rc = 0; | ||
1013 | |||
1014 | if (do_debug) | ||
1015 | ctcm_pr_debug("ctcmpc enter: %s(): ch=0x%p id=%s\n", | ||
1016 | __FUNCTION__, ch, ch->id); | ||
1017 | |||
1018 | if (dev == NULL) { | ||
1019 | printk(KERN_INFO "ctcmpc_channel_action %i dev=NULL\n", | ||
1020 | action); | ||
1021 | rc = 1; | ||
1022 | goto done; | ||
1023 | } | ||
1024 | |||
1025 | priv = dev->priv; | ||
1026 | if (priv == NULL) { | ||
1027 | printk(KERN_INFO | ||
1028 | "ctcmpc_channel_action%i priv=NULL, dev=%s\n", | ||
1029 | action, dev->name); | ||
1030 | rc = 2; | ||
1031 | goto done; | ||
1032 | } | ||
1033 | |||
1034 | grp = priv->mpcg; | ||
1035 | 952 | ||
1036 | if (grp == NULL) { | 953 | if (grp == NULL) { |
1037 | printk(KERN_INFO "ctcmpc: %s()%i mpcgroup=NULL, dev=%s\n", | 954 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, |
1038 | __FUNCTION__, action, dev->name); | 955 | "%s(%s): No MPC group", |
1039 | rc = 3; | 956 | CTCM_FUNTAIL, dev->name); |
1040 | goto done; | 957 | return; |
1041 | } | 958 | } |
1042 | 959 | ||
1043 | ctcm_pr_info( | 960 | CTCM_PR_DEBUG("enter %s: ch=0x%p id=%s\n", __func__, ch, ch->id); |
1044 | "ctcmpc: %s() %i(): Grp:%s total_channel_paths=%i " | 961 | |
1045 | "active_channels read=%i, write=%i\n", | 962 | CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE, |
1046 | __FUNCTION__, | 963 | "%s: %i / Grp:%s total_channels=%i, active_channels: " |
1047 | action, | 964 | "read=%i, write=%i\n", __func__, action, |
1048 | fsm_getstate_str(grp->fsm), | 965 | fsm_getstate_str(grp->fsm), grp->num_channel_paths, |
1049 | grp->num_channel_paths, | 966 | grp->active_channels[READ], grp->active_channels[WRITE]); |
1050 | grp->active_channels[READ], | ||
1051 | grp->active_channels[WRITE]); | ||
1052 | 967 | ||
1053 | if ((action == MPC_CHANNEL_ADD) && (ch->in_mpcgroup == 0)) { | 968 | if ((action == MPC_CHANNEL_ADD) && (ch->in_mpcgroup == 0)) { |
1054 | grp->num_channel_paths++; | 969 | grp->num_channel_paths++; |
@@ -1062,10 +977,11 @@ int mpc_channel_action(struct channel *ch, int direction, int action) | |||
1062 | ch->xid_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, | 977 | ch->xid_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, |
1063 | GFP_ATOMIC | GFP_DMA); | 978 | GFP_ATOMIC | GFP_DMA); |
1064 | if (ch->xid_skb == NULL) { | 979 | if (ch->xid_skb == NULL) { |
1065 | printk(KERN_INFO "ctcmpc: %s()" | 980 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, |
1066 | "Couldn't alloc ch xid_skb\n", __FUNCTION__); | 981 | "%s(%s): Couldn't alloc ch xid_skb\n", |
982 | CTCM_FUNTAIL, dev->name); | ||
1067 | fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); | 983 | fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); |
1068 | return 1; | 984 | return; |
1069 | } | 985 | } |
1070 | ch->xid_skb_data = ch->xid_skb->data; | 986 | ch->xid_skb_data = ch->xid_skb->data; |
1071 | ch->xid_th = (struct th_header *)ch->xid_skb->data; | 987 | ch->xid_th = (struct th_header *)ch->xid_skb->data; |
@@ -1097,8 +1013,9 @@ int mpc_channel_action(struct channel *ch, int direction, int action) | |||
1097 | (grp->active_channels[WRITE] > 0) && | 1013 | (grp->active_channels[WRITE] > 0) && |
1098 | (fsm_getstate(grp->fsm) < MPCG_STATE_XID2INITW)) { | 1014 | (fsm_getstate(grp->fsm) < MPCG_STATE_XID2INITW)) { |
1099 | fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW); | 1015 | fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW); |
1100 | printk(KERN_NOTICE "ctcmpc: %s MPC GROUP " | 1016 | CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_NOTICE, |
1101 | "CHANNELS ACTIVE\n", dev->name); | 1017 | "%s: %s: MPC GROUP CHANNELS ACTIVE\n", |
1018 | __func__, dev->name); | ||
1102 | } | 1019 | } |
1103 | } else if ((action == MPC_CHANNEL_REMOVE) && | 1020 | } else if ((action == MPC_CHANNEL_REMOVE) && |
1104 | (ch->in_mpcgroup == 1)) { | 1021 | (ch->in_mpcgroup == 1)) { |
@@ -1119,25 +1036,14 @@ int mpc_channel_action(struct channel *ch, int direction, int action) | |||
1119 | (grp->active_channels[READ] > 0))) | 1036 | (grp->active_channels[READ] > 0))) |
1120 | fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); | 1037 | fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); |
1121 | } | 1038 | } |
1122 | |||
1123 | done: | 1039 | done: |
1040 | CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG, | ||
1041 | "exit %s: %i / Grp:%s total_channels=%i, active_channels: " | ||
1042 | "read=%i, write=%i\n", __func__, action, | ||
1043 | fsm_getstate_str(grp->fsm), grp->num_channel_paths, | ||
1044 | grp->active_channels[READ], grp->active_channels[WRITE]); | ||
1124 | 1045 | ||
1125 | if (do_debug) { | 1046 | CTCM_PR_DEBUG("exit %s: ch=0x%p id=%s\n", __func__, ch, ch->id); |
1126 | ctcm_pr_debug( | ||
1127 | "ctcmpc: %s() %i Grp:%s ttl_chan_paths=%i " | ||
1128 | "active_chans read=%i, write=%i\n", | ||
1129 | __FUNCTION__, | ||
1130 | action, | ||
1131 | fsm_getstate_str(grp->fsm), | ||
1132 | grp->num_channel_paths, | ||
1133 | grp->active_channels[READ], | ||
1134 | grp->active_channels[WRITE]); | ||
1135 | |||
1136 | ctcm_pr_debug("ctcmpc exit : %s(): ch=0x%p id=%s\n", | ||
1137 | __FUNCTION__, ch, ch->id); | ||
1138 | } | ||
1139 | return rc; | ||
1140 | |||
1141 | } | 1047 | } |
1142 | 1048 | ||
1143 | /** | 1049 | /** |
@@ -1163,9 +1069,8 @@ static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb) | |||
1163 | int skblen; | 1069 | int skblen; |
1164 | int sendrc = 0; | 1070 | int sendrc = 0; |
1165 | 1071 | ||
1166 | if (do_debug) | 1072 | CTCM_PR_DEBUG("ctcmpc enter: %s() %s cp:%i ch:%s\n", |
1167 | ctcm_pr_debug("ctcmpc enter: %s() %s cp:%i ch:%s\n", | 1073 | __func__, dev->name, smp_processor_id(), ch->id); |
1168 | __FUNCTION__, dev->name, smp_processor_id(), ch->id); | ||
1169 | 1074 | ||
1170 | header = (struct th_header *)pskb->data; | 1075 | header = (struct th_header *)pskb->data; |
1171 | if ((header->th_seg == 0) && | 1076 | if ((header->th_seg == 0) && |
@@ -1174,21 +1079,16 @@ static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb) | |||
1174 | (header->th_seq_num == 0)) | 1079 | (header->th_seq_num == 0)) |
1175 | /* nothing for us */ goto done; | 1080 | /* nothing for us */ goto done; |
1176 | 1081 | ||
1177 | if (do_debug_data) { | 1082 | CTCM_PR_DBGDATA("%s: th_header\n", __func__); |
1178 | ctcm_pr_debug("ctcmpc: %s() th_header\n", __FUNCTION__); | 1083 | CTCM_D3_DUMP((char *)header, TH_HEADER_LENGTH); |
1179 | ctcmpc_dumpit((char *)header, TH_HEADER_LENGTH); | 1084 | CTCM_PR_DBGDATA("%s: pskb len: %04x \n", __func__, pskb->len); |
1180 | ctcm_pr_debug("ctcmpc: %s() pskb len: %04x \n", | ||
1181 | __FUNCTION__, pskb->len); | ||
1182 | } | ||
1183 | 1085 | ||
1184 | pskb->dev = dev; | 1086 | pskb->dev = dev; |
1185 | pskb->ip_summed = CHECKSUM_UNNECESSARY; | 1087 | pskb->ip_summed = CHECKSUM_UNNECESSARY; |
1186 | skb_pull(pskb, TH_HEADER_LENGTH); | 1088 | skb_pull(pskb, TH_HEADER_LENGTH); |
1187 | 1089 | ||
1188 | if (likely(header->th_ch_flag == TH_HAS_PDU)) { | 1090 | if (likely(header->th_ch_flag == TH_HAS_PDU)) { |
1189 | if (do_debug_data) | 1091 | CTCM_PR_DBGDATA("%s: came into th_has_pdu\n", __func__); |
1190 | ctcm_pr_debug("ctcmpc: %s() came into th_has_pdu\n", | ||
1191 | __FUNCTION__); | ||
1192 | if ((fsm_getstate(grp->fsm) == MPCG_STATE_FLOWC) || | 1092 | if ((fsm_getstate(grp->fsm) == MPCG_STATE_FLOWC) || |
1193 | ((fsm_getstate(grp->fsm) == MPCG_STATE_READY) && | 1093 | ((fsm_getstate(grp->fsm) == MPCG_STATE_READY) && |
1194 | (header->th_seq_num != ch->th_seq_num + 1) && | 1094 | (header->th_seq_num != ch->th_seq_num + 1) && |
@@ -1202,33 +1102,29 @@ static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb) | |||
1202 | grp->out_of_sequence += 1; | 1102 | grp->out_of_sequence += 1; |
1203 | __skb_push(pskb, TH_HEADER_LENGTH); | 1103 | __skb_push(pskb, TH_HEADER_LENGTH); |
1204 | skb_queue_tail(&ch->io_queue, pskb); | 1104 | skb_queue_tail(&ch->io_queue, pskb); |
1205 | if (do_debug_data) | 1105 | CTCM_PR_DBGDATA("%s: th_seq_num expect:%08x " |
1206 | ctcm_pr_debug("ctcmpc: %s() th_seq_num " | 1106 | "got:%08x\n", __func__, |
1207 | "expect:%08x got:%08x\n", __FUNCTION__, | 1107 | ch->th_seq_num + 1, header->th_seq_num); |
1208 | ch->th_seq_num + 1, header->th_seq_num); | ||
1209 | 1108 | ||
1210 | return; | 1109 | return; |
1211 | } | 1110 | } |
1212 | grp->out_of_sequence = 0; | 1111 | grp->out_of_sequence = 0; |
1213 | ch->th_seq_num = header->th_seq_num; | 1112 | ch->th_seq_num = header->th_seq_num; |
1214 | 1113 | ||
1215 | if (do_debug_data) | 1114 | CTCM_PR_DBGDATA("ctcmpc: %s() FromVTAM_th_seq=%08x\n", |
1216 | ctcm_pr_debug("ctcmpc: %s() FromVTAM_th_seq=%08x\n", | 1115 | __func__, ch->th_seq_num); |
1217 | __FUNCTION__, ch->th_seq_num); | ||
1218 | 1116 | ||
1219 | if (unlikely(fsm_getstate(grp->fsm) != MPCG_STATE_READY)) | 1117 | if (unlikely(fsm_getstate(grp->fsm) != MPCG_STATE_READY)) |
1220 | goto done; | 1118 | goto done; |
1221 | pdu_last_seen = 0; | 1119 | pdu_last_seen = 0; |
1222 | while ((pskb->len > 0) && !pdu_last_seen) { | 1120 | while ((pskb->len > 0) && !pdu_last_seen) { |
1223 | curr_pdu = (struct pdu *)pskb->data; | 1121 | curr_pdu = (struct pdu *)pskb->data; |
1224 | if (do_debug_data) { | 1122 | |
1225 | ctcm_pr_debug("ctcm: %s() pdu_header\n", | 1123 | CTCM_PR_DBGDATA("%s: pdu_header\n", __func__); |
1226 | __FUNCTION__); | 1124 | CTCM_D3_DUMP((char *)pskb->data, PDU_HEADER_LENGTH); |
1227 | ctcmpc_dumpit((char *)pskb->data, | 1125 | CTCM_PR_DBGDATA("%s: pskb len: %04x \n", |
1228 | PDU_HEADER_LENGTH); | 1126 | __func__, pskb->len); |
1229 | ctcm_pr_debug("ctcm: %s() pskb len: %04x \n", | 1127 | |
1230 | __FUNCTION__, pskb->len); | ||
1231 | } | ||
1232 | skb_pull(pskb, PDU_HEADER_LENGTH); | 1128 | skb_pull(pskb, PDU_HEADER_LENGTH); |
1233 | 1129 | ||
1234 | if (curr_pdu->pdu_flag & PDU_LAST) | 1130 | if (curr_pdu->pdu_flag & PDU_LAST) |
@@ -1239,46 +1135,39 @@ static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb) | |||
1239 | pskb->protocol = htons(ETH_P_SNA_DIX); | 1135 | pskb->protocol = htons(ETH_P_SNA_DIX); |
1240 | 1136 | ||
1241 | if ((pskb->len <= 0) || (pskb->len > ch->max_bufsize)) { | 1137 | if ((pskb->len <= 0) || (pskb->len > ch->max_bufsize)) { |
1242 | printk(KERN_INFO | 1138 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, |
1243 | "%s Illegal packet size %d " | 1139 | "%s(%s): Dropping packet with " |
1244 | "received " | 1140 | "illegal siize %d", |
1245 | "dropping\n", dev->name, | 1141 | CTCM_FUNTAIL, dev->name, pskb->len); |
1246 | pskb->len); | 1142 | |
1247 | priv->stats.rx_dropped++; | 1143 | priv->stats.rx_dropped++; |
1248 | priv->stats.rx_length_errors++; | 1144 | priv->stats.rx_length_errors++; |
1249 | goto done; | 1145 | goto done; |
1250 | } | 1146 | } |
1251 | skb_reset_mac_header(pskb); | 1147 | skb_reset_mac_header(pskb); |
1252 | new_len = curr_pdu->pdu_offset; | 1148 | new_len = curr_pdu->pdu_offset; |
1253 | if (do_debug_data) | 1149 | CTCM_PR_DBGDATA("%s: new_len: %04x \n", |
1254 | ctcm_pr_debug("ctcmpc: %s() new_len: %04x \n", | 1150 | __func__, new_len); |
1255 | __FUNCTION__, new_len); | ||
1256 | if ((new_len == 0) || (new_len > pskb->len)) { | 1151 | if ((new_len == 0) || (new_len > pskb->len)) { |
1257 | /* should never happen */ | 1152 | /* should never happen */ |
1258 | /* pskb len must be hosed...bail out */ | 1153 | /* pskb len must be hosed...bail out */ |
1259 | printk(KERN_INFO | 1154 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, |
1260 | "ctcmpc: %s(): invalid pdu" | 1155 | "%s(%s): non valid pdu_offset: %04x", |
1261 | " offset of %04x - data may be" | 1156 | /* "data may be lost", */ |
1262 | "lost\n", __FUNCTION__, new_len); | 1157 | CTCM_FUNTAIL, dev->name, new_len); |
1263 | goto done; | 1158 | goto done; |
1264 | } | 1159 | } |
1265 | skb = __dev_alloc_skb(new_len+4, GFP_ATOMIC); | 1160 | skb = __dev_alloc_skb(new_len+4, GFP_ATOMIC); |
1266 | 1161 | ||
1267 | if (!skb) { | 1162 | if (!skb) { |
1268 | printk(KERN_INFO | 1163 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, |
1269 | "ctcm: %s Out of memory in " | 1164 | "%s(%s): MEMORY allocation error", |
1270 | "%s()- request-len:%04x \n", | 1165 | CTCM_FUNTAIL, dev->name); |
1271 | dev->name, | ||
1272 | __FUNCTION__, | ||
1273 | new_len+4); | ||
1274 | priv->stats.rx_dropped++; | 1166 | priv->stats.rx_dropped++; |
1275 | fsm_event(grp->fsm, | 1167 | fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); |
1276 | MPCG_EVENT_INOP, dev); | ||
1277 | goto done; | 1168 | goto done; |
1278 | } | 1169 | } |
1279 | 1170 | memcpy(skb_put(skb, new_len), pskb->data, new_len); | |
1280 | memcpy(skb_put(skb, new_len), | ||
1281 | pskb->data, new_len); | ||
1282 | 1171 | ||
1283 | skb_reset_mac_header(skb); | 1172 | skb_reset_mac_header(skb); |
1284 | skb->dev = pskb->dev; | 1173 | skb->dev = pskb->dev; |
@@ -1287,17 +1176,14 @@ static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb) | |||
1287 | *((__u32 *) skb_push(skb, 4)) = ch->pdu_seq; | 1176 | *((__u32 *) skb_push(skb, 4)) = ch->pdu_seq; |
1288 | ch->pdu_seq++; | 1177 | ch->pdu_seq++; |
1289 | 1178 | ||
1290 | if (do_debug_data) | ||
1291 | ctcm_pr_debug("%s: ToDCM_pdu_seq= %08x\n", | ||
1292 | __FUNCTION__, ch->pdu_seq); | ||
1293 | |||
1294 | ctcm_pr_debug("ctcm: %s() skb:%0lx " | ||
1295 | "skb len: %d \n", __FUNCTION__, | ||
1296 | (unsigned long)skb, skb->len); | ||
1297 | if (do_debug_data) { | 1179 | if (do_debug_data) { |
1298 | ctcm_pr_debug("ctcmpc: %s() up to 32 bytes" | 1180 | ctcm_pr_debug("%s: ToDCM_pdu_seq= %08x\n", |
1299 | " of pdu_data sent\n", | 1181 | __func__, ch->pdu_seq); |
1300 | __FUNCTION__); | 1182 | ctcm_pr_debug("%s: skb:%0lx " |
1183 | "skb len: %d \n", __func__, | ||
1184 | (unsigned long)skb, skb->len); | ||
1185 | ctcm_pr_debug("%s: up to 32 bytes " | ||
1186 | "of pdu_data sent\n", __func__); | ||
1301 | ctcmpc_dump32((char *)skb->data, skb->len); | 1187 | ctcmpc_dump32((char *)skb->data, skb->len); |
1302 | } | 1188 | } |
1303 | 1189 | ||
@@ -1316,8 +1202,8 @@ static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb) | |||
1316 | mpcginfo->ch = ch; | 1202 | mpcginfo->ch = ch; |
1317 | mpcginfo->th = header; | 1203 | mpcginfo->th = header; |
1318 | mpcginfo->skb = pskb; | 1204 | mpcginfo->skb = pskb; |
1319 | ctcm_pr_debug("ctcmpc: %s() Not PDU - may be control pkt\n", | 1205 | CTCM_PR_DEBUG("%s: Not PDU - may be control pkt\n", |
1320 | __FUNCTION__); | 1206 | __func__); |
1321 | /* it's a sweep? */ | 1207 | /* it's a sweep? */ |
1322 | sweep = (struct th_sweep *)pskb->data; | 1208 | sweep = (struct th_sweep *)pskb->data; |
1323 | mpcginfo->sweep = sweep; | 1209 | mpcginfo->sweep = sweep; |
@@ -1333,8 +1219,9 @@ static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb) | |||
1333 | } else if (header->th_blk_flag == TH_DISCONTACT) | 1219 | } else if (header->th_blk_flag == TH_DISCONTACT) |
1334 | fsm_event(grp->fsm, MPCG_EVENT_DISCONC, mpcginfo); | 1220 | fsm_event(grp->fsm, MPCG_EVENT_DISCONC, mpcginfo); |
1335 | else if (header->th_seq_num != 0) { | 1221 | else if (header->th_seq_num != 0) { |
1336 | printk(KERN_INFO "%s unexpected packet" | 1222 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, |
1337 | " expected control pkt\n", dev->name); | 1223 | "%s(%s): control pkt expected\n", |
1224 | CTCM_FUNTAIL, dev->name); | ||
1338 | priv->stats.rx_dropped++; | 1225 | priv->stats.rx_dropped++; |
1339 | /* mpcginfo only used for non-data transfers */ | 1226 | /* mpcginfo only used for non-data transfers */ |
1340 | kfree(mpcginfo); | 1227 | kfree(mpcginfo); |
@@ -1347,13 +1234,12 @@ done: | |||
1347 | dev_kfree_skb_any(pskb); | 1234 | dev_kfree_skb_any(pskb); |
1348 | if (sendrc == NET_RX_DROP) { | 1235 | if (sendrc == NET_RX_DROP) { |
1349 | printk(KERN_WARNING "%s %s() NETWORK BACKLOG EXCEEDED" | 1236 | printk(KERN_WARNING "%s %s() NETWORK BACKLOG EXCEEDED" |
1350 | " - PACKET DROPPED\n", dev->name, __FUNCTION__); | 1237 | " - PACKET DROPPED\n", dev->name, __func__); |
1351 | fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); | 1238 | fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); |
1352 | } | 1239 | } |
1353 | 1240 | ||
1354 | if (do_debug) | 1241 | CTCM_PR_DEBUG("exit %s: %s: ch=0x%p id=%s\n", |
1355 | ctcm_pr_debug("ctcmpc exit : %s %s(): ch=0x%p id=%s\n", | 1242 | __func__, dev->name, ch, ch->id); |
1356 | dev->name, __FUNCTION__, ch, ch->id); | ||
1357 | } | 1243 | } |
1358 | 1244 | ||
1359 | /** | 1245 | /** |
@@ -1366,15 +1252,14 @@ done: | |||
1366 | */ | 1252 | */ |
1367 | void ctcmpc_bh(unsigned long thischan) | 1253 | void ctcmpc_bh(unsigned long thischan) |
1368 | { | 1254 | { |
1369 | struct channel *ch = (struct channel *)thischan; | 1255 | struct channel *ch = (struct channel *)thischan; |
1370 | struct sk_buff *skb; | 1256 | struct sk_buff *skb; |
1371 | struct net_device *dev = ch->netdev; | 1257 | struct net_device *dev = ch->netdev; |
1372 | struct ctcm_priv *priv = dev->priv; | 1258 | struct ctcm_priv *priv = dev->priv; |
1373 | struct mpc_group *grp = priv->mpcg; | 1259 | struct mpc_group *grp = priv->mpcg; |
1374 | 1260 | ||
1375 | if (do_debug) | 1261 | CTCM_PR_DEBUG("%s cp:%i enter: %s() %s\n", |
1376 | ctcm_pr_debug("%s cp:%i enter: %s() %s\n", | 1262 | dev->name, smp_processor_id(), __func__, ch->id); |
1377 | dev->name, smp_processor_id(), __FUNCTION__, ch->id); | ||
1378 | /* caller has requested driver to throttle back */ | 1263 | /* caller has requested driver to throttle back */ |
1379 | while ((fsm_getstate(grp->fsm) != MPCG_STATE_FLOWC) && | 1264 | while ((fsm_getstate(grp->fsm) != MPCG_STATE_FLOWC) && |
1380 | (skb = skb_dequeue(&ch->io_queue))) { | 1265 | (skb = skb_dequeue(&ch->io_queue))) { |
@@ -1390,9 +1275,8 @@ void ctcmpc_bh(unsigned long thischan) | |||
1390 | if (skb == skb_peek(&ch->io_queue)) | 1275 | if (skb == skb_peek(&ch->io_queue)) |
1391 | break; | 1276 | break; |
1392 | } | 1277 | } |
1393 | if (do_debug) | 1278 | CTCM_PR_DEBUG("exit %s: %s: ch=0x%p id=%s\n", |
1394 | ctcm_pr_debug("ctcmpc exit : %s %s(): ch=0x%p id=%s\n", | 1279 | __func__, dev->name, ch, ch->id); |
1395 | dev->name, __FUNCTION__, ch, ch->id); | ||
1396 | return; | 1280 | return; |
1397 | } | 1281 | } |
1398 | 1282 | ||
@@ -1403,16 +1287,16 @@ struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv) | |||
1403 | { | 1287 | { |
1404 | struct mpc_group *grp; | 1288 | struct mpc_group *grp; |
1405 | 1289 | ||
1406 | CTCM_DBF_TEXT(MPC_SETUP, 3, __FUNCTION__); | 1290 | CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO, |
1291 | "Enter %s(%p)", CTCM_FUNTAIL, priv); | ||
1407 | 1292 | ||
1408 | grp = kzalloc(sizeof(struct mpc_group), GFP_KERNEL); | 1293 | grp = kzalloc(sizeof(struct mpc_group), GFP_KERNEL); |
1409 | if (grp == NULL) | 1294 | if (grp == NULL) |
1410 | return NULL; | 1295 | return NULL; |
1411 | 1296 | ||
1412 | grp->fsm = | 1297 | grp->fsm = init_fsm("mpcg", mpcg_state_names, mpcg_event_names, |
1413 | init_fsm("mpcg", mpcg_state_names, mpcg_event_names, | 1298 | MPCG_NR_STATES, MPCG_NR_EVENTS, mpcg_fsm, |
1414 | MPCG_NR_STATES, MPCG_NR_EVENTS, mpcg_fsm, | 1299 | mpcg_fsm_len, GFP_KERNEL); |
1415 | mpcg_fsm_len, GFP_KERNEL); | ||
1416 | if (grp->fsm == NULL) { | 1300 | if (grp->fsm == NULL) { |
1417 | kfree(grp); | 1301 | kfree(grp); |
1418 | return NULL; | 1302 | return NULL; |
@@ -1424,7 +1308,6 @@ struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv) | |||
1424 | grp->xid_skb = | 1308 | grp->xid_skb = |
1425 | __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC | GFP_DMA); | 1309 | __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC | GFP_DMA); |
1426 | if (grp->xid_skb == NULL) { | 1310 | if (grp->xid_skb == NULL) { |
1427 | printk(KERN_INFO "Couldn't alloc MPCgroup xid_skb\n"); | ||
1428 | kfree_fsm(grp->fsm); | 1311 | kfree_fsm(grp->fsm); |
1429 | kfree(grp); | 1312 | kfree(grp); |
1430 | return NULL; | 1313 | return NULL; |
@@ -1435,7 +1318,7 @@ struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv) | |||
1435 | memcpy(skb_put(grp->xid_skb, TH_HEADER_LENGTH), | 1318 | memcpy(skb_put(grp->xid_skb, TH_HEADER_LENGTH), |
1436 | &thnorm, TH_HEADER_LENGTH); | 1319 | &thnorm, TH_HEADER_LENGTH); |
1437 | 1320 | ||
1438 | grp->xid = (struct xid2 *) skb_tail_pointer(grp->xid_skb); | 1321 | grp->xid = (struct xid2 *)skb_tail_pointer(grp->xid_skb); |
1439 | memcpy(skb_put(grp->xid_skb, XID2_LENGTH), &init_xid, XID2_LENGTH); | 1322 | memcpy(skb_put(grp->xid_skb, XID2_LENGTH), &init_xid, XID2_LENGTH); |
1440 | grp->xid->xid2_adj_id = jiffies | 0xfff00000; | 1323 | grp->xid->xid2_adj_id = jiffies | 0xfff00000; |
1441 | grp->xid->xid2_sender_id = jiffies; | 1324 | grp->xid->xid2_sender_id = jiffies; |
@@ -1446,7 +1329,6 @@ struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv) | |||
1446 | grp->rcvd_xid_skb = | 1329 | grp->rcvd_xid_skb = |
1447 | __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA); | 1330 | __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA); |
1448 | if (grp->rcvd_xid_skb == NULL) { | 1331 | if (grp->rcvd_xid_skb == NULL) { |
1449 | printk(KERN_INFO "Couldn't alloc MPCgroup rcvd_xid_skb\n"); | ||
1450 | kfree_fsm(grp->fsm); | 1332 | kfree_fsm(grp->fsm); |
1451 | dev_kfree_skb(grp->xid_skb); | 1333 | dev_kfree_skb(grp->xid_skb); |
1452 | kfree(grp); | 1334 | kfree(grp); |
@@ -1492,32 +1374,27 @@ static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg) | |||
1492 | int rc = 0; | 1374 | int rc = 0; |
1493 | struct channel *wch, *rch; | 1375 | struct channel *wch, *rch; |
1494 | 1376 | ||
1495 | if (dev == NULL) { | 1377 | BUG_ON(dev == NULL); |
1496 | printk(KERN_INFO "%s() dev=NULL\n", __FUNCTION__); | 1378 | CTCM_PR_DEBUG("Enter %s: %s\n", __func__, dev->name); |
1497 | return; | ||
1498 | } | ||
1499 | |||
1500 | ctcm_pr_debug("ctcmpc enter: %s %s()\n", dev->name, __FUNCTION__); | ||
1501 | 1379 | ||
1502 | priv = dev->priv; | 1380 | priv = dev->priv; |
1503 | grp = priv->mpcg; | 1381 | grp = priv->mpcg; |
1504 | grp->flow_off_called = 0; | 1382 | grp->flow_off_called = 0; |
1505 | |||
1506 | fsm_deltimer(&grp->timer); | 1383 | fsm_deltimer(&grp->timer); |
1507 | |||
1508 | if (grp->channels_terminating) | 1384 | if (grp->channels_terminating) |
1509 | goto done; | 1385 | return; |
1510 | 1386 | ||
1511 | grp->channels_terminating = 1; | 1387 | grp->channels_terminating = 1; |
1512 | |||
1513 | grp->saved_state = fsm_getstate(grp->fsm); | 1388 | grp->saved_state = fsm_getstate(grp->fsm); |
1514 | fsm_newstate(grp->fsm, MPCG_STATE_INOP); | 1389 | fsm_newstate(grp->fsm, MPCG_STATE_INOP); |
1515 | if (grp->saved_state > MPCG_STATE_XID7INITF) | 1390 | if (grp->saved_state > MPCG_STATE_XID7INITF) |
1516 | printk(KERN_NOTICE "%s:MPC GROUP INOPERATIVE\n", dev->name); | 1391 | CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE, |
1392 | "%s(%s): MPC GROUP INOPERATIVE", | ||
1393 | CTCM_FUNTAIL, dev->name); | ||
1517 | if ((grp->saved_state != MPCG_STATE_RESET) || | 1394 | if ((grp->saved_state != MPCG_STATE_RESET) || |
1518 | /* dealloc_channel has been called */ | 1395 | /* dealloc_channel has been called */ |
1519 | ((grp->saved_state == MPCG_STATE_RESET) && | 1396 | ((grp->saved_state == MPCG_STATE_RESET) && |
1520 | (grp->port_persist == 0))) | 1397 | (grp->port_persist == 0))) |
1521 | fsm_deltimer(&priv->restart_timer); | 1398 | fsm_deltimer(&priv->restart_timer); |
1522 | 1399 | ||
1523 | wch = priv->channel[WRITE]; | 1400 | wch = priv->channel[WRITE]; |
@@ -1567,29 +1444,24 @@ static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg) | |||
1567 | /* This can result in INOP of VTAM PU due to halting of */ | 1444 | /* This can result in INOP of VTAM PU due to halting of */ |
1568 | /* outstanding IO which causes a sense to be returned */ | 1445 | /* outstanding IO which causes a sense to be returned */ |
1569 | /* Only about 3 senses are allowed and then IOS/VTAM will*/ | 1446 | /* Only about 3 senses are allowed and then IOS/VTAM will*/ |
1570 | /* ebcome unreachable without manual intervention */ | 1447 | /* become unreachable without manual intervention */ |
1571 | if ((grp->port_persist == 1) || (grp->alloc_called)) { | 1448 | if ((grp->port_persist == 1) || (grp->alloc_called)) { |
1572 | grp->alloc_called = 0; | 1449 | grp->alloc_called = 0; |
1573 | fsm_deltimer(&priv->restart_timer); | 1450 | fsm_deltimer(&priv->restart_timer); |
1574 | fsm_addtimer(&priv->restart_timer, | 1451 | fsm_addtimer(&priv->restart_timer, 500, DEV_EVENT_RESTART, dev); |
1575 | 500, | ||
1576 | DEV_EVENT_RESTART, | ||
1577 | dev); | ||
1578 | fsm_newstate(grp->fsm, MPCG_STATE_RESET); | 1452 | fsm_newstate(grp->fsm, MPCG_STATE_RESET); |
1579 | if (grp->saved_state > MPCG_STATE_XID7INITF) | 1453 | if (grp->saved_state > MPCG_STATE_XID7INITF) |
1580 | printk(KERN_NOTICE "%s:MPC GROUP RECOVERY SCHEDULED\n", | 1454 | CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ALWAYS, |
1581 | dev->name); | 1455 | "%s(%s): MPC GROUP RECOVERY SCHEDULED", |
1456 | CTCM_FUNTAIL, dev->name); | ||
1582 | } else { | 1457 | } else { |
1583 | fsm_deltimer(&priv->restart_timer); | 1458 | fsm_deltimer(&priv->restart_timer); |
1584 | fsm_addtimer(&priv->restart_timer, 500, DEV_EVENT_STOP, dev); | 1459 | fsm_addtimer(&priv->restart_timer, 500, DEV_EVENT_STOP, dev); |
1585 | fsm_newstate(grp->fsm, MPCG_STATE_RESET); | 1460 | fsm_newstate(grp->fsm, MPCG_STATE_RESET); |
1586 | printk(KERN_NOTICE "%s:MPC GROUP RECOVERY NOT ATTEMPTED\n", | 1461 | CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ALWAYS, |
1587 | dev->name); | 1462 | "%s(%s): NO MPC GROUP RECOVERY ATTEMPTED", |
1463 | CTCM_FUNTAIL, dev->name); | ||
1588 | } | 1464 | } |
1589 | |||
1590 | done: | ||
1591 | ctcm_pr_debug("ctcmpc exit:%s %s()\n", dev->name, __FUNCTION__); | ||
1592 | return; | ||
1593 | } | 1465 | } |
1594 | 1466 | ||
1595 | /** | 1467 | /** |
@@ -1609,12 +1481,7 @@ static void mpc_action_timeout(fsm_instance *fi, int event, void *arg) | |||
1609 | struct channel *wch; | 1481 | struct channel *wch; |
1610 | struct channel *rch; | 1482 | struct channel *rch; |
1611 | 1483 | ||
1612 | CTCM_DBF_TEXT(MPC_TRACE, 6, __FUNCTION__); | 1484 | BUG_ON(dev == NULL); |
1613 | |||
1614 | if (dev == NULL) { | ||
1615 | CTCM_DBF_TEXT_(MPC_ERROR, 4, "%s: dev=NULL\n", __FUNCTION__); | ||
1616 | return; | ||
1617 | } | ||
1618 | 1485 | ||
1619 | priv = dev->priv; | 1486 | priv = dev->priv; |
1620 | grp = priv->mpcg; | 1487 | grp = priv->mpcg; |
@@ -1633,8 +1500,9 @@ static void mpc_action_timeout(fsm_instance *fi, int event, void *arg) | |||
1633 | fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); | 1500 | fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); |
1634 | } | 1501 | } |
1635 | 1502 | ||
1636 | CTCM_DBF_TEXT_(MPC_TRACE, 6, "%s: dev=%s exit", | 1503 | CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG, |
1637 | __FUNCTION__, dev->name); | 1504 | "%s: dev=%s exit", |
1505 | CTCM_FUNTAIL, dev->name); | ||
1638 | return; | 1506 | return; |
1639 | } | 1507 | } |
1640 | 1508 | ||
@@ -1646,25 +1514,25 @@ void mpc_action_discontact(fsm_instance *fi, int event, void *arg) | |||
1646 | { | 1514 | { |
1647 | struct mpcg_info *mpcginfo = arg; | 1515 | struct mpcg_info *mpcginfo = arg; |
1648 | struct channel *ch = mpcginfo->ch; | 1516 | struct channel *ch = mpcginfo->ch; |
1649 | struct net_device *dev = ch->netdev; | 1517 | struct net_device *dev; |
1650 | struct ctcm_priv *priv = dev->priv; | 1518 | struct ctcm_priv *priv; |
1651 | struct mpc_group *grp = priv->mpcg; | 1519 | struct mpc_group *grp; |
1652 | 1520 | ||
1653 | if (ch == NULL) { | 1521 | if (ch) { |
1654 | printk(KERN_INFO "%s() ch=NULL\n", __FUNCTION__); | 1522 | dev = ch->netdev; |
1655 | return; | 1523 | if (dev) { |
1656 | } | 1524 | priv = dev->priv; |
1657 | if (ch->netdev == NULL) { | 1525 | if (priv) { |
1658 | printk(KERN_INFO "%s() dev=NULL\n", __FUNCTION__); | 1526 | CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE, |
1659 | return; | 1527 | "%s: %s: %s\n", |
1528 | CTCM_FUNTAIL, dev->name, ch->id); | ||
1529 | grp = priv->mpcg; | ||
1530 | grp->send_qllc_disc = 1; | ||
1531 | fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); | ||
1532 | } | ||
1533 | } | ||
1660 | } | 1534 | } |
1661 | 1535 | ||
1662 | ctcm_pr_debug("ctcmpc enter: %s %s()\n", dev->name, __FUNCTION__); | ||
1663 | |||
1664 | grp->send_qllc_disc = 1; | ||
1665 | fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); | ||
1666 | |||
1667 | ctcm_pr_debug("ctcmpc exit: %s %s()\n", dev->name, __FUNCTION__); | ||
1668 | return; | 1536 | return; |
1669 | } | 1537 | } |
1670 | 1538 | ||
@@ -1675,26 +1543,9 @@ void mpc_action_discontact(fsm_instance *fi, int event, void *arg) | |||
1675 | */ | 1543 | */ |
1676 | void mpc_action_send_discontact(unsigned long thischan) | 1544 | void mpc_action_send_discontact(unsigned long thischan) |
1677 | { | 1545 | { |
1678 | struct channel *ch; | 1546 | int rc; |
1679 | struct net_device *dev; | 1547 | struct channel *ch = (struct channel *)thischan; |
1680 | struct ctcm_priv *priv; | 1548 | unsigned long saveflags = 0; |
1681 | struct mpc_group *grp; | ||
1682 | int rc = 0; | ||
1683 | unsigned long saveflags; | ||
1684 | |||
1685 | ch = (struct channel *)thischan; | ||
1686 | dev = ch->netdev; | ||
1687 | priv = dev->priv; | ||
1688 | grp = priv->mpcg; | ||
1689 | |||
1690 | ctcm_pr_info("ctcmpc: %s cp:%i enter: %s() GrpState:%s ChState:%s\n", | ||
1691 | dev->name, | ||
1692 | smp_processor_id(), | ||
1693 | __FUNCTION__, | ||
1694 | fsm_getstate_str(grp->fsm), | ||
1695 | fsm_getstate_str(ch->fsm)); | ||
1696 | saveflags = 0; /* avoids compiler warning with | ||
1697 | spin_unlock_irqrestore */ | ||
1698 | 1549 | ||
1699 | spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); | 1550 | spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); |
1700 | rc = ccw_device_start(ch->cdev, &ch->ccw[15], | 1551 | rc = ccw_device_start(ch->cdev, &ch->ccw[15], |
@@ -1702,16 +1553,9 @@ void mpc_action_send_discontact(unsigned long thischan) | |||
1702 | spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); | 1553 | spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); |
1703 | 1554 | ||
1704 | if (rc != 0) { | 1555 | if (rc != 0) { |
1705 | ctcm_pr_info("ctcmpc: %s() ch:%s IO failed \n", | 1556 | ctcm_ccw_check_rc(ch, rc, (char *)__func__); |
1706 | __FUNCTION__, | ||
1707 | ch->id); | ||
1708 | ctcm_ccw_check_rc(ch, rc, "send discontact"); | ||
1709 | /* Not checking return code value here */ | ||
1710 | /* Making best effort to notify partner*/ | ||
1711 | /* that MPC Group is going down */ | ||
1712 | } | 1557 | } |
1713 | 1558 | ||
1714 | ctcm_pr_debug("ctcmpc exit: %s %s()\n", dev->name, __FUNCTION__); | ||
1715 | return; | 1559 | return; |
1716 | } | 1560 | } |
1717 | 1561 | ||
@@ -1723,49 +1567,50 @@ void mpc_action_send_discontact(unsigned long thischan) | |||
1723 | */ | 1567 | */ |
1724 | static int mpc_validate_xid(struct mpcg_info *mpcginfo) | 1568 | static int mpc_validate_xid(struct mpcg_info *mpcginfo) |
1725 | { | 1569 | { |
1726 | struct channel *ch = mpcginfo->ch; | 1570 | struct channel *ch = mpcginfo->ch; |
1727 | struct net_device *dev = ch->netdev; | 1571 | struct net_device *dev = ch->netdev; |
1728 | struct ctcm_priv *priv = dev->priv; | 1572 | struct ctcm_priv *priv = dev->priv; |
1729 | struct mpc_group *grp = priv->mpcg; | 1573 | struct mpc_group *grp = priv->mpcg; |
1730 | struct xid2 *xid = mpcginfo->xid; | 1574 | struct xid2 *xid = mpcginfo->xid; |
1731 | int failed = 0; | 1575 | int rc = 0; |
1732 | int rc = 0; | 1576 | __u64 our_id = 0; |
1733 | __u64 our_id, their_id = 0; | 1577 | __u64 their_id = 0; |
1734 | int len; | 1578 | int len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH; |
1735 | |||
1736 | len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH; | ||
1737 | 1579 | ||
1738 | ctcm_pr_debug("ctcmpc enter: %s()\n", __FUNCTION__); | 1580 | CTCM_PR_DEBUG("Enter %s: xid=%p\n", __func__, xid); |
1739 | 1581 | ||
1740 | if (mpcginfo->xid == NULL) { | 1582 | if (xid == NULL) { |
1741 | printk(KERN_INFO "%s() xid=NULL\n", __FUNCTION__); | ||
1742 | rc = 1; | 1583 | rc = 1; |
1743 | goto done; | 1584 | /* XID REJECTED: xid == NULL */ |
1585 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, | ||
1586 | "%s(%s): xid = NULL", | ||
1587 | CTCM_FUNTAIL, ch->id); | ||
1588 | goto done; | ||
1744 | } | 1589 | } |
1745 | 1590 | ||
1746 | ctcm_pr_debug("ctcmpc : %s xid received()\n", __FUNCTION__); | 1591 | CTCM_D3_DUMP((char *)xid, XID2_LENGTH); |
1747 | ctcmpc_dumpit((char *)mpcginfo->xid, XID2_LENGTH); | ||
1748 | 1592 | ||
1749 | /*the received direction should be the opposite of ours */ | 1593 | /*the received direction should be the opposite of ours */ |
1750 | if (((CHANNEL_DIRECTION(ch->flags) == READ) ? XID2_WRITE_SIDE : | 1594 | if (((CHANNEL_DIRECTION(ch->flags) == READ) ? XID2_WRITE_SIDE : |
1751 | XID2_READ_SIDE) != xid->xid2_dlc_type) { | 1595 | XID2_READ_SIDE) != xid->xid2_dlc_type) { |
1752 | failed = 1; | 1596 | rc = 2; |
1753 | printk(KERN_INFO "ctcmpc:%s() XID REJECTED - READ-WRITE CH " | 1597 | /* XID REJECTED: r/w channel pairing mismatch */ |
1754 | "Pairing Invalid \n", __FUNCTION__); | 1598 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, |
1599 | "%s(%s): r/w channel pairing mismatch", | ||
1600 | CTCM_FUNTAIL, ch->id); | ||
1601 | goto done; | ||
1755 | } | 1602 | } |
1756 | 1603 | ||
1757 | if (xid->xid2_dlc_type == XID2_READ_SIDE) { | 1604 | if (xid->xid2_dlc_type == XID2_READ_SIDE) { |
1758 | ctcm_pr_debug("ctcmpc: %s(): grpmaxbuf:%d xid2buflen:%d\n", | 1605 | CTCM_PR_DEBUG("%s: grpmaxbuf:%d xid2buflen:%d\n", __func__, |
1759 | __FUNCTION__, grp->group_max_buflen, | 1606 | grp->group_max_buflen, xid->xid2_buf_len); |
1760 | xid->xid2_buf_len); | ||
1761 | 1607 | ||
1762 | if (grp->group_max_buflen == 0 || | 1608 | if (grp->group_max_buflen == 0 || grp->group_max_buflen > |
1763 | grp->group_max_buflen > xid->xid2_buf_len - len) | 1609 | xid->xid2_buf_len - len) |
1764 | grp->group_max_buflen = xid->xid2_buf_len - len; | 1610 | grp->group_max_buflen = xid->xid2_buf_len - len; |
1765 | } | 1611 | } |
1766 | 1612 | ||
1767 | 1613 | if (grp->saved_xid2 == NULL) { | |
1768 | if (grp->saved_xid2 == NULL) { | ||
1769 | grp->saved_xid2 = | 1614 | grp->saved_xid2 = |
1770 | (struct xid2 *)skb_tail_pointer(grp->rcvd_xid_skb); | 1615 | (struct xid2 *)skb_tail_pointer(grp->rcvd_xid_skb); |
1771 | 1616 | ||
@@ -1786,49 +1631,54 @@ static int mpc_validate_xid(struct mpcg_info *mpcginfo) | |||
1786 | /* lower id assume the xside role */ | 1631 | /* lower id assume the xside role */ |
1787 | if (our_id < their_id) { | 1632 | if (our_id < their_id) { |
1788 | grp->roll = XSIDE; | 1633 | grp->roll = XSIDE; |
1789 | ctcm_pr_debug("ctcmpc :%s() WE HAVE LOW ID-" | 1634 | CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE, |
1790 | "TAKE XSIDE\n", __FUNCTION__); | 1635 | "%s(%s): WE HAVE LOW ID - TAKE XSIDE", |
1636 | CTCM_FUNTAIL, ch->id); | ||
1791 | } else { | 1637 | } else { |
1792 | grp->roll = YSIDE; | 1638 | grp->roll = YSIDE; |
1793 | ctcm_pr_debug("ctcmpc :%s() WE HAVE HIGH ID-" | 1639 | CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE, |
1794 | "TAKE YSIDE\n", __FUNCTION__); | 1640 | "%s(%s): WE HAVE HIGH ID - TAKE YSIDE", |
1641 | CTCM_FUNTAIL, ch->id); | ||
1795 | } | 1642 | } |
1796 | 1643 | ||
1797 | } else { | 1644 | } else { |
1798 | if (xid->xid2_flag4 != grp->saved_xid2->xid2_flag4) { | 1645 | if (xid->xid2_flag4 != grp->saved_xid2->xid2_flag4) { |
1799 | failed = 1; | 1646 | rc = 3; |
1800 | printk(KERN_INFO "%s XID REJECTED - XID Flag Byte4\n", | 1647 | /* XID REJECTED: xid flag byte4 mismatch */ |
1801 | __FUNCTION__); | 1648 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, |
1649 | "%s(%s): xid flag byte4 mismatch", | ||
1650 | CTCM_FUNTAIL, ch->id); | ||
1802 | } | 1651 | } |
1803 | if (xid->xid2_flag2 == 0x40) { | 1652 | if (xid->xid2_flag2 == 0x40) { |
1804 | failed = 1; | 1653 | rc = 4; |
1805 | printk(KERN_INFO "%s XID REJECTED - XID NOGOOD\n", | 1654 | /* XID REJECTED - xid NOGOOD */ |
1806 | __FUNCTION__); | 1655 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, |
1656 | "%s(%s): xid NOGOOD", | ||
1657 | CTCM_FUNTAIL, ch->id); | ||
1807 | } | 1658 | } |
1808 | if (xid->xid2_adj_id != grp->saved_xid2->xid2_adj_id) { | 1659 | if (xid->xid2_adj_id != grp->saved_xid2->xid2_adj_id) { |
1809 | failed = 1; | 1660 | rc = 5; |
1810 | printk(KERN_INFO "%s XID REJECTED - " | 1661 | /* XID REJECTED - Adjacent Station ID Mismatch */ |
1811 | "Adjacent Station ID Mismatch\n", | 1662 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, |
1812 | __FUNCTION__); | 1663 | "%s(%s): Adjacent Station ID Mismatch", |
1664 | CTCM_FUNTAIL, ch->id); | ||
1813 | } | 1665 | } |
1814 | if (xid->xid2_sender_id != grp->saved_xid2->xid2_sender_id) { | 1666 | if (xid->xid2_sender_id != grp->saved_xid2->xid2_sender_id) { |
1815 | failed = 1; | 1667 | rc = 6; |
1816 | printk(KERN_INFO "%s XID REJECTED - " | 1668 | /* XID REJECTED - Sender Address Mismatch */ |
1817 | "Sender Address Mismatch\n", __FUNCTION__); | 1669 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, |
1818 | 1670 | "%s(%s): Sender Address Mismatch", | |
1671 | CTCM_FUNTAIL, ch->id); | ||
1819 | } | 1672 | } |
1820 | } | 1673 | } |
1821 | 1674 | ||
1822 | if (failed) { | 1675 | done: |
1676 | if (rc) { | ||
1823 | ctcm_pr_info("ctcmpc : %s() failed\n", __FUNCTION__); | 1677 | ctcm_pr_info("ctcmpc : %s() failed\n", __FUNCTION__); |
1824 | priv->xid->xid2_flag2 = 0x40; | 1678 | priv->xid->xid2_flag2 = 0x40; |
1825 | grp->saved_xid2->xid2_flag2 = 0x40; | 1679 | grp->saved_xid2->xid2_flag2 = 0x40; |
1826 | rc = 1; | ||
1827 | } | 1680 | } |
1828 | 1681 | ||
1829 | done: | ||
1830 | |||
1831 | ctcm_pr_debug("ctcmpc exit: %s()\n", __FUNCTION__); | ||
1832 | return rc; | 1682 | return rc; |
1833 | } | 1683 | } |
1834 | 1684 | ||
@@ -1839,46 +1689,20 @@ done: | |||
1839 | static void mpc_action_side_xid(fsm_instance *fsm, void *arg, int side) | 1689 | static void mpc_action_side_xid(fsm_instance *fsm, void *arg, int side) |
1840 | { | 1690 | { |
1841 | struct channel *ch = arg; | 1691 | struct channel *ch = arg; |
1842 | struct ctcm_priv *priv; | ||
1843 | struct mpc_group *grp = NULL; | ||
1844 | struct net_device *dev = NULL; | ||
1845 | int rc = 0; | 1692 | int rc = 0; |
1846 | int gotlock = 0; | 1693 | int gotlock = 0; |
1847 | unsigned long saveflags = 0; /* avoids compiler warning with | 1694 | unsigned long saveflags = 0; /* avoids compiler warning with |
1848 | spin_unlock_irqrestore */ | 1695 | spin_unlock_irqrestore */ |
1849 | |||
1850 | if (ch == NULL) { | ||
1851 | printk(KERN_INFO "%s ch=NULL\n", __FUNCTION__); | ||
1852 | goto done; | ||
1853 | } | ||
1854 | |||
1855 | if (do_debug) | ||
1856 | ctcm_pr_debug("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n", | ||
1857 | __FUNCTION__, smp_processor_id(), ch, ch->id); | ||
1858 | |||
1859 | dev = ch->netdev; | ||
1860 | if (dev == NULL) { | ||
1861 | printk(KERN_INFO "%s dev=NULL\n", __FUNCTION__); | ||
1862 | goto done; | ||
1863 | } | ||
1864 | |||
1865 | priv = dev->priv; | ||
1866 | if (priv == NULL) { | ||
1867 | printk(KERN_INFO "%s priv=NULL\n", __FUNCTION__); | ||
1868 | goto done; | ||
1869 | } | ||
1870 | 1696 | ||
1871 | grp = priv->mpcg; | 1697 | CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n", |
1872 | if (grp == NULL) { | 1698 | __func__, smp_processor_id(), ch, ch->id); |
1873 | printk(KERN_INFO "%s grp=NULL\n", __FUNCTION__); | ||
1874 | goto done; | ||
1875 | } | ||
1876 | 1699 | ||
1877 | if (ctcm_checkalloc_buffer(ch)) | 1700 | if (ctcm_checkalloc_buffer(ch)) |
1878 | goto done; | 1701 | goto done; |
1879 | 1702 | ||
1880 | /* skb data-buffer referencing: */ | 1703 | /* |
1881 | 1704 | * skb data-buffer referencing: | |
1705 | */ | ||
1882 | ch->trans_skb->data = ch->trans_skb_data; | 1706 | ch->trans_skb->data = ch->trans_skb_data; |
1883 | skb_reset_tail_pointer(ch->trans_skb); | 1707 | skb_reset_tail_pointer(ch->trans_skb); |
1884 | ch->trans_skb->len = 0; | 1708 | ch->trans_skb->len = 0; |
@@ -1911,22 +1735,22 @@ static void mpc_action_side_xid(fsm_instance *fsm, void *arg, int side) | |||
1911 | ch->ccw[8].count = 0; | 1735 | ch->ccw[8].count = 0; |
1912 | ch->ccw[8].cda = 0x00; | 1736 | ch->ccw[8].cda = 0x00; |
1913 | 1737 | ||
1738 | if (!(ch->xid_th && ch->xid && ch->xid_id)) | ||
1739 | CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_INFO, | ||
1740 | "%s(%s): xid_th=%p, xid=%p, xid_id=%p", | ||
1741 | CTCM_FUNTAIL, ch->id, ch->xid_th, ch->xid, ch->xid_id); | ||
1742 | |||
1914 | if (side == XSIDE) { | 1743 | if (side == XSIDE) { |
1915 | /* mpc_action_xside_xid */ | 1744 | /* mpc_action_xside_xid */ |
1916 | if (ch->xid_th == NULL) { | 1745 | if (ch->xid_th == NULL) |
1917 | printk(KERN_INFO "%s ch->xid_th=NULL\n", __FUNCTION__); | 1746 | goto done; |
1918 | goto done; | ||
1919 | } | ||
1920 | ch->ccw[9].cmd_code = CCW_CMD_WRITE; | 1747 | ch->ccw[9].cmd_code = CCW_CMD_WRITE; |
1921 | ch->ccw[9].flags = CCW_FLAG_SLI | CCW_FLAG_CC; | 1748 | ch->ccw[9].flags = CCW_FLAG_SLI | CCW_FLAG_CC; |
1922 | ch->ccw[9].count = TH_HEADER_LENGTH; | 1749 | ch->ccw[9].count = TH_HEADER_LENGTH; |
1923 | ch->ccw[9].cda = virt_to_phys(ch->xid_th); | 1750 | ch->ccw[9].cda = virt_to_phys(ch->xid_th); |
1924 | 1751 | ||
1925 | if (ch->xid == NULL) { | 1752 | if (ch->xid == NULL) |
1926 | printk(KERN_INFO "%s ch->xid=NULL\n", __FUNCTION__); | 1753 | goto done; |
1927 | goto done; | ||
1928 | } | ||
1929 | |||
1930 | ch->ccw[10].cmd_code = CCW_CMD_WRITE; | 1754 | ch->ccw[10].cmd_code = CCW_CMD_WRITE; |
1931 | ch->ccw[10].flags = CCW_FLAG_SLI | CCW_FLAG_CC; | 1755 | ch->ccw[10].flags = CCW_FLAG_SLI | CCW_FLAG_CC; |
1932 | ch->ccw[10].count = XID2_LENGTH; | 1756 | ch->ccw[10].count = XID2_LENGTH; |
@@ -1956,28 +1780,22 @@ static void mpc_action_side_xid(fsm_instance *fsm, void *arg, int side) | |||
1956 | ch->ccw[10].count = XID2_LENGTH; | 1780 | ch->ccw[10].count = XID2_LENGTH; |
1957 | ch->ccw[10].cda = virt_to_phys(ch->rcvd_xid); | 1781 | ch->ccw[10].cda = virt_to_phys(ch->rcvd_xid); |
1958 | 1782 | ||
1959 | if (ch->xid_th == NULL) { | 1783 | if (ch->xid_th == NULL) |
1960 | printk(KERN_INFO "%s ch->xid_th=NULL\n", __FUNCTION__); | 1784 | goto done; |
1961 | goto done; | ||
1962 | } | ||
1963 | ch->ccw[11].cmd_code = CCW_CMD_WRITE; | 1785 | ch->ccw[11].cmd_code = CCW_CMD_WRITE; |
1964 | ch->ccw[11].flags = CCW_FLAG_SLI | CCW_FLAG_CC; | 1786 | ch->ccw[11].flags = CCW_FLAG_SLI | CCW_FLAG_CC; |
1965 | ch->ccw[11].count = TH_HEADER_LENGTH; | 1787 | ch->ccw[11].count = TH_HEADER_LENGTH; |
1966 | ch->ccw[11].cda = virt_to_phys(ch->xid_th); | 1788 | ch->ccw[11].cda = virt_to_phys(ch->xid_th); |
1967 | 1789 | ||
1968 | if (ch->xid == NULL) { | 1790 | if (ch->xid == NULL) |
1969 | printk(KERN_INFO "%s ch->xid=NULL\n", __FUNCTION__); | 1791 | goto done; |
1970 | goto done; | ||
1971 | } | ||
1972 | ch->ccw[12].cmd_code = CCW_CMD_WRITE; | 1792 | ch->ccw[12].cmd_code = CCW_CMD_WRITE; |
1973 | ch->ccw[12].flags = CCW_FLAG_SLI | CCW_FLAG_CC; | 1793 | ch->ccw[12].flags = CCW_FLAG_SLI | CCW_FLAG_CC; |
1974 | ch->ccw[12].count = XID2_LENGTH; | 1794 | ch->ccw[12].count = XID2_LENGTH; |
1975 | ch->ccw[12].cda = virt_to_phys(ch->xid); | 1795 | ch->ccw[12].cda = virt_to_phys(ch->xid); |
1976 | 1796 | ||
1977 | if (ch->xid_id == NULL) { | 1797 | if (ch->xid_id == NULL) |
1978 | printk(KERN_INFO "%s ch->xid_id=NULL\n", __FUNCTION__); | 1798 | goto done; |
1979 | goto done; | ||
1980 | } | ||
1981 | ch->ccw[13].cmd_code = CCW_CMD_WRITE; | 1799 | ch->ccw[13].cmd_code = CCW_CMD_WRITE; |
1982 | ch->ccw[13].cda = virt_to_phys(ch->xid_id); | 1800 | ch->ccw[13].cda = virt_to_phys(ch->xid_id); |
1983 | 1801 | ||
@@ -1990,12 +1808,11 @@ static void mpc_action_side_xid(fsm_instance *fsm, void *arg, int side) | |||
1990 | ch->ccw[14].count = 0; | 1808 | ch->ccw[14].count = 0; |
1991 | ch->ccw[14].cda = 0; | 1809 | ch->ccw[14].cda = 0; |
1992 | 1810 | ||
1993 | if (do_debug_ccw) | 1811 | CTCM_CCW_DUMP((char *)&ch->ccw[8], sizeof(struct ccw1) * 7); |
1994 | ctcmpc_dumpit((char *)&ch->ccw[8], sizeof(struct ccw1) * 7); | 1812 | CTCM_D3_DUMP((char *)ch->xid_th, TH_HEADER_LENGTH); |
1813 | CTCM_D3_DUMP((char *)ch->xid, XID2_LENGTH); | ||
1814 | CTCM_D3_DUMP((char *)ch->xid_id, 4); | ||
1995 | 1815 | ||
1996 | ctcmpc_dumpit((char *)ch->xid_th, TH_HEADER_LENGTH); | ||
1997 | ctcmpc_dumpit((char *)ch->xid, XID2_LENGTH); | ||
1998 | ctcmpc_dumpit((char *)ch->xid_id, 4); | ||
1999 | if (!in_irq()) { | 1816 | if (!in_irq()) { |
2000 | /* Such conditional locking is a known problem for | 1817 | /* Such conditional locking is a known problem for |
2001 | * sparse because its static undeterministic. | 1818 | * sparse because its static undeterministic. |
@@ -2012,16 +1829,13 @@ static void mpc_action_side_xid(fsm_instance *fsm, void *arg, int side) | |||
2012 | spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); | 1829 | spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); |
2013 | 1830 | ||
2014 | if (rc != 0) { | 1831 | if (rc != 0) { |
2015 | ctcm_pr_info("ctcmpc: %s() ch:%s IO failed \n", | ||
2016 | __FUNCTION__, ch->id); | ||
2017 | ctcm_ccw_check_rc(ch, rc, | 1832 | ctcm_ccw_check_rc(ch, rc, |
2018 | (side == XSIDE) ? "x-side XID" : "y-side XID"); | 1833 | (side == XSIDE) ? "x-side XID" : "y-side XID"); |
2019 | } | 1834 | } |
2020 | 1835 | ||
2021 | done: | 1836 | done: |
2022 | if (do_debug) | 1837 | CTCM_PR_DEBUG("Exit %s: ch=0x%p id=%s\n", |
2023 | ctcm_pr_debug("ctcmpc exit : %s(): ch=0x%p id=%s\n", | 1838 | __func__, ch, ch->id); |
2024 | __FUNCTION__, ch, ch->id); | ||
2025 | return; | 1839 | return; |
2026 | 1840 | ||
2027 | } | 1841 | } |
@@ -2050,41 +1864,19 @@ static void mpc_action_yside_xid(fsm_instance *fsm, int event, void *arg) | |||
2050 | */ | 1864 | */ |
2051 | static void mpc_action_doxid0(fsm_instance *fsm, int event, void *arg) | 1865 | static void mpc_action_doxid0(fsm_instance *fsm, int event, void *arg) |
2052 | { | 1866 | { |
2053 | struct channel *ch = arg; | 1867 | struct channel *ch = arg; |
2054 | struct ctcm_priv *priv; | 1868 | struct net_device *dev = ch->netdev; |
2055 | struct mpc_group *grp = NULL; | 1869 | struct ctcm_priv *priv = dev->priv; |
2056 | struct net_device *dev = NULL; | 1870 | struct mpc_group *grp = priv->mpcg; |
2057 | |||
2058 | if (do_debug) | ||
2059 | ctcm_pr_debug("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n", | ||
2060 | __FUNCTION__, smp_processor_id(), ch, ch->id); | ||
2061 | |||
2062 | if (ch == NULL) { | ||
2063 | printk(KERN_WARNING "%s ch=NULL\n", __FUNCTION__); | ||
2064 | goto done; | ||
2065 | } | ||
2066 | |||
2067 | dev = ch->netdev; | ||
2068 | if (dev == NULL) { | ||
2069 | printk(KERN_WARNING "%s dev=NULL\n", __FUNCTION__); | ||
2070 | goto done; | ||
2071 | } | ||
2072 | |||
2073 | priv = dev->priv; | ||
2074 | if (priv == NULL) { | ||
2075 | printk(KERN_WARNING "%s priv=NULL\n", __FUNCTION__); | ||
2076 | goto done; | ||
2077 | } | ||
2078 | 1871 | ||
2079 | grp = priv->mpcg; | 1872 | CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n", |
2080 | if (grp == NULL) { | 1873 | __func__, smp_processor_id(), ch, ch->id); |
2081 | printk(KERN_WARNING "%s grp=NULL\n", __FUNCTION__); | ||
2082 | goto done; | ||
2083 | } | ||
2084 | 1874 | ||
2085 | if (ch->xid == NULL) { | 1875 | if (ch->xid == NULL) { |
2086 | printk(KERN_WARNING "%s ch-xid=NULL\n", __FUNCTION__); | 1876 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, |
2087 | goto done; | 1877 | "%s(%s): ch->xid == NULL", |
1878 | CTCM_FUNTAIL, dev->name); | ||
1879 | return; | ||
2088 | } | 1880 | } |
2089 | 1881 | ||
2090 | fsm_newstate(ch->fsm, CH_XID0_INPROGRESS); | 1882 | fsm_newstate(ch->fsm, CH_XID0_INPROGRESS); |
@@ -2104,12 +1896,7 @@ static void mpc_action_doxid0(fsm_instance *fsm, int event, void *arg) | |||
2104 | 1896 | ||
2105 | fsm_event(grp->fsm, MPCG_EVENT_DOIO, ch); | 1897 | fsm_event(grp->fsm, MPCG_EVENT_DOIO, ch); |
2106 | 1898 | ||
2107 | done: | ||
2108 | if (do_debug) | ||
2109 | ctcm_pr_debug("ctcmpc exit : %s(): ch=0x%p id=%s\n", | ||
2110 | __FUNCTION__, ch, ch->id); | ||
2111 | return; | 1899 | return; |
2112 | |||
2113 | } | 1900 | } |
2114 | 1901 | ||
2115 | /* | 1902 | /* |
@@ -2119,32 +1906,16 @@ done: | |||
2119 | static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg) | 1906 | static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg) |
2120 | { | 1907 | { |
2121 | struct net_device *dev = arg; | 1908 | struct net_device *dev = arg; |
2122 | struct ctcm_priv *priv = NULL; | 1909 | struct ctcm_priv *priv = dev->priv; |
2123 | struct mpc_group *grp = NULL; | 1910 | struct mpc_group *grp = NULL; |
2124 | int direction; | 1911 | int direction; |
2125 | int rc = 0; | ||
2126 | int send = 0; | 1912 | int send = 0; |
2127 | 1913 | ||
2128 | ctcm_pr_debug("ctcmpc enter: %s() \n", __FUNCTION__); | 1914 | if (priv) |
2129 | 1915 | grp = priv->mpcg; | |
2130 | if (dev == NULL) { | ||
2131 | printk(KERN_INFO "%s dev=NULL \n", __FUNCTION__); | ||
2132 | rc = 1; | ||
2133 | goto done; | ||
2134 | } | ||
2135 | |||
2136 | priv = dev->priv; | ||
2137 | if (priv == NULL) { | ||
2138 | printk(KERN_INFO "%s priv=NULL \n", __FUNCTION__); | ||
2139 | rc = 1; | ||
2140 | goto done; | ||
2141 | } | ||
2142 | |||
2143 | grp = priv->mpcg; | ||
2144 | if (grp == NULL) { | 1916 | if (grp == NULL) { |
2145 | printk(KERN_INFO "%s grp=NULL \n", __FUNCTION__); | 1917 | fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); |
2146 | rc = 1; | 1918 | return; |
2147 | goto done; | ||
2148 | } | 1919 | } |
2149 | 1920 | ||
2150 | for (direction = READ; direction <= WRITE; direction++) { | 1921 | for (direction = READ; direction <= WRITE; direction++) { |
@@ -2199,11 +1970,6 @@ static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg) | |||
2199 | fsm_event(grp->fsm, MPCG_EVENT_DOIO, ch); | 1970 | fsm_event(grp->fsm, MPCG_EVENT_DOIO, ch); |
2200 | } | 1971 | } |
2201 | 1972 | ||
2202 | done: | ||
2203 | |||
2204 | if (rc != 0) | ||
2205 | fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); | ||
2206 | |||
2207 | return; | 1973 | return; |
2208 | } | 1974 | } |
2209 | 1975 | ||
@@ -2214,24 +1980,15 @@ done: | |||
2214 | static void mpc_action_rcvd_xid0(fsm_instance *fsm, int event, void *arg) | 1980 | static void mpc_action_rcvd_xid0(fsm_instance *fsm, int event, void *arg) |
2215 | { | 1981 | { |
2216 | 1982 | ||
2217 | struct mpcg_info *mpcginfo = arg; | 1983 | struct mpcg_info *mpcginfo = arg; |
2218 | struct channel *ch = mpcginfo->ch; | 1984 | struct channel *ch = mpcginfo->ch; |
2219 | struct net_device *dev = ch->netdev; | 1985 | struct net_device *dev = ch->netdev; |
2220 | struct ctcm_priv *priv; | 1986 | struct ctcm_priv *priv = dev->priv; |
2221 | struct mpc_group *grp; | 1987 | struct mpc_group *grp = priv->mpcg; |
2222 | |||
2223 | if (do_debug) | ||
2224 | ctcm_pr_debug("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n", | ||
2225 | __FUNCTION__, smp_processor_id(), ch, ch->id); | ||
2226 | |||
2227 | priv = dev->priv; | ||
2228 | grp = priv->mpcg; | ||
2229 | 1988 | ||
2230 | ctcm_pr_debug("ctcmpc in:%s() %s xid2:%i xid7:%i xidt_p2:%i \n", | 1989 | CTCM_PR_DEBUG("%s: ch-id:%s xid2:%i xid7:%i xidt_p2:%i \n", |
2231 | __FUNCTION__, ch->id, | 1990 | __func__, ch->id, grp->outstanding_xid2, |
2232 | grp->outstanding_xid2, | 1991 | grp->outstanding_xid7, grp->outstanding_xid7_p2); |
2233 | grp->outstanding_xid7, | ||
2234 | grp->outstanding_xid7_p2); | ||
2235 | 1992 | ||
2236 | if (fsm_getstate(ch->fsm) < CH_XID7_PENDING) | 1993 | if (fsm_getstate(ch->fsm) < CH_XID7_PENDING) |
2237 | fsm_newstate(ch->fsm, CH_XID7_PENDING); | 1994 | fsm_newstate(ch->fsm, CH_XID7_PENDING); |
@@ -2268,17 +2025,12 @@ static void mpc_action_rcvd_xid0(fsm_instance *fsm, int event, void *arg) | |||
2268 | } | 2025 | } |
2269 | kfree(mpcginfo); | 2026 | kfree(mpcginfo); |
2270 | 2027 | ||
2271 | if (do_debug) { | 2028 | CTCM_PR_DEBUG("ctcmpc:%s() %s xid2:%i xid7:%i xidt_p2:%i \n", |
2272 | ctcm_pr_debug("ctcmpc:%s() %s xid2:%i xid7:%i xidt_p2:%i \n", | 2029 | __func__, ch->id, grp->outstanding_xid2, |
2273 | __FUNCTION__, ch->id, | 2030 | grp->outstanding_xid7, grp->outstanding_xid7_p2); |
2274 | grp->outstanding_xid2, | 2031 | CTCM_PR_DEBUG("ctcmpc:%s() %s grpstate: %s chanstate: %s \n", |
2275 | grp->outstanding_xid7, | 2032 | __func__, ch->id, |
2276 | grp->outstanding_xid7_p2); | 2033 | fsm_getstate_str(grp->fsm), fsm_getstate_str(ch->fsm)); |
2277 | ctcm_pr_debug("ctcmpc:%s() %s grpstate: %s chanstate: %s \n", | ||
2278 | __FUNCTION__, ch->id, | ||
2279 | fsm_getstate_str(grp->fsm), | ||
2280 | fsm_getstate_str(ch->fsm)); | ||
2281 | } | ||
2282 | return; | 2034 | return; |
2283 | 2035 | ||
2284 | } | 2036 | } |
@@ -2296,15 +2048,10 @@ static void mpc_action_rcvd_xid7(fsm_instance *fsm, int event, void *arg) | |||
2296 | struct ctcm_priv *priv = dev->priv; | 2048 | struct ctcm_priv *priv = dev->priv; |
2297 | struct mpc_group *grp = priv->mpcg; | 2049 | struct mpc_group *grp = priv->mpcg; |
2298 | 2050 | ||
2299 | if (do_debug) { | 2051 | CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n", |
2300 | ctcm_pr_debug("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n", | 2052 | __func__, smp_processor_id(), ch, ch->id); |
2301 | __FUNCTION__, smp_processor_id(), ch, ch->id); | 2053 | CTCM_PR_DEBUG("%s: outstanding_xid7: %i, outstanding_xid7_p2: %i\n", |
2302 | 2054 | __func__, grp->outstanding_xid7, grp->outstanding_xid7_p2); | |
2303 | ctcm_pr_debug("ctcmpc: outstanding_xid7: %i, " | ||
2304 | " outstanding_xid7_p2: %i\n", | ||
2305 | grp->outstanding_xid7, | ||
2306 | grp->outstanding_xid7_p2); | ||
2307 | } | ||
2308 | 2055 | ||
2309 | grp->outstanding_xid7--; | 2056 | grp->outstanding_xid7--; |
2310 | ch->xid_skb->data = ch->xid_skb_data; | 2057 | ch->xid_skb->data = ch->xid_skb_data; |
@@ -2337,14 +2084,8 @@ static void mpc_action_rcvd_xid7(fsm_instance *fsm, int event, void *arg) | |||
2337 | mpc_validate_xid(mpcginfo); | 2084 | mpc_validate_xid(mpcginfo); |
2338 | break; | 2085 | break; |
2339 | } | 2086 | } |
2340 | |||
2341 | kfree(mpcginfo); | 2087 | kfree(mpcginfo); |
2342 | |||
2343 | if (do_debug) | ||
2344 | ctcm_pr_debug("ctcmpc exit: %s(): cp=%i ch=0x%p id=%s\n", | ||
2345 | __FUNCTION__, smp_processor_id(), ch, ch->id); | ||
2346 | return; | 2088 | return; |
2347 | |||
2348 | } | 2089 | } |
2349 | 2090 | ||
2350 | /* | 2091 | /* |
@@ -2353,36 +2094,14 @@ static void mpc_action_rcvd_xid7(fsm_instance *fsm, int event, void *arg) | |||
2353 | */ | 2094 | */ |
2354 | static int mpc_send_qllc_discontact(struct net_device *dev) | 2095 | static int mpc_send_qllc_discontact(struct net_device *dev) |
2355 | { | 2096 | { |
2356 | int rc = 0; | ||
2357 | __u32 new_len = 0; | 2097 | __u32 new_len = 0; |
2358 | struct sk_buff *skb; | 2098 | struct sk_buff *skb; |
2359 | struct qllc *qllcptr; | 2099 | struct qllc *qllcptr; |
2360 | struct ctcm_priv *priv; | 2100 | struct ctcm_priv *priv = dev->priv; |
2361 | struct mpc_group *grp; | 2101 | struct mpc_group *grp = priv->mpcg; |
2362 | |||
2363 | ctcm_pr_debug("ctcmpc enter: %s()\n", __FUNCTION__); | ||
2364 | |||
2365 | if (dev == NULL) { | ||
2366 | printk(KERN_INFO "%s() dev=NULL\n", __FUNCTION__); | ||
2367 | rc = 1; | ||
2368 | goto done; | ||
2369 | } | ||
2370 | |||
2371 | priv = dev->priv; | ||
2372 | if (priv == NULL) { | ||
2373 | printk(KERN_INFO "%s() priv=NULL\n", __FUNCTION__); | ||
2374 | rc = 1; | ||
2375 | goto done; | ||
2376 | } | ||
2377 | 2102 | ||
2378 | grp = priv->mpcg; | 2103 | CTCM_PR_DEBUG("%s: GROUP STATE: %s\n", |
2379 | if (grp == NULL) { | 2104 | __func__, mpcg_state_names[grp->saved_state]); |
2380 | printk(KERN_INFO "%s() grp=NULL\n", __FUNCTION__); | ||
2381 | rc = 1; | ||
2382 | goto done; | ||
2383 | } | ||
2384 | ctcm_pr_info("ctcmpc: %s() GROUP STATE: %s\n", __FUNCTION__, | ||
2385 | mpcg_state_names[grp->saved_state]); | ||
2386 | 2105 | ||
2387 | switch (grp->saved_state) { | 2106 | switch (grp->saved_state) { |
2388 | /* | 2107 | /* |
@@ -2408,11 +2127,10 @@ static int mpc_send_qllc_discontact(struct net_device *dev) | |||
2408 | new_len = sizeof(struct qllc); | 2127 | new_len = sizeof(struct qllc); |
2409 | qllcptr = kzalloc(new_len, gfp_type() | GFP_DMA); | 2128 | qllcptr = kzalloc(new_len, gfp_type() | GFP_DMA); |
2410 | if (qllcptr == NULL) { | 2129 | if (qllcptr == NULL) { |
2411 | printk(KERN_INFO | 2130 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, |
2412 | "ctcmpc: Out of memory in %s()\n", | 2131 | "%s(%s): qllcptr allocation error", |
2413 | dev->name); | 2132 | CTCM_FUNTAIL, dev->name); |
2414 | rc = 1; | 2133 | return -ENOMEM; |
2415 | goto done; | ||
2416 | } | 2134 | } |
2417 | 2135 | ||
2418 | qllcptr->qllc_address = 0xcc; | 2136 | qllcptr->qllc_address = 0xcc; |
@@ -2421,31 +2139,29 @@ static int mpc_send_qllc_discontact(struct net_device *dev) | |||
2421 | skb = __dev_alloc_skb(new_len, GFP_ATOMIC); | 2139 | skb = __dev_alloc_skb(new_len, GFP_ATOMIC); |
2422 | 2140 | ||
2423 | if (skb == NULL) { | 2141 | if (skb == NULL) { |
2424 | printk(KERN_INFO "%s Out of memory in mpc_send_qllc\n", | 2142 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, |
2425 | dev->name); | 2143 | "%s(%s): skb allocation error", |
2144 | CTCM_FUNTAIL, dev->name); | ||
2426 | priv->stats.rx_dropped++; | 2145 | priv->stats.rx_dropped++; |
2427 | rc = 1; | ||
2428 | kfree(qllcptr); | 2146 | kfree(qllcptr); |
2429 | goto done; | 2147 | return -ENOMEM; |
2430 | } | 2148 | } |
2431 | 2149 | ||
2432 | memcpy(skb_put(skb, new_len), qllcptr, new_len); | 2150 | memcpy(skb_put(skb, new_len), qllcptr, new_len); |
2433 | kfree(qllcptr); | 2151 | kfree(qllcptr); |
2434 | 2152 | ||
2435 | if (skb_headroom(skb) < 4) { | 2153 | if (skb_headroom(skb) < 4) { |
2436 | printk(KERN_INFO "ctcmpc: %s() Unable to" | 2154 | CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, |
2437 | " build discontact for %s\n", | 2155 | "%s(%s): skb_headroom error", |
2438 | __FUNCTION__, dev->name); | 2156 | CTCM_FUNTAIL, dev->name); |
2439 | rc = 1; | ||
2440 | dev_kfree_skb_any(skb); | 2157 | dev_kfree_skb_any(skb); |
2441 | goto done; | 2158 | return -ENOMEM; |
2442 | } | 2159 | } |
2443 | 2160 | ||
2444 | *((__u32 *)skb_push(skb, 4)) = priv->channel[READ]->pdu_seq; | 2161 | *((__u32 *)skb_push(skb, 4)) = priv->channel[READ]->pdu_seq; |
2445 | priv->channel[READ]->pdu_seq++; | 2162 | priv->channel[READ]->pdu_seq++; |
2446 | if (do_debug_data) | 2163 | CTCM_PR_DBGDATA("ctcmpc: %s ToDCM_pdu_seq= %08x\n", |
2447 | ctcm_pr_debug("ctcmpc: %s ToDCM_pdu_seq= %08x\n", | 2164 | __func__, priv->channel[READ]->pdu_seq); |
2448 | __FUNCTION__, priv->channel[READ]->pdu_seq); | ||
2449 | 2165 | ||
2450 | /* receipt of CC03 resets anticipated sequence number on | 2166 | /* receipt of CC03 resets anticipated sequence number on |
2451 | receiving side */ | 2167 | receiving side */ |
@@ -2455,7 +2171,7 @@ static int mpc_send_qllc_discontact(struct net_device *dev) | |||
2455 | skb->protocol = htons(ETH_P_SNAP); | 2171 | skb->protocol = htons(ETH_P_SNAP); |
2456 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 2172 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
2457 | 2173 | ||
2458 | ctcmpc_dumpit((char *)skb->data, (sizeof(struct qllc) + 4)); | 2174 | CTCM_D3_DUMP(skb->data, (sizeof(struct qllc) + 4)); |
2459 | 2175 | ||
2460 | netif_rx(skb); | 2176 | netif_rx(skb); |
2461 | break; | 2177 | break; |
@@ -2464,9 +2180,7 @@ static int mpc_send_qllc_discontact(struct net_device *dev) | |||
2464 | 2180 | ||
2465 | } | 2181 | } |
2466 | 2182 | ||
2467 | done: | 2183 | return 0; |
2468 | ctcm_pr_debug("ctcmpc exit: %s()\n", __FUNCTION__); | ||
2469 | return rc; | ||
2470 | } | 2184 | } |
2471 | /* --- This is the END my friend --- */ | 2185 | /* --- This is the END my friend --- */ |
2472 | 2186 | ||
diff --git a/drivers/s390/net/ctcm_mpc.h b/drivers/s390/net/ctcm_mpc.h index f99686069a91..5336120cddf1 100644 --- a/drivers/s390/net/ctcm_mpc.h +++ b/drivers/s390/net/ctcm_mpc.h | |||
@@ -231,7 +231,7 @@ static inline void ctcmpc_dump32(char *buf, int len) | |||
231 | int ctcmpc_open(struct net_device *); | 231 | int ctcmpc_open(struct net_device *); |
232 | void ctcm_ccw_check_rc(struct channel *, int, char *); | 232 | void ctcm_ccw_check_rc(struct channel *, int, char *); |
233 | void mpc_group_ready(unsigned long adev); | 233 | void mpc_group_ready(unsigned long adev); |
234 | int mpc_channel_action(struct channel *ch, int direction, int action); | 234 | void mpc_channel_action(struct channel *ch, int direction, int action); |
235 | void mpc_action_send_discontact(unsigned long thischan); | 235 | void mpc_action_send_discontact(unsigned long thischan); |
236 | void mpc_action_discontact(fsm_instance *fi, int event, void *arg); | 236 | void mpc_action_discontact(fsm_instance *fi, int event, void *arg); |
237 | void ctcmpc_bh(unsigned long thischan); | 237 | void ctcmpc_bh(unsigned long thischan); |
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index b29afef5c7fb..38de31b55708 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
@@ -2651,7 +2651,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2651 | tag = (u16 *)(new_skb->data + 12); | 2651 | tag = (u16 *)(new_skb->data + 12); |
2652 | *tag = __constant_htons(ETH_P_8021Q); | 2652 | *tag = __constant_htons(ETH_P_8021Q); |
2653 | *(tag + 1) = htons(vlan_tx_tag_get(new_skb)); | 2653 | *(tag + 1) = htons(vlan_tx_tag_get(new_skb)); |
2654 | VLAN_TX_SKB_CB(new_skb)->magic = 0; | 2654 | new_skb->vlan_tci = 0; |
2655 | } | 2655 | } |
2656 | } | 2656 | } |
2657 | 2657 | ||
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c index b224a28e0c15..7bc296f424ae 100644 --- a/fs/proc/proc_net.c +++ b/fs/proc/proc_net.c | |||
@@ -27,6 +27,11 @@ | |||
27 | #include "internal.h" | 27 | #include "internal.h" |
28 | 28 | ||
29 | 29 | ||
30 | static struct net *get_proc_net(const struct inode *inode) | ||
31 | { | ||
32 | return maybe_get_net(PDE_NET(PDE(inode))); | ||
33 | } | ||
34 | |||
30 | int seq_open_net(struct inode *ino, struct file *f, | 35 | int seq_open_net(struct inode *ino, struct file *f, |
31 | const struct seq_operations *ops, int size) | 36 | const struct seq_operations *ops, int size) |
32 | { | 37 | { |
@@ -185,12 +190,6 @@ void proc_net_remove(struct net *net, const char *name) | |||
185 | } | 190 | } |
186 | EXPORT_SYMBOL_GPL(proc_net_remove); | 191 | EXPORT_SYMBOL_GPL(proc_net_remove); |
187 | 192 | ||
188 | struct net *get_proc_net(const struct inode *inode) | ||
189 | { | ||
190 | return maybe_get_net(PDE_NET(PDE(inode))); | ||
191 | } | ||
192 | EXPORT_SYMBOL_GPL(get_proc_net); | ||
193 | |||
194 | static __net_init int proc_net_ns_init(struct net *net) | 193 | static __net_init int proc_net_ns_init(struct net *net) |
195 | { | 194 | { |
196 | struct proc_dir_entry *netd, *net_statd; | 195 | struct proc_dir_entry *netd, *net_statd; |
diff --git a/include/linux/fs_enet_pd.h b/include/linux/fs_enet_pd.h index 0ba21ee0f58c..8300cab30f9a 100644 --- a/include/linux/fs_enet_pd.h +++ b/include/linux/fs_enet_pd.h | |||
@@ -103,10 +103,6 @@ struct fs_mii_bb_platform_info { | |||
103 | struct fs_mii_bit mdio_dir; | 103 | struct fs_mii_bit mdio_dir; |
104 | struct fs_mii_bit mdio_dat; | 104 | struct fs_mii_bit mdio_dat; |
105 | struct fs_mii_bit mdc_dat; | 105 | struct fs_mii_bit mdc_dat; |
106 | int mdio_port; /* port & bit for MDIO */ | ||
107 | int mdio_bit; | ||
108 | int mdc_port; /* port & bit for MDC */ | ||
109 | int mdc_bit; | ||
110 | int delay; /* delay in us */ | 106 | int delay; /* delay in us */ |
111 | int irq[32]; /* irqs per phy's */ | 107 | int irq[32]; /* irqs per phy's */ |
112 | }; | 108 | }; |
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 391ad0843a46..641e026eee8f 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h | |||
@@ -123,6 +123,7 @@ struct ipv6hdr { | |||
123 | struct in6_addr daddr; | 123 | struct in6_addr daddr; |
124 | }; | 124 | }; |
125 | 125 | ||
126 | #ifdef __KERNEL__ | ||
126 | /* | 127 | /* |
127 | * This structure contains configuration options per IPv6 link. | 128 | * This structure contains configuration options per IPv6 link. |
128 | */ | 129 | */ |
@@ -167,6 +168,7 @@ struct ipv6_devconf { | |||
167 | __s32 accept_dad; | 168 | __s32 accept_dad; |
168 | void *sysctl; | 169 | void *sysctl; |
169 | }; | 170 | }; |
171 | #endif | ||
170 | 172 | ||
171 | /* index values for the variables in ipv6_devconf */ | 173 | /* index values for the variables in ipv6_devconf */ |
172 | enum { | 174 | enum { |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 812bcd8b4363..b4d056ceab96 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -996,17 +996,17 @@ static inline void netif_tx_schedule_all(struct net_device *dev) | |||
996 | netif_schedule_queue(netdev_get_tx_queue(dev, i)); | 996 | netif_schedule_queue(netdev_get_tx_queue(dev, i)); |
997 | } | 997 | } |
998 | 998 | ||
999 | static inline void netif_tx_start_queue(struct netdev_queue *dev_queue) | ||
1000 | { | ||
1001 | clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state); | ||
1002 | } | ||
1003 | |||
999 | /** | 1004 | /** |
1000 | * netif_start_queue - allow transmit | 1005 | * netif_start_queue - allow transmit |
1001 | * @dev: network device | 1006 | * @dev: network device |
1002 | * | 1007 | * |
1003 | * Allow upper layers to call the device hard_start_xmit routine. | 1008 | * Allow upper layers to call the device hard_start_xmit routine. |
1004 | */ | 1009 | */ |
1005 | static inline void netif_tx_start_queue(struct netdev_queue *dev_queue) | ||
1006 | { | ||
1007 | clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state); | ||
1008 | } | ||
1009 | |||
1010 | static inline void netif_start_queue(struct net_device *dev) | 1010 | static inline void netif_start_queue(struct net_device *dev) |
1011 | { | 1011 | { |
1012 | netif_tx_start_queue(netdev_get_tx_queue(dev, 0)); | 1012 | netif_tx_start_queue(netdev_get_tx_queue(dev, 0)); |
@@ -1022,13 +1022,6 @@ static inline void netif_tx_start_all_queues(struct net_device *dev) | |||
1022 | } | 1022 | } |
1023 | } | 1023 | } |
1024 | 1024 | ||
1025 | /** | ||
1026 | * netif_wake_queue - restart transmit | ||
1027 | * @dev: network device | ||
1028 | * | ||
1029 | * Allow upper layers to call the device hard_start_xmit routine. | ||
1030 | * Used for flow control when transmit resources are available. | ||
1031 | */ | ||
1032 | static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue) | 1025 | static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue) |
1033 | { | 1026 | { |
1034 | #ifdef CONFIG_NETPOLL_TRAP | 1027 | #ifdef CONFIG_NETPOLL_TRAP |
@@ -1041,6 +1034,13 @@ static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue) | |||
1041 | __netif_schedule(dev_queue->qdisc); | 1034 | __netif_schedule(dev_queue->qdisc); |
1042 | } | 1035 | } |
1043 | 1036 | ||
1037 | /** | ||
1038 | * netif_wake_queue - restart transmit | ||
1039 | * @dev: network device | ||
1040 | * | ||
1041 | * Allow upper layers to call the device hard_start_xmit routine. | ||
1042 | * Used for flow control when transmit resources are available. | ||
1043 | */ | ||
1044 | static inline void netif_wake_queue(struct net_device *dev) | 1044 | static inline void netif_wake_queue(struct net_device *dev) |
1045 | { | 1045 | { |
1046 | netif_tx_wake_queue(netdev_get_tx_queue(dev, 0)); | 1046 | netif_tx_wake_queue(netdev_get_tx_queue(dev, 0)); |
@@ -1056,6 +1056,11 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev) | |||
1056 | } | 1056 | } |
1057 | } | 1057 | } |
1058 | 1058 | ||
1059 | static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) | ||
1060 | { | ||
1061 | set_bit(__QUEUE_STATE_XOFF, &dev_queue->state); | ||
1062 | } | ||
1063 | |||
1059 | /** | 1064 | /** |
1060 | * netif_stop_queue - stop transmitted packets | 1065 | * netif_stop_queue - stop transmitted packets |
1061 | * @dev: network device | 1066 | * @dev: network device |
@@ -1063,11 +1068,6 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev) | |||
1063 | * Stop upper layers calling the device hard_start_xmit routine. | 1068 | * Stop upper layers calling the device hard_start_xmit routine. |
1064 | * Used for flow control when transmit resources are unavailable. | 1069 | * Used for flow control when transmit resources are unavailable. |
1065 | */ | 1070 | */ |
1066 | static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) | ||
1067 | { | ||
1068 | set_bit(__QUEUE_STATE_XOFF, &dev_queue->state); | ||
1069 | } | ||
1070 | |||
1071 | static inline void netif_stop_queue(struct net_device *dev) | 1071 | static inline void netif_stop_queue(struct net_device *dev) |
1072 | { | 1072 | { |
1073 | netif_tx_stop_queue(netdev_get_tx_queue(dev, 0)); | 1073 | netif_tx_stop_queue(netdev_get_tx_queue(dev, 0)); |
@@ -1083,17 +1083,17 @@ static inline void netif_tx_stop_all_queues(struct net_device *dev) | |||
1083 | } | 1083 | } |
1084 | } | 1084 | } |
1085 | 1085 | ||
1086 | static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue) | ||
1087 | { | ||
1088 | return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state); | ||
1089 | } | ||
1090 | |||
1086 | /** | 1091 | /** |
1087 | * netif_queue_stopped - test if transmit queue is flowblocked | 1092 | * netif_queue_stopped - test if transmit queue is flowblocked |
1088 | * @dev: network device | 1093 | * @dev: network device |
1089 | * | 1094 | * |
1090 | * Test if transmit queue on device is currently unable to send. | 1095 | * Test if transmit queue on device is currently unable to send. |
1091 | */ | 1096 | */ |
1092 | static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue) | ||
1093 | { | ||
1094 | return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state); | ||
1095 | } | ||
1096 | |||
1097 | static inline int netif_queue_stopped(const struct net_device *dev) | 1097 | static inline int netif_queue_stopped(const struct net_device *dev) |
1098 | { | 1098 | { |
1099 | return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0)); | 1099 | return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0)); |
@@ -1463,13 +1463,6 @@ static inline void netif_rx_complete(struct net_device *dev, | |||
1463 | local_irq_restore(flags); | 1463 | local_irq_restore(flags); |
1464 | } | 1464 | } |
1465 | 1465 | ||
1466 | /** | ||
1467 | * netif_tx_lock - grab network device transmit lock | ||
1468 | * @dev: network device | ||
1469 | * @cpu: cpu number of lock owner | ||
1470 | * | ||
1471 | * Get network device transmit lock | ||
1472 | */ | ||
1473 | static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) | 1466 | static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) |
1474 | { | 1467 | { |
1475 | spin_lock(&txq->_xmit_lock); | 1468 | spin_lock(&txq->_xmit_lock); |
@@ -1482,6 +1475,13 @@ static inline void __netif_tx_lock_bh(struct netdev_queue *txq) | |||
1482 | txq->xmit_lock_owner = smp_processor_id(); | 1475 | txq->xmit_lock_owner = smp_processor_id(); |
1483 | } | 1476 | } |
1484 | 1477 | ||
1478 | /** | ||
1479 | * netif_tx_lock - grab network device transmit lock | ||
1480 | * @dev: network device | ||
1481 | * @cpu: cpu number of lock owner | ||
1482 | * | ||
1483 | * Get network device transmit lock | ||
1484 | */ | ||
1485 | static inline void netif_tx_lock(struct net_device *dev) | 1485 | static inline void netif_tx_lock(struct net_device *dev) |
1486 | { | 1486 | { |
1487 | int cpu = smp_processor_id(); | 1487 | int cpu = smp_processor_id(); |
@@ -1645,6 +1645,8 @@ extern void dev_seq_stop(struct seq_file *seq, void *v); | |||
1645 | extern int netdev_class_create_file(struct class_attribute *class_attr); | 1645 | extern int netdev_class_create_file(struct class_attribute *class_attr); |
1646 | extern void netdev_class_remove_file(struct class_attribute *class_attr); | 1646 | extern void netdev_class_remove_file(struct class_attribute *class_attr); |
1647 | 1647 | ||
1648 | extern char *netdev_drivername(struct net_device *dev, char *buffer, int len); | ||
1649 | |||
1648 | extern void linkwatch_run_queue(void); | 1650 | extern void linkwatch_run_queue(void); |
1649 | 1651 | ||
1650 | extern int netdev_compute_features(unsigned long all, unsigned long one); | 1652 | extern int netdev_compute_features(unsigned long all, unsigned long one); |
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index fff1d27ddb4c..15a9eaf4a802 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h | |||
@@ -305,8 +305,6 @@ static inline struct net *PDE_NET(struct proc_dir_entry *pde) | |||
305 | return pde->parent->data; | 305 | return pde->parent->data; |
306 | } | 306 | } |
307 | 307 | ||
308 | struct net *get_proc_net(const struct inode *inode); | ||
309 | |||
310 | struct proc_maps_private { | 308 | struct proc_maps_private { |
311 | struct pid *pid; | 309 | struct pid *pid; |
312 | struct task_struct *task; | 310 | struct task_struct *task; |
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index db66c7927743..c8effa4b1feb 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h | |||
@@ -193,8 +193,6 @@ struct inet6_dev | |||
193 | struct rcu_head rcu; | 193 | struct rcu_head rcu; |
194 | }; | 194 | }; |
195 | 195 | ||
196 | extern struct ipv6_devconf ipv6_devconf; | ||
197 | |||
198 | static inline void ipv6_eth_mc_map(struct in6_addr *addr, char *buf) | 196 | static inline void ipv6_eth_mc_map(struct in6_addr *addr, char *buf) |
199 | { | 197 | { |
200 | /* | 198 | /* |
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index 9313491e3dad..2f8b3c06a101 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h | |||
@@ -68,7 +68,7 @@ extern struct rt6_info *rt6_lookup(struct net *net, | |||
68 | extern struct dst_entry *icmp6_dst_alloc(struct net_device *dev, | 68 | extern struct dst_entry *icmp6_dst_alloc(struct net_device *dev, |
69 | struct neighbour *neigh, | 69 | struct neighbour *neigh, |
70 | const struct in6_addr *addr); | 70 | const struct in6_addr *addr); |
71 | extern int icmp6_dst_gc(int *more); | 71 | extern int icmp6_dst_gc(void); |
72 | 72 | ||
73 | extern void fib6_force_start_gc(struct net *net); | 73 | extern void fib6_force_start_gc(struct net *net); |
74 | 74 | ||
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h index 5bacd838e88b..2932721180c0 100644 --- a/include/net/netns/ipv6.h +++ b/include/net/netns/ipv6.h | |||
@@ -39,7 +39,7 @@ struct netns_ipv6 { | |||
39 | #endif | 39 | #endif |
40 | struct rt6_info *ip6_null_entry; | 40 | struct rt6_info *ip6_null_entry; |
41 | struct rt6_statistics *rt6_stats; | 41 | struct rt6_statistics *rt6_stats; |
42 | struct timer_list *ip6_fib_timer; | 42 | struct timer_list ip6_fib_timer; |
43 | struct hlist_head *fib_table_hash; | 43 | struct hlist_head *fib_table_hash; |
44 | struct fib6_table *fib6_main_tbl; | 44 | struct fib6_table *fib6_main_tbl; |
45 | struct dst_ops *ip6_dst_ops; | 45 | struct dst_ops *ip6_dst_ops; |
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 70eb64a7e1a1..535a18f57a13 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h | |||
@@ -1161,7 +1161,6 @@ void sctp_outq_init(struct sctp_association *, struct sctp_outq *); | |||
1161 | void sctp_outq_teardown(struct sctp_outq *); | 1161 | void sctp_outq_teardown(struct sctp_outq *); |
1162 | void sctp_outq_free(struct sctp_outq*); | 1162 | void sctp_outq_free(struct sctp_outq*); |
1163 | int sctp_outq_tail(struct sctp_outq *, struct sctp_chunk *chunk); | 1163 | int sctp_outq_tail(struct sctp_outq *, struct sctp_chunk *chunk); |
1164 | int sctp_outq_flush(struct sctp_outq *, int); | ||
1165 | int sctp_outq_sack(struct sctp_outq *, struct sctp_sackhdr *); | 1164 | int sctp_outq_sack(struct sctp_outq *, struct sctp_sackhdr *); |
1166 | int sctp_outq_is_empty(const struct sctp_outq *); | 1165 | int sctp_outq_is_empty(const struct sctp_outq *); |
1167 | void sctp_outq_restart(struct sctp_outq *); | 1166 | void sctp_outq_restart(struct sctp_outq *); |
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index f42bc2b26b85..4bf014e51f8c 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c | |||
@@ -569,6 +569,7 @@ static void vlan_dev_set_rx_mode(struct net_device *vlan_dev) | |||
569 | * separate class since they always nest. | 569 | * separate class since they always nest. |
570 | */ | 570 | */ |
571 | static struct lock_class_key vlan_netdev_xmit_lock_key; | 571 | static struct lock_class_key vlan_netdev_xmit_lock_key; |
572 | static struct lock_class_key vlan_netdev_addr_lock_key; | ||
572 | 573 | ||
573 | static void vlan_dev_set_lockdep_one(struct net_device *dev, | 574 | static void vlan_dev_set_lockdep_one(struct net_device *dev, |
574 | struct netdev_queue *txq, | 575 | struct netdev_queue *txq, |
@@ -581,6 +582,9 @@ static void vlan_dev_set_lockdep_one(struct net_device *dev, | |||
581 | 582 | ||
582 | static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass) | 583 | static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass) |
583 | { | 584 | { |
585 | lockdep_set_class_and_subclass(&dev->addr_list_lock, | ||
586 | &vlan_netdev_addr_lock_key, | ||
587 | subclass); | ||
584 | netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass); | 588 | netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass); |
585 | } | 589 | } |
586 | 590 | ||
diff --git a/net/core/dev.c b/net/core/dev.c index cbc34c0db376..6bf217da9d8f 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -261,7 +261,7 @@ static RAW_NOTIFIER_HEAD(netdev_chain); | |||
261 | 261 | ||
262 | DEFINE_PER_CPU(struct softnet_data, softnet_data); | 262 | DEFINE_PER_CPU(struct softnet_data, softnet_data); |
263 | 263 | ||
264 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 264 | #ifdef CONFIG_LOCKDEP |
265 | /* | 265 | /* |
266 | * register_netdevice() inits txq->_xmit_lock and sets lockdep class | 266 | * register_netdevice() inits txq->_xmit_lock and sets lockdep class |
267 | * according to dev->type | 267 | * according to dev->type |
@@ -301,6 +301,7 @@ static const char *netdev_lock_name[] = | |||
301 | "_xmit_NONE"}; | 301 | "_xmit_NONE"}; |
302 | 302 | ||
303 | static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)]; | 303 | static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)]; |
304 | static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)]; | ||
304 | 305 | ||
305 | static inline unsigned short netdev_lock_pos(unsigned short dev_type) | 306 | static inline unsigned short netdev_lock_pos(unsigned short dev_type) |
306 | { | 307 | { |
@@ -313,8 +314,8 @@ static inline unsigned short netdev_lock_pos(unsigned short dev_type) | |||
313 | return ARRAY_SIZE(netdev_lock_type) - 1; | 314 | return ARRAY_SIZE(netdev_lock_type) - 1; |
314 | } | 315 | } |
315 | 316 | ||
316 | static inline void netdev_set_lockdep_class(spinlock_t *lock, | 317 | static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, |
317 | unsigned short dev_type) | 318 | unsigned short dev_type) |
318 | { | 319 | { |
319 | int i; | 320 | int i; |
320 | 321 | ||
@@ -322,9 +323,22 @@ static inline void netdev_set_lockdep_class(spinlock_t *lock, | |||
322 | lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i], | 323 | lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i], |
323 | netdev_lock_name[i]); | 324 | netdev_lock_name[i]); |
324 | } | 325 | } |
326 | |||
327 | static inline void netdev_set_addr_lockdep_class(struct net_device *dev) | ||
328 | { | ||
329 | int i; | ||
330 | |||
331 | i = netdev_lock_pos(dev->type); | ||
332 | lockdep_set_class_and_name(&dev->addr_list_lock, | ||
333 | &netdev_addr_lock_key[i], | ||
334 | netdev_lock_name[i]); | ||
335 | } | ||
325 | #else | 336 | #else |
326 | static inline void netdev_set_lockdep_class(spinlock_t *lock, | 337 | static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, |
327 | unsigned short dev_type) | 338 | unsigned short dev_type) |
339 | { | ||
340 | } | ||
341 | static inline void netdev_set_addr_lockdep_class(struct net_device *dev) | ||
328 | { | 342 | { |
329 | } | 343 | } |
330 | #endif | 344 | #endif |
@@ -1645,32 +1659,6 @@ out_kfree_skb: | |||
1645 | return 0; | 1659 | return 0; |
1646 | } | 1660 | } |
1647 | 1661 | ||
1648 | /** | ||
1649 | * dev_queue_xmit - transmit a buffer | ||
1650 | * @skb: buffer to transmit | ||
1651 | * | ||
1652 | * Queue a buffer for transmission to a network device. The caller must | ||
1653 | * have set the device and priority and built the buffer before calling | ||
1654 | * this function. The function can be called from an interrupt. | ||
1655 | * | ||
1656 | * A negative errno code is returned on a failure. A success does not | ||
1657 | * guarantee the frame will be transmitted as it may be dropped due | ||
1658 | * to congestion or traffic shaping. | ||
1659 | * | ||
1660 | * ----------------------------------------------------------------------------------- | ||
1661 | * I notice this method can also return errors from the queue disciplines, | ||
1662 | * including NET_XMIT_DROP, which is a positive value. So, errors can also | ||
1663 | * be positive. | ||
1664 | * | ||
1665 | * Regardless of the return value, the skb is consumed, so it is currently | ||
1666 | * difficult to retry a send to this method. (You can bump the ref count | ||
1667 | * before sending to hold a reference for retry if you are careful.) | ||
1668 | * | ||
1669 | * When calling this method, interrupts MUST be enabled. This is because | ||
1670 | * the BH enable code must have IRQs enabled so that it will not deadlock. | ||
1671 | * --BLG | ||
1672 | */ | ||
1673 | |||
1674 | static u32 simple_tx_hashrnd; | 1662 | static u32 simple_tx_hashrnd; |
1675 | static int simple_tx_hashrnd_initialized = 0; | 1663 | static int simple_tx_hashrnd_initialized = 0; |
1676 | 1664 | ||
@@ -1738,6 +1726,31 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev, | |||
1738 | return netdev_get_tx_queue(dev, queue_index); | 1726 | return netdev_get_tx_queue(dev, queue_index); |
1739 | } | 1727 | } |
1740 | 1728 | ||
1729 | /** | ||
1730 | * dev_queue_xmit - transmit a buffer | ||
1731 | * @skb: buffer to transmit | ||
1732 | * | ||
1733 | * Queue a buffer for transmission to a network device. The caller must | ||
1734 | * have set the device and priority and built the buffer before calling | ||
1735 | * this function. The function can be called from an interrupt. | ||
1736 | * | ||
1737 | * A negative errno code is returned on a failure. A success does not | ||
1738 | * guarantee the frame will be transmitted as it may be dropped due | ||
1739 | * to congestion or traffic shaping. | ||
1740 | * | ||
1741 | * ----------------------------------------------------------------------------------- | ||
1742 | * I notice this method can also return errors from the queue disciplines, | ||
1743 | * including NET_XMIT_DROP, which is a positive value. So, errors can also | ||
1744 | * be positive. | ||
1745 | * | ||
1746 | * Regardless of the return value, the skb is consumed, so it is currently | ||
1747 | * difficult to retry a send to this method. (You can bump the ref count | ||
1748 | * before sending to hold a reference for retry if you are careful.) | ||
1749 | * | ||
1750 | * When calling this method, interrupts MUST be enabled. This is because | ||
1751 | * the BH enable code must have IRQs enabled so that it will not deadlock. | ||
1752 | * --BLG | ||
1753 | */ | ||
1741 | int dev_queue_xmit(struct sk_buff *skb) | 1754 | int dev_queue_xmit(struct sk_buff *skb) |
1742 | { | 1755 | { |
1743 | struct net_device *dev = skb->dev; | 1756 | struct net_device *dev = skb->dev; |
@@ -3852,7 +3865,7 @@ static void __netdev_init_queue_locks_one(struct net_device *dev, | |||
3852 | void *_unused) | 3865 | void *_unused) |
3853 | { | 3866 | { |
3854 | spin_lock_init(&dev_queue->_xmit_lock); | 3867 | spin_lock_init(&dev_queue->_xmit_lock); |
3855 | netdev_set_lockdep_class(&dev_queue->_xmit_lock, dev->type); | 3868 | netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type); |
3856 | dev_queue->xmit_lock_owner = -1; | 3869 | dev_queue->xmit_lock_owner = -1; |
3857 | } | 3870 | } |
3858 | 3871 | ||
@@ -3897,6 +3910,7 @@ int register_netdevice(struct net_device *dev) | |||
3897 | net = dev_net(dev); | 3910 | net = dev_net(dev); |
3898 | 3911 | ||
3899 | spin_lock_init(&dev->addr_list_lock); | 3912 | spin_lock_init(&dev->addr_list_lock); |
3913 | netdev_set_addr_lockdep_class(dev); | ||
3900 | netdev_init_queue_locks(dev); | 3914 | netdev_init_queue_locks(dev); |
3901 | 3915 | ||
3902 | dev->iflink = -1; | 3916 | dev->iflink = -1; |
@@ -4207,7 +4221,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, | |||
4207 | { | 4221 | { |
4208 | struct netdev_queue *tx; | 4222 | struct netdev_queue *tx; |
4209 | struct net_device *dev; | 4223 | struct net_device *dev; |
4210 | int alloc_size; | 4224 | size_t alloc_size; |
4211 | void *p; | 4225 | void *p; |
4212 | 4226 | ||
4213 | BUG_ON(strlen(name) >= sizeof(dev->name)); | 4227 | BUG_ON(strlen(name) >= sizeof(dev->name)); |
@@ -4227,7 +4241,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, | |||
4227 | return NULL; | 4241 | return NULL; |
4228 | } | 4242 | } |
4229 | 4243 | ||
4230 | tx = kzalloc(sizeof(struct netdev_queue) * queue_count, GFP_KERNEL); | 4244 | tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL); |
4231 | if (!tx) { | 4245 | if (!tx) { |
4232 | printk(KERN_ERR "alloc_netdev: Unable to allocate " | 4246 | printk(KERN_ERR "alloc_netdev: Unable to allocate " |
4233 | "tx qdiscs.\n"); | 4247 | "tx qdiscs.\n"); |
@@ -4686,6 +4700,26 @@ err_name: | |||
4686 | return -ENOMEM; | 4700 | return -ENOMEM; |
4687 | } | 4701 | } |
4688 | 4702 | ||
4703 | char *netdev_drivername(struct net_device *dev, char *buffer, int len) | ||
4704 | { | ||
4705 | struct device_driver *driver; | ||
4706 | struct device *parent; | ||
4707 | |||
4708 | if (len <= 0 || !buffer) | ||
4709 | return buffer; | ||
4710 | buffer[0] = 0; | ||
4711 | |||
4712 | parent = dev->dev.parent; | ||
4713 | |||
4714 | if (!parent) | ||
4715 | return buffer; | ||
4716 | |||
4717 | driver = parent->driver; | ||
4718 | if (driver && driver->name) | ||
4719 | strlcpy(buffer, driver->name, len); | ||
4720 | return buffer; | ||
4721 | } | ||
4722 | |||
4689 | static void __net_exit netdev_exit(struct net *net) | 4723 | static void __net_exit netdev_exit(struct net *net) |
4690 | { | 4724 | { |
4691 | kfree(net->dev_name_head); | 4725 | kfree(net->dev_name_head); |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 1fa683c0ba9b..a00532de2a8c 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -472,7 +472,7 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb, | |||
472 | } | 472 | } |
473 | if (likely(sysctl_tcp_sack)) { | 473 | if (likely(sysctl_tcp_sack)) { |
474 | opts->options |= OPTION_SACK_ADVERTISE; | 474 | opts->options |= OPTION_SACK_ADVERTISE; |
475 | if (unlikely(!OPTION_TS & opts->options)) | 475 | if (unlikely(!(OPTION_TS & opts->options))) |
476 | size += TCPOLEN_SACKPERM_ALIGNED; | 476 | size += TCPOLEN_SACKPERM_ALIGNED; |
477 | } | 477 | } |
478 | 478 | ||
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index a751770947a3..383d17359d01 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -1325,6 +1325,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname, | |||
1325 | return -ENOPROTOOPT; | 1325 | return -ENOPROTOOPT; |
1326 | if (val != 0 && val < 8) /* Illegal coverage: use default (8) */ | 1326 | if (val != 0 && val < 8) /* Illegal coverage: use default (8) */ |
1327 | val = 8; | 1327 | val = 8; |
1328 | else if (val > USHORT_MAX) | ||
1329 | val = USHORT_MAX; | ||
1328 | up->pcslen = val; | 1330 | up->pcslen = val; |
1329 | up->pcflag |= UDPLITE_SEND_CC; | 1331 | up->pcflag |= UDPLITE_SEND_CC; |
1330 | break; | 1332 | break; |
@@ -1337,6 +1339,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname, | |||
1337 | return -ENOPROTOOPT; | 1339 | return -ENOPROTOOPT; |
1338 | if (val != 0 && val < 8) /* Avoid silly minimal values. */ | 1340 | if (val != 0 && val < 8) /* Avoid silly minimal values. */ |
1339 | val = 8; | 1341 | val = 8; |
1342 | else if (val > USHORT_MAX) | ||
1343 | val = USHORT_MAX; | ||
1340 | up->pcrlen = val; | 1344 | up->pcrlen = val; |
1341 | up->pcflag |= UDPLITE_RECV_CC; | 1345 | up->pcflag |= UDPLITE_RECV_CC; |
1342 | break; | 1346 | break; |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 9f4fcce6379b..74d543d504a1 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -153,7 +153,7 @@ static int ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr, | |||
153 | 153 | ||
154 | static ATOMIC_NOTIFIER_HEAD(inet6addr_chain); | 154 | static ATOMIC_NOTIFIER_HEAD(inet6addr_chain); |
155 | 155 | ||
156 | struct ipv6_devconf ipv6_devconf __read_mostly = { | 156 | static struct ipv6_devconf ipv6_devconf __read_mostly = { |
157 | .forwarding = 0, | 157 | .forwarding = 0, |
158 | .hop_limit = IPV6_DEFAULT_HOPLIMIT, | 158 | .hop_limit = IPV6_DEFAULT_HOPLIMIT, |
159 | .mtu6 = IPV6_MIN_MTU, | 159 | .mtu6 = IPV6_MIN_MTU, |
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 4de2b9efcacb..08ea2de28d63 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c | |||
@@ -661,17 +661,17 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, | |||
661 | 661 | ||
662 | static __inline__ void fib6_start_gc(struct net *net, struct rt6_info *rt) | 662 | static __inline__ void fib6_start_gc(struct net *net, struct rt6_info *rt) |
663 | { | 663 | { |
664 | if (net->ipv6.ip6_fib_timer->expires == 0 && | 664 | if (!timer_pending(&net->ipv6.ip6_fib_timer) && |
665 | (rt->rt6i_flags & (RTF_EXPIRES|RTF_CACHE))) | 665 | (rt->rt6i_flags & (RTF_EXPIRES|RTF_CACHE))) |
666 | mod_timer(net->ipv6.ip6_fib_timer, jiffies + | 666 | mod_timer(&net->ipv6.ip6_fib_timer, |
667 | net->ipv6.sysctl.ip6_rt_gc_interval); | 667 | jiffies + net->ipv6.sysctl.ip6_rt_gc_interval); |
668 | } | 668 | } |
669 | 669 | ||
670 | void fib6_force_start_gc(struct net *net) | 670 | void fib6_force_start_gc(struct net *net) |
671 | { | 671 | { |
672 | if (net->ipv6.ip6_fib_timer->expires == 0) | 672 | if (!timer_pending(&net->ipv6.ip6_fib_timer)) |
673 | mod_timer(net->ipv6.ip6_fib_timer, jiffies + | 673 | mod_timer(&net->ipv6.ip6_fib_timer, |
674 | net->ipv6.sysctl.ip6_rt_gc_interval); | 674 | jiffies + net->ipv6.sysctl.ip6_rt_gc_interval); |
675 | } | 675 | } |
676 | 676 | ||
677 | /* | 677 | /* |
@@ -1447,27 +1447,23 @@ void fib6_run_gc(unsigned long expires, struct net *net) | |||
1447 | gc_args.timeout = expires ? (int)expires : | 1447 | gc_args.timeout = expires ? (int)expires : |
1448 | net->ipv6.sysctl.ip6_rt_gc_interval; | 1448 | net->ipv6.sysctl.ip6_rt_gc_interval; |
1449 | } else { | 1449 | } else { |
1450 | local_bh_disable(); | 1450 | if (!spin_trylock_bh(&fib6_gc_lock)) { |
1451 | if (!spin_trylock(&fib6_gc_lock)) { | 1451 | mod_timer(&net->ipv6.ip6_fib_timer, jiffies + HZ); |
1452 | mod_timer(net->ipv6.ip6_fib_timer, jiffies + HZ); | ||
1453 | local_bh_enable(); | ||
1454 | return; | 1452 | return; |
1455 | } | 1453 | } |
1456 | gc_args.timeout = net->ipv6.sysctl.ip6_rt_gc_interval; | 1454 | gc_args.timeout = net->ipv6.sysctl.ip6_rt_gc_interval; |
1457 | } | 1455 | } |
1458 | gc_args.more = 0; | ||
1459 | 1456 | ||
1460 | icmp6_dst_gc(&gc_args.more); | 1457 | gc_args.more = icmp6_dst_gc(); |
1461 | 1458 | ||
1462 | fib6_clean_all(net, fib6_age, 0, NULL); | 1459 | fib6_clean_all(net, fib6_age, 0, NULL); |
1463 | 1460 | ||
1464 | if (gc_args.more) | 1461 | if (gc_args.more) |
1465 | mod_timer(net->ipv6.ip6_fib_timer, jiffies + | 1462 | mod_timer(&net->ipv6.ip6_fib_timer, |
1466 | net->ipv6.sysctl.ip6_rt_gc_interval); | 1463 | round_jiffies(jiffies |
1467 | else { | 1464 | + net->ipv6.sysctl.ip6_rt_gc_interval)); |
1468 | del_timer(net->ipv6.ip6_fib_timer); | 1465 | else |
1469 | net->ipv6.ip6_fib_timer->expires = 0; | 1466 | del_timer(&net->ipv6.ip6_fib_timer); |
1470 | } | ||
1471 | spin_unlock_bh(&fib6_gc_lock); | 1467 | spin_unlock_bh(&fib6_gc_lock); |
1472 | } | 1468 | } |
1473 | 1469 | ||
@@ -1478,24 +1474,15 @@ static void fib6_gc_timer_cb(unsigned long arg) | |||
1478 | 1474 | ||
1479 | static int fib6_net_init(struct net *net) | 1475 | static int fib6_net_init(struct net *net) |
1480 | { | 1476 | { |
1481 | int ret; | 1477 | setup_timer(&net->ipv6.ip6_fib_timer, fib6_gc_timer_cb, (unsigned long)net); |
1482 | struct timer_list *timer; | ||
1483 | |||
1484 | ret = -ENOMEM; | ||
1485 | timer = kzalloc(sizeof(*timer), GFP_KERNEL); | ||
1486 | if (!timer) | ||
1487 | goto out; | ||
1488 | |||
1489 | setup_timer(timer, fib6_gc_timer_cb, (unsigned long)net); | ||
1490 | net->ipv6.ip6_fib_timer = timer; | ||
1491 | 1478 | ||
1492 | net->ipv6.rt6_stats = kzalloc(sizeof(*net->ipv6.rt6_stats), GFP_KERNEL); | 1479 | net->ipv6.rt6_stats = kzalloc(sizeof(*net->ipv6.rt6_stats), GFP_KERNEL); |
1493 | if (!net->ipv6.rt6_stats) | 1480 | if (!net->ipv6.rt6_stats) |
1494 | goto out_timer; | 1481 | goto out_timer; |
1495 | 1482 | ||
1496 | net->ipv6.fib_table_hash = | 1483 | net->ipv6.fib_table_hash = kcalloc(FIB_TABLE_HASHSZ, |
1497 | kzalloc(sizeof(*net->ipv6.fib_table_hash)*FIB_TABLE_HASHSZ, | 1484 | sizeof(*net->ipv6.fib_table_hash), |
1498 | GFP_KERNEL); | 1485 | GFP_KERNEL); |
1499 | if (!net->ipv6.fib_table_hash) | 1486 | if (!net->ipv6.fib_table_hash) |
1500 | goto out_rt6_stats; | 1487 | goto out_rt6_stats; |
1501 | 1488 | ||
@@ -1521,9 +1508,7 @@ static int fib6_net_init(struct net *net) | |||
1521 | #endif | 1508 | #endif |
1522 | fib6_tables_init(net); | 1509 | fib6_tables_init(net); |
1523 | 1510 | ||
1524 | ret = 0; | 1511 | return 0; |
1525 | out: | ||
1526 | return ret; | ||
1527 | 1512 | ||
1528 | #ifdef CONFIG_IPV6_MULTIPLE_TABLES | 1513 | #ifdef CONFIG_IPV6_MULTIPLE_TABLES |
1529 | out_fib6_main_tbl: | 1514 | out_fib6_main_tbl: |
@@ -1534,15 +1519,14 @@ out_fib_table_hash: | |||
1534 | out_rt6_stats: | 1519 | out_rt6_stats: |
1535 | kfree(net->ipv6.rt6_stats); | 1520 | kfree(net->ipv6.rt6_stats); |
1536 | out_timer: | 1521 | out_timer: |
1537 | kfree(timer); | 1522 | return -ENOMEM; |
1538 | goto out; | ||
1539 | } | 1523 | } |
1540 | 1524 | ||
1541 | static void fib6_net_exit(struct net *net) | 1525 | static void fib6_net_exit(struct net *net) |
1542 | { | 1526 | { |
1543 | rt6_ifdown(net, NULL); | 1527 | rt6_ifdown(net, NULL); |
1544 | del_timer_sync(net->ipv6.ip6_fib_timer); | 1528 | del_timer_sync(&net->ipv6.ip6_fib_timer); |
1545 | kfree(net->ipv6.ip6_fib_timer); | 1529 | |
1546 | #ifdef CONFIG_IPV6_MULTIPLE_TABLES | 1530 | #ifdef CONFIG_IPV6_MULTIPLE_TABLES |
1547 | kfree(net->ipv6.fib6_local_tbl); | 1531 | kfree(net->ipv6.fib6_local_tbl); |
1548 | #endif | 1532 | #endif |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 615b328de251..86540b24b27c 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -978,13 +978,12 @@ out: | |||
978 | return &rt->u.dst; | 978 | return &rt->u.dst; |
979 | } | 979 | } |
980 | 980 | ||
981 | int icmp6_dst_gc(int *more) | 981 | int icmp6_dst_gc(void) |
982 | { | 982 | { |
983 | struct dst_entry *dst, *next, **pprev; | 983 | struct dst_entry *dst, *next, **pprev; |
984 | int freed; | 984 | int more = 0; |
985 | 985 | ||
986 | next = NULL; | 986 | next = NULL; |
987 | freed = 0; | ||
988 | 987 | ||
989 | spin_lock_bh(&icmp6_dst_lock); | 988 | spin_lock_bh(&icmp6_dst_lock); |
990 | pprev = &icmp6_dst_gc_list; | 989 | pprev = &icmp6_dst_gc_list; |
@@ -993,16 +992,15 @@ int icmp6_dst_gc(int *more) | |||
993 | if (!atomic_read(&dst->__refcnt)) { | 992 | if (!atomic_read(&dst->__refcnt)) { |
994 | *pprev = dst->next; | 993 | *pprev = dst->next; |
995 | dst_free(dst); | 994 | dst_free(dst); |
996 | freed++; | ||
997 | } else { | 995 | } else { |
998 | pprev = &dst->next; | 996 | pprev = &dst->next; |
999 | (*more)++; | 997 | ++more; |
1000 | } | 998 | } |
1001 | } | 999 | } |
1002 | 1000 | ||
1003 | spin_unlock_bh(&icmp6_dst_lock); | 1001 | spin_unlock_bh(&icmp6_dst_lock); |
1004 | 1002 | ||
1005 | return freed; | 1003 | return more; |
1006 | } | 1004 | } |
1007 | 1005 | ||
1008 | static int ip6_dst_gc(struct dst_ops *ops) | 1006 | static int ip6_dst_gc(struct dst_ops *ops) |
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index fccc250f95f5..532e4faa29f7 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c | |||
@@ -73,6 +73,7 @@ static const struct proto_ops nr_proto_ops; | |||
73 | * separate class since they always nest. | 73 | * separate class since they always nest. |
74 | */ | 74 | */ |
75 | static struct lock_class_key nr_netdev_xmit_lock_key; | 75 | static struct lock_class_key nr_netdev_xmit_lock_key; |
76 | static struct lock_class_key nr_netdev_addr_lock_key; | ||
76 | 77 | ||
77 | static void nr_set_lockdep_one(struct net_device *dev, | 78 | static void nr_set_lockdep_one(struct net_device *dev, |
78 | struct netdev_queue *txq, | 79 | struct netdev_queue *txq, |
@@ -83,6 +84,7 @@ static void nr_set_lockdep_one(struct net_device *dev, | |||
83 | 84 | ||
84 | static void nr_set_lockdep_key(struct net_device *dev) | 85 | static void nr_set_lockdep_key(struct net_device *dev) |
85 | { | 86 | { |
87 | lockdep_set_class(&dev->addr_list_lock, &nr_netdev_addr_lock_key); | ||
86 | netdev_for_each_tx_queue(dev, nr_set_lockdep_one, NULL); | 88 | netdev_for_each_tx_queue(dev, nr_set_lockdep_one, NULL); |
87 | } | 89 | } |
88 | 90 | ||
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index dbc963b4f5fb..a7f1ce11bc22 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c | |||
@@ -74,6 +74,7 @@ ax25_address rose_callsign; | |||
74 | * separate class since they always nest. | 74 | * separate class since they always nest. |
75 | */ | 75 | */ |
76 | static struct lock_class_key rose_netdev_xmit_lock_key; | 76 | static struct lock_class_key rose_netdev_xmit_lock_key; |
77 | static struct lock_class_key rose_netdev_addr_lock_key; | ||
77 | 78 | ||
78 | static void rose_set_lockdep_one(struct net_device *dev, | 79 | static void rose_set_lockdep_one(struct net_device *dev, |
79 | struct netdev_queue *txq, | 80 | struct netdev_queue *txq, |
@@ -84,6 +85,7 @@ static void rose_set_lockdep_one(struct net_device *dev, | |||
84 | 85 | ||
85 | static void rose_set_lockdep_key(struct net_device *dev) | 86 | static void rose_set_lockdep_key(struct net_device *dev) |
86 | { | 87 | { |
88 | lockdep_set_class(&dev->addr_list_lock, &rose_netdev_addr_lock_key); | ||
87 | netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL); | 89 | netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL); |
88 | } | 90 | } |
89 | 91 | ||
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 5219d5f9d754..b0601642e227 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
@@ -447,7 +447,7 @@ void qdisc_watchdog_cancel(struct qdisc_watchdog *wd) | |||
447 | } | 447 | } |
448 | EXPORT_SYMBOL(qdisc_watchdog_cancel); | 448 | EXPORT_SYMBOL(qdisc_watchdog_cancel); |
449 | 449 | ||
450 | struct hlist_head *qdisc_class_hash_alloc(unsigned int n) | 450 | static struct hlist_head *qdisc_class_hash_alloc(unsigned int n) |
451 | { | 451 | { |
452 | unsigned int size = n * sizeof(struct hlist_head), i; | 452 | unsigned int size = n * sizeof(struct hlist_head), i; |
453 | struct hlist_head *h; | 453 | struct hlist_head *h; |
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index cb625b4d6da5..4ac7e3a8c253 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -212,9 +212,9 @@ static void dev_watchdog(unsigned long arg) | |||
212 | if (some_queue_stopped && | 212 | if (some_queue_stopped && |
213 | time_after(jiffies, (dev->trans_start + | 213 | time_after(jiffies, (dev->trans_start + |
214 | dev->watchdog_timeo))) { | 214 | dev->watchdog_timeo))) { |
215 | printk(KERN_INFO "NETDEV WATCHDOG: %s: " | 215 | char drivername[64]; |
216 | "transmit timed out\n", | 216 | printk(KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n", |
217 | dev->name); | 217 | dev->name, netdev_drivername(dev, drivername, 64)); |
218 | dev->tx_timeout(dev); | 218 | dev->tx_timeout(dev); |
219 | WARN_ON_ONCE(1); | 219 | WARN_ON_ONCE(1); |
220 | } | 220 | } |
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 70ead8dc3485..4328ad5439c9 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c | |||
@@ -71,6 +71,8 @@ static void sctp_mark_missing(struct sctp_outq *q, | |||
71 | 71 | ||
72 | static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn); | 72 | static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn); |
73 | 73 | ||
74 | static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout); | ||
75 | |||
74 | /* Add data to the front of the queue. */ | 76 | /* Add data to the front of the queue. */ |
75 | static inline void sctp_outq_head_data(struct sctp_outq *q, | 77 | static inline void sctp_outq_head_data(struct sctp_outq *q, |
76 | struct sctp_chunk *ch) | 78 | struct sctp_chunk *ch) |
@@ -712,7 +714,7 @@ int sctp_outq_uncork(struct sctp_outq *q) | |||
712 | * locking concerns must be made. Today we use the sock lock to protect | 714 | * locking concerns must be made. Today we use the sock lock to protect |
713 | * this function. | 715 | * this function. |
714 | */ | 716 | */ |
715 | int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout) | 717 | static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout) |
716 | { | 718 | { |
717 | struct sctp_packet *packet; | 719 | struct sctp_packet *packet; |
718 | struct sctp_packet singleton; | 720 | struct sctp_packet singleton; |
diff --git a/net/sctp/proc.c b/net/sctp/proc.c index 5dd89831eceb..f268910620be 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c | |||
@@ -519,8 +519,3 @@ int __init sctp_remaddr_proc_init(void) | |||
519 | 519 | ||
520 | return 0; | 520 | return 0; |
521 | } | 521 | } |
522 | |||
523 | void sctp_assoc_proc_exit(void) | ||
524 | { | ||
525 | remove_proc_entry("remaddr", proc_net_sctp); | ||
526 | } | ||