author     David Woodhouse <dwmw2@infradead.org>   2006-08-30 18:30:38 -0400
committer  David Woodhouse <dwmw2@infradead.org>   2006-08-30 18:30:38 -0400
commit     0a7d5f8ce960e74fa22986bda4af488539796e49
tree       e29ad17808a5c3410518e22dae8dfe94801b59f3 /drivers/net
parent     0165508c80a2b5d5268d9c5dfa9b30c534a33693
parent     dc709bd190c130b299ac19d596594256265c042a
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'drivers/net')
158 files changed, 19303 insertions, 3861 deletions
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index 4532b17e40ea..aedfddf20cb3 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -1003,7 +1003,8 @@ static int corkscrew_start_xmit(struct sk_buff *skb,
     /* Calculate the next Tx descriptor entry. */
     int entry = vp->cur_tx % TX_RING_SIZE;
     struct boom_tx_desc *prev_entry;
-    unsigned long flags, i;
+    unsigned long flags;
+    int i;
 
     if (vp->tx_full)    /* No room to transmit with */
         return 1;
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 2819de79442c..80e8ca013e44 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -17,172 +17,6 @@
     410 Severn Ave., Suite 210
     Annapolis MD 21403
 
-    Linux Kernel Additions:
-
-    0.99H+lk0.9 - David S. Miller - softnet, PCI DMA updates
-    0.99H+lk1.0 - Jeff Garzik <jgarzik@pobox.com>
-      Remove compatibility defines for kernel versions < 2.2.x.
-      Update for new 2.3.x module interface
-    LK1.1.2 (March 19, 2000)
-      * New PCI interface (jgarzik)
-
-    LK1.1.3 25 April 2000, Andrew Morton <andrewm@uow.edu.au>
-    - Merged with 3c575_cb.c
-    - Don't set RxComplete in boomerang interrupt enable reg
-    - spinlock in vortex_timer to protect mdio functions
-    - disable local interrupts around call to vortex_interrupt in
-      vortex_tx_timeout() (So vortex_interrupt can use spin_lock())
-    - Select window 3 in vortex_timer()'s write to Wn3_MAC_Ctrl
-    - In vortex_start_xmit(), move the lock to _after_ we've altered
-      vp->cur_tx and vp->tx_full. This defeats the race between
-      vortex_start_xmit() and vortex_interrupt which was identified
-      by Bogdan Costescu.
-    - Merged back support for six new cards from various sources
-    - Set vortex_have_pci if pci_module_init returns zero (fixes cardbus
-      insertion oops)
-    - Tell it that 3c905C has NWAY for 100bT autoneg
-    - Fix handling of SetStatusEnd in 'Too much work..' code, as
-      per 2.3.99's 3c575_cb (Dave Hinds).
-    - Split ISR into two for vortex & boomerang
-    - Fix MOD_INC/DEC races
-    - Handle resource allocation failures.
-    - Fix 3CCFE575CT LED polarity
-    - Make tx_interrupt_mitigation the default
-
-    LK1.1.4 25 April 2000, Andrew Morton <andrewm@uow.edu.au>
-    - Add extra TxReset to vortex_up() to fix 575_cb hotplug initialisation probs.
-    - Put vortex_info_tbl into __devinitdata
-    - In the vortex_error StatsFull HACK, disable stats in vp->intr_enable as well
-      as in the hardware.
-    - Increased the loop counter in issue_and_wait from 2,000 to 4,000.
-
-    LK1.1.5 28 April 2000, andrewm
-    - Added powerpc defines (John Daniel <jdaniel@etresoft.com> said these work...)
-    - Some extra diagnostics
-    - In vortex_error(), reset the Tx on maxCollisions. Otherwise most
-      chips usually get a Tx timeout.
-    - Added extra_reset module parm
-    - Replaced some inline timer manip with mod_timer
-      (François romieu <Francois.Romieu@nic.fr>)
-    - In vortex_up(), don't make Wn3_config initialisation dependent upon has_nway
-      (this came across from 3c575_cb).
-
-    LK1.1.6 06 Jun 2000, andrewm
-    - Backed out the PPC defines.
-    - Use del_timer_sync(), mod_timer().
-    - Fix wrapped ulong comparison in boomerang_rx()
-    - Add IS_TORNADO, use it to suppress 3c905C checksum error msg
-      (Donald Becker, I Lee Hetherington <ilh@sls.lcs.mit.edu>)
-    - Replace union wn3_config with BFINS/BFEXT manipulation for
-      sparc64 (Pete Zaitcev, Peter Jones)
-    - In vortex_error, do_tx_reset and vortex_tx_timeout(Vortex):
-      do a netif_wake_queue() to better recover from errors. (Anders Pedersen,
-      Donald Becker)
-    - Print a warning on out-of-memory (rate limited to 1 per 10 secs)
-    - Added two more Cardbus 575 NICs: 5b57 and 6564 (Paul Wagland)
-
-    LK1.1.7 2 Jul 2000 andrewm
-    - Better handling of shared IRQs
-    - Reset the transmitter on a Tx reclaim error
-    - Fixed crash under OOM during vortex_open() (Mark Hemment)
-    - Fix Rx cessation problem during OOM (help from Mark Hemment)
-    - The spinlocks around the mdio access were blocking interrupts for 300uS.
-      Fix all this to use spin_lock_bh() within mdio_read/write
-    - Only write to TxFreeThreshold if it's a boomerang - other NICs don't
-      have one.
-    - Added 802.3x MAC-layer flow control support
-
-    LK1.1.8 13 Aug 2000 andrewm
-    - Ignore request_region() return value - already reserved if Cardbus.
-    - Merged some additional Cardbus flags from Don's 0.99Qk
-    - Some fixes for 3c556 (Fred Maciel)
-    - Fix for EISA initialisation (Jan Rekorajski)
-    - Renamed MII_XCVR_PWR and EEPROM_230 to align with 3c575_cb and D. Becker's drivers
-    - Fixed MII_XCVR_PWR for 3CCFE575CT
-    - Added INVERT_LED_PWR, used it.
-    - Backed out the extra_reset stuff
-
-    LK1.1.9 12 Sep 2000 andrewm
-    - Backed out the tx_reset_resume flags. It was a no-op.
-    - In vortex_error, don't reset the Tx on txReclaim errors
-    - In vortex_error, don't reset the Tx on maxCollisions errors.
-      Hence backed out all the DownListPtr logic here.
-    - In vortex_error, give Tornado cards a partial TxReset on
-      maxCollisions (David Hinds). Defined MAX_COLLISION_RESET for this.
-    - Redid some driver flags and device names based on pcmcia_cs-3.1.20.
-    - Fixed a bug where, if vp->tx_full is set when the interface
-      is downed, it remains set when the interface is upped. Bad
-      things happen.
-
-    LK1.1.10 17 Sep 2000 andrewm
-    - Added EEPROM_8BIT for 3c555 (Fred Maciel)
-    - Added experimental support for the 3c556B Laptop Hurricane (Louis Gerbarg)
-    - Add HAS_NWAY to "3c900 Cyclone 10Mbps TPO"
-
-    LK1.1.11 13 Nov 2000 andrewm
-    - Dump MOD_INC/DEC_USE_COUNT, use SET_MODULE_OWNER
-
-    LK1.1.12 1 Jan 2001 andrewm (2.4.0-pre1)
-    - Call pci_enable_device before we request our IRQ (Tobias Ringstrom)
-    - Add 3c590 PCI latency timer hack to vortex_probe1 (from 0.99Ra)
-    - Added extended issue_and_wait for the 3c905CX.
-    - Look for an MII on PHY index 24 first (3c905CX oddity).
-    - Add HAS_NWAY to 3cSOHO100-TX (Brett Frankenberger)
-    - Don't free skbs we don't own on oom path in vortex_open().
-
-    LK1.1.13 27 Jan 2001
-    - Added explicit `medialock' flag so we can truly
-      lock the media type down with `options'.
-    - "check ioremap return and some tidbits" (Arnaldo Carvalho de Melo <acme@conectiva.com.br>)
-    - Added and used EEPROM_NORESET for 3c556B PM resumes.
-    - Fixed leakage of vp->rx_ring.
-    - Break out separate HAS_HWCKSM device capability flag.
-    - Kill vp->tx_full (ANK)
-    - Merge zerocopy fragment handling (ANK?)
-
-    LK1.1.14 15 Feb 2001
-    - Enable WOL. Can be turned on with `enable_wol' module option.
-    - EISA and PCI initialisation fixes (jgarzik, Manfred Spraul)
-    - If a device's internalconfig register reports it has NWAY,
-      use it, even if autoselect is enabled.
-
-    LK1.1.15 6 June 2001 akpm
-    - Prevent double counting of received bytes (Lars Christensen)
-    - Add ethtool support (jgarzik)
-    - Add module parm descriptions (Andrzej M. Krzysztofowicz)
-    - Implemented alloc_etherdev() API
-    - Special-case the 'Tx error 82' message.
-
-    LK1.1.16 18 July 2001 akpm
-    - Make NETIF_F_SG dependent upon nr_free_highpages(), not on CONFIG_HIGHMEM
-    - Lessen verbosity of bootup messages
-    - Fix WOL - use new PM API functions.
-    - Use netif_running() instead of vp->open in suspend/resume.
-    - Don't reset the interface logic on open/close/rmmod. It upsets
-      autonegotiation, and hence DHCP (from 0.99T).
-    - Back out EEPROM_NORESET flag because of the above (we do it for all
-      NICs).
-    - Correct 3c982 identification string
-    - Rename wait_for_completion() to issue_and_wait() to avoid completion.h
-      clash.
-
-    LK1.1.17 18Dec01 akpm
-    - PCI ID 9805 is a Python-T, not a dual-port Cyclone. Apparently.
-      And it has NWAY.
-    - Mask our advertised modes (vp->advertising) with our capabilities
-      (MII reg5) when deciding which duplex mode to use.
-    - Add `global_options' as default for options[]. Ditto global_enable_wol,
-      global_full_duplex.
-
-    LK1.1.18 01Jul02 akpm
-    - Fix for undocumented transceiver power-up bit on some 3c566B's
-      (Donald Becker, Rahul Karnik)
-
-    - See http://www.zip.com.au/~akpm/linux/#3c59x-2.3 for more details.
-    - Also see Documentation/networking/vortex.txt
-
-    LK1.1.19 10Nov02 Marc Zyngier <maz@wild-wind.fr.eu.org>
-    - EISA sysfs integration.
 */
 
 /*
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 1959654cbec8..1428bb7715af 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -1836,9 +1836,10 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 
     if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
         pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pci_rev < 0x20) {
-        printk(KERN_ERR PFX "pci dev %s (id %04x:%04x rev %02x) is not an 8139C+ compatible chip\n",
-               pci_name(pdev), pdev->vendor, pdev->device, pci_rev);
-        printk(KERN_ERR PFX "Try the \"8139too\" driver instead.\n");
+        dev_err(&pdev->dev,
+                "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip\n",
+                pdev->vendor, pdev->device, pci_rev);
+        dev_err(&pdev->dev, "Try the \"8139too\" driver instead.\n");
         return -ENODEV;
     }
 
@@ -1876,14 +1877,13 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
     pciaddr = pci_resource_start(pdev, 1);
     if (!pciaddr) {
         rc = -EIO;
-        printk(KERN_ERR PFX "no MMIO resource for pci dev %s\n",
-               pci_name(pdev));
+        dev_err(&pdev->dev, "no MMIO resource\n");
         goto err_out_res;
     }
     if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
         rc = -EIO;
-        printk(KERN_ERR PFX "MMIO resource (%llx) too small on pci dev %s\n",
-               (unsigned long long)pci_resource_len(pdev, 1), pci_name(pdev));
+        dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
+                (unsigned long long)pci_resource_len(pdev, 1));
         goto err_out_res;
     }
 
@@ -1897,14 +1897,15 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 
         rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
         if (rc) {
-            printk(KERN_ERR PFX "No usable DMA configuration, "
-                   "aborting.\n");
+            dev_err(&pdev->dev,
+                    "No usable DMA configuration, aborting.\n");
             goto err_out_res;
         }
         rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
         if (rc) {
-            printk(KERN_ERR PFX "No usable consistent DMA configuration, "
-                   "aborting.\n");
+            dev_err(&pdev->dev,
+                    "No usable consistent DMA configuration, "
+                    "aborting.\n");
             goto err_out_res;
         }
     }
@@ -1915,9 +1916,9 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
     regs = ioremap(pciaddr, CP_REGS_SIZE);
     if (!regs) {
         rc = -EIO;
-        printk(KERN_ERR PFX "Cannot map PCI MMIO (%llx@%llx) on pci dev %s\n",
+        dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
                (unsigned long long)pci_resource_len(pdev, 1),
-               (unsigned long long)pciaddr, pci_name(pdev));
+               (unsigned long long)pciaddr);
         goto err_out_res;
     }
     dev->base_addr = (unsigned long) regs;
@@ -1986,7 +1987,8 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
     /* enable busmastering and memory-write-invalidate */
     pci_set_master(pdev);
 
-    if (cp->wol_enabled) cp_set_d3_state (cp);
+    if (cp->wol_enabled)
+        cp_set_d3_state (cp);
 
     return 0;
 
@@ -2011,7 +2013,8 @@ static void cp_remove_one (struct pci_dev *pdev)
     BUG_ON(!dev);
     unregister_netdev(dev);
     iounmap(cp->regs);
-    if (cp->wol_enabled) pci_set_power_state (pdev, PCI_D0);
+    if (cp->wol_enabled)
+        pci_set_power_state (pdev, PCI_D0);
     pci_release_regions(pdev);
     pci_clear_mwi(pdev);
     pci_disable_device(pdev);
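A pattern repeated throughout this merge replaces printk(KERN_ERR PFX "...", pci_name(pdev), ...) with dev_err(&pdev->dev, ...), which lets the driver core prefix the message with the driver and device name instead of spelling it out by hand. A minimal sketch of the two styles for comparison; the probe function and message text below are made up, not taken from 8139cp:

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev)   /* hypothetical function */
{
    /* Old style: driver prefix and device name formatted by hand. */
    printk(KERN_ERR "mydrv: %s: no MMIO resource\n", pci_name(pdev));

    /* New style: the device core prints "mydrv 0000:01:00.0: " itself. */
    dev_err(&pdev->dev, "no MMIO resource\n");

    return -EIO;
}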
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 717506b2b13a..e4f4eaff7679 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -768,7 +768,7 @@ static int __devinit rtl8139_init_board (struct pci_dev *pdev,
     /* dev and priv zeroed in alloc_etherdev */
     dev = alloc_etherdev (sizeof (*tp));
     if (dev == NULL) {
-        printk (KERN_ERR PFX "%s: Unable to alloc new net device\n", pci_name(pdev));
+        dev_err(&pdev->dev, "Unable to alloc new net device\n");
         return -ENOMEM;
     }
     SET_MODULE_OWNER(dev);
@@ -800,31 +800,31 @@ static int __devinit rtl8139_init_board (struct pci_dev *pdev,
 #ifdef USE_IO_OPS
     /* make sure PCI base addr 0 is PIO */
     if (!(pio_flags & IORESOURCE_IO)) {
-        printk (KERN_ERR PFX "%s: region #0 not a PIO resource, aborting\n", pci_name(pdev));
+        dev_err(&pdev->dev, "region #0 not a PIO resource, aborting\n");
         rc = -ENODEV;
         goto err_out;
     }
     /* check for weird/broken PCI region reporting */
     if (pio_len < RTL_MIN_IO_SIZE) {
-        printk (KERN_ERR PFX "%s: Invalid PCI I/O region size(s), aborting\n", pci_name(pdev));
+        dev_err(&pdev->dev, "Invalid PCI I/O region size(s), aborting\n");
         rc = -ENODEV;
         goto err_out;
     }
 #else
     /* make sure PCI base addr 1 is MMIO */
     if (!(mmio_flags & IORESOURCE_MEM)) {
-        printk (KERN_ERR PFX "%s: region #1 not an MMIO resource, aborting\n", pci_name(pdev));
+        dev_err(&pdev->dev, "region #1 not an MMIO resource, aborting\n");
         rc = -ENODEV;
         goto err_out;
     }
     if (mmio_len < RTL_MIN_IO_SIZE) {
-        printk (KERN_ERR PFX "%s: Invalid PCI mem region size(s), aborting\n", pci_name(pdev));
+        dev_err(&pdev->dev, "Invalid PCI mem region size(s), aborting\n");
         rc = -ENODEV;
         goto err_out;
     }
 #endif
 
-    rc = pci_request_regions (pdev, "8139too");
+    rc = pci_request_regions (pdev, DRV_NAME);
     if (rc)
         goto err_out;
     disable_dev_on_err = 1;
@@ -835,7 +835,7 @@ static int __devinit rtl8139_init_board (struct pci_dev *pdev,
 #ifdef USE_IO_OPS
     ioaddr = ioport_map(pio_start, pio_len);
     if (!ioaddr) {
-        printk (KERN_ERR PFX "%s: cannot map PIO, aborting\n", pci_name(pdev));
+        dev_err(&pdev->dev, "cannot map PIO, aborting\n");
         rc = -EIO;
         goto err_out;
     }
@@ -846,7 +846,7 @@ static int __devinit rtl8139_init_board (struct pci_dev *pdev,
     /* ioremap MMIO region */
     ioaddr = pci_iomap(pdev, 1, 0);
     if (ioaddr == NULL) {
-        printk (KERN_ERR PFX "%s: cannot remap MMIO, aborting\n", pci_name(pdev));
+        dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
         rc = -EIO;
         goto err_out;
     }
@@ -860,8 +860,7 @@ static int __devinit rtl8139_init_board (struct pci_dev *pdev,
 
     /* check for missing/broken hardware */
     if (RTL_R32 (TxConfig) == 0xFFFFFFFF) {
-        printk (KERN_ERR PFX "%s: Chip not responding, ignoring board\n",
-                pci_name(pdev));
+        dev_err(&pdev->dev, "Chip not responding, ignoring board\n");
         rc = -EIO;
         goto err_out;
     }
@@ -875,9 +874,10 @@ static int __devinit rtl8139_init_board (struct pci_dev *pdev,
     }
 
     /* if unknown chip, assume array element #0, original RTL-8139 in this case */
-    printk (KERN_DEBUG PFX "%s: unknown chip version, assuming RTL-8139\n",
-            pci_name(pdev));
-    printk (KERN_DEBUG PFX "%s: TxConfig = 0x%lx\n", pci_name(pdev), RTL_R32 (TxConfig));
+    dev_printk (KERN_DEBUG, &pdev->dev,
+                "unknown chip version, assuming RTL-8139\n");
+    dev_printk (KERN_DEBUG, &pdev->dev,
+                "TxConfig = 0x%lx\n", RTL_R32 (TxConfig));
     tp->chipset = 0;
 
 match:
@@ -954,9 +954,11 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
 
     if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
         pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pci_rev >= 0x20) {
-        printk(KERN_INFO PFX "pci dev %s (id %04x:%04x rev %02x) is an enhanced 8139C+ chip\n",
-               pci_name(pdev), pdev->vendor, pdev->device, pci_rev);
-        printk(KERN_INFO PFX "Use the \"8139cp\" driver for improved performance and stability.\n");
+        dev_info(&pdev->dev,
+                 "This (id %04x:%04x rev %02x) is an enhanced 8139C+ chip\n",
+                 pdev->vendor, pdev->device, pci_rev);
+        dev_info(&pdev->dev,
+                 "Use the \"8139cp\" driver for improved performance and stability.\n");
     }
 
     i = rtl8139_init_board (pdev, &dev);
@@ -1707,6 +1709,7 @@ static int rtl8139_start_xmit (struct sk_buff *skb, struct net_device *dev)
     void __iomem *ioaddr = tp->mmio_addr;
     unsigned int entry;
     unsigned int len = skb->len;
+    unsigned long flags;
 
     /* Calculate the next Tx descriptor entry. */
     entry = tp->cur_tx % NUM_TX_DESC;
@@ -1723,7 +1726,7 @@ static int rtl8139_start_xmit (struct sk_buff *skb, struct net_device *dev)
         return 0;
     }
 
-    spin_lock_irq(&tp->lock);
+    spin_lock_irqsave(&tp->lock, flags);
     RTL_W32_F (TxStatus0 + (entry * sizeof (u32)),
                tp->tx_flag | max(len, (unsigned int)ETH_ZLEN));
 
@@ -1734,7 +1737,7 @@ static int rtl8139_start_xmit (struct sk_buff *skb, struct net_device *dev)
 
     if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx)
         netif_stop_queue (dev);
-    spin_unlock_irq(&tp->lock);
+    spin_unlock_irqrestore(&tp->lock, flags);
 
     if (netif_msg_tx_queued(tp))
         printk (KERN_DEBUG "%s: Queued Tx packet size %u to slot %d.\n",
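The rtl8139_start_xmit hunk above switches from spin_lock_irq()/spin_unlock_irq() to the irqsave/irqrestore variants, which remember whether interrupts were already disabled instead of unconditionally re-enabling them on unlock. A minimal sketch of the idiom; the lock and the critical section are placeholders, not 8139too's actual data:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);   /* placeholder lock */

static void example_critical_section(void)
{
    unsigned long flags;

    /* Save the current IRQ state, disable IRQs, take the lock ... */
    spin_lock_irqsave(&example_lock, flags);
    /* ... touch data shared with the interrupt handler here ... */
    spin_unlock_irqrestore(&example_lock, flags);
    /* ... and restore the saved state, so a caller that already had
       IRQs disabled does not get them re-enabled behind its back. */
}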
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index 7e2ca9571467..257d3bce3993 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -899,7 +899,7 @@ memory_squeeze:
 }
 
 
-static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
+static void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
 {
     struct i596_cmd *ptr;
 
@@ -932,7 +932,8 @@ static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private
     lp->scb.cmd = I596_NULL;
 }
 
-static inline void i596_reset(struct net_device *dev, struct i596_private *lp, int ioaddr)
+static void i596_reset(struct net_device *dev, struct i596_private *lp,
+                       int ioaddr)
 {
     unsigned long flags;
 
@@ -1578,7 +1579,7 @@ static int debug = -1;
 module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "i82596 debug mask");
 
-int init_module(void)
+int __init init_module(void)
 {
     if (debug >= 0)
         i596_debug = debug;
@@ -1588,7 +1589,7 @@ int init_module(void)
     return 0;
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
     unregister_netdev(dev_82596);
 #ifdef __mc68000__
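Several drivers in this merge (82596, ac3200, cops, at1700, cs89x0) gain __init and __exit annotations on their module entry points, so the initialization code can be discarded once it has run and the exit code can be dropped entirely when the driver is built in. A minimal sketch of the idiom; the function bodies are placeholders:

#include <linux/init.h>
#include <linux/module.h>

int __init init_module(void)        /* placed in the init text section */
{
    /* probe the hardware and register the net_device here */
    return 0;
}

void __exit cleanup_module(void)    /* discarded for built-in drivers */
{
    /* unregister and free the net_device here */
}

MODULE_LICENSE("GPL");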
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 39189903e355..30b3671d833d 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1724,6 +1724,20 @@ config VIA_RHINE_MMIO
 
       If unsure, say Y.
 
+config VIA_RHINE_NAPI
+    bool "Use Rx Polling (NAPI)"
+    depends on VIA_RHINE
+    help
+      NAPI is a new driver API designed to reduce CPU and interrupt load
+      when the driver is receiving lots of packets from the card.
+
+      If your estimated Rx load is 10kpps or more, or if the card will be
+      deployed on potentially unfriendly networks (e.g. in a firewall),
+      then say Y here.
+
+      See <file:Documentation/networking/NAPI_HOWTO.txt> for more
+      information.
+
 config LAN_SAA9730
     bool "Philips SAA9730 Ethernet support (EXPERIMENTAL)"
     depends on NET_PCI && EXPERIMENTAL && MIPS
@@ -2219,6 +2233,33 @@ config GFAR_NAPI
     bool "NAPI Support"
     depends on GIANFAR
 
+config UCC_GETH
+    tristate "Freescale QE UCC GETH"
+    depends on QUICC_ENGINE && UCC_FAST
+    help
+      This driver supports the Gigabit Ethernet mode of QE UCC.
+      QE can be found on MPC836x CPUs.
+
+config UGETH_NAPI
+    bool "NAPI Support"
+    depends on UCC_GETH
+
+config UGETH_MAGIC_PACKET
+    bool "Magic Packet detection support"
+    depends on UCC_GETH
+
+config UGETH_FILTERING
+    bool "Mac address filtering support"
+    depends on UCC_GETH
+
+config UGETH_TX_ON_DEMOND
+    bool "Transmit on Demond support"
+    depends on UCC_GETH
+
+config UGETH_HAS_GIGA
+    bool
+    depends on UCC_GETH && MPC836x
+
 config MV643XX_ETH
     tristate "MV-643XX Ethernet support"
     depends on MOMENCO_OCELOT_C || MOMENCO_JAGUAR_ATX || MV64360 || MOMENCO_OCELOT_3 || PPC_MULTIPLATFORM
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index c91e95126f78..8427bf9dec9d 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -18,6 +18,9 @@ gianfar_driver-objs := gianfar.o \
     gianfar_mii.o \
     gianfar_sysfs.o
 
+obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
+ucc_geth_driver-objs := ucc_geth.o ucc_geth_phy.o
+
 #
 # link order important here
 #
diff --git a/drivers/net/ac3200.c b/drivers/net/ac3200.c
index 7952dc6d77e3..0fbbcb75af69 100644
--- a/drivers/net/ac3200.c
+++ b/drivers/net/ac3200.c
@@ -370,8 +370,7 @@ MODULE_PARM_DESC(mem, "Memory base address(es)");
 MODULE_DESCRIPTION("Ansel AC3200 EISA ethernet driver");
 MODULE_LICENSE("GPL");
 
-int
-init_module(void)
+int __init init_module(void)
 {
     struct net_device *dev;
     int this_dev, found = 0;
diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig
index b14e89004c3a..0a0e0cd81a23 100644
--- a/drivers/net/appletalk/Kconfig
+++ b/drivers/net/appletalk/Kconfig
@@ -29,7 +29,7 @@ config ATALK
       even politically correct people are allowed to say Y here.
 
 config DEV_APPLETALK
-    bool "Appletalk interfaces support"
+    tristate "Appletalk interfaces support"
     depends on ATALK
     help
       AppleTalk is the protocol that Apple computers can use to communicate
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index 1d01ac0000e4..ae7f828344d9 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -1030,7 +1030,7 @@ module_param(io, int, 0);
 module_param(irq, int, 0);
 module_param(board_type, int, 0);
 
-int init_module(void)
+int __init init_module(void)
 {
     if (io == 0)
         printk(KERN_WARNING "%s: You shouldn't autoprobe with insmod\n",
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index 5d7929c79bce..4ca061c2d5b2 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -901,7 +901,7 @@ MODULE_PARM_DESC(io, "AT1700/FMV18X I/O base address");
 MODULE_PARM_DESC(irq, "AT1700/FMV18X IRQ number");
 MODULE_PARM_DESC(net_debug, "AT1700/FMV18X debug level (0-6)");
 
-int init_module(void)
+int __init init_module(void)
 {
     if (io == 0)
         printk("at1700: You should not use auto-probing with insmod!\n");
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index cd98d31dee8c..bea0fc0ede2f 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -2120,13 +2120,14 @@ static int __devinit b44_init_one(struct pci_dev *pdev,
 
     err = pci_enable_device(pdev);
     if (err) {
-        printk(KERN_ERR PFX "Cannot enable PCI device, "
+        dev_err(&pdev->dev, "Cannot enable PCI device, "
                "aborting.\n");
         return err;
     }
 
     if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
-        printk(KERN_ERR PFX "Cannot find proper PCI device "
+        dev_err(&pdev->dev,
+                "Cannot find proper PCI device "
                "base address, aborting.\n");
         err = -ENODEV;
         goto err_out_disable_pdev;
@@ -2134,8 +2135,8 @@ static int __devinit b44_init_one(struct pci_dev *pdev,
 
     err = pci_request_regions(pdev, DRV_MODULE_NAME);
     if (err) {
-        printk(KERN_ERR PFX "Cannot obtain PCI resources, "
-               "aborting.\n");
+        dev_err(&pdev->dev,
+                "Cannot obtain PCI resources, aborting.\n");
         goto err_out_disable_pdev;
     }
 
@@ -2143,15 +2144,13 @@ static int __devinit b44_init_one(struct pci_dev *pdev,
 
     err = pci_set_dma_mask(pdev, (u64) B44_DMA_MASK);
     if (err) {
-        printk(KERN_ERR PFX "No usable DMA configuration, "
-               "aborting.\n");
+        dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
         goto err_out_free_res;
     }
 
     err = pci_set_consistent_dma_mask(pdev, (u64) B44_DMA_MASK);
     if (err) {
-        printk(KERN_ERR PFX "No usable DMA configuration, "
-               "aborting.\n");
+        dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
         goto err_out_free_res;
     }
 
@@ -2160,7 +2159,7 @@ static int __devinit b44_init_one(struct pci_dev *pdev,
 
     dev = alloc_etherdev(sizeof(*bp));
     if (!dev) {
-        printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
+        dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
         err = -ENOMEM;
         goto err_out_free_res;
     }
@@ -2181,8 +2180,7 @@ static int __devinit b44_init_one(struct pci_dev *pdev,
 
     bp->regs = ioremap(b44reg_base, b44reg_len);
     if (bp->regs == 0UL) {
-        printk(KERN_ERR PFX "Cannot map device registers, "
-               "aborting.\n");
+        dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
         err = -ENOMEM;
         goto err_out_free_dev;
     }
@@ -2212,8 +2210,8 @@ static int __devinit b44_init_one(struct pci_dev *pdev,
 
     err = b44_get_invariants(bp);
     if (err) {
-        printk(KERN_ERR PFX "Problem fetching invariants of chip, "
-               "aborting.\n");
+        dev_err(&pdev->dev,
+                "Problem fetching invariants of chip, aborting.\n");
         goto err_out_iounmap;
     }
 
@@ -2233,8 +2231,7 @@ static int __devinit b44_init_one(struct pci_dev *pdev,
 
     err = register_netdev(dev);
     if (err) {
-        printk(KERN_ERR PFX "Cannot register net device, "
-               "aborting.\n");
+        dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
         goto err_out_iounmap;
     }
 
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 4f4db5ae503b..652eb05a6c2d 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -56,8 +56,8 @@
 
 #define DRV_MODULE_NAME     "bnx2"
 #define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION  "1.4.43"
-#define DRV_MODULE_RELDATE  "June 28, 2006"
+#define DRV_MODULE_VERSION  "1.4.44"
+#define DRV_MODULE_RELDATE  "August 10, 2006"
 
 #define RUN_AT(x) (jiffies + (x))
 
@@ -209,8 +209,10 @@ MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
 
 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
 {
-    u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
+    u32 diff;
 
+    smp_mb();
+    diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
     if (diff > MAX_TX_DESC_CNT)
         diff = (diff & MAX_TX_DESC_CNT) - 1;
     return (bp->tx_ring_size - diff);
@@ -1569,7 +1571,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
     struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
     unsigned long align;
 
-    skb = dev_alloc_skb(bp->rx_buf_size);
+    skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
     if (skb == NULL) {
         return -ENOMEM;
     }
@@ -1578,7 +1580,6 @@
         skb_reserve(skb, 8 - align);
     }
 
-    skb->dev = bp->dev;
     mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
                              PCI_DMA_FROMDEVICE);
 
@@ -1639,7 +1640,7 @@ bnx2_tx_int(struct bnx2 *bp)
         skb = tx_buf->skb;
 #ifdef BCM_TSO
         /* partial BD completions possible with TSO packets */
-        if (skb_shinfo(skb)->gso_size) {
+        if (skb_is_gso(skb)) {
             u16 last_idx, last_ring_idx;
 
             last_idx = sw_cons +
@@ -1686,15 +1687,20 @@ bnx2_tx_int(struct bnx2 *bp)
     }
 
     bp->tx_cons = sw_cons;
+    /* Need to make the tx_cons update visible to bnx2_start_xmit()
+     * before checking for netif_queue_stopped().  Without the
+     * memory barrier, there is a small possibility that bnx2_start_xmit()
+     * will miss it and cause the queue to be stopped forever.
+     */
+    smp_mb();
 
-    if (unlikely(netif_queue_stopped(bp->dev))) {
-        spin_lock(&bp->tx_lock);
+    if (unlikely(netif_queue_stopped(bp->dev)) &&
+        (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
+        netif_tx_lock(bp->dev);
         if ((netif_queue_stopped(bp->dev)) &&
-            (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) {
-
+            (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
             netif_wake_queue(bp->dev);
-        }
-        spin_unlock(&bp->tx_lock);
+        netif_tx_unlock(bp->dev);
     }
 }
 
@@ -1786,7 +1792,7 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
         if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
             struct sk_buff *new_skb;
 
-            new_skb = dev_alloc_skb(len + 2);
+            new_skb = netdev_alloc_skb(bp->dev, len + 2);
             if (new_skb == NULL)
                 goto reuse_rx;
 
@@ -1797,7 +1803,6 @@
 
             skb_reserve(new_skb, 2);
             skb_put(new_skb, len);
-            new_skb->dev = bp->dev;
 
             bnx2_reuse_rx_skb(bp, skb,
                               sw_ring_cons, sw_ring_prod);
@@ -3503,6 +3508,8 @@ bnx2_init_tx_ring(struct bnx2 *bp)
     struct tx_bd *txbd;
     u32 val;
 
+    bp->tx_wake_thresh = bp->tx_ring_size / 2;
+
     txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
 
     txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
@@ -3952,7 +3959,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
         return -EINVAL;
 
     pkt_size = 1514;
-    skb = dev_alloc_skb(pkt_size);
+    skb = netdev_alloc_skb(bp->dev, pkt_size);
     if (!skb)
         return -ENOMEM;
     packet = skb_put(skb, pkt_size);
@@ -4390,10 +4397,8 @@ bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
 #endif
 
 /* Called with netif_tx_lock.
- * hard_start_xmit is pseudo-lockless - a lock is only required when
- * the tx queue is full. This way, we get the benefit of lockless
- * operations most of the time without the complexities to handle
- * netif_stop_queue/wake_queue race conditions.
+ * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
+ * netif_wake_queue().
 */
static int
 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -4512,12 +4517,9 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
     dev->trans_start = jiffies;
 
     if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
-        spin_lock(&bp->tx_lock);
         netif_stop_queue(dev);
-
-        if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)
+        if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
             netif_wake_queue(dev);
-        spin_unlock(&bp->tx_lock);
     }
 
     return NETDEV_TX_OK;
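The bnx2 hunks above drop tx_lock and rely on a stop/wake handshake instead: the transmit path stops the queue and re-checks ring space, while the completion path updates tx_cons, issues smp_mb(), and only then tests netif_queue_stopped(), taking netif_tx_lock just for the wake. A condensed sketch of that handshake; struct example_ring, ring_space() and the field names are illustrative stand-ins, not bnx2's real layout:

#include <linux/netdevice.h>

struct example_ring {               /* illustrative, not bnx2's struct */
    u16 tx_prod;
    u16 tx_cons;
    u32 size;
    u32 wake_thresh;
};

static u32 ring_space(struct example_ring *r)
{
    smp_mb();                       /* pairs with the completion-side barrier */
    return r->size - (u16)(r->tx_prod - r->tx_cons);
}

/* Transmit side: runs as hard_start_xmit under netif_tx_lock. */
static void example_tx_maybe_stop(struct net_device *dev, struct example_ring *r)
{
    if (ring_space(r) <= MAX_SKB_FRAGS) {
        netif_stop_queue(dev);
        /* Completion may have freed descriptors in between; re-check. */
        if (ring_space(r) > r->wake_thresh)
            netif_wake_queue(dev);
    }
}

/* Completion side: runs without netif_tx_lock unless it has to wake. */
static void example_tx_complete(struct net_device *dev, struct example_ring *r,
                                u16 new_cons)
{
    r->tx_cons = new_cons;
    smp_mb();   /* publish tx_cons before testing the stopped flag */
    if (netif_queue_stopped(dev) && ring_space(r) > r->wake_thresh) {
        netif_tx_lock(dev);         /* serialize the wake with the producer */
        if (netif_queue_stopped(dev) &&
            ring_space(r) > r->wake_thresh)
            netif_wake_queue(dev);
        netif_tx_unlock(dev);
    }
}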
@@ -5575,20 +5577,20 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
     /* enable device (incl. PCI PM wakeup), and bus-mastering */
     rc = pci_enable_device(pdev);
     if (rc) {
-        printk(KERN_ERR PFX "Cannot enable PCI device, aborting.");
+        dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
         goto err_out;
     }
 
     if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
-        printk(KERN_ERR PFX "Cannot find PCI device base address, "
-               "aborting.\n");
+        dev_err(&pdev->dev,
+                "Cannot find PCI device base address, aborting.\n");
         rc = -ENODEV;
         goto err_out_disable;
     }
 
     rc = pci_request_regions(pdev, DRV_MODULE_NAME);
     if (rc) {
-        printk(KERN_ERR PFX "Cannot obtain PCI resources, aborting.\n");
+        dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
         goto err_out_disable;
     }
 
@@ -5596,15 +5598,15 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 
     bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
     if (bp->pm_cap == 0) {
-        printk(KERN_ERR PFX "Cannot find power management capability, "
-               "aborting.\n");
+        dev_err(&pdev->dev,
+                "Cannot find power management capability, aborting.\n");
         rc = -EIO;
         goto err_out_release;
     }
 
     bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
     if (bp->pcix_cap == 0) {
-        printk(KERN_ERR PFX "Cannot find PCIX capability, aborting.\n");
+        dev_err(&pdev->dev, "Cannot find PCIX capability, aborting.\n");
         rc = -EIO;
         goto err_out_release;
     }
@@ -5612,14 +5614,14 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
     if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
         bp->flags |= USING_DAC_FLAG;
         if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
-            printk(KERN_ERR PFX "pci_set_consistent_dma_mask "
-                   "failed, aborting.\n");
+            dev_err(&pdev->dev,
+                    "pci_set_consistent_dma_mask failed, aborting.\n");
             rc = -EIO;
             goto err_out_release;
         }
     }
     else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
-        printk(KERN_ERR PFX "System does not support DMA, aborting.\n");
+        dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
         rc = -EIO;
         goto err_out_release;
     }
@@ -5628,7 +5630,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
     bp->pdev = pdev;
 
     spin_lock_init(&bp->phy_lock);
-    spin_lock_init(&bp->tx_lock);
     INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
 
     dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
@@ -5639,7 +5640,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
     bp->regview = ioremap_nocache(dev->base_addr, mem_len);
 
     if (!bp->regview) {
-        printk(KERN_ERR PFX "Cannot map register space, aborting.\n");
+        dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
         rc = -ENOMEM;
         goto err_out_release;
     }
@@ -5711,8 +5712,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
     else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
              !(bp->flags & PCIX_FLAG)) {
 
-        printk(KERN_ERR PFX "5706 A1 can only be used in a PCIX bus, "
-               "aborting.\n");
+        dev_err(&pdev->dev,
+                "5706 A1 can only be used in a PCIX bus, aborting.\n");
         goto err_out_unmap;
     }
 
@@ -5733,7 +5734,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 
     if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
         BNX2_DEV_INFO_SIGNATURE_MAGIC) {
-        printk(KERN_ERR PFX "Firmware not running, aborting.\n");
+        dev_err(&pdev->dev, "Firmware not running, aborting.\n");
         rc = -ENODEV;
         goto err_out_unmap;
     }
@@ -5751,7 +5752,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
     bp->mac_addr[5] = (u8) reg;
 
     bp->tx_ring_size = MAX_TX_DESC_CNT;
-    bnx2_set_rx_ring_size(bp, 100);
+    bnx2_set_rx_ring_size(bp, 255);
 
     bp->rx_csum = 1;
 
@@ -5895,7 +5896,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 #endif
 
     if ((rc = register_netdev(dev))) {
-        printk(KERN_ERR PFX "Cannot register net device\n");
+        dev_err(&pdev->dev, "Cannot register net device\n");
         if (bp->regview)
             iounmap(bp->regview);
         pci_release_regions(pdev);
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 658c5ee95c73..fe804763c607 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -3890,10 +3890,6 @@ struct bnx2 {
     u32 tx_prod_bseq __attribute__((aligned(L1_CACHE_BYTES)));
     u16 tx_prod;
 
-    struct tx_bd *tx_desc_ring;
-    struct sw_bd *tx_buf_ring;
-    int tx_ring_size;
-
     u16 tx_cons __attribute__((aligned(L1_CACHE_BYTES)));
     u16 hw_tx_cons;
 
@@ -3916,9 +3912,11 @@ struct bnx2 {
     struct sw_bd *rx_buf_ring;
     struct rx_bd *rx_desc_ring[MAX_RX_RINGS];
 
-    /* Only used to synchronize netif_stop_queue/wake_queue when tx */
-    /* ring is full */
-    spinlock_t tx_lock;
+    /* TX constants */
+    struct tx_bd *tx_desc_ring;
+    struct sw_bd *tx_buf_ring;
+    int tx_ring_size;
+    u32 tx_wake_thresh;
 
     /* End of fields used in the performance code paths. */
 
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index d33130f64700..a31544ccb3c4 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -4887,13 +4887,12 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
 
     err = pci_enable_device(pdev);
     if (err) {
-        printk(KERN_ERR PFX "Cannot enable PCI device, "
-               "aborting.\n");
+        dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
         return err;
     }
 
     if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
-        printk(KERN_ERR PFX "Cannot find proper PCI device "
+        dev_err(&pdev->dev, "Cannot find proper PCI device "
                "base address, aborting.\n");
         err = -ENODEV;
         goto err_out_disable_pdev;
@@ -4901,7 +4900,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
 
     dev = alloc_etherdev(sizeof(*cp));
     if (!dev) {
-        printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
+        dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
         err = -ENOMEM;
         goto err_out_disable_pdev;
     }
@@ -4910,8 +4909,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
 
     err = pci_request_regions(pdev, dev->name);
     if (err) {
-        printk(KERN_ERR PFX "Cannot obtain PCI resources, "
-               "aborting.\n");
+        dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
         goto err_out_free_netdev;
     }
     pci_set_master(pdev);
@@ -4941,7 +4939,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
         if (pci_write_config_byte(pdev,
                                   PCI_CACHE_LINE_SIZE,
                                   cas_cacheline_size)) {
-            printk(KERN_ERR PFX "Could not set PCI cache "
+            dev_err(&pdev->dev, "Could not set PCI cache "
                    "line size\n");
             goto err_write_cacheline;
         }
@@ -4955,7 +4953,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
         err = pci_set_consistent_dma_mask(pdev,
                                           DMA_64BIT_MASK);
         if (err < 0) {
-            printk(KERN_ERR PFX "Unable to obtain 64-bit DMA "
+            dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
                    "for consistent allocations\n");
             goto err_out_free_res;
         }
@@ -4963,7 +4961,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
     } else {
         err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
         if (err) {
-            printk(KERN_ERR PFX "No usable DMA configuration, "
+            dev_err(&pdev->dev, "No usable DMA configuration, "
                    "aborting.\n");
             goto err_out_free_res;
         }
@@ -5023,8 +5021,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
     /* give us access to cassini registers */
     cp->regs = pci_iomap(pdev, 0, casreg_len);
     if (cp->regs == 0UL) {
-        printk(KERN_ERR PFX "Cannot map device registers, "
-               "aborting.\n");
+        dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
         goto err_out_free_res;
     }
     cp->casreg_len = casreg_len;
@@ -5040,8 +5037,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
         pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
                              &cp->block_dvma);
     if (!cp->init_block) {
-        printk(KERN_ERR PFX "Cannot allocate init block, "
-               "aborting.\n");
+        dev_err(&pdev->dev, "Cannot allocate init block, aborting.\n");
         goto err_out_iounmap;
     }
 
@@ -5085,8 +5081,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
         dev->features |= NETIF_F_HIGHDMA;
 
     if (register_netdev(dev)) {
-        printk(KERN_ERR PFX "Cannot register net device, "
-               "aborting.\n");
+        dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
         goto err_out_free_consistent;
     }
 
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c index 87f94d939ff8..61b3754f50ff 100644 --- a/drivers/net/chelsio/sge.c +++ b/drivers/net/chelsio/sge.c | |||
@@ -1417,7 +1417,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1417 | struct cpl_tx_pkt *cpl; | 1417 | struct cpl_tx_pkt *cpl; |
1418 | 1418 | ||
1419 | #ifdef NETIF_F_TSO | 1419 | #ifdef NETIF_F_TSO |
1420 | if (skb_shinfo(skb)->gso_size) { | 1420 | if (skb_is_gso(skb)) { |
1421 | int eth_type; | 1421 | int eth_type; |
1422 | struct cpl_tx_pkt_lso *hdr; | 1422 | struct cpl_tx_pkt_lso *hdr; |
1423 | 1423 | ||
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c index 47eecce35fa4..2dcca79b1f6a 100644 --- a/drivers/net/cs89x0.c +++ b/drivers/net/cs89x0.c | |||
@@ -1905,8 +1905,7 @@ MODULE_LICENSE("GPL"); | |||
1905 | 1905 | ||
1906 | */ | 1906 | */ |
1907 | 1907 | ||
1908 | int | 1908 | int __init init_module(void) |
1909 | init_module(void) | ||
1910 | { | 1909 | { |
1911 | struct net_device *dev = alloc_etherdev(sizeof(struct net_local)); | 1910 | struct net_device *dev = alloc_etherdev(sizeof(struct net_local)); |
1912 | struct net_local *lp; | 1911 | struct net_local *lp; |
diff --git a/drivers/net/declance.c b/drivers/net/declance.c index 2038ca7e49ce..6ad5796121c8 100644 --- a/drivers/net/declance.c +++ b/drivers/net/declance.c | |||
@@ -703,8 +703,8 @@ static irqreturn_t lance_dma_merr_int(const int irq, void *dev_id, | |||
703 | return IRQ_HANDLED; | 703 | return IRQ_HANDLED; |
704 | } | 704 | } |
705 | 705 | ||
706 | static irqreturn_t | 706 | static irqreturn_t lance_interrupt(const int irq, void *dev_id, |
707 | lance_interrupt(const int irq, void *dev_id, struct pt_regs *regs) | 707 | struct pt_regs *regs) |
708 | { | 708 | { |
709 | struct net_device *dev = (struct net_device *) dev_id; | 709 | struct net_device *dev = (struct net_device *) dev_id; |
710 | struct lance_private *lp = netdev_priv(dev); | 710 | struct lance_private *lp = netdev_priv(dev); |
@@ -1253,7 +1253,7 @@ static int __init dec_lance_init(const int type, const int slot) | |||
1253 | return 0; | 1253 | return 0; |
1254 | 1254 | ||
1255 | err_out_free_dev: | 1255 | err_out_free_dev: |
1256 | kfree(dev); | 1256 | free_netdev(dev); |
1257 | 1257 | ||
1258 | err_out: | 1258 | err_out: |
1259 | return ret; | 1259 | return ret; |
@@ -1299,6 +1299,7 @@ static void __exit dec_lance_cleanup(void) | |||
1299 | while (root_lance_dev) { | 1299 | while (root_lance_dev) { |
1300 | struct net_device *dev = root_lance_dev; | 1300 | struct net_device *dev = root_lance_dev; |
1301 | struct lance_private *lp = netdev_priv(dev); | 1301 | struct lance_private *lp = netdev_priv(dev); |
1302 | |||
1302 | unregister_netdev(dev); | 1303 | unregister_netdev(dev); |
1303 | #ifdef CONFIG_TC | 1304 | #ifdef CONFIG_TC |
1304 | if (lp->slot >= 0) | 1305 | if (lp->slot >= 0) |
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c index 4b6ddb70f921..402961e68c89 100644 --- a/drivers/net/dl2k.c +++ b/drivers/net/dl2k.c | |||
@@ -9,49 +9,10 @@ | |||
9 | the Free Software Foundation; either version 2 of the License, or | 9 | the Free Software Foundation; either version 2 of the License, or |
10 | (at your option) any later version. | 10 | (at your option) any later version. |
11 | */ | 11 | */ |
12 | /* | ||
13 | Rev Date Description | ||
14 | ========================================================================== | ||
15 | 0.01 2001/05/03 Created DL2000-based linux driver | ||
16 | 0.02 2001/05/21 Added VLAN and hardware checksum support. | ||
17 | 1.00 2001/06/26 Added jumbo frame support. | ||
18 | 1.01 2001/08/21 Added two parameters, rx_coalesce and rx_timeout. | ||
19 | 1.02 2001/10/08 Supported fiber media. | ||
20 | Added flow control parameters. | ||
21 | 1.03 2001/10/12 Changed the default media to 1000mbps_fd for | ||
22 | the fiber devices. | ||
23 | 1.04 2001/11/08 Fixed Tx stopped when tx very busy. | ||
24 | 1.05 2001/11/22 Fixed Tx stopped when unidirectional tx busy. | ||
25 | 1.06 2001/12/13 Fixed disconnect bug at 10Mbps mode. | ||
26 | Fixed tx_full flag incorrect. | ||
27 | Added tx_coalesce paramter. | ||
28 | 1.07 2002/01/03 Fixed miscount of RX frame error. | ||
29 | 1.08 2002/01/17 Fixed the multicast bug. | ||
30 | 1.09 2002/03/07 Move rx-poll-now to re-fill loop. | ||
31 | Added rio_timer() to watch rx buffers. | ||
32 | 1.10 2002/04/16 Fixed miscount of carrier error. | ||
33 | 1.11 2002/05/23 Added ISR schedule scheme | ||
34 | Fixed miscount of rx frame error for DGE-550SX. | ||
35 | Fixed VLAN bug. | ||
36 | 1.12 2002/06/13 Lock tx_coalesce=1 on 10/100Mbps mode. | ||
37 | 1.13 2002/08/13 1. Fix disconnection (many tx:carrier/rx:frame | ||
38 | errs) with some mainboards. | ||
39 | 2. Use definition "DRV_NAME" "DRV_VERSION" | ||
40 | "DRV_RELDATE" for flexibility. | ||
41 | 1.14 2002/08/14 Support ethtool. | ||
42 | 1.15 2002/08/27 Changed the default media to Auto-Negotiation | ||
43 | for the fiber devices. | ||
44 | 1.16 2002/09/04 More power down time for fiber devices auto- | ||
45 | negotiation. | ||
46 | Fix disconnect bug after ifup and ifdown. | ||
47 | 1.17 2002/10/03 Fix RMON statistics overflow. | ||
48 | Always use I/O mapping to access eeprom, | ||
49 | avoid system freezing with some chipsets. | ||
50 | 12 | ||
51 | */ | ||
52 | #define DRV_NAME "D-Link DL2000-based linux driver" | 13 | #define DRV_NAME "D-Link DL2000-based linux driver" |
53 | #define DRV_VERSION "v1.17b" | 14 | #define DRV_VERSION "v1.18" |
54 | #define DRV_RELDATE "2006/03/10" | 15 | #define DRV_RELDATE "2006/06/27" |
55 | #include "dl2k.h" | 16 | #include "dl2k.h" |
56 | #include <linux/dma-mapping.h> | 17 | #include <linux/dma-mapping.h> |
57 | 18 | ||
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c index 1b758b707134..3d76fa144c4f 100644 --- a/drivers/net/dm9000.c +++ b/drivers/net/dm9000.c | |||
@@ -339,6 +339,17 @@ static void dm9000_timeout(struct net_device *dev) | |||
339 | spin_unlock_irqrestore(&db->lock,flags); | 339 | spin_unlock_irqrestore(&db->lock,flags); |
340 | } | 340 | } |
341 | 341 | ||
342 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
343 | /* | ||
344 | * Used by netconsole | ||
345 | */ | ||
346 | static void dm9000_poll_controller(struct net_device *dev) | ||
347 | { | ||
348 | disable_irq(dev->irq); | ||
349 | dm9000_interrupt(dev->irq,dev,NULL); | ||
350 | enable_irq(dev->irq); | ||
351 | } | ||
352 | #endif | ||
342 | 353 | ||
343 | /* dm9000_release_board | 354 | /* dm9000_release_board |
344 | * | 355 | * |
@@ -538,6 +549,9 @@ dm9000_probe(struct platform_device *pdev) | |||
538 | ndev->stop = &dm9000_stop; | 549 | ndev->stop = &dm9000_stop; |
539 | ndev->get_stats = &dm9000_get_stats; | 550 | ndev->get_stats = &dm9000_get_stats; |
540 | ndev->set_multicast_list = &dm9000_hash_table; | 551 | ndev->set_multicast_list = &dm9000_hash_table; |
552 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
553 | ndev->poll_controller = &dm9000_poll_controller; | ||
554 | #endif | ||
541 | 555 | ||
542 | #ifdef DM9000_PROGRAM_EEPROM | 556 | #ifdef DM9000_PROGRAM_EEPROM |
543 | program_eeprom(db); | 557 | program_eeprom(db); |
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index 36d511729f71..2146cf74425e 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c | |||
@@ -132,6 +132,7 @@ static int __init dummy_init_module(void) | |||
132 | for (i = 0; i < numdummies && !err; i++) | 132 | for (i = 0; i < numdummies && !err; i++) |
133 | err = dummy_init_one(i); | 133 | err = dummy_init_one(i); |
134 | if (err) { | 134 | if (err) { |
135 | i--; | ||
135 | while (--i >= 0) | 136 | while (--i >= 0) |
136 | dummy_free_one(i); | 137 | dummy_free_one(i); |
137 | } | 138 | } |
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h index 3042d33e2d4d..d304297c496c 100644 --- a/drivers/net/e1000/e1000.h +++ b/drivers/net/e1000/e1000.h | |||
@@ -68,7 +68,6 @@ | |||
68 | #ifdef NETIF_F_TSO | 68 | #ifdef NETIF_F_TSO |
69 | #include <net/checksum.h> | 69 | #include <net/checksum.h> |
70 | #endif | 70 | #endif |
71 | #include <linux/workqueue.h> | ||
72 | #include <linux/mii.h> | 71 | #include <linux/mii.h> |
73 | #include <linux/ethtool.h> | 72 | #include <linux/ethtool.h> |
74 | #include <linux/if_vlan.h> | 73 | #include <linux/if_vlan.h> |
@@ -111,6 +110,9 @@ struct e1000_adapter; | |||
111 | #define E1000_MIN_RXD 80 | 110 | #define E1000_MIN_RXD 80 |
112 | #define E1000_MAX_82544_RXD 4096 | 111 | #define E1000_MAX_82544_RXD 4096 |
113 | 112 | ||
113 | /* this is the size past which hardware will drop packets when setting LPE=0 */ | ||
114 | #define MAXIMUM_ETHERNET_VLAN_SIZE 1522 | ||
115 | |||
114 | /* Supported Rx Buffer Sizes */ | 116 | /* Supported Rx Buffer Sizes */ |
115 | #define E1000_RXBUFFER_128 128 /* Used for packet split */ | 117 | #define E1000_RXBUFFER_128 128 /* Used for packet split */ |
116 | #define E1000_RXBUFFER_256 256 /* Used for packet split */ | 118 | #define E1000_RXBUFFER_256 256 /* Used for packet split */ |
@@ -143,6 +145,7 @@ struct e1000_adapter; | |||
143 | 145 | ||
144 | #define AUTO_ALL_MODES 0 | 146 | #define AUTO_ALL_MODES 0 |
145 | #define E1000_EEPROM_82544_APM 0x0004 | 147 | #define E1000_EEPROM_82544_APM 0x0004 |
148 | #define E1000_EEPROM_ICH8_APME 0x0004 | ||
146 | #define E1000_EEPROM_APME 0x0400 | 149 | #define E1000_EEPROM_APME 0x0400 |
147 | 150 | ||
148 | #ifndef E1000_MASTER_SLAVE | 151 | #ifndef E1000_MASTER_SLAVE |
@@ -254,7 +257,6 @@ struct e1000_adapter { | |||
254 | spinlock_t tx_queue_lock; | 257 | spinlock_t tx_queue_lock; |
255 | #endif | 258 | #endif |
256 | atomic_t irq_sem; | 259 | atomic_t irq_sem; |
257 | struct work_struct watchdog_task; | ||
258 | struct work_struct reset_task; | 260 | struct work_struct reset_task; |
259 | uint8_t fc_autoneg; | 261 | uint8_t fc_autoneg; |
260 | 262 | ||
@@ -339,8 +341,14 @@ struct e1000_adapter { | |||
339 | #ifdef NETIF_F_TSO | 341 | #ifdef NETIF_F_TSO |
340 | boolean_t tso_force; | 342 | boolean_t tso_force; |
341 | #endif | 343 | #endif |
344 | boolean_t smart_power_down; /* phy smart power down */ | ||
345 | unsigned long flags; | ||
342 | }; | 346 | }; |
343 | 347 | ||
348 | enum e1000_state_t { | ||
349 | __E1000_DRIVER_TESTING, | ||
350 | __E1000_RESETTING, | ||
351 | }; | ||
344 | 352 | ||
345 | /* e1000_main.c */ | 353 | /* e1000_main.c */ |
346 | extern char e1000_driver_name[]; | 354 | extern char e1000_driver_name[]; |
@@ -348,6 +356,7 @@ extern char e1000_driver_version[]; | |||
348 | int e1000_up(struct e1000_adapter *adapter); | 356 | int e1000_up(struct e1000_adapter *adapter); |
349 | void e1000_down(struct e1000_adapter *adapter); | 357 | void e1000_down(struct e1000_adapter *adapter); |
350 | void e1000_reset(struct e1000_adapter *adapter); | 358 | void e1000_reset(struct e1000_adapter *adapter); |
359 | void e1000_reinit_locked(struct e1000_adapter *adapter); | ||
351 | int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); | 360 | int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); |
352 | void e1000_free_all_tx_resources(struct e1000_adapter *adapter); | 361 | void e1000_free_all_tx_resources(struct e1000_adapter *adapter); |
353 | int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); | 362 | int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); |
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c index d19664891768..88a82ba88f57 100644 --- a/drivers/net/e1000/e1000_ethtool.c +++ b/drivers/net/e1000/e1000_ethtool.c | |||
@@ -109,7 +109,8 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) | |||
109 | SUPPORTED_1000baseT_Full| | 109 | SUPPORTED_1000baseT_Full| |
110 | SUPPORTED_Autoneg | | 110 | SUPPORTED_Autoneg | |
111 | SUPPORTED_TP); | 111 | SUPPORTED_TP); |
112 | 112 | if (hw->phy_type == e1000_phy_ife) | |
113 | ecmd->supported &= ~SUPPORTED_1000baseT_Full; | ||
113 | ecmd->advertising = ADVERTISED_TP; | 114 | ecmd->advertising = ADVERTISED_TP; |
114 | 115 | ||
115 | if (hw->autoneg == 1) { | 116 | if (hw->autoneg == 1) { |
@@ -203,11 +204,9 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) | |||
203 | 204 | ||
204 | /* reset the link */ | 205 | /* reset the link */ |
205 | 206 | ||
206 | if (netif_running(adapter->netdev)) { | 207 | if (netif_running(adapter->netdev)) |
207 | e1000_down(adapter); | 208 | e1000_reinit_locked(adapter); |
208 | e1000_reset(adapter); | 209 | else |
209 | e1000_up(adapter); | ||
210 | } else | ||
211 | e1000_reset(adapter); | 210 | e1000_reset(adapter); |
212 | 211 | ||
213 | return 0; | 212 | return 0; |
@@ -254,10 +253,9 @@ e1000_set_pauseparam(struct net_device *netdev, | |||
254 | hw->original_fc = hw->fc; | 253 | hw->original_fc = hw->fc; |
255 | 254 | ||
256 | if (adapter->fc_autoneg == AUTONEG_ENABLE) { | 255 | if (adapter->fc_autoneg == AUTONEG_ENABLE) { |
257 | if (netif_running(adapter->netdev)) { | 256 | if (netif_running(adapter->netdev)) |
258 | e1000_down(adapter); | 257 | e1000_reinit_locked(adapter); |
259 | e1000_up(adapter); | 258 | else |
260 | } else | ||
261 | e1000_reset(adapter); | 259 | e1000_reset(adapter); |
262 | } else | 260 | } else |
263 | return ((hw->media_type == e1000_media_type_fiber) ? | 261 | return ((hw->media_type == e1000_media_type_fiber) ? |
@@ -279,10 +277,9 @@ e1000_set_rx_csum(struct net_device *netdev, uint32_t data) | |||
279 | struct e1000_adapter *adapter = netdev_priv(netdev); | 277 | struct e1000_adapter *adapter = netdev_priv(netdev); |
280 | adapter->rx_csum = data; | 278 | adapter->rx_csum = data; |
281 | 279 | ||
282 | if (netif_running(netdev)) { | 280 | if (netif_running(netdev)) |
283 | e1000_down(adapter); | 281 | e1000_reinit_locked(adapter); |
284 | e1000_up(adapter); | 282 | else |
285 | } else | ||
286 | e1000_reset(adapter); | 283 | e1000_reset(adapter); |
287 | return 0; | 284 | return 0; |
288 | } | 285 | } |
@@ -577,6 +574,7 @@ e1000_get_drvinfo(struct net_device *netdev, | |||
577 | case e1000_82572: | 574 | case e1000_82572: |
578 | case e1000_82573: | 575 | case e1000_82573: |
579 | case e1000_80003es2lan: | 576 | case e1000_80003es2lan: |
577 | case e1000_ich8lan: | ||
580 | sprintf(firmware_version, "%d.%d-%d", | 578 | sprintf(firmware_version, "%d.%d-%d", |
581 | (eeprom_data & 0xF000) >> 12, | 579 | (eeprom_data & 0xF000) >> 12, |
582 | (eeprom_data & 0x0FF0) >> 4, | 580 | (eeprom_data & 0x0FF0) >> 4, |
@@ -631,6 +629,9 @@ e1000_set_ringparam(struct net_device *netdev, | |||
631 | tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues; | 629 | tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues; |
632 | rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues; | 630 | rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues; |
633 | 631 | ||
632 | while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) | ||
633 | msleep(1); | ||
634 | |||
634 | if (netif_running(adapter->netdev)) | 635 | if (netif_running(adapter->netdev)) |
635 | e1000_down(adapter); | 636 | e1000_down(adapter); |
636 | 637 | ||
@@ -691,9 +692,11 @@ e1000_set_ringparam(struct net_device *netdev, | |||
691 | adapter->rx_ring = rx_new; | 692 | adapter->rx_ring = rx_new; |
692 | adapter->tx_ring = tx_new; | 693 | adapter->tx_ring = tx_new; |
693 | if ((err = e1000_up(adapter))) | 694 | if ((err = e1000_up(adapter))) |
694 | return err; | 695 | goto err_setup; |
695 | } | 696 | } |
696 | 697 | ||
698 | clear_bit(__E1000_RESETTING, &adapter->flags); | ||
699 | |||
697 | return 0; | 700 | return 0; |
698 | err_setup_tx: | 701 | err_setup_tx: |
699 | e1000_free_all_rx_resources(adapter); | 702 | e1000_free_all_rx_resources(adapter); |
@@ -701,6 +704,8 @@ err_setup_rx: | |||
701 | adapter->rx_ring = rx_old; | 704 | adapter->rx_ring = rx_old; |
702 | adapter->tx_ring = tx_old; | 705 | adapter->tx_ring = tx_old; |
703 | e1000_up(adapter); | 706 | e1000_up(adapter); |
707 | err_setup: | ||
708 | clear_bit(__E1000_RESETTING, &adapter->flags); | ||
704 | return err; | 709 | return err; |
705 | } | 710 | } |
706 | 711 | ||
@@ -754,6 +759,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data) | |||
754 | toggle = 0x7FFFF3FF; | 759 | toggle = 0x7FFFF3FF; |
755 | break; | 760 | break; |
756 | case e1000_82573: | 761 | case e1000_82573: |
762 | case e1000_ich8lan: | ||
757 | toggle = 0x7FFFF033; | 763 | toggle = 0x7FFFF033; |
758 | break; | 764 | break; |
759 | default: | 765 | default: |
@@ -773,11 +779,12 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data) | |||
773 | } | 779 | } |
774 | /* restore previous status */ | 780 | /* restore previous status */ |
775 | E1000_WRITE_REG(&adapter->hw, STATUS, before); | 781 | E1000_WRITE_REG(&adapter->hw, STATUS, before); |
776 | 782 | if (adapter->hw.mac_type != e1000_ich8lan) { | |
777 | REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF); | 783 | REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF); |
778 | REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF); | 784 | REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF); |
779 | REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF); | 785 | REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF); |
780 | REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF); | 786 | REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF); |
787 | } | ||
781 | REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF); | 788 | REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF); |
782 | REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF); | 789 | REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF); |
783 | REG_PATTERN_TEST(RDLEN, 0x000FFF80, 0x000FFFFF); | 790 | REG_PATTERN_TEST(RDLEN, 0x000FFF80, 0x000FFFFF); |
@@ -790,20 +797,22 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data) | |||
790 | REG_PATTERN_TEST(TDLEN, 0x000FFF80, 0x000FFFFF); | 797 | REG_PATTERN_TEST(TDLEN, 0x000FFF80, 0x000FFFFF); |
791 | 798 | ||
792 | REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000); | 799 | REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000); |
793 | REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0x003FFFFB); | 800 | before = (adapter->hw.mac_type == e1000_ich8lan ? |
801 | 0x06C3B33E : 0x06DFB3FE); | ||
802 | REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB); | ||
794 | REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000); | 803 | REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000); |
795 | 804 | ||
796 | if (adapter->hw.mac_type >= e1000_82543) { | 805 | if (adapter->hw.mac_type >= e1000_82543) { |
797 | 806 | ||
798 | REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0xFFFFFFFF); | 807 | REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF); |
799 | REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); | 808 | REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); |
800 | REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF); | 809 | if (adapter->hw.mac_type != e1000_ich8lan) |
810 | REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF); | ||
801 | REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); | 811 | REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); |
802 | REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF); | 812 | REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF); |
803 | 813 | value = (adapter->hw.mac_type == e1000_ich8lan ? | |
804 | for (i = 0; i < E1000_RAR_ENTRIES; i++) { | 814 | E1000_RAR_ENTRIES_ICH8LAN : E1000_RAR_ENTRIES); |
805 | REG_PATTERN_TEST(RA + ((i << 1) << 2), 0xFFFFFFFF, | 815 | for (i = 0; i < value; i++) { |
806 | 0xFFFFFFFF); | ||
807 | REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF, | 816 | REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF, |
808 | 0xFFFFFFFF); | 817 | 0xFFFFFFFF); |
809 | } | 818 | } |
@@ -817,7 +826,9 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data) | |||
817 | 826 | ||
818 | } | 827 | } |
819 | 828 | ||
820 | for (i = 0; i < E1000_MC_TBL_SIZE; i++) | 829 | value = (adapter->hw.mac_type == e1000_ich8lan ? |
830 | E1000_MC_TBL_SIZE_ICH8LAN : E1000_MC_TBL_SIZE); | ||
831 | for (i = 0; i < value; i++) | ||
821 | REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF); | 832 | REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF); |
822 | 833 | ||
823 | *data = 0; | 834 | *data = 0; |
@@ -889,6 +900,8 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data) | |||
889 | /* Test each interrupt */ | 900 | /* Test each interrupt */ |
890 | for (; i < 10; i++) { | 901 | for (; i < 10; i++) { |
891 | 902 | ||
903 | if (adapter->hw.mac_type == e1000_ich8lan && i == 8) | ||
904 | continue; | ||
892 | /* Interrupt to test */ | 905 | /* Interrupt to test */ |
893 | mask = 1 << i; | 906 | mask = 1 << i; |
894 | 907 | ||
@@ -1246,18 +1259,33 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter) | |||
1246 | } else if (adapter->hw.phy_type == e1000_phy_gg82563) { | 1259 | } else if (adapter->hw.phy_type == e1000_phy_gg82563) { |
1247 | e1000_write_phy_reg(&adapter->hw, | 1260 | e1000_write_phy_reg(&adapter->hw, |
1248 | GG82563_PHY_KMRN_MODE_CTRL, | 1261 | GG82563_PHY_KMRN_MODE_CTRL, |
1249 | 0x1CE); | 1262 | 0x1CC); |
1250 | } | 1263 | } |
1251 | /* force 1000, set loopback */ | ||
1252 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x4140); | ||
1253 | 1264 | ||
1254 | /* Now set up the MAC to the same speed/duplex as the PHY. */ | ||
1255 | ctrl_reg = E1000_READ_REG(&adapter->hw, CTRL); | 1265 | ctrl_reg = E1000_READ_REG(&adapter->hw, CTRL); |
1256 | ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ | 1266 | |
1257 | ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ | 1267 | if (adapter->hw.phy_type == e1000_phy_ife) { |
1258 | E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ | 1268 | /* force 100, set loopback */ |
1259 | E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ | 1269 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x6100); |
1260 | E1000_CTRL_FD); /* Force Duplex to FULL */ | 1270 | |
1271 | /* Now set up the MAC to the same speed/duplex as the PHY. */ | ||
1272 | ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ | ||
1273 | ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ | ||
1274 | E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ | ||
1275 | E1000_CTRL_SPD_100 |/* Force Speed to 100 */ | ||
1276 | E1000_CTRL_FD); /* Force Duplex to FULL */ | ||
1277 | } else { | ||
1278 | /* force 1000, set loopback */ | ||
1279 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x4140); | ||
1280 | |||
1281 | /* Now set up the MAC to the same speed/duplex as the PHY. */ | ||
1282 | ctrl_reg = E1000_READ_REG(&adapter->hw, CTRL); | ||
1283 | ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ | ||
1284 | ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ | ||
1285 | E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ | ||
1286 | E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ | ||
1287 | E1000_CTRL_FD); /* Force Duplex to FULL */ | ||
1288 | } | ||
1261 | 1289 | ||
1262 | if (adapter->hw.media_type == e1000_media_type_copper && | 1290 | if (adapter->hw.media_type == e1000_media_type_copper && |
1263 | adapter->hw.phy_type == e1000_phy_m88) { | 1291 | adapter->hw.phy_type == e1000_phy_m88) { |
@@ -1317,6 +1345,7 @@ e1000_set_phy_loopback(struct e1000_adapter *adapter) | |||
1317 | case e1000_82572: | 1345 | case e1000_82572: |
1318 | case e1000_82573: | 1346 | case e1000_82573: |
1319 | case e1000_80003es2lan: | 1347 | case e1000_80003es2lan: |
1348 | case e1000_ich8lan: | ||
1320 | return e1000_integrated_phy_loopback(adapter); | 1349 | return e1000_integrated_phy_loopback(adapter); |
1321 | break; | 1350 | break; |
1322 | 1351 | ||
@@ -1568,6 +1597,7 @@ e1000_diag_test(struct net_device *netdev, | |||
1568 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1597 | struct e1000_adapter *adapter = netdev_priv(netdev); |
1569 | boolean_t if_running = netif_running(netdev); | 1598 | boolean_t if_running = netif_running(netdev); |
1570 | 1599 | ||
1600 | set_bit(__E1000_DRIVER_TESTING, &adapter->flags); | ||
1571 | if (eth_test->flags == ETH_TEST_FL_OFFLINE) { | 1601 | if (eth_test->flags == ETH_TEST_FL_OFFLINE) { |
1572 | /* Offline tests */ | 1602 | /* Offline tests */ |
1573 | 1603 | ||
@@ -1582,7 +1612,8 @@ e1000_diag_test(struct net_device *netdev, | |||
1582 | eth_test->flags |= ETH_TEST_FL_FAILED; | 1612 | eth_test->flags |= ETH_TEST_FL_FAILED; |
1583 | 1613 | ||
1584 | if (if_running) | 1614 | if (if_running) |
1585 | e1000_down(adapter); | 1615 | /* indicate we're in test mode */ |
1616 | dev_close(netdev); | ||
1586 | else | 1617 | else |
1587 | e1000_reset(adapter); | 1618 | e1000_reset(adapter); |
1588 | 1619 | ||
@@ -1607,8 +1638,9 @@ e1000_diag_test(struct net_device *netdev, | |||
1607 | adapter->hw.autoneg = autoneg; | 1638 | adapter->hw.autoneg = autoneg; |
1608 | 1639 | ||
1609 | e1000_reset(adapter); | 1640 | e1000_reset(adapter); |
1641 | clear_bit(__E1000_DRIVER_TESTING, &adapter->flags); | ||
1610 | if (if_running) | 1642 | if (if_running) |
1611 | e1000_up(adapter); | 1643 | dev_open(netdev); |
1612 | } else { | 1644 | } else { |
1613 | /* Online tests */ | 1645 | /* Online tests */ |
1614 | if (e1000_link_test(adapter, &data[4])) | 1646 | if (e1000_link_test(adapter, &data[4])) |
@@ -1619,6 +1651,8 @@ e1000_diag_test(struct net_device *netdev, | |||
1619 | data[1] = 0; | 1651 | data[1] = 0; |
1620 | data[2] = 0; | 1652 | data[2] = 0; |
1621 | data[3] = 0; | 1653 | data[3] = 0; |
1654 | |||
1655 | clear_bit(__E1000_DRIVER_TESTING, &adapter->flags); | ||
1622 | } | 1656 | } |
1623 | msleep_interruptible(4 * 1000); | 1657 | msleep_interruptible(4 * 1000); |
1624 | } | 1658 | } |
@@ -1778,21 +1812,18 @@ e1000_phys_id(struct net_device *netdev, uint32_t data) | |||
1778 | mod_timer(&adapter->blink_timer, jiffies); | 1812 | mod_timer(&adapter->blink_timer, jiffies); |
1779 | msleep_interruptible(data * 1000); | 1813 | msleep_interruptible(data * 1000); |
1780 | del_timer_sync(&adapter->blink_timer); | 1814 | del_timer_sync(&adapter->blink_timer); |
1781 | } else if (adapter->hw.mac_type < e1000_82573) { | 1815 | } else if (adapter->hw.phy_type == e1000_phy_ife) { |
1782 | E1000_WRITE_REG(&adapter->hw, LEDCTL, | 1816 | if (!adapter->blink_timer.function) { |
1783 | (E1000_LEDCTL_LED2_BLINK_RATE | | 1817 | init_timer(&adapter->blink_timer); |
1784 | E1000_LEDCTL_LED0_BLINK | E1000_LEDCTL_LED2_BLINK | | 1818 | adapter->blink_timer.function = e1000_led_blink_callback; |
1785 | (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) | | 1819 | adapter->blink_timer.data = (unsigned long) adapter; |
1786 | (E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED0_MODE_SHIFT) | | 1820 | } |
1787 | (E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED1_MODE_SHIFT))); | 1821 | mod_timer(&adapter->blink_timer, jiffies); |
1788 | msleep_interruptible(data * 1000); | 1822 | msleep_interruptible(data * 1000); |
1823 | del_timer_sync(&adapter->blink_timer); | ||
1824 | e1000_write_phy_reg(&(adapter->hw), IFE_PHY_SPECIAL_CONTROL_LED, 0); | ||
1789 | } else { | 1825 | } else { |
1790 | E1000_WRITE_REG(&adapter->hw, LEDCTL, | 1826 | e1000_blink_led_start(&adapter->hw); |
1791 | (E1000_LEDCTL_LED2_BLINK_RATE | | ||
1792 | E1000_LEDCTL_LED1_BLINK | E1000_LEDCTL_LED2_BLINK | | ||
1793 | (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) | | ||
1794 | (E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED1_MODE_SHIFT) | | ||
1795 | (E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED0_MODE_SHIFT))); | ||
1796 | msleep_interruptible(data * 1000); | 1827 | msleep_interruptible(data * 1000); |
1797 | } | 1828 | } |
1798 | 1829 | ||
@@ -1807,10 +1838,8 @@ static int | |||
1807 | e1000_nway_reset(struct net_device *netdev) | 1838 | e1000_nway_reset(struct net_device *netdev) |
1808 | { | 1839 | { |
1809 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1840 | struct e1000_adapter *adapter = netdev_priv(netdev); |
1810 | if (netif_running(netdev)) { | 1841 | if (netif_running(netdev)) |
1811 | e1000_down(adapter); | 1842 | e1000_reinit_locked(adapter); |
1812 | e1000_up(adapter); | ||
1813 | } | ||
1814 | return 0; | 1843 | return 0; |
1815 | } | 1844 | } |
1816 | 1845 | ||
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c index 3959039b16ec..b3b919116e0f 100644 --- a/drivers/net/e1000/e1000_hw.c +++ b/drivers/net/e1000/e1000_hw.c | |||
@@ -101,9 +101,37 @@ static void e1000_write_reg_io(struct e1000_hw *hw, uint32_t offset, | |||
101 | 101 | ||
102 | #define E1000_WRITE_REG_IO(a, reg, val) \ | 102 | #define E1000_WRITE_REG_IO(a, reg, val) \ |
103 | e1000_write_reg_io((a), E1000_##reg, val) | 103 | e1000_write_reg_io((a), E1000_##reg, val) |
104 | static int32_t e1000_configure_kmrn_for_10_100(struct e1000_hw *hw); | 104 | static int32_t e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, |
105 | uint16_t duplex); | ||
105 | static int32_t e1000_configure_kmrn_for_1000(struct e1000_hw *hw); | 106 | static int32_t e1000_configure_kmrn_for_1000(struct e1000_hw *hw); |
106 | 107 | ||
108 | static int32_t e1000_erase_ich8_4k_segment(struct e1000_hw *hw, | ||
109 | uint32_t segment); | ||
110 | static int32_t e1000_get_software_flag(struct e1000_hw *hw); | ||
111 | static int32_t e1000_get_software_semaphore(struct e1000_hw *hw); | ||
112 | static int32_t e1000_init_lcd_from_nvm(struct e1000_hw *hw); | ||
113 | static int32_t e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw); | ||
114 | static int32_t e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, | ||
115 | uint16_t words, uint16_t *data); | ||
116 | static int32_t e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, | ||
117 | uint8_t* data); | ||
118 | static int32_t e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, | ||
119 | uint16_t *data); | ||
120 | static int32_t e1000_read_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, | ||
121 | uint16_t *data); | ||
122 | static void e1000_release_software_flag(struct e1000_hw *hw); | ||
123 | static void e1000_release_software_semaphore(struct e1000_hw *hw); | ||
124 | static int32_t e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, | ||
125 | uint32_t no_snoop); | ||
126 | static int32_t e1000_verify_write_ich8_byte(struct e1000_hw *hw, | ||
127 | uint32_t index, uint8_t byte); | ||
128 | static int32_t e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, | ||
129 | uint16_t words, uint16_t *data); | ||
130 | static int32_t e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, | ||
131 | uint8_t data); | ||
132 | static int32_t e1000_write_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, | ||
133 | uint16_t data); | ||
134 | |||
107 | /* IGP cable length table */ | 135 | /* IGP cable length table */ |
108 | static const | 136 | static const |
109 | uint16_t e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = | 137 | uint16_t e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = |
@@ -156,6 +184,14 @@ e1000_set_phy_type(struct e1000_hw *hw) | |||
156 | hw->phy_type = e1000_phy_igp; | 184 | hw->phy_type = e1000_phy_igp; |
157 | break; | 185 | break; |
158 | } | 186 | } |
187 | case IGP03E1000_E_PHY_ID: | ||
188 | hw->phy_type = e1000_phy_igp_3; | ||
189 | break; | ||
190 | case IFE_E_PHY_ID: | ||
191 | case IFE_PLUS_E_PHY_ID: | ||
192 | case IFE_C_E_PHY_ID: | ||
193 | hw->phy_type = e1000_phy_ife; | ||
194 | break; | ||
159 | case GG82563_E_PHY_ID: | 195 | case GG82563_E_PHY_ID: |
160 | if (hw->mac_type == e1000_80003es2lan) { | 196 | if (hw->mac_type == e1000_80003es2lan) { |
161 | hw->phy_type = e1000_phy_gg82563; | 197 | hw->phy_type = e1000_phy_gg82563; |
@@ -332,6 +368,7 @@ e1000_set_mac_type(struct e1000_hw *hw) | |||
332 | break; | 368 | break; |
333 | case E1000_DEV_ID_82541EI: | 369 | case E1000_DEV_ID_82541EI: |
334 | case E1000_DEV_ID_82541EI_MOBILE: | 370 | case E1000_DEV_ID_82541EI_MOBILE: |
371 | case E1000_DEV_ID_82541ER_LOM: | ||
335 | hw->mac_type = e1000_82541; | 372 | hw->mac_type = e1000_82541; |
336 | break; | 373 | break; |
337 | case E1000_DEV_ID_82541ER: | 374 | case E1000_DEV_ID_82541ER: |
@@ -341,6 +378,7 @@ e1000_set_mac_type(struct e1000_hw *hw) | |||
341 | hw->mac_type = e1000_82541_rev_2; | 378 | hw->mac_type = e1000_82541_rev_2; |
342 | break; | 379 | break; |
343 | case E1000_DEV_ID_82547EI: | 380 | case E1000_DEV_ID_82547EI: |
381 | case E1000_DEV_ID_82547EI_MOBILE: | ||
344 | hw->mac_type = e1000_82547; | 382 | hw->mac_type = e1000_82547; |
345 | break; | 383 | break; |
346 | case E1000_DEV_ID_82547GI: | 384 | case E1000_DEV_ID_82547GI: |
@@ -354,6 +392,7 @@ e1000_set_mac_type(struct e1000_hw *hw) | |||
354 | case E1000_DEV_ID_82572EI_COPPER: | 392 | case E1000_DEV_ID_82572EI_COPPER: |
355 | case E1000_DEV_ID_82572EI_FIBER: | 393 | case E1000_DEV_ID_82572EI_FIBER: |
356 | case E1000_DEV_ID_82572EI_SERDES: | 394 | case E1000_DEV_ID_82572EI_SERDES: |
395 | case E1000_DEV_ID_82572EI: | ||
357 | hw->mac_type = e1000_82572; | 396 | hw->mac_type = e1000_82572; |
358 | break; | 397 | break; |
359 | case E1000_DEV_ID_82573E: | 398 | case E1000_DEV_ID_82573E: |
@@ -361,16 +400,29 @@ e1000_set_mac_type(struct e1000_hw *hw) | |||
361 | case E1000_DEV_ID_82573L: | 400 | case E1000_DEV_ID_82573L: |
362 | hw->mac_type = e1000_82573; | 401 | hw->mac_type = e1000_82573; |
363 | break; | 402 | break; |
403 | case E1000_DEV_ID_80003ES2LAN_COPPER_SPT: | ||
404 | case E1000_DEV_ID_80003ES2LAN_SERDES_SPT: | ||
364 | case E1000_DEV_ID_80003ES2LAN_COPPER_DPT: | 405 | case E1000_DEV_ID_80003ES2LAN_COPPER_DPT: |
365 | case E1000_DEV_ID_80003ES2LAN_SERDES_DPT: | 406 | case E1000_DEV_ID_80003ES2LAN_SERDES_DPT: |
366 | hw->mac_type = e1000_80003es2lan; | 407 | hw->mac_type = e1000_80003es2lan; |
367 | break; | 408 | break; |
409 | case E1000_DEV_ID_ICH8_IGP_M_AMT: | ||
410 | case E1000_DEV_ID_ICH8_IGP_AMT: | ||
411 | case E1000_DEV_ID_ICH8_IGP_C: | ||
412 | case E1000_DEV_ID_ICH8_IFE: | ||
413 | case E1000_DEV_ID_ICH8_IGP_M: | ||
414 | hw->mac_type = e1000_ich8lan; | ||
415 | break; | ||
368 | default: | 416 | default: |
369 | /* Should never have loaded on this device */ | 417 | /* Should never have loaded on this device */ |
370 | return -E1000_ERR_MAC_TYPE; | 418 | return -E1000_ERR_MAC_TYPE; |
371 | } | 419 | } |
372 | 420 | ||
373 | switch(hw->mac_type) { | 421 | switch(hw->mac_type) { |
422 | case e1000_ich8lan: | ||
423 | hw->swfwhw_semaphore_present = TRUE; | ||
424 | hw->asf_firmware_present = TRUE; | ||
425 | break; | ||
374 | case e1000_80003es2lan: | 426 | case e1000_80003es2lan: |
375 | hw->swfw_sync_present = TRUE; | 427 | hw->swfw_sync_present = TRUE; |
376 | /* fall through */ | 428 | /* fall through */ |
@@ -423,6 +475,7 @@ e1000_set_media_type(struct e1000_hw *hw) | |||
423 | case e1000_82542_rev2_1: | 475 | case e1000_82542_rev2_1: |
424 | hw->media_type = e1000_media_type_fiber; | 476 | hw->media_type = e1000_media_type_fiber; |
425 | break; | 477 | break; |
478 | case e1000_ich8lan: | ||
426 | case e1000_82573: | 479 | case e1000_82573: |
427 | /* The STATUS_TBIMODE bit is reserved or reused for the this | 480 | /* The STATUS_TBIMODE bit is reserved or reused for the this |
428 | * device. | 481 | * device. |
@@ -527,6 +580,14 @@ e1000_reset_hw(struct e1000_hw *hw) | |||
527 | } while(timeout); | 580 | } while(timeout); |
528 | } | 581 | } |
529 | 582 | ||
583 | /* Workaround for ICH8 bit corruption issue in FIFO memory */ | ||
584 | if (hw->mac_type == e1000_ich8lan) { | ||
585 | /* Set Tx and Rx buffer allocation to 8k apiece. */ | ||
586 | E1000_WRITE_REG(hw, PBA, E1000_PBA_8K); | ||
587 | /* Set Packet Buffer Size to 16k. */ | ||
588 | E1000_WRITE_REG(hw, PBS, E1000_PBS_16K); | ||
589 | } | ||
590 | |||
530 | /* Issue a global reset to the MAC. This will reset the chip's | 591 | /* Issue a global reset to the MAC. This will reset the chip's |
531 | * transmit, receive, DMA, and link units. It will not effect | 592 | * transmit, receive, DMA, and link units. It will not effect |
532 | * the current PCI configuration. The global reset bit is self- | 593 | * the current PCI configuration. The global reset bit is self- |
@@ -550,6 +611,20 @@ e1000_reset_hw(struct e1000_hw *hw) | |||
550 | /* Reset is performed on a shadow of the control register */ | 611 | /* Reset is performed on a shadow of the control register */ |
551 | E1000_WRITE_REG(hw, CTRL_DUP, (ctrl | E1000_CTRL_RST)); | 612 | E1000_WRITE_REG(hw, CTRL_DUP, (ctrl | E1000_CTRL_RST)); |
552 | break; | 613 | break; |
614 | case e1000_ich8lan: | ||
615 | if (!hw->phy_reset_disable && | ||
616 | e1000_check_phy_reset_block(hw) == E1000_SUCCESS) { | ||
617 | /* e1000_ich8lan PHY HW reset requires MAC CORE reset | ||
618 | * at the same time to make sure the interface between | ||
619 | * MAC and the external PHY is reset. | ||
620 | */ | ||
621 | ctrl |= E1000_CTRL_PHY_RST; | ||
622 | } | ||
623 | |||
624 | e1000_get_software_flag(hw); | ||
625 | E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_RST)); | ||
626 | msec_delay(5); | ||
627 | break; | ||
553 | default: | 628 | default: |
554 | E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_RST)); | 629 | E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_RST)); |
555 | break; | 630 | break; |
@@ -591,6 +666,7 @@ e1000_reset_hw(struct e1000_hw *hw) | |||
591 | /* fall through */ | 666 | /* fall through */ |
592 | case e1000_82571: | 667 | case e1000_82571: |
593 | case e1000_82572: | 668 | case e1000_82572: |
669 | case e1000_ich8lan: | ||
594 | case e1000_80003es2lan: | 670 | case e1000_80003es2lan: |
595 | ret_val = e1000_get_auto_rd_done(hw); | 671 | ret_val = e1000_get_auto_rd_done(hw); |
596 | if(ret_val) | 672 | if(ret_val) |
@@ -633,6 +709,12 @@ e1000_reset_hw(struct e1000_hw *hw) | |||
633 | e1000_pci_set_mwi(hw); | 709 | e1000_pci_set_mwi(hw); |
634 | } | 710 | } |
635 | 711 | ||
712 | if (hw->mac_type == e1000_ich8lan) { | ||
713 | uint32_t kab = E1000_READ_REG(hw, KABGTXD); | ||
714 | kab |= E1000_KABGTXD_BGSQLBIAS; | ||
715 | E1000_WRITE_REG(hw, KABGTXD, kab); | ||
716 | } | ||
717 | |||
636 | return E1000_SUCCESS; | 718 | return E1000_SUCCESS; |
637 | } | 719 | } |
638 | 720 | ||
@@ -675,9 +757,12 @@ e1000_init_hw(struct e1000_hw *hw) | |||
675 | 757 | ||
676 | /* Disabling VLAN filtering. */ | 758 | /* Disabling VLAN filtering. */ |
677 | DEBUGOUT("Initializing the IEEE VLAN\n"); | 759 | DEBUGOUT("Initializing the IEEE VLAN\n"); |
678 | if (hw->mac_type < e1000_82545_rev_3) | 760 | /* VET hardcoded to standard value and VFTA removed in ICH8 LAN */ |
679 | E1000_WRITE_REG(hw, VET, 0); | 761 | if (hw->mac_type != e1000_ich8lan) { |
680 | e1000_clear_vfta(hw); | 762 | if (hw->mac_type < e1000_82545_rev_3) |
763 | E1000_WRITE_REG(hw, VET, 0); | ||
764 | e1000_clear_vfta(hw); | ||
765 | } | ||
681 | 766 | ||
682 | /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */ | 767 | /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */ |
683 | if(hw->mac_type == e1000_82542_rev2_0) { | 768 | if(hw->mac_type == e1000_82542_rev2_0) { |
@@ -705,8 +790,14 @@ e1000_init_hw(struct e1000_hw *hw) | |||
705 | /* Zero out the Multicast HASH table */ | 790 | /* Zero out the Multicast HASH table */ |
706 | DEBUGOUT("Zeroing the MTA\n"); | 791 | DEBUGOUT("Zeroing the MTA\n"); |
707 | mta_size = E1000_MC_TBL_SIZE; | 792 | mta_size = E1000_MC_TBL_SIZE; |
708 | for(i = 0; i < mta_size; i++) | 793 | if (hw->mac_type == e1000_ich8lan) |
794 | mta_size = E1000_MC_TBL_SIZE_ICH8LAN; | ||
795 | for(i = 0; i < mta_size; i++) { | ||
709 | E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); | 796 | E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); |
797 | /* use write flush to prevent Memory Write Block (MWB) from | ||
798 | * occurring when accessing our register space */ | ||
799 | E1000_WRITE_FLUSH(hw); | ||
800 | } | ||
710 | 801 | ||
711 | /* Set the PCI priority bit correctly in the CTRL register. This | 802 | /* Set the PCI priority bit correctly in the CTRL register. This |
712 | * determines if the adapter gives priority to receives, or if it | 803 | * determines if the adapter gives priority to receives, or if it |
@@ -744,6 +835,10 @@ e1000_init_hw(struct e1000_hw *hw) | |||
744 | break; | 835 | break; |
745 | } | 836 | } |
746 | 837 | ||
838 | /* More time needed for PHY to initialize */ | ||
839 | if (hw->mac_type == e1000_ich8lan) | ||
840 | msec_delay(15); | ||
841 | |||
747 | /* Call a subroutine to configure the link and setup flow control. */ | 842 | /* Call a subroutine to configure the link and setup flow control. */ |
748 | ret_val = e1000_setup_link(hw); | 843 | ret_val = e1000_setup_link(hw); |
749 | 844 | ||
@@ -757,6 +852,7 @@ e1000_init_hw(struct e1000_hw *hw) | |||
757 | case e1000_82571: | 852 | case e1000_82571: |
758 | case e1000_82572: | 853 | case e1000_82572: |
759 | case e1000_82573: | 854 | case e1000_82573: |
855 | case e1000_ich8lan: | ||
760 | case e1000_80003es2lan: | 856 | case e1000_80003es2lan: |
761 | ctrl |= E1000_TXDCTL_COUNT_DESC; | 857 | ctrl |= E1000_TXDCTL_COUNT_DESC; |
762 | break; | 858 | break; |
@@ -795,6 +891,7 @@ e1000_init_hw(struct e1000_hw *hw) | |||
795 | /* Fall through */ | 891 | /* Fall through */ |
796 | case e1000_82571: | 892 | case e1000_82571: |
797 | case e1000_82572: | 893 | case e1000_82572: |
894 | case e1000_ich8lan: | ||
798 | ctrl = E1000_READ_REG(hw, TXDCTL1); | 895 | ctrl = E1000_READ_REG(hw, TXDCTL1); |
799 | ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB; | 896 | ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB; |
800 | if(hw->mac_type >= e1000_82571) | 897 | if(hw->mac_type >= e1000_82571) |
@@ -818,6 +915,11 @@ e1000_init_hw(struct e1000_hw *hw) | |||
818 | */ | 915 | */ |
819 | e1000_clear_hw_cntrs(hw); | 916 | e1000_clear_hw_cntrs(hw); |
820 | 917 | ||
918 | /* ICH8 No-snoop bits are opposite polarity. | ||
919 | * Set to snoop by default after reset. */ | ||
920 | if (hw->mac_type == e1000_ich8lan) | ||
921 | e1000_set_pci_ex_no_snoop(hw, PCI_EX_82566_SNOOP_ALL); | ||
922 | |||
821 | if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER || | 923 | if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER || |
822 | hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) { | 924 | hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) { |
823 | ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); | 925 | ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); |
@@ -905,6 +1007,7 @@ e1000_setup_link(struct e1000_hw *hw) | |||
905 | */ | 1007 | */ |
906 | if (hw->fc == e1000_fc_default) { | 1008 | if (hw->fc == e1000_fc_default) { |
907 | switch (hw->mac_type) { | 1009 | switch (hw->mac_type) { |
1010 | case e1000_ich8lan: | ||
908 | case e1000_82573: | 1011 | case e1000_82573: |
909 | hw->fc = e1000_fc_full; | 1012 | hw->fc = e1000_fc_full; |
910 | break; | 1013 | break; |
@@ -971,9 +1074,12 @@ e1000_setup_link(struct e1000_hw *hw) | |||
971 | */ | 1074 | */ |
972 | DEBUGOUT("Initializing the Flow Control address, type and timer regs\n"); | 1075 | DEBUGOUT("Initializing the Flow Control address, type and timer regs\n"); |
973 | 1076 | ||
974 | E1000_WRITE_REG(hw, FCAL, FLOW_CONTROL_ADDRESS_LOW); | 1077 | /* FCAL/H and FCT are hardcoded to standard values in e1000_ich8lan. */ |
975 | E1000_WRITE_REG(hw, FCAH, FLOW_CONTROL_ADDRESS_HIGH); | 1078 | if (hw->mac_type != e1000_ich8lan) { |
976 | E1000_WRITE_REG(hw, FCT, FLOW_CONTROL_TYPE); | 1079 | E1000_WRITE_REG(hw, FCT, FLOW_CONTROL_TYPE); |
1080 | E1000_WRITE_REG(hw, FCAH, FLOW_CONTROL_ADDRESS_HIGH); | ||
1081 | E1000_WRITE_REG(hw, FCAL, FLOW_CONTROL_ADDRESS_LOW); | ||
1082 | } | ||
977 | 1083 | ||
978 | E1000_WRITE_REG(hw, FCTTV, hw->fc_pause_time); | 1084 | E1000_WRITE_REG(hw, FCTTV, hw->fc_pause_time); |
979 | 1085 | ||
@@ -1237,12 +1343,13 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw) | |||
1237 | 1343 | ||
1238 | /* Wait 10ms for MAC to configure PHY from eeprom settings */ | 1344 | /* Wait 10ms for MAC to configure PHY from eeprom settings */ |
1239 | msec_delay(15); | 1345 | msec_delay(15); |
1240 | 1346 | if (hw->mac_type != e1000_ich8lan) { | |
1241 | /* Configure activity LED after PHY reset */ | 1347 | /* Configure activity LED after PHY reset */ |
1242 | led_ctrl = E1000_READ_REG(hw, LEDCTL); | 1348 | led_ctrl = E1000_READ_REG(hw, LEDCTL); |
1243 | led_ctrl &= IGP_ACTIVITY_LED_MASK; | 1349 | led_ctrl &= IGP_ACTIVITY_LED_MASK; |
1244 | led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); | 1350 | led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); |
1245 | E1000_WRITE_REG(hw, LEDCTL, led_ctrl); | 1351 | E1000_WRITE_REG(hw, LEDCTL, led_ctrl); |
1352 | } | ||
1246 | 1353 | ||
1247 | /* disable lplu d3 during driver init */ | 1354 | /* disable lplu d3 during driver init */ |
1248 | ret_val = e1000_set_d3_lplu_state(hw, FALSE); | 1355 | ret_val = e1000_set_d3_lplu_state(hw, FALSE); |
@@ -1478,8 +1585,7 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw) | |||
1478 | if (ret_val) | 1585 | if (ret_val) |
1479 | return ret_val; | 1586 | return ret_val; |
1480 | 1587 | ||
1481 | /* Enable Pass False Carrier on the PHY */ | 1588 | phy_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; |
1482 | phy_data |= GG82563_KMCR_PASS_FALSE_CARRIER; | ||
1483 | 1589 | ||
1484 | ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, | 1590 | ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, |
1485 | phy_data); | 1591 | phy_data); |
@@ -1561,28 +1667,40 @@ e1000_copper_link_mgp_setup(struct e1000_hw *hw) | |||
1561 | phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; | 1667 | phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; |
1562 | if(hw->disable_polarity_correction == 1) | 1668 | if(hw->disable_polarity_correction == 1) |
1563 | phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; | 1669 | phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; |
1564 | ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); | 1670 | ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); |
1565 | if(ret_val) | 1671 | if (ret_val) |
1566 | return ret_val; | ||
1567 | |||
1568 | /* Force TX_CLK in the Extended PHY Specific Control Register | ||
1569 | * to 25MHz clock. | ||
1570 | */ | ||
1571 | ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); | ||
1572 | if(ret_val) | ||
1573 | return ret_val; | 1672 | return ret_val; |
1574 | 1673 | ||
1575 | phy_data |= M88E1000_EPSCR_TX_CLK_25; | ||
1576 | |||
1577 | if (hw->phy_revision < M88E1011_I_REV_4) { | 1674 | if (hw->phy_revision < M88E1011_I_REV_4) { |
1578 | /* Configure Master and Slave downshift values */ | 1675 | /* Force TX_CLK in the Extended PHY Specific Control Register |
1579 | phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | | 1676 | * to 25MHz clock. |
1677 | */ | ||
1678 | ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); | ||
1679 | if (ret_val) | ||
1680 | return ret_val; | ||
1681 | |||
1682 | phy_data |= M88E1000_EPSCR_TX_CLK_25; | ||
1683 | |||
1684 | if ((hw->phy_revision == E1000_REVISION_2) && | ||
1685 | (hw->phy_id == M88E1111_I_PHY_ID)) { | ||
1686 | /* Vidalia Phy, set the downshift counter to 5x */ | ||
1687 | phy_data &= ~(M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK); | ||
1688 | phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; | ||
1689 | ret_val = e1000_write_phy_reg(hw, | ||
1690 | M88E1000_EXT_PHY_SPEC_CTRL, phy_data); | ||
1691 | if (ret_val) | ||
1692 | return ret_val; | ||
1693 | } else { | ||
1694 | /* Configure Master and Slave downshift values */ | ||
1695 | phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | | ||
1580 | M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); | 1696 | M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); |
1581 | phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | | 1697 | phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | |
1582 | M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); | 1698 | M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); |
1583 | ret_val = e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); | 1699 | ret_val = e1000_write_phy_reg(hw, |
1584 | if(ret_val) | 1700 | M88E1000_EXT_PHY_SPEC_CTRL, phy_data); |
1585 | return ret_val; | 1701 | if (ret_val) |
1702 | return ret_val; | ||
1703 | } | ||
1586 | } | 1704 | } |
1587 | 1705 | ||
1588 | /* SW Reset the PHY so all changes take effect */ | 1706 | /* SW Reset the PHY so all changes take effect */ |
@@ -1620,6 +1738,10 @@ e1000_copper_link_autoneg(struct e1000_hw *hw) | |||
1620 | if(hw->autoneg_advertised == 0) | 1738 | if(hw->autoneg_advertised == 0) |
1621 | hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; | 1739 | hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; |
1622 | 1740 | ||
1741 | /* IFE phy only supports 10/100 */ | ||
1742 | if (hw->phy_type == e1000_phy_ife) | ||
1743 | hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL; | ||
1744 | |||
1623 | DEBUGOUT("Reconfiguring auto-neg advertisement params\n"); | 1745 | DEBUGOUT("Reconfiguring auto-neg advertisement params\n"); |
1624 | ret_val = e1000_phy_setup_autoneg(hw); | 1746 | ret_val = e1000_phy_setup_autoneg(hw); |
1625 | if(ret_val) { | 1747 | if(ret_val) { |
@@ -1717,6 +1839,26 @@ e1000_setup_copper_link(struct e1000_hw *hw) | |||
1717 | 1839 | ||
1718 | DEBUGFUNC("e1000_setup_copper_link"); | 1840 | DEBUGFUNC("e1000_setup_copper_link"); |
1719 | 1841 | ||
1842 | switch (hw->mac_type) { | ||
1843 | case e1000_80003es2lan: | ||
1844 | case e1000_ich8lan: | ||
1845 | /* Set the mac to wait the maximum time between each | ||
1846 | * iteration and increase the max iterations when | ||
1847 | * polling the phy; this fixes erroneous timeouts at 10Mbps. */ | ||
1848 | ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF); | ||
1849 | if (ret_val) | ||
1850 | return ret_val; | ||
1851 | ret_val = e1000_read_kmrn_reg(hw, GG82563_REG(0x34, 9), ®_data); | ||
1852 | if (ret_val) | ||
1853 | return ret_val; | ||
1854 | reg_data |= 0x3F; | ||
1855 | ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 9), reg_data); | ||
1856 | if (ret_val) | ||
1857 | return ret_val; | ||
1858 | default: | ||
1859 | break; | ||
1860 | } | ||
1861 | |||
1720 | /* Check if it is a valid PHY and set PHY mode if necessary. */ | 1862 | /* Check if it is a valid PHY and set PHY mode if necessary. */ |
1721 | ret_val = e1000_copper_link_preconfig(hw); | 1863 | ret_val = e1000_copper_link_preconfig(hw); |
1722 | if(ret_val) | 1864 | if(ret_val) |
@@ -1724,10 +1866,8 @@ e1000_setup_copper_link(struct e1000_hw *hw) | |||
1724 | 1866 | ||
1725 | switch (hw->mac_type) { | 1867 | switch (hw->mac_type) { |
1726 | case e1000_80003es2lan: | 1868 | case e1000_80003es2lan: |
1727 | ret_val = e1000_read_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_INB_CTRL, | 1869 | /* Kumeran registers are written-only */ |
1728 | ®_data); | 1870 | reg_data = E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT; |
1729 | if (ret_val) | ||
1730 | return ret_val; | ||
1731 | reg_data |= E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING; | 1871 | reg_data |= E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING; |
1732 | ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_INB_CTRL, | 1872 | ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_INB_CTRL, |
1733 | reg_data); | 1873 | reg_data); |
@@ -1739,6 +1879,7 @@ e1000_setup_copper_link(struct e1000_hw *hw) | |||
1739 | } | 1879 | } |
1740 | 1880 | ||
1741 | if (hw->phy_type == e1000_phy_igp || | 1881 | if (hw->phy_type == e1000_phy_igp || |
1882 | hw->phy_type == e1000_phy_igp_3 || | ||
1742 | hw->phy_type == e1000_phy_igp_2) { | 1883 | hw->phy_type == e1000_phy_igp_2) { |
1743 | ret_val = e1000_copper_link_igp_setup(hw); | 1884 | ret_val = e1000_copper_link_igp_setup(hw); |
1744 | if(ret_val) | 1885 | if(ret_val) |
@@ -1803,7 +1944,7 @@ e1000_setup_copper_link(struct e1000_hw *hw) | |||
1803 | * hw - Struct containing variables accessed by shared code | 1944 | * hw - Struct containing variables accessed by shared code |
1804 | ******************************************************************************/ | 1945 | ******************************************************************************/ |
1805 | static int32_t | 1946 | static int32_t |
1806 | e1000_configure_kmrn_for_10_100(struct e1000_hw *hw) | 1947 | e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, uint16_t duplex) |
1807 | { | 1948 | { |
1808 | int32_t ret_val = E1000_SUCCESS; | 1949 | int32_t ret_val = E1000_SUCCESS; |
1809 | uint32_t tipg; | 1950 | uint32_t tipg; |
@@ -1823,6 +1964,18 @@ e1000_configure_kmrn_for_10_100(struct e1000_hw *hw) | |||
1823 | tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_10_100; | 1964 | tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_10_100; |
1824 | E1000_WRITE_REG(hw, TIPG, tipg); | 1965 | E1000_WRITE_REG(hw, TIPG, tipg); |
1825 | 1966 | ||
1967 | ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, ®_data); | ||
1968 | |||
1969 | if (ret_val) | ||
1970 | return ret_val; | ||
1971 | |||
1972 | if (duplex == HALF_DUPLEX) | ||
1973 | reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER; | ||
1974 | else | ||
1975 | reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; | ||
1976 | |||
1977 | ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data); | ||
1978 | |||
1826 | return ret_val; | 1979 | return ret_val; |
1827 | } | 1980 | } |
1828 | 1981 | ||
@@ -1847,6 +2000,14 @@ e1000_configure_kmrn_for_1000(struct e1000_hw *hw) | |||
1847 | tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000; | 2000 | tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000; |
1848 | E1000_WRITE_REG(hw, TIPG, tipg); | 2001 | E1000_WRITE_REG(hw, TIPG, tipg); |
1849 | 2002 | ||
2003 | ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, ®_data); | ||
2004 | |||
2005 | if (ret_val) | ||
2006 | return ret_val; | ||
2007 | |||
2008 | reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; | ||
2009 | ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data); | ||
2010 | |||
1850 | return ret_val; | 2011 | return ret_val; |
1851 | } | 2012 | } |
1852 | 2013 | ||
@@ -1869,10 +2030,13 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw) | |||
1869 | if(ret_val) | 2030 | if(ret_val) |
1870 | return ret_val; | 2031 | return ret_val; |
1871 | 2032 | ||
1872 | /* Read the MII 1000Base-T Control Register (Address 9). */ | 2033 | if (hw->phy_type != e1000_phy_ife) { |
1873 | ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); | 2034 | /* Read the MII 1000Base-T Control Register (Address 9). */ |
1874 | if(ret_val) | 2035 | ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); |
1875 | return ret_val; | 2036 | if (ret_val) |
2037 | return ret_val; | ||
2038 | } else | ||
2039 | mii_1000t_ctrl_reg = 0; | ||
1876 | 2040 | ||
1877 | /* Need to parse both autoneg_advertised and fc and set up | 2041 | /* Need to parse both autoneg_advertised and fc and set up |
1878 | * the appropriate PHY registers. First we will parse for | 2042 | * the appropriate PHY registers. First we will parse for |
@@ -1923,6 +2087,9 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw) | |||
1923 | if(hw->autoneg_advertised & ADVERTISE_1000_FULL) { | 2087 | if(hw->autoneg_advertised & ADVERTISE_1000_FULL) { |
1924 | DEBUGOUT("Advertise 1000mb Full duplex\n"); | 2088 | DEBUGOUT("Advertise 1000mb Full duplex\n"); |
1925 | mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; | 2089 | mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; |
2090 | if (hw->phy_type == e1000_phy_ife) { | ||
2091 | DEBUGOUT("e1000_phy_ife is a 10/100 PHY. Gigabit speed is not supported.\n"); | ||
2092 | } | ||
1926 | } | 2093 | } |
1927 | 2094 | ||
1928 | /* Check for a software override of the flow control settings, and | 2095 | /* Check for a software override of the flow control settings, and |
@@ -1984,9 +2151,11 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw) | |||
1984 | 2151 | ||
1985 | DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); | 2152 | DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); |
1986 | 2153 | ||
1987 | ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); | 2154 | if (hw->phy_type != e1000_phy_ife) { |
1988 | if(ret_val) | 2155 | ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); |
1989 | return ret_val; | 2156 | if (ret_val) |
2157 | return ret_val; | ||
2158 | } | ||
1990 | 2159 | ||
1991 | return E1000_SUCCESS; | 2160 | return E1000_SUCCESS; |
1992 | } | 2161 | } |
@@ -2089,6 +2258,18 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw) | |||
2089 | 2258 | ||
2090 | /* Need to reset the PHY or these changes will be ignored */ | 2259 | /* Need to reset the PHY or these changes will be ignored */ |
2091 | mii_ctrl_reg |= MII_CR_RESET; | 2260 | mii_ctrl_reg |= MII_CR_RESET; |
2261 | /* Disable MDI-X support for 10/100 */ | ||
2262 | } else if (hw->phy_type == e1000_phy_ife) { | ||
2263 | ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &phy_data); | ||
2264 | if (ret_val) | ||
2265 | return ret_val; | ||
2266 | |||
2267 | phy_data &= ~IFE_PMC_AUTO_MDIX; | ||
2268 | phy_data &= ~IFE_PMC_FORCE_MDIX; | ||
2269 | |||
2270 | ret_val = e1000_write_phy_reg(hw, IFE_PHY_MDIX_CONTROL, phy_data); | ||
2271 | if (ret_val) | ||
2272 | return ret_val; | ||
2092 | } else { | 2273 | } else { |
2093 | /* Clear Auto-Crossover to force MDI manually. IGP requires MDI | 2274 | /* Clear Auto-Crossover to force MDI manually. IGP requires MDI |
2094 | * forced whenever speed or duplex are forced. | 2275 | * forced whenever speed or duplex are forced. |
@@ -2721,8 +2902,12 @@ e1000_check_for_link(struct e1000_hw *hw) | |||
2721 | */ | 2902 | */ |
2722 | if(hw->tbi_compatibility_en) { | 2903 | if(hw->tbi_compatibility_en) { |
2723 | uint16_t speed, duplex; | 2904 | uint16_t speed, duplex; |
2724 | e1000_get_speed_and_duplex(hw, &speed, &duplex); | 2905 | ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex); |
2725 | if(speed != SPEED_1000) { | 2906 | if (ret_val) { |
2907 | DEBUGOUT("Error getting link speed and duplex\n"); | ||
2908 | return ret_val; | ||
2909 | } | ||
2910 | if (speed != SPEED_1000) { | ||
2726 | /* If link speed is not set to gigabit speed, we do not need | 2911 | /* If link speed is not set to gigabit speed, we do not need |
2727 | * to enable TBI compatibility. | 2912 | * to enable TBI compatibility. |
2728 | */ | 2913 | */ |
@@ -2889,7 +3074,13 @@ e1000_get_speed_and_duplex(struct e1000_hw *hw, | |||
2889 | if (*speed == SPEED_1000) | 3074 | if (*speed == SPEED_1000) |
2890 | ret_val = e1000_configure_kmrn_for_1000(hw); | 3075 | ret_val = e1000_configure_kmrn_for_1000(hw); |
2891 | else | 3076 | else |
2892 | ret_val = e1000_configure_kmrn_for_10_100(hw); | 3077 | ret_val = e1000_configure_kmrn_for_10_100(hw, *duplex); |
3078 | if (ret_val) | ||
3079 | return ret_val; | ||
3080 | } | ||
3081 | |||
3082 | if ((hw->phy_type == e1000_phy_igp_3) && (*speed == SPEED_1000)) { | ||
3083 | ret_val = e1000_kumeran_lock_loss_workaround(hw); | ||
2893 | if (ret_val) | 3084 | if (ret_val) |
2894 | return ret_val; | 3085 | return ret_val; |
2895 | } | 3086 | } |
@@ -3069,7 +3260,7 @@ e1000_shift_in_mdi_bits(struct e1000_hw *hw) | |||
3069 | return data; | 3260 | return data; |
3070 | } | 3261 | } |
3071 | 3262 | ||
3072 | int32_t | 3263 | static int32_t |
3073 | e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask) | 3264 | e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask) |
3074 | { | 3265 | { |
3075 | uint32_t swfw_sync = 0; | 3266 | uint32_t swfw_sync = 0; |
@@ -3079,6 +3270,9 @@ e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask) | |||
3079 | 3270 | ||
3080 | DEBUGFUNC("e1000_swfw_sync_acquire"); | 3271 | DEBUGFUNC("e1000_swfw_sync_acquire"); |
3081 | 3272 | ||
3273 | if (hw->swfwhw_semaphore_present) | ||
3274 | return e1000_get_software_flag(hw); | ||
3275 | |||
3082 | if (!hw->swfw_sync_present) | 3276 | if (!hw->swfw_sync_present) |
3083 | return e1000_get_hw_eeprom_semaphore(hw); | 3277 | return e1000_get_hw_eeprom_semaphore(hw); |
3084 | 3278 | ||
@@ -3110,7 +3304,7 @@ e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask) | |||
3110 | return E1000_SUCCESS; | 3304 | return E1000_SUCCESS; |
3111 | } | 3305 | } |
3112 | 3306 | ||
3113 | void | 3307 | static void |
3114 | e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask) | 3308 | e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask) |
3115 | { | 3309 | { |
3116 | uint32_t swfw_sync; | 3310 | uint32_t swfw_sync; |
@@ -3118,6 +3312,11 @@ e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask) | |||
3118 | 3312 | ||
3119 | DEBUGFUNC("e1000_swfw_sync_release"); | 3313 | DEBUGFUNC("e1000_swfw_sync_release"); |
3120 | 3314 | ||
3315 | if (hw->swfwhw_semaphore_present) { | ||
3316 | e1000_release_software_flag(hw); | ||
3317 | return; | ||
3318 | } | ||
3319 | |||
3121 | if (!hw->swfw_sync_present) { | 3320 | if (!hw->swfw_sync_present) { |
3122 | e1000_put_hw_eeprom_semaphore(hw); | 3321 | e1000_put_hw_eeprom_semaphore(hw); |
3123 | return; | 3322 | return; |
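The two hunks above give the SW/FW arbitration a third tier: ICH8 parts take a software flag, parts with a SWFW_SYNC register arbitrate against firmware, and older parts fall back to the EEPROM semaphore. Every PHY and KMRN access in the hunks below brackets itself with this acquire/release pair. A minimal standalone sketch of the dispatch order (the stub functions are placeholders, not the driver's internals):

    #include <stdbool.h>
    #include <stdint.h>

    struct hw_state {
        bool swfwhw_semaphore_present;   /* ICH8-style software flag */
        bool swfw_sync_present;          /* SWFW_SYNC register available */
    };

    /* Placeholders standing in for the driver's real primitives. */
    static int get_software_flag(struct hw_state *hw)            { (void)hw; return 0; }
    static int get_hw_eeprom_semaphore(struct hw_state *hw)      { (void)hw; return 0; }
    static int get_swfw_sync(struct hw_state *hw, uint16_t mask) { (void)hw; (void)mask; return 0; }

    /* Mirrors the new three-way dispatch: software flag first, then the
     * EEPROM-semaphore fallback, then the per-resource SWFW sync bits. */
    static int phy_lock(struct hw_state *hw, uint16_t mask)
    {
        if (hw->swfwhw_semaphore_present)
            return get_software_flag(hw);
        if (!hw->swfw_sync_present)
            return get_hw_eeprom_semaphore(hw);
        return get_swfw_sync(hw, mask);
    }

    int main(void)
    {
        struct hw_state hw = { .swfwhw_semaphore_present = true };
        return phy_lock(&hw, 0x02 /* hypothetical PHY mask */);
    }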
@@ -3160,7 +3359,8 @@ e1000_read_phy_reg(struct e1000_hw *hw, | |||
3160 | if (e1000_swfw_sync_acquire(hw, swfw)) | 3359 | if (e1000_swfw_sync_acquire(hw, swfw)) |
3161 | return -E1000_ERR_SWFW_SYNC; | 3360 | return -E1000_ERR_SWFW_SYNC; |
3162 | 3361 | ||
3163 | if((hw->phy_type == e1000_phy_igp || | 3362 | if ((hw->phy_type == e1000_phy_igp || |
3363 | hw->phy_type == e1000_phy_igp_3 || | ||
3164 | hw->phy_type == e1000_phy_igp_2) && | 3364 | hw->phy_type == e1000_phy_igp_2) && |
3165 | (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { | 3365 | (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { |
3166 | ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, | 3366 | ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, |
@@ -3299,7 +3499,8 @@ e1000_write_phy_reg(struct e1000_hw *hw, | |||
3299 | if (e1000_swfw_sync_acquire(hw, swfw)) | 3499 | if (e1000_swfw_sync_acquire(hw, swfw)) |
3300 | return -E1000_ERR_SWFW_SYNC; | 3500 | return -E1000_ERR_SWFW_SYNC; |
3301 | 3501 | ||
3302 | if((hw->phy_type == e1000_phy_igp || | 3502 | if ((hw->phy_type == e1000_phy_igp || |
3503 | hw->phy_type == e1000_phy_igp_3 || | ||
3303 | hw->phy_type == e1000_phy_igp_2) && | 3504 | hw->phy_type == e1000_phy_igp_2) && |
3304 | (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { | 3505 | (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { |
3305 | ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, | 3506 | ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, |
@@ -3401,7 +3602,7 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw, | |||
3401 | return E1000_SUCCESS; | 3602 | return E1000_SUCCESS; |
3402 | } | 3603 | } |
3403 | 3604 | ||
3404 | int32_t | 3605 | static int32_t |
3405 | e1000_read_kmrn_reg(struct e1000_hw *hw, | 3606 | e1000_read_kmrn_reg(struct e1000_hw *hw, |
3406 | uint32_t reg_addr, | 3607 | uint32_t reg_addr, |
3407 | uint16_t *data) | 3608 | uint16_t *data) |
@@ -3434,7 +3635,7 @@ e1000_read_kmrn_reg(struct e1000_hw *hw, | |||
3434 | return E1000_SUCCESS; | 3635 | return E1000_SUCCESS; |
3435 | } | 3636 | } |
3436 | 3637 | ||
3437 | int32_t | 3638 | static int32_t |
3438 | e1000_write_kmrn_reg(struct e1000_hw *hw, | 3639 | e1000_write_kmrn_reg(struct e1000_hw *hw, |
3439 | uint32_t reg_addr, | 3640 | uint32_t reg_addr, |
3440 | uint16_t data) | 3641 | uint16_t data) |
@@ -3514,7 +3715,7 @@ e1000_phy_hw_reset(struct e1000_hw *hw) | |||
3514 | E1000_WRITE_FLUSH(hw); | 3715 | E1000_WRITE_FLUSH(hw); |
3515 | 3716 | ||
3516 | if (hw->mac_type >= e1000_82571) | 3717 | if (hw->mac_type >= e1000_82571) |
3517 | msec_delay(10); | 3718 | msec_delay_irq(10); |
3518 | e1000_swfw_sync_release(hw, swfw); | 3719 | e1000_swfw_sync_release(hw, swfw); |
3519 | } else { | 3720 | } else { |
3520 | /* Read the Extended Device Control Register, assert the PHY_RESET_DIR | 3721 | /* Read the Extended Device Control Register, assert the PHY_RESET_DIR |
@@ -3544,6 +3745,12 @@ e1000_phy_hw_reset(struct e1000_hw *hw) | |||
3544 | ret_val = e1000_get_phy_cfg_done(hw); | 3745 | ret_val = e1000_get_phy_cfg_done(hw); |
3545 | e1000_release_software_semaphore(hw); | 3746 | e1000_release_software_semaphore(hw); |
3546 | 3747 | ||
3748 | if ((hw->mac_type == e1000_ich8lan) && | ||
3749 | (hw->phy_type == e1000_phy_igp_3)) { | ||
3750 | ret_val = e1000_init_lcd_from_nvm(hw); | ||
3751 | if (ret_val) | ||
3752 | return ret_val; | ||
3753 | } | ||
3547 | return ret_val; | 3754 | return ret_val; |
3548 | } | 3755 | } |
3549 | 3756 | ||
@@ -3572,9 +3779,11 @@ e1000_phy_reset(struct e1000_hw *hw) | |||
3572 | case e1000_82541_rev_2: | 3779 | case e1000_82541_rev_2: |
3573 | case e1000_82571: | 3780 | case e1000_82571: |
3574 | case e1000_82572: | 3781 | case e1000_82572: |
3782 | case e1000_ich8lan: | ||
3575 | ret_val = e1000_phy_hw_reset(hw); | 3783 | ret_val = e1000_phy_hw_reset(hw); |
3576 | if(ret_val) | 3784 | if(ret_val) |
3577 | return ret_val; | 3785 | return ret_val; |
3786 | |||
3578 | break; | 3787 | break; |
3579 | default: | 3788 | default: |
3580 | ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); | 3789 | ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); |
@@ -3597,11 +3806,120 @@ e1000_phy_reset(struct e1000_hw *hw) | |||
3597 | } | 3806 | } |
3598 | 3807 | ||
3599 | /****************************************************************************** | 3808 | /****************************************************************************** |
3809 | * Work-around for 82566 power-down: on D3 entry- | ||
3810 | * 1) disable gigabit link | ||
3811 | * 2) write VR power-down enable | ||
3812 | * 3) read it back | ||
3813 | * if successful continue, else issue LCD reset and repeat | ||
3814 | * | ||
3815 | * hw - struct containing variables accessed by shared code | ||
3816 | ******************************************************************************/ | ||
3817 | void | ||
3818 | e1000_phy_powerdown_workaround(struct e1000_hw *hw) | ||
3819 | { | ||
3820 | int32_t reg; | ||
3821 | uint16_t phy_data; | ||
3822 | int32_t retry = 0; | ||
3823 | |||
3824 | DEBUGFUNC("e1000_phy_powerdown_workaround"); | ||
3825 | |||
3826 | if (hw->phy_type != e1000_phy_igp_3) | ||
3827 | return; | ||
3828 | |||
3829 | do { | ||
3830 | /* Disable link */ | ||
3831 | reg = E1000_READ_REG(hw, PHY_CTRL); | ||
3832 | E1000_WRITE_REG(hw, PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE | | ||
3833 | E1000_PHY_CTRL_NOND0A_GBE_DISABLE); | ||
3834 | |||
3835 | /* Write VR power-down enable */ | ||
3836 | e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data); | ||
3837 | e1000_write_phy_reg(hw, IGP3_VR_CTRL, phy_data | | ||
3838 | IGP3_VR_CTRL_MODE_SHUT); | ||
3839 | |||
3840 | /* Read it back and test */ | ||
3841 | e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data); | ||
3842 | if ((phy_data & IGP3_VR_CTRL_MODE_SHUT) || retry) | ||
3843 | break; | ||
3844 | |||
3845 | /* Issue PHY reset and repeat at most one more time */ | ||
3846 | reg = E1000_READ_REG(hw, CTRL); | ||
3847 | E1000_WRITE_REG(hw, CTRL, reg | E1000_CTRL_PHY_RST); | ||
3848 | retry++; | ||
3849 | } while (retry); | ||
3850 | |||
3851 | return; | ||
3852 | |||
3853 | } | ||
3854 | |||
3855 | /****************************************************************************** | ||
3856 | * Work-around for 82566 Kumeran PCS lock loss: | ||
3857 | * On link status change (i.e. PCI reset, speed change) and link is up and | ||
3858 | * speed is gigabit- | ||
3859 | * 0) if workaround is optionally disabled do nothing | ||
3860 | * 1) wait 1ms for Kumeran link to come up | ||
3861 | * 2) check Kumeran Diagnostic register PCS lock loss bit | ||
3862 | * 3) if not set the link is locked (all is good), otherwise... | ||
3863 | * 4) reset the PHY | ||
3864 | * 5) repeat up to 10 times | ||
3865 | * Note: this is only called for IGP3 copper when speed is 1gb. | ||
3866 | * | ||
3867 | * hw - struct containing variables accessed by shared code | ||
3868 | ******************************************************************************/ | ||
3869 | static int32_t | ||
3870 | e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw) | ||
3871 | { | ||
3872 | int32_t ret_val; | ||
3873 | int32_t reg; | ||
3874 | int32_t cnt; | ||
3875 | uint16_t phy_data; | ||
3876 | |||
3877 | if (hw->kmrn_lock_loss_workaround_disabled) | ||
3878 | return E1000_SUCCESS; | ||
3879 | |||
3880 | /* Make sure link is up before proceeding. If not just return. | ||
3881 | * Attempting this while link is negotiating fouls up link | ||
3882 | * stability */ | ||
3883 | ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); | ||
3884 | ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); | ||
3885 | |||
3886 | if (phy_data & MII_SR_LINK_STATUS) { | ||
3887 | for (cnt = 0; cnt < 10; cnt++) { | ||
3888 | /* read once to clear */ | ||
3889 | ret_val = e1000_read_phy_reg(hw, IGP3_KMRN_DIAG, &phy_data); | ||
3890 | if (ret_val) | ||
3891 | return ret_val; | ||
3892 | /* and again to get new status */ | ||
3893 | ret_val = e1000_read_phy_reg(hw, IGP3_KMRN_DIAG, &phy_data); | ||
3894 | if (ret_val) | ||
3895 | return ret_val; | ||
3896 | |||
3897 | /* check for PCS lock */ | ||
3898 | if (!(phy_data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS)) | ||
3899 | return E1000_SUCCESS; | ||
3900 | |||
3901 | /* Issue PHY reset */ | ||
3902 | e1000_phy_hw_reset(hw); | ||
3903 | msec_delay_irq(5); | ||
3904 | } | ||
3905 | /* Disable GigE link negotiation */ | ||
3906 | reg = E1000_READ_REG(hw, PHY_CTRL); | ||
3907 | E1000_WRITE_REG(hw, PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE | | ||
3908 | E1000_PHY_CTRL_NOND0A_GBE_DISABLE); | ||
3909 | |||
3910 | /* unable to acquire PCS lock */ | ||
3911 | return E1000_ERR_PHY; | ||
3912 | } | ||
3913 | |||
3914 | return E1000_SUCCESS; | ||
3915 | } | ||
3916 | |||
3917 | /****************************************************************************** | ||
3600 | * Probes the expected PHY address for known PHY IDs | 3918 | * Probes the expected PHY address for known PHY IDs |
3601 | * | 3919 | * |
3602 | * hw - Struct containing variables accessed by shared code | 3920 | * hw - Struct containing variables accessed by shared code |
3603 | ******************************************************************************/ | 3921 | ******************************************************************************/ |
3604 | static int32_t | 3922 | int32_t |
3605 | e1000_detect_gig_phy(struct e1000_hw *hw) | 3923 | e1000_detect_gig_phy(struct e1000_hw *hw) |
3606 | { | 3924 | { |
3607 | int32_t phy_init_status, ret_val; | 3925 | int32_t phy_init_status, ret_val; |
@@ -3613,8 +3931,8 @@ e1000_detect_gig_phy(struct e1000_hw *hw) | |||
3613 | /* The 82571 firmware may still be configuring the PHY. In this | 3931 | /* The 82571 firmware may still be configuring the PHY. In this |
3614 | * case, we cannot access the PHY until the configuration is done. So | 3932 | * case, we cannot access the PHY until the configuration is done. So |
3615 | * we explicitly set the PHY values. */ | 3933 | * we explicitly set the PHY values. */ |
3616 | if(hw->mac_type == e1000_82571 || | 3934 | if (hw->mac_type == e1000_82571 || |
3617 | hw->mac_type == e1000_82572) { | 3935 | hw->mac_type == e1000_82572) { |
3618 | hw->phy_id = IGP01E1000_I_PHY_ID; | 3936 | hw->phy_id = IGP01E1000_I_PHY_ID; |
3619 | hw->phy_type = e1000_phy_igp_2; | 3937 | hw->phy_type = e1000_phy_igp_2; |
3620 | return E1000_SUCCESS; | 3938 | return E1000_SUCCESS; |
@@ -3631,7 +3949,7 @@ e1000_detect_gig_phy(struct e1000_hw *hw) | |||
3631 | 3949 | ||
3632 | /* Read the PHY ID Registers to identify which PHY is onboard. */ | 3950 | /* Read the PHY ID Registers to identify which PHY is onboard. */ |
3633 | ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high); | 3951 | ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high); |
3634 | if(ret_val) | 3952 | if (ret_val) |
3635 | return ret_val; | 3953 | return ret_val; |
3636 | 3954 | ||
3637 | hw->phy_id = (uint32_t) (phy_id_high << 16); | 3955 | hw->phy_id = (uint32_t) (phy_id_high << 16); |
@@ -3669,6 +3987,12 @@ e1000_detect_gig_phy(struct e1000_hw *hw) | |||
3669 | case e1000_80003es2lan: | 3987 | case e1000_80003es2lan: |
3670 | if (hw->phy_id == GG82563_E_PHY_ID) match = TRUE; | 3988 | if (hw->phy_id == GG82563_E_PHY_ID) match = TRUE; |
3671 | break; | 3989 | break; |
3990 | case e1000_ich8lan: | ||
3991 | if (hw->phy_id == IGP03E1000_E_PHY_ID) match = TRUE; | ||
3992 | if (hw->phy_id == IFE_E_PHY_ID) match = TRUE; | ||
3993 | if (hw->phy_id == IFE_PLUS_E_PHY_ID) match = TRUE; | ||
3994 | if (hw->phy_id == IFE_C_E_PHY_ID) match = TRUE; | ||
3995 | break; | ||
3672 | default: | 3996 | default: |
3673 | DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type); | 3997 | DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type); |
3674 | return -E1000_ERR_CONFIG; | 3998 | return -E1000_ERR_CONFIG; |
@@ -3784,6 +4108,53 @@ e1000_phy_igp_get_info(struct e1000_hw *hw, | |||
3784 | } | 4108 | } |
3785 | 4109 | ||
3786 | /****************************************************************************** | 4110 | /****************************************************************************** |
4111 | * Get PHY information from various PHY registers for ife PHY only. | ||
4112 | * | ||
4113 | * hw - Struct containing variables accessed by shared code | ||
4114 | * phy_info - PHY information structure | ||
4115 | ******************************************************************************/ | ||
4116 | static int32_t | ||
4117 | e1000_phy_ife_get_info(struct e1000_hw *hw, | ||
4118 | struct e1000_phy_info *phy_info) | ||
4119 | { | ||
4120 | int32_t ret_val; | ||
4121 | uint16_t phy_data, polarity; | ||
4122 | |||
4123 | DEBUGFUNC("e1000_phy_ife_get_info"); | ||
4124 | |||
4125 | phy_info->downshift = (e1000_downshift)hw->speed_downgraded; | ||
4126 | phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal; | ||
4127 | |||
4128 | ret_val = e1000_read_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, &phy_data); | ||
4129 | if (ret_val) | ||
4130 | return ret_val; | ||
4131 | phy_info->polarity_correction = | ||
4132 | (phy_data & IFE_PSC_AUTO_POLARITY_DISABLE) >> | ||
4133 | IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT; | ||
4134 | |||
4135 | if (phy_info->polarity_correction == e1000_polarity_reversal_enabled) { | ||
4136 | ret_val = e1000_check_polarity(hw, &polarity); | ||
4137 | if (ret_val) | ||
4138 | return ret_val; | ||
4139 | } else { | ||
4140 | /* Polarity is forced. */ | ||
4141 | polarity = (phy_data & IFE_PSC_FORCE_POLARITY) >> | ||
4142 | IFE_PSC_FORCE_POLARITY_SHIFT; | ||
4143 | } | ||
4144 | phy_info->cable_polarity = polarity; | ||
4145 | |||
4146 | ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &phy_data); | ||
4147 | if (ret_val) | ||
4148 | return ret_val; | ||
4149 | |||
4150 | phy_info->mdix_mode = | ||
4151 | (phy_data & (IFE_PMC_AUTO_MDIX | IFE_PMC_FORCE_MDIX)) >> | ||
4152 | IFE_PMC_MDIX_MODE_SHIFT; | ||
4153 | |||
4154 | return E1000_SUCCESS; | ||
4155 | } | ||
4156 | |||
4157 | /****************************************************************************** | ||
3787 | * Get PHY information from various PHY registers for m88 PHY only. | 4158 | * Get PHY information from various PHY registers for m88 PHY only. |
3788 | * | 4159 | * |
3789 | * hw - Struct containing variables accessed by shared code | 4160 | * hw - Struct containing variables accessed by shared code |
@@ -3898,9 +4269,12 @@ e1000_phy_get_info(struct e1000_hw *hw, | |||
3898 | return -E1000_ERR_CONFIG; | 4269 | return -E1000_ERR_CONFIG; |
3899 | } | 4270 | } |
3900 | 4271 | ||
3901 | if(hw->phy_type == e1000_phy_igp || | 4272 | if (hw->phy_type == e1000_phy_igp || |
4273 | hw->phy_type == e1000_phy_igp_3 || | ||
3902 | hw->phy_type == e1000_phy_igp_2) | 4274 | hw->phy_type == e1000_phy_igp_2) |
3903 | return e1000_phy_igp_get_info(hw, phy_info); | 4275 | return e1000_phy_igp_get_info(hw, phy_info); |
4276 | else if (hw->phy_type == e1000_phy_ife) | ||
4277 | return e1000_phy_ife_get_info(hw, phy_info); | ||
3904 | else | 4278 | else |
3905 | return e1000_phy_m88_get_info(hw, phy_info); | 4279 | return e1000_phy_m88_get_info(hw, phy_info); |
3906 | } | 4280 | } |
@@ -4049,6 +4423,35 @@ e1000_init_eeprom_params(struct e1000_hw *hw) | |||
4049 | eeprom->use_eerd = TRUE; | 4423 | eeprom->use_eerd = TRUE; |
4050 | eeprom->use_eewr = FALSE; | 4424 | eeprom->use_eewr = FALSE; |
4051 | break; | 4425 | break; |
4426 | case e1000_ich8lan: | ||
4427 | { | ||
4428 | int32_t i = 0; | ||
4429 | uint32_t flash_size = E1000_READ_ICH8_REG(hw, ICH8_FLASH_GFPREG); | ||
4430 | |||
4431 | eeprom->type = e1000_eeprom_ich8; | ||
4432 | eeprom->use_eerd = FALSE; | ||
4433 | eeprom->use_eewr = FALSE; | ||
4434 | eeprom->word_size = E1000_SHADOW_RAM_WORDS; | ||
4435 | |||
4436 | /* Zero the shadow RAM structure. But don't load it from NVM | ||
4437 | * so as to save time for driver init */ | ||
4438 | if (hw->eeprom_shadow_ram != NULL) { | ||
4439 | for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) { | ||
4440 | hw->eeprom_shadow_ram[i].modified = FALSE; | ||
4441 | hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF; | ||
4442 | } | ||
4443 | } | ||
4444 | |||
4445 | hw->flash_base_addr = (flash_size & ICH8_GFPREG_BASE_MASK) * | ||
4446 | ICH8_FLASH_SECTOR_SIZE; | ||
4447 | |||
4448 | hw->flash_bank_size = ((flash_size >> 16) & ICH8_GFPREG_BASE_MASK) + 1; | ||
4449 | hw->flash_bank_size -= (flash_size & ICH8_GFPREG_BASE_MASK); | ||
4450 | hw->flash_bank_size *= ICH8_FLASH_SECTOR_SIZE; | ||
4451 | hw->flash_bank_size /= 2 * sizeof(uint16_t); | ||
4452 | |||
4453 | break; | ||
4454 | } | ||
4052 | default: | 4455 | default: |
4053 | break; | 4456 | break; |
4054 | } | 4457 | } |
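The ich8lan case above derives the flash layout from GFPREG: the low field is the base sector of the NVM region, the high field its limit, and the bank size ends up expressed in 16-bit words for each of the two banks. The sector size and field mask below are assumptions for illustration (the real constants come from e1000_hw.h), applied to a hypothetical GFPREG value:

    #include <stdint.h>
    #include <stdio.h>

    #define GFPREG_BASE_MASK   0x1FFF   /* assumed width of the base/limit fields */
    #define FLASH_SECTOR_SIZE  4096u    /* assumed ICH8 flash sector size in bytes */

    int main(void)
    {
        uint32_t gfpreg = 0x00200001;   /* hypothetical: base sector 1, limit sector 0x20 */

        uint32_t base = (gfpreg & GFPREG_BASE_MASK) * FLASH_SECTOR_SIZE;

        uint32_t bank = ((gfpreg >> 16) & GFPREG_BASE_MASK) + 1;
        bank -= gfpreg & GFPREG_BASE_MASK;          /* sectors spanned by the region */
        bank *= FLASH_SECTOR_SIZE;                  /* bytes in the region */
        bank /= 2 * (uint32_t)sizeof(uint16_t);     /* 16-bit words per bank, two banks */

        printf("flash_base_addr=0x%x flash_bank_size=%u words\n",
               (unsigned)base, (unsigned)bank);
        return 0;
    }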
@@ -4469,7 +4872,10 @@ e1000_read_eeprom(struct e1000_hw *hw, | |||
4469 | return ret_val; | 4872 | return ret_val; |
4470 | } | 4873 | } |
4471 | 4874 | ||
4472 | if(eeprom->type == e1000_eeprom_spi) { | 4875 | if (eeprom->type == e1000_eeprom_ich8) |
4876 | return e1000_read_eeprom_ich8(hw, offset, words, data); | ||
4877 | |||
4878 | if (eeprom->type == e1000_eeprom_spi) { | ||
4473 | uint16_t word_in; | 4879 | uint16_t word_in; |
4474 | uint8_t read_opcode = EEPROM_READ_OPCODE_SPI; | 4880 | uint8_t read_opcode = EEPROM_READ_OPCODE_SPI; |
4475 | 4881 | ||
@@ -4636,7 +5042,10 @@ e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw) | |||
4636 | 5042 | ||
4637 | DEBUGFUNC("e1000_is_onboard_nvm_eeprom"); | 5043 | DEBUGFUNC("e1000_is_onboard_nvm_eeprom"); |
4638 | 5044 | ||
4639 | if(hw->mac_type == e1000_82573) { | 5045 | if (hw->mac_type == e1000_ich8lan) |
5046 | return FALSE; | ||
5047 | |||
5048 | if (hw->mac_type == e1000_82573) { | ||
4640 | eecd = E1000_READ_REG(hw, EECD); | 5049 | eecd = E1000_READ_REG(hw, EECD); |
4641 | 5050 | ||
4642 | /* Isolate bits 15 & 16 */ | 5051 | /* Isolate bits 15 & 16 */ |
@@ -4686,8 +5095,22 @@ e1000_validate_eeprom_checksum(struct e1000_hw *hw) | |||
4686 | } | 5095 | } |
4687 | } | 5096 | } |
4688 | 5097 | ||
4689 | for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { | 5098 | if (hw->mac_type == e1000_ich8lan) { |
4690 | if(e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { | 5099 | /* Drivers must allocate the shadow ram structure for the |
5100 | * EEPROM checksum to be updated. Otherwise, this bit as well | ||
5101 | * as the checksum must both be set correctly for this | ||
5102 | * validation to pass. | ||
5103 | */ | ||
5104 | e1000_read_eeprom(hw, 0x19, 1, &eeprom_data); | ||
5105 | if ((eeprom_data & 0x40) == 0) { | ||
5106 | eeprom_data |= 0x40; | ||
5107 | e1000_write_eeprom(hw, 0x19, 1, &eeprom_data); | ||
5108 | e1000_update_eeprom_checksum(hw); | ||
5109 | } | ||
5110 | } | ||
5111 | |||
5112 | for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { | ||
5113 | if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { | ||
4691 | DEBUGOUT("EEPROM Read Error\n"); | 5114 | DEBUGOUT("EEPROM Read Error\n"); |
4692 | return -E1000_ERR_EEPROM; | 5115 | return -E1000_ERR_EEPROM; |
4693 | } | 5116 | } |
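For ich8lan the hunk above first forces the "checksum valid" bit (bit 6 of word 0x19) and regenerates the checksum before the usual summation loop runs. In the e1000 scheme the stored checksum word is chosen so that words 0x00 through EEPROM_CHECKSUM_REG sum to a fixed constant; the sketch below assumes the conventional 0xBABA target and a 0x3F checksum offset, so treat both values as assumptions rather than facts taken from this hunk:

    #include <stdint.h>
    #include <stdio.h>

    #define CHECKSUM_REG  0x3F     /* assumed EEPROM_CHECKSUM_REG */
    #define EEPROM_SUM    0xBABA   /* assumed target for the 16-bit word sum */

    /* Recompute the checksum word so the image validates. */
    static uint16_t fixup_checksum(uint16_t *words)
    {
        uint16_t sum = 0;
        int i;

        for (i = 0; i < CHECKSUM_REG; i++)
            sum += words[i];
        words[CHECKSUM_REG] = (uint16_t)(EEPROM_SUM - sum);
        return words[CHECKSUM_REG];
    }

    int main(void)
    {
        uint16_t image[CHECKSUM_REG + 1] = { 0x1234, 0x5678 };  /* rest zero */

        printf("checksum word = 0x%04X\n", fixup_checksum(image));  /* 0x520E here */
        return 0;
    }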
@@ -4713,6 +5136,7 @@ e1000_validate_eeprom_checksum(struct e1000_hw *hw) | |||
4713 | int32_t | 5136 | int32_t |
4714 | e1000_update_eeprom_checksum(struct e1000_hw *hw) | 5137 | e1000_update_eeprom_checksum(struct e1000_hw *hw) |
4715 | { | 5138 | { |
5139 | uint32_t ctrl_ext; | ||
4716 | uint16_t checksum = 0; | 5140 | uint16_t checksum = 0; |
4717 | uint16_t i, eeprom_data; | 5141 | uint16_t i, eeprom_data; |
4718 | 5142 | ||
@@ -4731,6 +5155,14 @@ e1000_update_eeprom_checksum(struct e1000_hw *hw) | |||
4731 | return -E1000_ERR_EEPROM; | 5155 | return -E1000_ERR_EEPROM; |
4732 | } else if (hw->eeprom.type == e1000_eeprom_flash) { | 5156 | } else if (hw->eeprom.type == e1000_eeprom_flash) { |
4733 | e1000_commit_shadow_ram(hw); | 5157 | e1000_commit_shadow_ram(hw); |
5158 | } else if (hw->eeprom.type == e1000_eeprom_ich8) { | ||
5159 | e1000_commit_shadow_ram(hw); | ||
5160 | /* Reload the EEPROM, or else modifications will not appear | ||
5161 | * until after next adapter reset. */ | ||
5162 | ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); | ||
5163 | ctrl_ext |= E1000_CTRL_EXT_EE_RST; | ||
5164 | E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); | ||
5165 | msec_delay(10); | ||
4734 | } | 5166 | } |
4735 | return E1000_SUCCESS; | 5167 | return E1000_SUCCESS; |
4736 | } | 5168 | } |
@@ -4770,6 +5202,9 @@ e1000_write_eeprom(struct e1000_hw *hw, | |||
4770 | if(eeprom->use_eewr == TRUE) | 5202 | if(eeprom->use_eewr == TRUE) |
4771 | return e1000_write_eeprom_eewr(hw, offset, words, data); | 5203 | return e1000_write_eeprom_eewr(hw, offset, words, data); |
4772 | 5204 | ||
5205 | if (eeprom->type == e1000_eeprom_ich8) | ||
5206 | return e1000_write_eeprom_ich8(hw, offset, words, data); | ||
5207 | |||
4773 | /* Prepare the EEPROM for writing */ | 5208 | /* Prepare the EEPROM for writing */ |
4774 | if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) | 5209 | if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) |
4775 | return -E1000_ERR_EEPROM; | 5210 | return -E1000_ERR_EEPROM; |
@@ -4957,11 +5392,17 @@ e1000_commit_shadow_ram(struct e1000_hw *hw) | |||
4957 | uint32_t flop = 0; | 5392 | uint32_t flop = 0; |
4958 | uint32_t i = 0; | 5393 | uint32_t i = 0; |
4959 | int32_t error = E1000_SUCCESS; | 5394 | int32_t error = E1000_SUCCESS; |
4960 | 5395 | uint32_t old_bank_offset = 0; | |
4961 | /* The flop register will be used to determine if flash type is STM */ | 5396 | uint32_t new_bank_offset = 0; |
4962 | flop = E1000_READ_REG(hw, FLOP); | 5397 | uint32_t sector_retries = 0; |
5398 | uint8_t low_byte = 0; | ||
5399 | uint8_t high_byte = 0; | ||
5400 | uint8_t temp_byte = 0; | ||
5401 | boolean_t sector_write_failed = FALSE; | ||
4963 | 5402 | ||
4964 | if (hw->mac_type == e1000_82573) { | 5403 | if (hw->mac_type == e1000_82573) { |
5404 | /* The flop register will be used to determine if flash type is STM */ | ||
5405 | flop = E1000_READ_REG(hw, FLOP); | ||
4965 | for (i=0; i < attempts; i++) { | 5406 | for (i=0; i < attempts; i++) { |
4966 | eecd = E1000_READ_REG(hw, EECD); | 5407 | eecd = E1000_READ_REG(hw, EECD); |
4967 | if ((eecd & E1000_EECD_FLUPD) == 0) { | 5408 | if ((eecd & E1000_EECD_FLUPD) == 0) { |
@@ -4995,6 +5436,106 @@ e1000_commit_shadow_ram(struct e1000_hw *hw) | |||
4995 | } | 5436 | } |
4996 | } | 5437 | } |
4997 | 5438 | ||
5439 | if (hw->mac_type == e1000_ich8lan && hw->eeprom_shadow_ram != NULL) { | ||
5440 | /* We're writing to the opposite bank so if we're on bank 1, | ||
5441 | * write to bank 0 etc. We also need to erase the segment that | ||
5442 | * is going to be written */ | ||
5443 | if (!(E1000_READ_REG(hw, EECD) & E1000_EECD_SEC1VAL)) { | ||
5444 | new_bank_offset = hw->flash_bank_size * 2; | ||
5445 | old_bank_offset = 0; | ||
5446 | e1000_erase_ich8_4k_segment(hw, 1); | ||
5447 | } else { | ||
5448 | old_bank_offset = hw->flash_bank_size * 2; | ||
5449 | new_bank_offset = 0; | ||
5450 | e1000_erase_ich8_4k_segment(hw, 0); | ||
5451 | } | ||
5452 | |||
5453 | do { | ||
5454 | sector_write_failed = FALSE; | ||
5455 | /* Loop through every word in the shadow RAM; each word is | ||
5456 | * written out to flash as two bytes. */ | ||
5457 | for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) { | ||
5458 | /* Determine whether to write the value stored | ||
5459 | * in the other NVM bank or a modified value stored | ||
5460 | * in the shadow RAM */ | ||
5461 | if (hw->eeprom_shadow_ram[i].modified == TRUE) { | ||
5462 | low_byte = (uint8_t)hw->eeprom_shadow_ram[i].eeprom_word; | ||
5463 | e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset, | ||
5464 | &temp_byte); | ||
5465 | udelay(100); | ||
5466 | error = e1000_verify_write_ich8_byte(hw, | ||
5467 | (i << 1) + new_bank_offset, | ||
5468 | low_byte); | ||
5469 | if (error != E1000_SUCCESS) | ||
5470 | sector_write_failed = TRUE; | ||
5471 | high_byte = | ||
5472 | (uint8_t)(hw->eeprom_shadow_ram[i].eeprom_word >> 8); | ||
5473 | e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset + 1, | ||
5474 | &temp_byte); | ||
5475 | udelay(100); | ||
5476 | } else { | ||
5477 | e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset, | ||
5478 | &low_byte); | ||
5479 | udelay(100); | ||
5480 | error = e1000_verify_write_ich8_byte(hw, | ||
5481 | (i << 1) + new_bank_offset, low_byte); | ||
5482 | if (error != E1000_SUCCESS) | ||
5483 | sector_write_failed = TRUE; | ||
5484 | e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset + 1, | ||
5485 | &high_byte); | ||
5486 | } | ||
5487 | |||
5488 | /* If the word is 0x13, then make sure the signature bits | ||
5489 | * (15:14) are 11b until the commit has completed. | ||
5490 | * This will allow us to write 10b which indicates the | ||
5491 | * signature is valid. We want to do this after the write | ||
5492 | * has completed so that we don't mark the segment valid | ||
5493 | * while the write is still in progress */ | ||
5494 | if (i == E1000_ICH8_NVM_SIG_WORD) | ||
5495 | high_byte = E1000_ICH8_NVM_SIG_MASK | high_byte; | ||
5496 | |||
5497 | error = e1000_verify_write_ich8_byte(hw, | ||
5498 | (i << 1) + new_bank_offset + 1, high_byte); | ||
5499 | if (error != E1000_SUCCESS) | ||
5500 | sector_write_failed = TRUE; | ||
5501 | |||
5502 | if (sector_write_failed == FALSE) { | ||
5503 | /* Clear the now not used entry in the cache */ | ||
5504 | hw->eeprom_shadow_ram[i].modified = FALSE; | ||
5505 | hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF; | ||
5506 | } | ||
5507 | } | ||
5508 | |||
5509 | /* Don't bother writing the segment valid bits if sector | ||
5510 | * programming failed. */ | ||
5511 | if (sector_write_failed == FALSE) { | ||
5512 | /* Finally validate the new segment by setting bits 15:14 | ||
5513 | * to 10b in word 0x13; this can be done without an | ||
5514 | * erase as well since these bits are 11 to start with | ||
5515 | * and we need to change bit 14 to 0b */ | ||
5516 | e1000_read_ich8_byte(hw, | ||
5517 | E1000_ICH8_NVM_SIG_WORD * 2 + 1 + new_bank_offset, | ||
5518 | &high_byte); | ||
5519 | high_byte &= 0xBF; | ||
5520 | error = e1000_verify_write_ich8_byte(hw, | ||
5521 | E1000_ICH8_NVM_SIG_WORD * 2 + 1 + new_bank_offset, | ||
5522 | high_byte); | ||
5523 | if (error != E1000_SUCCESS) | ||
5524 | sector_write_failed = TRUE; | ||
5525 | |||
5526 | /* And invalidate the previously valid segment by setting | ||
5527 | * its signature word (0x13) high_byte to 0b. This can be | ||
5528 | * done without an erase because flash erase sets all bits | ||
5529 | * to 1's. We can write 1's to 0's without an erase */ | ||
5530 | error = e1000_verify_write_ich8_byte(hw, | ||
5531 | E1000_ICH8_NVM_SIG_WORD * 2 + 1 + old_bank_offset, | ||
5532 | 0); | ||
5533 | if (error != E1000_SUCCESS) | ||
5534 | sector_write_failed = TRUE; | ||
5535 | } | ||
5536 | } while (++sector_retries < 10 && sector_write_failed == TRUE); | ||
5537 | } | ||
5538 | |||
4998 | return error; | 5539 | return error; |
4999 | } | 5540 | } |
5000 | 5541 | ||
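The commit path above writes the shadow RAM into whichever 4 KB bank is currently unused, then flips the signature in word 0x13: bits 15:14 go from 11b (erased) to 10b to validate the new bank, and the old bank's signature byte is written to 0. Because erased flash reads back as all ones and programming can only clear bits, both transitions work without another erase. A tiny illustrative sketch of just those byte values (not the driver's flash accessors):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustration only: the high byte of NVM word 0x13 carries the bank
     * signature in its top two bits (11b = erased, 10b = valid bank). */
    static uint8_t mark_valid(uint8_t sig_byte)
    {
        return sig_byte & 0xBF;          /* clear bit 14 of the word: 11b -> 10b */
    }

    int main(void)
    {
        uint8_t erased = 0xFF;                        /* erased flash: all ones */

        printf("new bank signature byte: 0x%02X\n", mark_valid(erased)); /* 0xBF */
        printf("old bank signature byte: 0x%02X\n", 0x00);  /* 1 -> 0 needs no erase */
        return 0;
    }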
@@ -5102,15 +5643,19 @@ e1000_init_rx_addrs(struct e1000_hw *hw) | |||
5102 | * the other port. */ | 5643 | * the other port. */ |
5103 | if ((hw->mac_type == e1000_82571) && (hw->laa_is_present == TRUE)) | 5644 | if ((hw->mac_type == e1000_82571) && (hw->laa_is_present == TRUE)) |
5104 | rar_num -= 1; | 5645 | rar_num -= 1; |
5646 | if (hw->mac_type == e1000_ich8lan) | ||
5647 | rar_num = E1000_RAR_ENTRIES_ICH8LAN; | ||
5648 | |||
5105 | /* Zero out the other 15 receive addresses. */ | 5649 | /* Zero out the other 15 receive addresses. */ |
5106 | DEBUGOUT("Clearing RAR[1-15]\n"); | 5650 | DEBUGOUT("Clearing RAR[1-15]\n"); |
5107 | for(i = 1; i < rar_num; i++) { | 5651 | for(i = 1; i < rar_num; i++) { |
5108 | E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); | 5652 | E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); |
5653 | E1000_WRITE_FLUSH(hw); | ||
5109 | E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); | 5654 | E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); |
5655 | E1000_WRITE_FLUSH(hw); | ||
5110 | } | 5656 | } |
5111 | } | 5657 | } |
5112 | 5658 | ||
5113 | #if 0 | ||
5114 | /****************************************************************************** | 5659 | /****************************************************************************** |
5115 | * Updates the MAC's list of multicast addresses. | 5660 | * Updates the MAC's list of multicast addresses. |
5116 | * | 5661 | * |
@@ -5125,6 +5670,7 @@ e1000_init_rx_addrs(struct e1000_hw *hw) | |||
5125 | * for the first 15 multicast addresses, and hashes the rest into the | 5670 | * for the first 15 multicast addresses, and hashes the rest into the |
5126 | * multicast table. | 5671 | * multicast table. |
5127 | *****************************************************************************/ | 5672 | *****************************************************************************/ |
5673 | #if 0 | ||
5128 | void | 5674 | void |
5129 | e1000_mc_addr_list_update(struct e1000_hw *hw, | 5675 | e1000_mc_addr_list_update(struct e1000_hw *hw, |
5130 | uint8_t *mc_addr_list, | 5676 | uint8_t *mc_addr_list, |
@@ -5145,6 +5691,8 @@ e1000_mc_addr_list_update(struct e1000_hw *hw, | |||
5145 | /* Clear RAR[1-15] */ | 5691 | /* Clear RAR[1-15] */ |
5146 | DEBUGOUT(" Clearing RAR[1-15]\n"); | 5692 | DEBUGOUT(" Clearing RAR[1-15]\n"); |
5147 | num_rar_entry = E1000_RAR_ENTRIES; | 5693 | num_rar_entry = E1000_RAR_ENTRIES; |
5694 | if (hw->mac_type == e1000_ich8lan) | ||
5695 | num_rar_entry = E1000_RAR_ENTRIES_ICH8LAN; | ||
5148 | /* Reserve a spot for the Locally Administered Address to work around | 5696 | /* Reserve a spot for the Locally Administered Address to work around |
5149 | * an 82571 issue in which a reset on one port will reload the MAC on | 5697 | * an 82571 issue in which a reset on one port will reload the MAC on |
5150 | * the other port. */ | 5698 | * the other port. */ |
@@ -5153,14 +5701,19 @@ e1000_mc_addr_list_update(struct e1000_hw *hw, | |||
5153 | 5701 | ||
5154 | for(i = rar_used_count; i < num_rar_entry; i++) { | 5702 | for(i = rar_used_count; i < num_rar_entry; i++) { |
5155 | E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); | 5703 | E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); |
5704 | E1000_WRITE_FLUSH(hw); | ||
5156 | E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); | 5705 | E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); |
5706 | E1000_WRITE_FLUSH(hw); | ||
5157 | } | 5707 | } |
5158 | 5708 | ||
5159 | /* Clear the MTA */ | 5709 | /* Clear the MTA */ |
5160 | DEBUGOUT(" Clearing MTA\n"); | 5710 | DEBUGOUT(" Clearing MTA\n"); |
5161 | num_mta_entry = E1000_NUM_MTA_REGISTERS; | 5711 | num_mta_entry = E1000_NUM_MTA_REGISTERS; |
5712 | if (hw->mac_type == e1000_ich8lan) | ||
5713 | num_mta_entry = E1000_NUM_MTA_REGISTERS_ICH8LAN; | ||
5162 | for(i = 0; i < num_mta_entry; i++) { | 5714 | for(i = 0; i < num_mta_entry; i++) { |
5163 | E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); | 5715 | E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); |
5716 | E1000_WRITE_FLUSH(hw); | ||
5164 | } | 5717 | } |
5165 | 5718 | ||
5166 | /* Add the new addresses */ | 5719 | /* Add the new addresses */ |
@@ -5217,24 +5770,46 @@ e1000_hash_mc_addr(struct e1000_hw *hw, | |||
5217 | * LSB MSB | 5770 | * LSB MSB |
5218 | */ | 5771 | */ |
5219 | case 0: | 5772 | case 0: |
5220 | /* [47:36] i.e. 0x563 for above example address */ | 5773 | if (hw->mac_type == e1000_ich8lan) { |
5221 | hash_value = ((mc_addr[4] >> 4) | (((uint16_t) mc_addr[5]) << 4)); | 5774 | /* [47:38] i.e. 0x158 for above example address */ |
5775 | hash_value = ((mc_addr[4] >> 6) | (((uint16_t) mc_addr[5]) << 2)); | ||
5776 | } else { | ||
5777 | /* [47:36] i.e. 0x563 for above example address */ | ||
5778 | hash_value = ((mc_addr[4] >> 4) | (((uint16_t) mc_addr[5]) << 4)); | ||
5779 | } | ||
5222 | break; | 5780 | break; |
5223 | case 1: | 5781 | case 1: |
5224 | /* [46:35] i.e. 0xAC6 for above example address */ | 5782 | if (hw->mac_type == e1000_ich8lan) { |
5225 | hash_value = ((mc_addr[4] >> 3) | (((uint16_t) mc_addr[5]) << 5)); | 5783 | /* [46:37] i.e. 0x2B1 for above example address */ |
5784 | hash_value = ((mc_addr[4] >> 5) | (((uint16_t) mc_addr[5]) << 3)); | ||
5785 | } else { | ||
5786 | /* [46:35] i.e. 0xAC6 for above example address */ | ||
5787 | hash_value = ((mc_addr[4] >> 3) | (((uint16_t) mc_addr[5]) << 5)); | ||
5788 | } | ||
5226 | break; | 5789 | break; |
5227 | case 2: | 5790 | case 2: |
5228 | /* [45:34] i.e. 0x5D8 for above example address */ | 5791 | if (hw->mac_type == e1000_ich8lan) { |
5229 | hash_value = ((mc_addr[4] >> 2) | (((uint16_t) mc_addr[5]) << 6)); | 5792 | /*[45:36] i.e. 0x163 for above example address */ |
5793 | hash_value = ((mc_addr[4] >> 4) | (((uint16_t) mc_addr[5]) << 4)); | ||
5794 | } else { | ||
5795 | /* [45:34] i.e. 0x5D8 for above example address */ | ||
5796 | hash_value = ((mc_addr[4] >> 2) | (((uint16_t) mc_addr[5]) << 6)); | ||
5797 | } | ||
5230 | break; | 5798 | break; |
5231 | case 3: | 5799 | case 3: |
5232 | /* [43:32] i.e. 0x634 for above example address */ | 5800 | if (hw->mac_type == e1000_ich8lan) { |
5233 | hash_value = ((mc_addr[4]) | (((uint16_t) mc_addr[5]) << 8)); | 5801 | /* [43:34] i.e. 0x18D for above example address */ |
5802 | hash_value = ((mc_addr[4] >> 2) | (((uint16_t) mc_addr[5]) << 6)); | ||
5803 | } else { | ||
5804 | /* [43:32] i.e. 0x634 for above example address */ | ||
5805 | hash_value = ((mc_addr[4]) | (((uint16_t) mc_addr[5]) << 8)); | ||
5806 | } | ||
5234 | break; | 5807 | break; |
5235 | } | 5808 | } |
5236 | 5809 | ||
5237 | hash_value &= 0xFFF; | 5810 | hash_value &= 0xFFF; |
5811 | if (hw->mac_type == e1000_ich8lan) | ||
5812 | hash_value &= 0x3FF; | ||
5238 | 5813 | ||
5239 | return hash_value; | 5814 | return hash_value; |
5240 | } | 5815 | } |
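The ich8lan branches above narrow the multicast hash to 10 bits and shift the selected window of address bits accordingly. Using an address whose last two bytes are 0x34 and 0x56 (the values the driver's example comments imply), the standalone program below reproduces all four filter types for both register layouts. Note that for filter type 2 the default-layout code actually evaluates to 0x58D, so the "0x5D8" in the in-code comment appears to be a digit transposition.

    #include <stdint.h>
    #include <stdio.h>

    /* Same bit selection as e1000_hash_mc_addr(), driven by mc_filter_type. */
    static uint16_t mc_hash(const uint8_t *mc, int filter_type, int is_ich8)
    {
        static const int ich8_shift[4]    = { 6, 5, 4, 2 };
        static const int default_shift[4] = { 4, 3, 2, 0 };
        int s = is_ich8 ? ich8_shift[filter_type] : default_shift[filter_type];
        uint16_t h = (uint16_t)((mc[4] >> s) | ((uint16_t)mc[5] << (8 - s)));

        return h & (is_ich8 ? 0x3FF : 0xFFF);
    }

    int main(void)
    {
        /* Sample address; only bytes [4] and [5] ever feed the hash. */
        const uint8_t mc[6] = { 0x01, 0xAA, 0x00, 0x12, 0x34, 0x56 };
        int ft;

        /* Prints 0x563/0x158, 0xAC6/0x2B1, 0x58D/0x163, 0x634/0x18D. */
        for (ft = 0; ft < 4; ft++)
            printf("filter_type %d: default 0x%03X  ich8lan 0x%03X\n",
                   ft, mc_hash(mc, ft, 0), mc_hash(mc, ft, 1));
        return 0;
    }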
@@ -5262,6 +5837,8 @@ e1000_mta_set(struct e1000_hw *hw, | |||
5262 | * register are determined by the lower 5 bits of the value. | 5837 | * register are determined by the lower 5 bits of the value. |
5263 | */ | 5838 | */ |
5264 | hash_reg = (hash_value >> 5) & 0x7F; | 5839 | hash_reg = (hash_value >> 5) & 0x7F; |
5840 | if (hw->mac_type == e1000_ich8lan) | ||
5841 | hash_reg &= 0x1F; | ||
5265 | hash_bit = hash_value & 0x1F; | 5842 | hash_bit = hash_value & 0x1F; |
5266 | 5843 | ||
5267 | mta = E1000_READ_REG_ARRAY(hw, MTA, hash_reg); | 5844 | mta = E1000_READ_REG_ARRAY(hw, MTA, hash_reg); |
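Once hashed, the value is split into an MTA register index and a bit position; the extra & 0x1F above reflects the ICH8 MTA having only 32 registers instead of 128. A small sketch of the indexing, reusing the hash values from the example above:

    #include <stdint.h>
    #include <stdio.h>

    /* Same split as e1000_mta_set(): bits [11:5] pick the register (ICH8 keeps
     * only five of those bits), bits [4:0] pick the bit inside that register. */
    static void mta_index(uint16_t hash, int is_ich8, unsigned *reg, unsigned *bit)
    {
        *reg = (hash >> 5) & 0x7F;
        if (is_ich8)
            *reg &= 0x1F;
        *bit = hash & 0x1F;
    }

    int main(void)
    {
        unsigned reg, bit;

        mta_index(0x563, 0, &reg, &bit);
        printf("default: MTA[%u], bit %u\n", reg, bit);   /* MTA[43], bit 3 */

        mta_index(0x158, 1, &reg, &bit);
        printf("ich8lan: MTA[%u], bit %u\n", reg, bit);   /* MTA[10], bit 24 */
        return 0;
    }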
@@ -5275,9 +5852,12 @@ e1000_mta_set(struct e1000_hw *hw, | |||
5275 | if((hw->mac_type == e1000_82544) && ((hash_reg & 0x1) == 1)) { | 5852 | if((hw->mac_type == e1000_82544) && ((hash_reg & 0x1) == 1)) { |
5276 | temp = E1000_READ_REG_ARRAY(hw, MTA, (hash_reg - 1)); | 5853 | temp = E1000_READ_REG_ARRAY(hw, MTA, (hash_reg - 1)); |
5277 | E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta); | 5854 | E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta); |
5855 | E1000_WRITE_FLUSH(hw); | ||
5278 | E1000_WRITE_REG_ARRAY(hw, MTA, (hash_reg - 1), temp); | 5856 | E1000_WRITE_REG_ARRAY(hw, MTA, (hash_reg - 1), temp); |
5857 | E1000_WRITE_FLUSH(hw); | ||
5279 | } else { | 5858 | } else { |
5280 | E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta); | 5859 | E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta); |
5860 | E1000_WRITE_FLUSH(hw); | ||
5281 | } | 5861 | } |
5282 | } | 5862 | } |
5283 | 5863 | ||
@@ -5334,7 +5914,9 @@ e1000_rar_set(struct e1000_hw *hw, | |||
5334 | } | 5914 | } |
5335 | 5915 | ||
5336 | E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low); | 5916 | E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low); |
5917 | E1000_WRITE_FLUSH(hw); | ||
5337 | E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high); | 5918 | E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high); |
5919 | E1000_WRITE_FLUSH(hw); | ||
5338 | } | 5920 | } |
5339 | 5921 | ||
5340 | /****************************************************************************** | 5922 | /****************************************************************************** |
@@ -5351,12 +5933,18 @@ e1000_write_vfta(struct e1000_hw *hw, | |||
5351 | { | 5933 | { |
5352 | uint32_t temp; | 5934 | uint32_t temp; |
5353 | 5935 | ||
5354 | if((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) { | 5936 | if (hw->mac_type == e1000_ich8lan) |
5937 | return; | ||
5938 | |||
5939 | if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) { | ||
5355 | temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1)); | 5940 | temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1)); |
5356 | E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); | 5941 | E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); |
5942 | E1000_WRITE_FLUSH(hw); | ||
5357 | E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp); | 5943 | E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp); |
5944 | E1000_WRITE_FLUSH(hw); | ||
5358 | } else { | 5945 | } else { |
5359 | E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); | 5946 | E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); |
5947 | E1000_WRITE_FLUSH(hw); | ||
5360 | } | 5948 | } |
5361 | } | 5949 | } |
5362 | 5950 | ||
@@ -5373,6 +5961,9 @@ e1000_clear_vfta(struct e1000_hw *hw) | |||
5373 | uint32_t vfta_offset = 0; | 5961 | uint32_t vfta_offset = 0; |
5374 | uint32_t vfta_bit_in_reg = 0; | 5962 | uint32_t vfta_bit_in_reg = 0; |
5375 | 5963 | ||
5964 | if (hw->mac_type == e1000_ich8lan) | ||
5965 | return; | ||
5966 | |||
5376 | if (hw->mac_type == e1000_82573) { | 5967 | if (hw->mac_type == e1000_82573) { |
5377 | if (hw->mng_cookie.vlan_id != 0) { | 5968 | if (hw->mng_cookie.vlan_id != 0) { |
5378 | /* The VFTA is a 4096b bit-field, each identifying a single VLAN | 5969 | /* The VFTA is a 4096b bit-field, each identifying a single VLAN |
@@ -5392,6 +5983,7 @@ e1000_clear_vfta(struct e1000_hw *hw) | |||
5392 | * manageability unit */ | 5983 | * manageability unit */ |
5393 | vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0; | 5984 | vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0; |
5394 | E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value); | 5985 | E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value); |
5986 | E1000_WRITE_FLUSH(hw); | ||
5395 | } | 5987 | } |
5396 | } | 5988 | } |
5397 | 5989 | ||
@@ -5421,9 +6013,18 @@ e1000_id_led_init(struct e1000_hw * hw) | |||
5421 | DEBUGOUT("EEPROM Read Error\n"); | 6013 | DEBUGOUT("EEPROM Read Error\n"); |
5422 | return -E1000_ERR_EEPROM; | 6014 | return -E1000_ERR_EEPROM; |
5423 | } | 6015 | } |
5424 | if((eeprom_data== ID_LED_RESERVED_0000) || | 6016 | |
5425 | (eeprom_data == ID_LED_RESERVED_FFFF)) eeprom_data = ID_LED_DEFAULT; | 6017 | if ((hw->mac_type == e1000_82573) && |
5426 | for(i = 0; i < 4; i++) { | 6018 | (eeprom_data == ID_LED_RESERVED_82573)) |
6019 | eeprom_data = ID_LED_DEFAULT_82573; | ||
6020 | else if ((eeprom_data == ID_LED_RESERVED_0000) || | ||
6021 | (eeprom_data == ID_LED_RESERVED_FFFF)) { | ||
6022 | if (hw->mac_type == e1000_ich8lan) | ||
6023 | eeprom_data = ID_LED_DEFAULT_ICH8LAN; | ||
6024 | else | ||
6025 | eeprom_data = ID_LED_DEFAULT; | ||
6026 | } | ||
6027 | for (i = 0; i < 4; i++) { | ||
5427 | temp = (eeprom_data >> (i << 2)) & led_mask; | 6028 | temp = (eeprom_data >> (i << 2)) & led_mask; |
5428 | switch(temp) { | 6029 | switch(temp) { |
5429 | case ID_LED_ON1_DEF2: | 6030 | case ID_LED_ON1_DEF2: |
@@ -5519,6 +6120,44 @@ e1000_setup_led(struct e1000_hw *hw) | |||
5519 | } | 6120 | } |
5520 | 6121 | ||
5521 | /****************************************************************************** | 6122 | /****************************************************************************** |
6123 | * Used on 82571 and later Si that has LED blink bits. | ||
6124 | * Callers must use their own timer and should have already called | ||
6125 | * e1000_id_led_init() | ||
6126 | * Call e1000_cleanup_led() to stop blinking | ||
6127 | * | ||
6128 | * hw - Struct containing variables accessed by shared code | ||
6129 | *****************************************************************************/ | ||
6130 | int32_t | ||
6131 | e1000_blink_led_start(struct e1000_hw *hw) | ||
6132 | { | ||
6133 | int16_t i; | ||
6134 | uint32_t ledctl_blink = 0; | ||
6135 | |||
6136 | DEBUGFUNC("e1000_id_led_blink_on"); | ||
6137 | |||
6138 | if (hw->mac_type < e1000_82571) { | ||
6139 | /* Nothing to do */ | ||
6140 | return E1000_SUCCESS; | ||
6141 | } | ||
6142 | if (hw->media_type == e1000_media_type_fiber) { | ||
6143 | /* always blink LED0 for PCI-E fiber */ | ||
6144 | ledctl_blink = E1000_LEDCTL_LED0_BLINK | | ||
6145 | (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); | ||
6146 | } else { | ||
6147 | /* set the blink bit for each LED that's "on" (0x0E) in ledctl_mode2 */ | ||
6148 | ledctl_blink = hw->ledctl_mode2; | ||
6149 | for (i=0; i < 4; i++) | ||
6150 | if (((hw->ledctl_mode2 >> (i * 8)) & 0xFF) == | ||
6151 | E1000_LEDCTL_MODE_LED_ON) | ||
6152 | ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << (i * 8)); | ||
6153 | } | ||
6154 | |||
6155 | E1000_WRITE_REG(hw, LEDCTL, ledctl_blink); | ||
6156 | |||
6157 | return E1000_SUCCESS; | ||
6158 | } | ||
6159 | |||
6160 | /****************************************************************************** | ||
5522 | * Restores the saved state of the SW controllable LED. | 6161 | * Restores the saved state of the SW controllable LED. |
5523 | * | 6162 | * |
5524 | * hw - Struct containing variables accessed by shared code | 6163 | * hw - Struct containing variables accessed by shared code |
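e1000_blink_led_start() builds the LEDCTL value by copying ledctl_mode2 and setting the blink bit for every LED whose 8-bit field is programmed to the "on" mode (the 0x0E the comment mentions). The field layout and bit values below are assumptions for illustration; the real definitions live in e1000_hw.h.

    #include <stdint.h>
    #include <stdio.h>

    #define MODE_LED_ON  0x0E          /* assumed 'LED on' mode value */
    #define LED0_BLINK   0x00000080u   /* assumed blink bit within an LED field */

    /* One byte of LEDCTL per LED; set the blink bit wherever the mode is 'on'. */
    static uint32_t blink_active_leds(uint32_t ledctl_mode2)
    {
        uint32_t ledctl = ledctl_mode2;
        int i;

        for (i = 0; i < 4; i++)
            if (((ledctl_mode2 >> (i * 8)) & 0xFF) == MODE_LED_ON)
                ledctl |= LED0_BLINK << (i * 8);
        return ledctl;
    }

    int main(void)
    {
        uint32_t mode2 = 0x000E070E;   /* hypothetical: LED0 and LED2 'on' */

        printf("LEDCTL = 0x%08X\n", (unsigned)blink_active_leds(mode2));  /* 0x008E078E */
        return 0;
    }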
@@ -5548,6 +6187,10 @@ e1000_cleanup_led(struct e1000_hw *hw) | |||
5548 | return ret_val; | 6187 | return ret_val; |
5549 | /* Fall Through */ | 6188 | /* Fall Through */ |
5550 | default: | 6189 | default: |
6190 | if (hw->phy_type == e1000_phy_ife) { | ||
6191 | e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0); | ||
6192 | break; | ||
6193 | } | ||
5551 | /* Restore LEDCTL settings */ | 6194 | /* Restore LEDCTL settings */ |
5552 | E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_default); | 6195 | E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_default); |
5553 | break; | 6196 | break; |
@@ -5592,7 +6235,10 @@ e1000_led_on(struct e1000_hw *hw) | |||
5592 | /* Clear SW Definable Pin 0 to turn on the LED */ | 6235 | /* Clear SW Definable Pin 0 to turn on the LED */ |
5593 | ctrl &= ~E1000_CTRL_SWDPIN0; | 6236 | ctrl &= ~E1000_CTRL_SWDPIN0; |
5594 | ctrl |= E1000_CTRL_SWDPIO0; | 6237 | ctrl |= E1000_CTRL_SWDPIO0; |
5595 | } else if(hw->media_type == e1000_media_type_copper) { | 6238 | } else if (hw->phy_type == e1000_phy_ife) { |
6239 | e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, | ||
6240 | (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON)); | ||
6241 | } else if (hw->media_type == e1000_media_type_copper) { | ||
5596 | E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode2); | 6242 | E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode2); |
5597 | return E1000_SUCCESS; | 6243 | return E1000_SUCCESS; |
5598 | } | 6244 | } |
@@ -5640,7 +6286,10 @@ e1000_led_off(struct e1000_hw *hw) | |||
5640 | /* Set SW Definable Pin 0 to turn off the LED */ | 6286 | /* Set SW Definable Pin 0 to turn off the LED */ |
5641 | ctrl |= E1000_CTRL_SWDPIN0; | 6287 | ctrl |= E1000_CTRL_SWDPIN0; |
5642 | ctrl |= E1000_CTRL_SWDPIO0; | 6288 | ctrl |= E1000_CTRL_SWDPIO0; |
5643 | } else if(hw->media_type == e1000_media_type_copper) { | 6289 | } else if (hw->phy_type == e1000_phy_ife) { |
6290 | e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, | ||
6291 | (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF)); | ||
6292 | } else if (hw->media_type == e1000_media_type_copper) { | ||
5644 | E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode1); | 6293 | E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode1); |
5645 | return E1000_SUCCESS; | 6294 | return E1000_SUCCESS; |
5646 | } | 6295 | } |
@@ -5678,12 +6327,16 @@ e1000_clear_hw_cntrs(struct e1000_hw *hw) | |||
5678 | temp = E1000_READ_REG(hw, XOFFRXC); | 6327 | temp = E1000_READ_REG(hw, XOFFRXC); |
5679 | temp = E1000_READ_REG(hw, XOFFTXC); | 6328 | temp = E1000_READ_REG(hw, XOFFTXC); |
5680 | temp = E1000_READ_REG(hw, FCRUC); | 6329 | temp = E1000_READ_REG(hw, FCRUC); |
6330 | |||
6331 | if (hw->mac_type != e1000_ich8lan) { | ||
5681 | temp = E1000_READ_REG(hw, PRC64); | 6332 | temp = E1000_READ_REG(hw, PRC64); |
5682 | temp = E1000_READ_REG(hw, PRC127); | 6333 | temp = E1000_READ_REG(hw, PRC127); |
5683 | temp = E1000_READ_REG(hw, PRC255); | 6334 | temp = E1000_READ_REG(hw, PRC255); |
5684 | temp = E1000_READ_REG(hw, PRC511); | 6335 | temp = E1000_READ_REG(hw, PRC511); |
5685 | temp = E1000_READ_REG(hw, PRC1023); | 6336 | temp = E1000_READ_REG(hw, PRC1023); |
5686 | temp = E1000_READ_REG(hw, PRC1522); | 6337 | temp = E1000_READ_REG(hw, PRC1522); |
6338 | } | ||
6339 | |||
5687 | temp = E1000_READ_REG(hw, GPRC); | 6340 | temp = E1000_READ_REG(hw, GPRC); |
5688 | temp = E1000_READ_REG(hw, BPRC); | 6341 | temp = E1000_READ_REG(hw, BPRC); |
5689 | temp = E1000_READ_REG(hw, MPRC); | 6342 | temp = E1000_READ_REG(hw, MPRC); |
@@ -5703,12 +6356,16 @@ e1000_clear_hw_cntrs(struct e1000_hw *hw) | |||
5703 | temp = E1000_READ_REG(hw, TOTH); | 6356 | temp = E1000_READ_REG(hw, TOTH); |
5704 | temp = E1000_READ_REG(hw, TPR); | 6357 | temp = E1000_READ_REG(hw, TPR); |
5705 | temp = E1000_READ_REG(hw, TPT); | 6358 | temp = E1000_READ_REG(hw, TPT); |
6359 | |||
6360 | if (hw->mac_type != e1000_ich8lan) { | ||
5706 | temp = E1000_READ_REG(hw, PTC64); | 6361 | temp = E1000_READ_REG(hw, PTC64); |
5707 | temp = E1000_READ_REG(hw, PTC127); | 6362 | temp = E1000_READ_REG(hw, PTC127); |
5708 | temp = E1000_READ_REG(hw, PTC255); | 6363 | temp = E1000_READ_REG(hw, PTC255); |
5709 | temp = E1000_READ_REG(hw, PTC511); | 6364 | temp = E1000_READ_REG(hw, PTC511); |
5710 | temp = E1000_READ_REG(hw, PTC1023); | 6365 | temp = E1000_READ_REG(hw, PTC1023); |
5711 | temp = E1000_READ_REG(hw, PTC1522); | 6366 | temp = E1000_READ_REG(hw, PTC1522); |
6367 | } | ||
6368 | |||
5712 | temp = E1000_READ_REG(hw, MPTC); | 6369 | temp = E1000_READ_REG(hw, MPTC); |
5713 | temp = E1000_READ_REG(hw, BPTC); | 6370 | temp = E1000_READ_REG(hw, BPTC); |
5714 | 6371 | ||
@@ -5731,6 +6388,9 @@ e1000_clear_hw_cntrs(struct e1000_hw *hw) | |||
5731 | 6388 | ||
5732 | temp = E1000_READ_REG(hw, IAC); | 6389 | temp = E1000_READ_REG(hw, IAC); |
5733 | temp = E1000_READ_REG(hw, ICRXOC); | 6390 | temp = E1000_READ_REG(hw, ICRXOC); |
6391 | |||
6392 | if (hw->mac_type == e1000_ich8lan) return; | ||
6393 | |||
5734 | temp = E1000_READ_REG(hw, ICRXPTC); | 6394 | temp = E1000_READ_REG(hw, ICRXPTC); |
5735 | temp = E1000_READ_REG(hw, ICRXATC); | 6395 | temp = E1000_READ_REG(hw, ICRXATC); |
5736 | temp = E1000_READ_REG(hw, ICTXPTC); | 6396 | temp = E1000_READ_REG(hw, ICTXPTC); |
@@ -5911,6 +6571,7 @@ e1000_get_bus_info(struct e1000_hw *hw) | |||
5911 | hw->bus_width = e1000_bus_width_pciex_1; | 6571 | hw->bus_width = e1000_bus_width_pciex_1; |
5912 | break; | 6572 | break; |
5913 | case e1000_82571: | 6573 | case e1000_82571: |
6574 | case e1000_ich8lan: | ||
5914 | case e1000_80003es2lan: | 6575 | case e1000_80003es2lan: |
5915 | hw->bus_type = e1000_bus_type_pci_express; | 6576 | hw->bus_type = e1000_bus_type_pci_express; |
5916 | hw->bus_speed = e1000_bus_speed_2500; | 6577 | hw->bus_speed = e1000_bus_speed_2500; |
@@ -5948,8 +6609,6 @@ e1000_get_bus_info(struct e1000_hw *hw) | |||
5948 | break; | 6609 | break; |
5949 | } | 6610 | } |
5950 | } | 6611 | } |
5951 | |||
5952 | #if 0 | ||
5953 | /****************************************************************************** | 6612 | /****************************************************************************** |
5954 | * Reads a value from one of the devices registers using port I/O (as opposed | 6613 | * Reads a value from one of the devices registers using port I/O (as opposed |
5955 | * memory mapped I/O). Only 82544 and newer devices support port I/O. | 6614 | * memory mapped I/O). Only 82544 and newer devices support port I/O. |
@@ -5957,6 +6616,7 @@ e1000_get_bus_info(struct e1000_hw *hw) | |||
5957 | * hw - Struct containing variables accessed by shared code | 6616 | * hw - Struct containing variables accessed by shared code |
5958 | * offset - offset to read from | 6617 | * offset - offset to read from |
5959 | *****************************************************************************/ | 6618 | *****************************************************************************/ |
6619 | #if 0 | ||
5960 | uint32_t | 6620 | uint32_t |
5961 | e1000_read_reg_io(struct e1000_hw *hw, | 6621 | e1000_read_reg_io(struct e1000_hw *hw, |
5962 | uint32_t offset) | 6622 | uint32_t offset) |
@@ -6012,8 +6672,6 @@ e1000_get_cable_length(struct e1000_hw *hw, | |||
6012 | { | 6672 | { |
6013 | int32_t ret_val; | 6673 | int32_t ret_val; |
6014 | uint16_t agc_value = 0; | 6674 | uint16_t agc_value = 0; |
6015 | uint16_t cur_agc, min_agc = IGP01E1000_AGC_LENGTH_TABLE_SIZE; | ||
6016 | uint16_t max_agc = 0; | ||
6017 | uint16_t i, phy_data; | 6675 | uint16_t i, phy_data; |
6018 | uint16_t cable_length; | 6676 | uint16_t cable_length; |
6019 | 6677 | ||
@@ -6086,6 +6744,8 @@ e1000_get_cable_length(struct e1000_hw *hw, | |||
6086 | break; | 6744 | break; |
6087 | } | 6745 | } |
6088 | } else if(hw->phy_type == e1000_phy_igp) { /* For IGP PHY */ | 6746 | } else if(hw->phy_type == e1000_phy_igp) { /* For IGP PHY */ |
6747 | uint16_t cur_agc_value; | ||
6748 | uint16_t min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE; | ||
6089 | uint16_t agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = | 6749 | uint16_t agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = |
6090 | {IGP01E1000_PHY_AGC_A, | 6750 | {IGP01E1000_PHY_AGC_A, |
6091 | IGP01E1000_PHY_AGC_B, | 6751 | IGP01E1000_PHY_AGC_B, |
@@ -6098,23 +6758,23 @@ e1000_get_cable_length(struct e1000_hw *hw, | |||
6098 | if(ret_val) | 6758 | if(ret_val) |
6099 | return ret_val; | 6759 | return ret_val; |
6100 | 6760 | ||
6101 | cur_agc = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT; | 6761 | cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT; |
6102 | 6762 | ||
6103 | /* Array bound check. */ | 6763 | /* Value bound check. */ |
6104 | if((cur_agc >= IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) || | 6764 | if ((cur_agc_value >= IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) || |
6105 | (cur_agc == 0)) | 6765 | (cur_agc_value == 0)) |
6106 | return -E1000_ERR_PHY; | 6766 | return -E1000_ERR_PHY; |
6107 | 6767 | ||
6108 | agc_value += cur_agc; | 6768 | agc_value += cur_agc_value; |
6109 | 6769 | ||
6110 | /* Update minimal AGC value. */ | 6770 | /* Update minimal AGC value. */ |
6111 | if(min_agc > cur_agc) | 6771 | if (min_agc_value > cur_agc_value) |
6112 | min_agc = cur_agc; | 6772 | min_agc_value = cur_agc_value; |
6113 | } | 6773 | } |
6114 | 6774 | ||
6115 | /* Remove the minimal AGC result for length < 50m */ | 6775 | /* Remove the minimal AGC result for length < 50m */ |
6116 | if(agc_value < IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) { | 6776 | if (agc_value < IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) { |
6117 | agc_value -= min_agc; | 6777 | agc_value -= min_agc_value; |
6118 | 6778 | ||
6119 | /* Get the average length of the remaining 3 channels */ | 6779 | /* Get the average length of the remaining 3 channels */ |
6120 | agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1); | 6780 | agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1); |
@@ -6130,7 +6790,10 @@ e1000_get_cable_length(struct e1000_hw *hw, | |||
6130 | IGP01E1000_AGC_RANGE) : 0; | 6790 | IGP01E1000_AGC_RANGE) : 0; |
6131 | *max_length = e1000_igp_cable_length_table[agc_value] + | 6791 | *max_length = e1000_igp_cable_length_table[agc_value] + |
6132 | IGP01E1000_AGC_RANGE; | 6792 | IGP01E1000_AGC_RANGE; |
6133 | } else if (hw->phy_type == e1000_phy_igp_2) { | 6793 | } else if (hw->phy_type == e1000_phy_igp_2 || |
6794 | hw->phy_type == e1000_phy_igp_3) { | ||
6795 | uint16_t cur_agc_index, max_agc_index = 0; | ||
6796 | uint16_t min_agc_index = IGP02E1000_AGC_LENGTH_TABLE_SIZE - 1; | ||
6134 | uint16_t agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = | 6797 | uint16_t agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = |
6135 | {IGP02E1000_PHY_AGC_A, | 6798 | {IGP02E1000_PHY_AGC_A, |
6136 | IGP02E1000_PHY_AGC_B, | 6799 | IGP02E1000_PHY_AGC_B, |
@@ -6145,19 +6808,27 @@ e1000_get_cable_length(struct e1000_hw *hw, | |||
6145 | /* Getting bits 15:9, which represent the combination of coarse and | 6808 | /* Getting bits 15:9, which represent the combination of coarse and |
6146 | * fine gain values. The result is a number that can be put into | 6809 | * fine gain values. The result is a number that can be put into |
6147 | * the lookup table to obtain the approximate cable length. */ | 6810 | * the lookup table to obtain the approximate cable length. */ |
6148 | cur_agc = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & | 6811 | cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & |
6149 | IGP02E1000_AGC_LENGTH_MASK; | 6812 | IGP02E1000_AGC_LENGTH_MASK; |
6150 | 6813 | ||
6151 | /* Remove min & max AGC values from calculation. */ | 6814 | /* Array index bound check. */ |
6152 | if (e1000_igp_2_cable_length_table[min_agc] > e1000_igp_2_cable_length_table[cur_agc]) | 6815 | if ((cur_agc_index >= IGP02E1000_AGC_LENGTH_TABLE_SIZE) || |
6153 | min_agc = cur_agc; | 6816 | (cur_agc_index == 0)) |
6154 | if (e1000_igp_2_cable_length_table[max_agc] < e1000_igp_2_cable_length_table[cur_agc]) | 6817 | return -E1000_ERR_PHY; |
6155 | max_agc = cur_agc; | ||
6156 | 6818 | ||
6157 | agc_value += e1000_igp_2_cable_length_table[cur_agc]; | 6819 | /* Remove min & max AGC values from calculation. */ |
6820 | if (e1000_igp_2_cable_length_table[min_agc_index] > | ||
6821 | e1000_igp_2_cable_length_table[cur_agc_index]) | ||
6822 | min_agc_index = cur_agc_index; | ||
6823 | if (e1000_igp_2_cable_length_table[max_agc_index] < | ||
6824 | e1000_igp_2_cable_length_table[cur_agc_index]) | ||
6825 | max_agc_index = cur_agc_index; | ||
6826 | |||
6827 | agc_value += e1000_igp_2_cable_length_table[cur_agc_index]; | ||
6158 | } | 6828 | } |
6159 | 6829 | ||
6160 | agc_value -= (e1000_igp_2_cable_length_table[min_agc] + e1000_igp_2_cable_length_table[max_agc]); | 6830 | agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] + |
6831 | e1000_igp_2_cable_length_table[max_agc_index]); | ||
6161 | agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); | 6832 | agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); |
6162 | 6833 | ||
6163 | /* Calculate cable length with the error range of +/- 10 meters. */ | 6834 | /* Calculate cable length with the error range of +/- 10 meters. */ |
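The IGP2 branch above estimates cable length by looking up each of the four AGC channels, dropping the shortest and longest readings, and averaging the remaining two with a plus/minus 10 m error window. A minimal standalone sketch of that averaging step, using made-up channel values and a plain array in place of e1000_igp_2_cable_length_table (the channel count is assumed to be the driver's IGP02E1000_PHY_CHANNEL_NUM):

    #include <stdio.h>

    #define CHANNELS 4   /* assumed; the driver uses IGP02E1000_PHY_CHANNEL_NUM */

    int main(void)
    {
        /* Example per-channel lengths (metres), as if already taken from the table. */
        unsigned len[CHANNELS] = { 42, 47, 45, 90 };
        unsigned sum = 0, min = len[0], max = len[0];

        for (int i = 0; i < CHANNELS; i++) {
            sum += len[i];
            if (len[i] < min)
                min = len[i];
            if (len[i] > max)
                max = len[i];
        }

        /* Drop the shortest and longest channel and average the remaining two,
         * mirroring the (sum - min - max) / (CHANNELS - 2) step in the diff. */
        unsigned avg = (sum - min - max) / (CHANNELS - 2);

        /* Plus/minus 10 m window, as the final-range comment above describes. */
        printf("estimated length: %u m (range %u..%u m)\n",
               avg, avg >= 10 ? avg - 10 : 0, avg + 10);
        return 0;
    }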
@@ -6203,7 +6874,8 @@ e1000_check_polarity(struct e1000_hw *hw, | |||
6203 | return ret_val; | 6874 | return ret_val; |
6204 | *polarity = (phy_data & M88E1000_PSSR_REV_POLARITY) >> | 6875 | *polarity = (phy_data & M88E1000_PSSR_REV_POLARITY) >> |
6205 | M88E1000_PSSR_REV_POLARITY_SHIFT; | 6876 | M88E1000_PSSR_REV_POLARITY_SHIFT; |
6206 | } else if(hw->phy_type == e1000_phy_igp || | 6877 | } else if (hw->phy_type == e1000_phy_igp || |
6878 | hw->phy_type == e1000_phy_igp_3 || | ||
6207 | hw->phy_type == e1000_phy_igp_2) { | 6879 | hw->phy_type == e1000_phy_igp_2) { |
6208 | /* Read the Status register to check the speed */ | 6880 | /* Read the Status register to check the speed */ |
6209 | ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, | 6881 | ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, |
@@ -6229,6 +6901,13 @@ e1000_check_polarity(struct e1000_hw *hw, | |||
6229 | * 100 Mbps this bit is always 0) */ | 6901 | * 100 Mbps this bit is always 0) */ |
6230 | *polarity = phy_data & IGP01E1000_PSSR_POLARITY_REVERSED; | 6902 | *polarity = phy_data & IGP01E1000_PSSR_POLARITY_REVERSED; |
6231 | } | 6903 | } |
6904 | } else if (hw->phy_type == e1000_phy_ife) { | ||
6905 | ret_val = e1000_read_phy_reg(hw, IFE_PHY_EXTENDED_STATUS_CONTROL, | ||
6906 | &phy_data); | ||
6907 | if (ret_val) | ||
6908 | return ret_val; | ||
6909 | *polarity = (phy_data & IFE_PESC_POLARITY_REVERSED) >> | ||
6910 | IFE_PESC_POLARITY_REVERSED_SHIFT; | ||
6232 | } | 6911 | } |
6233 | return E1000_SUCCESS; | 6912 | return E1000_SUCCESS; |
6234 | } | 6913 | } |
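For the new e1000_phy_ife branch, polarity is a single status bit extracted with a mask and a shift. A small sketch of that pattern; the mask and shift values below are assumptions standing in for the driver's IFE_PESC_POLARITY_REVERSED definitions:

    #include <stdint.h>

    /* Assumed bit layout: polarity-reversed flag in bit 8 of the extended
     * status register; the real values live in the driver headers. */
    #define PESC_POLARITY_REVERSED       0x0100
    #define PESC_POLARITY_REVERSED_SHIFT 8

    /* Returns 1 when the PHY reports reversed cable polarity, 0 otherwise. */
    static uint16_t polarity_from_status(uint16_t phy_data)
    {
        return (phy_data & PESC_POLARITY_REVERSED) >> PESC_POLARITY_REVERSED_SHIFT;
    }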
@@ -6256,7 +6935,8 @@ e1000_check_downshift(struct e1000_hw *hw) | |||
6256 | 6935 | ||
6257 | DEBUGFUNC("e1000_check_downshift"); | 6936 | DEBUGFUNC("e1000_check_downshift"); |
6258 | 6937 | ||
6259 | if(hw->phy_type == e1000_phy_igp || | 6938 | if (hw->phy_type == e1000_phy_igp || |
6939 | hw->phy_type == e1000_phy_igp_3 || | ||
6260 | hw->phy_type == e1000_phy_igp_2) { | 6940 | hw->phy_type == e1000_phy_igp_2) { |
6261 | ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH, | 6941 | ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH, |
6262 | &phy_data); | 6942 | &phy_data); |
@@ -6273,6 +6953,9 @@ e1000_check_downshift(struct e1000_hw *hw) | |||
6273 | 6953 | ||
6274 | hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >> | 6954 | hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >> |
6275 | M88E1000_PSSR_DOWNSHIFT_SHIFT; | 6955 | M88E1000_PSSR_DOWNSHIFT_SHIFT; |
6956 | } else if (hw->phy_type == e1000_phy_ife) { | ||
6957 | /* e1000_phy_ife supports 10/100 speed only */ | ||
6958 | hw->speed_downgraded = FALSE; | ||
6276 | } | 6959 | } |
6277 | 6960 | ||
6278 | return E1000_SUCCESS; | 6961 | return E1000_SUCCESS; |
@@ -6317,7 +7000,9 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw, | |||
6317 | 7000 | ||
6318 | if(speed == SPEED_1000) { | 7001 | if(speed == SPEED_1000) { |
6319 | 7002 | ||
6320 | e1000_get_cable_length(hw, &min_length, &max_length); | 7003 | ret_val = e1000_get_cable_length(hw, &min_length, &max_length); |
7004 | if (ret_val) | ||
7005 | return ret_val; | ||
6321 | 7006 | ||
6322 | if((hw->dsp_config_state == e1000_dsp_config_enabled) && | 7007 | if((hw->dsp_config_state == e1000_dsp_config_enabled) && |
6323 | min_length >= e1000_igp_cable_length_50) { | 7008 | min_length >= e1000_igp_cable_length_50) { |
@@ -6525,20 +7210,27 @@ static int32_t | |||
6525 | e1000_set_d3_lplu_state(struct e1000_hw *hw, | 7210 | e1000_set_d3_lplu_state(struct e1000_hw *hw, |
6526 | boolean_t active) | 7211 | boolean_t active) |
6527 | { | 7212 | { |
7213 | uint32_t phy_ctrl = 0; | ||
6528 | int32_t ret_val; | 7214 | int32_t ret_val; |
6529 | uint16_t phy_data; | 7215 | uint16_t phy_data; |
6530 | DEBUGFUNC("e1000_set_d3_lplu_state"); | 7216 | DEBUGFUNC("e1000_set_d3_lplu_state"); |
6531 | 7217 | ||
6532 | if(hw->phy_type != e1000_phy_igp && hw->phy_type != e1000_phy_igp_2) | 7218 | if (hw->phy_type != e1000_phy_igp && hw->phy_type != e1000_phy_igp_2 |
7219 | && hw->phy_type != e1000_phy_igp_3) | ||
6533 | return E1000_SUCCESS; | 7220 | return E1000_SUCCESS; |
6534 | 7221 | ||
6535 | /* During driver activity LPLU should not be used or it will attain link | 7222 | /* During driver activity LPLU should not be used or it will attain link |
6536 | * from the lowest speeds starting from 10Mbps. The capability is used for | 7223 | * from the lowest speeds starting from 10Mbps. The capability is used for |
6537 | * Dx transitions and states */ | 7224 | * Dx transitions and states */ |
6538 | if(hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) { | 7225 | if (hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) { |
6539 | ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data); | 7226 | ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data); |
6540 | if(ret_val) | 7227 | if (ret_val) |
6541 | return ret_val; | 7228 | return ret_val; |
7229 | } else if (hw->mac_type == e1000_ich8lan) { | ||
7230 | /* The MAC writes into the PHY register based on the state transition | ||
7231 | * and starts auto-negotiation. The SW driver can overwrite the settings | ||
7232 | * in the CSR PHY power control register E1000_PHY_CTRL. */ | ||
7233 | phy_ctrl = E1000_READ_REG(hw, PHY_CTRL); | ||
6542 | } else { | 7234 | } else { |
6543 | ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); | 7235 | ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); |
6544 | if(ret_val) | 7236 | if(ret_val) |
@@ -6553,11 +7245,16 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw, | |||
6553 | if(ret_val) | 7245 | if(ret_val) |
6554 | return ret_val; | 7246 | return ret_val; |
6555 | } else { | 7247 | } else { |
7248 | if (hw->mac_type == e1000_ich8lan) { | ||
7249 | phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU; | ||
7250 | E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl); | ||
7251 | } else { | ||
6556 | phy_data &= ~IGP02E1000_PM_D3_LPLU; | 7252 | phy_data &= ~IGP02E1000_PM_D3_LPLU; |
6557 | ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, | 7253 | ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, |
6558 | phy_data); | 7254 | phy_data); |
6559 | if (ret_val) | 7255 | if (ret_val) |
6560 | return ret_val; | 7256 | return ret_val; |
7257 | } | ||
6561 | } | 7258 | } |
6562 | 7259 | ||
6563 | /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during | 7260 | /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during |
@@ -6593,17 +7290,22 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw, | |||
6593 | (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) { | 7290 | (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) { |
6594 | 7291 | ||
6595 | if(hw->mac_type == e1000_82541_rev_2 || | 7292 | if(hw->mac_type == e1000_82541_rev_2 || |
6596 | hw->mac_type == e1000_82547_rev_2) { | 7293 | hw->mac_type == e1000_82547_rev_2) { |
6597 | phy_data |= IGP01E1000_GMII_FLEX_SPD; | 7294 | phy_data |= IGP01E1000_GMII_FLEX_SPD; |
6598 | ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data); | 7295 | ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data); |
6599 | if(ret_val) | 7296 | if(ret_val) |
6600 | return ret_val; | 7297 | return ret_val; |
6601 | } else { | 7298 | } else { |
7299 | if (hw->mac_type == e1000_ich8lan) { | ||
7300 | phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU; | ||
7301 | E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl); | ||
7302 | } else { | ||
6602 | phy_data |= IGP02E1000_PM_D3_LPLU; | 7303 | phy_data |= IGP02E1000_PM_D3_LPLU; |
6603 | ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, | 7304 | ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, |
6604 | phy_data); | 7305 | phy_data); |
6605 | if (ret_val) | 7306 | if (ret_val) |
6606 | return ret_val; | 7307 | return ret_val; |
7308 | } | ||
6607 | } | 7309 | } |
6608 | 7310 | ||
6609 | /* When LPLU is enabled we should disable SmartSpeed */ | 7311 | /* When LPLU is enabled we should disable SmartSpeed */ |
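On ich8lan the D3 LPLU bit now lives in the MAC's PHY_CTRL CSR rather than in a PHY register, so the driver toggles it with a plain MMIO read-modify-write instead of an MDIO access. A rough sketch of that path; the accessor names and the bit value are assumptions standing in for E1000_READ_REG/E1000_WRITE_REG and E1000_PHY_CTRL_NOND0A_LPLU:

    #include <stdint.h>

    #define PHY_CTRL_NOND0A_LPLU 0x00000004   /* assumed bit position */

    /* Stand-ins for the driver's CSR accessors on the PHY_CTRL register. */
    extern uint32_t csr_read_phy_ctrl(void);
    extern void csr_write_phy_ctrl(uint32_t val);

    /* Enable or disable non-D0a (D3) LPLU via the MAC register. */
    static void set_d3_lplu_csr(int enable)
    {
        uint32_t phy_ctrl = csr_read_phy_ctrl();

        if (enable)
            phy_ctrl |= PHY_CTRL_NOND0A_LPLU;
        else
            phy_ctrl &= ~PHY_CTRL_NOND0A_LPLU;

        csr_write_phy_ctrl(phy_ctrl);
    }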
@@ -6638,6 +7340,7 @@ static int32_t | |||
6638 | e1000_set_d0_lplu_state(struct e1000_hw *hw, | 7340 | e1000_set_d0_lplu_state(struct e1000_hw *hw, |
6639 | boolean_t active) | 7341 | boolean_t active) |
6640 | { | 7342 | { |
7343 | uint32_t phy_ctrl = 0; | ||
6641 | int32_t ret_val; | 7344 | int32_t ret_val; |
6642 | uint16_t phy_data; | 7345 | uint16_t phy_data; |
6643 | DEBUGFUNC("e1000_set_d0_lplu_state"); | 7346 | DEBUGFUNC("e1000_set_d0_lplu_state"); |
@@ -6645,15 +7348,24 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw, | |||
6645 | if(hw->mac_type <= e1000_82547_rev_2) | 7348 | if(hw->mac_type <= e1000_82547_rev_2) |
6646 | return E1000_SUCCESS; | 7349 | return E1000_SUCCESS; |
6647 | 7350 | ||
7351 | if (hw->mac_type == e1000_ich8lan) { | ||
7352 | phy_ctrl = E1000_READ_REG(hw, PHY_CTRL); | ||
7353 | } else { | ||
6648 | ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); | 7354 | ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); |
6649 | if(ret_val) | 7355 | if(ret_val) |
6650 | return ret_val; | 7356 | return ret_val; |
7357 | } | ||
6651 | 7358 | ||
6652 | if (!active) { | 7359 | if (!active) { |
7360 | if (hw->mac_type == e1000_ich8lan) { | ||
7361 | phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU; | ||
7362 | E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl); | ||
7363 | } else { | ||
6653 | phy_data &= ~IGP02E1000_PM_D0_LPLU; | 7364 | phy_data &= ~IGP02E1000_PM_D0_LPLU; |
6654 | ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); | 7365 | ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); |
6655 | if (ret_val) | 7366 | if (ret_val) |
6656 | return ret_val; | 7367 | return ret_val; |
7368 | } | ||
6657 | 7369 | ||
6658 | /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during | 7370 | /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during |
6659 | * Dx states where the power conservation is most important. During | 7371 | * Dx states where the power conservation is most important. During |
@@ -6686,10 +7398,15 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw, | |||
6686 | 7398 | ||
6687 | } else { | 7399 | } else { |
6688 | 7400 | ||
7401 | if (hw->mac_type == e1000_ich8lan) { | ||
7402 | phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU; | ||
7403 | E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl); | ||
7404 | } else { | ||
6689 | phy_data |= IGP02E1000_PM_D0_LPLU; | 7405 | phy_data |= IGP02E1000_PM_D0_LPLU; |
6690 | ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); | 7406 | ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); |
6691 | if (ret_val) | 7407 | if (ret_val) |
6692 | return ret_val; | 7408 | return ret_val; |
7409 | } | ||
6693 | 7410 | ||
6694 | /* When LPLU is enabled we should disable SmartSpeed */ | 7411 | /* When LPLU is enabled we should disable SmartSpeed */ |
6695 | ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data); | 7412 | ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data); |
@@ -6928,8 +7645,10 @@ e1000_mng_write_cmd_header(struct e1000_hw * hw, | |||
6928 | 7645 | ||
6929 | length >>= 2; | 7646 | length >>= 2; |
6930 | /* The device driver writes the relevant command block into the ram area. */ | 7647 | /* The device driver writes the relevant command block into the ram area. */ |
6931 | for (i = 0; i < length; i++) | 7648 | for (i = 0; i < length; i++) { |
6932 | E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((uint32_t *) hdr + i)); | 7649 | E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((uint32_t *) hdr + i)); |
7650 | E1000_WRITE_FLUSH(hw); | ||
7651 | } | ||
6933 | 7652 | ||
6934 | return E1000_SUCCESS; | 7653 | return E1000_SUCCESS; |
6935 | } | 7654 | } |
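The loop above now flushes after each dword so every posted write reaches the host interface RAM before the next one is issued. A small sketch of the same pattern with assumed MMIO helpers in place of E1000_WRITE_REG_ARRAY_DWORD and E1000_WRITE_FLUSH (the flush is typically a dummy read of a status register):

    #include <stdint.h>
    #include <stddef.h>

    /* Assumed MMIO helpers; names are illustrative only. */
    extern void mmio_write_dword(size_t index, uint32_t val);
    extern void mmio_flush(void);

    /* Copy a command header into the host interface RAM, one dword at a time,
     * flushing after each write so it is posted before the next one. */
    static void write_cmd_header(const uint32_t *hdr, size_t dwords)
    {
        for (size_t i = 0; i < dwords; i++) {
            mmio_write_dword(i, hdr[i]);
            mmio_flush();
        }
    }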
@@ -6961,15 +7680,18 @@ e1000_mng_write_commit( | |||
6961 | * returns - TRUE when the mode is IAMT or FALSE. | 7680 | * returns - TRUE when the mode is IAMT or FALSE. |
6962 | ****************************************************************************/ | 7681 | ****************************************************************************/ |
6963 | boolean_t | 7682 | boolean_t |
6964 | e1000_check_mng_mode( | 7683 | e1000_check_mng_mode(struct e1000_hw *hw) |
6965 | struct e1000_hw *hw) | ||
6966 | { | 7684 | { |
6967 | uint32_t fwsm; | 7685 | uint32_t fwsm; |
6968 | 7686 | ||
6969 | fwsm = E1000_READ_REG(hw, FWSM); | 7687 | fwsm = E1000_READ_REG(hw, FWSM); |
6970 | 7688 | ||
6971 | if((fwsm & E1000_FWSM_MODE_MASK) == | 7689 | if (hw->mac_type == e1000_ich8lan) { |
6972 | (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)) | 7690 | if ((fwsm & E1000_FWSM_MODE_MASK) == |
7691 | (E1000_MNG_ICH_IAMT_MODE << E1000_FWSM_MODE_SHIFT)) | ||
7692 | return TRUE; | ||
7693 | } else if ((fwsm & E1000_FWSM_MODE_MASK) == | ||
7694 | (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)) | ||
6973 | return TRUE; | 7695 | return TRUE; |
6974 | 7696 | ||
6975 | return FALSE; | 7697 | return FALSE; |
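e1000_check_mng_mode now decodes the FWSM mode field differently on ICH8, which reports iAMT with E1000_MNG_ICH_IAMT_MODE (0x2) rather than E1000_MNG_IAMT_MODE (0x3). A sketch of the comparison; the mask and shift values are assumptions for the driver's E1000_FWSM_MODE_MASK/_SHIFT:

    #include <stdint.h>

    #define FWSM_MODE_MASK    0x0000000E   /* assumed field layout */
    #define FWSM_MODE_SHIFT   1
    #define MNG_IAMT_MODE     0x3          /* non-ICH8 MACs */
    #define MNG_ICH_IAMT_MODE 0x2          /* ICH8 */

    /* Returns non-zero when the firmware reports iAMT management mode. */
    static int in_iamt_mode(uint32_t fwsm, int is_ich8)
    {
        uint32_t want = (uint32_t)(is_ich8 ? MNG_ICH_IAMT_MODE : MNG_IAMT_MODE)
                        << FWSM_MODE_SHIFT;

        return (fwsm & FWSM_MODE_MASK) == want;
    }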
@@ -7209,7 +7931,6 @@ e1000_set_pci_express_master_disable(struct e1000_hw *hw) | |||
7209 | E1000_WRITE_REG(hw, CTRL, ctrl); | 7931 | E1000_WRITE_REG(hw, CTRL, ctrl); |
7210 | } | 7932 | } |
7211 | 7933 | ||
7212 | #if 0 | ||
7213 | /*************************************************************************** | 7934 | /*************************************************************************** |
7214 | * | 7935 | * |
7215 | * Enables PCI-Express master access. | 7936 | * Enables PCI-Express master access. |
@@ -7219,6 +7940,7 @@ e1000_set_pci_express_master_disable(struct e1000_hw *hw) | |||
7219 | * returns: - none. | 7940 | * returns: - none. |
7220 | * | 7941 | * |
7221 | ***************************************************************************/ | 7942 | ***************************************************************************/ |
7943 | #if 0 | ||
7222 | void | 7944 | void |
7223 | e1000_enable_pciex_master(struct e1000_hw *hw) | 7945 | e1000_enable_pciex_master(struct e1000_hw *hw) |
7224 | { | 7946 | { |
@@ -7299,8 +8021,10 @@ e1000_get_auto_rd_done(struct e1000_hw *hw) | |||
7299 | case e1000_82572: | 8021 | case e1000_82572: |
7300 | case e1000_82573: | 8022 | case e1000_82573: |
7301 | case e1000_80003es2lan: | 8023 | case e1000_80003es2lan: |
7302 | while(timeout) { | 8024 | case e1000_ich8lan: |
7303 | if (E1000_READ_REG(hw, EECD) & E1000_EECD_AUTO_RD) break; | 8025 | while (timeout) { |
8026 | if (E1000_READ_REG(hw, EECD) & E1000_EECD_AUTO_RD) | ||
8027 | break; | ||
7304 | else msec_delay(1); | 8028 | else msec_delay(1); |
7305 | timeout--; | 8029 | timeout--; |
7306 | } | 8030 | } |
@@ -7340,7 +8064,7 @@ e1000_get_phy_cfg_done(struct e1000_hw *hw) | |||
7340 | 8064 | ||
7341 | switch (hw->mac_type) { | 8065 | switch (hw->mac_type) { |
7342 | default: | 8066 | default: |
7343 | msec_delay(10); | 8067 | msec_delay_irq(10); |
7344 | break; | 8068 | break; |
7345 | case e1000_80003es2lan: | 8069 | case e1000_80003es2lan: |
7346 | /* Separate *_CFG_DONE_* bit for each port */ | 8070 | /* Separate *_CFG_DONE_* bit for each port */ |
@@ -7457,7 +8181,7 @@ e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw) | |||
7457 | * E1000_SUCCESS at any other case. | 8181 | * E1000_SUCCESS at any other case. |
7458 | * | 8182 | * |
7459 | ***************************************************************************/ | 8183 | ***************************************************************************/ |
7460 | int32_t | 8184 | static int32_t |
7461 | e1000_get_software_semaphore(struct e1000_hw *hw) | 8185 | e1000_get_software_semaphore(struct e1000_hw *hw) |
7462 | { | 8186 | { |
7463 | int32_t timeout = hw->eeprom.word_size + 1; | 8187 | int32_t timeout = hw->eeprom.word_size + 1; |
@@ -7492,7 +8216,7 @@ e1000_get_software_semaphore(struct e1000_hw *hw) | |||
7492 | * hw: Struct containing variables accessed by shared code | 8216 | * hw: Struct containing variables accessed by shared code |
7493 | * | 8217 | * |
7494 | ***************************************************************************/ | 8218 | ***************************************************************************/ |
7495 | void | 8219 | static void |
7496 | e1000_release_software_semaphore(struct e1000_hw *hw) | 8220 | e1000_release_software_semaphore(struct e1000_hw *hw) |
7497 | { | 8221 | { |
7498 | uint32_t swsm; | 8222 | uint32_t swsm; |
@@ -7523,6 +8247,13 @@ int32_t | |||
7523 | e1000_check_phy_reset_block(struct e1000_hw *hw) | 8247 | e1000_check_phy_reset_block(struct e1000_hw *hw) |
7524 | { | 8248 | { |
7525 | uint32_t manc = 0; | 8249 | uint32_t manc = 0; |
8250 | uint32_t fwsm = 0; | ||
8251 | |||
8252 | if (hw->mac_type == e1000_ich8lan) { | ||
8253 | fwsm = E1000_READ_REG(hw, FWSM); | ||
8254 | return (fwsm & E1000_FWSM_RSPCIPHY) ? E1000_SUCCESS | ||
8255 | : E1000_BLK_PHY_RESET; | ||
8256 | } | ||
7526 | 8257 | ||
7527 | if (hw->mac_type > e1000_82547_rev_2) | 8258 | if (hw->mac_type > e1000_82547_rev_2) |
7528 | manc = E1000_READ_REG(hw, MANC); | 8259 | manc = E1000_READ_REG(hw, MANC); |
@@ -7549,6 +8280,8 @@ e1000_arc_subsystem_valid(struct e1000_hw *hw) | |||
7549 | if((fwsm & E1000_FWSM_MODE_MASK) != 0) | 8280 | if((fwsm & E1000_FWSM_MODE_MASK) != 0) |
7550 | return TRUE; | 8281 | return TRUE; |
7551 | break; | 8282 | break; |
8283 | case e1000_ich8lan: | ||
8284 | return TRUE; | ||
7552 | default: | 8285 | default: |
7553 | break; | 8286 | break; |
7554 | } | 8287 | } |
@@ -7556,4 +8289,854 @@ e1000_arc_subsystem_valid(struct e1000_hw *hw) | |||
7556 | } | 8289 | } |
7557 | 8290 | ||
7558 | 8291 | ||
8292 | /****************************************************************************** | ||
8293 | * Configure PCI-Ex no-snoop | ||
8294 | * | ||
8295 | * hw - Struct containing variables accessed by shared code. | ||
8296 | * no_snoop - Bitmap of no-snoop events. | ||
8297 | * | ||
8298 | * returns: E1000_SUCCESS | ||
8299 | * | ||
8300 | *****************************************************************************/ | ||
8301 | static int32_t | ||
8302 | e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop) | ||
8303 | { | ||
8304 | uint32_t gcr_reg = 0; | ||
8305 | |||
8306 | DEBUGFUNC("e1000_set_pci_ex_no_snoop"); | ||
8307 | |||
8308 | if (hw->bus_type == e1000_bus_type_unknown) | ||
8309 | e1000_get_bus_info(hw); | ||
8310 | |||
8311 | if (hw->bus_type != e1000_bus_type_pci_express) | ||
8312 | return E1000_SUCCESS; | ||
8313 | |||
8314 | if (no_snoop) { | ||
8315 | gcr_reg = E1000_READ_REG(hw, GCR); | ||
8316 | gcr_reg &= ~(PCI_EX_NO_SNOOP_ALL); | ||
8317 | gcr_reg |= no_snoop; | ||
8318 | E1000_WRITE_REG(hw, GCR, gcr_reg); | ||
8319 | } | ||
8320 | if (hw->mac_type == e1000_ich8lan) { | ||
8321 | uint32_t ctrl_ext; | ||
8322 | |||
8323 | E1000_WRITE_REG(hw, GCR, PCI_EX_82566_SNOOP_ALL); | ||
8324 | |||
8325 | ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); | ||
8326 | ctrl_ext |= E1000_CTRL_EXT_RO_DIS; | ||
8327 | E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); | ||
8328 | } | ||
8329 | |||
8330 | return E1000_SUCCESS; | ||
8331 | } | ||
8332 | |||
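e1000_set_pci_ex_no_snoop clears the whole no-snoop field in GCR and then sets only the requested event bits; on ich8lan it additionally forces snooping and disables relaxed ordering. A sketch of just the read-modify-write part, with assumed register helpers and an assumed value for PCI_EX_NO_SNOOP_ALL:

    #include <stdint.h>

    #define NO_SNOOP_ALL 0x0000003F   /* assumed mask of all no-snoop events */

    /* Stand-ins for E1000_READ_REG/E1000_WRITE_REG on GCR. */
    extern uint32_t gcr_read(void);
    extern void gcr_write(uint32_t val);

    /* Enable exactly the no-snoop events given in the bitmap. */
    static void set_no_snoop(uint32_t no_snoop)
    {
        uint32_t gcr;

        if (!no_snoop)
            return;            /* nothing requested, GCR left untouched */

        gcr = gcr_read();
        gcr &= ~NO_SNOOP_ALL;  /* clear the existing no-snoop selections */
        gcr |= no_snoop;       /* set only the requested events */
        gcr_write(gcr);
    }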
8333 | /*************************************************************************** | ||
8334 | * | ||
8335 | * Get software semaphore FLAG bit (SWFLAG). | ||
8336 | * SWFLAG is used to synchronize the access to all shared resource between | ||
8337 | * SW, FW and HW. | ||
8338 | * | ||
8339 | * hw: Struct containing variables accessed by shared code | ||
8340 | * | ||
8341 | ***************************************************************************/ | ||
8342 | static int32_t | ||
8343 | e1000_get_software_flag(struct e1000_hw *hw) | ||
8344 | { | ||
8345 | int32_t timeout = PHY_CFG_TIMEOUT; | ||
8346 | uint32_t extcnf_ctrl; | ||
8347 | |||
8348 | DEBUGFUNC("e1000_get_software_flag"); | ||
8349 | |||
8350 | if (hw->mac_type == e1000_ich8lan) { | ||
8351 | while (timeout) { | ||
8352 | extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL); | ||
8353 | extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; | ||
8354 | E1000_WRITE_REG(hw, EXTCNF_CTRL, extcnf_ctrl); | ||
8355 | |||
8356 | extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL); | ||
8357 | if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) | ||
8358 | break; | ||
8359 | msec_delay_irq(1); | ||
8360 | timeout--; | ||
8361 | } | ||
8362 | |||
8363 | if (!timeout) { | ||
8364 | DEBUGOUT("FW or HW locks the resource too long.\n"); | ||
8365 | return -E1000_ERR_CONFIG; | ||
8366 | } | ||
8367 | } | ||
8368 | |||
8369 | return E1000_SUCCESS; | ||
8370 | } | ||
8371 | |||
8372 | /*************************************************************************** | ||
8373 | * | ||
8374 | * Release software semaphore FLAG bit (SWFLAG). | ||
8375 | * SWFLAG is used to synchronize the access to all shared resource between | ||
8376 | * SW, FW and HW. | ||
8377 | * | ||
8378 | * hw: Struct containing variables accessed by shared code | ||
8379 | * | ||
8380 | ***************************************************************************/ | ||
8381 | static void | ||
8382 | e1000_release_software_flag(struct e1000_hw *hw) | ||
8383 | { | ||
8384 | uint32_t extcnf_ctrl; | ||
8385 | |||
8386 | DEBUGFUNC("e1000_release_software_flag"); | ||
8387 | |||
8388 | if (hw->mac_type == e1000_ich8lan) { | ||
8389 | extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL); | ||
8390 | extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; | ||
8391 | E1000_WRITE_REG(hw, EXTCNF_CTRL, extcnf_ctrl); | ||
8392 | } | ||
8393 | |||
8394 | return; | ||
8395 | } | ||
8396 | |||
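e1000_get_software_flag and e1000_release_software_flag bracket every ICH8 flash or PHY access so that software, firmware and hardware serialize on the SWFLAG bit. A rough usage sketch, assuming simplified wrapper names for the two functions and an arbitrary flash operation:

    #include <stdint.h>

    /* Assumed wrappers around the two functions above; return 0 on success. */
    extern int32_t get_software_flag(void);
    extern void release_software_flag(void);
    extern int32_t do_flash_access(void);

    /* Typical usage: acquire the flag, do the access, always release. */
    static int32_t guarded_flash_access(void)
    {
        int32_t ret = get_software_flag();
        if (ret)
            return ret;           /* FW/HW held the flag for too long */

        ret = do_flash_access();  /* the actual read/write/erase cycle */

        release_software_flag();  /* drop the flag even if the access failed */
        return ret;
    }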
8397 | /*************************************************************************** | ||
8398 | * | ||
8399 | * Disable dynamic power down mode in ife PHY. | ||
8400 | * It can be used to work around the band-gap problem. | ||
8401 | * | ||
8402 | * hw: Struct containing variables accessed by shared code | ||
8403 | * | ||
8404 | ***************************************************************************/ | ||
8405 | #if 0 | ||
8406 | int32_t | ||
8407 | e1000_ife_disable_dynamic_power_down(struct e1000_hw *hw) | ||
8408 | { | ||
8409 | uint16_t phy_data; | ||
8410 | int32_t ret_val = E1000_SUCCESS; | ||
8411 | |||
8412 | DEBUGFUNC("e1000_ife_disable_dynamic_power_down"); | ||
8413 | |||
8414 | if (hw->phy_type == e1000_phy_ife) { | ||
8415 | ret_val = e1000_read_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, &phy_data); | ||
8416 | if (ret_val) | ||
8417 | return ret_val; | ||
8418 | |||
8419 | phy_data |= IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN; | ||
8420 | ret_val = e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, phy_data); | ||
8421 | } | ||
8422 | |||
8423 | return ret_val; | ||
8424 | } | ||
8425 | #endif /* 0 */ | ||
8426 | |||
8427 | /*************************************************************************** | ||
8428 | * | ||
8429 | * Enable dynamic power down mode in ife PHY. | ||
8430 | * It can be used to work around the band-gap problem. | ||
8431 | * | ||
8432 | * hw: Struct containing variables accessed by shared code | ||
8433 | * | ||
8434 | ***************************************************************************/ | ||
8435 | #if 0 | ||
8436 | int32_t | ||
8437 | e1000_ife_enable_dynamic_power_down(struct e1000_hw *hw) | ||
8438 | { | ||
8439 | uint16_t phy_data; | ||
8440 | int32_t ret_val = E1000_SUCCESS; | ||
8441 | |||
8442 | DEBUGFUNC("e1000_ife_enable_dynamic_power_down"); | ||
8443 | |||
8444 | if (hw->phy_type == e1000_phy_ife) { | ||
8445 | ret_val = e1000_read_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, &phy_data); | ||
8446 | if (ret_val) | ||
8447 | return ret_val; | ||
8448 | |||
8449 | phy_data &= ~IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN; | ||
8450 | ret_val = e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, phy_data); | ||
8451 | } | ||
8452 | |||
8453 | return ret_val; | ||
8454 | } | ||
8455 | #endif /* 0 */ | ||
8456 | |||
8457 | /****************************************************************************** | ||
8458 | * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access | ||
8459 | * register. | ||
8460 | * | ||
8461 | * hw - Struct containing variables accessed by shared code | ||
8462 | * offset - offset of word in the EEPROM to read | ||
8463 | * data - word read from the EEPROM | ||
8464 | * words - number of words to read | ||
8465 | *****************************************************************************/ | ||
8466 | static int32_t | ||
8467 | e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words, | ||
8468 | uint16_t *data) | ||
8469 | { | ||
8470 | int32_t error = E1000_SUCCESS; | ||
8471 | uint32_t flash_bank = 0; | ||
8472 | uint32_t act_offset = 0; | ||
8473 | uint32_t bank_offset = 0; | ||
8474 | uint16_t word = 0; | ||
8475 | uint16_t i = 0; | ||
8476 | |||
8477 | /* We need to know which is the valid flash bank. In the event | ||
8478 | * that we didn't allocate eeprom_shadow_ram, we may not be | ||
8479 | * managing flash_bank. So it cannot be trusted and needs | ||
8480 | * to be updated with each read. | ||
8481 | */ | ||
8482 | /* Value of bit 22 corresponds to the flash bank we're on. */ | ||
8483 | flash_bank = (E1000_READ_REG(hw, EECD) & E1000_EECD_SEC1VAL) ? 1 : 0; | ||
8484 | |||
8485 | /* Adjust offset appropriately if we're on bank 1 - adjust for word size */ | ||
8486 | bank_offset = flash_bank * (hw->flash_bank_size * 2); | ||
8487 | |||
8488 | error = e1000_get_software_flag(hw); | ||
8489 | if (error != E1000_SUCCESS) | ||
8490 | return error; | ||
8491 | |||
8492 | for (i = 0; i < words; i++) { | ||
8493 | if (hw->eeprom_shadow_ram != NULL && | ||
8494 | hw->eeprom_shadow_ram[offset+i].modified == TRUE) { | ||
8495 | data[i] = hw->eeprom_shadow_ram[offset+i].eeprom_word; | ||
8496 | } else { | ||
8497 | /* The NVM part needs a byte offset, hence * 2 */ | ||
8498 | act_offset = bank_offset + ((offset + i) * 2); | ||
8499 | error = e1000_read_ich8_word(hw, act_offset, &word); | ||
8500 | if (error != E1000_SUCCESS) | ||
8501 | break; | ||
8502 | data[i] = word; | ||
8503 | } | ||
8504 | } | ||
8505 | |||
8506 | e1000_release_software_flag(hw); | ||
8507 | |||
8508 | return error; | ||
8509 | } | ||
8510 | |||
8511 | /****************************************************************************** | ||
8512 | * Writes a 16 bit word or words to the EEPROM using the ICH8's flash access | ||
8513 | * register. Actually, writes are written to the shadow ram cache in the hw | ||
8514 | * structure hw->e1000_shadow_ram. e1000_commit_shadow_ram flushes this to | ||
8515 | * the NVM, which occurs when the NVM checksum is updated. | ||
8516 | * | ||
8517 | * hw - Struct containing variables accessed by shared code | ||
8518 | * offset - offset of word in the EEPROM to write | ||
8519 | * words - number of words to write | ||
8520 | * data - words to write to the EEPROM | ||
8521 | *****************************************************************************/ | ||
8522 | static int32_t | ||
8523 | e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words, | ||
8524 | uint16_t *data) | ||
8525 | { | ||
8526 | uint32_t i = 0; | ||
8527 | int32_t error = E1000_SUCCESS; | ||
8528 | |||
8529 | error = e1000_get_software_flag(hw); | ||
8530 | if (error != E1000_SUCCESS) | ||
8531 | return error; | ||
8532 | |||
8533 | /* A driver can write to the NVM only if it has eeprom_shadow_ram | ||
8534 | * allocated. Subsequent reads to the modified words are read from | ||
8535 | * this cached structure as well. Writes will only go into this | ||
8536 | * cached structure unless it's followed by a call to | ||
8537 | * e1000_update_eeprom_checksum() where it will commit the changes | ||
8538 | * and clear the "modified" field. | ||
8539 | */ | ||
8540 | if (hw->eeprom_shadow_ram != NULL) { | ||
8541 | for (i = 0; i < words; i++) { | ||
8542 | if ((offset + i) < E1000_SHADOW_RAM_WORDS) { | ||
8543 | hw->eeprom_shadow_ram[offset+i].modified = TRUE; | ||
8544 | hw->eeprom_shadow_ram[offset+i].eeprom_word = data[i]; | ||
8545 | } else { | ||
8546 | error = -E1000_ERR_EEPROM; | ||
8547 | break; | ||
8548 | } | ||
8549 | } | ||
8550 | } else { | ||
8551 | /* Drivers have the option to not allocate eeprom_shadow_ram as long | ||
8552 | * as they don't perform any NVM writes. An attempt to do so | ||
8553 | * will result in this error. | ||
8554 | */ | ||
8555 | error = -E1000_ERR_EEPROM; | ||
8556 | } | ||
8557 | |||
8558 | e1000_release_software_flag(hw); | ||
8559 | |||
8560 | return error; | ||
8561 | } | ||
8562 | |||
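On ICH8, EEPROM writes only land in the eeprom_shadow_ram cache here; they are flushed to flash later, when the NVM checksum is updated. A standalone sketch of the cached-write step, modelled on struct e1000_shadow_ram from e1000_hw.h but with an assumed word count in place of E1000_SHADOW_RAM_WORDS and plain ints for boolean_t:

    #include <stdint.h>
    #include <stddef.h>

    struct shadow_word {
        uint16_t eeprom_word;
        int      modified;
    };

    #define SHADOW_WORDS 2048   /* assumed size */

    /* Cache a write: data lands only in the shadow copy and is committed to
     * the flash later. Returns 0, or -1 if the write falls outside the area. */
    static int shadow_write(struct shadow_word *ram, uint16_t offset,
                            uint16_t words, const uint16_t *data)
    {
        for (size_t i = 0; i < words; i++) {
            if (offset + i >= SHADOW_WORDS)
                return -1;
            ram[offset + i].eeprom_word = data[i];
            ram[offset + i].modified = 1;
        }
        return 0;
    }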
8563 | /****************************************************************************** | ||
8564 | * This function does initial flash setup so that a new read/write/erase cycle | ||
8565 | * can be started. | ||
8566 | * | ||
8567 | * hw - The pointer to the hw structure | ||
8568 | ****************************************************************************/ | ||
8569 | static int32_t | ||
8570 | e1000_ich8_cycle_init(struct e1000_hw *hw) | ||
8571 | { | ||
8572 | union ich8_hws_flash_status hsfsts; | ||
8573 | int32_t error = E1000_ERR_EEPROM; | ||
8574 | int32_t i = 0; | ||
8575 | |||
8576 | DEBUGFUNC("e1000_ich8_cycle_init"); | ||
8577 | |||
8578 | hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS); | ||
8579 | |||
8580 | /* Maybe check the Flash Descriptor Valid bit in Hw status */ | ||
8581 | if (hsfsts.hsf_status.fldesvalid == 0) { | ||
8582 | DEBUGOUT("Flash descriptor invalid. SW Sequencing must be used."); | ||
8583 | return error; | ||
8584 | } | ||
8585 | |||
8586 | /* Clear FCERR in Hw status by writing 1 */ | ||
8587 | /* Clear DAEL in Hw status by writing a 1 */ | ||
8588 | hsfsts.hsf_status.flcerr = 1; | ||
8589 | hsfsts.hsf_status.dael = 1; | ||
8590 | |||
8591 | E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFSTS, hsfsts.regval); | ||
8592 | |||
8593 | /* Either we should have a hardware SPI cycle-in-progress bit to check | ||
8594 | * against in order to start a new cycle, or the FDONE bit should be | ||
8595 | * changed in the hardware so that it is 1 after hardware reset, which | ||
8596 | * could then be used to indicate whether a cycle is in progress or has | ||
8597 | * been completed. We should also have some software semaphore mechanism | ||
8598 | * to guard FDONE or the cycle-in-progress bit so that access to those | ||
8599 | * bits by two threads can be serialized, or some way to prevent two | ||
8600 | * threads from starting the cycle at the same time. */ | ||
8601 | |||
8602 | if (hsfsts.hsf_status.flcinprog == 0) { | ||
8603 | /* There is no cycle running at present, so we can start a cycle */ | ||
8604 | /* Begin by setting Flash Cycle Done. */ | ||
8605 | hsfsts.hsf_status.flcdone = 1; | ||
8606 | E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFSTS, hsfsts.regval); | ||
8607 | error = E1000_SUCCESS; | ||
8608 | } else { | ||
8609 | /* Otherwise poll for some time so the current cycle has a chance | ||
8610 | * to end before giving up. */ | ||
8611 | for (i = 0; i < ICH8_FLASH_COMMAND_TIMEOUT; i++) { | ||
8612 | hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS); | ||
8613 | if (hsfsts.hsf_status.flcinprog == 0) { | ||
8614 | error = E1000_SUCCESS; | ||
8615 | break; | ||
8616 | } | ||
8617 | udelay(1); | ||
8618 | } | ||
8619 | if (error == E1000_SUCCESS) { | ||
8620 | /* Successfully waited for the previous cycle to finish, | ||
8621 | * now set the Flash Cycle Done. */ | ||
8622 | hsfsts.hsf_status.flcdone = 1; | ||
8623 | E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFSTS, hsfsts.regval); | ||
8624 | } else { | ||
8625 | DEBUGOUT("Flash controller busy, cannot get access"); | ||
8626 | } | ||
8627 | } | ||
8628 | return error; | ||
8629 | } | ||
8630 | |||
8631 | /****************************************************************************** | ||
8632 | * This function starts a flash cycle and waits for its completion | ||
8633 | * | ||
8634 | * hw - The pointer to the hw structure | ||
8635 | ****************************************************************************/ | ||
8636 | static int32_t | ||
8637 | e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout) | ||
8638 | { | ||
8639 | union ich8_hws_flash_ctrl hsflctl; | ||
8640 | union ich8_hws_flash_status hsfsts; | ||
8641 | int32_t error = E1000_ERR_EEPROM; | ||
8642 | uint32_t i = 0; | ||
8643 | |||
8644 | /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ | ||
8645 | hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL); | ||
8646 | hsflctl.hsf_ctrl.flcgo = 1; | ||
8647 | E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval); | ||
8648 | |||
8649 | /* wait till FDONE bit is set to 1 */ | ||
8650 | do { | ||
8651 | hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS); | ||
8652 | if (hsfsts.hsf_status.flcdone == 1) | ||
8653 | break; | ||
8654 | udelay(1); | ||
8655 | i++; | ||
8656 | } while (i < timeout); | ||
8657 | if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0) { | ||
8658 | error = E1000_SUCCESS; | ||
8659 | } | ||
8660 | return error; | ||
8661 | } | ||
8662 | |||
8663 | /****************************************************************************** | ||
8664 | * Reads a byte or word from the NVM using the ICH8 flash access registers. | ||
8665 | * | ||
8666 | * hw - The pointer to the hw structure | ||
8667 | * index - The index of the byte or word to read. | ||
8668 | * size - Size of data to read, 1=byte 2=word | ||
8669 | * data - Pointer to the word to store the value read. | ||
8670 | *****************************************************************************/ | ||
8671 | static int32_t | ||
8672 | e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index, | ||
8673 | uint32_t size, uint16_t* data) | ||
8674 | { | ||
8675 | union ich8_hws_flash_status hsfsts; | ||
8676 | union ich8_hws_flash_ctrl hsflctl; | ||
8677 | uint32_t flash_linear_address; | ||
8678 | uint32_t flash_data = 0; | ||
8679 | int32_t error = -E1000_ERR_EEPROM; | ||
8680 | int32_t count = 0; | ||
8681 | |||
8682 | DEBUGFUNC("e1000_read_ich8_data"); | ||
8683 | |||
8684 | if (size < 1 || size > 2 || data == 0x0 || | ||
8685 | index > ICH8_FLASH_LINEAR_ADDR_MASK) | ||
8686 | return error; | ||
8687 | |||
8688 | flash_linear_address = (ICH8_FLASH_LINEAR_ADDR_MASK & index) + | ||
8689 | hw->flash_base_addr; | ||
8690 | |||
8691 | do { | ||
8692 | udelay(1); | ||
8693 | /* Steps */ | ||
8694 | error = e1000_ich8_cycle_init(hw); | ||
8695 | if (error != E1000_SUCCESS) | ||
8696 | break; | ||
8697 | |||
8698 | hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL); | ||
8699 | /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ | ||
8700 | hsflctl.hsf_ctrl.fldbcount = size - 1; | ||
8701 | hsflctl.hsf_ctrl.flcycle = ICH8_CYCLE_READ; | ||
8702 | E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval); | ||
8703 | |||
8704 | /* Write the last 24 bits of index into Flash Linear address field in | ||
8705 | * Flash Address */ | ||
8706 | /* TODO: TBD maybe check the index against the size of flash */ | ||
8707 | |||
8708 | E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FADDR, flash_linear_address); | ||
8709 | |||
8710 | error = e1000_ich8_flash_cycle(hw, ICH8_FLASH_COMMAND_TIMEOUT); | ||
8711 | |||
8712 | /* If FCERR is set to 1, clear it and retry the whole sequence a few | ||
8713 | * more times; otherwise read in (shift in) the Flash Data0, which is | ||
8714 | * returned least significant byte first. */ | ||
8715 | if (error == E1000_SUCCESS) { | ||
8716 | flash_data = E1000_READ_ICH8_REG(hw, ICH8_FLASH_FDATA0); | ||
8717 | if (size == 1) { | ||
8718 | *data = (uint8_t)(flash_data & 0x000000FF); | ||
8719 | } else if (size == 2) { | ||
8720 | *data = (uint16_t)(flash_data & 0x0000FFFF); | ||
8721 | } | ||
8722 | break; | ||
8723 | } else { | ||
8724 | /* If we've gotten here, then things are probably completely hosed, | ||
8725 | * but if the error condition is detected, it won't hurt to give | ||
8726 | * it another try...ICH8_FLASH_CYCLE_REPEAT_COUNT times. | ||
8727 | */ | ||
8728 | hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS); | ||
8729 | if (hsfsts.hsf_status.flcerr == 1) { | ||
8730 | /* Repeat for some time before giving up. */ | ||
8731 | continue; | ||
8732 | } else if (hsfsts.hsf_status.flcdone == 0) { | ||
8733 | DEBUGOUT("Timeout error - flash cycle did not complete."); | ||
8734 | break; | ||
8735 | } | ||
8736 | } | ||
8737 | } while (count++ < ICH8_FLASH_CYCLE_REPEAT_COUNT); | ||
8738 | |||
8739 | return error; | ||
8740 | } | ||
8741 | |||
8742 | /****************************************************************************** | ||
8743 | * Writes one or two bytes to the NVM using the ICH8 flash access registers. | ||
8744 | * | ||
8745 | * hw - The pointer to the hw structure | ||
8746 | * index - The index of the byte/word to write. | ||
8747 | * size - Size of data to write, 1=byte 2=word | ||
8748 | * data - The byte(s) to write to the NVM. | ||
8749 | *****************************************************************************/ | ||
8750 | static int32_t | ||
8751 | e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size, | ||
8752 | uint16_t data) | ||
8753 | { | ||
8754 | union ich8_hws_flash_status hsfsts; | ||
8755 | union ich8_hws_flash_ctrl hsflctl; | ||
8756 | uint32_t flash_linear_address; | ||
8757 | uint32_t flash_data = 0; | ||
8758 | int32_t error = -E1000_ERR_EEPROM; | ||
8759 | int32_t count = 0; | ||
8760 | |||
8761 | DEBUGFUNC("e1000_write_ich8_data"); | ||
8762 | |||
8763 | if (size < 1 || size > 2 || data > size * 0xff || | ||
8764 | index > ICH8_FLASH_LINEAR_ADDR_MASK) | ||
8765 | return error; | ||
8766 | |||
8767 | flash_linear_address = (ICH8_FLASH_LINEAR_ADDR_MASK & index) + | ||
8768 | hw->flash_base_addr; | ||
8769 | |||
8770 | do { | ||
8771 | udelay(1); | ||
8772 | /* Steps */ | ||
8773 | error = e1000_ich8_cycle_init(hw); | ||
8774 | if (error != E1000_SUCCESS) | ||
8775 | break; | ||
8776 | |||
8777 | hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL); | ||
8778 | /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ | ||
8779 | hsflctl.hsf_ctrl.fldbcount = size - 1; | ||
8780 | hsflctl.hsf_ctrl.flcycle = ICH8_CYCLE_WRITE; | ||
8781 | E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval); | ||
8782 | |||
8783 | /* Write the last 24 bits of index into Flash Linear address field in | ||
8784 | * Flash Address */ | ||
8785 | E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FADDR, flash_linear_address); | ||
8786 | |||
8787 | if (size == 1) | ||
8788 | flash_data = (uint32_t)data & 0x00FF; | ||
8789 | else | ||
8790 | flash_data = (uint32_t)data; | ||
8791 | |||
8792 | E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FDATA0, flash_data); | ||
8793 | |||
8794 | /* If FCERR is set to 1, clear it and retry the whole sequence a few | ||
8795 | * more times; otherwise we are done. */ | ||
8796 | error = e1000_ich8_flash_cycle(hw, ICH8_FLASH_COMMAND_TIMEOUT); | ||
8797 | if (error == E1000_SUCCESS) { | ||
8798 | break; | ||
8799 | } else { | ||
8800 | /* If we're here, then things are most likely completely hosed, | ||
8801 | * but if the error condition is detected, it won't hurt to give | ||
8802 | * it another try...ICH8_FLASH_CYCLE_REPEAT_COUNT times. | ||
8803 | */ | ||
8804 | hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS); | ||
8805 | if (hsfsts.hsf_status.flcerr == 1) { | ||
8806 | /* Repeat for some time before giving up. */ | ||
8807 | continue; | ||
8808 | } else if (hsfsts.hsf_status.flcdone == 0) { | ||
8809 | DEBUGOUT("Timeout error - flash cycle did not complete."); | ||
8810 | break; | ||
8811 | } | ||
8812 | } | ||
8813 | } while (count++ < ICH8_FLASH_CYCLE_REPEAT_COUNT); | ||
8814 | |||
8815 | return error; | ||
8816 | } | ||
8817 | |||
8818 | /****************************************************************************** | ||
8819 | * Reads a single byte from the NVM using the ICH8 flash access registers. | ||
8820 | * | ||
8821 | * hw - pointer to e1000_hw structure | ||
8822 | * index - The index of the byte to read. | ||
8823 | * data - Pointer to a byte to store the value read. | ||
8824 | *****************************************************************************/ | ||
8825 | static int32_t | ||
8826 | e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t* data) | ||
8827 | { | ||
8828 | int32_t status = E1000_SUCCESS; | ||
8829 | uint16_t word = 0; | ||
8830 | |||
8831 | status = e1000_read_ich8_data(hw, index, 1, &word); | ||
8832 | if (status == E1000_SUCCESS) { | ||
8833 | *data = (uint8_t)word; | ||
8834 | } | ||
8835 | |||
8836 | return status; | ||
8837 | } | ||
8838 | |||
8839 | /****************************************************************************** | ||
8840 | * Writes a single byte to the NVM using the ICH8 flash access registers. | ||
8841 | * Performs verification by reading back the value and then going through | ||
8842 | * a retry algorithm before giving up. | ||
8843 | * | ||
8844 | * hw - pointer to e1000_hw structure | ||
8845 | * index - The index of the byte to write. | ||
8846 | * byte - The byte to write to the NVM. | ||
8847 | *****************************************************************************/ | ||
8848 | static int32_t | ||
8849 | e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte) | ||
8850 | { | ||
8851 | int32_t error = E1000_SUCCESS; | ||
8852 | int32_t program_retries; | ||
8853 | uint8_t temp_byte; | ||
8854 | |||
8855 | e1000_write_ich8_byte(hw, index, byte); | ||
8856 | udelay(100); | ||
8857 | |||
8858 | for (program_retries = 0; program_retries < 100; program_retries++) { | ||
8859 | e1000_read_ich8_byte(hw, index, &temp_byte); | ||
8860 | if (temp_byte == byte) | ||
8861 | break; | ||
8862 | udelay(10); | ||
8863 | e1000_write_ich8_byte(hw, index, byte); | ||
8864 | udelay(100); | ||
8865 | } | ||
8866 | if (program_retries == 100) | ||
8867 | error = E1000_ERR_EEPROM; | ||
8868 | |||
8869 | return error; | ||
8870 | } | ||
8871 | |||
8872 | /****************************************************************************** | ||
8873 | * Writes a single byte to the NVM using the ICH8 flash access registers. | ||
8874 | * | ||
8875 | * hw - pointer to e1000_hw structure | ||
8876 | * index - The index of the byte to write. | ||
8877 | * data - The byte to write to the NVM. | ||
8878 | *****************************************************************************/ | ||
8879 | static int32_t | ||
8880 | e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t data) | ||
8881 | { | ||
8882 | int32_t status = E1000_SUCCESS; | ||
8883 | uint16_t word = (uint16_t)data; | ||
8884 | |||
8885 | status = e1000_write_ich8_data(hw, index, 1, word); | ||
8886 | |||
8887 | return status; | ||
8888 | } | ||
8889 | |||
8890 | /****************************************************************************** | ||
8891 | * Reads a word from the NVM using the ICH8 flash access registers. | ||
8892 | * | ||
8893 | * hw - pointer to e1000_hw structure | ||
8894 | * index - The starting byte index of the word to read. | ||
8895 | * data - Pointer to a word to store the value read. | ||
8896 | *****************************************************************************/ | ||
8897 | static int32_t | ||
8898 | e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t *data) | ||
8899 | { | ||
8900 | int32_t status = E1000_SUCCESS; | ||
8901 | status = e1000_read_ich8_data(hw, index, 2, data); | ||
8902 | return status; | ||
8903 | } | ||
8904 | |||
8905 | /****************************************************************************** | ||
8906 | * Writes a word to the NVM using the ICH8 flash access registers. | ||
8907 | * | ||
8908 | * hw - pointer to e1000_hw structure | ||
8909 | * index - The starting byte index of the word to write. | ||
8910 | * data - The word to write to the NVM. | ||
8911 | *****************************************************************************/ | ||
8912 | #if 0 | ||
8913 | int32_t | ||
8914 | e1000_write_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t data) | ||
8915 | { | ||
8916 | int32_t status = E1000_SUCCESS; | ||
8917 | status = e1000_write_ich8_data(hw, index, 2, data); | ||
8918 | return status; | ||
8919 | } | ||
8920 | #endif /* 0 */ | ||
8921 | |||
8922 | /****************************************************************************** | ||
8923 | * Erases the bank specified. Each bank is a 4k block. Segments are 0 based. | ||
8924 | * segment N is 4096 * N + flash_reg_addr. | ||
8925 | * | ||
8926 | * hw - pointer to e1000_hw structure | ||
8927 | * segment - 0 for first segment, 1 for second segment, etc. | ||
8928 | *****************************************************************************/ | ||
8929 | static int32_t | ||
8930 | e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t segment) | ||
8931 | { | ||
8932 | union ich8_hws_flash_status hsfsts; | ||
8933 | union ich8_hws_flash_ctrl hsflctl; | ||
8934 | uint32_t flash_linear_address; | ||
8935 | int32_t count = 0; | ||
8936 | int32_t error = E1000_ERR_EEPROM; | ||
8937 | int32_t iteration, seg_size; | ||
8938 | int32_t sector_size; | ||
8939 | int32_t j = 0; | ||
8940 | int32_t error_flag = 0; | ||
8941 | |||
8942 | hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS); | ||
8943 | |||
8944 | /* Determine HW Sector size: Read BERASE bits of Hw flash Status register */ | ||
8945 | /* 00: The Hw sector is 256 bytes, hence we need to erase 16 | ||
8946 | * consecutive sectors. The start index for the nth Hw sector can be | ||
8947 | * calculated as = segment * 4096 + n * 256 | ||
8948 | * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector. | ||
8949 | * The start index for the nth Hw sector can be calculated | ||
8950 | * as = segment * 4096 | ||
8951 | * 10: Error condition | ||
8952 | * 11: The Hw sector is 64K bytes; the code below erases it as a | ||
8953 | * single segment. */ | ||
8954 | if (hsfsts.hsf_status.berasesz == 0x0) { | ||
8955 | /* Hw sector size 256 */ | ||
8956 | sector_size = seg_size = ICH8_FLASH_SEG_SIZE_256; | ||
8957 | iteration = ICH8_FLASH_SECTOR_SIZE / ICH8_FLASH_SEG_SIZE_256; | ||
8958 | } else if (hsfsts.hsf_status.berasesz == 0x1) { | ||
8959 | sector_size = seg_size = ICH8_FLASH_SEG_SIZE_4K; | ||
8960 | iteration = 1; | ||
8961 | } else if (hsfsts.hsf_status.berasesz == 0x3) { | ||
8962 | sector_size = seg_size = ICH8_FLASH_SEG_SIZE_64K; | ||
8963 | iteration = 1; | ||
8964 | } else { | ||
8965 | return error; | ||
8966 | } | ||
8967 | |||
8968 | for (j = 0; j < iteration ; j++) { | ||
8969 | do { | ||
8970 | count++; | ||
8971 | /* Steps */ | ||
8972 | error = e1000_ich8_cycle_init(hw); | ||
8973 | if (error != E1000_SUCCESS) { | ||
8974 | error_flag = 1; | ||
8975 | break; | ||
8976 | } | ||
8977 | |||
8978 | /* Write a value 11 (block Erase) in Flash Cycle field in Hw flash | ||
8979 | * Control */ | ||
8980 | hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL); | ||
8981 | hsflctl.hsf_ctrl.flcycle = ICH8_CYCLE_ERASE; | ||
8982 | E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval); | ||
8983 | |||
8984 | /* Write the last 24 bits of an index within the block into Flash | ||
8985 | * Linear address field in Flash Address. This probably needs to | ||
8986 | * be calculated here based off the on-chip segment size and the | ||
8987 | * software segment size assumed (4K) */ | ||
8988 | /* TBD */ | ||
8989 | flash_linear_address = segment * sector_size + j * seg_size; | ||
8990 | flash_linear_address &= ICH8_FLASH_LINEAR_ADDR_MASK; | ||
8991 | flash_linear_address += hw->flash_base_addr; | ||
8992 | |||
8993 | E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FADDR, flash_linear_address); | ||
8994 | |||
8995 | error = e1000_ich8_flash_cycle(hw, 1000000); | ||
8996 | /* If FCERR is set to 1, clear it and retry the whole sequence a few | ||
8997 | * more times; otherwise we are done. */ | ||
8998 | if (error == E1000_SUCCESS) { | ||
8999 | break; | ||
9000 | } else { | ||
9001 | hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS); | ||
9002 | if (hsfsts.hsf_status.flcerr == 1) { | ||
9003 | /* repeat for some time before giving up */ | ||
9004 | continue; | ||
9005 | } else if (hsfsts.hsf_status.flcdone == 0) { | ||
9006 | error_flag = 1; | ||
9007 | break; | ||
9008 | } | ||
9009 | } | ||
9010 | } while ((count < ICH8_FLASH_CYCLE_REPEAT_COUNT) && !error_flag); | ||
9011 | if (error_flag == 1) | ||
9012 | break; | ||
9013 | } | ||
9014 | if (error_flag != 1) | ||
9015 | error = E1000_SUCCESS; | ||
9016 | return error; | ||
9017 | } | ||
9018 | |||
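The erase path computes the flash linear address of each hardware sector from the software segment number and the sector size read out of BERASE. A small sketch of that address arithmetic for the 256-byte-sector case; the constants below are assumptions standing in for the driver's ICH8_FLASH_SEG_SIZE_256, ICH8_FLASH_SECTOR_SIZE and ICH8_FLASH_LINEAR_ADDR_MASK:

    #include <stdint.h>

    #define SEG_SIZE_256   256
    #define SOFT_SEG_SIZE  4096
    #define LINEAR_MASK    0x00FFFFFF

    /* Start address of the n-th hardware sector inside a 4 KB software segment,
     * i.e. segment * 4096 + n * 256, masked to 24 bits and offset by the flash
     * base address, as the comment above describes. */
    static uint32_t erase_address(uint32_t segment, uint32_t n, uint32_t flash_base)
    {
        uint32_t addr = segment * SOFT_SEG_SIZE + n * SEG_SIZE_256;

        addr &= LINEAR_MASK;     /* only the low 24 bits go into FADDR */
        return addr + flash_base;
    }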
9019 | /****************************************************************************** | ||
9020 | * | ||
9021 | * Reverse duplex setting without breaking the link. | ||
9022 | * | ||
9023 | * hw: Struct containing variables accessed by shared code | ||
9024 | * | ||
9025 | *****************************************************************************/ | ||
9026 | #if 0 | ||
9027 | int32_t | ||
9028 | e1000_duplex_reversal(struct e1000_hw *hw) | ||
9029 | { | ||
9030 | int32_t ret_val; | ||
9031 | uint16_t phy_data; | ||
9032 | |||
9033 | if (hw->phy_type != e1000_phy_igp_3) | ||
9034 | return E1000_SUCCESS; | ||
9035 | |||
9036 | ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); | ||
9037 | if (ret_val) | ||
9038 | return ret_val; | ||
9039 | |||
9040 | phy_data ^= MII_CR_FULL_DUPLEX; | ||
9041 | |||
9042 | ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data); | ||
9043 | if (ret_val) | ||
9044 | return ret_val; | ||
9045 | |||
9046 | ret_val = e1000_read_phy_reg(hw, IGP3E1000_PHY_MISC_CTRL, &phy_data); | ||
9047 | if (ret_val) | ||
9048 | return ret_val; | ||
9049 | |||
9050 | phy_data |= IGP3_PHY_MISC_DUPLEX_MANUAL_SET; | ||
9051 | ret_val = e1000_write_phy_reg(hw, IGP3E1000_PHY_MISC_CTRL, phy_data); | ||
9052 | |||
9053 | return ret_val; | ||
9054 | } | ||
9055 | #endif /* 0 */ | ||
9056 | |||
9057 | static int32_t | ||
9058 | e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, | ||
9059 | uint32_t cnf_base_addr, uint32_t cnf_size) | ||
9060 | { | ||
9061 | uint32_t ret_val = E1000_SUCCESS; | ||
9062 | uint16_t word_addr, reg_data, reg_addr; | ||
9063 | uint16_t i; | ||
9064 | |||
9065 | /* cnf_base_addr is in DWORD */ | ||
9066 | word_addr = (uint16_t)(cnf_base_addr << 1); | ||
9067 | |||
9068 | /* cnf_size is given in dwords */ | ||
9069 | for (i = 0; i < cnf_size; i++) { | ||
9070 | ret_val = e1000_read_eeprom(hw, (word_addr + i*2), 1, ®_data); | ||
9071 | if (ret_val) | ||
9072 | return ret_val; | ||
9073 | |||
9074 | ret_val = e1000_read_eeprom(hw, (word_addr + i*2 + 1), 1, ®_addr); | ||
9075 | if (ret_val) | ||
9076 | return ret_val; | ||
9077 | |||
9078 | ret_val = e1000_get_software_flag(hw); | ||
9079 | if (ret_val != E1000_SUCCESS) | ||
9080 | return ret_val; | ||
9081 | |||
9082 | ret_val = e1000_write_phy_reg_ex(hw, (uint32_t)reg_addr, reg_data); | ||
9083 | |||
9084 | e1000_release_software_flag(hw); | ||
9085 | } | ||
9086 | |||
9087 | return ret_val; | ||
9088 | } | ||
9089 | |||
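e1000_init_lcd_from_nvm_config_region walks the extended configuration region as (data, address) word pairs: each dword holds the value to program and the PHY register it targets. A sketch of that loop; the helper names are assumptions standing in for e1000_read_eeprom and e1000_write_phy_reg_ex (which the driver also brackets with the software flag):

    #include <stdint.h>

    /* Assumed helpers; both return 0 on success. */
    extern int read_nvm_word(uint16_t word_addr, uint16_t *out);
    extern int write_phy_reg(uint32_t addr, uint16_t data);

    /* cnf_base_addr is a dword offset, hence the << 1 to get a word address.
     * Dword i holds the data word at word_addr + 2*i and the target PHY
     * register address at word_addr + 2*i + 1. */
    static int program_lcd(uint16_t cnf_base_addr_dwords, uint16_t cnf_size_dwords)
    {
        uint16_t word_addr = (uint16_t)(cnf_base_addr_dwords << 1);

        for (uint16_t i = 0; i < cnf_size_dwords; i++) {
            uint16_t reg_data, reg_addr;

            if (read_nvm_word(word_addr + i * 2, &reg_data) ||
                read_nvm_word(word_addr + i * 2 + 1, &reg_addr))
                return -1;

            if (write_phy_reg(reg_addr, reg_data))
                return -1;
        }
        return 0;
    }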
9090 | |||
9091 | static int32_t | ||
9092 | e1000_init_lcd_from_nvm(struct e1000_hw *hw) | ||
9093 | { | ||
9094 | uint32_t reg_data, cnf_base_addr, cnf_size, ret_val, loop; | ||
9095 | |||
9096 | if (hw->phy_type != e1000_phy_igp_3) | ||
9097 | return E1000_SUCCESS; | ||
9098 | |||
9099 | /* Check if SW needs to configure the PHY */ | ||
9100 | reg_data = E1000_READ_REG(hw, FEXTNVM); | ||
9101 | if (!(reg_data & FEXTNVM_SW_CONFIG)) | ||
9102 | return E1000_SUCCESS; | ||
9103 | |||
9104 | /* Wait for basic configuration to complete before proceeding */ | ||
9105 | loop = 0; | ||
9106 | do { | ||
9107 | reg_data = E1000_READ_REG(hw, STATUS) & E1000_STATUS_LAN_INIT_DONE; | ||
9108 | udelay(100); | ||
9109 | loop++; | ||
9110 | } while ((!reg_data) && (loop < 50)); | ||
9111 | |||
9112 | /* Clear the Init Done bit for the next init event */ | ||
9113 | reg_data = E1000_READ_REG(hw, STATUS); | ||
9114 | reg_data &= ~E1000_STATUS_LAN_INIT_DONE; | ||
9115 | E1000_WRITE_REG(hw, STATUS, reg_data); | ||
9116 | |||
9117 | /* Make sure HW does not configure LCD from PHY extended configuration | ||
9118 | before SW configuration */ | ||
9119 | reg_data = E1000_READ_REG(hw, EXTCNF_CTRL); | ||
9120 | if ((reg_data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) == 0x0000) { | ||
9121 | reg_data = E1000_READ_REG(hw, EXTCNF_SIZE); | ||
9122 | cnf_size = reg_data & E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH; | ||
9123 | cnf_size >>= 16; | ||
9124 | if (cnf_size) { | ||
9125 | reg_data = E1000_READ_REG(hw, EXTCNF_CTRL); | ||
9126 | cnf_base_addr = reg_data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER; | ||
9127 | /* cnf_base_addr is in DWORD */ | ||
9128 | cnf_base_addr >>= 16; | ||
9129 | |||
9130 | /* Configure LCD from extended configuration region. */ | ||
9131 | ret_val = e1000_init_lcd_from_nvm_config_region(hw, cnf_base_addr, | ||
9132 | cnf_size); | ||
9133 | if (ret_val) | ||
9134 | return ret_val; | ||
9135 | } | ||
9136 | } | ||
9137 | |||
9138 | return E1000_SUCCESS; | ||
9139 | } | ||
9140 | |||
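e1000_init_lcd_from_nvm pulls the config-region length out of EXTCNF_SIZE and the dword base pointer out of EXTCNF_CTRL, both sitting in the upper half of their registers. A sketch of that field extraction; the masks are assumptions standing in for E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH and E1000_EXTCNF_CTRL_EXT_CNF_POINTER:

    #include <stdint.h>

    #define EXT_PCIE_LENGTH_MASK 0x00FF0000   /* assumed field masks */
    #define EXT_CNF_POINTER_MASK 0x0FFF0000

    /* Decode the dword count and dword base pointer of the extended config
     * region from the EXTCNF_SIZE / EXTCNF_CTRL register values read above. */
    static void decode_extcnf(uint32_t extcnf_size, uint32_t extcnf_ctrl,
                              uint32_t *cnf_size, uint32_t *cnf_base_addr)
    {
        *cnf_size = (extcnf_size & EXT_PCIE_LENGTH_MASK) >> 16;
        *cnf_base_addr = (extcnf_ctrl & EXT_CNF_POINTER_MASK) >> 16;
    }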
9141 | |||
7559 | 9142 | ||
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h index 467c9ed944f8..375b95518c31 100644 --- a/drivers/net/e1000/e1000_hw.h +++ b/drivers/net/e1000/e1000_hw.h | |||
@@ -62,6 +62,7 @@ typedef enum { | |||
62 | e1000_82572, | 62 | e1000_82572, |
63 | e1000_82573, | 63 | e1000_82573, |
64 | e1000_80003es2lan, | 64 | e1000_80003es2lan, |
65 | e1000_ich8lan, | ||
65 | e1000_num_macs | 66 | e1000_num_macs |
66 | } e1000_mac_type; | 67 | } e1000_mac_type; |
67 | 68 | ||
@@ -70,6 +71,7 @@ typedef enum { | |||
70 | e1000_eeprom_spi, | 71 | e1000_eeprom_spi, |
71 | e1000_eeprom_microwire, | 72 | e1000_eeprom_microwire, |
72 | e1000_eeprom_flash, | 73 | e1000_eeprom_flash, |
74 | e1000_eeprom_ich8, | ||
73 | e1000_eeprom_none, /* No NVM support */ | 75 | e1000_eeprom_none, /* No NVM support */ |
74 | e1000_num_eeprom_types | 76 | e1000_num_eeprom_types |
75 | } e1000_eeprom_type; | 77 | } e1000_eeprom_type; |
@@ -98,6 +100,11 @@ typedef enum { | |||
98 | e1000_fc_default = 0xFF | 100 | e1000_fc_default = 0xFF |
99 | } e1000_fc_type; | 101 | } e1000_fc_type; |
100 | 102 | ||
103 | struct e1000_shadow_ram { | ||
104 | uint16_t eeprom_word; | ||
105 | boolean_t modified; | ||
106 | }; | ||
107 | |||
101 | /* PCI bus types */ | 108 | /* PCI bus types */ |
102 | typedef enum { | 109 | typedef enum { |
103 | e1000_bus_type_unknown = 0, | 110 | e1000_bus_type_unknown = 0, |
@@ -218,6 +225,8 @@ typedef enum { | |||
218 | e1000_phy_igp, | 225 | e1000_phy_igp, |
219 | e1000_phy_igp_2, | 226 | e1000_phy_igp_2, |
220 | e1000_phy_gg82563, | 227 | e1000_phy_gg82563, |
228 | e1000_phy_igp_3, | ||
229 | e1000_phy_ife, | ||
221 | e1000_phy_undefined = 0xFF | 230 | e1000_phy_undefined = 0xFF |
222 | } e1000_phy_type; | 231 | } e1000_phy_type; |
223 | 232 | ||
@@ -313,10 +322,9 @@ int32_t e1000_read_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *phy | |||
313 | int32_t e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data); | 322 | int32_t e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data); |
314 | int32_t e1000_phy_hw_reset(struct e1000_hw *hw); | 323 | int32_t e1000_phy_hw_reset(struct e1000_hw *hw); |
315 | int32_t e1000_phy_reset(struct e1000_hw *hw); | 324 | int32_t e1000_phy_reset(struct e1000_hw *hw); |
325 | void e1000_phy_powerdown_workaround(struct e1000_hw *hw); | ||
316 | int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); | 326 | int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); |
317 | int32_t e1000_validate_mdi_setting(struct e1000_hw *hw); | 327 | int32_t e1000_validate_mdi_setting(struct e1000_hw *hw); |
318 | int32_t e1000_read_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *data); | ||
319 | int32_t e1000_write_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data); | ||
320 | 328 | ||
321 | /* EEPROM Functions */ | 329 | /* EEPROM Functions */ |
322 | int32_t e1000_init_eeprom_params(struct e1000_hw *hw); | 330 | int32_t e1000_init_eeprom_params(struct e1000_hw *hw); |
@@ -331,6 +339,7 @@ uint32_t e1000_enable_mng_pass_thru(struct e1000_hw *hw); | |||
331 | #define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */ | 339 | #define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */ |
332 | #define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */ | 340 | #define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */ |
333 | #define E1000_MNG_IAMT_MODE 0x3 | 341 | #define E1000_MNG_IAMT_MODE 0x3 |
342 | #define E1000_MNG_ICH_IAMT_MODE 0x2 | ||
334 | #define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */ | 343 | #define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */ |
335 | 344 | ||
336 | #define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */ | 345 | #define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */ |
@@ -386,11 +395,8 @@ int32_t e1000_update_eeprom_checksum(struct e1000_hw *hw); | |||
386 | int32_t e1000_write_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data); | 395 | int32_t e1000_write_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data); |
387 | int32_t e1000_read_part_num(struct e1000_hw *hw, uint32_t * part_num); | 396 | int32_t e1000_read_part_num(struct e1000_hw *hw, uint32_t * part_num); |
388 | int32_t e1000_read_mac_addr(struct e1000_hw * hw); | 397 | int32_t e1000_read_mac_addr(struct e1000_hw * hw); |
389 | int32_t e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask); | ||
390 | void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask); | ||
391 | 398 | ||
392 | /* Filters (multicast, vlan, receive) */ | 399 | /* Filters (multicast, vlan, receive) */ |
393 | void e1000_mc_addr_list_update(struct e1000_hw *hw, uint8_t * mc_addr_list, uint32_t mc_addr_count, uint32_t pad, uint32_t rar_used_count); | ||
394 | uint32_t e1000_hash_mc_addr(struct e1000_hw *hw, uint8_t * mc_addr); | 400 | uint32_t e1000_hash_mc_addr(struct e1000_hw *hw, uint8_t * mc_addr); |
395 | void e1000_mta_set(struct e1000_hw *hw, uint32_t hash_value); | 401 | void e1000_mta_set(struct e1000_hw *hw, uint32_t hash_value); |
396 | void e1000_rar_set(struct e1000_hw *hw, uint8_t * mc_addr, uint32_t rar_index); | 402 | void e1000_rar_set(struct e1000_hw *hw, uint8_t * mc_addr, uint32_t rar_index); |
@@ -401,6 +407,7 @@ int32_t e1000_setup_led(struct e1000_hw *hw); | |||
401 | int32_t e1000_cleanup_led(struct e1000_hw *hw); | 407 | int32_t e1000_cleanup_led(struct e1000_hw *hw); |
402 | int32_t e1000_led_on(struct e1000_hw *hw); | 408 | int32_t e1000_led_on(struct e1000_hw *hw); |
403 | int32_t e1000_led_off(struct e1000_hw *hw); | 409 | int32_t e1000_led_off(struct e1000_hw *hw); |
410 | int32_t e1000_blink_led_start(struct e1000_hw *hw); | ||
404 | 411 | ||
405 | /* Adaptive IFS Functions */ | 412 | /* Adaptive IFS Functions */ |
406 | 413 | ||
@@ -414,15 +421,16 @@ void e1000_pci_clear_mwi(struct e1000_hw *hw); | |||
414 | void e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value); | 421 | void e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value); |
415 | void e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value); | 422 | void e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value); |
416 | /* Port I/O is only supported on 82544 and newer */ | 423 | /* Port I/O is only supported on 82544 and newer */ |
417 | uint32_t e1000_io_read(struct e1000_hw *hw, unsigned long port); | ||
418 | uint32_t e1000_read_reg_io(struct e1000_hw *hw, uint32_t offset); | ||
419 | void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value); | 424 | void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value); |
420 | void e1000_enable_pciex_master(struct e1000_hw *hw); | ||
421 | int32_t e1000_disable_pciex_master(struct e1000_hw *hw); | 425 | int32_t e1000_disable_pciex_master(struct e1000_hw *hw); |
422 | int32_t e1000_get_software_semaphore(struct e1000_hw *hw); | ||
423 | void e1000_release_software_semaphore(struct e1000_hw *hw); | ||
424 | int32_t e1000_check_phy_reset_block(struct e1000_hw *hw); | 426 | int32_t e1000_check_phy_reset_block(struct e1000_hw *hw); |
425 | 427 | ||
428 | |||
429 | #define E1000_READ_REG_IO(a, reg) \ | ||
430 | e1000_read_reg_io((a), E1000_##reg) | ||
431 | #define E1000_WRITE_REG_IO(a, reg, val) \ | ||
432 | e1000_write_reg_io((a), E1000_##reg, val) | ||
433 | |||
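The E1000_READ_REG_IO/E1000_WRITE_REG_IO macros above rely on token pasting so callers pass a bare register name and the E1000_ prefix is glued on at preprocessing time. A stripped-down, hypothetical illustration of the expansion; the read_reg() accessor is a stand-in, and only the offset constant mirrors the header:

    #include <stdio.h>

    #define E1000_STATUS 0x00008                 /* assumed register offset */

    static unsigned int read_reg(unsigned int offset) { return offset; /* stub */ }

    #define READ_REG_IO(reg) read_reg(E1000_##reg)

    int main(void)
    {
        /* READ_REG_IO(STATUS) expands to read_reg(E1000_STATUS) */
        printf("0x%x\n", READ_REG_IO(STATUS));
        return 0;
    }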
426 | /* PCI Device IDs */ | 434 | /* PCI Device IDs */ |
427 | #define E1000_DEV_ID_82542 0x1000 | 435 | #define E1000_DEV_ID_82542 0x1000 |
428 | #define E1000_DEV_ID_82543GC_FIBER 0x1001 | 436 | #define E1000_DEV_ID_82543GC_FIBER 0x1001 |
@@ -446,6 +454,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw); | |||
446 | #define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D | 454 | #define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D |
447 | #define E1000_DEV_ID_82541EI 0x1013 | 455 | #define E1000_DEV_ID_82541EI 0x1013 |
448 | #define E1000_DEV_ID_82541EI_MOBILE 0x1018 | 456 | #define E1000_DEV_ID_82541EI_MOBILE 0x1018 |
457 | #define E1000_DEV_ID_82541ER_LOM 0x1014 | ||
449 | #define E1000_DEV_ID_82541ER 0x1078 | 458 | #define E1000_DEV_ID_82541ER 0x1078 |
450 | #define E1000_DEV_ID_82547GI 0x1075 | 459 | #define E1000_DEV_ID_82547GI 0x1075 |
451 | #define E1000_DEV_ID_82541GI 0x1076 | 460 | #define E1000_DEV_ID_82541GI 0x1076 |
@@ -457,18 +466,28 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw); | |||
457 | #define E1000_DEV_ID_82546GB_PCIE 0x108A | 466 | #define E1000_DEV_ID_82546GB_PCIE 0x108A |
458 | #define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099 | 467 | #define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099 |
459 | #define E1000_DEV_ID_82547EI 0x1019 | 468 | #define E1000_DEV_ID_82547EI 0x1019 |
469 | #define E1000_DEV_ID_82547EI_MOBILE 0x101A | ||
460 | #define E1000_DEV_ID_82571EB_COPPER 0x105E | 470 | #define E1000_DEV_ID_82571EB_COPPER 0x105E |
461 | #define E1000_DEV_ID_82571EB_FIBER 0x105F | 471 | #define E1000_DEV_ID_82571EB_FIBER 0x105F |
462 | #define E1000_DEV_ID_82571EB_SERDES 0x1060 | 472 | #define E1000_DEV_ID_82571EB_SERDES 0x1060 |
463 | #define E1000_DEV_ID_82572EI_COPPER 0x107D | 473 | #define E1000_DEV_ID_82572EI_COPPER 0x107D |
464 | #define E1000_DEV_ID_82572EI_FIBER 0x107E | 474 | #define E1000_DEV_ID_82572EI_FIBER 0x107E |
465 | #define E1000_DEV_ID_82572EI_SERDES 0x107F | 475 | #define E1000_DEV_ID_82572EI_SERDES 0x107F |
476 | #define E1000_DEV_ID_82572EI 0x10B9 | ||
466 | #define E1000_DEV_ID_82573E 0x108B | 477 | #define E1000_DEV_ID_82573E 0x108B |
467 | #define E1000_DEV_ID_82573E_IAMT 0x108C | 478 | #define E1000_DEV_ID_82573E_IAMT 0x108C |
468 | #define E1000_DEV_ID_82573L 0x109A | 479 | #define E1000_DEV_ID_82573L 0x109A |
469 | #define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 | 480 | #define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 |
470 | #define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096 | 481 | #define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096 |
471 | #define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098 | 482 | #define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098 |
483 | #define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA | ||
484 | #define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB | ||
485 | |||
486 | #define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049 | ||
487 | #define E1000_DEV_ID_ICH8_IGP_AMT 0x104A | ||
488 | #define E1000_DEV_ID_ICH8_IGP_C 0x104B | ||
489 | #define E1000_DEV_ID_ICH8_IFE 0x104C | ||
490 | #define E1000_DEV_ID_ICH8_IGP_M 0x104D | ||
472 | 491 | ||
473 | 492 | ||
474 | #define NODE_ADDRESS_SIZE 6 | 493 | #define NODE_ADDRESS_SIZE 6 |
@@ -539,6 +558,14 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw); | |||
539 | E1000_IMS_RXSEQ | \ | 558 | E1000_IMS_RXSEQ | \ |
540 | E1000_IMS_LSC) | 559 | E1000_IMS_LSC) |
541 | 560 | ||
561 | /* Additional interrupts need to be handled for e1000_ich8lan: | ||
562 | DSW = The FW changed the status of the DISSW bit in FWSM | ||
563 | PHYINT = The LAN connected device generates an interrupt | ||
564 | EPRST = Manageability reset event */ | ||
565 | #define IMS_ICH8LAN_ENABLE_MASK (\ | ||
566 | E1000_IMS_DSW | \ | ||
567 | E1000_IMS_PHYINT | \ | ||
568 | E1000_IMS_EPRST) | ||
542 | 569 | ||
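Presumably the driver folds IMS_ICH8LAN_ENABLE_MASK into the value it writes to IMS once it detects an ICH8 MAC. A self-contained sketch of that OR step; the bit values are copied from the definitions above, while the enum, the base mask, and the printout are invented for illustration:

    #include <stdio.h>

    #define E1000_IMS_DSW     0x00000020
    #define E1000_IMS_PHYINT  0x00001000
    #define E1000_IMS_EPRST   0x00100000
    #define IMS_ICH8LAN_ENABLE_MASK (E1000_IMS_DSW | E1000_IMS_PHYINT | E1000_IMS_EPRST)

    enum mac { e1000_82573, e1000_ich8lan };

    int main(void)
    {
        enum mac mac_type = e1000_ich8lan;
        unsigned int ims = 0x000000FF;           /* stand-in for the normal enable mask */

        if (mac_type == e1000_ich8lan)
            ims |= IMS_ICH8LAN_ENABLE_MASK;      /* add DSW, PHYINT, EPRST causes */

        printf("IMS = 0x%08x\n", ims);
        return 0;
    }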
543 | /* Number of high/low register pairs in the RAR. The RAR (Receive Address | 570 | /* Number of high/low register pairs in the RAR. The RAR (Receive Address |
544 | * Registers) holds the directed and multicast addresses that we monitor. We | 571 | * Registers) holds the directed and multicast addresses that we monitor. We |
@@ -546,6 +573,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw); | |||
546 | * E1000_RAR_ENTRIES - 1 multicast addresses. | 573 | * E1000_RAR_ENTRIES - 1 multicast addresses. |
547 | */ | 574 | */ |
548 | #define E1000_RAR_ENTRIES 15 | 575 | #define E1000_RAR_ENTRIES 15 |
576 | #define E1000_RAR_ENTRIES_ICH8LAN 7 | ||
549 | 577 | ||
550 | #define MIN_NUMBER_OF_DESCRIPTORS 8 | 578 | #define MIN_NUMBER_OF_DESCRIPTORS 8 |
551 | #define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8 | 579 | #define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8 |
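As the RAR comment above explains, one slot holds the station address and the rest are free for other addresses; ICH8 parts expose a smaller table. A trivial sketch of that budgeting, with the probe result invented for the example:

    #include <stdio.h>

    #define E1000_RAR_ENTRIES          15
    #define E1000_RAR_ENTRIES_ICH8LAN   7

    int main(void)
    {
        int is_ich8 = 1;                 /* pretend we probed an ICH8 MAC */
        int entries = is_ich8 ? E1000_RAR_ENTRIES_ICH8LAN : E1000_RAR_ENTRIES;

        /* one entry is reserved for the directed (station) address */
        printf("%d RAR slots left after the station address\n", entries - 1);
        return 0;
    }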
@@ -767,6 +795,9 @@ struct e1000_data_desc { | |||
767 | #define E1000_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */ | 795 | #define E1000_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */ |
768 | #define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ | 796 | #define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ |
769 | 797 | ||
798 | #define E1000_NUM_UNICAST_ICH8LAN 7 | ||
799 | #define E1000_MC_TBL_SIZE_ICH8LAN 32 | ||
800 | |||
770 | 801 | ||
771 | /* Receive Address Register */ | 802 | /* Receive Address Register */ |
772 | struct e1000_rar { | 803 | struct e1000_rar { |
@@ -776,6 +807,7 @@ struct e1000_rar { | |||
776 | 807 | ||
777 | /* Number of entries in the Multicast Table Array (MTA). */ | 808 | /* Number of entries in the Multicast Table Array (MTA). */ |
778 | #define E1000_NUM_MTA_REGISTERS 128 | 809 | #define E1000_NUM_MTA_REGISTERS 128 |
810 | #define E1000_NUM_MTA_REGISTERS_ICH8LAN 32 | ||
779 | 811 | ||
780 | /* IPv4 Address Table Entry */ | 812 | /* IPv4 Address Table Entry */ |
781 | struct e1000_ipv4_at_entry { | 813 | struct e1000_ipv4_at_entry { |
@@ -786,6 +818,7 @@ struct e1000_ipv4_at_entry { | |||
786 | /* Four wakeup IP addresses are supported */ | 818 | /* Four wakeup IP addresses are supported */ |
787 | #define E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 4 | 819 | #define E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 4 |
788 | #define E1000_IP4AT_SIZE E1000_WAKEUP_IP_ADDRESS_COUNT_MAX | 820 | #define E1000_IP4AT_SIZE E1000_WAKEUP_IP_ADDRESS_COUNT_MAX |
821 | #define E1000_IP4AT_SIZE_ICH8LAN 3 | ||
789 | #define E1000_IP6AT_SIZE 1 | 822 | #define E1000_IP6AT_SIZE 1 |
790 | 823 | ||
791 | /* IPv6 Address Table Entry */ | 824 | /* IPv6 Address Table Entry */ |
@@ -844,6 +877,7 @@ struct e1000_ffvt_entry { | |||
844 | #define E1000_FLA 0x0001C /* Flash Access - RW */ | 877 | #define E1000_FLA 0x0001C /* Flash Access - RW */ |
845 | #define E1000_MDIC 0x00020 /* MDI Control - RW */ | 878 | #define E1000_MDIC 0x00020 /* MDI Control - RW */ |
846 | #define E1000_SCTL 0x00024 /* SerDes Control - RW */ | 879 | #define E1000_SCTL 0x00024 /* SerDes Control - RW */ |
880 | #define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */ | ||
847 | #define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ | 881 | #define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ |
848 | #define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ | 882 | #define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ |
849 | #define E1000_FCT 0x00030 /* Flow Control Type - RW */ | 883 | #define E1000_FCT 0x00030 /* Flow Control Type - RW */ |
@@ -872,6 +906,8 @@ struct e1000_ffvt_entry { | |||
872 | #define E1000_LEDCTL 0x00E00 /* LED Control - RW */ | 906 | #define E1000_LEDCTL 0x00E00 /* LED Control - RW */ |
873 | #define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ | 907 | #define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ |
874 | #define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ | 908 | #define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ |
909 | #define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ | ||
910 | #define FEXTNVM_SW_CONFIG 0x0001 | ||
875 | #define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ | 911 | #define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ |
876 | #define E1000_PBS 0x01008 /* Packet Buffer Size */ | 912 | #define E1000_PBS 0x01008 /* Packet Buffer Size */ |
877 | #define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ | 913 | #define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ |
@@ -899,11 +935,13 @@ struct e1000_ffvt_entry { | |||
899 | #define E1000_RDH0 E1000_RDH /* RX Desc Head (0) - RW */ | 935 | #define E1000_RDH0 E1000_RDH /* RX Desc Head (0) - RW */ |
900 | #define E1000_RDT0 E1000_RDT /* RX Desc Tail (0) - RW */ | 936 | #define E1000_RDT0 E1000_RDT /* RX Desc Tail (0) - RW */ |
901 | #define E1000_RDTR0 E1000_RDTR /* RX Delay Timer (0) - RW */ | 937 | #define E1000_RDTR0 E1000_RDTR /* RX Delay Timer (0) - RW */ |
902 | #define E1000_RXDCTL 0x02828 /* RX Descriptor Control - RW */ | 938 | #define E1000_RXDCTL 0x02828 /* RX Descriptor Control queue 0 - RW */ |
939 | #define E1000_RXDCTL1 0x02928 /* RX Descriptor Control queue 1 - RW */ | ||
903 | #define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */ | 940 | #define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */ |
904 | #define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */ | 941 | #define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */ |
905 | #define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ | 942 | #define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ |
906 | #define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */ | 943 | #define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */ |
944 | #define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */ | ||
907 | #define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ | 945 | #define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ |
908 | #define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ | 946 | #define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ |
909 | #define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */ | 947 | #define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */ |
@@ -1050,6 +1088,7 @@ struct e1000_ffvt_entry { | |||
1050 | #define E1000_82542_FLA E1000_FLA | 1088 | #define E1000_82542_FLA E1000_FLA |
1051 | #define E1000_82542_MDIC E1000_MDIC | 1089 | #define E1000_82542_MDIC E1000_MDIC |
1052 | #define E1000_82542_SCTL E1000_SCTL | 1090 | #define E1000_82542_SCTL E1000_SCTL |
1091 | #define E1000_82542_FEXTNVM E1000_FEXTNVM | ||
1053 | #define E1000_82542_FCAL E1000_FCAL | 1092 | #define E1000_82542_FCAL E1000_FCAL |
1054 | #define E1000_82542_FCAH E1000_FCAH | 1093 | #define E1000_82542_FCAH E1000_FCAH |
1055 | #define E1000_82542_FCT E1000_FCT | 1094 | #define E1000_82542_FCT E1000_FCT |
@@ -1073,6 +1112,19 @@ struct e1000_ffvt_entry { | |||
1073 | #define E1000_82542_RDLEN0 E1000_82542_RDLEN | 1112 | #define E1000_82542_RDLEN0 E1000_82542_RDLEN |
1074 | #define E1000_82542_RDH0 E1000_82542_RDH | 1113 | #define E1000_82542_RDH0 E1000_82542_RDH |
1075 | #define E1000_82542_RDT0 E1000_82542_RDT | 1114 | #define E1000_82542_RDT0 E1000_82542_RDT |
1115 | #define E1000_82542_SRRCTL(_n) (0x280C + ((_n) << 8)) /* Split and Replication | ||
1116 | * RX Control - RW */ | ||
1117 | #define E1000_82542_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8)) | ||
1118 | #define E1000_82542_RDBAH3 0x02B04 /* RX Desc Base High Queue 3 - RW */ | ||
1119 | #define E1000_82542_RDBAL3 0x02B00 /* RX Desc Low Queue 3 - RW */ | ||
1120 | #define E1000_82542_RDLEN3 0x02B08 /* RX Desc Length Queue 3 - RW */ | ||
1121 | #define E1000_82542_RDH3 0x02B10 /* RX Desc Head Queue 3 - RW */ | ||
1122 | #define E1000_82542_RDT3 0x02B18 /* RX Desc Tail Queue 3 - RW */ | ||
1123 | #define E1000_82542_RDBAL2 0x02A00 /* RX Desc Base Low Queue 2 - RW */ | ||
1124 | #define E1000_82542_RDBAH2 0x02A04 /* RX Desc Base High Queue 2 - RW */ | ||
1125 | #define E1000_82542_RDLEN2 0x02A08 /* RX Desc Length Queue 2 - RW */ | ||
1126 | #define E1000_82542_RDH2 0x02A10 /* RX Desc Head Queue 2 - RW */ | ||
1127 | #define E1000_82542_RDT2 0x02A18 /* RX Desc Tail Queue 2 - RW */ | ||
1076 | #define E1000_82542_RDTR1 0x00130 | 1128 | #define E1000_82542_RDTR1 0x00130 |
1077 | #define E1000_82542_RDBAL1 0x00138 | 1129 | #define E1000_82542_RDBAL1 0x00138 |
1078 | #define E1000_82542_RDBAH1 0x0013C | 1130 | #define E1000_82542_RDBAH1 0x0013C |
@@ -1110,11 +1162,14 @@ struct e1000_ffvt_entry { | |||
1110 | #define E1000_82542_FLOP E1000_FLOP | 1162 | #define E1000_82542_FLOP E1000_FLOP |
1111 | #define E1000_82542_EXTCNF_CTRL E1000_EXTCNF_CTRL | 1163 | #define E1000_82542_EXTCNF_CTRL E1000_EXTCNF_CTRL |
1112 | #define E1000_82542_EXTCNF_SIZE E1000_EXTCNF_SIZE | 1164 | #define E1000_82542_EXTCNF_SIZE E1000_EXTCNF_SIZE |
1165 | #define E1000_82542_PHY_CTRL E1000_PHY_CTRL | ||
1113 | #define E1000_82542_ERT E1000_ERT | 1166 | #define E1000_82542_ERT E1000_ERT |
1114 | #define E1000_82542_RXDCTL E1000_RXDCTL | 1167 | #define E1000_82542_RXDCTL E1000_RXDCTL |
1168 | #define E1000_82542_RXDCTL1 E1000_RXDCTL1 | ||
1115 | #define E1000_82542_RADV E1000_RADV | 1169 | #define E1000_82542_RADV E1000_RADV |
1116 | #define E1000_82542_RSRPD E1000_RSRPD | 1170 | #define E1000_82542_RSRPD E1000_RSRPD |
1117 | #define E1000_82542_TXDMAC E1000_TXDMAC | 1171 | #define E1000_82542_TXDMAC E1000_TXDMAC |
1172 | #define E1000_82542_KABGTXD E1000_KABGTXD | ||
1118 | #define E1000_82542_TDFHS E1000_TDFHS | 1173 | #define E1000_82542_TDFHS E1000_TDFHS |
1119 | #define E1000_82542_TDFTS E1000_TDFTS | 1174 | #define E1000_82542_TDFTS E1000_TDFTS |
1120 | #define E1000_82542_TDFPC E1000_TDFPC | 1175 | #define E1000_82542_TDFPC E1000_TDFPC |
@@ -1310,13 +1365,16 @@ struct e1000_hw_stats { | |||
1310 | 1365 | ||
1311 | /* Structure containing variables used by the shared code (e1000_hw.c) */ | 1366 | /* Structure containing variables used by the shared code (e1000_hw.c) */ |
1312 | struct e1000_hw { | 1367 | struct e1000_hw { |
1313 | uint8_t __iomem *hw_addr; | 1368 | uint8_t *hw_addr; |
1314 | uint8_t *flash_address; | 1369 | uint8_t *flash_address; |
1315 | e1000_mac_type mac_type; | 1370 | e1000_mac_type mac_type; |
1316 | e1000_phy_type phy_type; | 1371 | e1000_phy_type phy_type; |
1317 | uint32_t phy_init_script; | 1372 | uint32_t phy_init_script; |
1318 | e1000_media_type media_type; | 1373 | e1000_media_type media_type; |
1319 | void *back; | 1374 | void *back; |
1375 | struct e1000_shadow_ram *eeprom_shadow_ram; | ||
1376 | uint32_t flash_bank_size; | ||
1377 | uint32_t flash_base_addr; | ||
1320 | e1000_fc_type fc; | 1378 | e1000_fc_type fc; |
1321 | e1000_bus_speed bus_speed; | 1379 | e1000_bus_speed bus_speed; |
1322 | e1000_bus_width bus_width; | 1380 | e1000_bus_width bus_width; |
@@ -1328,6 +1386,7 @@ struct e1000_hw { | |||
1328 | uint32_t asf_firmware_present; | 1386 | uint32_t asf_firmware_present; |
1329 | uint32_t eeprom_semaphore_present; | 1387 | uint32_t eeprom_semaphore_present; |
1330 | uint32_t swfw_sync_present; | 1388 | uint32_t swfw_sync_present; |
1389 | uint32_t swfwhw_semaphore_present; | ||
1331 | unsigned long io_base; | 1390 | unsigned long io_base; |
1332 | uint32_t phy_id; | 1391 | uint32_t phy_id; |
1333 | uint32_t phy_revision; | 1392 | uint32_t phy_revision; |
@@ -1387,6 +1446,7 @@ struct e1000_hw { | |||
1387 | boolean_t in_ifs_mode; | 1446 | boolean_t in_ifs_mode; |
1388 | boolean_t mng_reg_access_disabled; | 1447 | boolean_t mng_reg_access_disabled; |
1389 | boolean_t leave_av_bit_off; | 1448 | boolean_t leave_av_bit_off; |
1449 | boolean_t kmrn_lock_loss_workaround_disabled; | ||
1390 | }; | 1450 | }; |
1391 | 1451 | ||
1392 | 1452 | ||
@@ -1435,6 +1495,7 @@ struct e1000_hw { | |||
1435 | #define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */ | 1495 | #define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */ |
1436 | #define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ | 1496 | #define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ |
1437 | #define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ | 1497 | #define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ |
1498 | #define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to manageability engine */ | ||
1438 | 1499 | ||
1439 | /* Device Status */ | 1500 | /* Device Status */ |
1440 | #define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ | 1501 | #define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ |
@@ -1449,6 +1510,8 @@ struct e1000_hw { | |||
1449 | #define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ | 1510 | #define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ |
1450 | #define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ | 1511 | #define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ |
1451 | #define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ | 1512 | #define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ |
1513 | #define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion | ||
1514 | by EEPROM/Flash */ | ||
1452 | #define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */ | 1515 | #define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */ |
1453 | #define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */ | 1516 | #define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */ |
1454 | #define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */ | 1517 | #define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */ |
@@ -1506,6 +1569,10 @@ struct e1000_hw { | |||
1506 | #define E1000_STM_OPCODE 0xDB00 | 1569 | #define E1000_STM_OPCODE 0xDB00 |
1507 | #define E1000_HICR_FW_RESET 0xC0 | 1570 | #define E1000_HICR_FW_RESET 0xC0 |
1508 | 1571 | ||
1572 | #define E1000_SHADOW_RAM_WORDS 2048 | ||
1573 | #define E1000_ICH8_NVM_SIG_WORD 0x13 | ||
1574 | #define E1000_ICH8_NVM_SIG_MASK 0xC0 | ||
1575 | |||
1509 | /* EEPROM Read */ | 1576 | /* EEPROM Read */ |
1510 | #define E1000_EERD_START 0x00000001 /* Start Read */ | 1577 | #define E1000_EERD_START 0x00000001 /* Start Read */ |
1511 | #define E1000_EERD_DONE 0x00000010 /* Read Done */ | 1578 | #define E1000_EERD_DONE 0x00000010 /* Read Done */ |
@@ -1551,7 +1618,6 @@ struct e1000_hw { | |||
1551 | #define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 | 1618 | #define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 |
1552 | #define E1000_CTRL_EXT_WR_WMARK_384 0x02000000 | 1619 | #define E1000_CTRL_EXT_WR_WMARK_384 0x02000000 |
1553 | #define E1000_CTRL_EXT_WR_WMARK_448 0x03000000 | 1620 | #define E1000_CTRL_EXT_WR_WMARK_448 0x03000000 |
1554 | #define E1000_CTRL_EXT_CANC 0x04000000 /* Interrupt delay cancellation */ | ||
1555 | #define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ | 1621 | #define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ |
1556 | #define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ | 1622 | #define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ |
1557 | #define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ | 1623 | #define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ |
@@ -1591,12 +1657,31 @@ struct e1000_hw { | |||
1591 | #define E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS 0x00000800 | 1657 | #define E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS 0x00000800 |
1592 | 1658 | ||
1593 | /* In-Band Control */ | 1659 | /* In-Band Control */ |
1660 | #define E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT 0x00000500 | ||
1594 | #define E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING 0x00000010 | 1661 | #define E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING 0x00000010 |
1595 | 1662 | ||
1596 | /* Half-Duplex Control */ | 1663 | /* Half-Duplex Control */ |
1597 | #define E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT 0x00000004 | 1664 | #define E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT 0x00000004 |
1598 | #define E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT 0x00000000 | 1665 | #define E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT 0x00000000 |
1599 | 1666 | ||
1667 | #define E1000_KUMCTRLSTA_OFFSET_K0S_CTRL 0x0000001E | ||
1668 | |||
1669 | #define E1000_KUMCTRLSTA_DIAG_FELPBK 0x2000 | ||
1670 | #define E1000_KUMCTRLSTA_DIAG_NELPBK 0x1000 | ||
1671 | |||
1672 | #define E1000_KUMCTRLSTA_K0S_100_EN 0x2000 | ||
1673 | #define E1000_KUMCTRLSTA_K0S_GBE_EN 0x1000 | ||
1674 | #define E1000_KUMCTRLSTA_K0S_ENTRY_LATENCY_MASK 0x0003 | ||
1675 | |||
1676 | #define E1000_KABGTXD_BGSQLBIAS 0x00050000 | ||
1677 | |||
1678 | #define E1000_PHY_CTRL_SPD_EN 0x00000001 | ||
1679 | #define E1000_PHY_CTRL_D0A_LPLU 0x00000002 | ||
1680 | #define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 | ||
1681 | #define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 | ||
1682 | #define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 | ||
1683 | #define E1000_PHY_CTRL_B2B_EN 0x00000080 | ||
1684 | |||
1600 | /* LED Control */ | 1685 | /* LED Control */ |
1601 | #define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F | 1686 | #define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F |
1602 | #define E1000_LEDCTL_LED0_MODE_SHIFT 0 | 1687 | #define E1000_LEDCTL_LED0_MODE_SHIFT 0 |
@@ -1666,6 +1751,9 @@ struct e1000_hw { | |||
1666 | #define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */ | 1751 | #define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */ |
1667 | #define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */ | 1752 | #define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */ |
1668 | #define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */ | 1753 | #define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */ |
1754 | #define E1000_ICR_DSW 0x00000020 /* FW changed the status of DISSW bit in the FWSM */ | ||
1755 | #define E1000_ICR_PHYINT 0x00001000 /* LAN connected device generates an interrupt */ | ||
1756 | #define E1000_ICR_EPRST 0x00100000 /* ME hardware reset occurs */ | ||
1669 | 1757 | ||
1670 | /* Interrupt Cause Set */ | 1758 | /* Interrupt Cause Set */ |
1671 | #define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ | 1759 | #define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ |
@@ -1692,6 +1780,9 @@ struct e1000_hw { | |||
1692 | #define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ | 1780 | #define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ |
1693 | #define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ | 1781 | #define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ |
1694 | #define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ | 1782 | #define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ |
1783 | #define E1000_ICS_DSW E1000_ICR_DSW | ||
1784 | #define E1000_ICS_PHYINT E1000_ICR_PHYINT | ||
1785 | #define E1000_ICS_EPRST E1000_ICR_EPRST | ||
1695 | 1786 | ||
1696 | /* Interrupt Mask Set */ | 1787 | /* Interrupt Mask Set */ |
1697 | #define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ | 1788 | #define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ |
@@ -1718,6 +1809,9 @@ struct e1000_hw { | |||
1718 | #define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ | 1809 | #define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ |
1719 | #define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ | 1810 | #define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ |
1720 | #define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ | 1811 | #define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ |
1812 | #define E1000_IMS_DSW E1000_ICR_DSW | ||
1813 | #define E1000_IMS_PHYINT E1000_ICR_PHYINT | ||
1814 | #define E1000_IMS_EPRST E1000_ICR_EPRST | ||
1721 | 1815 | ||
1722 | /* Interrupt Mask Clear */ | 1816 | /* Interrupt Mask Clear */ |
1723 | #define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */ | 1817 | #define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */ |
@@ -1744,6 +1838,9 @@ struct e1000_hw { | |||
1744 | #define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ | 1838 | #define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ |
1745 | #define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ | 1839 | #define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ |
1746 | #define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ | 1840 | #define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ |
1841 | #define E1000_IMC_DSW E1000_ICR_DSW | ||
1842 | #define E1000_IMC_PHYINT E1000_ICR_PHYINT | ||
1843 | #define E1000_IMC_EPRST E1000_ICR_EPRST | ||
1747 | 1844 | ||
1748 | /* Receive Control */ | 1845 | /* Receive Control */ |
1749 | #define E1000_RCTL_RST 0x00000001 /* Software reset */ | 1846 | #define E1000_RCTL_RST 0x00000001 /* Software reset */ |
@@ -1918,9 +2015,10 @@ struct e1000_hw { | |||
1918 | #define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 | 2015 | #define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 |
1919 | #define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 | 2016 | #define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 |
1920 | #define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 | 2017 | #define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 |
1921 | #define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00040000 | 2018 | #define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 |
1922 | #define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000 | 2019 | #define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000 |
1923 | #define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 | 2020 | #define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 |
2021 | #define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 | ||
1924 | 2022 | ||
1925 | /* Definitions for power management and wakeup registers */ | 2023 | /* Definitions for power management and wakeup registers */ |
1926 | /* Wake Up Control */ | 2024 | /* Wake Up Control */ |
@@ -2010,6 +2108,15 @@ struct e1000_hw { | |||
2010 | #define E1000_FWSM_MODE_SHIFT 1 | 2108 | #define E1000_FWSM_MODE_SHIFT 1 |
2011 | #define E1000_FWSM_FW_VALID 0x00008000 /* FW established a valid mode */ | 2109 | #define E1000_FWSM_FW_VALID 0x00008000 /* FW established a valid mode */ |
2012 | 2110 | ||
2111 | #define E1000_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI reset */ | ||
2112 | #define E1000_FWSM_DISSW 0x10000000 /* FW disable SW Write Access */ | ||
2113 | #define E1000_FWSM_SKUSEL_MASK 0x60000000 /* LAN SKU select */ | ||
2114 | #define E1000_FWSM_SKUEL_SHIFT 29 | ||
2115 | #define E1000_FWSM_SKUSEL_EMB 0x0 /* Embedded SKU */ | ||
2116 | #define E1000_FWSM_SKUSEL_CONS 0x1 /* Consumer SKU */ | ||
2117 | #define E1000_FWSM_SKUSEL_PERF_100 0x2 /* Perf & Corp 10/100 SKU */ | ||
2118 | #define E1000_FWSM_SKUSEL_PERF_GBE 0x3 /* Perf & Corp GbE SKU */ | ||
2119 | |||
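The SKU-select field added above is a two-bit value at bits 30:29 of FWSM. A small sketch of decoding it with the mask/shift pair; the FWSM readout is an invented example, and note the shift macro really is spelled SKUEL in the header:

    #include <stdio.h>

    #define E1000_FWSM_SKUSEL_MASK   0x60000000
    #define E1000_FWSM_SKUEL_SHIFT   29
    #define E1000_FWSM_SKUSEL_CONS   0x1

    int main(void)
    {
        unsigned int fwsm = 0x20000000;                 /* example FWSM readout */
        unsigned int sku = (fwsm & E1000_FWSM_SKUSEL_MASK) >> E1000_FWSM_SKUEL_SHIFT;

        printf("SKU select = %u (%s)\n", sku,
               sku == E1000_FWSM_SKUSEL_CONS ? "consumer" : "other");
        return 0;
    }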
2013 | /* FFLT Debug Register */ | 2120 | /* FFLT Debug Register */ |
2014 | #define E1000_FFLT_DBG_INVC 0x00100000 /* Invalid /C/ code handling */ | 2121 | #define E1000_FFLT_DBG_INVC 0x00100000 /* Invalid /C/ code handling */ |
2015 | 2122 | ||
@@ -2082,6 +2189,8 @@ struct e1000_host_command_info { | |||
2082 | E1000_GCR_TXDSCW_NO_SNOOP | \ | 2189 | E1000_GCR_TXDSCW_NO_SNOOP | \ |
2083 | E1000_GCR_TXDSCR_NO_SNOOP) | 2190 | E1000_GCR_TXDSCR_NO_SNOOP) |
2084 | 2191 | ||
2192 | #define PCI_EX_82566_SNOOP_ALL PCI_EX_NO_SNOOP_ALL | ||
2193 | |||
2085 | #define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 | 2194 | #define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 |
2086 | /* Function Active and Power State to MNG */ | 2195 | /* Function Active and Power State to MNG */ |
2087 | #define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003 | 2196 | #define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003 |
@@ -2140,8 +2249,10 @@ struct e1000_host_command_info { | |||
2140 | #define EEPROM_PHY_CLASS_WORD 0x0007 | 2249 | #define EEPROM_PHY_CLASS_WORD 0x0007 |
2141 | #define EEPROM_INIT_CONTROL1_REG 0x000A | 2250 | #define EEPROM_INIT_CONTROL1_REG 0x000A |
2142 | #define EEPROM_INIT_CONTROL2_REG 0x000F | 2251 | #define EEPROM_INIT_CONTROL2_REG 0x000F |
2252 | #define EEPROM_SWDEF_PINS_CTRL_PORT_1 0x0010 | ||
2143 | #define EEPROM_INIT_CONTROL3_PORT_B 0x0014 | 2253 | #define EEPROM_INIT_CONTROL3_PORT_B 0x0014 |
2144 | #define EEPROM_INIT_3GIO_3 0x001A | 2254 | #define EEPROM_INIT_3GIO_3 0x001A |
2255 | #define EEPROM_SWDEF_PINS_CTRL_PORT_0 0x0020 | ||
2145 | #define EEPROM_INIT_CONTROL3_PORT_A 0x0024 | 2256 | #define EEPROM_INIT_CONTROL3_PORT_A 0x0024 |
2146 | #define EEPROM_CFG 0x0012 | 2257 | #define EEPROM_CFG 0x0012 |
2147 | #define EEPROM_FLASH_VERSION 0x0032 | 2258 | #define EEPROM_FLASH_VERSION 0x0032 |
@@ -2153,10 +2264,16 @@ struct e1000_host_command_info { | |||
2153 | /* Word definitions for ID LED Settings */ | 2264 | /* Word definitions for ID LED Settings */ |
2154 | #define ID_LED_RESERVED_0000 0x0000 | 2265 | #define ID_LED_RESERVED_0000 0x0000 |
2155 | #define ID_LED_RESERVED_FFFF 0xFFFF | 2266 | #define ID_LED_RESERVED_FFFF 0xFFFF |
2267 | #define ID_LED_RESERVED_82573 0xF746 | ||
2268 | #define ID_LED_DEFAULT_82573 0x1811 | ||
2156 | #define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ | 2269 | #define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ |
2157 | (ID_LED_OFF1_OFF2 << 8) | \ | 2270 | (ID_LED_OFF1_OFF2 << 8) | \ |
2158 | (ID_LED_DEF1_DEF2 << 4) | \ | 2271 | (ID_LED_DEF1_DEF2 << 4) | \ |
2159 | (ID_LED_DEF1_DEF2)) | 2272 | (ID_LED_DEF1_DEF2)) |
2273 | #define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \ | ||
2274 | (ID_LED_DEF1_OFF2 << 8) | \ | ||
2275 | (ID_LED_DEF1_ON2 << 4) | \ | ||
2276 | (ID_LED_DEF1_DEF2)) | ||
2160 | #define ID_LED_DEF1_DEF2 0x1 | 2277 | #define ID_LED_DEF1_DEF2 0x1 |
2161 | #define ID_LED_DEF1_ON2 0x2 | 2278 | #define ID_LED_DEF1_ON2 0x2 |
2162 | #define ID_LED_DEF1_OFF2 0x3 | 2279 | #define ID_LED_DEF1_OFF2 0x3 |
@@ -2191,6 +2308,11 @@ struct e1000_host_command_info { | |||
2191 | #define EEPROM_WORD0F_ASM_DIR 0x2000 | 2308 | #define EEPROM_WORD0F_ASM_DIR 0x2000 |
2192 | #define EEPROM_WORD0F_ANE 0x0800 | 2309 | #define EEPROM_WORD0F_ANE 0x0800 |
2193 | #define EEPROM_WORD0F_SWPDIO_EXT 0x00F0 | 2310 | #define EEPROM_WORD0F_SWPDIO_EXT 0x00F0 |
2311 | #define EEPROM_WORD0F_LPLU 0x0001 | ||
2312 | |||
2313 | /* Mask bits for fields in Word 0x10/0x20 of the EEPROM */ | ||
2314 | #define EEPROM_WORD1020_GIGA_DISABLE 0x0010 | ||
2315 | #define EEPROM_WORD1020_GIGA_DISABLE_NON_D0A 0x0008 | ||
2194 | 2316 | ||
2195 | /* Mask bits for fields in Word 0x1a of the EEPROM */ | 2317 | /* Mask bits for fields in Word 0x1a of the EEPROM */ |
2196 | #define EEPROM_WORD1A_ASPM_MASK 0x000C | 2318 | #define EEPROM_WORD1A_ASPM_MASK 0x000C |
@@ -2265,23 +2387,29 @@ struct e1000_host_command_info { | |||
2265 | #define E1000_EXTCNF_CTRL_D_UD_OWNER 0x00000010 | 2387 | #define E1000_EXTCNF_CTRL_D_UD_OWNER 0x00000010 |
2266 | #define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 | 2388 | #define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 |
2267 | #define E1000_EXTCNF_CTRL_MDIO_HW_OWNERSHIP 0x00000040 | 2389 | #define E1000_EXTCNF_CTRL_MDIO_HW_OWNERSHIP 0x00000040 |
2268 | #define E1000_EXTCNF_CTRL_EXT_CNF_POINTER 0x1FFF0000 | 2390 | #define E1000_EXTCNF_CTRL_EXT_CNF_POINTER 0x0FFF0000 |
2269 | 2391 | ||
2270 | #define E1000_EXTCNF_SIZE_EXT_PHY_LENGTH 0x000000FF | 2392 | #define E1000_EXTCNF_SIZE_EXT_PHY_LENGTH 0x000000FF |
2271 | #define E1000_EXTCNF_SIZE_EXT_DOCK_LENGTH 0x0000FF00 | 2393 | #define E1000_EXTCNF_SIZE_EXT_DOCK_LENGTH 0x0000FF00 |
2272 | #define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH 0x00FF0000 | 2394 | #define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH 0x00FF0000 |
2395 | #define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 | ||
2396 | #define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 | ||
2273 | 2397 | ||
2274 | /* PBA constants */ | 2398 | /* PBA constants */ |
2399 | #define E1000_PBA_8K 0x0008 /* 8KB, default Rx allocation */ | ||
2275 | #define E1000_PBA_12K 0x000C /* 12KB, default Rx allocation */ | 2400 | #define E1000_PBA_12K 0x000C /* 12KB, default Rx allocation */ |
2276 | #define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */ | 2401 | #define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */ |
2277 | #define E1000_PBA_22K 0x0016 | 2402 | #define E1000_PBA_22K 0x0016 |
2278 | #define E1000_PBA_24K 0x0018 | 2403 | #define E1000_PBA_24K 0x0018 |
2279 | #define E1000_PBA_30K 0x001E | 2404 | #define E1000_PBA_30K 0x001E |
2280 | #define E1000_PBA_32K 0x0020 | 2405 | #define E1000_PBA_32K 0x0020 |
2406 | #define E1000_PBA_34K 0x0022 | ||
2281 | #define E1000_PBA_38K 0x0026 | 2407 | #define E1000_PBA_38K 0x0026 |
2282 | #define E1000_PBA_40K 0x0028 | 2408 | #define E1000_PBA_40K 0x0028 |
2283 | #define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */ | 2409 | #define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */ |
2284 | 2410 | ||
2411 | #define E1000_PBS_16K E1000_PBA_16K | ||
2412 | |||
2285 | /* Flow Control Constants */ | 2413 | /* Flow Control Constants */ |
2286 | #define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 | 2414 | #define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 |
2287 | #define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 | 2415 | #define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 |
@@ -2336,7 +2464,7 @@ struct e1000_host_command_info { | |||
2336 | /* Number of milliseconds we wait for Eeprom auto read bit done after MAC reset */ | 2464 | /* Number of milliseconds we wait for Eeprom auto read bit done after MAC reset */ |
2337 | #define AUTO_READ_DONE_TIMEOUT 10 | 2465 | #define AUTO_READ_DONE_TIMEOUT 10 |
2338 | /* Number of milliseconds we wait for PHY configuration done after MAC reset */ | 2466 | /* Number of milliseconds we wait for PHY configuration done after MAC reset */ |
2339 | #define PHY_CFG_TIMEOUT 40 | 2467 | #define PHY_CFG_TIMEOUT 100 |
2340 | 2468 | ||
2341 | #define E1000_TX_BUFFER_SIZE ((uint32_t)1514) | 2469 | #define E1000_TX_BUFFER_SIZE ((uint32_t)1514) |
2342 | 2470 | ||
@@ -2764,6 +2892,17 @@ struct e1000_host_command_info { | |||
2764 | #define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ | 2892 | #define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ |
2765 | #define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */ | 2893 | #define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */ |
2766 | 2894 | ||
2895 | /* M88EC018 Rev 2 specific DownShift settings */ | ||
2896 | #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 | ||
2897 | #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X 0x0000 | ||
2898 | #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X 0x0200 | ||
2899 | #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X 0x0400 | ||
2900 | #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X 0x0600 | ||
2901 | #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 | ||
2902 | #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X 0x0A00 | ||
2903 | #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X 0x0C00 | ||
2904 | #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X 0x0E00 | ||
2905 | |||
2767 | /* IGP01E1000 Specific Port Config Register - R/W */ | 2906 | /* IGP01E1000 Specific Port Config Register - R/W */ |
2768 | #define IGP01E1000_PSCFR_AUTO_MDIX_PAR_DETECT 0x0010 | 2907 | #define IGP01E1000_PSCFR_AUTO_MDIX_PAR_DETECT 0x0010 |
2769 | #define IGP01E1000_PSCFR_PRE_EN 0x0020 | 2908 | #define IGP01E1000_PSCFR_PRE_EN 0x0020 |
@@ -2990,6 +3129,221 @@ struct e1000_host_command_info { | |||
2990 | #define L1LXT971A_PHY_ID 0x001378E0 | 3129 | #define L1LXT971A_PHY_ID 0x001378E0 |
2991 | #define GG82563_E_PHY_ID 0x01410CA0 | 3130 | #define GG82563_E_PHY_ID 0x01410CA0 |
2992 | 3131 | ||
3132 | |||
3133 | /* Bits... | ||
3134 | * 15-5: page | ||
3135 | * 4-0: register offset | ||
3136 | */ | ||
3137 | #define PHY_PAGE_SHIFT 5 | ||
3138 | #define PHY_REG(page, reg) \ | ||
3139 | (((page) << PHY_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) | ||
3140 | |||
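PHY_REG() packs a page number into bits 15:5 and a register offset into bits 4:0, as the comment above says. A standalone sketch of the packing, assuming MAX_PHY_REG_ADDRESS is 0x1F as elsewhere in this header:

    #include <stdio.h>

    #define MAX_PHY_REG_ADDRESS 0x1F     /* assumed 5-bit offset mask */
    #define PHY_PAGE_SHIFT 5
    #define PHY_REG(page, reg) \
        (((page) << PHY_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))

    int main(void)
    {
        unsigned int addr = PHY_REG(769, 17);   /* same pair as IGP3_PHY_PORT_CTRL */
        printf("page %u, offset %u -> 0x%x\n", 769, 17, addr);
        return 0;
    }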
3141 | #define IGP3_PHY_PORT_CTRL \ | ||
3142 | PHY_REG(769, 17) /* Port General Configuration */ | ||
3143 | #define IGP3_PHY_RATE_ADAPT_CTRL \ | ||
3144 | PHY_REG(769, 25) /* Rate Adapter Control Register */ | ||
3145 | |||
3146 | #define IGP3_KMRN_FIFO_CTRL_STATS \ | ||
3147 | PHY_REG(770, 16) /* KMRN FIFO's control/status register */ | ||
3148 | #define IGP3_KMRN_POWER_MNG_CTRL \ | ||
3149 | PHY_REG(770, 17) /* KMRN Power Management Control Register */ | ||
3150 | #define IGP3_KMRN_INBAND_CTRL \ | ||
3151 | PHY_REG(770, 18) /* KMRN Inband Control Register */ | ||
3152 | #define IGP3_KMRN_DIAG \ | ||
3153 | PHY_REG(770, 19) /* KMRN Diagnostic register */ | ||
3154 | #define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 /* RX PCS is not synced */ | ||
3155 | #define IGP3_KMRN_ACK_TIMEOUT \ | ||
3156 | PHY_REG(770, 20) /* KMRN Acknowledge Timeouts register */ | ||
3157 | |||
3158 | #define IGP3_VR_CTRL \ | ||
3159 | PHY_REG(776, 18) /* Voltage regulator control register */ | ||
3160 | #define IGP3_VR_CTRL_MODE_SHUT 0x0200 /* Enter powerdown, shutdown VRs */ | ||
3161 | |||
3162 | #define IGP3_CAPABILITY \ | ||
3163 | PHY_REG(776, 19) /* IGP3 Capability Register */ | ||
3164 | |||
3165 | /* Capabilities for SKU Control */ | ||
3166 | #define IGP3_CAP_INITIATE_TEAM 0x0001 /* Able to initiate a team */ | ||
3167 | #define IGP3_CAP_WFM 0x0002 /* Support WoL and PXE */ | ||
3168 | #define IGP3_CAP_ASF 0x0004 /* Support ASF */ | ||
3169 | #define IGP3_CAP_LPLU 0x0008 /* Support Low Power Link Up */ | ||
3170 | #define IGP3_CAP_DC_AUTO_SPEED 0x0010 /* Support AC/DC Auto Link Speed */ | ||
3171 | #define IGP3_CAP_SPD 0x0020 /* Support Smart Power Down */ | ||
3172 | #define IGP3_CAP_MULT_QUEUE 0x0040 /* Support 2 tx & 2 rx queues */ | ||
3173 | #define IGP3_CAP_RSS 0x0080 /* Support RSS */ | ||
3174 | #define IGP3_CAP_8021PQ 0x0100 /* Support 802.1Q & 802.1p */ | ||
3175 | #define IGP3_CAP_AMT_CB 0x0200 /* Support active manageability and circuit breaker */ | ||
3176 | |||
3177 | #define IGP3_PPC_JORDAN_EN 0x0001 | ||
3178 | #define IGP3_PPC_JORDAN_GIGA_SPEED 0x0002 | ||
3179 | |||
3180 | #define IGP3_KMRN_PMC_EE_IDLE_LINK_DIS 0x0001 | ||
3181 | #define IGP3_KMRN_PMC_K0S_ENTRY_LATENCY_MASK 0x001E | ||
3182 | #define IGP3_KMRN_PMC_K0S_MODE1_EN_GIGA 0x0020 | ||
3183 | #define IGP3_KMRN_PMC_K0S_MODE1_EN_100 0x0040 | ||
3184 | |||
3185 | #define IGP3E1000_PHY_MISC_CTRL 0x1B /* Misc. Ctrl register */ | ||
3186 | #define IGP3_PHY_MISC_DUPLEX_MANUAL_SET 0x1000 /* Duplex Manual Set */ | ||
3187 | |||
3188 | #define IGP3_KMRN_EXT_CTRL PHY_REG(770, 18) | ||
3189 | #define IGP3_KMRN_EC_DIS_INBAND 0x0080 | ||
3190 | |||
3191 | #define IGP03E1000_E_PHY_ID 0x02A80390 | ||
3192 | #define IFE_E_PHY_ID 0x02A80330 /* 10/100 PHY */ | ||
3193 | #define IFE_PLUS_E_PHY_ID 0x02A80320 | ||
3194 | #define IFE_C_E_PHY_ID 0x02A80310 | ||
3195 | |||
3196 | #define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 /* 100BaseTx Extended Status, Control and Address */ | ||
3197 | #define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY special control register */ | ||
3198 | #define IFE_PHY_RCV_FALSE_CARRIER 0x13 /* 100BaseTx Receive False Carrier Counter */ | ||
3199 | #define IFE_PHY_RCV_DISCONNECT 0x14 /* 100BaseTx Receive Disconnect Counter */ | ||
3200 | #define IFE_PHY_RCV_ERROT_FRAME 0x15 /* 100BaseTx Receive Error Frame Counter */ | ||
3201 | #define IFE_PHY_RCV_SYMBOL_ERR 0x16 /* Receive Symbol Error Counter */ | ||
3202 | #define IFE_PHY_PREM_EOF_ERR 0x17 /* 100BaseTx Receive Premature End Of Frame Error Counter */ | ||
3203 | #define IFE_PHY_RCV_EOF_ERR 0x18 /* 10BaseT Receive End Of Frame Error Counter */ | ||
3204 | #define IFE_PHY_TX_JABBER_DETECT 0x19 /* 10BaseT Transmit Jabber Detect Counter */ | ||
3205 | #define IFE_PHY_EQUALIZER 0x1A /* PHY Equalizer Control and Status */ | ||
3206 | #define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY special control and LED configuration */ | ||
3207 | #define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control register */ | ||
3208 | #define IFE_PHY_HWI_CONTROL 0x1D /* Hardware Integrity Control (HWI) */ | ||
3209 | |||
3210 | #define IFE_PESC_REDUCED_POWER_DOWN_DISABLE 0x2000 /* Default 1 = Disable auto reduced power down */ | ||
3211 | #define IFE_PESC_100BTX_POWER_DOWN 0x0400 /* Indicates the power state of 100BASE-TX */ | ||
3212 | #define IFE_PESC_10BTX_POWER_DOWN 0x0200 /* Indicates the power state of 10BASE-T */ | ||
3213 | #define IFE_PESC_POLARITY_REVERSED 0x0100 /* Indicates 10BASE-T polarity */ | ||
3214 | #define IFE_PESC_PHY_ADDR_MASK 0x007C /* Bit 6:2 for sampled PHY address */ | ||
3215 | #define IFE_PESC_SPEED 0x0002 /* Auto-negotiation speed result 1=100Mbs, 0=10Mbs */ | ||
3216 | #define IFE_PESC_DUPLEX 0x0001 /* Auto-negotiation duplex result 1=Full, 0=Half */ | ||
3217 | #define IFE_PESC_POLARITY_REVERSED_SHIFT 8 | ||
3218 | |||
3219 | #define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100 /* 1 = Dynamic Power Down disabled */ | ||
3220 | #define IFE_PSC_FORCE_POLARITY 0x0020 /* 1=Reversed Polarity, 0=Normal */ | ||
3221 | #define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 /* 1=Auto Polarity Disabled, 0=Enabled */ | ||
3222 | #define IFE_PSC_JABBER_FUNC_DISABLE 0x0001 /* 1=Jabber Disabled, 0=Normal Jabber Operation */ | ||
3223 | #define IFE_PSC_FORCE_POLARITY_SHIFT 5 | ||
3224 | #define IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT 4 | ||
3225 | |||
3226 | #define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable MDI/MDI-X feature, default 0=disabled */ | ||
3227 | #define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDIX-X, 0=force MDI */ | ||
3228 | #define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ | ||
3229 | #define IFE_PMC_AUTO_MDIX_COMPLETE 0x0010 /* Resolution algorithm is completed */ | ||
3230 | #define IFE_PMC_MDIX_MODE_SHIFT 6 | ||
3231 | #define IFE_PHC_MDIX_RESET_ALL_MASK 0x0000 /* Disable auto MDI-X */ | ||
3232 | |||
3233 | #define IFE_PHC_HWI_ENABLE 0x8000 /* Enable the HWI feature */ | ||
3234 | #define IFE_PHC_ABILITY_CHECK 0x4000 /* 1= Test Passed, 0=failed */ | ||
3235 | #define IFE_PHC_TEST_EXEC 0x2000 /* PHY launch test pulses on the wire */ | ||
3236 | #define IFE_PHC_HIGHZ 0x0200 /* 1 = Open Circuit */ | ||
3237 | #define IFE_PHC_LOWZ 0x0400 /* 1 = Short Circuit */ | ||
3238 | #define IFE_PHC_LOW_HIGH_Z_MASK 0x0600 /* Mask for indication type of problem on the line */ | ||
3239 | #define IFE_PHC_DISTANCE_MASK 0x01FF /* Mask for distance to the cable problem, in 80cm granularity */ | ||
3240 | #define IFE_PHC_RESET_ALL_MASK 0x0000 /* Disable HWI */ | ||
3241 | #define IFE_PSCL_PROBE_MODE 0x0020 /* LED Probe mode */ | ||
3242 | #define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */ | ||
3243 | #define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ | ||
3244 | |||
3245 | #define ICH8_FLASH_COMMAND_TIMEOUT 500 /* 500 ms , should be adjusted */ | ||
3246 | #define ICH8_FLASH_CYCLE_REPEAT_COUNT 10 /* 10 cycles , should be adjusted */ | ||
3247 | #define ICH8_FLASH_SEG_SIZE_256 256 | ||
3248 | #define ICH8_FLASH_SEG_SIZE_4K 4096 | ||
3249 | #define ICH8_FLASH_SEG_SIZE_64K 65536 | ||
3250 | |||
3251 | #define ICH8_CYCLE_READ 0x0 | ||
3252 | #define ICH8_CYCLE_RESERVED 0x1 | ||
3253 | #define ICH8_CYCLE_WRITE 0x2 | ||
3254 | #define ICH8_CYCLE_ERASE 0x3 | ||
3255 | |||
3256 | #define ICH8_FLASH_GFPREG 0x0000 | ||
3257 | #define ICH8_FLASH_HSFSTS 0x0004 | ||
3258 | #define ICH8_FLASH_HSFCTL 0x0006 | ||
3259 | #define ICH8_FLASH_FADDR 0x0008 | ||
3260 | #define ICH8_FLASH_FDATA0 0x0010 | ||
3261 | #define ICH8_FLASH_FRACC 0x0050 | ||
3262 | #define ICH8_FLASH_FREG0 0x0054 | ||
3263 | #define ICH8_FLASH_FREG1 0x0058 | ||
3264 | #define ICH8_FLASH_FREG2 0x005C | ||
3265 | #define ICH8_FLASH_FREG3 0x0060 | ||
3266 | #define ICH8_FLASH_FPR0 0x0074 | ||
3267 | #define ICH8_FLASH_FPR1 0x0078 | ||
3268 | #define ICH8_FLASH_SSFSTS 0x0090 | ||
3269 | #define ICH8_FLASH_SSFCTL 0x0092 | ||
3270 | #define ICH8_FLASH_PREOP 0x0094 | ||
3271 | #define ICH8_FLASH_OPTYPE 0x0096 | ||
3272 | #define ICH8_FLASH_OPMENU 0x0098 | ||
3273 | |||
3274 | #define ICH8_FLASH_REG_MAPSIZE 0x00A0 | ||
3275 | #define ICH8_FLASH_SECTOR_SIZE 4096 | ||
3276 | #define ICH8_GFPREG_BASE_MASK 0x1FFF | ||
3277 | #define ICH8_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF | ||
3278 | |||
3279 | /* ICH8 GbE Flash Hardware Sequencing Flash Status Register bit breakdown */ | ||
3280 | /* Offset 04h HSFSTS */ | ||
3281 | union ich8_hws_flash_status { | ||
3282 | struct ich8_hsfsts { | ||
3283 | #ifdef E1000_BIG_ENDIAN | ||
3284 | uint16_t reserved2 :6; | ||
3285 | uint16_t fldesvalid :1; | ||
3286 | uint16_t flockdn :1; | ||
3287 | uint16_t flcdone :1; | ||
3288 | uint16_t flcerr :1; | ||
3289 | uint16_t dael :1; | ||
3290 | uint16_t berasesz :2; | ||
3291 | uint16_t flcinprog :1; | ||
3292 | uint16_t reserved1 :2; | ||
3293 | #else | ||
3294 | uint16_t flcdone :1; /* bit 0 Flash Cycle Done */ | ||
3295 | uint16_t flcerr :1; /* bit 1 Flash Cycle Error */ | ||
3296 | uint16_t dael :1; /* bit 2 Direct Access error Log */ | ||
3297 | uint16_t berasesz :2; /* bit 4:3 Block/Sector Erase Size */ | ||
3298 | uint16_t flcinprog :1; /* bit 5 flash SPI cycle in Progress */ | ||
3299 | uint16_t reserved1 :2; /* bit 7:6 Reserved */ | ||
3300 | uint16_t reserved2 :6; /* bit 13:8 Reserved */ | ||
3301 | uint16_t fldesvalid :1; /* bit 14 Flash Descriptor Valid */ | ||
3302 | uint16_t flockdn :1; /* bit 15 Flash Configuration Lock-Down */ | ||
3303 | #endif | ||
3304 | } hsf_status; | ||
3305 | uint16_t regval; | ||
3306 | }; | ||
3307 | |||
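The union above is the usual trick of overlaying named bitfields on the raw 16-bit register: one read fills regval, and the individual bits are then tested by name. A minimal, layout-simplified sketch of the same pattern; the field widths are collapsed and the register value is invented, only the low two bit positions follow the little-endian layout above:

    #include <stdint.h>
    #include <stdio.h>

    union hsfsts_example {
        struct {
            uint16_t flcdone :1;    /* bit 0 Flash Cycle Done */
            uint16_t flcerr  :1;    /* bit 1 Flash Cycle Error */
            uint16_t other   :14;   /* remaining bits collapsed for the sketch */
        } bits;
        uint16_t regval;
    };

    int main(void)
    {
        union hsfsts_example s;
        s.regval = 0x0001;          /* pretend readw() of HSFSTS */
        printf("cycle done=%d error=%d\n", s.bits.flcdone, s.bits.flcerr);
        return 0;
    }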
3308 | /* ICH8 GbE Flash Hardware Sequencing Flash control Register bit breakdown */ | ||
3309 | /* Offset 06h FLCTL */ | ||
3310 | union ich8_hws_flash_ctrl { | ||
3311 | struct ich8_hsflctl { | ||
3312 | #ifdef E1000_BIG_ENDIAN | ||
3313 | uint16_t fldbcount :2; | ||
3314 | uint16_t flockdn :6; | ||
3315 | uint16_t flcgo :1; | ||
3316 | uint16_t flcycle :2; | ||
3317 | uint16_t reserved :5; | ||
3318 | #else | ||
3319 | uint16_t flcgo :1; /* 0 Flash Cycle Go */ | ||
3320 | uint16_t flcycle :2; /* 2:1 Flash Cycle */ | ||
3321 | uint16_t reserved :5; /* 7:3 Reserved */ | ||
3322 | uint16_t fldbcount :2; /* 9:8 Flash Data Byte Count */ | ||
3323 | uint16_t flockdn :6; /* 15:10 Reserved */ | ||
3324 | #endif | ||
3325 | } hsf_ctrl; | ||
3326 | uint16_t regval; | ||
3327 | }; | ||
3328 | |||
3329 | /* ICH8 Flash Region Access Permissions */ | ||
3330 | union ich8_hws_flash_regacc { | ||
3331 | struct ich8_flracc { | ||
3332 | #ifdef E1000_BIG_ENDIAN | ||
3333 | uint32_t gmwag :8; | ||
3334 | uint32_t gmrag :8; | ||
3335 | uint32_t grwa :8; | ||
3336 | uint32_t grra :8; | ||
3337 | #else | ||
3338 | uint32_t grra :8; /* 0:7 GbE region Read Access */ | ||
3339 | uint32_t grwa :8; /* 8:15 GbE region Write Access */ | ||
3340 | uint32_t gmrag :8; /* 23:16 GbE Master Read Access Grant */ | ||
3341 | uint32_t gmwag :8; /* 31:24 GbE Master Write Access Grant */ | ||
3342 | #endif | ||
3343 | } hsf_flregacc; | ||
3344 | uint16_t regval; | ||
3345 | }; | ||
3346 | |||
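Taken together, the registers and cycle codes above suggest the hardware-sequencing flow: program FADDR with the linear flash address, select a cycle type and set the go bit in HSFCTL, then poll HSFSTS for flcdone/flcerr. A rough, hypothetical sketch of a read cycle against a fake register file; flash_readw/flash_writew stand in for the driver's memory-mapped accessors, and the bit positions follow the little-endian layouts above:

    #include <stdint.h>
    #include <stdio.h>

    #define ICH8_FLASH_HSFSTS 0x0004
    #define ICH8_FLASH_HSFCTL 0x0006
    #define ICH8_FLASH_FADDR  0x0008
    #define ICH8_CYCLE_READ   0x0

    static uint16_t regs[0x100];    /* fake register file indexed by byte offset */
    static uint16_t flash_readw(unsigned int off)              { return regs[off]; }
    static void     flash_writew(unsigned int off, uint16_t v) { regs[off] = v; }

    int main(void)
    {
        regs[ICH8_FLASH_HSFSTS] = 0x0001;          /* fake: cycle already done */

        flash_writew(ICH8_FLASH_FADDR, 0x0010);    /* linear flash address */
        flash_writew(ICH8_FLASH_HSFCTL,
                     (ICH8_CYCLE_READ << 1) | 1);  /* flcycle in bits 2:1, flcgo bit 0 */

        uint16_t sts = flash_readw(ICH8_FLASH_HSFSTS);
        printf("flcdone=%d flcerr=%d\n", sts & 1, (sts >> 1) & 1);
        return 0;
    }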
2993 | /* Miscellaneous PHY bit definitions. */ | 3347 | /* Miscellaneous PHY bit definitions. */ |
2994 | #define PHY_PREAMBLE 0xFFFFFFFF | 3348 | #define PHY_PREAMBLE 0xFFFFFFFF |
2995 | #define PHY_SOF 0x01 | 3349 | #define PHY_SOF 0x01 |
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index f77624f5f17b..726f43d55937 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c | |||
@@ -36,7 +36,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; | |||
36 | #else | 36 | #else |
37 | #define DRIVERNAPI "-NAPI" | 37 | #define DRIVERNAPI "-NAPI" |
38 | #endif | 38 | #endif |
39 | #define DRV_VERSION "7.0.38-k4"DRIVERNAPI | 39 | #define DRV_VERSION "7.1.9-k4"DRIVERNAPI |
40 | char e1000_driver_version[] = DRV_VERSION; | 40 | char e1000_driver_version[] = DRV_VERSION; |
41 | static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; | 41 | static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; |
42 | 42 | ||
@@ -73,6 +73,11 @@ static struct pci_device_id e1000_pci_tbl[] = { | |||
73 | INTEL_E1000_ETHERNET_DEVICE(0x1026), | 73 | INTEL_E1000_ETHERNET_DEVICE(0x1026), |
74 | INTEL_E1000_ETHERNET_DEVICE(0x1027), | 74 | INTEL_E1000_ETHERNET_DEVICE(0x1027), |
75 | INTEL_E1000_ETHERNET_DEVICE(0x1028), | 75 | INTEL_E1000_ETHERNET_DEVICE(0x1028), |
76 | INTEL_E1000_ETHERNET_DEVICE(0x1049), | ||
77 | INTEL_E1000_ETHERNET_DEVICE(0x104A), | ||
78 | INTEL_E1000_ETHERNET_DEVICE(0x104B), | ||
79 | INTEL_E1000_ETHERNET_DEVICE(0x104C), | ||
80 | INTEL_E1000_ETHERNET_DEVICE(0x104D), | ||
76 | INTEL_E1000_ETHERNET_DEVICE(0x105E), | 81 | INTEL_E1000_ETHERNET_DEVICE(0x105E), |
77 | INTEL_E1000_ETHERNET_DEVICE(0x105F), | 82 | INTEL_E1000_ETHERNET_DEVICE(0x105F), |
78 | INTEL_E1000_ETHERNET_DEVICE(0x1060), | 83 | INTEL_E1000_ETHERNET_DEVICE(0x1060), |
@@ -96,6 +101,8 @@ static struct pci_device_id e1000_pci_tbl[] = { | |||
96 | INTEL_E1000_ETHERNET_DEVICE(0x109A), | 101 | INTEL_E1000_ETHERNET_DEVICE(0x109A), |
97 | INTEL_E1000_ETHERNET_DEVICE(0x10B5), | 102 | INTEL_E1000_ETHERNET_DEVICE(0x10B5), |
98 | INTEL_E1000_ETHERNET_DEVICE(0x10B9), | 103 | INTEL_E1000_ETHERNET_DEVICE(0x10B9), |
104 | INTEL_E1000_ETHERNET_DEVICE(0x10BA), | ||
105 | INTEL_E1000_ETHERNET_DEVICE(0x10BB), | ||
99 | /* required last entry */ | 106 | /* required last entry */ |
100 | {0,} | 107 | {0,} |
101 | }; | 108 | }; |
@@ -133,7 +140,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter, | |||
133 | static void e1000_set_multi(struct net_device *netdev); | 140 | static void e1000_set_multi(struct net_device *netdev); |
134 | static void e1000_update_phy_info(unsigned long data); | 141 | static void e1000_update_phy_info(unsigned long data); |
135 | static void e1000_watchdog(unsigned long data); | 142 | static void e1000_watchdog(unsigned long data); |
136 | static void e1000_watchdog_task(struct e1000_adapter *adapter); | ||
137 | static void e1000_82547_tx_fifo_stall(unsigned long data); | 143 | static void e1000_82547_tx_fifo_stall(unsigned long data); |
138 | static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev); | 144 | static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev); |
139 | static struct net_device_stats * e1000_get_stats(struct net_device *netdev); | 145 | static struct net_device_stats * e1000_get_stats(struct net_device *netdev); |
@@ -178,8 +184,8 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid); | |||
178 | static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid); | 184 | static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid); |
179 | static void e1000_restore_vlan(struct e1000_adapter *adapter); | 185 | static void e1000_restore_vlan(struct e1000_adapter *adapter); |
180 | 186 | ||
181 | #ifdef CONFIG_PM | ||
182 | static int e1000_suspend(struct pci_dev *pdev, pm_message_t state); | 187 | static int e1000_suspend(struct pci_dev *pdev, pm_message_t state); |
188 | #ifdef CONFIG_PM | ||
183 | static int e1000_resume(struct pci_dev *pdev); | 189 | static int e1000_resume(struct pci_dev *pdev); |
184 | #endif | 190 | #endif |
185 | static void e1000_shutdown(struct pci_dev *pdev); | 191 | static void e1000_shutdown(struct pci_dev *pdev); |
@@ -206,8 +212,8 @@ static struct pci_driver e1000_driver = { | |||
206 | .probe = e1000_probe, | 212 | .probe = e1000_probe, |
207 | .remove = __devexit_p(e1000_remove), | 213 | .remove = __devexit_p(e1000_remove), |
208 | /* Power Management Hooks */ | 214 | /* Power Management Hooks */ |
209 | #ifdef CONFIG_PM | ||
210 | .suspend = e1000_suspend, | 215 | .suspend = e1000_suspend, |
216 | #ifdef CONFIG_PM | ||
211 | .resume = e1000_resume, | 217 | .resume = e1000_resume, |
212 | #endif | 218 | #endif |
213 | .shutdown = e1000_shutdown, | 219 | .shutdown = e1000_shutdown, |
@@ -261,6 +267,44 @@ e1000_exit_module(void) | |||
261 | 267 | ||
262 | module_exit(e1000_exit_module); | 268 | module_exit(e1000_exit_module); |
263 | 269 | ||
270 | static int e1000_request_irq(struct e1000_adapter *adapter) | ||
271 | { | ||
272 | struct net_device *netdev = adapter->netdev; | ||
273 | int flags, err = 0; | ||
274 | |||
275 | flags = IRQF_SHARED; | ||
276 | #ifdef CONFIG_PCI_MSI | ||
277 | if (adapter->hw.mac_type > e1000_82547_rev_2) { | ||
278 | adapter->have_msi = TRUE; | ||
279 | if ((err = pci_enable_msi(adapter->pdev))) { | ||
280 | DPRINTK(PROBE, ERR, | ||
281 | "Unable to allocate MSI interrupt Error: %d\n", err); | ||
282 | adapter->have_msi = FALSE; | ||
283 | } | ||
284 | } | ||
285 | if (adapter->have_msi) | ||
286 | flags &= ~IRQF_SHARED; | ||
287 | #endif | ||
288 | if ((err = request_irq(adapter->pdev->irq, &e1000_intr, flags, | ||
289 | netdev->name, netdev))) | ||
290 | DPRINTK(PROBE, ERR, | ||
291 | "Unable to allocate interrupt Error: %d\n", err); | ||
292 | |||
293 | return err; | ||
294 | } | ||
295 | |||
296 | static void e1000_free_irq(struct e1000_adapter *adapter) | ||
297 | { | ||
298 | struct net_device *netdev = adapter->netdev; | ||
299 | |||
300 | free_irq(adapter->pdev->irq, netdev); | ||
301 | |||
302 | #ifdef CONFIG_PCI_MSI | ||
303 | if (adapter->have_msi) | ||
304 | pci_disable_msi(adapter->pdev); | ||
305 | #endif | ||
306 | } | ||
307 | |||
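e1000_request_irq()/e1000_free_irq() above pull the MSI-or-legacy interrupt setup out of e1000_up()/e1000_down(), so open/close own the IRQ instead of every reset path. A rough sketch of the same try-MSI-then-fall-back pattern under the pre-2.6.19 handler prototype used here (my_isr, my_request_irq and the "my_dev" name are hypothetical):

#include <linux/pci.h>
#include <linux/interrupt.h>

static irqreturn_t my_isr(int irq, void *data, struct pt_regs *regs);

static int my_request_irq(struct pci_dev *pdev, void *ctx)
{
	unsigned long flags = IRQF_SHARED;
	int have_msi = 0, err;

	if (pci_enable_msi(pdev) == 0) {
		have_msi = 1;
		flags = 0;			/* an MSI vector is never shared */
	}
	err = request_irq(pdev->irq, &my_isr, flags, "my_dev", ctx);
	if (err && have_msi)
		pci_disable_msi(pdev);		/* undo MSI if the request failed */
	return err;
}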
264 | /** | 308 | /** |
265 | * e1000_irq_disable - Mask off interrupt generation on the NIC | 309 | * e1000_irq_disable - Mask off interrupt generation on the NIC |
266 | * @adapter: board private structure | 310 | * @adapter: board private structure |
@@ -329,6 +373,7 @@ e1000_release_hw_control(struct e1000_adapter *adapter) | |||
329 | { | 373 | { |
330 | uint32_t ctrl_ext; | 374 | uint32_t ctrl_ext; |
331 | uint32_t swsm; | 375 | uint32_t swsm; |
376 | uint32_t extcnf; | ||
332 | 377 | ||
333 | /* Let firmware take over control of h/w */ | 378 | /* Let firmware take over control of h/w */ |

334 | switch (adapter->hw.mac_type) { | 379 | switch (adapter->hw.mac_type) { |
@@ -343,6 +388,11 @@ e1000_release_hw_control(struct e1000_adapter *adapter) | |||
343 | swsm = E1000_READ_REG(&adapter->hw, SWSM); | 388 | swsm = E1000_READ_REG(&adapter->hw, SWSM); |
344 | E1000_WRITE_REG(&adapter->hw, SWSM, | 389 | E1000_WRITE_REG(&adapter->hw, SWSM, |
345 | swsm & ~E1000_SWSM_DRV_LOAD); | 390 | swsm & ~E1000_SWSM_DRV_LOAD); |
391 | case e1000_ich8lan: | ||
392 | extcnf = E1000_READ_REG(&adapter->hw, CTRL_EXT); | ||
393 | E1000_WRITE_REG(&adapter->hw, CTRL_EXT, | ||
394 | extcnf & ~E1000_CTRL_EXT_DRV_LOAD); | ||
395 | break; | ||
346 | default: | 396 | default: |
347 | break; | 397 | break; |
348 | } | 398 | } |
@@ -364,6 +414,7 @@ e1000_get_hw_control(struct e1000_adapter *adapter) | |||
364 | { | 414 | { |
365 | uint32_t ctrl_ext; | 415 | uint32_t ctrl_ext; |
366 | uint32_t swsm; | 416 | uint32_t swsm; |
417 | uint32_t extcnf; | ||
367 | /* Let firmware know the driver has taken over */ | 418 | /* Let firmware know the driver has taken over */ |
368 | switch (adapter->hw.mac_type) { | 419 | switch (adapter->hw.mac_type) { |
369 | case e1000_82571: | 420 | case e1000_82571: |
@@ -378,6 +429,11 @@ e1000_get_hw_control(struct e1000_adapter *adapter) | |||
378 | E1000_WRITE_REG(&adapter->hw, SWSM, | 429 | E1000_WRITE_REG(&adapter->hw, SWSM, |
379 | swsm | E1000_SWSM_DRV_LOAD); | 430 | swsm | E1000_SWSM_DRV_LOAD); |
380 | break; | 431 | break; |
432 | case e1000_ich8lan: | ||
433 | extcnf = E1000_READ_REG(&adapter->hw, EXTCNF_CTRL); | ||
434 | E1000_WRITE_REG(&adapter->hw, EXTCNF_CTRL, | ||
435 | extcnf | E1000_EXTCNF_CTRL_SWFLAG); | ||
436 | break; | ||
381 | default: | 437 | default: |
382 | break; | 438 | break; |
383 | } | 439 | } |
@@ -387,18 +443,10 @@ int | |||
387 | e1000_up(struct e1000_adapter *adapter) | 443 | e1000_up(struct e1000_adapter *adapter) |
388 | { | 444 | { |
389 | struct net_device *netdev = adapter->netdev; | 445 | struct net_device *netdev = adapter->netdev; |
390 | int i, err; | 446 | int i; |
391 | 447 | ||
392 | /* hardware has been reset, we need to reload some things */ | 448 | /* hardware has been reset, we need to reload some things */ |
393 | 449 | ||
394 | /* Reset the PHY if it was previously powered down */ | ||
395 | if (adapter->hw.media_type == e1000_media_type_copper) { | ||
396 | uint16_t mii_reg; | ||
397 | e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); | ||
398 | if (mii_reg & MII_CR_POWER_DOWN) | ||
399 | e1000_phy_hw_reset(&adapter->hw); | ||
400 | } | ||
401 | |||
402 | e1000_set_multi(netdev); | 450 | e1000_set_multi(netdev); |
403 | 451 | ||
404 | e1000_restore_vlan(adapter); | 452 | e1000_restore_vlan(adapter); |
@@ -415,24 +463,6 @@ e1000_up(struct e1000_adapter *adapter) | |||
415 | E1000_DESC_UNUSED(ring)); | 463 | E1000_DESC_UNUSED(ring)); |
416 | } | 464 | } |
417 | 465 | ||
418 | #ifdef CONFIG_PCI_MSI | ||
419 | if (adapter->hw.mac_type > e1000_82547_rev_2) { | ||
420 | adapter->have_msi = TRUE; | ||
421 | if ((err = pci_enable_msi(adapter->pdev))) { | ||
422 | DPRINTK(PROBE, ERR, | ||
423 | "Unable to allocate MSI interrupt Error: %d\n", err); | ||
424 | adapter->have_msi = FALSE; | ||
425 | } | ||
426 | } | ||
427 | #endif | ||
428 | if ((err = request_irq(adapter->pdev->irq, &e1000_intr, | ||
429 | IRQF_SHARED | IRQF_SAMPLE_RANDOM, | ||
430 | netdev->name, netdev))) { | ||
431 | DPRINTK(PROBE, ERR, | ||
432 | "Unable to allocate interrupt Error: %d\n", err); | ||
433 | return err; | ||
434 | } | ||
435 | |||
436 | adapter->tx_queue_len = netdev->tx_queue_len; | 466 | adapter->tx_queue_len = netdev->tx_queue_len; |
437 | 467 | ||
438 | mod_timer(&adapter->watchdog_timer, jiffies); | 468 | mod_timer(&adapter->watchdog_timer, jiffies); |
@@ -445,21 +475,60 @@ e1000_up(struct e1000_adapter *adapter) | |||
445 | return 0; | 475 | return 0; |
446 | } | 476 | } |
447 | 477 | ||
478 | /** | ||
479 | * e1000_power_up_phy - restore link in case the phy was powered down | ||
480 | * @adapter: address of board private structure | ||
481 | * | ||
482 | * The phy may be powered down to save power and turn off link when the | ||
483 | * driver is unloaded and wake on lan is not enabled (among others) | ||
484 | * *** this routine MUST be followed by a call to e1000_reset *** | ||
485 | * | ||
486 | **/ | ||
487 | |||
488 | static void e1000_power_up_phy(struct e1000_adapter *adapter) | ||
489 | { | ||
490 | uint16_t mii_reg = 0; | ||
491 | |||
492 | /* Just clear the power down bit to wake the phy back up */ | ||
493 | if (adapter->hw.media_type == e1000_media_type_copper) { | ||
494 | /* according to the manual, the phy will retain its | ||
495 | * settings across a power-down/up cycle */ | ||
496 | e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); | ||
497 | mii_reg &= ~MII_CR_POWER_DOWN; | ||
498 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg); | ||
499 | } | ||
500 | } | ||
501 | |||
502 | static void e1000_power_down_phy(struct e1000_adapter *adapter) | ||
503 | { | ||
504 | boolean_t mng_mode_enabled = (adapter->hw.mac_type >= e1000_82571) && | ||
505 | e1000_check_mng_mode(&adapter->hw); | ||
506 | /* Power down the PHY so no link is implied when interface is down | ||
507 | * The PHY cannot be powered down if any of the following is TRUE | ||
508 | * (a) WoL is enabled | ||
509 | * (b) AMT is active | ||
510 | * (c) SoL/IDER session is active */ | ||
511 | if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 && | ||
512 | adapter->hw.mac_type != e1000_ich8lan && | ||
513 | adapter->hw.media_type == e1000_media_type_copper && | ||
514 | !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN) && | ||
515 | !mng_mode_enabled && | ||
516 | !e1000_check_phy_reset_block(&adapter->hw)) { | ||
517 | uint16_t mii_reg = 0; | ||
518 | e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); | ||
519 | mii_reg |= MII_CR_POWER_DOWN; | ||
520 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg); | ||
521 | mdelay(1); | ||
522 | } | ||
523 | } | ||
524 | |||
448 | void | 525 | void |
449 | e1000_down(struct e1000_adapter *adapter) | 526 | e1000_down(struct e1000_adapter *adapter) |
450 | { | 527 | { |
451 | struct net_device *netdev = adapter->netdev; | 528 | struct net_device *netdev = adapter->netdev; |
452 | boolean_t mng_mode_enabled = (adapter->hw.mac_type >= e1000_82571) && | ||
453 | e1000_check_mng_mode(&adapter->hw); | ||
454 | 529 | ||
455 | e1000_irq_disable(adapter); | 530 | e1000_irq_disable(adapter); |
456 | 531 | ||
457 | free_irq(adapter->pdev->irq, netdev); | ||
458 | #ifdef CONFIG_PCI_MSI | ||
459 | if (adapter->hw.mac_type > e1000_82547_rev_2 && | ||
460 | adapter->have_msi == TRUE) | ||
461 | pci_disable_msi(adapter->pdev); | ||
462 | #endif | ||
463 | del_timer_sync(&adapter->tx_fifo_stall_timer); | 532 | del_timer_sync(&adapter->tx_fifo_stall_timer); |
464 | del_timer_sync(&adapter->watchdog_timer); | 533 | del_timer_sync(&adapter->watchdog_timer); |
465 | del_timer_sync(&adapter->phy_info_timer); | 534 | del_timer_sync(&adapter->phy_info_timer); |
@@ -476,23 +545,17 @@ e1000_down(struct e1000_adapter *adapter) | |||
476 | e1000_reset(adapter); | 545 | e1000_reset(adapter); |
477 | e1000_clean_all_tx_rings(adapter); | 546 | e1000_clean_all_tx_rings(adapter); |
478 | e1000_clean_all_rx_rings(adapter); | 547 | e1000_clean_all_rx_rings(adapter); |
548 | } | ||
479 | 549 | ||
480 | /* Power down the PHY so no link is implied when interface is down * | 550 | void |
481 | * The PHY cannot be powered down if any of the following is TRUE * | 551 | e1000_reinit_locked(struct e1000_adapter *adapter) |
482 | * (a) WoL is enabled | 552 | { |
483 | * (b) AMT is active | 553 | WARN_ON(in_interrupt()); |
484 | * (c) SoL/IDER session is active */ | 554 | while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) |
485 | if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 && | 555 | msleep(1); |
486 | adapter->hw.media_type == e1000_media_type_copper && | 556 | e1000_down(adapter); |
487 | !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN) && | 557 | e1000_up(adapter); |
488 | !mng_mode_enabled && | 558 | clear_bit(__E1000_RESETTING, &adapter->flags); |
489 | !e1000_check_phy_reset_block(&adapter->hw)) { | ||
490 | uint16_t mii_reg; | ||
491 | e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); | ||
492 | mii_reg |= MII_CR_POWER_DOWN; | ||
493 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg); | ||
494 | mdelay(1); | ||
495 | } | ||
496 | } | 559 | } |
497 | 560 | ||
498 | void | 561 | void |
@@ -518,6 +581,9 @@ e1000_reset(struct e1000_adapter *adapter) | |||
518 | case e1000_82573: | 581 | case e1000_82573: |
519 | pba = E1000_PBA_12K; | 582 | pba = E1000_PBA_12K; |
520 | break; | 583 | break; |
584 | case e1000_ich8lan: | ||
585 | pba = E1000_PBA_8K; | ||
586 | break; | ||
521 | default: | 587 | default: |
522 | pba = E1000_PBA_48K; | 588 | pba = E1000_PBA_48K; |
523 | break; | 589 | break; |
@@ -542,6 +608,12 @@ e1000_reset(struct e1000_adapter *adapter) | |||
542 | /* Set the FC high water mark to 90% of the FIFO size. | 608 | /* Set the FC high water mark to 90% of the FIFO size. |
543 | * Required to clear last 3 LSB */ | 609 | * Required to clear last 3 LSB */ |
544 | fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8; | 610 | fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8; |
611 | /* We can't use 90% on small FIFOs because the remainder | ||
612 | * would be less than 1 full frame. In this case, we size | ||
613 | * it to allow at least a full frame above the high water | ||
614 | * mark. */ | ||
615 | if (pba < E1000_PBA_16K) | ||
616 | fc_high_water_mark = (pba * 1024) - 1600; | ||
545 | 617 | ||
546 | adapter->hw.fc_high_water = fc_high_water_mark; | 618 | adapter->hw.fc_high_water = fc_high_water_mark; |
547 | adapter->hw.fc_low_water = fc_high_water_mark - 8; | 619 | adapter->hw.fc_low_water = fc_high_water_mark - 8; |
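The new small-FIFO clause matters for ICH8's 8 KB packet buffer: 90% of 8 KB leaves less than one full frame of headroom, so the mark is instead pinned 1600 bytes below the top. Evaluated stand-alone (assuming, as the driver's PBA constants suggest, that pba is counted in kilobytes):

#include <stdio.h>

static unsigned int fc_high_water(unsigned int pba_kb)
{
	unsigned int mark = ((pba_kb * 9216) / 10) & 0xFFF8;	/* 90%, 8-byte aligned */

	if (pba_kb < 16)			/* small FIFO, e.g. ICH8's 8 KB PBA */
		mark = pba_kb * 1024 - 1600;	/* keep one full frame above the mark */
	return mark;
}

int main(void)
{
	printf("48K PBA: high=%u low=%u\n", fc_high_water(48), fc_high_water(48) - 8);
	printf(" 8K PBA: high=%u low=%u\n", fc_high_water(8), fc_high_water(8) - 8);
	return 0;
}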
@@ -564,6 +636,23 @@ e1000_reset(struct e1000_adapter *adapter) | |||
564 | 636 | ||
565 | e1000_reset_adaptive(&adapter->hw); | 637 | e1000_reset_adaptive(&adapter->hw); |
566 | e1000_phy_get_info(&adapter->hw, &adapter->phy_info); | 638 | e1000_phy_get_info(&adapter->hw, &adapter->phy_info); |
639 | |||
640 | if (!adapter->smart_power_down && | ||
641 | (adapter->hw.mac_type == e1000_82571 || | ||
642 | adapter->hw.mac_type == e1000_82572)) { | ||
643 | uint16_t phy_data = 0; | ||
644 | /* speed up time to link by disabling smart power down, ignore | ||
645 | * the return value of this function because there is nothing | ||
646 | * different we would do if it failed */ | ||
647 | e1000_read_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT, | ||
648 | &phy_data); | ||
649 | phy_data &= ~IGP02E1000_PM_SPD; | ||
650 | e1000_write_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT, | ||
651 | phy_data); | ||
652 | } | ||
653 | |||
654 | if (adapter->hw.mac_type < e1000_ich8lan) | ||
655 | /* FIXME: this code is duplicate and wrong for PCI Express */ | ||
567 | if (adapter->en_mng_pt) { | 656 | if (adapter->en_mng_pt) { |
568 | manc = E1000_READ_REG(&adapter->hw, MANC); | 657 | manc = E1000_READ_REG(&adapter->hw, MANC); |
569 | manc |= (E1000_MANC_ARP_EN | E1000_MANC_EN_MNG2HOST); | 658 | manc |= (E1000_MANC_ARP_EN | E1000_MANC_EN_MNG2HOST); |
@@ -590,6 +679,7 @@ e1000_probe(struct pci_dev *pdev, | |||
590 | struct net_device *netdev; | 679 | struct net_device *netdev; |
591 | struct e1000_adapter *adapter; | 680 | struct e1000_adapter *adapter; |
592 | unsigned long mmio_start, mmio_len; | 681 | unsigned long mmio_start, mmio_len; |
682 | unsigned long flash_start, flash_len; | ||
593 | 683 | ||
594 | static int cards_found = 0; | 684 | static int cards_found = 0; |
595 | static int e1000_ksp3_port_a = 0; /* global ksp3 port a indication */ | 685 | static int e1000_ksp3_port_a = 0; /* global ksp3 port a indication */ |
@@ -599,10 +689,12 @@ e1000_probe(struct pci_dev *pdev, | |||
599 | if ((err = pci_enable_device(pdev))) | 689 | if ((err = pci_enable_device(pdev))) |
600 | return err; | 690 | return err; |
601 | 691 | ||
602 | if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) { | 692 | if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) && |
693 | !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) { | ||
603 | pci_using_dac = 1; | 694 | pci_using_dac = 1; |
604 | } else { | 695 | } else { |
605 | if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) { | 696 | if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) && |
697 | (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) { | ||
606 | E1000_ERR("No usable DMA configuration, aborting\n"); | 698 | E1000_ERR("No usable DMA configuration, aborting\n"); |
607 | return err; | 699 | return err; |
608 | } | 700 | } |
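The probe now sets the consistent (descriptor-ring) DMA mask alongside the streaming mask before claiming DAC support, instead of only the streaming one. The fallback ladder on its own, roughly (my_set_dma and using_dac are placeholder names):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int my_set_dma(struct pci_dev *pdev, int *using_dac)
{
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
		*using_dac = 1;			/* 64-bit addressing for both paths */
		return 0;
	}
	*using_dac = 0;
	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) ||
	    pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))
		return -EIO;			/* no usable DMA configuration */
	return 0;
}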
@@ -682,6 +774,19 @@ e1000_probe(struct pci_dev *pdev, | |||
682 | if ((err = e1000_sw_init(adapter))) | 774 | if ((err = e1000_sw_init(adapter))) |
683 | goto err_sw_init; | 775 | goto err_sw_init; |
684 | 776 | ||
777 | /* Flash BAR mapping must happen after e1000_sw_init | ||
778 | * because it depends on mac_type */ | ||
779 | if ((adapter->hw.mac_type == e1000_ich8lan) && | ||
780 | (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { | ||
781 | flash_start = pci_resource_start(pdev, 1); | ||
782 | flash_len = pci_resource_len(pdev, 1); | ||
783 | adapter->hw.flash_address = ioremap(flash_start, flash_len); | ||
784 | if (!adapter->hw.flash_address) { | ||
785 | err = -EIO; | ||
786 | goto err_flashmap; | ||
787 | } | ||
788 | } | ||
789 | |||
685 | if ((err = e1000_check_phy_reset_block(&adapter->hw))) | 790 | if ((err = e1000_check_phy_reset_block(&adapter->hw))) |
686 | DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n"); | 791 | DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n"); |
687 | 792 | ||
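ICH8 keeps its NVM behind a separate flash BAR (BAR 1), so the mapping has to wait until e1000_sw_init() has identified the MAC type; on failure the new err_flashmap label (see the error-path hunk further down) unwinds it. The BAR-mapping step in isolation, as a rough sketch (map_optional_bar is a made-up helper name):

#include <linux/pci.h>
#include <asm/io.h>

static void __iomem *map_optional_bar(struct pci_dev *pdev, int bar)
{
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
		return NULL;				/* BAR not present on this part */
	return ioremap(pci_resource_start(pdev, bar),
		       pci_resource_len(pdev, bar));	/* NULL here becomes -EIO upstream */
}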
@@ -700,6 +805,8 @@ e1000_probe(struct pci_dev *pdev, | |||
700 | NETIF_F_HW_VLAN_TX | | 805 | NETIF_F_HW_VLAN_TX | |
701 | NETIF_F_HW_VLAN_RX | | 806 | NETIF_F_HW_VLAN_RX | |
702 | NETIF_F_HW_VLAN_FILTER; | 807 | NETIF_F_HW_VLAN_FILTER; |
808 | if (adapter->hw.mac_type == e1000_ich8lan) | ||
809 | netdev->features &= ~NETIF_F_HW_VLAN_FILTER; | ||
703 | } | 810 | } |
704 | 811 | ||
705 | #ifdef NETIF_F_TSO | 812 | #ifdef NETIF_F_TSO |
@@ -715,11 +822,17 @@ e1000_probe(struct pci_dev *pdev, | |||
715 | if (pci_using_dac) | 822 | if (pci_using_dac) |
716 | netdev->features |= NETIF_F_HIGHDMA; | 823 | netdev->features |= NETIF_F_HIGHDMA; |
717 | 824 | ||
718 | /* hard_start_xmit is safe against parallel locking */ | ||
719 | netdev->features |= NETIF_F_LLTX; | 825 | netdev->features |= NETIF_F_LLTX; |
720 | 826 | ||
721 | adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw); | 827 | adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw); |
722 | 828 | ||
829 | /* initialize eeprom parameters */ | ||
830 | |||
831 | if (e1000_init_eeprom_params(&adapter->hw)) { | ||
832 | E1000_ERR("EEPROM initialization failed\n"); | ||
833 | return -EIO; | ||
834 | } | ||
835 | |||
723 | /* before reading the EEPROM, reset the controller to | 836 | /* before reading the EEPROM, reset the controller to |
724 | * put the device in a known good starting state */ | 837 | * put the device in a known good starting state */ |
725 | 838 | ||
@@ -758,9 +871,6 @@ e1000_probe(struct pci_dev *pdev, | |||
758 | adapter->watchdog_timer.function = &e1000_watchdog; | 871 | adapter->watchdog_timer.function = &e1000_watchdog; |
759 | adapter->watchdog_timer.data = (unsigned long) adapter; | 872 | adapter->watchdog_timer.data = (unsigned long) adapter; |
760 | 873 | ||
761 | INIT_WORK(&adapter->watchdog_task, | ||
762 | (void (*)(void *))e1000_watchdog_task, adapter); | ||
763 | |||
764 | init_timer(&adapter->phy_info_timer); | 874 | init_timer(&adapter->phy_info_timer); |
765 | adapter->phy_info_timer.function = &e1000_update_phy_info; | 875 | adapter->phy_info_timer.function = &e1000_update_phy_info; |
766 | adapter->phy_info_timer.data = (unsigned long) adapter; | 876 | adapter->phy_info_timer.data = (unsigned long) adapter; |
@@ -790,6 +900,11 @@ e1000_probe(struct pci_dev *pdev, | |||
790 | EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data); | 900 | EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data); |
791 | eeprom_apme_mask = E1000_EEPROM_82544_APM; | 901 | eeprom_apme_mask = E1000_EEPROM_82544_APM; |
792 | break; | 902 | break; |
903 | case e1000_ich8lan: | ||
904 | e1000_read_eeprom(&adapter->hw, | ||
905 | EEPROM_INIT_CONTROL1_REG, 1, &eeprom_data); | ||
906 | eeprom_apme_mask = E1000_EEPROM_ICH8_APME; | ||
907 | break; | ||
793 | case e1000_82546: | 908 | case e1000_82546: |
794 | case e1000_82546_rev_3: | 909 | case e1000_82546_rev_3: |
795 | case e1000_82571: | 910 | case e1000_82571: |
@@ -849,6 +964,9 @@ e1000_probe(struct pci_dev *pdev, | |||
849 | return 0; | 964 | return 0; |
850 | 965 | ||
851 | err_register: | 966 | err_register: |
967 | if (adapter->hw.flash_address) | ||
968 | iounmap(adapter->hw.flash_address); | ||
969 | err_flashmap: | ||
852 | err_sw_init: | 970 | err_sw_init: |
853 | err_eeprom: | 971 | err_eeprom: |
854 | iounmap(adapter->hw.hw_addr); | 972 | iounmap(adapter->hw.hw_addr); |
@@ -882,6 +1000,7 @@ e1000_remove(struct pci_dev *pdev) | |||
882 | flush_scheduled_work(); | 1000 | flush_scheduled_work(); |
883 | 1001 | ||
884 | if (adapter->hw.mac_type >= e1000_82540 && | 1002 | if (adapter->hw.mac_type >= e1000_82540 && |
1003 | adapter->hw.mac_type != e1000_ich8lan && | ||
885 | adapter->hw.media_type == e1000_media_type_copper) { | 1004 | adapter->hw.media_type == e1000_media_type_copper) { |
886 | manc = E1000_READ_REG(&adapter->hw, MANC); | 1005 | manc = E1000_READ_REG(&adapter->hw, MANC); |
887 | if (manc & E1000_MANC_SMBUS_EN) { | 1006 | if (manc & E1000_MANC_SMBUS_EN) { |
@@ -910,6 +1029,8 @@ e1000_remove(struct pci_dev *pdev) | |||
910 | #endif | 1029 | #endif |
911 | 1030 | ||
912 | iounmap(adapter->hw.hw_addr); | 1031 | iounmap(adapter->hw.hw_addr); |
1032 | if (adapter->hw.flash_address) | ||
1033 | iounmap(adapter->hw.flash_address); | ||
913 | pci_release_regions(pdev); | 1034 | pci_release_regions(pdev); |
914 | 1035 | ||
915 | free_netdev(netdev); | 1036 | free_netdev(netdev); |
@@ -947,7 +1068,7 @@ e1000_sw_init(struct e1000_adapter *adapter) | |||
947 | 1068 | ||
948 | pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); | 1069 | pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); |
949 | 1070 | ||
950 | adapter->rx_buffer_len = MAXIMUM_ETHERNET_FRAME_SIZE; | 1071 | adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; |
951 | adapter->rx_ps_bsize0 = E1000_RXBUFFER_128; | 1072 | adapter->rx_ps_bsize0 = E1000_RXBUFFER_128; |
952 | hw->max_frame_size = netdev->mtu + | 1073 | hw->max_frame_size = netdev->mtu + |
953 | ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; | 1074 | ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; |
@@ -960,13 +1081,6 @@ e1000_sw_init(struct e1000_adapter *adapter) | |||
960 | return -EIO; | 1081 | return -EIO; |
961 | } | 1082 | } |
962 | 1083 | ||
963 | /* initialize eeprom parameters */ | ||
964 | |||
965 | if (e1000_init_eeprom_params(hw)) { | ||
966 | E1000_ERR("EEPROM initialization failed\n"); | ||
967 | return -EIO; | ||
968 | } | ||
969 | |||
970 | switch (hw->mac_type) { | 1084 | switch (hw->mac_type) { |
971 | default: | 1085 | default: |
972 | break; | 1086 | break; |
@@ -1078,6 +1192,10 @@ e1000_open(struct net_device *netdev) | |||
1078 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1192 | struct e1000_adapter *adapter = netdev_priv(netdev); |
1079 | int err; | 1193 | int err; |
1080 | 1194 | ||
1195 | /* disallow open during test */ | ||
1196 | if (test_bit(__E1000_DRIVER_TESTING, &adapter->flags)) | ||
1197 | return -EBUSY; | ||
1198 | |||
1081 | /* allocate transmit descriptors */ | 1199 | /* allocate transmit descriptors */ |
1082 | 1200 | ||
1083 | if ((err = e1000_setup_all_tx_resources(adapter))) | 1201 | if ((err = e1000_setup_all_tx_resources(adapter))) |
@@ -1088,6 +1206,12 @@ e1000_open(struct net_device *netdev) | |||
1088 | if ((err = e1000_setup_all_rx_resources(adapter))) | 1206 | if ((err = e1000_setup_all_rx_resources(adapter))) |
1089 | goto err_setup_rx; | 1207 | goto err_setup_rx; |
1090 | 1208 | ||
1209 | err = e1000_request_irq(adapter); | ||
1210 | if (err) | ||
1211 | goto err_up; | ||
1212 | |||
1213 | e1000_power_up_phy(adapter); | ||
1214 | |||
1091 | if ((err = e1000_up(adapter))) | 1215 | if ((err = e1000_up(adapter))) |
1092 | goto err_up; | 1216 | goto err_up; |
1093 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; | 1217 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; |
@@ -1131,7 +1255,10 @@ e1000_close(struct net_device *netdev) | |||
1131 | { | 1255 | { |
1132 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1256 | struct e1000_adapter *adapter = netdev_priv(netdev); |
1133 | 1257 | ||
1258 | WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); | ||
1134 | e1000_down(adapter); | 1259 | e1000_down(adapter); |
1260 | e1000_power_down_phy(adapter); | ||
1261 | e1000_free_irq(adapter); | ||
1135 | 1262 | ||
1136 | e1000_free_all_tx_resources(adapter); | 1263 | e1000_free_all_tx_resources(adapter); |
1137 | e1000_free_all_rx_resources(adapter); | 1264 | e1000_free_all_rx_resources(adapter); |
@@ -1189,8 +1316,7 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter, | |||
1189 | int size; | 1316 | int size; |
1190 | 1317 | ||
1191 | size = sizeof(struct e1000_buffer) * txdr->count; | 1318 | size = sizeof(struct e1000_buffer) * txdr->count; |
1192 | 1319 | txdr->buffer_info = vmalloc(size); | |
1193 | txdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus)); | ||
1194 | if (!txdr->buffer_info) { | 1320 | if (!txdr->buffer_info) { |
1195 | DPRINTK(PROBE, ERR, | 1321 | DPRINTK(PROBE, ERR, |
1196 | "Unable to allocate memory for the transmit descriptor ring\n"); | 1322 | "Unable to allocate memory for the transmit descriptor ring\n"); |
@@ -1302,11 +1428,11 @@ e1000_configure_tx(struct e1000_adapter *adapter) | |||
1302 | tdba = adapter->tx_ring[0].dma; | 1428 | tdba = adapter->tx_ring[0].dma; |
1303 | tdlen = adapter->tx_ring[0].count * | 1429 | tdlen = adapter->tx_ring[0].count * |
1304 | sizeof(struct e1000_tx_desc); | 1430 | sizeof(struct e1000_tx_desc); |
1305 | E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL)); | ||
1306 | E1000_WRITE_REG(hw, TDBAH, (tdba >> 32)); | ||
1307 | E1000_WRITE_REG(hw, TDLEN, tdlen); | 1431 | E1000_WRITE_REG(hw, TDLEN, tdlen); |
1308 | E1000_WRITE_REG(hw, TDH, 0); | 1432 | E1000_WRITE_REG(hw, TDBAH, (tdba >> 32)); |
1433 | E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL)); | ||
1309 | E1000_WRITE_REG(hw, TDT, 0); | 1434 | E1000_WRITE_REG(hw, TDT, 0); |
1435 | E1000_WRITE_REG(hw, TDH, 0); | ||
1310 | adapter->tx_ring[0].tdh = E1000_TDH; | 1436 | adapter->tx_ring[0].tdh = E1000_TDH; |
1311 | adapter->tx_ring[0].tdt = E1000_TDT; | 1437 | adapter->tx_ring[0].tdt = E1000_TDT; |
1312 | break; | 1438 | break; |
@@ -1418,7 +1544,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter, | |||
1418 | int size, desc_len; | 1544 | int size, desc_len; |
1419 | 1545 | ||
1420 | size = sizeof(struct e1000_buffer) * rxdr->count; | 1546 | size = sizeof(struct e1000_buffer) * rxdr->count; |
1421 | rxdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus)); | 1547 | rxdr->buffer_info = vmalloc(size); |
1422 | if (!rxdr->buffer_info) { | 1548 | if (!rxdr->buffer_info) { |
1423 | DPRINTK(PROBE, ERR, | 1549 | DPRINTK(PROBE, ERR, |
1424 | "Unable to allocate memory for the receive descriptor ring\n"); | 1550 | "Unable to allocate memory for the receive descriptor ring\n"); |
@@ -1560,9 +1686,6 @@ e1000_setup_rctl(struct e1000_adapter *adapter) | |||
1560 | E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | | 1686 | E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | |
1561 | (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT); | 1687 | (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT); |
1562 | 1688 | ||
1563 | if (adapter->hw.mac_type > e1000_82543) | ||
1564 | rctl |= E1000_RCTL_SECRC; | ||
1565 | |||
1566 | if (adapter->hw.tbi_compatibility_on == 1) | 1689 | if (adapter->hw.tbi_compatibility_on == 1) |
1567 | rctl |= E1000_RCTL_SBP; | 1690 | rctl |= E1000_RCTL_SBP; |
1568 | else | 1691 | else |
@@ -1628,7 +1751,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter) | |||
1628 | rfctl |= E1000_RFCTL_IPV6_DIS; | 1751 | rfctl |= E1000_RFCTL_IPV6_DIS; |
1629 | E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl); | 1752 | E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl); |
1630 | 1753 | ||
1631 | rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC; | 1754 | rctl |= E1000_RCTL_DTYP_PS; |
1632 | 1755 | ||
1633 | psrctl |= adapter->rx_ps_bsize0 >> | 1756 | psrctl |= adapter->rx_ps_bsize0 >> |
1634 | E1000_PSRCTL_BSIZE0_SHIFT; | 1757 | E1000_PSRCTL_BSIZE0_SHIFT; |
@@ -1712,11 +1835,11 @@ e1000_configure_rx(struct e1000_adapter *adapter) | |||
1712 | case 1: | 1835 | case 1: |
1713 | default: | 1836 | default: |
1714 | rdba = adapter->rx_ring[0].dma; | 1837 | rdba = adapter->rx_ring[0].dma; |
1715 | E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL)); | ||
1716 | E1000_WRITE_REG(hw, RDBAH, (rdba >> 32)); | ||
1717 | E1000_WRITE_REG(hw, RDLEN, rdlen); | 1838 | E1000_WRITE_REG(hw, RDLEN, rdlen); |
1718 | E1000_WRITE_REG(hw, RDH, 0); | 1839 | E1000_WRITE_REG(hw, RDBAH, (rdba >> 32)); |
1840 | E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL)); | ||
1719 | E1000_WRITE_REG(hw, RDT, 0); | 1841 | E1000_WRITE_REG(hw, RDT, 0); |
1842 | E1000_WRITE_REG(hw, RDH, 0); | ||
1720 | adapter->rx_ring[0].rdh = E1000_RDH; | 1843 | adapter->rx_ring[0].rdh = E1000_RDH; |
1721 | adapter->rx_ring[0].rdt = E1000_RDT; | 1844 | adapter->rx_ring[0].rdt = E1000_RDT; |
1722 | break; | 1845 | break; |
@@ -1741,9 +1864,6 @@ e1000_configure_rx(struct e1000_adapter *adapter) | |||
1741 | E1000_WRITE_REG(hw, RXCSUM, rxcsum); | 1864 | E1000_WRITE_REG(hw, RXCSUM, rxcsum); |
1742 | } | 1865 | } |
1743 | 1866 | ||
1744 | if (hw->mac_type == e1000_82573) | ||
1745 | E1000_WRITE_REG(hw, ERT, 0x0100); | ||
1746 | |||
1747 | /* Enable Receives */ | 1867 | /* Enable Receives */ |
1748 | E1000_WRITE_REG(hw, RCTL, rctl); | 1868 | E1000_WRITE_REG(hw, RCTL, rctl); |
1749 | } | 1869 | } |
@@ -2083,6 +2203,12 @@ e1000_set_multi(struct net_device *netdev) | |||
2083 | uint32_t rctl; | 2203 | uint32_t rctl; |
2084 | uint32_t hash_value; | 2204 | uint32_t hash_value; |
2085 | int i, rar_entries = E1000_RAR_ENTRIES; | 2205 | int i, rar_entries = E1000_RAR_ENTRIES; |
2206 | int mta_reg_count = (hw->mac_type == e1000_ich8lan) ? | ||
2207 | E1000_NUM_MTA_REGISTERS_ICH8LAN : | ||
2208 | E1000_NUM_MTA_REGISTERS; | ||
2209 | |||
2210 | if (adapter->hw.mac_type == e1000_ich8lan) | ||
2211 | rar_entries = E1000_RAR_ENTRIES_ICH8LAN; | ||
2086 | 2212 | ||
2087 | /* reserve RAR[14] for LAA over-write work-around */ | 2213 | /* reserve RAR[14] for LAA over-write work-around */ |
2088 | if (adapter->hw.mac_type == e1000_82571) | 2214 | if (adapter->hw.mac_type == e1000_82571) |
@@ -2121,14 +2247,18 @@ e1000_set_multi(struct net_device *netdev) | |||
2121 | mc_ptr = mc_ptr->next; | 2247 | mc_ptr = mc_ptr->next; |
2122 | } else { | 2248 | } else { |
2123 | E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0); | 2249 | E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0); |
2250 | E1000_WRITE_FLUSH(hw); | ||
2124 | E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0); | 2251 | E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0); |
2252 | E1000_WRITE_FLUSH(hw); | ||
2125 | } | 2253 | } |
2126 | } | 2254 | } |
2127 | 2255 | ||
2128 | /* clear the old settings from the multicast hash table */ | 2256 | /* clear the old settings from the multicast hash table */ |
2129 | 2257 | ||
2130 | for (i = 0; i < E1000_NUM_MTA_REGISTERS; i++) | 2258 | for (i = 0; i < mta_reg_count; i++) { |
2131 | E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); | 2259 | E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); |
2260 | E1000_WRITE_FLUSH(hw); | ||
2261 | } | ||
2132 | 2262 | ||
2133 | /* load any remaining addresses into the hash table */ | 2263 | /* load any remaining addresses into the hash table */ |
2134 | 2264 | ||
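The added E1000_WRITE_FLUSH() after each receive-address and multicast-table write forces the posted MMIO write out before the next one; on this hardware the flush is just a read of the STATUS register (see the e1000_osdep.h macro near the end of this diff). The bare idiom, with illustrative offsets rather than the driver's macros:

#include <linux/types.h>
#include <asm/io.h>

/* Sketch only: the 0x5200 (MTA array) and 0x0008 (STATUS) offsets are
 * illustrative of e1000, not something to rely on here. */
static void mta_write_flushed(void __iomem *regs, unsigned int i, u32 val)
{
	writel(val, regs + 0x5200 + i * 4);	/* multicast table entry i */
	(void)readl(regs + 0x0008);		/* read-back flushes the posted write */
}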
@@ -2201,19 +2331,19 @@ static void | |||
2201 | e1000_watchdog(unsigned long data) | 2331 | e1000_watchdog(unsigned long data) |
2202 | { | 2332 | { |
2203 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; | 2333 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; |
2204 | |||
2205 | /* Do the rest outside of interrupt context */ | ||
2206 | schedule_work(&adapter->watchdog_task); | ||
2207 | } | ||
2208 | |||
2209 | static void | ||
2210 | e1000_watchdog_task(struct e1000_adapter *adapter) | ||
2211 | { | ||
2212 | struct net_device *netdev = adapter->netdev; | 2334 | struct net_device *netdev = adapter->netdev; |
2213 | struct e1000_tx_ring *txdr = adapter->tx_ring; | 2335 | struct e1000_tx_ring *txdr = adapter->tx_ring; |
2214 | uint32_t link, tctl; | 2336 | uint32_t link, tctl; |
2215 | 2337 | int32_t ret_val; | |
2216 | e1000_check_for_link(&adapter->hw); | 2338 | |
2339 | ret_val = e1000_check_for_link(&adapter->hw); | ||
2340 | if ((ret_val == E1000_ERR_PHY) && | ||
2341 | (adapter->hw.phy_type == e1000_phy_igp_3) && | ||
2342 | (E1000_READ_REG(&adapter->hw, CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { | ||
2343 | /* See e1000_kumeran_lock_loss_workaround() */ | ||
2344 | DPRINTK(LINK, INFO, | ||
2345 | "Gigabit has been disabled, downgrading speed\n"); | ||
2346 | } | ||
2217 | if (adapter->hw.mac_type == e1000_82573) { | 2347 | if (adapter->hw.mac_type == e1000_82573) { |
2218 | e1000_enable_tx_pkt_filtering(&adapter->hw); | 2348 | e1000_enable_tx_pkt_filtering(&adapter->hw); |
2219 | if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id) | 2349 | if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id) |
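With the watchdog_task work item gone (its INIT_WORK was dropped in the probe hunk above), the timer callback does the link check itself again, and now also reports when the IGP3 Kumeran lock-loss workaround has disabled gigabit. The timer plumbing this relies on is the pre-timer_setup() style; a minimal sketch (my_priv and my_watchdog are invented names):

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_priv {
	struct timer_list watchdog_timer;
};

static void my_watchdog(unsigned long data)
{
	struct my_priv *priv = (struct my_priv *)data;

	/* ... check link, update counters ... */
	mod_timer(&priv->watchdog_timer, jiffies + 2 * HZ);	/* re-arm in ~2 s */
}

static void my_watchdog_start(struct my_priv *priv)
{
	init_timer(&priv->watchdog_timer);
	priv->watchdog_timer.function = my_watchdog;
	priv->watchdog_timer.data = (unsigned long)priv;
	mod_timer(&priv->watchdog_timer, jiffies);		/* first run right away */
}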
@@ -2394,7 +2524,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
2394 | uint8_t ipcss, ipcso, tucss, tucso, hdr_len; | 2524 | uint8_t ipcss, ipcso, tucss, tucso, hdr_len; |
2395 | int err; | 2525 | int err; |
2396 | 2526 | ||
2397 | if (skb_shinfo(skb)->gso_size) { | 2527 | if (skb_is_gso(skb)) { |
2398 | if (skb_header_cloned(skb)) { | 2528 | if (skb_header_cloned(skb)) { |
2399 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); | 2529 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); |
2400 | if (err) | 2530 | if (err) |
@@ -2519,7 +2649,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
2519 | * tso gets written back prematurely before the data is fully | 2649 | * tso gets written back prematurely before the data is fully |
2520 | * DMA'd to the controller */ | 2650 | * DMA'd to the controller */ |
2521 | if (!skb->data_len && tx_ring->last_tx_tso && | 2651 | if (!skb->data_len && tx_ring->last_tx_tso && |
2522 | !skb_shinfo(skb)->gso_size) { | 2652 | !skb_is_gso(skb)) { |
2523 | tx_ring->last_tx_tso = 0; | 2653 | tx_ring->last_tx_tso = 0; |
2524 | size -= 4; | 2654 | size -= 4; |
2525 | } | 2655 | } |
@@ -2779,9 +2909,10 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
2779 | case e1000_82571: | 2909 | case e1000_82571: |
2780 | case e1000_82572: | 2910 | case e1000_82572: |
2781 | case e1000_82573: | 2911 | case e1000_82573: |
2912 | case e1000_ich8lan: | ||
2782 | pull_size = min((unsigned int)4, skb->data_len); | 2913 | pull_size = min((unsigned int)4, skb->data_len); |
2783 | if (!__pskb_pull_tail(skb, pull_size)) { | 2914 | if (!__pskb_pull_tail(skb, pull_size)) { |
2784 | printk(KERN_ERR | 2915 | DPRINTK(DRV, ERR, |
2785 | "__pskb_pull_tail failed.\n"); | 2916 | "__pskb_pull_tail failed.\n"); |
2786 | dev_kfree_skb_any(skb); | 2917 | dev_kfree_skb_any(skb); |
2787 | return NETDEV_TX_OK; | 2918 | return NETDEV_TX_OK; |
@@ -2806,8 +2937,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
2806 | 2937 | ||
2807 | #ifdef NETIF_F_TSO | 2938 | #ifdef NETIF_F_TSO |
2808 | /* Controller Erratum workaround */ | 2939 | /* Controller Erratum workaround */ |
2809 | if (!skb->data_len && tx_ring->last_tx_tso && | 2940 | if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) |
2810 | !skb_shinfo(skb)->gso_size) | ||
2811 | count++; | 2941 | count++; |
2812 | #endif | 2942 | #endif |
2813 | 2943 | ||
@@ -2919,8 +3049,7 @@ e1000_reset_task(struct net_device *netdev) | |||
2919 | { | 3049 | { |
2920 | struct e1000_adapter *adapter = netdev_priv(netdev); | 3050 | struct e1000_adapter *adapter = netdev_priv(netdev); |
2921 | 3051 | ||
2922 | e1000_down(adapter); | 3052 | e1000_reinit_locked(adapter); |
2923 | e1000_up(adapter); | ||
2924 | } | 3053 | } |
2925 | 3054 | ||
2926 | /** | 3055 | /** |
@@ -2964,6 +3093,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
2964 | /* Adapter-specific max frame size limits. */ | 3093 | /* Adapter-specific max frame size limits. */ |
2965 | switch (adapter->hw.mac_type) { | 3094 | switch (adapter->hw.mac_type) { |
2966 | case e1000_undefined ... e1000_82542_rev2_1: | 3095 | case e1000_undefined ... e1000_82542_rev2_1: |
3096 | case e1000_ich8lan: | ||
2967 | if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) { | 3097 | if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) { |
2968 | DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n"); | 3098 | DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n"); |
2969 | return -EINVAL; | 3099 | return -EINVAL; |
@@ -2997,7 +3127,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
2997 | break; | 3127 | break; |
2998 | } | 3128 | } |
2999 | 3129 | ||
3000 | /* NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN | 3130 | /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN |
3001 | * means we reserve 2 more, this pushes us to allocate from the next | 3131 | * means we reserve 2 more, this pushes us to allocate from the next |
3002 | * larger slab size | 3132 | * larger slab size |
3003 | * i.e. RXBUFFER_2048 --> size-4096 slab */ | 3133 | * i.e. RXBUFFER_2048 --> size-4096 slab */ |
@@ -3018,7 +3148,6 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
3018 | adapter->rx_buffer_len = E1000_RXBUFFER_16384; | 3148 | adapter->rx_buffer_len = E1000_RXBUFFER_16384; |
3019 | 3149 | ||
3020 | /* adjust allocation if LPE protects us, and we aren't using SBP */ | 3150 | /* adjust allocation if LPE protects us, and we aren't using SBP */ |
3021 | #define MAXIMUM_ETHERNET_VLAN_SIZE 1522 | ||
3022 | if (!adapter->hw.tbi_compatibility_on && | 3151 | if (!adapter->hw.tbi_compatibility_on && |
3023 | ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) || | 3152 | ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) || |
3024 | (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) | 3153 | (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) |
@@ -3026,10 +3155,8 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
3026 | 3155 | ||
3027 | netdev->mtu = new_mtu; | 3156 | netdev->mtu = new_mtu; |
3028 | 3157 | ||
3029 | if (netif_running(netdev)) { | 3158 | if (netif_running(netdev)) |
3030 | e1000_down(adapter); | 3159 | e1000_reinit_locked(adapter); |
3031 | e1000_up(adapter); | ||
3032 | } | ||
3033 | 3160 | ||
3034 | adapter->hw.max_frame_size = max_frame; | 3161 | adapter->hw.max_frame_size = max_frame; |
3035 | 3162 | ||
@@ -3074,12 +3201,15 @@ e1000_update_stats(struct e1000_adapter *adapter) | |||
3074 | adapter->stats.bprc += E1000_READ_REG(hw, BPRC); | 3201 | adapter->stats.bprc += E1000_READ_REG(hw, BPRC); |
3075 | adapter->stats.mprc += E1000_READ_REG(hw, MPRC); | 3202 | adapter->stats.mprc += E1000_READ_REG(hw, MPRC); |
3076 | adapter->stats.roc += E1000_READ_REG(hw, ROC); | 3203 | adapter->stats.roc += E1000_READ_REG(hw, ROC); |
3204 | |||
3205 | if (adapter->hw.mac_type != e1000_ich8lan) { | ||
3077 | adapter->stats.prc64 += E1000_READ_REG(hw, PRC64); | 3206 | adapter->stats.prc64 += E1000_READ_REG(hw, PRC64); |
3078 | adapter->stats.prc127 += E1000_READ_REG(hw, PRC127); | 3207 | adapter->stats.prc127 += E1000_READ_REG(hw, PRC127); |
3079 | adapter->stats.prc255 += E1000_READ_REG(hw, PRC255); | 3208 | adapter->stats.prc255 += E1000_READ_REG(hw, PRC255); |
3080 | adapter->stats.prc511 += E1000_READ_REG(hw, PRC511); | 3209 | adapter->stats.prc511 += E1000_READ_REG(hw, PRC511); |
3081 | adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023); | 3210 | adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023); |
3082 | adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522); | 3211 | adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522); |
3212 | } | ||
3083 | 3213 | ||
3084 | adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS); | 3214 | adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS); |
3085 | adapter->stats.mpc += E1000_READ_REG(hw, MPC); | 3215 | adapter->stats.mpc += E1000_READ_REG(hw, MPC); |
@@ -3107,12 +3237,16 @@ e1000_update_stats(struct e1000_adapter *adapter) | |||
3107 | adapter->stats.totl += E1000_READ_REG(hw, TOTL); | 3237 | adapter->stats.totl += E1000_READ_REG(hw, TOTL); |
3108 | adapter->stats.toth += E1000_READ_REG(hw, TOTH); | 3238 | adapter->stats.toth += E1000_READ_REG(hw, TOTH); |
3109 | adapter->stats.tpr += E1000_READ_REG(hw, TPR); | 3239 | adapter->stats.tpr += E1000_READ_REG(hw, TPR); |
3240 | |||
3241 | if (adapter->hw.mac_type != e1000_ich8lan) { | ||
3110 | adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64); | 3242 | adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64); |
3111 | adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127); | 3243 | adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127); |
3112 | adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255); | 3244 | adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255); |
3113 | adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511); | 3245 | adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511); |
3114 | adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023); | 3246 | adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023); |
3115 | adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522); | 3247 | adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522); |
3248 | } | ||
3249 | |||
3116 | adapter->stats.mptc += E1000_READ_REG(hw, MPTC); | 3250 | adapter->stats.mptc += E1000_READ_REG(hw, MPTC); |
3117 | adapter->stats.bptc += E1000_READ_REG(hw, BPTC); | 3251 | adapter->stats.bptc += E1000_READ_REG(hw, BPTC); |
3118 | 3252 | ||
@@ -3134,6 +3268,8 @@ e1000_update_stats(struct e1000_adapter *adapter) | |||
3134 | if (hw->mac_type > e1000_82547_rev_2) { | 3268 | if (hw->mac_type > e1000_82547_rev_2) { |
3135 | adapter->stats.iac += E1000_READ_REG(hw, IAC); | 3269 | adapter->stats.iac += E1000_READ_REG(hw, IAC); |
3136 | adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC); | 3270 | adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC); |
3271 | |||
3272 | if (adapter->hw.mac_type != e1000_ich8lan) { | ||
3137 | adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC); | 3273 | adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC); |
3138 | adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC); | 3274 | adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC); |
3139 | adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC); | 3275 | adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC); |
@@ -3141,6 +3277,7 @@ e1000_update_stats(struct e1000_adapter *adapter) | |||
3141 | adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC); | 3277 | adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC); |
3142 | adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC); | 3278 | adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC); |
3143 | adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC); | 3279 | adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC); |
3280 | } | ||
3144 | } | 3281 | } |
3145 | 3282 | ||
3146 | /* Fill out the OS statistics structure */ | 3283 | /* Fill out the OS statistics structure */ |
@@ -3249,8 +3386,8 @@ e1000_intr(int irq, void *data, struct pt_regs *regs) | |||
3249 | E1000_WRITE_REG(hw, IMC, ~0); | 3386 | E1000_WRITE_REG(hw, IMC, ~0); |
3250 | E1000_WRITE_FLUSH(hw); | 3387 | E1000_WRITE_FLUSH(hw); |
3251 | } | 3388 | } |
3252 | if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0]))) | 3389 | if (likely(netif_rx_schedule_prep(netdev))) |
3253 | __netif_rx_schedule(&adapter->polling_netdev[0]); | 3390 | __netif_rx_schedule(netdev); |
3254 | else | 3391 | else |
3255 | e1000_irq_enable(adapter); | 3392 | e1000_irq_enable(adapter); |
3256 | #else | 3393 | #else |
@@ -3293,34 +3430,26 @@ e1000_clean(struct net_device *poll_dev, int *budget) | |||
3293 | { | 3430 | { |
3294 | struct e1000_adapter *adapter; | 3431 | struct e1000_adapter *adapter; |
3295 | int work_to_do = min(*budget, poll_dev->quota); | 3432 | int work_to_do = min(*budget, poll_dev->quota); |
3296 | int tx_cleaned = 0, i = 0, work_done = 0; | 3433 | int tx_cleaned = 0, work_done = 0; |
3297 | 3434 | ||
3298 | /* Must NOT use netdev_priv macro here. */ | 3435 | /* Must NOT use netdev_priv macro here. */ |
3299 | adapter = poll_dev->priv; | 3436 | adapter = poll_dev->priv; |
3300 | 3437 | ||
3301 | /* Keep link state information with original netdev */ | 3438 | /* Keep link state information with original netdev */ |
3302 | if (!netif_carrier_ok(adapter->netdev)) | 3439 | if (!netif_carrier_ok(poll_dev)) |
3303 | goto quit_polling; | 3440 | goto quit_polling; |
3304 | 3441 | ||
3305 | while (poll_dev != &adapter->polling_netdev[i]) { | 3442 | /* e1000_clean is called per-cpu. This lock protects |
3306 | i++; | 3443 | * tx_ring[0] from being cleaned by multiple cpus |
3307 | BUG_ON(i == adapter->num_rx_queues); | 3444 | * simultaneously. A failure obtaining the lock means |
3445 | * tx_ring[0] is currently being cleaned anyway. */ | ||
3446 | if (spin_trylock(&adapter->tx_queue_lock)) { | ||
3447 | tx_cleaned = e1000_clean_tx_irq(adapter, | ||
3448 | &adapter->tx_ring[0]); | ||
3449 | spin_unlock(&adapter->tx_queue_lock); | ||
3308 | } | 3450 | } |
3309 | 3451 | ||
3310 | if (likely(adapter->num_tx_queues == 1)) { | 3452 | adapter->clean_rx(adapter, &adapter->rx_ring[0], |
3311 | /* e1000_clean is called per-cpu. This lock protects | ||
3312 | * tx_ring[0] from being cleaned by multiple cpus | ||
3313 | * simultaneously. A failure obtaining the lock means | ||
3314 | * tx_ring[0] is currently being cleaned anyway. */ | ||
3315 | if (spin_trylock(&adapter->tx_queue_lock)) { | ||
3316 | tx_cleaned = e1000_clean_tx_irq(adapter, | ||
3317 | &adapter->tx_ring[0]); | ||
3318 | spin_unlock(&adapter->tx_queue_lock); | ||
3319 | } | ||
3320 | } else | ||
3321 | tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]); | ||
3322 | |||
3323 | adapter->clean_rx(adapter, &adapter->rx_ring[i], | ||
3324 | &work_done, work_to_do); | 3453 | &work_done, work_to_do); |
3325 | 3454 | ||
3326 | *budget -= work_done; | 3455 | *budget -= work_done; |
@@ -3328,7 +3457,7 @@ e1000_clean(struct net_device *poll_dev, int *budget) | |||
3328 | 3457 | ||
3329 | /* If no Tx and not enough Rx work done, exit the polling mode */ | 3458 | /* If no Tx and not enough Rx work done, exit the polling mode */ |
3330 | if ((!tx_cleaned && (work_done == 0)) || | 3459 | if ((!tx_cleaned && (work_done == 0)) || |
3331 | !netif_running(adapter->netdev)) { | 3460 | !netif_running(poll_dev)) { |
3332 | quit_polling: | 3461 | quit_polling: |
3333 | netif_rx_complete(poll_dev); | 3462 | netif_rx_complete(poll_dev); |
3334 | e1000_irq_enable(adapter); | 3463 | e1000_irq_enable(adapter); |
@@ -3543,11 +3672,15 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
3543 | 3672 | ||
3544 | length = le16_to_cpu(rx_desc->length); | 3673 | length = le16_to_cpu(rx_desc->length); |
3545 | 3674 | ||
3675 | /* adjust length to remove Ethernet CRC */ | ||
3676 | length -= 4; | ||
3677 | |||
3546 | if (unlikely(!(status & E1000_RXD_STAT_EOP))) { | 3678 | if (unlikely(!(status & E1000_RXD_STAT_EOP))) { |
3547 | /* All receives must fit into a single buffer */ | 3679 | /* All receives must fit into a single buffer */ |
3548 | E1000_DBG("%s: Receive packet consumed multiple" | 3680 | E1000_DBG("%s: Receive packet consumed multiple" |
3549 | " buffers\n", netdev->name); | 3681 | " buffers\n", netdev->name); |
3550 | dev_kfree_skb_irq(skb); | 3682 | /* recycle */ |
3683 | buffer_info-> skb = skb; | ||
3551 | goto next_desc; | 3684 | goto next_desc; |
3552 | } | 3685 | } |
3553 | 3686 | ||
@@ -3575,7 +3708,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
3575 | #define E1000_CB_LENGTH 256 | 3708 | #define E1000_CB_LENGTH 256 |
3576 | if (length < E1000_CB_LENGTH) { | 3709 | if (length < E1000_CB_LENGTH) { |
3577 | struct sk_buff *new_skb = | 3710 | struct sk_buff *new_skb = |
3578 | dev_alloc_skb(length + NET_IP_ALIGN); | 3711 | netdev_alloc_skb(netdev, length + NET_IP_ALIGN); |
3579 | if (new_skb) { | 3712 | if (new_skb) { |
3580 | skb_reserve(new_skb, NET_IP_ALIGN); | 3713 | skb_reserve(new_skb, NET_IP_ALIGN); |
3581 | new_skb->dev = netdev; | 3714 | new_skb->dev = netdev; |
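This is the existing copybreak path: frames shorter than E1000_CB_LENGTH are copied into a right-sized skb so the large DMA buffer can be recycled; the change is only that the copy target now comes from netdev_alloc_skb(), which ties the skb to the receiving device at allocation time. The idea in isolation, as a rough sketch (copybreak() and the 256-byte cutoff mirror, not reproduce, the driver code):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static struct sk_buff *copybreak(struct net_device *dev,
				 const struct sk_buff *big, unsigned int len)
{
	struct sk_buff *small;

	if (len >= 256)
		return NULL;				/* not worth copying large frames */
	small = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
	if (!small)
		return NULL;
	skb_reserve(small, NET_IP_ALIGN);		/* keep the IP header aligned */
	memcpy(skb_put(small, len), big->data, len);
	return small;					/* caller recycles 'big' */
}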
@@ -3675,7 +3808,6 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
3675 | buffer_info = &rx_ring->buffer_info[i]; | 3808 | buffer_info = &rx_ring->buffer_info[i]; |
3676 | 3809 | ||
3677 | while (staterr & E1000_RXD_STAT_DD) { | 3810 | while (staterr & E1000_RXD_STAT_DD) { |
3678 | buffer_info = &rx_ring->buffer_info[i]; | ||
3679 | ps_page = &rx_ring->ps_page[i]; | 3811 | ps_page = &rx_ring->ps_page[i]; |
3680 | ps_page_dma = &rx_ring->ps_page_dma[i]; | 3812 | ps_page_dma = &rx_ring->ps_page_dma[i]; |
3681 | #ifdef CONFIG_E1000_NAPI | 3813 | #ifdef CONFIG_E1000_NAPI |
@@ -3747,8 +3879,9 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
3747 | pci_dma_sync_single_for_device(pdev, | 3879 | pci_dma_sync_single_for_device(pdev, |
3748 | ps_page_dma->ps_page_dma[0], | 3880 | ps_page_dma->ps_page_dma[0], |
3749 | PAGE_SIZE, PCI_DMA_FROMDEVICE); | 3881 | PAGE_SIZE, PCI_DMA_FROMDEVICE); |
3882 | /* remove the CRC */ | ||
3883 | l1 -= 4; | ||
3750 | skb_put(skb, l1); | 3884 | skb_put(skb, l1); |
3751 | length += l1; | ||
3752 | goto copydone; | 3885 | goto copydone; |
3753 | } /* if */ | 3886 | } /* if */ |
3754 | } | 3887 | } |
@@ -3767,6 +3900,10 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
3767 | skb->truesize += length; | 3900 | skb->truesize += length; |
3768 | } | 3901 | } |
3769 | 3902 | ||
3903 | /* strip the ethernet crc, problem is we're using pages now so | ||
3904 | * this whole operation can get a little cpu intensive */ | ||
3905 | pskb_trim(skb, skb->len - 4); | ||
3906 | |||
3770 | copydone: | 3907 | copydone: |
3771 | e1000_rx_checksum(adapter, staterr, | 3908 | e1000_rx_checksum(adapter, staterr, |
3772 | le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb); | 3909 | le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb); |
@@ -3842,7 +3979,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter, | |||
3842 | 3979 | ||
3843 | while (cleaned_count--) { | 3980 | while (cleaned_count--) { |
3844 | if (!(skb = buffer_info->skb)) | 3981 | if (!(skb = buffer_info->skb)) |
3845 | skb = dev_alloc_skb(bufsz); | 3982 | skb = netdev_alloc_skb(netdev, bufsz); |
3846 | else { | 3983 | else { |
3847 | skb_trim(skb, 0); | 3984 | skb_trim(skb, 0); |
3848 | goto map_skb; | 3985 | goto map_skb; |
@@ -3860,7 +3997,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter, | |||
3860 | DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes " | 3997 | DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes " |
3861 | "at %p\n", bufsz, skb->data); | 3998 | "at %p\n", bufsz, skb->data); |
3862 | /* Try again, without freeing the previous */ | 3999 | /* Try again, without freeing the previous */ |
3863 | skb = dev_alloc_skb(bufsz); | 4000 | skb = netdev_alloc_skb(netdev, bufsz); |
3864 | /* Failed allocation, critical failure */ | 4001 | /* Failed allocation, critical failure */ |
3865 | if (!skb) { | 4002 | if (!skb) { |
3866 | dev_kfree_skb(oldskb); | 4003 | dev_kfree_skb(oldskb); |
@@ -3984,7 +4121,8 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | |||
3984 | rx_desc->read.buffer_addr[j+1] = ~0; | 4121 | rx_desc->read.buffer_addr[j+1] = ~0; |
3985 | } | 4122 | } |
3986 | 4123 | ||
3987 | skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN); | 4124 | skb = netdev_alloc_skb(netdev, |
4125 | adapter->rx_ps_bsize0 + NET_IP_ALIGN); | ||
3988 | 4126 | ||
3989 | if (unlikely(!skb)) { | 4127 | if (unlikely(!skb)) { |
3990 | adapter->alloc_rx_buff_failed++; | 4128 | adapter->alloc_rx_buff_failed++; |
@@ -4180,10 +4318,9 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |||
4180 | return retval; | 4318 | return retval; |
4181 | } | 4319 | } |
4182 | } | 4320 | } |
4183 | if (netif_running(adapter->netdev)) { | 4321 | if (netif_running(adapter->netdev)) |
4184 | e1000_down(adapter); | 4322 | e1000_reinit_locked(adapter); |
4185 | e1000_up(adapter); | 4323 | else |
4186 | } else | ||
4187 | e1000_reset(adapter); | 4324 | e1000_reset(adapter); |
4188 | break; | 4325 | break; |
4189 | case M88E1000_PHY_SPEC_CTRL: | 4326 | case M88E1000_PHY_SPEC_CTRL: |
@@ -4200,10 +4337,9 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |||
4200 | case PHY_CTRL: | 4337 | case PHY_CTRL: |
4201 | if (mii_reg & MII_CR_POWER_DOWN) | 4338 | if (mii_reg & MII_CR_POWER_DOWN) |
4202 | break; | 4339 | break; |
4203 | if (netif_running(adapter->netdev)) { | 4340 | if (netif_running(adapter->netdev)) |
4204 | e1000_down(adapter); | 4341 | e1000_reinit_locked(adapter); |
4205 | e1000_up(adapter); | 4342 | else |
4206 | } else | ||
4207 | e1000_reset(adapter); | 4343 | e1000_reset(adapter); |
4208 | break; | 4344 | break; |
4209 | } | 4345 | } |
@@ -4250,11 +4386,13 @@ e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value) | |||
4250 | pci_write_config_word(adapter->pdev, reg, *value); | 4386 | pci_write_config_word(adapter->pdev, reg, *value); |
4251 | } | 4387 | } |
4252 | 4388 | ||
4389 | #if 0 | ||
4253 | uint32_t | 4390 | uint32_t |
4254 | e1000_io_read(struct e1000_hw *hw, unsigned long port) | 4391 | e1000_io_read(struct e1000_hw *hw, unsigned long port) |
4255 | { | 4392 | { |
4256 | return inl(port); | 4393 | return inl(port); |
4257 | } | 4394 | } |
4395 | #endif /* 0 */ | ||
4258 | 4396 | ||
4259 | void | 4397 | void |
4260 | e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value) | 4398 | e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value) |
@@ -4277,18 +4415,21 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) | |||
4277 | ctrl |= E1000_CTRL_VME; | 4415 | ctrl |= E1000_CTRL_VME; |
4278 | E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); | 4416 | E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); |
4279 | 4417 | ||
4418 | if (adapter->hw.mac_type != e1000_ich8lan) { | ||
4280 | /* enable VLAN receive filtering */ | 4419 | /* enable VLAN receive filtering */ |
4281 | rctl = E1000_READ_REG(&adapter->hw, RCTL); | 4420 | rctl = E1000_READ_REG(&adapter->hw, RCTL); |
4282 | rctl |= E1000_RCTL_VFE; | 4421 | rctl |= E1000_RCTL_VFE; |
4283 | rctl &= ~E1000_RCTL_CFIEN; | 4422 | rctl &= ~E1000_RCTL_CFIEN; |
4284 | E1000_WRITE_REG(&adapter->hw, RCTL, rctl); | 4423 | E1000_WRITE_REG(&adapter->hw, RCTL, rctl); |
4285 | e1000_update_mng_vlan(adapter); | 4424 | e1000_update_mng_vlan(adapter); |
4425 | } | ||
4286 | } else { | 4426 | } else { |
4287 | /* disable VLAN tag insert/strip */ | 4427 | /* disable VLAN tag insert/strip */ |
4288 | ctrl = E1000_READ_REG(&adapter->hw, CTRL); | 4428 | ctrl = E1000_READ_REG(&adapter->hw, CTRL); |
4289 | ctrl &= ~E1000_CTRL_VME; | 4429 | ctrl &= ~E1000_CTRL_VME; |
4290 | E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); | 4430 | E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); |
4291 | 4431 | ||
4432 | if (adapter->hw.mac_type != e1000_ich8lan) { | ||
4292 | /* disable VLAN filtering */ | 4433 | /* disable VLAN filtering */ |
4293 | rctl = E1000_READ_REG(&adapter->hw, RCTL); | 4434 | rctl = E1000_READ_REG(&adapter->hw, RCTL); |
4294 | rctl &= ~E1000_RCTL_VFE; | 4435 | rctl &= ~E1000_RCTL_VFE; |
@@ -4297,6 +4438,7 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) | |||
4297 | e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); | 4438 | e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); |
4298 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; | 4439 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; |
4299 | } | 4440 | } |
4441 | } | ||
4300 | } | 4442 | } |
4301 | 4443 | ||
4302 | e1000_irq_enable(adapter); | 4444 | e1000_irq_enable(adapter); |
@@ -4458,12 +4600,16 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
4458 | struct e1000_adapter *adapter = netdev_priv(netdev); | 4600 | struct e1000_adapter *adapter = netdev_priv(netdev); |
4459 | uint32_t ctrl, ctrl_ext, rctl, manc, status; | 4601 | uint32_t ctrl, ctrl_ext, rctl, manc, status; |
4460 | uint32_t wufc = adapter->wol; | 4602 | uint32_t wufc = adapter->wol; |
4603 | #ifdef CONFIG_PM | ||
4461 | int retval = 0; | 4604 | int retval = 0; |
4605 | #endif | ||
4462 | 4606 | ||
4463 | netif_device_detach(netdev); | 4607 | netif_device_detach(netdev); |
4464 | 4608 | ||
4465 | if (netif_running(netdev)) | 4609 | if (netif_running(netdev)) { |
4610 | WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); | ||
4466 | e1000_down(adapter); | 4611 | e1000_down(adapter); |
4612 | } | ||
4467 | 4613 | ||
4468 | #ifdef CONFIG_PM | 4614 | #ifdef CONFIG_PM |
4469 | /* Implement our own version of pci_save_state(pdev) because pci- | 4615 | /* Implement our own version of pci_save_state(pdev) because pci- |
@@ -4521,7 +4667,9 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
4521 | pci_enable_wake(pdev, PCI_D3cold, 0); | 4667 | pci_enable_wake(pdev, PCI_D3cold, 0); |
4522 | } | 4668 | } |
4523 | 4669 | ||
4670 | /* FIXME: this code is incorrect for PCI Express */ | ||
4524 | if (adapter->hw.mac_type >= e1000_82540 && | 4671 | if (adapter->hw.mac_type >= e1000_82540 && |
4672 | adapter->hw.mac_type != e1000_ich8lan && | ||
4525 | adapter->hw.media_type == e1000_media_type_copper) { | 4673 | adapter->hw.media_type == e1000_media_type_copper) { |
4526 | manc = E1000_READ_REG(&adapter->hw, MANC); | 4674 | manc = E1000_READ_REG(&adapter->hw, MANC); |
4527 | if (manc & E1000_MANC_SMBUS_EN) { | 4675 | if (manc & E1000_MANC_SMBUS_EN) { |
@@ -4532,6 +4680,9 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
4532 | } | 4680 | } |
4533 | } | 4681 | } |
4534 | 4682 | ||
4683 | if (adapter->hw.phy_type == e1000_phy_igp_3) | ||
4684 | e1000_phy_powerdown_workaround(&adapter->hw); | ||
4685 | |||
4535 | /* Release control of h/w to f/w. If f/w is AMT enabled, this | 4686 | /* Release control of h/w to f/w. If f/w is AMT enabled, this |
4536 | * would have already happened in close and is redundant. */ | 4687 | * would have already happened in close and is redundant. */ |
4537 | e1000_release_hw_control(adapter); | 4688 | e1000_release_hw_control(adapter); |
@@ -4567,7 +4718,9 @@ e1000_resume(struct pci_dev *pdev) | |||
4567 | 4718 | ||
4568 | netif_device_attach(netdev); | 4719 | netif_device_attach(netdev); |
4569 | 4720 | ||
4721 | /* FIXME: this code is incorrect for PCI Express */ | ||
4570 | if (adapter->hw.mac_type >= e1000_82540 && | 4722 | if (adapter->hw.mac_type >= e1000_82540 && |
4723 | adapter->hw.mac_type != e1000_ich8lan && | ||
4571 | adapter->hw.media_type == e1000_media_type_copper) { | 4724 | adapter->hw.media_type == e1000_media_type_copper) { |
4572 | manc = E1000_READ_REG(&adapter->hw, MANC); | 4725 | manc = E1000_READ_REG(&adapter->hw, MANC); |
4573 | manc &= ~(E1000_MANC_ARP_EN); | 4726 | manc &= ~(E1000_MANC_ARP_EN); |
@@ -4601,6 +4754,7 @@ static void | |||
4601 | e1000_netpoll(struct net_device *netdev) | 4754 | e1000_netpoll(struct net_device *netdev) |
4602 | { | 4755 | { |
4603 | struct e1000_adapter *adapter = netdev_priv(netdev); | 4756 | struct e1000_adapter *adapter = netdev_priv(netdev); |
4757 | |||
4604 | disable_irq(adapter->pdev->irq); | 4758 | disable_irq(adapter->pdev->irq); |
4605 | e1000_intr(adapter->pdev->irq, netdev, NULL); | 4759 | e1000_intr(adapter->pdev->irq, netdev, NULL); |
4606 | e1000_clean_tx_irq(adapter, adapter->tx_ring); | 4760 | e1000_clean_tx_irq(adapter, adapter->tx_ring); |
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h index 048d052be29d..2d3e8b06cab0 100644 --- a/drivers/net/e1000/e1000_osdep.h +++ b/drivers/net/e1000/e1000_osdep.h | |||
@@ -127,4 +127,17 @@ typedef enum { | |||
127 | 127 | ||
128 | #define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, STATUS) | 128 | #define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, STATUS) |
129 | 129 | ||
130 | #define E1000_WRITE_ICH8_REG(a, reg, value) ( \ | ||
131 | writel((value), ((a)->flash_address + reg))) | ||
132 | |||
133 | #define E1000_READ_ICH8_REG(a, reg) ( \ | ||
134 | readl((a)->flash_address + reg)) | ||
135 | |||
136 | #define E1000_WRITE_ICH8_REG16(a, reg, value) ( \ | ||
137 | writew((value), ((a)->flash_address + reg))) | ||
138 | |||
139 | #define E1000_READ_ICH8_REG16(a, reg) ( \ | ||
140 | readw((a)->flash_address + reg)) | ||
141 | |||
142 | |||
130 | #endif /* _E1000_OSDEP_H_ */ | 143 | #endif /* _E1000_OSDEP_H_ */ |
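The E1000_*_ICH8_REG macros added above are thin readl/writel/readw/writew wrappers over the mapped ICH8 flash BAR, dereferenced through the same hw pointer (via its flash_address field) that the existing E1000_READ_REG/E1000_WRITE_REG macros take. A minimal sketch of a 16-bit read-modify-write through them; the register offset and bit names here are hypothetical, not defined by this patch:

/* Sketch only: HYP_FLASH_REG and HYP_FLASH_BIT are illustrative names. */
#define HYP_FLASH_REG	0x04
#define HYP_FLASH_BIT	0x0001

static void sketch_flash_rmw(struct e1000_hw *hw)
{
	uint16_t val;

	val = E1000_READ_ICH8_REG16(hw, HYP_FLASH_REG);	/* readw(hw->flash_address + reg) */
	val |= HYP_FLASH_BIT;
	E1000_WRITE_ICH8_REG16(hw, HYP_FLASH_REG, val);	/* writew(val, hw->flash_address + reg) */
}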
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c index e55f8969a0fb..0ef413172c68 100644 --- a/drivers/net/e1000/e1000_param.c +++ b/drivers/net/e1000/e1000_param.c | |||
@@ -45,6 +45,16 @@ | |||
45 | */ | 45 | */ |
46 | 46 | ||
47 | #define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET } | 47 | #define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET } |
48 | /* Module Parameters are always initialized to -1, so that the driver | ||
49 | * can tell the difference between no user specified value or the | ||
50 | * user asking for the default value. | ||
51 | * The true default values are loaded in when e1000_check_options is called. | ||
52 | * | ||
53 | * This is a GCC extension to ANSI C. | ||
54 | * See the item "Labeled Elements in Initializers" in the section | ||
55 | * "Extensions to the C Language Family" of the GCC documentation. | ||
56 | */ | ||
57 | |||
48 | #define E1000_PARAM(X, desc) \ | 58 | #define E1000_PARAM(X, desc) \ |
49 | static int __devinitdata X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \ | 59 | static int __devinitdata X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \ |
50 | static int num_##X = 0; \ | 60 | static int num_##X = 0; \ |
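The "[0 ... E1000_MAX_NIC] = OPTION_UNSET" initializer that the new comment documents is GCC's ranged designated initializer: one designator fills a whole run of array elements. A stand-alone sketch of the construct, with illustrative names rather than the driver's own:

/* GCC extension: every element from 0 through MAX_BOARDS starts as -1,
 * so "never set by the user" stays distinguishable from any real value. */
#define MAX_BOARDS	32
#define OPT_UNSET	(-1)

static int tx_descriptors[MAX_BOARDS + 1] = { [0 ... MAX_BOARDS] = OPT_UNSET };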
@@ -183,6 +193,24 @@ E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay"); | |||
183 | 193 | ||
184 | E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); | 194 | E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); |
185 | 195 | ||
196 | /* Enable Smart Power Down of the PHY | ||
197 | * | ||
198 | * Valid Range: 0, 1 | ||
199 | * | ||
200 | * Default Value: 0 (disabled) | ||
201 | */ | ||
202 | |||
203 | E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); | ||
204 | |||
205 | /* Enable Kumeran Lock Loss workaround | ||
206 | * | ||
207 | * Valid Range: 0, 1 | ||
208 | * | ||
209 | * Default Value: 1 (enabled) | ||
210 | */ | ||
211 | |||
212 | E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround"); | ||
213 | |||
186 | #define AUTONEG_ADV_DEFAULT 0x2F | 214 | #define AUTONEG_ADV_DEFAULT 0x2F |
187 | #define AUTONEG_ADV_MASK 0x2F | 215 | #define AUTONEG_ADV_MASK 0x2F |
188 | #define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL | 216 | #define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL |
@@ -296,6 +324,7 @@ e1000_check_options(struct e1000_adapter *adapter) | |||
296 | DPRINTK(PROBE, NOTICE, | 324 | DPRINTK(PROBE, NOTICE, |
297 | "Warning: no configuration for board #%i\n", bd); | 325 | "Warning: no configuration for board #%i\n", bd); |
298 | DPRINTK(PROBE, NOTICE, "Using defaults for all values\n"); | 326 | DPRINTK(PROBE, NOTICE, "Using defaults for all values\n"); |
327 | bd = E1000_MAX_NIC; | ||
299 | } | 328 | } |
300 | 329 | ||
301 | { /* Transmit Descriptor Count */ | 330 | { /* Transmit Descriptor Count */ |
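Clamping bd to E1000_MAX_NIC above, and dropping the per-option "num_X > bd" guards in the hunks that follow, both lean on the OPTION_UNSET scheme: an entry the user never touched still reads OPTION_UNSET, so the validator can substitute the default itself. A rough sketch of that fallback pattern (not the literal e1000_validate_option body):

/* Sketch of the defaulting step the simplified option blocks assume. */
static int sketch_validate_option(int *value, struct e1000_option *opt)
{
	if (*value == OPTION_UNSET) {
		*value = opt->def;	/* nothing from the user: take the default */
		return 0;
	}
	/* ... otherwise range/list validation as before ... */
	return 0;
}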
@@ -313,14 +342,9 @@ e1000_check_options(struct e1000_adapter *adapter) | |||
313 | opt.arg.r.max = mac_type < e1000_82544 ? | 342 | opt.arg.r.max = mac_type < e1000_82544 ? |
314 | E1000_MAX_TXD : E1000_MAX_82544_TXD; | 343 | E1000_MAX_TXD : E1000_MAX_82544_TXD; |
315 | 344 | ||
316 | if (num_TxDescriptors > bd) { | 345 | tx_ring->count = TxDescriptors[bd]; |
317 | tx_ring->count = TxDescriptors[bd]; | 346 | e1000_validate_option(&tx_ring->count, &opt, adapter); |
318 | e1000_validate_option(&tx_ring->count, &opt, adapter); | 347 | E1000_ROUNDUP(tx_ring->count, REQ_TX_DESCRIPTOR_MULTIPLE); |
319 | E1000_ROUNDUP(tx_ring->count, | ||
320 | REQ_TX_DESCRIPTOR_MULTIPLE); | ||
321 | } else { | ||
322 | tx_ring->count = opt.def; | ||
323 | } | ||
324 | for (i = 0; i < adapter->num_tx_queues; i++) | 348 | for (i = 0; i < adapter->num_tx_queues; i++) |
325 | tx_ring[i].count = tx_ring->count; | 349 | tx_ring[i].count = tx_ring->count; |
326 | } | 350 | } |
@@ -339,14 +363,9 @@ e1000_check_options(struct e1000_adapter *adapter) | |||
339 | opt.arg.r.max = mac_type < e1000_82544 ? E1000_MAX_RXD : | 363 | opt.arg.r.max = mac_type < e1000_82544 ? E1000_MAX_RXD : |
340 | E1000_MAX_82544_RXD; | 364 | E1000_MAX_82544_RXD; |
341 | 365 | ||
342 | if (num_RxDescriptors > bd) { | 366 | rx_ring->count = RxDescriptors[bd]; |
343 | rx_ring->count = RxDescriptors[bd]; | 367 | e1000_validate_option(&rx_ring->count, &opt, adapter); |
344 | e1000_validate_option(&rx_ring->count, &opt, adapter); | 368 | E1000_ROUNDUP(rx_ring->count, REQ_RX_DESCRIPTOR_MULTIPLE); |
345 | E1000_ROUNDUP(rx_ring->count, | ||
346 | REQ_RX_DESCRIPTOR_MULTIPLE); | ||
347 | } else { | ||
348 | rx_ring->count = opt.def; | ||
349 | } | ||
350 | for (i = 0; i < adapter->num_rx_queues; i++) | 369 | for (i = 0; i < adapter->num_rx_queues; i++) |
351 | rx_ring[i].count = rx_ring->count; | 370 | rx_ring[i].count = rx_ring->count; |
352 | } | 371 | } |
@@ -358,13 +377,9 @@ e1000_check_options(struct e1000_adapter *adapter) | |||
358 | .def = OPTION_ENABLED | 377 | .def = OPTION_ENABLED |
359 | }; | 378 | }; |
360 | 379 | ||
361 | if (num_XsumRX > bd) { | 380 | int rx_csum = XsumRX[bd]; |
362 | int rx_csum = XsumRX[bd]; | 381 | e1000_validate_option(&rx_csum, &opt, adapter); |
363 | e1000_validate_option(&rx_csum, &opt, adapter); | 382 | adapter->rx_csum = rx_csum; |
364 | adapter->rx_csum = rx_csum; | ||
365 | } else { | ||
366 | adapter->rx_csum = opt.def; | ||
367 | } | ||
368 | } | 383 | } |
369 | { /* Flow Control */ | 384 | { /* Flow Control */ |
370 | 385 | ||
@@ -384,13 +399,9 @@ e1000_check_options(struct e1000_adapter *adapter) | |||
384 | .p = fc_list }} | 399 | .p = fc_list }} |
385 | }; | 400 | }; |
386 | 401 | ||
387 | if (num_FlowControl > bd) { | 402 | int fc = FlowControl[bd]; |
388 | int fc = FlowControl[bd]; | 403 | e1000_validate_option(&fc, &opt, adapter); |
389 | e1000_validate_option(&fc, &opt, adapter); | 404 | adapter->hw.fc = adapter->hw.original_fc = fc; |
390 | adapter->hw.fc = adapter->hw.original_fc = fc; | ||
391 | } else { | ||
392 | adapter->hw.fc = adapter->hw.original_fc = opt.def; | ||
393 | } | ||
394 | } | 405 | } |
395 | { /* Transmit Interrupt Delay */ | 406 | { /* Transmit Interrupt Delay */ |
396 | struct e1000_option opt = { | 407 | struct e1000_option opt = { |
@@ -402,13 +413,8 @@ e1000_check_options(struct e1000_adapter *adapter) | |||
402 | .max = MAX_TXDELAY }} | 413 | .max = MAX_TXDELAY }} |
403 | }; | 414 | }; |
404 | 415 | ||
405 | if (num_TxIntDelay > bd) { | 416 | adapter->tx_int_delay = TxIntDelay[bd]; |
406 | adapter->tx_int_delay = TxIntDelay[bd]; | 417 | e1000_validate_option(&adapter->tx_int_delay, &opt, adapter); |
407 | e1000_validate_option(&adapter->tx_int_delay, &opt, | ||
408 | adapter); | ||
409 | } else { | ||
410 | adapter->tx_int_delay = opt.def; | ||
411 | } | ||
412 | } | 418 | } |
413 | { /* Transmit Absolute Interrupt Delay */ | 419 | { /* Transmit Absolute Interrupt Delay */ |
414 | struct e1000_option opt = { | 420 | struct e1000_option opt = { |
@@ -420,13 +426,9 @@ e1000_check_options(struct e1000_adapter *adapter) | |||
420 | .max = MAX_TXABSDELAY }} | 426 | .max = MAX_TXABSDELAY }} |
421 | }; | 427 | }; |
422 | 428 | ||
423 | if (num_TxAbsIntDelay > bd) { | 429 | adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; |
424 | adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; | 430 | e1000_validate_option(&adapter->tx_abs_int_delay, &opt, |
425 | e1000_validate_option(&adapter->tx_abs_int_delay, &opt, | 431 | adapter); |
426 | adapter); | ||
427 | } else { | ||
428 | adapter->tx_abs_int_delay = opt.def; | ||
429 | } | ||
430 | } | 432 | } |
431 | { /* Receive Interrupt Delay */ | 433 | { /* Receive Interrupt Delay */ |
432 | struct e1000_option opt = { | 434 | struct e1000_option opt = { |
@@ -438,13 +440,8 @@ e1000_check_options(struct e1000_adapter *adapter) | |||
438 | .max = MAX_RXDELAY }} | 440 | .max = MAX_RXDELAY }} |
439 | }; | 441 | }; |
440 | 442 | ||
441 | if (num_RxIntDelay > bd) { | 443 | adapter->rx_int_delay = RxIntDelay[bd]; |
442 | adapter->rx_int_delay = RxIntDelay[bd]; | 444 | e1000_validate_option(&adapter->rx_int_delay, &opt, adapter); |
443 | e1000_validate_option(&adapter->rx_int_delay, &opt, | ||
444 | adapter); | ||
445 | } else { | ||
446 | adapter->rx_int_delay = opt.def; | ||
447 | } | ||
448 | } | 445 | } |
449 | { /* Receive Absolute Interrupt Delay */ | 446 | { /* Receive Absolute Interrupt Delay */ |
450 | struct e1000_option opt = { | 447 | struct e1000_option opt = { |
@@ -456,13 +453,9 @@ e1000_check_options(struct e1000_adapter *adapter) | |||
456 | .max = MAX_RXABSDELAY }} | 453 | .max = MAX_RXABSDELAY }} |
457 | }; | 454 | }; |
458 | 455 | ||
459 | if (num_RxAbsIntDelay > bd) { | 456 | adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; |
460 | adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; | 457 | e1000_validate_option(&adapter->rx_abs_int_delay, &opt, |
461 | e1000_validate_option(&adapter->rx_abs_int_delay, &opt, | 458 | adapter); |
462 | adapter); | ||
463 | } else { | ||
464 | adapter->rx_abs_int_delay = opt.def; | ||
465 | } | ||
466 | } | 459 | } |
467 | { /* Interrupt Throttling Rate */ | 460 | { /* Interrupt Throttling Rate */ |
468 | struct e1000_option opt = { | 461 | struct e1000_option opt = { |
@@ -474,26 +467,44 @@ e1000_check_options(struct e1000_adapter *adapter) | |||
474 | .max = MAX_ITR }} | 467 | .max = MAX_ITR }} |
475 | }; | 468 | }; |
476 | 469 | ||
477 | if (num_InterruptThrottleRate > bd) { | 470 | adapter->itr = InterruptThrottleRate[bd]; |
478 | adapter->itr = InterruptThrottleRate[bd]; | 471 | switch (adapter->itr) { |
479 | switch (adapter->itr) { | 472 | case 0: |
480 | case 0: | 473 | DPRINTK(PROBE, INFO, "%s turned off\n", opt.name); |
481 | DPRINTK(PROBE, INFO, "%s turned off\n", | 474 | break; |
482 | opt.name); | 475 | case 1: |
483 | break; | 476 | DPRINTK(PROBE, INFO, "%s set to dynamic mode\n", |
484 | case 1: | 477 | opt.name); |
485 | DPRINTK(PROBE, INFO, "%s set to dynamic mode\n", | 478 | break; |
486 | opt.name); | 479 | default: |
487 | break; | 480 | e1000_validate_option(&adapter->itr, &opt, adapter); |
488 | default: | 481 | break; |
489 | e1000_validate_option(&adapter->itr, &opt, | ||
490 | adapter); | ||
491 | break; | ||
492 | } | ||
493 | } else { | ||
494 | adapter->itr = opt.def; | ||
495 | } | 482 | } |
496 | } | 483 | } |
484 | { /* Smart Power Down */ | ||
485 | struct e1000_option opt = { | ||
486 | .type = enable_option, | ||
487 | .name = "PHY Smart Power Down", | ||
488 | .err = "defaulting to Disabled", | ||
489 | .def = OPTION_DISABLED | ||
490 | }; | ||
491 | |||
492 | int spd = SmartPowerDownEnable[bd]; | ||
493 | e1000_validate_option(&spd, &opt, adapter); | ||
494 | adapter->smart_power_down = spd; | ||
495 | } | ||
496 | { /* Kumeran Lock Loss Workaround */ | ||
497 | struct e1000_option opt = { | ||
498 | .type = enable_option, | ||
499 | .name = "Kumeran Lock Loss Workaround", | ||
500 | .err = "defaulting to Enabled", | ||
501 | .def = OPTION_ENABLED | ||
502 | }; | ||
503 | |||
504 | int kmrn_lock_loss = KumeranLockLoss[bd]; | ||
505 | e1000_validate_option(&kmrn_lock_loss, &opt, adapter); | ||
506 | adapter->hw.kmrn_lock_loss_workaround_disabled = !kmrn_lock_loss; | ||
507 | } | ||
497 | 508 | ||
498 | switch (adapter->hw.media_type) { | 509 | switch (adapter->hw.media_type) { |
499 | case e1000_media_type_fiber: | 510 | case e1000_media_type_fiber: |
@@ -519,17 +530,18 @@ static void __devinit | |||
519 | e1000_check_fiber_options(struct e1000_adapter *adapter) | 530 | e1000_check_fiber_options(struct e1000_adapter *adapter) |
520 | { | 531 | { |
521 | int bd = adapter->bd_number; | 532 | int bd = adapter->bd_number; |
522 | if (num_Speed > bd) { | 533 | bd = bd > E1000_MAX_NIC ? E1000_MAX_NIC : bd; |
534 | if ((Speed[bd] != OPTION_UNSET)) { | ||
523 | DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, " | 535 | DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, " |
524 | "parameter ignored\n"); | 536 | "parameter ignored\n"); |
525 | } | 537 | } |
526 | 538 | ||
527 | if (num_Duplex > bd) { | 539 | if ((Duplex[bd] != OPTION_UNSET)) { |
528 | DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, " | 540 | DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, " |
529 | "parameter ignored\n"); | 541 | "parameter ignored\n"); |
530 | } | 542 | } |
531 | 543 | ||
532 | if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) { | 544 | if ((AutoNeg[bd] != OPTION_UNSET) && (AutoNeg[bd] != 0x20)) { |
533 | DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is " | 545 | DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is " |
534 | "not valid for fiber adapters, " | 546 | "not valid for fiber adapters, " |
535 | "parameter ignored\n"); | 547 | "parameter ignored\n"); |
@@ -548,6 +560,7 @@ e1000_check_copper_options(struct e1000_adapter *adapter) | |||
548 | { | 560 | { |
549 | int speed, dplx, an; | 561 | int speed, dplx, an; |
550 | int bd = adapter->bd_number; | 562 | int bd = adapter->bd_number; |
563 | bd = bd > E1000_MAX_NIC ? E1000_MAX_NIC : bd; | ||
551 | 564 | ||
552 | { /* Speed */ | 565 | { /* Speed */ |
553 | struct e1000_opt_list speed_list[] = {{ 0, "" }, | 566 | struct e1000_opt_list speed_list[] = {{ 0, "" }, |
@@ -564,12 +577,8 @@ e1000_check_copper_options(struct e1000_adapter *adapter) | |||
564 | .p = speed_list }} | 577 | .p = speed_list }} |
565 | }; | 578 | }; |
566 | 579 | ||
567 | if (num_Speed > bd) { | 580 | speed = Speed[bd]; |
568 | speed = Speed[bd]; | 581 | e1000_validate_option(&speed, &opt, adapter); |
569 | e1000_validate_option(&speed, &opt, adapter); | ||
570 | } else { | ||
571 | speed = opt.def; | ||
572 | } | ||
573 | } | 582 | } |
574 | { /* Duplex */ | 583 | { /* Duplex */ |
575 | struct e1000_opt_list dplx_list[] = {{ 0, "" }, | 584 | struct e1000_opt_list dplx_list[] = {{ 0, "" }, |
@@ -591,15 +600,11 @@ e1000_check_copper_options(struct e1000_adapter *adapter) | |||
591 | "Speed/Duplex/AutoNeg parameter ignored.\n"); | 600 | "Speed/Duplex/AutoNeg parameter ignored.\n"); |
592 | return; | 601 | return; |
593 | } | 602 | } |
594 | if (num_Duplex > bd) { | 603 | dplx = Duplex[bd]; |
595 | dplx = Duplex[bd]; | 604 | e1000_validate_option(&dplx, &opt, adapter); |
596 | e1000_validate_option(&dplx, &opt, adapter); | ||
597 | } else { | ||
598 | dplx = opt.def; | ||
599 | } | ||
600 | } | 605 | } |
601 | 606 | ||
602 | if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) { | 607 | if (AutoNeg[bd] != OPTION_UNSET && (speed != 0 || dplx != 0)) { |
603 | DPRINTK(PROBE, INFO, | 608 | DPRINTK(PROBE, INFO, |
604 | "AutoNeg specified along with Speed or Duplex, " | 609 | "AutoNeg specified along with Speed or Duplex, " |
605 | "parameter ignored\n"); | 610 | "parameter ignored\n"); |
@@ -648,19 +653,15 @@ e1000_check_copper_options(struct e1000_adapter *adapter) | |||
648 | .p = an_list }} | 653 | .p = an_list }} |
649 | }; | 654 | }; |
650 | 655 | ||
651 | if (num_AutoNeg > bd) { | 656 | an = AutoNeg[bd]; |
652 | an = AutoNeg[bd]; | 657 | e1000_validate_option(&an, &opt, adapter); |
653 | e1000_validate_option(&an, &opt, adapter); | ||
654 | } else { | ||
655 | an = opt.def; | ||
656 | } | ||
657 | adapter->hw.autoneg_advertised = an; | 658 | adapter->hw.autoneg_advertised = an; |
658 | } | 659 | } |
659 | 660 | ||
660 | switch (speed + dplx) { | 661 | switch (speed + dplx) { |
661 | case 0: | 662 | case 0: |
662 | adapter->hw.autoneg = adapter->fc_autoneg = 1; | 663 | adapter->hw.autoneg = adapter->fc_autoneg = 1; |
663 | if ((num_Speed > bd) && (speed != 0 || dplx != 0)) | 664 | if (Speed[bd] != OPTION_UNSET || Duplex[bd] != OPTION_UNSET) |
664 | DPRINTK(PROBE, INFO, | 665 | DPRINTK(PROBE, INFO, |
665 | "Speed and duplex autonegotiation enabled\n"); | 666 | "Speed and duplex autonegotiation enabled\n"); |
666 | break; | 667 | break; |
diff --git a/drivers/net/e2100.c b/drivers/net/e2100.c index e5c5cd2a2712..e4e733a380e3 100644 --- a/drivers/net/e2100.c +++ b/drivers/net/e2100.c | |||
@@ -425,8 +425,8 @@ MODULE_LICENSE("GPL"); | |||
425 | 425 | ||
426 | /* This is set up so that only a single autoprobe takes place per call. | 426 | /* This is set up so that only a single autoprobe takes place per call. |
427 | ISA device autoprobes on a running machine are not recommended. */ | 427 | ISA device autoprobes on a running machine are not recommended. */ |
428 | int | 428 | |
429 | init_module(void) | 429 | int __init init_module(void) |
430 | { | 430 | { |
431 | struct net_device *dev; | 431 | struct net_device *dev; |
432 | int this_dev, found = 0; | 432 | int this_dev, found = 0; |
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c index 20d31430c74f..8dc61d65dd23 100644 --- a/drivers/net/eepro.c +++ b/drivers/net/eepro.c | |||
@@ -1807,8 +1807,7 @@ MODULE_PARM_DESC(irq, "EtherExpress Pro/10 IRQ number(s)"); | |||
1807 | MODULE_PARM_DESC(mem, "EtherExpress Pro/10 Rx buffer size(es) in kB (3-29)"); | 1807 | MODULE_PARM_DESC(mem, "EtherExpress Pro/10 Rx buffer size(es) in kB (3-29)"); |
1808 | MODULE_PARM_DESC(autodetect, "EtherExpress Pro/10 force board(s) detection (0-1)"); | 1808 | MODULE_PARM_DESC(autodetect, "EtherExpress Pro/10 force board(s) detection (0-1)"); |
1809 | 1809 | ||
1810 | int | 1810 | int __init init_module(void) |
1811 | init_module(void) | ||
1812 | { | 1811 | { |
1813 | struct net_device *dev; | 1812 | struct net_device *dev; |
1814 | int i; | 1813 | int i; |
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c index 2ad327542927..e445988c92ee 100644 --- a/drivers/net/eepro100.c +++ b/drivers/net/eepro100.c | |||
@@ -555,12 +555,12 @@ static int __devinit eepro100_init_one (struct pci_dev *pdev, | |||
555 | 555 | ||
556 | if (!request_region(pci_resource_start(pdev, 1), | 556 | if (!request_region(pci_resource_start(pdev, 1), |
557 | pci_resource_len(pdev, 1), "eepro100")) { | 557 | pci_resource_len(pdev, 1), "eepro100")) { |
558 | printk (KERN_ERR "eepro100: cannot reserve I/O ports\n"); | 558 | dev_err(&pdev->dev, "eepro100: cannot reserve I/O ports\n"); |
559 | goto err_out_none; | 559 | goto err_out_none; |
560 | } | 560 | } |
561 | if (!request_mem_region(pci_resource_start(pdev, 0), | 561 | if (!request_mem_region(pci_resource_start(pdev, 0), |
562 | pci_resource_len(pdev, 0), "eepro100")) { | 562 | pci_resource_len(pdev, 0), "eepro100")) { |
563 | printk (KERN_ERR "eepro100: cannot reserve MMIO region\n"); | 563 | dev_err(&pdev->dev, "eepro100: cannot reserve MMIO region\n"); |
564 | goto err_out_free_pio_region; | 564 | goto err_out_free_pio_region; |
565 | } | 565 | } |
566 | 566 | ||
@@ -573,7 +573,7 @@ static int __devinit eepro100_init_one (struct pci_dev *pdev, | |||
573 | 573 | ||
574 | ioaddr = pci_iomap(pdev, pci_bar, 0); | 574 | ioaddr = pci_iomap(pdev, pci_bar, 0); |
575 | if (!ioaddr) { | 575 | if (!ioaddr) { |
576 | printk (KERN_ERR "eepro100: cannot remap IO\n"); | 576 | dev_err(&pdev->dev, "eepro100: cannot remap IO\n"); |
577 | goto err_out_free_mmio_region; | 577 | goto err_out_free_mmio_region; |
578 | } | 578 | } |
579 | 579 | ||
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c index 33291bcf6d4c..0701c1d810ca 100644 --- a/drivers/net/eexpress.c +++ b/drivers/net/eexpress.c | |||
@@ -1698,7 +1698,7 @@ MODULE_LICENSE("GPL"); | |||
1698 | * are specified, we verify and then use them. If no parameters are given, we | 1698 | * are specified, we verify and then use them. If no parameters are given, we |
1699 | * autoprobe for one card only. | 1699 | * autoprobe for one card only. |
1700 | */ | 1700 | */ |
1701 | int init_module(void) | 1701 | int __init init_module(void) |
1702 | { | 1702 | { |
1703 | struct net_device *dev; | 1703 | struct net_device *dev; |
1704 | int this_dev, found = 0; | 1704 | int this_dev, found = 0; |
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c index 9f3e09a3d88c..a67650ccf084 100644 --- a/drivers/net/epic100.c +++ b/drivers/net/epic100.c | |||
@@ -19,62 +19,15 @@ | |||
19 | 19 | ||
20 | Information and updates available at | 20 | Information and updates available at |
21 | http://www.scyld.com/network/epic100.html | 21 | http://www.scyld.com/network/epic100.html |
22 | [this link no longer provides anything useful -jgarzik] | ||
22 | 23 | ||
23 | --------------------------------------------------------------------- | 24 | --------------------------------------------------------------------- |
24 | 25 | ||
25 | Linux kernel-specific changes: | ||
26 | |||
27 | LK1.1.2 (jgarzik): | ||
28 | * Merge becker version 1.09 (4/08/2000) | ||
29 | |||
30 | LK1.1.3: | ||
31 | * Major bugfix to 1.09 driver (Francis Romieu) | ||
32 | |||
33 | LK1.1.4 (jgarzik): | ||
34 | * Merge becker test version 1.09 (5/29/2000) | ||
35 | |||
36 | LK1.1.5: | ||
37 | * Fix locking (jgarzik) | ||
38 | * Limit 83c175 probe to ethernet-class PCI devices (rgooch) | ||
39 | |||
40 | LK1.1.6: | ||
41 | * Merge becker version 1.11 | ||
42 | * Move pci_enable_device before any PCI BAR len checks | ||
43 | |||
44 | LK1.1.7: | ||
45 | * { fill me in } | ||
46 | |||
47 | LK1.1.8: | ||
48 | * ethtool driver info support (jgarzik) | ||
49 | |||
50 | LK1.1.9: | ||
51 | * ethtool media get/set support (jgarzik) | ||
52 | |||
53 | LK1.1.10: | ||
54 | * revert MII transceiver init change (jgarzik) | ||
55 | |||
56 | LK1.1.11: | ||
57 | * implement ETHTOOL_[GS]SET, _NWAY_RST, _[GS]MSGLVL, _GLINK (jgarzik) | ||
58 | * replace some MII-related magic numbers with constants | ||
59 | |||
60 | LK1.1.12: | ||
61 | * fix power-up sequence | ||
62 | |||
63 | LK1.1.13: | ||
64 | * revert version 1.1.12, power-up sequence "fix" | ||
65 | |||
66 | LK1.1.14 (Kryzsztof Halasa): | ||
67 | * fix spurious bad initializations | ||
68 | * pound phy a la SMSC's app note on the subject | ||
69 | |||
70 | AC1.1.14ac | ||
71 | * fix power up/down for ethtool that broke in 1.11 | ||
72 | |||
73 | */ | 26 | */ |
74 | 27 | ||
75 | #define DRV_NAME "epic100" | 28 | #define DRV_NAME "epic100" |
76 | #define DRV_VERSION "1.11+LK1.1.14+AC1.1.14" | 29 | #define DRV_VERSION "2.0" |
77 | #define DRV_RELDATE "June 2, 2004" | 30 | #define DRV_RELDATE "June 27, 2006" |
78 | 31 | ||
79 | /* The user-configurable values. | 32 | /* The user-configurable values. |
80 | These may be modified when a driver module is loaded.*/ | 33 | These may be modified when a driver module is loaded.*/ |
@@ -204,19 +157,15 @@ typedef enum { | |||
204 | 157 | ||
205 | struct epic_chip_info { | 158 | struct epic_chip_info { |
206 | const char *name; | 159 | const char *name; |
207 | int io_size; /* Needed for I/O region check or ioremap(). */ | ||
208 | int drv_flags; /* Driver use, intended as capability flags. */ | 160 | int drv_flags; /* Driver use, intended as capability flags. */ |
209 | }; | 161 | }; |
210 | 162 | ||
211 | 163 | ||
212 | /* indexed by chip_t */ | 164 | /* indexed by chip_t */ |
213 | static const struct epic_chip_info pci_id_tbl[] = { | 165 | static const struct epic_chip_info pci_id_tbl[] = { |
214 | { "SMSC EPIC/100 83c170", | 166 | { "SMSC EPIC/100 83c170", TYPE2_INTR | NO_MII | MII_PWRDWN }, |
215 | EPIC_TOTAL_SIZE, TYPE2_INTR | NO_MII | MII_PWRDWN }, | 167 | { "SMSC EPIC/100 83c170", TYPE2_INTR }, |
216 | { "SMSC EPIC/100 83c170", | 168 | { "SMSC EPIC/C 83c175", TYPE2_INTR | MII_PWRDWN }, |
217 | EPIC_TOTAL_SIZE, TYPE2_INTR }, | ||
218 | { "SMSC EPIC/C 83c175", | ||
219 | EPIC_TOTAL_SIZE, TYPE2_INTR | MII_PWRDWN }, | ||
220 | }; | 169 | }; |
221 | 170 | ||
222 | 171 | ||
@@ -385,8 +334,8 @@ static int __devinit epic_init_one (struct pci_dev *pdev, | |||
385 | goto out; | 334 | goto out; |
386 | irq = pdev->irq; | 335 | irq = pdev->irq; |
387 | 336 | ||
388 | if (pci_resource_len(pdev, 0) < pci_id_tbl[chip_idx].io_size) { | 337 | if (pci_resource_len(pdev, 0) < EPIC_TOTAL_SIZE) { |
389 | printk (KERN_ERR "card %d: no PCI region space\n", card_idx); | 338 | dev_err(&pdev->dev, "no PCI region space\n"); |
390 | ret = -ENODEV; | 339 | ret = -ENODEV; |
391 | goto err_out_disable; | 340 | goto err_out_disable; |
392 | } | 341 | } |
@@ -401,7 +350,7 @@ static int __devinit epic_init_one (struct pci_dev *pdev, | |||
401 | 350 | ||
402 | dev = alloc_etherdev(sizeof (*ep)); | 351 | dev = alloc_etherdev(sizeof (*ep)); |
403 | if (!dev) { | 352 | if (!dev) { |
404 | printk (KERN_ERR "card %d: no memory for eth device\n", card_idx); | 353 | dev_err(&pdev->dev, "no memory for eth device\n"); |
405 | goto err_out_free_res; | 354 | goto err_out_free_res; |
406 | } | 355 | } |
407 | SET_MODULE_OWNER(dev); | 356 | SET_MODULE_OWNER(dev); |
@@ -413,7 +362,7 @@ static int __devinit epic_init_one (struct pci_dev *pdev, | |||
413 | ioaddr = pci_resource_start (pdev, 1); | 362 | ioaddr = pci_resource_start (pdev, 1); |
414 | ioaddr = (long) ioremap (ioaddr, pci_resource_len (pdev, 1)); | 363 | ioaddr = (long) ioremap (ioaddr, pci_resource_len (pdev, 1)); |
415 | if (!ioaddr) { | 364 | if (!ioaddr) { |
416 | printk (KERN_ERR DRV_NAME " %d: ioremap failed\n", card_idx); | 365 | dev_err(&pdev->dev, "ioremap failed\n"); |
417 | goto err_out_free_netdev; | 366 | goto err_out_free_netdev; |
418 | } | 367 | } |
419 | #endif | 368 | #endif |
@@ -473,8 +422,7 @@ static int __devinit epic_init_one (struct pci_dev *pdev, | |||
473 | ((u16 *)dev->dev_addr)[i] = le16_to_cpu(inw(ioaddr + LAN0 + i*4)); | 422 | ((u16 *)dev->dev_addr)[i] = le16_to_cpu(inw(ioaddr + LAN0 + i*4)); |
474 | 423 | ||
475 | if (debug > 2) { | 424 | if (debug > 2) { |
476 | printk(KERN_DEBUG DRV_NAME "(%s): EEPROM contents\n", | 425 | dev_printk(KERN_DEBUG, &pdev->dev, "EEPROM contents:\n"); |
477 | pci_name(pdev)); | ||
478 | for (i = 0; i < 64; i++) | 426 | for (i = 0; i < 64; i++) |
479 | printk(" %4.4x%s", read_eeprom(ioaddr, i), | 427 | printk(" %4.4x%s", read_eeprom(ioaddr, i), |
480 | i % 16 == 15 ? "\n" : ""); | 428 | i % 16 == 15 ? "\n" : ""); |
@@ -496,21 +444,23 @@ static int __devinit epic_init_one (struct pci_dev *pdev, | |||
496 | int mii_status = mdio_read(dev, phy, MII_BMSR); | 444 | int mii_status = mdio_read(dev, phy, MII_BMSR); |
497 | if (mii_status != 0xffff && mii_status != 0x0000) { | 445 | if (mii_status != 0xffff && mii_status != 0x0000) { |
498 | ep->phys[phy_idx++] = phy; | 446 | ep->phys[phy_idx++] = phy; |
499 | printk(KERN_INFO DRV_NAME "(%s): MII transceiver #%d control " | 447 | dev_info(&pdev->dev, |
500 | "%4.4x status %4.4x.\n", | 448 | "MII transceiver #%d control " |
501 | pci_name(pdev), phy, mdio_read(dev, phy, 0), mii_status); | 449 | "%4.4x status %4.4x.\n", |
450 | phy, mdio_read(dev, phy, 0), mii_status); | ||
502 | } | 451 | } |
503 | } | 452 | } |
504 | ep->mii_phy_cnt = phy_idx; | 453 | ep->mii_phy_cnt = phy_idx; |
505 | if (phy_idx != 0) { | 454 | if (phy_idx != 0) { |
506 | phy = ep->phys[0]; | 455 | phy = ep->phys[0]; |
507 | ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE); | 456 | ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE); |
508 | printk(KERN_INFO DRV_NAME "(%s): Autonegotiation advertising %4.4x link " | 457 | dev_info(&pdev->dev, |
458 | "Autonegotiation advertising %4.4x link " | ||
509 | "partner %4.4x.\n", | 459 | "partner %4.4x.\n", |
510 | pci_name(pdev), ep->mii.advertising, mdio_read(dev, phy, 5)); | 460 | ep->mii.advertising, mdio_read(dev, phy, 5)); |
511 | } else if ( ! (ep->chip_flags & NO_MII)) { | 461 | } else if ( ! (ep->chip_flags & NO_MII)) { |
512 | printk(KERN_WARNING DRV_NAME "(%s): ***WARNING***: No MII transceiver found!\n", | 462 | dev_warn(&pdev->dev, |
513 | pci_name(pdev)); | 463 | "***WARNING***: No MII transceiver found!\n"); |
514 | /* Use the known PHY address of the EPII. */ | 464 | /* Use the known PHY address of the EPII. */ |
515 | ep->phys[0] = 3; | 465 | ep->phys[0] = 3; |
516 | } | 466 | } |
@@ -525,8 +475,7 @@ static int __devinit epic_init_one (struct pci_dev *pdev, | |||
525 | /* The lower four bits are the media type. */ | 475 | /* The lower four bits are the media type. */ |
526 | if (duplex) { | 476 | if (duplex) { |
527 | ep->mii.force_media = ep->mii.full_duplex = 1; | 477 | ep->mii.force_media = ep->mii.full_duplex = 1; |
528 | printk(KERN_INFO DRV_NAME "(%s): Forced full duplex operation requested.\n", | 478 | dev_info(&pdev->dev, "Forced full duplex requested.\n"); |
529 | pci_name(pdev)); | ||
530 | } | 479 | } |
531 | dev->if_port = ep->default_port = option; | 480 | dev->if_port = ep->default_port = option; |
532 | 481 | ||
diff --git a/drivers/net/es3210.c b/drivers/net/es3210.c index 6b0ab1eac3fb..fd7b32a24ea4 100644 --- a/drivers/net/es3210.c +++ b/drivers/net/es3210.c | |||
@@ -421,8 +421,7 @@ MODULE_PARM_DESC(mem, "memory base address(es)"); | |||
421 | MODULE_DESCRIPTION("Racal-Interlan ES3210 EISA ethernet driver"); | 421 | MODULE_DESCRIPTION("Racal-Interlan ES3210 EISA ethernet driver"); |
422 | MODULE_LICENSE("GPL"); | 422 | MODULE_LICENSE("GPL"); |
423 | 423 | ||
424 | int | 424 | int __init init_module(void) |
425 | init_module(void) | ||
426 | { | 425 | { |
427 | struct net_device *dev; | 426 | struct net_device *dev; |
428 | int this_dev, found = 0; | 427 | int this_dev, found = 0; |
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c index 4bf76f86d8e9..ca42efa9143c 100644 --- a/drivers/net/eth16i.c +++ b/drivers/net/eth16i.c | |||
@@ -1434,7 +1434,7 @@ MODULE_PARM_DESC(mediatype, "eth16i media type of interface(s) (bnc,tp,dix,auto, | |||
1434 | module_param(debug, int, 0); | 1434 | module_param(debug, int, 0); |
1435 | MODULE_PARM_DESC(debug, "eth16i debug level (0-6)"); | 1435 | MODULE_PARM_DESC(debug, "eth16i debug level (0-6)"); |
1436 | 1436 | ||
1437 | int init_module(void) | 1437 | int __init init_module(void) |
1438 | { | 1438 | { |
1439 | int this_dev, found = 0; | 1439 | int this_dev, found = 0; |
1440 | struct net_device *dev; | 1440 | struct net_device *dev; |
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c index c701951dcd6f..567e27413cfd 100644 --- a/drivers/net/fealnx.c +++ b/drivers/net/fealnx.c | |||
@@ -92,7 +92,7 @@ static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 }; | |||
92 | #include <asm/uaccess.h> | 92 | #include <asm/uaccess.h> |
93 | 93 | ||
94 | /* These identify the driver base version and may not be removed. */ | 94 | /* These identify the driver base version and may not be removed. */ |
95 | static char version[] __devinitdata = | 95 | static char version[] = |
96 | KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "\n"; | 96 | KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "\n"; |
97 | 97 | ||
98 | 98 | ||
@@ -124,7 +124,9 @@ MODULE_PARM_DESC(multicast_filter_limit, "fealnx maximum number of filtered mult | |||
124 | MODULE_PARM_DESC(options, "fealnx: Bits 0-3: media type, bit 17: full duplex"); | 124 | MODULE_PARM_DESC(options, "fealnx: Bits 0-3: media type, bit 17: full duplex"); |
125 | MODULE_PARM_DESC(full_duplex, "fealnx full duplex setting(s) (1)"); | 125 | MODULE_PARM_DESC(full_duplex, "fealnx full duplex setting(s) (1)"); |
126 | 126 | ||
127 | #define MIN_REGION_SIZE 136 | 127 | enum { |
128 | MIN_REGION_SIZE = 136, | ||
129 | }; | ||
128 | 130 | ||
129 | /* A chip capabilities table, matching the entries in pci_tbl[] above. */ | 131 | /* A chip capabilities table, matching the entries in pci_tbl[] above. */ |
130 | enum chip_capability_flags { | 132 | enum chip_capability_flags { |
@@ -146,14 +148,13 @@ enum phy_type_flags { | |||
146 | 148 | ||
147 | struct chip_info { | 149 | struct chip_info { |
148 | char *chip_name; | 150 | char *chip_name; |
149 | int io_size; | ||
150 | int flags; | 151 | int flags; |
151 | }; | 152 | }; |
152 | 153 | ||
153 | static const struct chip_info skel_netdrv_tbl[] = { | 154 | static const struct chip_info skel_netdrv_tbl[] __devinitdata = { |
154 | {"100/10M Ethernet PCI Adapter", 136, HAS_MII_XCVR}, | 155 | { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR }, |
155 | {"100/10M Ethernet PCI Adapter", 136, HAS_CHIP_XCVR}, | 156 | { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR }, |
156 | {"1000/100/10M Ethernet PCI Adapter", 136, HAS_MII_XCVR}, | 157 | { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR }, |
157 | }; | 158 | }; |
158 | 159 | ||
159 | /* Offsets to the Command and Status Registers. */ | 160 | /* Offsets to the Command and Status Registers. */ |
@@ -504,13 +505,14 @@ static int __devinit fealnx_init_one(struct pci_dev *pdev, | |||
504 | 505 | ||
505 | len = pci_resource_len(pdev, bar); | 506 | len = pci_resource_len(pdev, bar); |
506 | if (len < MIN_REGION_SIZE) { | 507 | if (len < MIN_REGION_SIZE) { |
507 | printk(KERN_ERR "%s: region size %ld too small, aborting\n", | 508 | dev_err(&pdev->dev, |
508 | boardname, len); | 509 | "region size %ld too small, aborting\n", len); |
509 | return -ENODEV; | 510 | return -ENODEV; |
510 | } | 511 | } |
511 | 512 | ||
512 | i = pci_request_regions(pdev, boardname); | 513 | i = pci_request_regions(pdev, boardname); |
513 | if (i) return i; | 514 | if (i) |
515 | return i; | ||
514 | 516 | ||
515 | irq = pdev->irq; | 517 | irq = pdev->irq; |
516 | 518 | ||
@@ -576,9 +578,9 @@ static int __devinit fealnx_init_one(struct pci_dev *pdev, | |||
576 | 578 | ||
577 | if (mii_status != 0xffff && mii_status != 0x0000) { | 579 | if (mii_status != 0xffff && mii_status != 0x0000) { |
578 | np->phys[phy_idx++] = phy; | 580 | np->phys[phy_idx++] = phy; |
579 | printk(KERN_INFO | 581 | dev_info(&pdev->dev, |
580 | "%s: MII PHY found at address %d, status " | 582 | "MII PHY found at address %d, status " |
581 | "0x%4.4x.\n", dev->name, phy, mii_status); | 583 | "0x%4.4x.\n", phy, mii_status); |
582 | /* get phy type */ | 584 | /* get phy type */ |
583 | { | 585 | { |
584 | unsigned int data; | 586 | unsigned int data; |
@@ -601,10 +603,10 @@ static int __devinit fealnx_init_one(struct pci_dev *pdev, | |||
601 | } | 603 | } |
602 | 604 | ||
603 | np->mii_cnt = phy_idx; | 605 | np->mii_cnt = phy_idx; |
604 | if (phy_idx == 0) { | 606 | if (phy_idx == 0) |
605 | printk(KERN_WARNING "%s: MII PHY not found -- this device may " | 607 | dev_warn(&pdev->dev, |
606 | "not operate correctly.\n", dev->name); | 608 | "MII PHY not found -- this device may " |
607 | } | 609 | "not operate correctly.\n"); |
608 | } else { | 610 | } else { |
609 | np->phys[0] = 32; | 611 | np->phys[0] = 32; |
610 | /* 89/6/23 add, (begin) */ | 612 | /* 89/6/23 add, (begin) */ |
@@ -630,7 +632,7 @@ static int __devinit fealnx_init_one(struct pci_dev *pdev, | |||
630 | np->mii.full_duplex = full_duplex[card_idx]; | 632 | np->mii.full_duplex = full_duplex[card_idx]; |
631 | 633 | ||
632 | if (np->mii.full_duplex) { | 634 | if (np->mii.full_duplex) { |
633 | printk(KERN_INFO "%s: Media type forced to Full Duplex.\n", dev->name); | 635 | dev_info(&pdev->dev, "Media type forced to Full Duplex.\n"); |
634 | /* 89/6/13 add, (begin) */ | 636 | /* 89/6/13 add, (begin) */ |
635 | // if (np->PHYType==MarvellPHY) | 637 | // if (np->PHYType==MarvellPHY) |
636 | if ((np->PHYType == MarvellPHY) || (np->PHYType == LevelOnePHY)) { | 638 | if ((np->PHYType == MarvellPHY) || (np->PHYType == LevelOnePHY)) { |
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index 037d870712ff..11b8f1b43dd5 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c | |||
@@ -240,10 +240,12 @@ enum { | |||
240 | #define NVREG_RNDSEED_FORCE2 0x2d00 | 240 | #define NVREG_RNDSEED_FORCE2 0x2d00 |
241 | #define NVREG_RNDSEED_FORCE3 0x7400 | 241 | #define NVREG_RNDSEED_FORCE3 0x7400 |
242 | 242 | ||
243 | NvRegUnknownSetupReg1 = 0xA0, | 243 | NvRegTxDeferral = 0xA0, |
244 | #define NVREG_UNKSETUP1_VAL 0x16070f | 244 | #define NVREG_TX_DEFERRAL_DEFAULT 0x15050f |
245 | NvRegUnknownSetupReg2 = 0xA4, | 245 | #define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f |
246 | #define NVREG_UNKSETUP2_VAL 0x16 | 246 | #define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f |
247 | NvRegRxDeferral = 0xA4, | ||
248 | #define NVREG_RX_DEFERRAL_DEFAULT 0x16 | ||
247 | NvRegMacAddrA = 0xA8, | 249 | NvRegMacAddrA = 0xA8, |
248 | NvRegMacAddrB = 0xAC, | 250 | NvRegMacAddrB = 0xAC, |
249 | NvRegMulticastAddrA = 0xB0, | 251 | NvRegMulticastAddrA = 0xB0, |
@@ -269,8 +271,10 @@ enum { | |||
269 | #define NVREG_LINKSPEED_MASK (0xFFF) | 271 | #define NVREG_LINKSPEED_MASK (0xFFF) |
270 | NvRegUnknownSetupReg5 = 0x130, | 272 | NvRegUnknownSetupReg5 = 0x130, |
271 | #define NVREG_UNKSETUP5_BIT31 (1<<31) | 273 | #define NVREG_UNKSETUP5_BIT31 (1<<31) |
272 | NvRegUnknownSetupReg3 = 0x13c, | 274 | NvRegTxWatermark = 0x13c, |
273 | #define NVREG_UNKSETUP3_VAL1 0x200010 | 275 | #define NVREG_TX_WM_DESC1_DEFAULT 0x0200010 |
276 | #define NVREG_TX_WM_DESC2_3_DEFAULT 0x1e08000 | ||
277 | #define NVREG_TX_WM_DESC2_3_1000 0xfe08000 | ||
274 | NvRegTxRxControl = 0x144, | 278 | NvRegTxRxControl = 0x144, |
275 | #define NVREG_TXRXCTL_KICK 0x0001 | 279 | #define NVREG_TXRXCTL_KICK 0x0001 |
276 | #define NVREG_TXRXCTL_BIT1 0x0002 | 280 | #define NVREG_TXRXCTL_BIT1 0x0002 |
@@ -658,7 +662,7 @@ static const struct register_test nv_registers_test[] = { | |||
658 | { NvRegMisc1, 0x03c }, | 662 | { NvRegMisc1, 0x03c }, |
659 | { NvRegOffloadConfig, 0x03ff }, | 663 | { NvRegOffloadConfig, 0x03ff }, |
660 | { NvRegMulticastAddrA, 0xffffffff }, | 664 | { NvRegMulticastAddrA, 0xffffffff }, |
661 | { NvRegUnknownSetupReg3, 0x0ff }, | 665 | { NvRegTxWatermark, 0x0ff }, |
662 | { NvRegWakeUpFlags, 0x07777 }, | 666 | { NvRegWakeUpFlags, 0x07777 }, |
663 | { 0,0 } | 667 | { 0,0 } |
664 | }; | 668 | }; |
@@ -1495,7 +1499,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1495 | np->tx_skbuff[nr] = skb; | 1499 | np->tx_skbuff[nr] = skb; |
1496 | 1500 | ||
1497 | #ifdef NETIF_F_TSO | 1501 | #ifdef NETIF_F_TSO |
1498 | if (skb_shinfo(skb)->gso_size) | 1502 | if (skb_is_gso(skb)) |
1499 | tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); | 1503 | tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); |
1500 | else | 1504 | else |
1501 | #endif | 1505 | #endif |
@@ -2127,7 +2131,7 @@ static int nv_update_linkspeed(struct net_device *dev) | |||
2127 | int newdup = np->duplex; | 2131 | int newdup = np->duplex; |
2128 | int mii_status; | 2132 | int mii_status; |
2129 | int retval = 0; | 2133 | int retval = 0; |
2130 | u32 control_1000, status_1000, phyreg, pause_flags; | 2134 | u32 control_1000, status_1000, phyreg, pause_flags, txreg; |
2131 | 2135 | ||
2132 | /* BMSR_LSTATUS is latched, read it twice: | 2136 | /* BMSR_LSTATUS is latched, read it twice: |
2133 | * we want the current value. | 2137 | * we want the current value. |
@@ -2245,6 +2249,26 @@ set_speed: | |||
2245 | phyreg |= PHY_1000; | 2249 | phyreg |= PHY_1000; |
2246 | writel(phyreg, base + NvRegPhyInterface); | 2250 | writel(phyreg, base + NvRegPhyInterface); |
2247 | 2251 | ||
2252 | if (phyreg & PHY_RGMII) { | ||
2253 | if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) | ||
2254 | txreg = NVREG_TX_DEFERRAL_RGMII_1000; | ||
2255 | else | ||
2256 | txreg = NVREG_TX_DEFERRAL_RGMII_10_100; | ||
2257 | } else { | ||
2258 | txreg = NVREG_TX_DEFERRAL_DEFAULT; | ||
2259 | } | ||
2260 | writel(txreg, base + NvRegTxDeferral); | ||
2261 | |||
2262 | if (np->desc_ver == DESC_VER_1) { | ||
2263 | txreg = NVREG_TX_WM_DESC1_DEFAULT; | ||
2264 | } else { | ||
2265 | if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) | ||
2266 | txreg = NVREG_TX_WM_DESC2_3_1000; | ||
2267 | else | ||
2268 | txreg = NVREG_TX_WM_DESC2_3_DEFAULT; | ||
2269 | } | ||
2270 | writel(txreg, base + NvRegTxWatermark); | ||
2271 | |||
2248 | writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD), | 2272 | writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD), |
2249 | base + NvRegMisc1); | 2273 | base + NvRegMisc1); |
2250 | pci_push(base); | 2274 | pci_push(base); |
@@ -3910,7 +3934,10 @@ static int nv_open(struct net_device *dev) | |||
3910 | 3934 | ||
3911 | /* 5) continue setup */ | 3935 | /* 5) continue setup */ |
3912 | writel(np->linkspeed, base + NvRegLinkSpeed); | 3936 | writel(np->linkspeed, base + NvRegLinkSpeed); |
3913 | writel(NVREG_UNKSETUP3_VAL1, base + NvRegUnknownSetupReg3); | 3937 | if (np->desc_ver == DESC_VER_1) |
3938 | writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark); | ||
3939 | else | ||
3940 | writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark); | ||
3914 | writel(np->txrxctl_bits, base + NvRegTxRxControl); | 3941 | writel(np->txrxctl_bits, base + NvRegTxRxControl); |
3915 | writel(np->vlanctl_bits, base + NvRegVlanControl); | 3942 | writel(np->vlanctl_bits, base + NvRegVlanControl); |
3916 | pci_push(base); | 3943 | pci_push(base); |
@@ -3932,8 +3959,8 @@ static int nv_open(struct net_device *dev) | |||
3932 | writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus); | 3959 | writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus); |
3933 | get_random_bytes(&i, sizeof(i)); | 3960 | get_random_bytes(&i, sizeof(i)); |
3934 | writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed); | 3961 | writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed); |
3935 | writel(NVREG_UNKSETUP1_VAL, base + NvRegUnknownSetupReg1); | 3962 | writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral); |
3936 | writel(NVREG_UNKSETUP2_VAL, base + NvRegUnknownSetupReg2); | 3963 | writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral); |
3937 | if (poll_interval == -1) { | 3964 | if (poll_interval == -1) { |
3938 | if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) | 3965 | if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) |
3939 | writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval); | 3966 | writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval); |
diff --git a/drivers/net/fs_enet/Makefile b/drivers/net/fs_enet/Makefile index d6dd3f2fb43e..02d4dc18ba69 100644 --- a/drivers/net/fs_enet/Makefile +++ b/drivers/net/fs_enet/Makefile | |||
@@ -4,7 +4,7 @@ | |||
4 | 4 | ||
5 | obj-$(CONFIG_FS_ENET) += fs_enet.o | 5 | obj-$(CONFIG_FS_ENET) += fs_enet.o |
6 | 6 | ||
7 | obj-$(CONFIG_8xx) += mac-fec.o mac-scc.o | 7 | obj-$(CONFIG_8xx) += mac-fec.o mac-scc.o mii-fec.o |
8 | obj-$(CONFIG_8260) += mac-fcc.o | 8 | obj-$(CONFIG_CPM2) += mac-fcc.o mii-bitbang.o |
9 | 9 | ||
10 | fs_enet-objs := fs_enet-main.o fs_enet-mii.o mii-bitbang.o mii-fixed.o | 10 | fs_enet-objs := fs_enet-main.o |
diff --git a/drivers/net/fs_enet/fec.h b/drivers/net/fs_enet/fec.h new file mode 100644 index 000000000000..e980527e2b99 --- /dev/null +++ b/drivers/net/fs_enet/fec.h | |||
@@ -0,0 +1,42 @@ | |||
1 | #ifndef FS_ENET_FEC_H | ||
2 | #define FS_ENET_FEC_H | ||
3 | |||
4 | /* CRC polynomium used by the FEC for the multicast group filtering */ | ||
5 | #define FEC_CRC_POLY 0x04C11DB7 | ||
6 | |||
7 | #define FEC_MAX_MULTICAST_ADDRS 64 | ||
8 | |||
9 | /* Interrupt events/masks. | ||
10 | */ | ||
11 | #define FEC_ENET_HBERR 0x80000000U /* Heartbeat error */ | ||
12 | #define FEC_ENET_BABR 0x40000000U /* Babbling receiver */ | ||
13 | #define FEC_ENET_BABT 0x20000000U /* Babbling transmitter */ | ||
14 | #define FEC_ENET_GRA 0x10000000U /* Graceful stop complete */ | ||
15 | #define FEC_ENET_TXF 0x08000000U /* Full frame transmitted */ | ||
16 | #define FEC_ENET_TXB 0x04000000U /* A buffer was transmitted */ | ||
17 | #define FEC_ENET_RXF 0x02000000U /* Full frame received */ | ||
18 | #define FEC_ENET_RXB 0x01000000U /* A buffer was received */ | ||
19 | #define FEC_ENET_MII 0x00800000U /* MII interrupt */ | ||
20 | #define FEC_ENET_EBERR 0x00400000U /* SDMA bus error */ | ||
21 | |||
22 | #define FEC_ECNTRL_PINMUX 0x00000004 | ||
23 | #define FEC_ECNTRL_ETHER_EN 0x00000002 | ||
24 | #define FEC_ECNTRL_RESET 0x00000001 | ||
25 | |||
26 | #define FEC_RCNTRL_BC_REJ 0x00000010 | ||
27 | #define FEC_RCNTRL_PROM 0x00000008 | ||
28 | #define FEC_RCNTRL_MII_MODE 0x00000004 | ||
29 | #define FEC_RCNTRL_DRT 0x00000002 | ||
30 | #define FEC_RCNTRL_LOOP 0x00000001 | ||
31 | |||
32 | #define FEC_TCNTRL_FDEN 0x00000004 | ||
33 | #define FEC_TCNTRL_HBC 0x00000002 | ||
34 | #define FEC_TCNTRL_GTS 0x00000001 | ||
35 | |||
36 | |||
37 | |||
38 | /* | ||
39 | * Delay to wait for FEC reset command to complete (in us) | ||
40 | */ | ||
41 | #define FEC_RESET_DELAY 50 | ||
42 | #endif | ||
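The FEC_ENET_* values in this new header are single-bit interrupt event/mask flags. A minimal sketch of how a handler typically consumes them, assuming an MPC8xx-style fec_t register block with fec_ievent/fec_imask fields (the real layout lives in the 8xx immap headers, not in this file):

/* Sketch only: acknowledge pending FEC events and dispatch on the masks above. */
static void sketch_fec_events(volatile fec_t *fecp)
{
	u32 events = fecp->fec_ievent & fecp->fec_imask;	/* pending, unmasked */

	fecp->fec_ievent = events;	/* event register is write-1-to-clear */

	if (events & FEC_ENET_RXF)
		; /* full frame received: run the rx ring */
	if (events & FEC_ENET_TXF)
		; /* full frame transmitted: reclaim tx buffers */
	if (events & FEC_ENET_EBERR)
		; /* SDMA bus error: usually restart the controller */
}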
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c index f6abff5846b3..df62506a1787 100644 --- a/drivers/net/fs_enet/fs_enet-main.c +++ b/drivers/net/fs_enet/fs_enet-main.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <linux/bitops.h> | 37 | #include <linux/bitops.h> |
38 | #include <linux/fs.h> | 38 | #include <linux/fs.h> |
39 | #include <linux/platform_device.h> | 39 | #include <linux/platform_device.h> |
40 | #include <linux/phy.h> | ||
40 | 41 | ||
41 | #include <linux/vmalloc.h> | 42 | #include <linux/vmalloc.h> |
42 | #include <asm/pgtable.h> | 43 | #include <asm/pgtable.h> |
@@ -682,35 +683,6 @@ static void fs_free_irq(struct net_device *dev, int irq) | |||
682 | (*fep->ops->post_free_irq)(dev, irq); | 683 | (*fep->ops->post_free_irq)(dev, irq); |
683 | } | 684 | } |
684 | 685 | ||
685 | /**********************************************************************************/ | ||
686 | |||
687 | /* This interrupt occurs when the PHY detects a link change. */ | ||
688 | static irqreturn_t | ||
689 | fs_mii_link_interrupt(int irq, void *dev_id, struct pt_regs *regs) | ||
690 | { | ||
691 | struct net_device *dev = dev_id; | ||
692 | struct fs_enet_private *fep; | ||
693 | const struct fs_platform_info *fpi; | ||
694 | |||
695 | fep = netdev_priv(dev); | ||
696 | fpi = fep->fpi; | ||
697 | |||
698 | /* | ||
699 | * Acknowledge the interrupt if possible. If we have not | ||
700 | * found the PHY yet we can't process or acknowledge the | ||
701 | * interrupt now. Instead we ignore this interrupt for now, | ||
702 | * which we can do since it is edge triggered. It will be | ||
703 | * acknowledged later by fs_enet_open(). | ||
704 | */ | ||
705 | if (!fep->phy) | ||
706 | return IRQ_NONE; | ||
707 | |||
708 | fs_mii_ack_int(dev); | ||
709 | fs_mii_link_status_change_check(dev, 0); | ||
710 | |||
711 | return IRQ_HANDLED; | ||
712 | } | ||
713 | |||
714 | static void fs_timeout(struct net_device *dev) | 686 | static void fs_timeout(struct net_device *dev) |
715 | { | 687 | { |
716 | struct fs_enet_private *fep = netdev_priv(dev); | 688 | struct fs_enet_private *fep = netdev_priv(dev); |
@@ -722,10 +694,13 @@ static void fs_timeout(struct net_device *dev) | |||
722 | spin_lock_irqsave(&fep->lock, flags); | 694 | spin_lock_irqsave(&fep->lock, flags); |
723 | 695 | ||
724 | if (dev->flags & IFF_UP) { | 696 | if (dev->flags & IFF_UP) { |
697 | phy_stop(fep->phydev); | ||
725 | (*fep->ops->stop)(dev); | 698 | (*fep->ops->stop)(dev); |
726 | (*fep->ops->restart)(dev); | 699 | (*fep->ops->restart)(dev); |
700 | phy_start(fep->phydev); | ||
727 | } | 701 | } |
728 | 702 | ||
703 | phy_start(fep->phydev); | ||
729 | wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY); | 704 | wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY); |
730 | spin_unlock_irqrestore(&fep->lock, flags); | 705 | spin_unlock_irqrestore(&fep->lock, flags); |
731 | 706 | ||
@@ -733,35 +708,112 @@ static void fs_timeout(struct net_device *dev) | |||
733 | netif_wake_queue(dev); | 708 | netif_wake_queue(dev); |
734 | } | 709 | } |
735 | 710 | ||
711 | /*----------------------------------------------------------------------------- | ||
712 | * generic link-change handler - should be sufficient for most cases | ||
713 | *-----------------------------------------------------------------------------*/ | ||
714 | static void generic_adjust_link(struct net_device *dev) | ||
715 | { | ||
716 | struct fs_enet_private *fep = netdev_priv(dev); | ||
717 | struct phy_device *phydev = fep->phydev; | ||
718 | int new_state = 0; | ||
719 | |||
720 | if (phydev->link) { | ||
721 | |||
722 | /* adjust to duplex mode */ | ||
723 | if (phydev->duplex != fep->oldduplex){ | ||
724 | new_state = 1; | ||
725 | fep->oldduplex = phydev->duplex; | ||
726 | } | ||
727 | |||
728 | if (phydev->speed != fep->oldspeed) { | ||
729 | new_state = 1; | ||
730 | fep->oldspeed = phydev->speed; | ||
731 | } | ||
732 | |||
733 | if (!fep->oldlink) { | ||
734 | new_state = 1; | ||
735 | fep->oldlink = 1; | ||
736 | netif_schedule(dev); | ||
737 | netif_carrier_on(dev); | ||
738 | netif_start_queue(dev); | ||
739 | } | ||
740 | |||
741 | if (new_state) | ||
742 | fep->ops->restart(dev); | ||
743 | |||
744 | } else if (fep->oldlink) { | ||
745 | new_state = 1; | ||
746 | fep->oldlink = 0; | ||
747 | fep->oldspeed = 0; | ||
748 | fep->oldduplex = -1; | ||
749 | netif_carrier_off(dev); | ||
750 | netif_stop_queue(dev); | ||
751 | } | ||
752 | |||
753 | if (new_state && netif_msg_link(fep)) | ||
754 | phy_print_status(phydev); | ||
755 | } | ||
756 | |||
757 | |||
758 | static void fs_adjust_link(struct net_device *dev) | ||
759 | { | ||
760 | struct fs_enet_private *fep = netdev_priv(dev); | ||
761 | unsigned long flags; | ||
762 | |||
763 | spin_lock_irqsave(&fep->lock, flags); | ||
764 | |||
765 | if(fep->ops->adjust_link) | ||
766 | fep->ops->adjust_link(dev); | ||
767 | else | ||
768 | generic_adjust_link(dev); | ||
769 | |||
770 | spin_unlock_irqrestore(&fep->lock, flags); | ||
771 | } | ||
772 | |||
773 | static int fs_init_phy(struct net_device *dev) | ||
774 | { | ||
775 | struct fs_enet_private *fep = netdev_priv(dev); | ||
776 | struct phy_device *phydev; | ||
777 | |||
778 | fep->oldlink = 0; | ||
779 | fep->oldspeed = 0; | ||
780 | fep->oldduplex = -1; | ||
781 | if(fep->fpi->bus_id) | ||
782 | phydev = phy_connect(dev, fep->fpi->bus_id, &fs_adjust_link, 0); | ||
783 | else { | ||
784 | printk("No phy bus ID specified in BSP code\n"); | ||
785 | return -EINVAL; | ||
786 | } | ||
787 | if (IS_ERR(phydev)) { | ||
788 | printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); | ||
789 | return PTR_ERR(phydev); | ||
790 | } | ||
791 | |||
792 | fep->phydev = phydev; | ||
793 | |||
794 | return 0; | ||
795 | } | ||
796 | |||
797 | |||
736 | static int fs_enet_open(struct net_device *dev) | 798 | static int fs_enet_open(struct net_device *dev) |
737 | { | 799 | { |
738 | struct fs_enet_private *fep = netdev_priv(dev); | 800 | struct fs_enet_private *fep = netdev_priv(dev); |
739 | const struct fs_platform_info *fpi = fep->fpi; | ||
740 | int r; | 801 | int r; |
802 | int err; | ||
741 | 803 | ||
742 | /* Install our interrupt handler. */ | 804 | /* Install our interrupt handler. */ |
743 | r = fs_request_irq(dev, fep->interrupt, "fs_enet-mac", fs_enet_interrupt); | 805 | r = fs_request_irq(dev, fep->interrupt, "fs_enet-mac", fs_enet_interrupt); |
744 | if (r != 0) { | 806 | if (r != 0) { |
745 | printk(KERN_ERR DRV_MODULE_NAME | 807 | printk(KERN_ERR DRV_MODULE_NAME |
746 | ": %s Could not allocate FEC IRQ!", dev->name); | 808 | ": %s Could not allocate FS_ENET IRQ!", dev->name); |
747 | return -EINVAL; | 809 | return -EINVAL; |
748 | } | 810 | } |
749 | 811 | ||
750 | /* Install our phy interrupt handler */ | 812 | err = fs_init_phy(dev); |
751 | if (fpi->phy_irq != -1) { | 813 | if(err) |
752 | 814 | return err; | |
753 | r = fs_request_irq(dev, fpi->phy_irq, "fs_enet-phy", fs_mii_link_interrupt); | ||
754 | if (r != 0) { | ||
755 | printk(KERN_ERR DRV_MODULE_NAME | ||
756 | ": %s Could not allocate PHY IRQ!", dev->name); | ||
757 | fs_free_irq(dev, fep->interrupt); | ||
758 | return -EINVAL; | ||
759 | } | ||
760 | } | ||
761 | 815 | ||
762 | fs_mii_startup(dev); | 816 | phy_start(fep->phydev); |
763 | netif_carrier_off(dev); | ||
764 | fs_mii_link_status_change_check(dev, 1); | ||
765 | 817 | ||
766 | return 0; | 818 | return 0; |
767 | } | 819 | } |
@@ -769,20 +821,19 @@ static int fs_enet_open(struct net_device *dev) | |||
769 | static int fs_enet_close(struct net_device *dev) | 821 | static int fs_enet_close(struct net_device *dev) |
770 | { | 822 | { |
771 | struct fs_enet_private *fep = netdev_priv(dev); | 823 | struct fs_enet_private *fep = netdev_priv(dev); |
772 | const struct fs_platform_info *fpi = fep->fpi; | ||
773 | unsigned long flags; | 824 | unsigned long flags; |
774 | 825 | ||
775 | netif_stop_queue(dev); | 826 | netif_stop_queue(dev); |
776 | netif_carrier_off(dev); | 827 | netif_carrier_off(dev); |
777 | fs_mii_shutdown(dev); | 828 | phy_stop(fep->phydev); |
778 | 829 | ||
779 | spin_lock_irqsave(&fep->lock, flags); | 830 | spin_lock_irqsave(&fep->lock, flags); |
780 | (*fep->ops->stop)(dev); | 831 | (*fep->ops->stop)(dev); |
781 | spin_unlock_irqrestore(&fep->lock, flags); | 832 | spin_unlock_irqrestore(&fep->lock, flags); |
782 | 833 | ||
783 | /* release any irqs */ | 834 | /* release any irqs */ |
784 | if (fpi->phy_irq != -1) | 835 | phy_disconnect(fep->phydev); |
785 | fs_free_irq(dev, fpi->phy_irq); | 836 | fep->phydev = NULL; |
786 | fs_free_irq(dev, fep->interrupt); | 837 | fs_free_irq(dev, fep->interrupt); |
787 | 838 | ||
788 | return 0; | 839 | return 0; |
@@ -830,33 +881,19 @@ static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs, | |||
830 | static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 881 | static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
831 | { | 882 | { |
832 | struct fs_enet_private *fep = netdev_priv(dev); | 883 | struct fs_enet_private *fep = netdev_priv(dev); |
833 | unsigned long flags; | 884 | return phy_ethtool_gset(fep->phydev, cmd); |
834 | int rc; | ||
835 | |||
836 | spin_lock_irqsave(&fep->lock, flags); | ||
837 | rc = mii_ethtool_gset(&fep->mii_if, cmd); | ||
838 | spin_unlock_irqrestore(&fep->lock, flags); | ||
839 | |||
840 | return rc; | ||
841 | } | 885 | } |
842 | 886 | ||
843 | static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 887 | static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
844 | { | 888 | { |
845 | struct fs_enet_private *fep = netdev_priv(dev); | 889 | struct fs_enet_private *fep = netdev_priv(dev); |
846 | unsigned long flags; | 890 | phy_ethtool_sset(fep->phydev, cmd); |
847 | int rc; | 891 | return 0; |
848 | |||
849 | spin_lock_irqsave(&fep->lock, flags); | ||
850 | rc = mii_ethtool_sset(&fep->mii_if, cmd); | ||
851 | spin_unlock_irqrestore(&fep->lock, flags); | ||
852 | |||
853 | return rc; | ||
854 | } | 892 | } |
855 | 893 | ||
856 | static int fs_nway_reset(struct net_device *dev) | 894 | static int fs_nway_reset(struct net_device *dev) |
857 | { | 895 | { |
858 | struct fs_enet_private *fep = netdev_priv(dev); | 896 | return 0; |
859 | return mii_nway_restart(&fep->mii_if); | ||
860 | } | 897 | } |
861 | 898 | ||
862 | static u32 fs_get_msglevel(struct net_device *dev) | 899 | static u32 fs_get_msglevel(struct net_device *dev) |
@@ -898,7 +935,7 @@ static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
898 | return -EINVAL; | 935 | return -EINVAL; |
899 | 936 | ||
900 | spin_lock_irqsave(&fep->lock, flags); | 937 | spin_lock_irqsave(&fep->lock, flags); |
901 | rc = generic_mii_ioctl(&fep->mii_if, mii, cmd, NULL); | 938 | rc = phy_mii_ioctl(fep->phydev, mii, cmd); |
902 | spin_unlock_irqrestore(&fep->lock, flags); | 939 | spin_unlock_irqrestore(&fep->lock, flags); |
903 | return rc; | 940 | return rc; |
904 | } | 941 | } |
@@ -1030,12 +1067,6 @@ static struct net_device *fs_init_instance(struct device *dev, | |||
1030 | } | 1067 | } |
1031 | registered = 1; | 1068 | registered = 1; |
1032 | 1069 | ||
1033 | err = fs_mii_connect(ndev); | ||
1034 | if (err != 0) { | ||
1035 | printk(KERN_ERR DRV_MODULE_NAME | ||
1036 | ": %s fs_mii_connect failed.\n", ndev->name); | ||
1037 | goto err; | ||
1038 | } | ||
1039 | 1070 | ||
1040 | return ndev; | 1071 | return ndev; |
1041 | 1072 | ||
@@ -1073,8 +1104,6 @@ static int fs_cleanup_instance(struct net_device *ndev) | |||
1073 | 1104 | ||
1074 | fpi = fep->fpi; | 1105 | fpi = fep->fpi; |
1075 | 1106 | ||
1076 | fs_mii_disconnect(ndev); | ||
1077 | |||
1078 | unregister_netdev(ndev); | 1107 | unregister_netdev(ndev); |
1079 | 1108 | ||
1080 | dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t), | 1109 | dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t), |
@@ -1196,17 +1225,39 @@ static int __init fs_init(void) | |||
1196 | r = setup_immap(); | 1225 | r = setup_immap(); |
1197 | if (r != 0) | 1226 | if (r != 0) |
1198 | return r; | 1227 | return r; |
1199 | r = driver_register(&fs_enet_fec_driver); | 1228 | |
1229 | #ifdef CONFIG_FS_ENET_HAS_FCC | ||
1230 | /* let's insert mii stuff */ | ||
1231 | r = fs_enet_mdio_bb_init(); | ||
1232 | |||
1233 | if (r != 0) { | ||
1234 | printk(KERN_ERR DRV_MODULE_NAME | ||
1235 | "BB PHY init failed.\n"); | ||
1236 | return r; | ||
1237 | } | ||
1238 | r = driver_register(&fs_enet_fcc_driver); | ||
1200 | if (r != 0) | 1239 | if (r != 0) |
1201 | goto err; | 1240 | goto err; |
1241 | #endif | ||
1202 | 1242 | ||
1203 | r = driver_register(&fs_enet_fcc_driver); | 1243 | #ifdef CONFIG_FS_ENET_HAS_FEC |
1244 | r = fs_enet_mdio_fec_init(); | ||
1245 | if (r != 0) { | ||
1246 | printk(KERN_ERR DRV_MODULE_NAME | ||
1247 | "FEC PHY init failed.\n"); | ||
1248 | return r; | ||
1249 | } | ||
1250 | |||
1251 | r = driver_register(&fs_enet_fec_driver); | ||
1204 | if (r != 0) | 1252 | if (r != 0) |
1205 | goto err; | 1253 | goto err; |
1254 | #endif | ||
1206 | 1255 | ||
1256 | #ifdef CONFIG_FS_ENET_HAS_SCC | ||
1207 | r = driver_register(&fs_enet_scc_driver); | 1257 | r = driver_register(&fs_enet_scc_driver); |
1208 | if (r != 0) | 1258 | if (r != 0) |
1209 | goto err; | 1259 | goto err; |
1260 | #endif | ||
1210 | 1261 | ||
1211 | return 0; | 1262 | return 0; |
1212 | err: | 1263 | err: |
diff --git a/drivers/net/fs_enet/fs_enet-mii.c b/drivers/net/fs_enet/fs_enet-mii.c deleted file mode 100644 index b7e6e21725cb..000000000000 --- a/drivers/net/fs_enet/fs_enet-mii.c +++ /dev/null | |||
@@ -1,505 +0,0 @@ | |||
1 | /* | ||
2 | * Combined Ethernet driver for Motorola MPC8xx and MPC82xx. | ||
3 | * | ||
4 | * Copyright (c) 2003 Intracom S.A. | ||
5 | * by Pantelis Antoniou <panto@intracom.gr> | ||
6 | * | ||
7 | * 2005 (c) MontaVista Software, Inc. | ||
8 | * Vitaly Bordug <vbordug@ru.mvista.com> | ||
9 | * | ||
10 | * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com> | ||
11 | * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se> | ||
12 | * | ||
13 | * This file is licensed under the terms of the GNU General Public License | ||
14 | * version 2. This program is licensed "as is" without any warranty of any | ||
15 | * kind, whether express or implied. | ||
16 | */ | ||
17 | |||
18 | |||
19 | #include <linux/module.h> | ||
20 | #include <linux/types.h> | ||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/sched.h> | ||
23 | #include <linux/string.h> | ||
24 | #include <linux/ptrace.h> | ||
25 | #include <linux/errno.h> | ||
26 | #include <linux/ioport.h> | ||
27 | #include <linux/slab.h> | ||
28 | #include <linux/interrupt.h> | ||
29 | #include <linux/pci.h> | ||
30 | #include <linux/init.h> | ||
31 | #include <linux/delay.h> | ||
32 | #include <linux/netdevice.h> | ||
33 | #include <linux/etherdevice.h> | ||
34 | #include <linux/skbuff.h> | ||
35 | #include <linux/spinlock.h> | ||
36 | #include <linux/mii.h> | ||
37 | #include <linux/ethtool.h> | ||
38 | #include <linux/bitops.h> | ||
39 | |||
40 | #include <asm/pgtable.h> | ||
41 | #include <asm/irq.h> | ||
42 | #include <asm/uaccess.h> | ||
43 | |||
44 | #include "fs_enet.h" | ||
45 | |||
46 | /*************************************************/ | ||
47 | |||
48 | /* | ||
49 | * Generic PHY support. | ||
50 | * Should work for all PHYs, but link change is detected by polling | ||
51 | */ | ||
52 | |||
53 | static void generic_timer_callback(unsigned long data) | ||
54 | { | ||
55 | struct net_device *dev = (struct net_device *)data; | ||
56 | struct fs_enet_private *fep = netdev_priv(dev); | ||
57 | |||
58 | fep->phy_timer_list.expires = jiffies + HZ / 2; | ||
59 | |||
60 | add_timer(&fep->phy_timer_list); | ||
61 | |||
62 | fs_mii_link_status_change_check(dev, 0); | ||
63 | } | ||
64 | |||
65 | static void generic_startup(struct net_device *dev) | ||
66 | { | ||
67 | struct fs_enet_private *fep = netdev_priv(dev); | ||
68 | |||
69 | fep->phy_timer_list.expires = jiffies + HZ / 2; /* every 500ms */ | ||
70 | fep->phy_timer_list.data = (unsigned long)dev; | ||
71 | fep->phy_timer_list.function = generic_timer_callback; | ||
72 | add_timer(&fep->phy_timer_list); | ||
73 | } | ||
74 | |||
75 | static void generic_shutdown(struct net_device *dev) | ||
76 | { | ||
77 | struct fs_enet_private *fep = netdev_priv(dev); | ||
78 | |||
79 | del_timer_sync(&fep->phy_timer_list); | ||
80 | } | ||
81 | |||
82 | /* ------------------------------------------------------------------------- */ | ||
83 | /* The Davicom DM9161 is used on the NETTA board */ | ||
84 | |||
85 | /* register definitions */ | ||
86 | |||
87 | #define MII_DM9161_ANAR 4 /* Aux. Config Register */ | ||
88 | #define MII_DM9161_ACR 16 /* Aux. Config Register */ | ||
89 | #define MII_DM9161_ACSR 17 /* Aux. Config/Status Register */ | ||
90 | #define MII_DM9161_10TCSR 18 /* 10BaseT Config/Status Reg. */ | ||
91 | #define MII_DM9161_INTR 21 /* Interrupt Register */ | ||
92 | #define MII_DM9161_RECR 22 /* Receive Error Counter Reg. */ | ||
93 | #define MII_DM9161_DISCR 23 /* Disconnect Counter Register */ | ||
94 | |||
95 | static void dm9161_startup(struct net_device *dev) | ||
96 | { | ||
97 | struct fs_enet_private *fep = netdev_priv(dev); | ||
98 | |||
99 | fs_mii_write(dev, fep->mii_if.phy_id, MII_DM9161_INTR, 0x0000); | ||
100 | /* Start autonegotiation */ | ||
101 | fs_mii_write(dev, fep->mii_if.phy_id, MII_BMCR, 0x1200); | ||
102 | |||
103 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
104 | schedule_timeout(HZ*8); | ||
105 | } | ||
106 | |||
107 | static void dm9161_ack_int(struct net_device *dev) | ||
108 | { | ||
109 | struct fs_enet_private *fep = netdev_priv(dev); | ||
110 | |||
111 | fs_mii_read(dev, fep->mii_if.phy_id, MII_DM9161_INTR); | ||
112 | } | ||
113 | |||
114 | static void dm9161_shutdown(struct net_device *dev) | ||
115 | { | ||
116 | struct fs_enet_private *fep = netdev_priv(dev); | ||
117 | |||
118 | fs_mii_write(dev, fep->mii_if.phy_id, MII_DM9161_INTR, 0x0f00); | ||
119 | } | ||
120 | |||
121 | /**********************************************************************************/ | ||
122 | |||
123 | static const struct phy_info phy_info[] = { | ||
124 | { | ||
125 | .id = 0x00181b88, | ||
126 | .name = "DM9161", | ||
127 | .startup = dm9161_startup, | ||
128 | .ack_int = dm9161_ack_int, | ||
129 | .shutdown = dm9161_shutdown, | ||
130 | }, { | ||
131 | .id = 0, | ||
132 | .name = "GENERIC", | ||
133 | .startup = generic_startup, | ||
134 | .shutdown = generic_shutdown, | ||
135 | }, | ||
136 | }; | ||
137 | |||
138 | /**********************************************************************************/ | ||
139 | |||
140 | static int phy_id_detect(struct net_device *dev) | ||
141 | { | ||
142 | struct fs_enet_private *fep = netdev_priv(dev); | ||
143 | const struct fs_platform_info *fpi = fep->fpi; | ||
144 | struct fs_enet_mii_bus *bus = fep->mii_bus; | ||
145 | int i, r, start, end, phytype, physubtype; | ||
146 | const struct phy_info *phy; | ||
147 | int phy_hwid, phy_id; | ||
148 | |||
149 | phy_hwid = -1; | ||
150 | fep->phy = NULL; | ||
151 | |||
152 | /* auto-detect? */ | ||
153 | if (fpi->phy_addr == -1) { | ||
154 | start = 1; | ||
155 | end = 32; | ||
156 | } else { /* direct */ | ||
157 | start = fpi->phy_addr; | ||
158 | end = start + 1; | ||
159 | } | ||
160 | |||
161 | for (phy_id = start; phy_id < end; phy_id++) { | ||
162 | /* skip already used phy addresses on this bus */ | ||
163 | if (bus->usage_map & (1 << phy_id)) | ||
164 | continue; | ||
165 | r = fs_mii_read(dev, phy_id, MII_PHYSID1); | ||
166 | if (r == -1 || (phytype = (r & 0xffff)) == 0xffff) | ||
167 | continue; | ||
168 | r = fs_mii_read(dev, phy_id, MII_PHYSID2); | ||
169 | if (r == -1 || (physubtype = (r & 0xffff)) == 0xffff) | ||
170 | continue; | ||
171 | phy_hwid = (phytype << 16) | physubtype; | ||
172 | if (phy_hwid != -1) | ||
173 | break; | ||
174 | } | ||
175 | |||
176 | if (phy_hwid == -1) { | ||
177 | printk(KERN_ERR DRV_MODULE_NAME | ||
178 | ": %s No PHY detected! range=0x%02x-0x%02x\n", | ||
179 | dev->name, start, end); | ||
180 | return -1; | ||
181 | } | ||
182 | |||
183 | for (i = 0, phy = phy_info; i < ARRAY_SIZE(phy_info); i++, phy++) | ||
184 | if (phy->id == (phy_hwid >> 4) || phy->id == 0) | ||
185 | break; | ||
186 | |||
187 | if (i >= ARRAY_SIZE(phy_info)) { | ||
188 | printk(KERN_ERR DRV_MODULE_NAME | ||
189 | ": %s PHY id 0x%08x is not supported!\n", | ||
190 | dev->name, phy_hwid); | ||
191 | return -1; | ||
192 | } | ||
193 | |||
194 | fep->phy = phy; | ||
195 | |||
196 | /* mark this address as used */ | ||
197 | bus->usage_map |= (1 << phy_id); | ||
198 | |||
199 | printk(KERN_INFO DRV_MODULE_NAME | ||
200 | ": %s Phy @ 0x%x, type %s (0x%08x)%s\n", | ||
201 | dev->name, phy_id, fep->phy->name, phy_hwid, | ||
202 | fpi->phy_addr == -1 ? " (auto-detected)" : ""); | ||
203 | |||
204 | return phy_id; | ||
205 | } | ||
206 | |||
207 | void fs_mii_startup(struct net_device *dev) | ||
208 | { | ||
209 | struct fs_enet_private *fep = netdev_priv(dev); | ||
210 | |||
211 | if (fep->phy->startup) | ||
212 | (*fep->phy->startup) (dev); | ||
213 | } | ||
214 | |||
215 | void fs_mii_shutdown(struct net_device *dev) | ||
216 | { | ||
217 | struct fs_enet_private *fep = netdev_priv(dev); | ||
218 | |||
219 | if (fep->phy->shutdown) | ||
220 | (*fep->phy->shutdown) (dev); | ||
221 | } | ||
222 | |||
223 | void fs_mii_ack_int(struct net_device *dev) | ||
224 | { | ||
225 | struct fs_enet_private *fep = netdev_priv(dev); | ||
226 | |||
227 | if (fep->phy->ack_int) | ||
228 | (*fep->phy->ack_int) (dev); | ||
229 | } | ||
230 | |||
231 | #define MII_LINK 0x0001 | ||
232 | #define MII_HALF 0x0002 | ||
233 | #define MII_FULL 0x0004 | ||
234 | #define MII_BASE4 0x0008 | ||
235 | #define MII_10M 0x0010 | ||
236 | #define MII_100M 0x0020 | ||
237 | #define MII_1G 0x0040 | ||
238 | #define MII_10G 0x0080 | ||
239 | |||
240 | /* return full mii info at one gulp, with a usable form */ | ||
241 | static unsigned int mii_full_status(struct mii_if_info *mii) | ||
242 | { | ||
243 | unsigned int status; | ||
244 | int bmsr, adv, lpa, neg; | ||
245 | struct fs_enet_private* fep = netdev_priv(mii->dev); | ||
246 | |||
247 | /* first, a dummy read, needed to latch some MII phys */ | ||
248 | (void)mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR); | ||
249 | bmsr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR); | ||
250 | |||
251 | /* no link */ | ||
252 | if ((bmsr & BMSR_LSTATUS) == 0) | ||
253 | return 0; | ||
254 | |||
255 | status = MII_LINK; | ||
256 | |||
257 | /* Lets look what ANEG says if it's supported - otherwize we shall | ||
258 | take the right values from the platform info*/ | ||
259 | if(!mii->force_media) { | ||
260 | /* autoneg not completed; don't bother */ | ||
261 | if ((bmsr & BMSR_ANEGCOMPLETE) == 0) | ||
262 | return 0; | ||
263 | |||
264 | adv = (*mii->mdio_read)(mii->dev, mii->phy_id, MII_ADVERTISE); | ||
265 | lpa = (*mii->mdio_read)(mii->dev, mii->phy_id, MII_LPA); | ||
266 | |||
267 | neg = lpa & adv; | ||
268 | } else { | ||
269 | neg = fep->fpi->bus_info->lpa; | ||
270 | } | ||
271 | |||
272 | if (neg & LPA_100FULL) | ||
273 | status |= MII_FULL | MII_100M; | ||
274 | else if (neg & LPA_100BASE4) | ||
275 | status |= MII_FULL | MII_BASE4 | MII_100M; | ||
276 | else if (neg & LPA_100HALF) | ||
277 | status |= MII_HALF | MII_100M; | ||
278 | else if (neg & LPA_10FULL) | ||
279 | status |= MII_FULL | MII_10M; | ||
280 | else | ||
281 | status |= MII_HALF | MII_10M; | ||
282 | |||
283 | return status; | ||
284 | } | ||
285 | |||
286 | void fs_mii_link_status_change_check(struct net_device *dev, int init_media) | ||
287 | { | ||
288 | struct fs_enet_private *fep = netdev_priv(dev); | ||
289 | struct mii_if_info *mii = &fep->mii_if; | ||
290 | unsigned int mii_status; | ||
291 | int ok_to_print, link, duplex, speed; | ||
292 | unsigned long flags; | ||
293 | |||
294 | ok_to_print = netif_msg_link(fep); | ||
295 | |||
296 | mii_status = mii_full_status(mii); | ||
297 | |||
298 | if (!init_media && mii_status == fep->last_mii_status) | ||
299 | return; | ||
300 | |||
301 | fep->last_mii_status = mii_status; | ||
302 | |||
303 | link = !!(mii_status & MII_LINK); | ||
304 | duplex = !!(mii_status & MII_FULL); | ||
305 | speed = (mii_status & MII_100M) ? 100 : 10; | ||
306 | |||
307 | if (link == 0) { | ||
308 | netif_carrier_off(mii->dev); | ||
309 | netif_stop_queue(dev); | ||
310 | if (!init_media) { | ||
311 | spin_lock_irqsave(&fep->lock, flags); | ||
312 | (*fep->ops->stop)(dev); | ||
313 | spin_unlock_irqrestore(&fep->lock, flags); | ||
314 | } | ||
315 | |||
316 | if (ok_to_print) | ||
317 | printk(KERN_INFO "%s: link down\n", mii->dev->name); | ||
318 | |||
319 | } else { | ||
320 | |||
321 | mii->full_duplex = duplex; | ||
322 | |||
323 | netif_carrier_on(mii->dev); | ||
324 | |||
325 | spin_lock_irqsave(&fep->lock, flags); | ||
326 | fep->duplex = duplex; | ||
327 | fep->speed = speed; | ||
328 | (*fep->ops->restart)(dev); | ||
329 | spin_unlock_irqrestore(&fep->lock, flags); | ||
330 | |||
331 | netif_start_queue(dev); | ||
332 | |||
333 | if (ok_to_print) | ||
334 | printk(KERN_INFO "%s: link up, %dMbps, %s-duplex\n", | ||
335 | dev->name, speed, duplex ? "full" : "half"); | ||
336 | } | ||
337 | } | ||
338 | |||
339 | /**********************************************************************************/ | ||
340 | |||
341 | int fs_mii_read(struct net_device *dev, int phy_id, int location) | ||
342 | { | ||
343 | struct fs_enet_private *fep = netdev_priv(dev); | ||
344 | struct fs_enet_mii_bus *bus = fep->mii_bus; | ||
345 | |||
346 | unsigned long flags; | ||
347 | int ret; | ||
348 | |||
349 | spin_lock_irqsave(&bus->mii_lock, flags); | ||
350 | ret = (*bus->mii_read)(bus, phy_id, location); | ||
351 | spin_unlock_irqrestore(&bus->mii_lock, flags); | ||
352 | |||
353 | return ret; | ||
354 | } | ||
355 | |||
356 | void fs_mii_write(struct net_device *dev, int phy_id, int location, int value) | ||
357 | { | ||
358 | struct fs_enet_private *fep = netdev_priv(dev); | ||
359 | struct fs_enet_mii_bus *bus = fep->mii_bus; | ||
360 | unsigned long flags; | ||
361 | |||
362 | spin_lock_irqsave(&bus->mii_lock, flags); | ||
363 | (*bus->mii_write)(bus, phy_id, location, value); | ||
364 | spin_unlock_irqrestore(&bus->mii_lock, flags); | ||
365 | } | ||
366 | |||
367 | /*****************************************************************************/ | ||
368 | |||
369 | /* list of all registered mii buses */ | ||
370 | static LIST_HEAD(fs_mii_bus_list); | ||
371 | |||
372 | static struct fs_enet_mii_bus *lookup_bus(int method, int id) | ||
373 | { | ||
374 | struct list_head *ptr; | ||
375 | struct fs_enet_mii_bus *bus; | ||
376 | |||
377 | list_for_each(ptr, &fs_mii_bus_list) { | ||
378 | bus = list_entry(ptr, struct fs_enet_mii_bus, list); | ||
379 | if (bus->bus_info->method == method && | ||
380 | bus->bus_info->id == id) | ||
381 | return bus; | ||
382 | } | ||
383 | return NULL; | ||
384 | } | ||
385 | |||
386 | static struct fs_enet_mii_bus *create_bus(const struct fs_mii_bus_info *bi) | ||
387 | { | ||
388 | struct fs_enet_mii_bus *bus; | ||
389 | int ret = 0; | ||
390 | |||
391 | bus = kmalloc(sizeof(*bus), GFP_KERNEL); | ||
392 | if (bus == NULL) { | ||
393 | ret = -ENOMEM; | ||
394 | goto err; | ||
395 | } | ||
396 | memset(bus, 0, sizeof(*bus)); | ||
397 | spin_lock_init(&bus->mii_lock); | ||
398 | bus->bus_info = bi; | ||
399 | bus->refs = 0; | ||
400 | bus->usage_map = 0; | ||
401 | |||
402 | /* perform initialization */ | ||
403 | switch (bi->method) { | ||
404 | |||
405 | case fsmii_fixed: | ||
406 | ret = fs_mii_fixed_init(bus); | ||
407 | if (ret != 0) | ||
408 | goto err; | ||
409 | break; | ||
410 | |||
411 | case fsmii_bitbang: | ||
412 | ret = fs_mii_bitbang_init(bus); | ||
413 | if (ret != 0) | ||
414 | goto err; | ||
415 | break; | ||
416 | #ifdef CONFIG_FS_ENET_HAS_FEC | ||
417 | case fsmii_fec: | ||
418 | ret = fs_mii_fec_init(bus); | ||
419 | if (ret != 0) | ||
420 | goto err; | ||
421 | break; | ||
422 | #endif | ||
423 | default: | ||
424 | ret = -EINVAL; | ||
425 | goto err; | ||
426 | } | ||
427 | |||
428 | list_add(&bus->list, &fs_mii_bus_list); | ||
429 | |||
430 | return bus; | ||
431 | |||
432 | err: | ||
433 | kfree(bus); | ||
434 | return ERR_PTR(ret); | ||
435 | } | ||
436 | |||
437 | static void destroy_bus(struct fs_enet_mii_bus *bus) | ||
438 | { | ||
439 | /* remove from bus list */ | ||
440 | list_del(&bus->list); | ||
441 | |||
442 | /* nothing more needed */ | ||
443 | kfree(bus); | ||
444 | } | ||
445 | |||
446 | int fs_mii_connect(struct net_device *dev) | ||
447 | { | ||
448 | struct fs_enet_private *fep = netdev_priv(dev); | ||
449 | const struct fs_platform_info *fpi = fep->fpi; | ||
450 | struct fs_enet_mii_bus *bus = NULL; | ||
451 | |||
452 | /* check method validity */ | ||
453 | switch (fpi->bus_info->method) { | ||
454 | case fsmii_fixed: | ||
455 | case fsmii_bitbang: | ||
456 | break; | ||
457 | #ifdef CONFIG_FS_ENET_HAS_FEC | ||
458 | case fsmii_fec: | ||
459 | break; | ||
460 | #endif | ||
461 | default: | ||
462 | printk(KERN_ERR DRV_MODULE_NAME | ||
463 | ": %s Unknown MII bus method (%d)!\n", | ||
464 | dev->name, fpi->bus_info->method); | ||
465 | return -EINVAL; | ||
466 | } | ||
467 | |||
468 | bus = lookup_bus(fpi->bus_info->method, fpi->bus_info->id); | ||
469 | |||
470 | /* if not found create new bus */ | ||
471 | if (bus == NULL) { | ||
472 | bus = create_bus(fpi->bus_info); | ||
473 | if (IS_ERR(bus)) { | ||
474 | printk(KERN_ERR DRV_MODULE_NAME | ||
475 | ": %s MII bus creation failure!\n", dev->name); | ||
476 | return PTR_ERR(bus); | ||
477 | } | ||
478 | } | ||
479 | |||
480 | bus->refs++; | ||
481 | |||
482 | fep->mii_bus = bus; | ||
483 | |||
484 | fep->mii_if.dev = dev; | ||
485 | fep->mii_if.phy_id_mask = 0x1f; | ||
486 | fep->mii_if.reg_num_mask = 0x1f; | ||
487 | fep->mii_if.mdio_read = fs_mii_read; | ||
488 | fep->mii_if.mdio_write = fs_mii_write; | ||
489 | fep->mii_if.force_media = fpi->bus_info->disable_aneg; | ||
490 | fep->mii_if.phy_id = phy_id_detect(dev); | ||
491 | |||
492 | return 0; | ||
493 | } | ||
494 | |||
495 | void fs_mii_disconnect(struct net_device *dev) | ||
496 | { | ||
497 | struct fs_enet_private *fep = netdev_priv(dev); | ||
498 | struct fs_enet_mii_bus *bus = NULL; | ||
499 | |||
500 | bus = fep->mii_bus; | ||
501 | fep->mii_bus = NULL; | ||
502 | |||
503 | if (--bus->refs <= 0) | ||
504 | destroy_bus(bus); | ||
505 | } | ||
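Everything the deleted file did by hand — the half-second polling timer, the DM9161 special cases, and the MII_PHYSID1/MII_PHYSID2 probe loop in phy_id_detect() — is now handled by the generic PHY layer: mdiobus_register() scans the bus and creates a phy_device for each responding address. For reference, the removed probe loop reduced to something like this per address (a stand-alone sketch with a hypothetical helper name, expressed against a generic struct mii_bus):

	#include <linux/mii.h>
	#include <linux/phy.h>

	/* Sketch of the per-address check phy_id_detect() performed:
	 * returns the 32-bit PHY id, or -1 if nothing answers. */
	static int sketch_read_phy_id(struct mii_bus *bus, int addr)
	{
		int id1 = bus->read(bus, addr, MII_PHYSID1);
		int id2 = bus->read(bus, addr, MII_PHYSID2);

		if (id1 < 0 || id2 < 0 || id1 == 0xffff || id2 == 0xffff)
			return -1;		/* no PHY at this address */
		return (id1 << 16) | id2;
	}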
diff --git a/drivers/net/fs_enet/fs_enet.h b/drivers/net/fs_enet/fs_enet.h index e7ec96c964a9..95022c005f75 100644 --- a/drivers/net/fs_enet/fs_enet.h +++ b/drivers/net/fs_enet/fs_enet.h | |||
@@ -5,6 +5,7 @@ | |||
5 | #include <linux/netdevice.h> | 5 | #include <linux/netdevice.h> |
6 | #include <linux/types.h> | 6 | #include <linux/types.h> |
7 | #include <linux/list.h> | 7 | #include <linux/list.h> |
8 | #include <linux/phy.h> | ||
8 | 9 | ||
9 | #include <linux/fs_enet_pd.h> | 10 | #include <linux/fs_enet_pd.h> |
10 | 11 | ||
@@ -12,12 +13,30 @@ | |||
12 | 13 | ||
13 | #ifdef CONFIG_CPM1 | 14 | #ifdef CONFIG_CPM1 |
14 | #include <asm/commproc.h> | 15 | #include <asm/commproc.h> |
16 | |||
17 | struct fec_info { | ||
18 | fec_t* fecp; | ||
19 | u32 mii_speed; | ||
20 | }; | ||
15 | #endif | 21 | #endif |
16 | 22 | ||
17 | #ifdef CONFIG_CPM2 | 23 | #ifdef CONFIG_CPM2 |
18 | #include <asm/cpm2.h> | 24 | #include <asm/cpm2.h> |
19 | #endif | 25 | #endif |
20 | 26 | ||
27 | /* This is used to operate with pins. | ||
28 | Note that the actual port size may | ||
29 | be different; cpm(s) handle it OK */ | ||
30 | struct bb_info { | ||
31 | u8 mdio_dat_msk; | ||
32 | u8 mdio_dir_msk; | ||
33 | u8 *mdio_dir; | ||
34 | u8 *mdio_dat; | ||
35 | u8 mdc_msk; | ||
36 | u8 *mdc_dat; | ||
37 | int delay; | ||
38 | }; | ||
39 | |||
21 | /* hw driver ops */ | 40 | /* hw driver ops */ |
22 | struct fs_ops { | 41 | struct fs_ops { |
23 | int (*setup_data)(struct net_device *dev); | 42 | int (*setup_data)(struct net_device *dev); |
@@ -25,6 +44,7 @@ struct fs_ops { | |||
25 | void (*free_bd)(struct net_device *dev); | 44 | void (*free_bd)(struct net_device *dev); |
26 | void (*cleanup_data)(struct net_device *dev); | 45 | void (*cleanup_data)(struct net_device *dev); |
27 | void (*set_multicast_list)(struct net_device *dev); | 46 | void (*set_multicast_list)(struct net_device *dev); |
47 | void (*adjust_link)(struct net_device *dev); | ||
28 | void (*restart)(struct net_device *dev); | 48 | void (*restart)(struct net_device *dev); |
29 | void (*stop)(struct net_device *dev); | 49 | void (*stop)(struct net_device *dev); |
30 | void (*pre_request_irq)(struct net_device *dev, int irq); | 50 | void (*pre_request_irq)(struct net_device *dev, int irq); |
@@ -100,10 +120,6 @@ struct fs_enet_mii_bus { | |||
100 | }; | 120 | }; |
101 | }; | 121 | }; |
102 | 122 | ||
103 | int fs_mii_bitbang_init(struct fs_enet_mii_bus *bus); | ||
104 | int fs_mii_fixed_init(struct fs_enet_mii_bus *bus); | ||
105 | int fs_mii_fec_init(struct fs_enet_mii_bus *bus); | ||
106 | |||
107 | struct fs_enet_private { | 123 | struct fs_enet_private { |
108 | struct device *dev; /* pointer back to the device (must be initialized first) */ | 124 | struct device *dev; /* pointer back to the device (must be initialized first) */ |
109 | spinlock_t lock; /* during all ops except TX pckt processing */ | 125 | spinlock_t lock; /* during all ops except TX pckt processing */ |
@@ -130,7 +146,8 @@ struct fs_enet_private { | |||
130 | struct fs_enet_mii_bus *mii_bus; | 146 | struct fs_enet_mii_bus *mii_bus; |
131 | int interrupt; | 147 | int interrupt; |
132 | 148 | ||
133 | int duplex, speed; /* current settings */ | 149 | struct phy_device *phydev; |
150 | int oldduplex, oldspeed, oldlink; /* current settings */ | ||
134 | 151 | ||
135 | /* event masks */ | 152 | /* event masks */ |
136 | u32 ev_napi_rx; /* mask of NAPI rx events */ | 153 | u32 ev_napi_rx; /* mask of NAPI rx events */ |
@@ -168,15 +185,9 @@ struct fs_enet_private { | |||
168 | }; | 185 | }; |
169 | 186 | ||
170 | /***************************************************************************/ | 187 | /***************************************************************************/ |
171 | 188 | int fs_enet_mdio_bb_init(void); | |
172 | int fs_mii_read(struct net_device *dev, int phy_id, int location); | 189 | int fs_mii_fixed_init(struct fs_enet_mii_bus *bus); |
173 | void fs_mii_write(struct net_device *dev, int phy_id, int location, int value); | 190 | int fs_enet_mdio_fec_init(void); |
174 | |||
175 | void fs_mii_startup(struct net_device *dev); | ||
176 | void fs_mii_shutdown(struct net_device *dev); | ||
177 | void fs_mii_ack_int(struct net_device *dev); | ||
178 | |||
179 | void fs_mii_link_status_change_check(struct net_device *dev, int init_media); | ||
180 | 191 | ||
181 | void fs_init_bds(struct net_device *dev); | 192 | void fs_init_bds(struct net_device *dev); |
182 | void fs_cleanup_bds(struct net_device *dev); | 193 | void fs_cleanup_bds(struct net_device *dev); |
@@ -194,7 +205,6 @@ int fs_enet_platform_init(void); | |||
194 | void fs_enet_platform_cleanup(void); | 205 | void fs_enet_platform_cleanup(void); |
195 | 206 | ||
196 | /***************************************************************************/ | 207 | /***************************************************************************/ |
197 | |||
198 | /* buffer descriptor access macros */ | 208 | /* buffer descriptor access macros */ |
199 | 209 | ||
200 | /* access macros */ | 210 | /* access macros */ |
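The new adjust_link hook and the oldlink/oldspeed/oldduplex fields added above are the standard phylib idiom: phylib invokes the hook whenever the negotiated state changes, and the driver compares phydev->link/speed/duplex against its cached values to decide whether the MAC must be reprogrammed. A minimal sketch of such a callback (not the commit's actual implementation, which lives in fs_enet-main.c outside this excerpt):

	static void sketch_adjust_link(struct net_device *dev)
	{
		struct fs_enet_private *fep = netdev_priv(dev);
		struct phy_device *phydev = fep->phydev;
		int new_state = 0;

		if (phydev->link) {
			if (phydev->duplex != fep->oldduplex ||
			    phydev->speed != fep->oldspeed) {
				new_state = 1;
				fep->oldduplex = phydev->duplex;
				fep->oldspeed = phydev->speed;
			}
			if (!fep->oldlink) {
				new_state = 1;
				fep->oldlink = 1;
			}
		} else if (fep->oldlink) {
			new_state = 1;
			fep->oldlink = 0;
			fep->oldspeed = 0;
			fep->oldduplex = -1;
		}

		if (new_state)
			(*fep->ops->restart)(dev);	/* reprogram the MAC (the real driver does this under fep->lock) */
	}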
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c index 64e20982c1fe..1ff2597b8495 100644 --- a/drivers/net/fs_enet/mac-fcc.c +++ b/drivers/net/fs_enet/mac-fcc.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/bitops.h> | 34 | #include <linux/bitops.h> |
35 | #include <linux/fs.h> | 35 | #include <linux/fs.h> |
36 | #include <linux/platform_device.h> | 36 | #include <linux/platform_device.h> |
37 | #include <linux/phy.h> | ||
37 | 38 | ||
38 | #include <asm/immap_cpm2.h> | 39 | #include <asm/immap_cpm2.h> |
39 | #include <asm/mpc8260.h> | 40 | #include <asm/mpc8260.h> |
@@ -122,22 +123,32 @@ static int do_pd_setup(struct fs_enet_private *fep) | |||
122 | 123 | ||
123 | /* Attach the memory for the FCC Parameter RAM */ | 124 | /* Attach the memory for the FCC Parameter RAM */ |
124 | r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_pram"); | 125 | r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_pram"); |
125 | fep->fcc.ep = (void *)r->start; | 126 | fep->fcc.ep = (void *)ioremap(r->start, r->end - r->start + 1); |
126 | |||
127 | if (fep->fcc.ep == NULL) | 127 | if (fep->fcc.ep == NULL) |
128 | return -EINVAL; | 128 | return -EINVAL; |
129 | 129 | ||
130 | r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_regs"); | 130 | r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_regs"); |
131 | fep->fcc.fccp = (void *)r->start; | 131 | fep->fcc.fccp = (void *)ioremap(r->start, r->end - r->start + 1); |
132 | |||
133 | if (fep->fcc.fccp == NULL) | 132 | if (fep->fcc.fccp == NULL) |
134 | return -EINVAL; | 133 | return -EINVAL; |
135 | 134 | ||
136 | fep->fcc.fcccp = (void *)fep->fpi->fcc_regs_c; | 135 | if (fep->fpi->fcc_regs_c) { |
136 | |||
137 | fep->fcc.fcccp = (void *)fep->fpi->fcc_regs_c; | ||
138 | } else { | ||
139 | r = platform_get_resource_byname(pdev, IORESOURCE_MEM, | ||
140 | "fcc_regs_c"); | ||
141 | fep->fcc.fcccp = (void *)ioremap(r->start, | ||
142 | r->end - r->start + 1); | ||
143 | } | ||
137 | 144 | ||
138 | if (fep->fcc.fcccp == NULL) | 145 | if (fep->fcc.fcccp == NULL) |
139 | return -EINVAL; | 146 | return -EINVAL; |
140 | 147 | ||
148 | fep->fcc.mem = (void *)fep->fpi->mem_offset; | ||
149 | if (fep->fcc.mem == NULL) | ||
150 | return -EINVAL; | ||
151 | |||
141 | return 0; | 152 | return 0; |
142 | } | 153 | } |
143 | 154 | ||
@@ -155,8 +166,6 @@ static int setup_data(struct net_device *dev) | |||
155 | if ((unsigned int)fep->fcc.idx >= 3) /* max 3 FCCs */ | 166 | if ((unsigned int)fep->fcc.idx >= 3) /* max 3 FCCs */ |
156 | return -EINVAL; | 167 | return -EINVAL; |
157 | 168 | ||
158 | fep->fcc.mem = (void *)fpi->mem_offset; | ||
159 | |||
160 | if (do_pd_setup(fep) != 0) | 169 | if (do_pd_setup(fep) != 0) |
161 | return -EINVAL; | 170 | return -EINVAL; |
162 | 171 | ||
@@ -394,7 +403,7 @@ static void restart(struct net_device *dev) | |||
394 | 403 | ||
395 | /* adjust to speed (for RMII mode) */ | 404 | /* adjust to speed (for RMII mode) */ |
396 | if (fpi->use_rmii) { | 405 | if (fpi->use_rmii) { |
397 | if (fep->speed == 100) | 406 | if (fep->phydev->speed == 100) |
398 | C8(fcccp, fcc_gfemr, 0x20); | 407 | C8(fcccp, fcc_gfemr, 0x20); |
399 | else | 408 | else |
400 | S8(fcccp, fcc_gfemr, 0x20); | 409 | S8(fcccp, fcc_gfemr, 0x20); |
@@ -420,7 +429,7 @@ static void restart(struct net_device *dev) | |||
420 | S32(fccp, fcc_fpsmr, FCC_PSMR_RMII); | 429 | S32(fccp, fcc_fpsmr, FCC_PSMR_RMII); |
421 | 430 | ||
422 | /* adjust to duplex mode */ | 431 | /* adjust to duplex mode */ |
423 | if (fep->duplex) | 432 | if (fep->phydev->duplex) |
424 | S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB); | 433 | S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB); |
425 | else | 434 | else |
426 | C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB); | 435 | C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB); |
@@ -486,7 +495,10 @@ static void rx_bd_done(struct net_device *dev) | |||
486 | 495 | ||
487 | static void tx_kickstart(struct net_device *dev) | 496 | static void tx_kickstart(struct net_device *dev) |
488 | { | 497 | { |
489 | /* nothing */ | 498 | struct fs_enet_private *fep = netdev_priv(dev); |
499 | fcc_t *fccp = fep->fcc.fccp; | ||
500 | |||
501 | S32(fccp, fcc_ftodr, 0x80); | ||
490 | } | 502 | } |
491 | 503 | ||
492 | static u32 get_int_events(struct net_device *dev) | 504 | static u32 get_int_events(struct net_device *dev) |
diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c index e09547077529..c2c5fd419bd0 100644 --- a/drivers/net/fs_enet/mac-fec.c +++ b/drivers/net/fs_enet/mac-fec.c | |||
@@ -46,6 +46,7 @@ | |||
46 | #endif | 46 | #endif |
47 | 47 | ||
48 | #include "fs_enet.h" | 48 | #include "fs_enet.h" |
49 | #include "fec.h" | ||
49 | 50 | ||
50 | /*************************************************/ | 51 | /*************************************************/ |
51 | 52 | ||
@@ -75,50 +76,8 @@ | |||
75 | /* clear bits */ | 76 | /* clear bits */ |
76 | #define FC(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) & ~(_v)) | 77 | #define FC(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) & ~(_v)) |
77 | 78 | ||
78 | |||
79 | /* CRC polynomium used by the FEC for the multicast group filtering */ | ||
80 | #define FEC_CRC_POLY 0x04C11DB7 | ||
81 | |||
82 | #define FEC_MAX_MULTICAST_ADDRS 64 | ||
83 | |||
84 | /* Interrupt events/masks. | ||
85 | */ | ||
86 | #define FEC_ENET_HBERR 0x80000000U /* Heartbeat error */ | ||
87 | #define FEC_ENET_BABR 0x40000000U /* Babbling receiver */ | ||
88 | #define FEC_ENET_BABT 0x20000000U /* Babbling transmitter */ | ||
89 | #define FEC_ENET_GRA 0x10000000U /* Graceful stop complete */ | ||
90 | #define FEC_ENET_TXF 0x08000000U /* Full frame transmitted */ | ||
91 | #define FEC_ENET_TXB 0x04000000U /* A buffer was transmitted */ | ||
92 | #define FEC_ENET_RXF 0x02000000U /* Full frame received */ | ||
93 | #define FEC_ENET_RXB 0x01000000U /* A buffer was received */ | ||
94 | #define FEC_ENET_MII 0x00800000U /* MII interrupt */ | ||
95 | #define FEC_ENET_EBERR 0x00400000U /* SDMA bus error */ | ||
96 | |||
97 | #define FEC_ECNTRL_PINMUX 0x00000004 | ||
98 | #define FEC_ECNTRL_ETHER_EN 0x00000002 | ||
99 | #define FEC_ECNTRL_RESET 0x00000001 | ||
100 | |||
101 | #define FEC_RCNTRL_BC_REJ 0x00000010 | ||
102 | #define FEC_RCNTRL_PROM 0x00000008 | ||
103 | #define FEC_RCNTRL_MII_MODE 0x00000004 | ||
104 | #define FEC_RCNTRL_DRT 0x00000002 | ||
105 | #define FEC_RCNTRL_LOOP 0x00000001 | ||
106 | |||
107 | #define FEC_TCNTRL_FDEN 0x00000004 | ||
108 | #define FEC_TCNTRL_HBC 0x00000002 | ||
109 | #define FEC_TCNTRL_GTS 0x00000001 | ||
110 | |||
111 | |||
112 | /* Make MII read/write commands for the FEC. | ||
113 | */ | ||
114 | #define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18)) | ||
115 | #define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff)) | ||
116 | #define mk_mii_end 0 | ||
117 | |||
118 | #define FEC_MII_LOOPS 10000 | ||
119 | |||
120 | /* | 79 | /* |
121 | * Delay to wait for FEC reset command to complete (in us) | 80 | * Delay to wait for FEC reset command to complete (in us) |
122 | */ | 81 | */ |
123 | #define FEC_RESET_DELAY 50 | 82 | #define FEC_RESET_DELAY 50 |
124 | 83 | ||
@@ -303,13 +262,15 @@ static void restart(struct net_device *dev) | |||
303 | int r; | 262 | int r; |
304 | u32 addrhi, addrlo; | 263 | u32 addrhi, addrlo; |
305 | 264 | ||
265 | struct mii_bus* mii = fep->phydev->bus; | ||
266 | struct fec_info* fec_inf = mii->priv; | ||
267 | |||
306 | r = whack_reset(fep->fec.fecp); | 268 | r = whack_reset(fep->fec.fecp); |
307 | if (r != 0) | 269 | if (r != 0) |
308 | printk(KERN_ERR DRV_MODULE_NAME | 270 | printk(KERN_ERR DRV_MODULE_NAME |
309 | ": %s FEC Reset FAILED!\n", dev->name); | 271 | ": %s FEC Reset FAILED!\n", dev->name); |
310 | |||
311 | /* | 272 | /* |
312 | * Set station address. | 273 | * Set station address. |
313 | */ | 274 | */ |
314 | addrhi = ((u32) dev->dev_addr[0] << 24) | | 275 | addrhi = ((u32) dev->dev_addr[0] << 24) | |
315 | ((u32) dev->dev_addr[1] << 16) | | 276 | ((u32) dev->dev_addr[1] << 16) | |
@@ -350,12 +311,12 @@ static void restart(struct net_device *dev) | |||
350 | FW(fecp, fun_code, 0x78000000); | 311 | FW(fecp, fun_code, 0x78000000); |
351 | 312 | ||
352 | /* | 313 | /* |
353 | * Set MII speed. | 314 | * Set MII speed. |
354 | */ | 315 | */ |
355 | FW(fecp, mii_speed, fep->mii_bus->fec.mii_speed); | 316 | FW(fecp, mii_speed, fec_inf->mii_speed); |
356 | 317 | ||
357 | /* | 318 | /* |
358 | * Clear any outstanding interrupt. | 319 | * Clear any outstanding interrupt. |
359 | */ | 320 | */ |
360 | FW(fecp, ievent, 0xffc0); | 321 | FW(fecp, ievent, 0xffc0); |
361 | FW(fecp, ivec, (fep->interrupt / 2) << 29); | 322 | FW(fecp, ivec, (fep->interrupt / 2) << 29); |
@@ -390,11 +351,12 @@ static void restart(struct net_device *dev) | |||
390 | } | 351 | } |
391 | #endif | 352 | #endif |
392 | 353 | ||
354 | |||
393 | FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ | 355 | FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ |
394 | /* | 356 | /* |
395 | * adjust to duplex mode | 357 | * adjust to duplex mode |
396 | */ | 358 | */ |
397 | if (fep->duplex) { | 359 | if (fep->phydev->duplex) { |
398 | FC(fecp, r_cntrl, FEC_RCNTRL_DRT); | 360 | FC(fecp, r_cntrl, FEC_RCNTRL_DRT); |
399 | FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */ | 361 | FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */ |
400 | } else { | 362 | } else { |
@@ -418,9 +380,11 @@ static void restart(struct net_device *dev) | |||
418 | static void stop(struct net_device *dev) | 380 | static void stop(struct net_device *dev) |
419 | { | 381 | { |
420 | struct fs_enet_private *fep = netdev_priv(dev); | 382 | struct fs_enet_private *fep = netdev_priv(dev); |
383 | const struct fs_platform_info *fpi = fep->fpi; | ||
421 | fec_t *fecp = fep->fec.fecp; | 384 | fec_t *fecp = fep->fec.fecp; |
422 | struct fs_enet_mii_bus *bus = fep->mii_bus; | 385 | |
423 | const struct fs_mii_bus_info *bi = bus->bus_info; | 386 | struct fec_info* feci= fep->phydev->bus->priv; |
387 | |||
424 | int i; | 388 | int i; |
425 | 389 | ||
426 | if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0) | 390 | if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0) |
@@ -444,11 +408,11 @@ static void stop(struct net_device *dev) | |||
444 | fs_cleanup_bds(dev); | 408 | fs_cleanup_bds(dev); |
445 | 409 | ||
446 | /* shut down FEC1? that's where the mii bus is */ | 410 | /* shut down FEC1? that's where the mii bus is */ |
447 | if (fep->fec.idx == 0 && bus->refs > 1 && bi->method == fsmii_fec) { | 411 | if (fpi->has_phy) { |
448 | FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ | 412 | FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ |
449 | FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); | 413 | FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); |
450 | FW(fecp, ievent, FEC_ENET_MII); | 414 | FW(fecp, ievent, FEC_ENET_MII); |
451 | FW(fecp, mii_speed, bus->fec.mii_speed); | 415 | FW(fecp, mii_speed, feci->mii_speed); |
452 | } | 416 | } |
453 | } | 417 | } |
454 | 418 | ||
@@ -583,73 +547,3 @@ const struct fs_ops fs_fec_ops = { | |||
583 | .free_bd = free_bd, | 547 | .free_bd = free_bd, |
584 | }; | 548 | }; |
585 | 549 | ||
586 | /***********************************************************************/ | ||
587 | |||
588 | static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location) | ||
589 | { | ||
590 | fec_t *fecp = bus->fec.fecp; | ||
591 | int i, ret = -1; | ||
592 | |||
593 | if ((FR(fecp, r_cntrl) & FEC_RCNTRL_MII_MODE) == 0) | ||
594 | BUG(); | ||
595 | |||
596 | /* Add PHY address to register command. */ | ||
597 | FW(fecp, mii_data, (phy_id << 23) | mk_mii_read(location)); | ||
598 | |||
599 | for (i = 0; i < FEC_MII_LOOPS; i++) | ||
600 | if ((FR(fecp, ievent) & FEC_ENET_MII) != 0) | ||
601 | break; | ||
602 | |||
603 | if (i < FEC_MII_LOOPS) { | ||
604 | FW(fecp, ievent, FEC_ENET_MII); | ||
605 | ret = FR(fecp, mii_data) & 0xffff; | ||
606 | } | ||
607 | |||
608 | return ret; | ||
609 | } | ||
610 | |||
611 | static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int value) | ||
612 | { | ||
613 | fec_t *fecp = bus->fec.fecp; | ||
614 | int i; | ||
615 | |||
616 | /* this must never happen */ | ||
617 | if ((FR(fecp, r_cntrl) & FEC_RCNTRL_MII_MODE) == 0) | ||
618 | BUG(); | ||
619 | |||
620 | /* Add PHY address to register command. */ | ||
621 | FW(fecp, mii_data, (phy_id << 23) | mk_mii_write(location, value)); | ||
622 | |||
623 | for (i = 0; i < FEC_MII_LOOPS; i++) | ||
624 | if ((FR(fecp, ievent) & FEC_ENET_MII) != 0) | ||
625 | break; | ||
626 | |||
627 | if (i < FEC_MII_LOOPS) | ||
628 | FW(fecp, ievent, FEC_ENET_MII); | ||
629 | } | ||
630 | |||
631 | int fs_mii_fec_init(struct fs_enet_mii_bus *bus) | ||
632 | { | ||
633 | bd_t *bd = (bd_t *)__res; | ||
634 | const struct fs_mii_bus_info *bi = bus->bus_info; | ||
635 | fec_t *fecp; | ||
636 | |||
637 | if (bi->id != 0) | ||
638 | return -1; | ||
639 | |||
640 | bus->fec.fecp = &((immap_t *)fs_enet_immap)->im_cpm.cp_fec; | ||
641 | bus->fec.mii_speed = ((((bd->bi_intfreq + 4999999) / 2500000) / 2) | ||
642 | & 0x3F) << 1; | ||
643 | |||
644 | fecp = bus->fec.fecp; | ||
645 | |||
646 | FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ | ||
647 | FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); | ||
648 | FW(fecp, ievent, FEC_ENET_MII); | ||
649 | FW(fecp, mii_speed, bus->fec.mii_speed); | ||
650 | |||
651 | bus->mii_read = mii_read; | ||
652 | bus->mii_write = mii_write; | ||
653 | |||
654 | return 0; | ||
655 | } | ||
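The block deleted above also drops the mk_mii_read/mk_mii_write helpers that encoded a clause-22 management frame into the FEC's mii_data register; that duty moves into the new mii-fec.c bus driver below. For reference, the encoding those removed macros produced:

	/* Frame layout written to the FEC mii_data register (from the removed macros):
	 *   [31:30] start of frame  (01)
	 *   [29:28] opcode          (10 = read, 01 = write)
	 *   [27:23] PHY address
	 *   [22:18] register number
	 *   [17:16] turnaround      (10)
	 *   [15:0]  data to write, or data returned by a read
	 */
	#define mk_mii_read(REG)	(0x60020000 | ((REG & 0x1f) << 18))
	#define mk_mii_write(REG, VAL)	(0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff))

	/* Example: reading BMSR (register 1) of the PHY at address 2 */
	/*	mii_data = (2 << 23) | mk_mii_read(1);   -- evaluates to 0x61060000 */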
diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c index eaa24fab645f..95ec5872c507 100644 --- a/drivers/net/fs_enet/mac-scc.c +++ b/drivers/net/fs_enet/mac-scc.c | |||
@@ -369,7 +369,7 @@ static void restart(struct net_device *dev) | |||
369 | W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22); | 369 | W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22); |
370 | 370 | ||
371 | /* Set full duplex mode if needed */ | 371 | /* Set full duplex mode if needed */ |
372 | if (fep->duplex) | 372 | if (fep->phydev->duplex) |
373 | S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE); | 373 | S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE); |
374 | 374 | ||
375 | S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT); | 375 | S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT); |
@@ -500,6 +500,8 @@ static void tx_restart(struct net_device *dev) | |||
500 | scc_cr_cmd(fep, CPM_CR_RESTART_TX); | 500 | scc_cr_cmd(fep, CPM_CR_RESTART_TX); |
501 | } | 501 | } |
502 | 502 | ||
503 | |||
504 | |||
503 | /*************************************************************************/ | 505 | /*************************************************************************/ |
504 | 506 | ||
505 | const struct fs_ops fs_scc_ops = { | 507 | const struct fs_ops fs_scc_ops = { |
diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c index 48f9cf83ab6f..0b9b8b5c847c 100644 --- a/drivers/net/fs_enet/mii-bitbang.c +++ b/drivers/net/fs_enet/mii-bitbang.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/mii.h> | 33 | #include <linux/mii.h> |
34 | #include <linux/ethtool.h> | 34 | #include <linux/ethtool.h> |
35 | #include <linux/bitops.h> | 35 | #include <linux/bitops.h> |
36 | #include <linux/platform_device.h> | ||
36 | 37 | ||
37 | #include <asm/pgtable.h> | 38 | #include <asm/pgtable.h> |
38 | #include <asm/irq.h> | 39 | #include <asm/irq.h> |
@@ -40,129 +41,25 @@ | |||
40 | 41 | ||
41 | #include "fs_enet.h" | 42 | #include "fs_enet.h" |
42 | 43 | ||
43 | #ifdef CONFIG_8xx | 44 | static int bitbang_prep_bit(u8 **datp, u8 *mskp, |
44 | static int bitbang_prep_bit(u8 **dirp, u8 **datp, u8 *mskp, int port, int bit) | 45 | struct fs_mii_bit *mii_bit) |
45 | { | 46 | { |
46 | immap_t *im = (immap_t *)fs_enet_immap; | 47 | void *dat; |
47 | void *dir, *dat, *ppar; | ||
48 | int adv; | 48 | int adv; |
49 | u8 msk; | 49 | u8 msk; |
50 | 50 | ||
51 | switch (port) { | 51 | dat = (void*) mii_bit->offset; |
52 | case fsiop_porta: | ||
53 | dir = &im->im_ioport.iop_padir; | ||
54 | dat = &im->im_ioport.iop_padat; | ||
55 | ppar = &im->im_ioport.iop_papar; | ||
56 | break; | ||
57 | |||
58 | case fsiop_portb: | ||
59 | dir = &im->im_cpm.cp_pbdir; | ||
60 | dat = &im->im_cpm.cp_pbdat; | ||
61 | ppar = &im->im_cpm.cp_pbpar; | ||
62 | break; | ||
63 | |||
64 | case fsiop_portc: | ||
65 | dir = &im->im_ioport.iop_pcdir; | ||
66 | dat = &im->im_ioport.iop_pcdat; | ||
67 | ppar = &im->im_ioport.iop_pcpar; | ||
68 | break; | ||
69 | |||
70 | case fsiop_portd: | ||
71 | dir = &im->im_ioport.iop_pddir; | ||
72 | dat = &im->im_ioport.iop_pddat; | ||
73 | ppar = &im->im_ioport.iop_pdpar; | ||
74 | break; | ||
75 | |||
76 | case fsiop_porte: | ||
77 | dir = &im->im_cpm.cp_pedir; | ||
78 | dat = &im->im_cpm.cp_pedat; | ||
79 | ppar = &im->im_cpm.cp_pepar; | ||
80 | break; | ||
81 | |||
82 | default: | ||
83 | printk(KERN_ERR DRV_MODULE_NAME | ||
84 | "Illegal port value %d!\n", port); | ||
85 | return -EINVAL; | ||
86 | } | ||
87 | |||
88 | adv = bit >> 3; | ||
89 | dir = (char *)dir + adv; | ||
90 | dat = (char *)dat + adv; | ||
91 | ppar = (char *)ppar + adv; | ||
92 | |||
93 | msk = 1 << (7 - (bit & 7)); | ||
94 | if ((in_8(ppar) & msk) != 0) { | ||
95 | printk(KERN_ERR DRV_MODULE_NAME | ||
96 | "pin %d on port %d is not general purpose!\n", bit, port); | ||
97 | return -EINVAL; | ||
98 | } | ||
99 | |||
100 | *dirp = dir; | ||
101 | *datp = dat; | ||
102 | *mskp = msk; | ||
103 | |||
104 | return 0; | ||
105 | } | ||
106 | #endif | ||
107 | |||
108 | #ifdef CONFIG_8260 | ||
109 | static int bitbang_prep_bit(u8 **dirp, u8 **datp, u8 *mskp, int port, int bit) | ||
110 | { | ||
111 | iop_cpm2_t *io = &((cpm2_map_t *)fs_enet_immap)->im_ioport; | ||
112 | void *dir, *dat, *ppar; | ||
113 | int adv; | ||
114 | u8 msk; | ||
115 | |||
116 | switch (port) { | ||
117 | case fsiop_porta: | ||
118 | dir = &io->iop_pdira; | ||
119 | dat = &io->iop_pdata; | ||
120 | ppar = &io->iop_ppara; | ||
121 | break; | ||
122 | |||
123 | case fsiop_portb: | ||
124 | dir = &io->iop_pdirb; | ||
125 | dat = &io->iop_pdatb; | ||
126 | ppar = &io->iop_pparb; | ||
127 | break; | ||
128 | |||
129 | case fsiop_portc: | ||
130 | dir = &io->iop_pdirc; | ||
131 | dat = &io->iop_pdatc; | ||
132 | ppar = &io->iop_pparc; | ||
133 | break; | ||
134 | |||
135 | case fsiop_portd: | ||
136 | dir = &io->iop_pdird; | ||
137 | dat = &io->iop_pdatd; | ||
138 | ppar = &io->iop_ppard; | ||
139 | break; | ||
140 | |||
141 | default: | ||
142 | printk(KERN_ERR DRV_MODULE_NAME | ||
143 | "Illegal port value %d!\n", port); | ||
144 | return -EINVAL; | ||
145 | } | ||
146 | 52 | ||
147 | adv = bit >> 3; | 53 | adv = mii_bit->bit >> 3; |
148 | dir = (char *)dir + adv; | ||
149 | dat = (char *)dat + adv; | 54 | dat = (char *)dat + adv; |
150 | ppar = (char *)ppar + adv; | ||
151 | 55 | ||
152 | msk = 1 << (7 - (bit & 7)); | 56 | msk = 1 << (7 - (mii_bit->bit & 7)); |
153 | if ((in_8(ppar) & msk) != 0) { | ||
154 | printk(KERN_ERR DRV_MODULE_NAME | ||
155 | "pin %d on port %d is not general purpose!\n", bit, port); | ||
156 | return -EINVAL; | ||
157 | } | ||
158 | 57 | ||
159 | *dirp = dir; | ||
160 | *datp = dat; | 58 | *datp = dat; |
161 | *mskp = msk; | 59 | *mskp = msk; |
162 | 60 | ||
163 | return 0; | 61 | return 0; |
164 | } | 62 | } |
165 | #endif | ||
166 | 63 | ||
167 | static inline void bb_set(u8 *p, u8 m) | 64 | static inline void bb_set(u8 *p, u8 m) |
168 | { | 65 | { |
@@ -179,44 +76,44 @@ static inline int bb_read(u8 *p, u8 m) | |||
179 | return (in_8(p) & m) != 0; | 76 | return (in_8(p) & m) != 0; |
180 | } | 77 | } |
181 | 78 | ||
182 | static inline void mdio_active(struct fs_enet_mii_bus *bus) | 79 | static inline void mdio_active(struct bb_info *bitbang) |
183 | { | 80 | { |
184 | bb_set(bus->bitbang.mdio_dir, bus->bitbang.mdio_msk); | 81 | bb_set(bitbang->mdio_dir, bitbang->mdio_dir_msk); |
185 | } | 82 | } |
186 | 83 | ||
187 | static inline void mdio_tristate(struct fs_enet_mii_bus *bus) | 84 | static inline void mdio_tristate(struct bb_info *bitbang ) |
188 | { | 85 | { |
189 | bb_clr(bus->bitbang.mdio_dir, bus->bitbang.mdio_msk); | 86 | bb_clr(bitbang->mdio_dir, bitbang->mdio_dir_msk); |
190 | } | 87 | } |
191 | 88 | ||
192 | static inline int mdio_read(struct fs_enet_mii_bus *bus) | 89 | static inline int mdio_read(struct bb_info *bitbang ) |
193 | { | 90 | { |
194 | return bb_read(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk); | 91 | return bb_read(bitbang->mdio_dat, bitbang->mdio_dat_msk); |
195 | } | 92 | } |
196 | 93 | ||
197 | static inline void mdio(struct fs_enet_mii_bus *bus, int what) | 94 | static inline void mdio(struct bb_info *bitbang , int what) |
198 | { | 95 | { |
199 | if (what) | 96 | if (what) |
200 | bb_set(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk); | 97 | bb_set(bitbang->mdio_dat, bitbang->mdio_dat_msk); |
201 | else | 98 | else |
202 | bb_clr(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk); | 99 | bb_clr(bitbang->mdio_dat, bitbang->mdio_dat_msk); |
203 | } | 100 | } |
204 | 101 | ||
205 | static inline void mdc(struct fs_enet_mii_bus *bus, int what) | 102 | static inline void mdc(struct bb_info *bitbang , int what) |
206 | { | 103 | { |
207 | if (what) | 104 | if (what) |
208 | bb_set(bus->bitbang.mdc_dat, bus->bitbang.mdc_msk); | 105 | bb_set(bitbang->mdc_dat, bitbang->mdc_msk); |
209 | else | 106 | else |
210 | bb_clr(bus->bitbang.mdc_dat, bus->bitbang.mdc_msk); | 107 | bb_clr(bitbang->mdc_dat, bitbang->mdc_msk); |
211 | } | 108 | } |
212 | 109 | ||
213 | static inline void mii_delay(struct fs_enet_mii_bus *bus) | 110 | static inline void mii_delay(struct bb_info *bitbang ) |
214 | { | 111 | { |
215 | udelay(bus->bus_info->i.bitbang.delay); | 112 | udelay(bitbang->delay); |
216 | } | 113 | } |
217 | 114 | ||
218 | /* Utility to send the preamble, address, and register (common to read and write). */ | 115 | /* Utility to send the preamble, address, and register (common to read and write). */ |
219 | static void bitbang_pre(struct fs_enet_mii_bus *bus, int read, u8 addr, u8 reg) | 116 | static void bitbang_pre(struct bb_info *bitbang , int read, u8 addr, u8 reg) |
220 | { | 117 | { |
221 | int j; | 118 | int j; |
222 | 119 | ||
@@ -228,177 +125,284 @@ static void bitbang_pre(struct fs_enet_mii_bus *bus, int read, u8 addr, u8 reg) | |||
228 | * but it is safer and will be much more robust. | 125 | * but it is safer and will be much more robust. |
229 | */ | 126 | */ |
230 | 127 | ||
231 | mdio_active(bus); | 128 | mdio_active(bitbang); |
232 | mdio(bus, 1); | 129 | mdio(bitbang, 1); |
233 | for (j = 0; j < 32; j++) { | 130 | for (j = 0; j < 32; j++) { |
234 | mdc(bus, 0); | 131 | mdc(bitbang, 0); |
235 | mii_delay(bus); | 132 | mii_delay(bitbang); |
236 | mdc(bus, 1); | 133 | mdc(bitbang, 1); |
237 | mii_delay(bus); | 134 | mii_delay(bitbang); |
238 | } | 135 | } |
239 | 136 | ||
240 | /* send the start bit (01) and the read opcode (10) or write (10) */ | 137 | /* send the start bit (01) and the read opcode (10) or write (10) */ |
241 | mdc(bus, 0); | 138 | mdc(bitbang, 0); |
242 | mdio(bus, 0); | 139 | mdio(bitbang, 0); |
243 | mii_delay(bus); | 140 | mii_delay(bitbang); |
244 | mdc(bus, 1); | 141 | mdc(bitbang, 1); |
245 | mii_delay(bus); | 142 | mii_delay(bitbang); |
246 | mdc(bus, 0); | 143 | mdc(bitbang, 0); |
247 | mdio(bus, 1); | 144 | mdio(bitbang, 1); |
248 | mii_delay(bus); | 145 | mii_delay(bitbang); |
249 | mdc(bus, 1); | 146 | mdc(bitbang, 1); |
250 | mii_delay(bus); | 147 | mii_delay(bitbang); |
251 | mdc(bus, 0); | 148 | mdc(bitbang, 0); |
252 | mdio(bus, read); | 149 | mdio(bitbang, read); |
253 | mii_delay(bus); | 150 | mii_delay(bitbang); |
254 | mdc(bus, 1); | 151 | mdc(bitbang, 1); |
255 | mii_delay(bus); | 152 | mii_delay(bitbang); |
256 | mdc(bus, 0); | 153 | mdc(bitbang, 0); |
257 | mdio(bus, !read); | 154 | mdio(bitbang, !read); |
258 | mii_delay(bus); | 155 | mii_delay(bitbang); |
259 | mdc(bus, 1); | 156 | mdc(bitbang, 1); |
260 | mii_delay(bus); | 157 | mii_delay(bitbang); |
261 | 158 | ||
262 | /* send the PHY address */ | 159 | /* send the PHY address */ |
263 | for (j = 0; j < 5; j++) { | 160 | for (j = 0; j < 5; j++) { |
264 | mdc(bus, 0); | 161 | mdc(bitbang, 0); |
265 | mdio(bus, (addr & 0x10) != 0); | 162 | mdio(bitbang, (addr & 0x10) != 0); |
266 | mii_delay(bus); | 163 | mii_delay(bitbang); |
267 | mdc(bus, 1); | 164 | mdc(bitbang, 1); |
268 | mii_delay(bus); | 165 | mii_delay(bitbang); |
269 | addr <<= 1; | 166 | addr <<= 1; |
270 | } | 167 | } |
271 | 168 | ||
272 | /* send the register address */ | 169 | /* send the register address */ |
273 | for (j = 0; j < 5; j++) { | 170 | for (j = 0; j < 5; j++) { |
274 | mdc(bus, 0); | 171 | mdc(bitbang, 0); |
275 | mdio(bus, (reg & 0x10) != 0); | 172 | mdio(bitbang, (reg & 0x10) != 0); |
276 | mii_delay(bus); | 173 | mii_delay(bitbang); |
277 | mdc(bus, 1); | 174 | mdc(bitbang, 1); |
278 | mii_delay(bus); | 175 | mii_delay(bitbang); |
279 | reg <<= 1; | 176 | reg <<= 1; |
280 | } | 177 | } |
281 | } | 178 | } |
282 | 179 | ||
283 | static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location) | 180 | static int fs_enet_mii_bb_read(struct mii_bus *bus , int phy_id, int location) |
284 | { | 181 | { |
285 | u16 rdreg; | 182 | u16 rdreg; |
286 | int ret, j; | 183 | int ret, j; |
287 | u8 addr = phy_id & 0xff; | 184 | u8 addr = phy_id & 0xff; |
288 | u8 reg = location & 0xff; | 185 | u8 reg = location & 0xff; |
186 | struct bb_info* bitbang = bus->priv; | ||
289 | 187 | ||
290 | bitbang_pre(bus, 1, addr, reg); | 188 | bitbang_pre(bitbang, 1, addr, reg); |
291 | 189 | ||
292 | /* tri-state our MDIO I/O pin so we can read */ | 190 | /* tri-state our MDIO I/O pin so we can read */ |
293 | mdc(bus, 0); | 191 | mdc(bitbang, 0); |
294 | mdio_tristate(bus); | 192 | mdio_tristate(bitbang); |
295 | mii_delay(bus); | 193 | mii_delay(bitbang); |
296 | mdc(bus, 1); | 194 | mdc(bitbang, 1); |
297 | mii_delay(bus); | 195 | mii_delay(bitbang); |
298 | 196 | ||
299 | /* check the turnaround bit: the PHY should be driving it to zero */ | 197 | /* check the turnaround bit: the PHY should be driving it to zero */ |
300 | if (mdio_read(bus) != 0) { | 198 | if (mdio_read(bitbang) != 0) { |
301 | /* PHY didn't drive TA low */ | 199 | /* PHY didn't drive TA low */ |
302 | for (j = 0; j < 32; j++) { | 200 | for (j = 0; j < 32; j++) { |
303 | mdc(bus, 0); | 201 | mdc(bitbang, 0); |
304 | mii_delay(bus); | 202 | mii_delay(bitbang); |
305 | mdc(bus, 1); | 203 | mdc(bitbang, 1); |
306 | mii_delay(bus); | 204 | mii_delay(bitbang); |
307 | } | 205 | } |
308 | ret = -1; | 206 | ret = -1; |
309 | goto out; | 207 | goto out; |
310 | } | 208 | } |
311 | 209 | ||
312 | mdc(bus, 0); | 210 | mdc(bitbang, 0); |
313 | mii_delay(bus); | 211 | mii_delay(bitbang); |
314 | 212 | ||
315 | /* read 16 bits of register data, MSB first */ | 213 | /* read 16 bits of register data, MSB first */ |
316 | rdreg = 0; | 214 | rdreg = 0; |
317 | for (j = 0; j < 16; j++) { | 215 | for (j = 0; j < 16; j++) { |
318 | mdc(bus, 1); | 216 | mdc(bitbang, 1); |
319 | mii_delay(bus); | 217 | mii_delay(bitbang); |
320 | rdreg <<= 1; | 218 | rdreg <<= 1; |
321 | rdreg |= mdio_read(bus); | 219 | rdreg |= mdio_read(bitbang); |
322 | mdc(bus, 0); | 220 | mdc(bitbang, 0); |
323 | mii_delay(bus); | 221 | mii_delay(bitbang); |
324 | } | 222 | } |
325 | 223 | ||
326 | mdc(bus, 1); | 224 | mdc(bitbang, 1); |
327 | mii_delay(bus); | 225 | mii_delay(bitbang); |
328 | mdc(bus, 0); | 226 | mdc(bitbang, 0); |
329 | mii_delay(bus); | 227 | mii_delay(bitbang); |
330 | mdc(bus, 1); | 228 | mdc(bitbang, 1); |
331 | mii_delay(bus); | 229 | mii_delay(bitbang); |
332 | 230 | ||
333 | ret = rdreg; | 231 | ret = rdreg; |
334 | out: | 232 | out: |
335 | return ret; | 233 | return ret; |
336 | } | 234 | } |
337 | 235 | ||
338 | static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int val) | 236 | static int fs_enet_mii_bb_write(struct mii_bus *bus, int phy_id, int location, u16 val) |
339 | { | 237 | { |
340 | int j; | 238 | int j; |
239 | struct bb_info* bitbang = bus->priv; | ||
240 | |||
341 | u8 addr = phy_id & 0xff; | 241 | u8 addr = phy_id & 0xff; |
342 | u8 reg = location & 0xff; | 242 | u8 reg = location & 0xff; |
343 | u16 value = val & 0xffff; | 243 | u16 value = val & 0xffff; |
344 | 244 | ||
345 | bitbang_pre(bus, 0, addr, reg); | 245 | bitbang_pre(bitbang, 0, addr, reg); |
346 | 246 | ||
347 | /* send the turnaround (10) */ | 247 | /* send the turnaround (10) */ |
348 | mdc(bus, 0); | 248 | mdc(bitbang, 0); |
349 | mdio(bus, 1); | 249 | mdio(bitbang, 1); |
350 | mii_delay(bus); | 250 | mii_delay(bitbang); |
351 | mdc(bus, 1); | 251 | mdc(bitbang, 1); |
352 | mii_delay(bus); | 252 | mii_delay(bitbang); |
353 | mdc(bus, 0); | 253 | mdc(bitbang, 0); |
354 | mdio(bus, 0); | 254 | mdio(bitbang, 0); |
355 | mii_delay(bus); | 255 | mii_delay(bitbang); |
356 | mdc(bus, 1); | 256 | mdc(bitbang, 1); |
357 | mii_delay(bus); | 257 | mii_delay(bitbang); |
358 | 258 | ||
359 | /* write 16 bits of register data, MSB first */ | 259 | /* write 16 bits of register data, MSB first */ |
360 | for (j = 0; j < 16; j++) { | 260 | for (j = 0; j < 16; j++) { |
361 | mdc(bus, 0); | 261 | mdc(bitbang, 0); |
362 | mdio(bus, (value & 0x8000) != 0); | 262 | mdio(bitbang, (value & 0x8000) != 0); |
363 | mii_delay(bus); | 263 | mii_delay(bitbang); |
364 | mdc(bus, 1); | 264 | mdc(bitbang, 1); |
365 | mii_delay(bus); | 265 | mii_delay(bitbang); |
366 | value <<= 1; | 266 | value <<= 1; |
367 | } | 267 | } |
368 | 268 | ||
369 | /* | 269 | /* |
370 | * Tri-state the MDIO line. | 270 | * Tri-state the MDIO line. |
371 | */ | 271 | */ |
372 | mdio_tristate(bus); | 272 | mdio_tristate(bitbang); |
373 | mdc(bus, 0); | 273 | mdc(bitbang, 0); |
374 | mii_delay(bus); | 274 | mii_delay(bitbang); |
375 | mdc(bus, 1); | 275 | mdc(bitbang, 1); |
376 | mii_delay(bus); | 276 | mii_delay(bitbang); |
277 | return 0; | ||
377 | } | 278 | } |
378 | 279 | ||
379 | int fs_mii_bitbang_init(struct fs_enet_mii_bus *bus) | 280 | static int fs_enet_mii_bb_reset(struct mii_bus *bus) |
281 | { | ||
282 | /*nothing here - dunno how to reset it*/ | ||
283 | return 0; | ||
284 | } | ||
285 | |||
286 | static int fs_mii_bitbang_init(struct bb_info *bitbang, struct fs_mii_bb_platform_info* fmpi) | ||
380 | { | 287 | { |
381 | const struct fs_mii_bus_info *bi = bus->bus_info; | ||
382 | int r; | 288 | int r; |
383 | 289 | ||
384 | r = bitbang_prep_bit(&bus->bitbang.mdio_dir, | 290 | bitbang->delay = fmpi->delay; |
385 | &bus->bitbang.mdio_dat, | 291 | |
386 | &bus->bitbang.mdio_msk, | 292 | r = bitbang_prep_bit(&bitbang->mdio_dir, |
387 | bi->i.bitbang.mdio_port, | 293 | &bitbang->mdio_dir_msk, |
388 | bi->i.bitbang.mdio_bit); | 294 | &fmpi->mdio_dir); |
389 | if (r != 0) | 295 | if (r != 0) |
390 | return r; | 296 | return r; |
391 | 297 | ||
392 | r = bitbang_prep_bit(&bus->bitbang.mdc_dir, | 298 | r = bitbang_prep_bit(&bitbang->mdio_dat, |
393 | &bus->bitbang.mdc_dat, | 299 | &bitbang->mdio_dat_msk, |
394 | &bus->bitbang.mdc_msk, | 300 | &fmpi->mdio_dat); |
395 | bi->i.bitbang.mdc_port, | ||
396 | bi->i.bitbang.mdc_bit); | ||
397 | if (r != 0) | 301 | if (r != 0) |
398 | return r; | 302 | return r; |
399 | 303 | ||
400 | bus->mii_read = mii_read; | 304 | r = bitbang_prep_bit(&bitbang->mdc_dat, |
401 | bus->mii_write = mii_write; | 305 | &bitbang->mdc_msk, |
306 | &fmpi->mdc_dat); | ||
307 | if (r != 0) | ||
308 | return r; | ||
402 | 309 | ||
403 | return 0; | 310 | return 0; |
404 | } | 311 | } |
312 | |||
313 | |||
314 | static int __devinit fs_enet_mdio_probe(struct device *dev) | ||
315 | { | ||
316 | struct platform_device *pdev = to_platform_device(dev); | ||
317 | struct fs_mii_bb_platform_info *pdata; | ||
318 | struct mii_bus *new_bus; | ||
319 | struct bb_info *bitbang; | ||
320 | int err = 0; | ||
321 | |||
322 | if (NULL == dev) | ||
323 | return -EINVAL; | ||
324 | |||
325 | new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL); | ||
326 | |||
327 | if (NULL == new_bus) | ||
328 | return -ENOMEM; | ||
329 | |||
330 | bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL); | ||
331 | |||
332 | if (NULL == bitbang) | ||
333 | return -ENOMEM; | ||
334 | |||
335 | new_bus->name = "BB MII Bus", | ||
336 | new_bus->read = &fs_enet_mii_bb_read, | ||
337 | new_bus->write = &fs_enet_mii_bb_write, | ||
338 | new_bus->reset = &fs_enet_mii_bb_reset, | ||
339 | new_bus->id = pdev->id; | ||
340 | |||
341 | new_bus->phy_mask = ~0x9; | ||
342 | pdata = (struct fs_mii_bb_platform_info *)pdev->dev.platform_data; | ||
343 | |||
344 | if (NULL == pdata) { | ||
345 | printk(KERN_ERR "gfar mdio %d: Missing platform data!\n", pdev->id); | ||
346 | return -ENODEV; | ||
347 | } | ||
348 | |||
349 | /*set up workspace*/ | ||
350 | fs_mii_bitbang_init(bitbang, pdata); | ||
351 | |||
352 | new_bus->priv = bitbang; | ||
353 | |||
354 | new_bus->irq = pdata->irq; | ||
355 | |||
356 | new_bus->dev = dev; | ||
357 | dev_set_drvdata(dev, new_bus); | ||
358 | |||
359 | err = mdiobus_register(new_bus); | ||
360 | |||
361 | if (0 != err) { | ||
362 | printk (KERN_ERR "%s: Cannot register as MDIO bus\n", | ||
363 | new_bus->name); | ||
364 | goto bus_register_fail; | ||
365 | } | ||
366 | |||
367 | return 0; | ||
368 | |||
369 | bus_register_fail: | ||
370 | kfree(bitbang); | ||
371 | kfree(new_bus); | ||
372 | |||
373 | return err; | ||
374 | } | ||
375 | |||
376 | |||
377 | static int fs_enet_mdio_remove(struct device *dev) | ||
378 | { | ||
379 | struct mii_bus *bus = dev_get_drvdata(dev); | ||
380 | |||
381 | mdiobus_unregister(bus); | ||
382 | |||
383 | dev_set_drvdata(dev, NULL); | ||
384 | |||
385 | iounmap((void *) (&bus->priv)); | ||
386 | bus->priv = NULL; | ||
387 | kfree(bus); | ||
388 | |||
389 | return 0; | ||
390 | } | ||
391 | |||
392 | static struct device_driver fs_enet_bb_mdio_driver = { | ||
393 | .name = "fsl-bb-mdio", | ||
394 | .bus = &platform_bus_type, | ||
395 | .probe = fs_enet_mdio_probe, | ||
396 | .remove = fs_enet_mdio_remove, | ||
397 | }; | ||
398 | |||
399 | int fs_enet_mdio_bb_init(void) | ||
400 | { | ||
401 | return driver_register(&fs_enet_bb_mdio_driver); | ||
402 | } | ||
403 | |||
404 | void fs_enet_mdio_bb_exit(void) | ||
405 | { | ||
406 | driver_unregister(&fs_enet_bb_mdio_driver); | ||
407 | } | ||
408 | |||
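The mii-bitbang.c changes above port the bit-banged MDIO code to the generic mii_bus interface: the write path clocks a clause-22 management frame out on MDC/MDIO one bit at a time, MSB first, and releases (tri-states) MDIO once the 16 data bits are out. As a reference for the wire format, which also explains the 0x50020000/0x60020000 constants in the new mii-fec.c further below, here is a minimal user-space sketch, not driver code; the mdio_write_frame() helper and the example PHY address and BMCR value are illustrative only.

#include <stdio.h>
#include <stdint.h>

/* Illustrative only: build an IEEE 802.3 clause-22 MDIO write frame.
 * Layout (after the preamble of 32 ones):
 *   01 (start) 01 (write opcode) PPPPP (phy) RRRRR (reg) 10 (turnaround) DDDDDDDDDDDDDDDD (data)
 */
static uint32_t mdio_write_frame(unsigned phy, unsigned reg, uint16_t val)
{
        return (0x1u << 30) |            /* start: 01 */
               (0x1u << 28) |            /* opcode: 01 = write */
               ((phy & 0x1f) << 23) |
               ((reg & 0x1f) << 18) |
               (0x2u << 16) |            /* turnaround: 10 */
               val;
}

int main(void)
{
        /* Hypothetical values: PHY 3, register 0 (BMCR), enable + restart autoneg. */
        uint32_t frame = mdio_write_frame(3, 0, 0x1200);
        int i;

        /* A bit-banging driver would drive MDIO to each bit while toggling MDC. */
        for (i = 31; i >= 0; i--)
                putchar((frame >> i) & 1 ? '1' : '0');
        putchar('\n');
        return 0;
}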
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c new file mode 100644 index 000000000000..1328e10caa35 --- /dev/null +++ b/drivers/net/fs_enet/mii-fec.c | |||
@@ -0,0 +1,243 @@ | |||
1 | /* | ||
2 | * Combined Ethernet driver for Motorola MPC8xx and MPC82xx. | ||
3 | * | ||
4 | * Copyright (c) 2003 Intracom S.A. | ||
5 | * by Pantelis Antoniou <panto@intracom.gr> | ||
6 | * | ||
7 | * 2005 (c) MontaVista Software, Inc. | ||
8 | * Vitaly Bordug <vbordug@ru.mvista.com> | ||
9 | * | ||
10 | * This file is licensed under the terms of the GNU General Public License | ||
11 | * version 2. This program is licensed "as is" without any warranty of any | ||
12 | * kind, whether express or implied. | ||
13 | */ | ||
14 | |||
15 | |||
16 | #include <linux/config.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/types.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/sched.h> | ||
21 | #include <linux/string.h> | ||
22 | #include <linux/ptrace.h> | ||
23 | #include <linux/errno.h> | ||
24 | #include <linux/ioport.h> | ||
25 | #include <linux/slab.h> | ||
26 | #include <linux/interrupt.h> | ||
27 | #include <linux/pci.h> | ||
28 | #include <linux/init.h> | ||
29 | #include <linux/delay.h> | ||
30 | #include <linux/netdevice.h> | ||
31 | #include <linux/etherdevice.h> | ||
32 | #include <linux/skbuff.h> | ||
33 | #include <linux/spinlock.h> | ||
34 | #include <linux/mii.h> | ||
35 | #include <linux/ethtool.h> | ||
36 | #include <linux/bitops.h> | ||
37 | #include <linux/platform_device.h> | ||
38 | |||
39 | #include <asm/pgtable.h> | ||
40 | #include <asm/irq.h> | ||
41 | #include <asm/uaccess.h> | ||
42 | |||
43 | #include "fs_enet.h" | ||
44 | #include "fec.h" | ||
45 | |||
46 | /* Make MII read/write commands for the FEC. | ||
47 | */ | ||
48 | #define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18)) | ||
49 | #define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff)) | ||
50 | #define mk_mii_end 0 | ||
51 | |||
52 | #define FEC_MII_LOOPS 10000 | ||
53 | |||
54 | static int match_has_phy (struct device *dev, void* data) | ||
55 | { | ||
56 | struct platform_device* pdev = container_of(dev, struct platform_device, dev); | ||
57 | struct fs_platform_info* fpi; | ||
58 | if(strcmp(pdev->name, (char*)data)) | ||
59 | { | ||
60 | return 0; | ||
61 | } | ||
62 | |||
63 | fpi = pdev->dev.platform_data; | ||
64 | if((fpi)&&(fpi->has_phy)) | ||
65 | return 1; | ||
66 | return 0; | ||
67 | } | ||
68 | |||
69 | static int fs_mii_fec_init(struct fec_info* fec, struct fs_mii_fec_platform_info *fmpi) | ||
70 | { | ||
71 | struct resource *r; | ||
72 | fec_t *fecp; | ||
73 | char* name = "fsl-cpm-fec"; | ||
74 | |||
75 | /* we need fec in order to be useful */ | ||
76 | struct platform_device *fec_pdev = | ||
77 | container_of(bus_find_device(&platform_bus_type, NULL, name, match_has_phy), | ||
78 | struct platform_device, dev); | ||
79 | |||
80 | if(fec_pdev == NULL) { | ||
81 | printk(KERN_ERR"Unable to find PHY for %s", name); | ||
82 | return -ENODEV; | ||
83 | } | ||
84 | |||
85 | r = platform_get_resource_byname(fec_pdev, IORESOURCE_MEM, "regs"); | ||
86 | |||
87 | fec->fecp = fecp = (fec_t*)ioremap(r->start,sizeof(fec_t)); | ||
88 | fec->mii_speed = fmpi->mii_speed; | ||
89 | |||
90 | setbits32(&fecp->fec_r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ | ||
91 | setbits32(&fecp->fec_ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); | ||
92 | out_be32(&fecp->fec_ievent, FEC_ENET_MII); | ||
93 | out_be32(&fecp->fec_mii_speed, fec->mii_speed); | ||
94 | |||
95 | return 0; | ||
96 | } | ||
97 | |||
98 | static int fs_enet_fec_mii_read(struct mii_bus *bus , int phy_id, int location) | ||
99 | { | ||
100 | struct fec_info* fec = bus->priv; | ||
101 | fec_t *fecp = fec->fecp; | ||
102 | int i, ret = -1; | ||
103 | |||
104 | if ((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0) | ||
105 | BUG(); | ||
106 | |||
107 | /* Add PHY address to register command. */ | ||
108 | out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_read(location)); | ||
109 | |||
110 | for (i = 0; i < FEC_MII_LOOPS; i++) | ||
111 | if ((in_be32(&fecp->fec_ievent) & FEC_ENET_MII) != 0) | ||
112 | break; | ||
113 | |||
114 | if (i < FEC_MII_LOOPS) { | ||
115 | out_be32(&fecp->fec_ievent, FEC_ENET_MII); | ||
116 | ret = in_be32(&fecp->fec_mii_data) & 0xffff; | ||
117 | } | ||
118 | |||
119 | return ret; | ||
120 | |||
121 | } | ||
122 | |||
123 | static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location, u16 val) | ||
124 | { | ||
125 | struct fec_info* fec = bus->priv; | ||
126 | fec_t *fecp = fec->fecp; | ||
127 | int i; | ||
128 | |||
129 | /* this must never happen */ | ||
130 | if ((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0) | ||
131 | BUG(); | ||
132 | |||
133 | /* Add PHY address to register command. */ | ||
134 | out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_write(location, val)); | ||
135 | |||
136 | for (i = 0; i < FEC_MII_LOOPS; i++) | ||
137 | if ((in_be32(&fecp->fec_ievent) & FEC_ENET_MII) != 0) | ||
138 | break; | ||
139 | |||
140 | if (i < FEC_MII_LOOPS) | ||
141 | out_be32(&fecp->fec_ievent, FEC_ENET_MII); | ||
142 | |||
143 | return 0; | ||
144 | |||
145 | } | ||
146 | |||
147 | static int fs_enet_fec_mii_reset(struct mii_bus *bus) | ||
148 | { | ||
149 | /* nothing here - for now */ | ||
150 | return 0; | ||
151 | } | ||
152 | |||
153 | static int __devinit fs_enet_fec_mdio_probe(struct device *dev) | ||
154 | { | ||
155 | struct platform_device *pdev = to_platform_device(dev); | ||
156 | struct fs_mii_fec_platform_info *pdata; | ||
157 | struct mii_bus *new_bus; | ||
158 | struct fec_info *fec; | ||
159 | int err = 0; | ||
160 | if (NULL == dev) | ||
161 | return -EINVAL; | ||
162 | new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL); | ||
163 | |||
164 | if (NULL == new_bus) | ||
165 | return -ENOMEM; | ||
166 | |||
167 | fec = kzalloc(sizeof(struct fec_info), GFP_KERNEL); | ||
168 | |||
169 | if (NULL == fec) | ||
170 | return -ENOMEM; | ||
171 | |||
172 | new_bus->name = "FEC MII Bus", | ||
173 | new_bus->read = &fs_enet_fec_mii_read, | ||
174 | new_bus->write = &fs_enet_fec_mii_write, | ||
175 | new_bus->reset = &fs_enet_fec_mii_reset, | ||
176 | new_bus->id = pdev->id; | ||
177 | |||
178 | pdata = (struct fs_mii_fec_platform_info *)pdev->dev.platform_data; | ||
179 | |||
180 | if (NULL == pdata) { | ||
181 | printk(KERN_ERR "fs_enet FEC mdio %d: Missing platform data!\n", pdev->id); | ||
182 | return -ENODEV; | ||
183 | } | ||
184 | |||
185 | /*set up workspace*/ | ||
186 | |||
187 | fs_mii_fec_init(fec, pdata); | ||
188 | new_bus->priv = fec; | ||
189 | |||
190 | new_bus->irq = pdata->irq; | ||
191 | |||
192 | new_bus->dev = dev; | ||
193 | dev_set_drvdata(dev, new_bus); | ||
194 | |||
195 | err = mdiobus_register(new_bus); | ||
196 | |||
197 | if (0 != err) { | ||
198 | printk (KERN_ERR "%s: Cannot register as MDIO bus\n", | ||
199 | new_bus->name); | ||
200 | goto bus_register_fail; | ||
201 | } | ||
202 | |||
203 | return 0; | ||
204 | |||
205 | bus_register_fail: | ||
206 | kfree(new_bus); | ||
207 | |||
208 | return err; | ||
209 | } | ||
210 | |||
211 | |||
212 | static int fs_enet_fec_mdio_remove(struct device *dev) | ||
213 | { | ||
214 | struct mii_bus *bus = dev_get_drvdata(dev); | ||
215 | |||
216 | mdiobus_unregister(bus); | ||
217 | |||
218 | dev_set_drvdata(dev, NULL); | ||
219 | kfree(bus->priv); | ||
220 | |||
221 | bus->priv = NULL; | ||
222 | kfree(bus); | ||
223 | |||
224 | return 0; | ||
225 | } | ||
226 | |||
227 | static struct device_driver fs_enet_fec_mdio_driver = { | ||
228 | .name = "fsl-cpm-fec-mdio", | ||
229 | .bus = &platform_bus_type, | ||
230 | .probe = fs_enet_fec_mdio_probe, | ||
231 | .remove = fs_enet_fec_mdio_remove, | ||
232 | }; | ||
233 | |||
234 | int fs_enet_mdio_fec_init(void) | ||
235 | { | ||
236 | return driver_register(&fs_enet_fec_mdio_driver); | ||
237 | } | ||
238 | |||
239 | void fs_enet_mdio_fec_exit(void) | ||
240 | { | ||
241 | driver_unregister(&fs_enet_fec_mdio_driver); | ||
242 | } | ||
243 | |||
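The new mii-fec.c drives the same management frame through the FEC's MII data register instead of bit-banging it: fs_enet_fec_mii_read() writes the command word, then polls the FEC_ENET_MII event bit for at most FEC_MII_LOOPS iterations and uses the loop counter to tell completion from timeout. A small user-space sketch of that bounded-poll idiom follows; poll_event() and the returned register value are made-up stand-ins, not part of the driver.

#include <stdio.h>

#define MII_LOOPS 10000

/* Illustrative stand-in for testing the FEC_ENET_MII bit in fec_ievent:
 * pretend the completion event fires after a few polls. */
static int poll_event(void)
{
        static int countdown = 5;

        return --countdown == 0;
}

int main(void)
{
        int i, ret = -1;

        for (i = 0; i < MII_LOOPS; i++)
                if (poll_event())
                        break;

        if (i < MII_LOOPS)      /* loop ended via break, not by exhausting the budget */
                ret = 0x782d;   /* placeholder for the value read back from fec_mii_data */

        printf("polls=%d ret=0x%x\n", i, ret);
        return 0;
}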
diff --git a/drivers/net/fs_enet/mii-fixed.c b/drivers/net/fs_enet/mii-fixed.c deleted file mode 100644 index ae4a9c3bb393..000000000000 --- a/drivers/net/fs_enet/mii-fixed.c +++ /dev/null | |||
@@ -1,91 +0,0 @@ | |||
1 | /* | ||
2 | * Combined Ethernet driver for Motorola MPC8xx and MPC82xx. | ||
3 | * | ||
4 | * Copyright (c) 2003 Intracom S.A. | ||
5 | * by Pantelis Antoniou <panto@intracom.gr> | ||
6 | * | ||
7 | * 2005 (c) MontaVista Software, Inc. | ||
8 | * Vitaly Bordug <vbordug@ru.mvista.com> | ||
9 | * | ||
10 | * This file is licensed under the terms of the GNU General Public License | ||
11 | * version 2. This program is licensed "as is" without any warranty of any | ||
12 | * kind, whether express or implied. | ||
13 | */ | ||
14 | |||
15 | |||
16 | #include <linux/module.h> | ||
17 | #include <linux/types.h> | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/sched.h> | ||
20 | #include <linux/string.h> | ||
21 | #include <linux/ptrace.h> | ||
22 | #include <linux/errno.h> | ||
23 | #include <linux/ioport.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/interrupt.h> | ||
26 | #include <linux/pci.h> | ||
27 | #include <linux/init.h> | ||
28 | #include <linux/delay.h> | ||
29 | #include <linux/netdevice.h> | ||
30 | #include <linux/etherdevice.h> | ||
31 | #include <linux/skbuff.h> | ||
32 | #include <linux/spinlock.h> | ||
33 | #include <linux/mii.h> | ||
34 | #include <linux/ethtool.h> | ||
35 | #include <linux/bitops.h> | ||
36 | |||
37 | #include <asm/pgtable.h> | ||
38 | #include <asm/irq.h> | ||
39 | #include <asm/uaccess.h> | ||
40 | |||
41 | #include "fs_enet.h" | ||
42 | |||
43 | static const u16 mii_regs[7] = { | ||
44 | 0x3100, | ||
45 | 0x786d, | ||
46 | 0x0fff, | ||
47 | 0x0fff, | ||
48 | 0x01e1, | ||
49 | 0x45e1, | ||
50 | 0x0003, | ||
51 | }; | ||
52 | |||
53 | static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location) | ||
54 | { | ||
55 | int ret = 0; | ||
56 | |||
57 | if ((unsigned int)location >= ARRAY_SIZE(mii_regs)) | ||
58 | return -1; | ||
59 | |||
60 | if (location != 5) | ||
61 | ret = mii_regs[location]; | ||
62 | else | ||
63 | ret = bus->fixed.lpa; | ||
64 | |||
65 | return ret; | ||
66 | } | ||
67 | |||
68 | static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int val) | ||
69 | { | ||
70 | /* do nothing */ | ||
71 | } | ||
72 | |||
73 | int fs_mii_fixed_init(struct fs_enet_mii_bus *bus) | ||
74 | { | ||
75 | const struct fs_mii_bus_info *bi = bus->bus_info; | ||
76 | |||
77 | bus->fixed.lpa = 0x45e1; /* default 100Mb, full duplex */ | ||
78 | |||
79 | /* if speed is fixed at 10Mb, remove 100Mb modes */ | ||
80 | if (bi->i.fixed.speed == 10) | ||
81 | bus->fixed.lpa &= ~LPA_100; | ||
82 | |||
83 | /* if duplex is half, remove full duplex modes */ | ||
84 | if (bi->i.fixed.duplex == 0) | ||
85 | bus->fixed.lpa &= ~LPA_DUPLEX; | ||
86 | |||
87 | bus->mii_read = mii_read; | ||
88 | bus->mii_write = mii_write; | ||
89 | |||
90 | return 0; | ||
91 | } | ||
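The deleted mii-fixed.c emulated a PHY for fixed links: reads of MII register 5 (link partner ability) returned a synthetic value built from 0x45e1 (100 Mbit, full duplex) with the LPA_100 or LPA_DUPLEX bits cleared when the platform data pinned the link to 10 Mbit or half duplex. A hypothetical user-space sketch of that masking follows; the LPA_* values are quoted from the standard linux/mii.h definitions (real code should include that header), and fixed_lpa() is an invented helper, not the removed fs_mii_fixed_init().

#include <stdio.h>

/* Values as defined in <linux/mii.h> (MII register 5, link partner ability). */
#define LPA_100         0x0380  /* LPA_100BASE4 | LPA_100FULL | LPA_100HALF */
#define LPA_DUPLEX      0x0140  /* LPA_100FULL | LPA_10FULL */

/* Illustrative only: derive a fake LPA word for a fixed link, as the
 * removed fixed-PHY code did from its platform data. */
static unsigned int fixed_lpa(int speed, int full_duplex)
{
        unsigned int lpa = 0x45e1;      /* default: 100 Mbit, full duplex */

        if (speed == 10)
                lpa &= ~LPA_100;        /* drop the 100 Mbit ability bits */
        if (!full_duplex)
                lpa &= ~LPA_DUPLEX;     /* drop the full-duplex ability bits */
        return lpa;
}

int main(void)
{
        printf("10/half  -> 0x%04x\n", fixed_lpa(10, 0));
        printf("100/full -> 0x%04x\n", fixed_lpa(100, 1));
        return 0;
}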
diff --git a/drivers/net/gt96100eth.c b/drivers/net/gt96100eth.c index 49dacc6e35aa..2b4db7414475 100644 --- a/drivers/net/gt96100eth.c +++ b/drivers/net/gt96100eth.c | |||
@@ -699,7 +699,6 @@ static int __init gt96100_probe1(struct pci_dev *pci, int port_num) | |||
699 | memset(gp, 0, sizeof(*gp)); // clear it | 699 | memset(gp, 0, sizeof(*gp)); // clear it |
700 | 700 | ||
701 | gp->port_num = port_num; | 701 | gp->port_num = port_num; |
702 | gp->io_size = GT96100_ETH_IO_SIZE; | ||
703 | gp->port_offset = port_num * GT96100_ETH_IO_SIZE; | 702 | gp->port_offset = port_num * GT96100_ETH_IO_SIZE; |
704 | gp->phy_addr = phy_addr; | 703 | gp->phy_addr = phy_addr; |
705 | gp->chip_rev = chip_rev; | 704 | gp->chip_rev = chip_rev; |
@@ -1531,7 +1530,7 @@ static void gt96100_cleanup_module(void) | |||
1531 | + sizeof(gt96100_td_t) * TX_RING_SIZE, | 1530 | + sizeof(gt96100_td_t) * TX_RING_SIZE, |
1532 | gp->rx_ring); | 1531 | gp->rx_ring); |
1533 | free_netdev(gtif->dev); | 1532 | free_netdev(gtif->dev); |
1534 | release_region(gtif->iobase, gp->io_size); | 1533 | release_region(gtif->iobase, GT96100_ETH_IO_SIZE); |
1535 | } | 1534 | } |
1536 | } | 1535 | } |
1537 | } | 1536 | } |
diff --git a/drivers/net/gt96100eth.h b/drivers/net/gt96100eth.h index 2a8331938b84..3b62a87c7d7f 100644 --- a/drivers/net/gt96100eth.h +++ b/drivers/net/gt96100eth.h | |||
@@ -331,7 +331,6 @@ struct gt96100_private { | |||
331 | mib_counters_t mib; | 331 | mib_counters_t mib; |
332 | struct net_device_stats stats; | 332 | struct net_device_stats stats; |
333 | 333 | ||
334 | int io_size; | ||
335 | int port_num; // 0 or 1 | 334 | int port_num; // 0 or 1 |
336 | int chip_rev; | 335 | int chip_rev; |
337 | u32 port_offset; | 336 | u32 port_offset; |
@@ -340,7 +339,6 @@ struct gt96100_private { | |||
340 | u32 last_psr; // last value of the port status register | 339 | u32 last_psr; // last value of the port status register |
341 | 340 | ||
342 | int options; /* User-settable misc. driver options. */ | 341 | int options; /* User-settable misc. driver options. */ |
343 | int drv_flags; | ||
344 | struct timer_list timer; | 342 | struct timer_list timer; |
345 | spinlock_t lock; /* Serialise access to device */ | 343 | spinlock_t lock; /* Serialise access to device */ |
346 | }; | 344 | }; |
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c index 7bcd939c6edd..409c6aab0411 100644 --- a/drivers/net/hamachi.c +++ b/drivers/net/hamachi.c | |||
@@ -20,22 +20,15 @@ | |||
20 | 20 | ||
21 | Support and updates available at | 21 | Support and updates available at |
22 | http://www.scyld.com/network/hamachi.html | 22 | http://www.scyld.com/network/hamachi.html |
23 | [link no longer provides useful info -jgarzik] | ||
23 | or | 24 | or |
24 | http://www.parl.clemson.edu/~keithu/hamachi.html | 25 | http://www.parl.clemson.edu/~keithu/hamachi.html |
25 | 26 | ||
26 | |||
27 | |||
28 | Linux kernel changelog: | ||
29 | |||
30 | LK1.0.1: | ||
31 | - fix lack of pci_dev<->dev association | ||
32 | - ethtool support (jgarzik) | ||
33 | |||
34 | */ | 27 | */ |
35 | 28 | ||
36 | #define DRV_NAME "hamachi" | 29 | #define DRV_NAME "hamachi" |
37 | #define DRV_VERSION "1.01+LK1.0.1" | 30 | #define DRV_VERSION "2.0" |
38 | #define DRV_RELDATE "5/18/2001" | 31 | #define DRV_RELDATE "June 27, 2006" |
39 | 32 | ||
40 | 33 | ||
41 | /* A few user-configurable values. */ | 34 | /* A few user-configurable values. */ |
@@ -608,7 +601,8 @@ static int __devinit hamachi_init_one (struct pci_dev *pdev, | |||
608 | pci_set_master(pdev); | 601 | pci_set_master(pdev); |
609 | 602 | ||
610 | i = pci_request_regions(pdev, DRV_NAME); | 603 | i = pci_request_regions(pdev, DRV_NAME); |
611 | if (i) return i; | 604 | if (i) |
605 | return i; | ||
612 | 606 | ||
613 | irq = pdev->irq; | 607 | irq = pdev->irq; |
614 | ioaddr = ioremap(base, 0x400); | 608 | ioaddr = ioremap(base, 0x400); |
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index 0641f54fc638..889f338132fa 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c | |||
@@ -122,6 +122,12 @@ struct bpqdev { | |||
122 | 122 | ||
123 | static LIST_HEAD(bpq_devices); | 123 | static LIST_HEAD(bpq_devices); |
124 | 124 | ||
125 | /* | ||
126 | * bpqether network devices are paired with ethernet devices below them, so | ||
127 | * form a special "super class" of normal ethernet devices; split their locks | ||
128 | * off into a separate class since they always nest. | ||
129 | */ | ||
130 | static struct lock_class_key bpq_netdev_xmit_lock_key; | ||
125 | 131 | ||
126 | /* ------------------------------------------------------------------------ */ | 132 | /* ------------------------------------------------------------------------ */ |
127 | 133 | ||
@@ -528,6 +534,7 @@ static int bpq_new_device(struct net_device *edev) | |||
528 | err = register_netdevice(ndev); | 534 | err = register_netdevice(ndev); |
529 | if (err) | 535 | if (err) |
530 | goto error; | 536 | goto error; |
537 | lockdep_set_class(&ndev->_xmit_lock, &bpq_netdev_xmit_lock_key); | ||
531 | 538 | ||
532 | /* List protected by RTNL */ | 539 | /* List protected by RTNL */ |
533 | list_add_rcu(&bpq->bpq_list, &bpq_devices); | 540 | list_add_rcu(&bpq->bpq_list, &bpq_devices); |
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index 3a42afab5036..43e3f33ed5e2 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c | |||
@@ -271,6 +271,7 @@ static int __init ifb_init_module(void) | |||
271 | for (i = 0; i < numifbs && !err; i++) | 271 | for (i = 0; i < numifbs && !err; i++) |
272 | err = ifb_init_one(i); | 272 | err = ifb_init_one(i); |
273 | if (err) { | 273 | if (err) { |
274 | i--; | ||
274 | while (--i >= 0) | 275 | while (--i >= 0) |
275 | ifb_free_one(i); | 276 | ifb_free_one(i); |
276 | } | 277 | } |
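The one-line ifb change fixes an unwind-on-error off-by-one: when ifb_init_one(i) fails, the for loop has already incremented i one past the failing index, so the old "while (--i >= 0)" cleanup would also free the device that was never created. Stepping i back once first limits the cleanup to the units that really were initialised. A self-contained sketch of the corrected pattern, with init_one()/free_one() as hypothetical stand-ins for the ifb helpers:

#include <stdio.h>

#define N 4
static int created[N];

static int init_one(int i)
{
        if (i == 2)             /* pretend unit 2 fails */
                return -1;
        created[i] = 1;
        printf("created %d\n", i);
        return 0;
}

static void free_one(int i)
{
        printf("freeing %d (was %screated)\n", i, created[i] ? "" : "NOT ");
        created[i] = 0;
}

int main(void)
{
        int i, err = 0;

        for (i = 0; i < N && !err; i++)
                err = init_one(i);

        if (err) {
                i--;                    /* undo the for loop's final i++ */
                while (--i >= 0)        /* free only the units before the one that failed */
                        free_one(i);
        }
        return 0;
}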
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c index bf1fca5a3fa0..e3c8cd5eca67 100644 --- a/drivers/net/irda/ali-ircc.c +++ b/drivers/net/irda/ali-ircc.c | |||
@@ -146,7 +146,7 @@ static int __init ali_ircc_init(void) | |||
146 | { | 146 | { |
147 | ali_chip_t *chip; | 147 | ali_chip_t *chip; |
148 | chipio_t info; | 148 | chipio_t info; |
149 | int ret = -ENODEV; | 149 | int ret; |
150 | int cfg, cfg_base; | 150 | int cfg, cfg_base; |
151 | int reg, revision; | 151 | int reg, revision; |
152 | int i = 0; | 152 | int i = 0; |
@@ -160,6 +160,7 @@ static int __init ali_ircc_init(void) | |||
160 | return ret; | 160 | return ret; |
161 | } | 161 | } |
162 | 162 | ||
163 | ret = -ENODEV; | ||
163 | 164 | ||
164 | /* Probe for all the ALi chipsets we know about */ | 165 | /* Probe for all the ALi chipsets we know about */ |
165 | for (chip= chips; chip->name; chip++, i++) | 166 | for (chip= chips; chip->name; chip++, i++) |
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c index a4674044bd6f..2eff45bedc7c 100644 --- a/drivers/net/irda/smsc-ircc2.c +++ b/drivers/net/irda/smsc-ircc2.c | |||
@@ -2353,7 +2353,7 @@ static int __init smsc_superio_lpc(unsigned short cfg_base) | |||
2353 | #ifdef CONFIG_PCI | 2353 | #ifdef CONFIG_PCI |
2354 | #define PCIID_VENDOR_INTEL 0x8086 | 2354 | #define PCIID_VENDOR_INTEL 0x8086 |
2355 | #define PCIID_VENDOR_ALI 0x10b9 | 2355 | #define PCIID_VENDOR_ALI 0x10b9 |
2356 | static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __devinitdata = { | 2356 | static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __initdata = { |
2357 | { | 2357 | { |
2358 | .vendor = PCIID_VENDOR_INTEL, /* Intel 82801DBM LPC bridge */ | 2358 | .vendor = PCIID_VENDOR_INTEL, /* Intel 82801DBM LPC bridge */ |
2359 | .device = 0x24cc, | 2359 | .device = 0x24cc, |
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index b91e082483f6..7bbd447289b5 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c | |||
@@ -1173,7 +1173,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb) | |||
1173 | uint16_t ipcse, tucse, mss; | 1173 | uint16_t ipcse, tucse, mss; |
1174 | int err; | 1174 | int err; |
1175 | 1175 | ||
1176 | if(likely(skb_shinfo(skb)->gso_size)) { | 1176 | if (likely(skb_is_gso(skb))) { |
1177 | if (skb_header_cloned(skb)) { | 1177 | if (skb_header_cloned(skb)) { |
1178 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); | 1178 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); |
1179 | if (err) | 1179 | if (err) |
@@ -1281,7 +1281,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb, | |||
1281 | 1281 | ||
1282 | while(len) { | 1282 | while(len) { |
1283 | buffer_info = &tx_ring->buffer_info[i]; | 1283 | buffer_info = &tx_ring->buffer_info[i]; |
1284 | size = min(len, IXGB_MAX_JUMBO_FRAME_SIZE); | 1284 | size = min(len, IXGB_MAX_DATA_PER_TXD); |
1285 | buffer_info->length = size; | 1285 | buffer_info->length = size; |
1286 | buffer_info->dma = | 1286 | buffer_info->dma = |
1287 | pci_map_single(adapter->pdev, | 1287 | pci_map_single(adapter->pdev, |
@@ -1306,7 +1306,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb, | |||
1306 | 1306 | ||
1307 | while(len) { | 1307 | while(len) { |
1308 | buffer_info = &tx_ring->buffer_info[i]; | 1308 | buffer_info = &tx_ring->buffer_info[i]; |
1309 | size = min(len, IXGB_MAX_JUMBO_FRAME_SIZE); | 1309 | size = min(len, IXGB_MAX_DATA_PER_TXD); |
1310 | buffer_info->length = size; | 1310 | buffer_info->length = size; |
1311 | buffer_info->dma = | 1311 | buffer_info->dma = |
1312 | pci_map_page(adapter->pdev, | 1312 | pci_map_page(adapter->pdev, |
diff --git a/drivers/net/lance.c b/drivers/net/lance.c index c1c3452c90ca..5b4dbfe5fb77 100644 --- a/drivers/net/lance.c +++ b/drivers/net/lance.c | |||
@@ -326,7 +326,7 @@ MODULE_PARM_DESC(dma, "LANCE/PCnet ISA DMA channel (ignored for some devices)"); | |||
326 | MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)"); | 326 | MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)"); |
327 | MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)"); | 327 | MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)"); |
328 | 328 | ||
329 | int init_module(void) | 329 | int __init init_module(void) |
330 | { | 330 | { |
331 | struct net_device *dev; | 331 | struct net_device *dev; |
332 | int this_dev, found = 0; | 332 | int this_dev, found = 0; |
diff --git a/drivers/net/lne390.c b/drivers/net/lne390.c index 646e89fc3562..c0ec7f6abcb2 100644 --- a/drivers/net/lne390.c +++ b/drivers/net/lne390.c | |||
@@ -406,7 +406,7 @@ MODULE_PARM_DESC(mem, "memory base address(es)"); | |||
406 | MODULE_DESCRIPTION("Mylex LNE390A/B EISA Ethernet driver"); | 406 | MODULE_DESCRIPTION("Mylex LNE390A/B EISA Ethernet driver"); |
407 | MODULE_LICENSE("GPL"); | 407 | MODULE_LICENSE("GPL"); |
408 | 408 | ||
409 | int init_module(void) | 409 | int __init init_module(void) |
410 | { | 410 | { |
411 | struct net_device *dev; | 411 | struct net_device *dev; |
412 | int this_dev, found = 0; | 412 | int this_dev, found = 0; |
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 43fef7de8cb9..997cbce9af6e 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c | |||
@@ -139,7 +139,7 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev) | |||
139 | #endif | 139 | #endif |
140 | 140 | ||
141 | #ifdef LOOPBACK_TSO | 141 | #ifdef LOOPBACK_TSO |
142 | if (skb_shinfo(skb)->gso_size) { | 142 | if (skb_is_gso(skb)) { |
143 | BUG_ON(skb->protocol != htons(ETH_P_IP)); | 143 | BUG_ON(skb->protocol != htons(ETH_P_IP)); |
144 | BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP); | 144 | BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP); |
145 | 145 | ||
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index 72aad42db7b4..9bdd43ab3573 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c | |||
@@ -177,6 +177,7 @@ struct myri10ge_priv { | |||
177 | struct work_struct watchdog_work; | 177 | struct work_struct watchdog_work; |
178 | struct timer_list watchdog_timer; | 178 | struct timer_list watchdog_timer; |
179 | int watchdog_tx_done; | 179 | int watchdog_tx_done; |
180 | int watchdog_tx_req; | ||
180 | int watchdog_resets; | 181 | int watchdog_resets; |
181 | int tx_linearized; | 182 | int tx_linearized; |
182 | int pause; | 183 | int pause; |
@@ -188,7 +189,6 @@ struct myri10ge_priv { | |||
188 | int vendor_specific_offset; | 189 | int vendor_specific_offset; |
189 | u32 devctl; | 190 | u32 devctl; |
190 | u16 msi_flags; | 191 | u16 msi_flags; |
191 | u32 pm_state[16]; | ||
192 | u32 read_dma; | 192 | u32 read_dma; |
193 | u32 write_dma; | 193 | u32 write_dma; |
194 | u32 read_write_dma; | 194 | u32 read_write_dma; |
@@ -449,6 +449,7 @@ static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size) | |||
449 | struct mcp_gen_header *hdr; | 449 | struct mcp_gen_header *hdr; |
450 | size_t hdr_offset; | 450 | size_t hdr_offset; |
451 | int status; | 451 | int status; |
452 | unsigned i; | ||
452 | 453 | ||
453 | if ((status = request_firmware(&fw, mgp->fw_name, dev)) < 0) { | 454 | if ((status = request_firmware(&fw, mgp->fw_name, dev)) < 0) { |
454 | dev_err(dev, "Unable to load %s firmware image via hotplug\n", | 455 | dev_err(dev, "Unable to load %s firmware image via hotplug\n", |
@@ -480,18 +481,12 @@ static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size) | |||
480 | goto abort_with_fw; | 481 | goto abort_with_fw; |
481 | 482 | ||
482 | crc = crc32(~0, fw->data, fw->size); | 483 | crc = crc32(~0, fw->data, fw->size); |
483 | if (mgp->tx.boundary == 2048) { | 484 | for (i = 0; i < fw->size; i += 256) { |
484 | /* Avoid PCI burst on chipset with unaligned completions. */ | 485 | myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET + i, |
485 | int i; | 486 | fw->data + i, |
486 | __iomem u32 *ptr = (__iomem u32 *) (mgp->sram + | 487 | min(256U, (unsigned)(fw->size - i))); |
487 | MYRI10GE_FW_OFFSET); | 488 | mb(); |
488 | for (i = 0; i < fw->size / 4; i++) { | 489 | readb(mgp->sram); |
489 | __raw_writel(((u32 *) fw->data)[i], ptr + i); | ||
490 | wmb(); | ||
491 | } | ||
492 | } else { | ||
493 | myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET, fw->data, | ||
494 | fw->size); | ||
495 | } | 490 | } |
496 | /* corruption checking is good for parity recovery and buggy chipset */ | 491 | /* corruption checking is good for parity recovery and buggy chipset */ |
497 | memcpy_fromio(fw->data, mgp->sram + MYRI10GE_FW_OFFSET, fw->size); | 492 | memcpy_fromio(fw->data, mgp->sram + MYRI10GE_FW_OFFSET, fw->size); |
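The firmware-download hunk above replaces the two code paths (a word-by-word copy for the 2048-byte tx boundary case and one bulk myri10ge_pio_copy() otherwise) with a single loop that always copies 256 bytes at a time, issuing mb() and a readb() after each chunk. The only subtle part is clamping the final chunk to the bytes remaining; here is a trivial user-space sketch of that loop, with flush_chunk() as a placeholder for the barrier/readback step:

#include <stdio.h>
#include <string.h>

static void flush_chunk(void)
{
        /* no-op here; the driver issues mb() and readb(mgp->sram) at this point */
}

/* Illustrative only: copy src into dst in fixed-size chunks, clamping the
 * last chunk to whatever is left, as the new firmware loop does. */
static void chunked_copy(char *dst, const char *src, size_t size, size_t chunk)
{
        size_t i;

        for (i = 0; i < size; i += chunk) {
                size_t n = size - i < chunk ? size - i : chunk;

                memcpy(dst + i, src + i, n);
                flush_chunk();
        }
}

int main(void)
{
        char src[1000], dst[1000];

        memset(src, 0xAB, sizeof(src));
        chunked_copy(dst, src, sizeof(src), 256);
        printf("copies match: %d\n", memcmp(src, dst, sizeof(src)) == 0);
        return 0;
}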
@@ -621,7 +616,7 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp) | |||
621 | return -ENXIO; | 616 | return -ENXIO; |
622 | } | 617 | } |
623 | dev_info(&mgp->pdev->dev, "handoff confirmed\n"); | 618 | dev_info(&mgp->pdev->dev, "handoff confirmed\n"); |
624 | myri10ge_dummy_rdma(mgp, mgp->tx.boundary != 4096); | 619 | myri10ge_dummy_rdma(mgp, 1); |
625 | 620 | ||
626 | return 0; | 621 | return 0; |
627 | } | 622 | } |
@@ -1289,6 +1284,7 @@ static const char myri10ge_gstrings_stats[][ETH_GSTRING_LEN] = { | |||
1289 | "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors", | 1284 | "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors", |
1290 | "tx_heartbeat_errors", "tx_window_errors", | 1285 | "tx_heartbeat_errors", "tx_window_errors", |
1291 | /* device-specific stats */ | 1286 | /* device-specific stats */ |
1287 | "tx_boundary", "WC", "irq", "MSI", | ||
1292 | "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs", | 1288 | "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs", |
1293 | "serial_number", "tx_pkt_start", "tx_pkt_done", | 1289 | "serial_number", "tx_pkt_start", "tx_pkt_done", |
1294 | "tx_req", "tx_done", "rx_small_cnt", "rx_big_cnt", | 1290 | "tx_req", "tx_done", "rx_small_cnt", "rx_big_cnt", |
@@ -1327,6 +1323,10 @@ myri10ge_get_ethtool_stats(struct net_device *netdev, | |||
1327 | for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++) | 1323 | for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++) |
1328 | data[i] = ((unsigned long *)&mgp->stats)[i]; | 1324 | data[i] = ((unsigned long *)&mgp->stats)[i]; |
1329 | 1325 | ||
1326 | data[i++] = (unsigned int)mgp->tx.boundary; | ||
1327 | data[i++] = (unsigned int)(mgp->mtrr >= 0); | ||
1328 | data[i++] = (unsigned int)mgp->pdev->irq; | ||
1329 | data[i++] = (unsigned int)mgp->msi_enabled; | ||
1330 | data[i++] = (unsigned int)mgp->read_dma; | 1330 | data[i++] = (unsigned int)mgp->read_dma; |
1331 | data[i++] = (unsigned int)mgp->write_dma; | 1331 | data[i++] = (unsigned int)mgp->write_dma; |
1332 | data[i++] = (unsigned int)mgp->read_write_dma; | 1332 | data[i++] = (unsigned int)mgp->read_write_dma; |
@@ -2112,7 +2112,7 @@ abort_linearize: | |||
2112 | } | 2112 | } |
2113 | idx = (idx + 1) & tx->mask; | 2113 | idx = (idx + 1) & tx->mask; |
2114 | } while (idx != last_idx); | 2114 | } while (idx != last_idx); |
2115 | if (skb_shinfo(skb)->gso_size) { | 2115 | if (skb_is_gso(skb)) { |
2116 | printk(KERN_ERR | 2116 | printk(KERN_ERR |
2117 | "myri10ge: %s: TSO but wanted to linearize?!?!?\n", | 2117 | "myri10ge: %s: TSO but wanted to linearize?!?!?\n", |
2118 | mgp->dev->name); | 2118 | mgp->dev->name); |
@@ -2197,8 +2197,6 @@ static int myri10ge_change_mtu(struct net_device *dev, int new_mtu) | |||
2197 | * any other device, except if forced with myri10ge_ecrc_enable > 1. | 2197 | * any other device, except if forced with myri10ge_ecrc_enable > 1. |
2198 | */ | 2198 | */ |
2199 | 2199 | ||
2200 | #define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_PCIE 0x005d | ||
2201 | |||
2202 | static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp) | 2200 | static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp) |
2203 | { | 2201 | { |
2204 | struct pci_dev *bridge = mgp->pdev->bus->self; | 2202 | struct pci_dev *bridge = mgp->pdev->bus->self; |
@@ -2410,18 +2408,24 @@ static int myri10ge_resume(struct pci_dev *pdev) | |||
2410 | return -EIO; | 2408 | return -EIO; |
2411 | } | 2409 | } |
2412 | myri10ge_restore_state(mgp); | 2410 | myri10ge_restore_state(mgp); |
2413 | pci_enable_device(pdev); | 2411 | |
2412 | status = pci_enable_device(pdev); | ||
2413 | if (status < 0) { | ||
2414 | dev_err(&pdev->dev, "failed to enable device\n"); | ||
2415 | return -EIO; | ||
2416 | } | ||
2417 | |||
2414 | pci_set_master(pdev); | 2418 | pci_set_master(pdev); |
2415 | 2419 | ||
2416 | status = request_irq(pdev->irq, myri10ge_intr, IRQF_SHARED, | 2420 | status = request_irq(pdev->irq, myri10ge_intr, IRQF_SHARED, |
2417 | netdev->name, mgp); | 2421 | netdev->name, mgp); |
2418 | if (status != 0) { | 2422 | if (status != 0) { |
2419 | dev_err(&pdev->dev, "failed to allocate IRQ\n"); | 2423 | dev_err(&pdev->dev, "failed to allocate IRQ\n"); |
2420 | goto abort_with_msi; | 2424 | goto abort_with_enabled; |
2421 | } | 2425 | } |
2422 | 2426 | ||
2423 | myri10ge_reset(mgp); | 2427 | myri10ge_reset(mgp); |
2424 | myri10ge_dummy_rdma(mgp, mgp->tx.boundary != 4096); | 2428 | myri10ge_dummy_rdma(mgp, 1); |
2425 | 2429 | ||
2426 | /* Save configuration space to be restored if the | 2430 | /* Save configuration space to be restored if the |
2427 | * nic resets due to a parity error */ | 2431 | * nic resets due to a parity error */ |
@@ -2436,7 +2440,8 @@ static int myri10ge_resume(struct pci_dev *pdev) | |||
2436 | 2440 | ||
2437 | return 0; | 2441 | return 0; |
2438 | 2442 | ||
2439 | abort_with_msi: | 2443 | abort_with_enabled: |
2444 | pci_disable_device(pdev); | ||
2440 | return -EIO; | 2445 | return -EIO; |
2441 | 2446 | ||
2442 | } | 2447 | } |
@@ -2538,7 +2543,8 @@ static void myri10ge_watchdog_timer(unsigned long arg) | |||
2538 | 2543 | ||
2539 | mgp = (struct myri10ge_priv *)arg; | 2544 | mgp = (struct myri10ge_priv *)arg; |
2540 | if (mgp->tx.req != mgp->tx.done && | 2545 | if (mgp->tx.req != mgp->tx.done && |
2541 | mgp->tx.done == mgp->watchdog_tx_done) | 2546 | mgp->tx.done == mgp->watchdog_tx_done && |
2547 | mgp->watchdog_tx_req != mgp->watchdog_tx_done) | ||
2542 | /* nic seems like it might be stuck.. */ | 2548 | /* nic seems like it might be stuck.. */ |
2543 | schedule_work(&mgp->watchdog_work); | 2549 | schedule_work(&mgp->watchdog_work); |
2544 | else | 2550 | else |
@@ -2547,6 +2553,7 @@ static void myri10ge_watchdog_timer(unsigned long arg) | |||
2547 | jiffies + myri10ge_watchdog_timeout * HZ); | 2553 | jiffies + myri10ge_watchdog_timeout * HZ); |
2548 | 2554 | ||
2549 | mgp->watchdog_tx_done = mgp->tx.done; | 2555 | mgp->watchdog_tx_done = mgp->tx.done; |
2556 | mgp->watchdog_tx_req = mgp->tx.req; | ||
2550 | } | 2557 | } |
2551 | 2558 | ||
2552 | static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | 2559 | static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
@@ -2737,11 +2744,10 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2737 | dev_err(&pdev->dev, "register_netdev failed: %d\n", status); | 2744 | dev_err(&pdev->dev, "register_netdev failed: %d\n", status); |
2738 | goto abort_with_irq; | 2745 | goto abort_with_irq; |
2739 | } | 2746 | } |
2740 | 2747 | dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n", | |
2741 | printk(KERN_INFO "myri10ge: %s: %s IRQ %d, tx bndry %d, fw %s, WC %s\n", | 2748 | (mgp->msi_enabled ? "MSI" : "xPIC"), |
2742 | netdev->name, (mgp->msi_enabled ? "MSI" : "xPIC"), | 2749 | pdev->irq, mgp->tx.boundary, mgp->fw_name, |
2743 | pdev->irq, mgp->tx.boundary, mgp->fw_name, | 2750 | (mgp->mtrr >= 0 ? "Enabled" : "Disabled")); |
2744 | (mgp->mtrr >= 0 ? "Enabled" : "Disabled")); | ||
2745 | 2751 | ||
2746 | return 0; | 2752 | return 0; |
2747 | 2753 | ||
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c index 9df2628be1e7..db0475a1102f 100644 --- a/drivers/net/natsemi.c +++ b/drivers/net/natsemi.c | |||
@@ -20,120 +20,9 @@ | |||
20 | 20 | ||
21 | Support information and updates available at | 21 | Support information and updates available at |
22 | http://www.scyld.com/network/netsemi.html | 22 | http://www.scyld.com/network/netsemi.html |
23 | [link no longer provides useful info -jgarzik] | ||
23 | 24 | ||
24 | 25 | ||
25 | Linux kernel modifications: | ||
26 | |||
27 | Version 1.0.1: | ||
28 | - Spinlock fixes | ||
29 | - Bug fixes and better intr performance (Tjeerd) | ||
30 | Version 1.0.2: | ||
31 | - Now reads correct MAC address from eeprom | ||
32 | Version 1.0.3: | ||
33 | - Eliminate redundant priv->tx_full flag | ||
34 | - Call netif_start_queue from dev->tx_timeout | ||
35 | - wmb() in start_tx() to flush data | ||
36 | - Update Tx locking | ||
37 | - Clean up PCI enable (davej) | ||
38 | Version 1.0.4: | ||
39 | - Merge Donald Becker's natsemi.c version 1.07 | ||
40 | Version 1.0.5: | ||
41 | - { fill me in } | ||
42 | Version 1.0.6: | ||
43 | * ethtool support (jgarzik) | ||
44 | * Proper initialization of the card (which sometimes | ||
45 | fails to occur and leaves the card in a non-functional | ||
46 | state). (uzi) | ||
47 | |||
48 | * Some documented register settings to optimize some | ||
49 | of the 100Mbit autodetection circuitry in rev C cards. (uzi) | ||
50 | |||
51 | * Polling of the PHY intr for stuff like link state | ||
52 | change and auto- negotiation to finally work properly. (uzi) | ||
53 | |||
54 | * One-liner removal of a duplicate declaration of | ||
55 | netdev_error(). (uzi) | ||
56 | |||
57 | Version 1.0.7: (Manfred Spraul) | ||
58 | * pci dma | ||
59 | * SMP locking update | ||
60 | * full reset added into tx_timeout | ||
61 | * correct multicast hash generation (both big and little endian) | ||
62 | [copied from a natsemi driver version | ||
63 | from Myrio Corporation, Greg Smith] | ||
64 | * suspend/resume | ||
65 | |||
66 | version 1.0.8 (Tim Hockin <thockin@sun.com>) | ||
67 | * ETHTOOL_* support | ||
68 | * Wake on lan support (Erik Gilling) | ||
69 | * MXDMA fixes for serverworks | ||
70 | * EEPROM reload | ||
71 | |||
72 | version 1.0.9 (Manfred Spraul) | ||
73 | * Main change: fix lack of synchronize | ||
74 | netif_close/netif_suspend against a last interrupt | ||
75 | or packet. | ||
76 | * do not enable superflous interrupts (e.g. the | ||
77 | drivers relies on TxDone - TxIntr not needed) | ||
78 | * wait that the hardware has really stopped in close | ||
79 | and suspend. | ||
80 | * workaround for the (at least) gcc-2.95.1 compiler | ||
81 | problem. Also simplifies the code a bit. | ||
82 | * disable_irq() in tx_timeout - needed to protect | ||
83 | against rx interrupts. | ||
84 | * stop the nic before switching into silent rx mode | ||
85 | for wol (required according to docu). | ||
86 | |||
87 | version 1.0.10: | ||
88 | * use long for ee_addr (various) | ||
89 | * print pointers properly (DaveM) | ||
90 | * include asm/irq.h (?) | ||
91 | |||
92 | version 1.0.11: | ||
93 | * check and reset if PHY errors appear (Adrian Sun) | ||
94 | * WoL cleanup (Tim Hockin) | ||
95 | * Magic number cleanup (Tim Hockin) | ||
96 | * Don't reload EEPROM on every reset (Tim Hockin) | ||
97 | * Save and restore EEPROM state across reset (Tim Hockin) | ||
98 | * MDIO Cleanup (Tim Hockin) | ||
99 | * Reformat register offsets/bits (jgarzik) | ||
100 | |||
101 | version 1.0.12: | ||
102 | * ETHTOOL_* further support (Tim Hockin) | ||
103 | |||
104 | version 1.0.13: | ||
105 | * ETHTOOL_[G]EEPROM support (Tim Hockin) | ||
106 | |||
107 | version 1.0.13: | ||
108 | * crc cleanup (Matt Domsch <Matt_Domsch@dell.com>) | ||
109 | |||
110 | version 1.0.14: | ||
111 | * Cleanup some messages and autoneg in ethtool (Tim Hockin) | ||
112 | |||
113 | version 1.0.15: | ||
114 | * Get rid of cable_magic flag | ||
115 | * use new (National provided) solution for cable magic issue | ||
116 | |||
117 | version 1.0.16: | ||
118 | * call netdev_rx() for RxErrors (Manfred Spraul) | ||
119 | * formatting and cleanups | ||
120 | * change options and full_duplex arrays to be zero | ||
121 | initialized | ||
122 | * enable only the WoL and PHY interrupts in wol mode | ||
123 | |||
124 | version 1.0.17: | ||
125 | * only do cable_magic on 83815 and early 83816 (Tim Hockin) | ||
126 | * create a function for rx refill (Manfred Spraul) | ||
127 | * combine drain_ring and init_ring (Manfred Spraul) | ||
128 | * oom handling (Manfred Spraul) | ||
129 | * hands_off instead of playing with netif_device_{de,a}ttach | ||
130 | (Manfred Spraul) | ||
131 | * be sure to write the MAC back to the chip (Manfred Spraul) | ||
132 | * lengthen EEPROM timeout, and always warn about timeouts | ||
133 | (Manfred Spraul) | ||
134 | * comments update (Manfred) | ||
135 | * do the right thing on a phy-reset (Manfred and Tim) | ||
136 | |||
137 | TODO: | 26 | TODO: |
138 | * big endian support with CFG:BEM instead of cpu_to_le32 | 27 | * big endian support with CFG:BEM instead of cpu_to_le32 |
139 | */ | 28 | */ |
@@ -165,8 +54,8 @@ | |||
165 | #include <asm/uaccess.h> | 54 | #include <asm/uaccess.h> |
166 | 55 | ||
167 | #define DRV_NAME "natsemi" | 56 | #define DRV_NAME "natsemi" |
168 | #define DRV_VERSION "1.07+LK1.0.17" | 57 | #define DRV_VERSION "2.0" |
169 | #define DRV_RELDATE "Sep 27, 2002" | 58 | #define DRV_RELDATE "June 27, 2006" |
170 | 59 | ||
171 | #define RX_OFFSET 2 | 60 | #define RX_OFFSET 2 |
172 | 61 | ||
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c index fa50eb889408..34bdba9eec79 100644 --- a/drivers/net/ne2k-pci.c +++ b/drivers/net/ne2k-pci.c | |||
@@ -231,12 +231,12 @@ static int __devinit ne2k_pci_init_one (struct pci_dev *pdev, | |||
231 | irq = pdev->irq; | 231 | irq = pdev->irq; |
232 | 232 | ||
233 | if (!ioaddr || ((pci_resource_flags (pdev, 0) & IORESOURCE_IO) == 0)) { | 233 | if (!ioaddr || ((pci_resource_flags (pdev, 0) & IORESOURCE_IO) == 0)) { |
234 | printk (KERN_ERR PFX "no I/O resource at PCI BAR #0\n"); | 234 | dev_err(&pdev->dev, "no I/O resource at PCI BAR #0\n"); |
235 | return -ENODEV; | 235 | return -ENODEV; |
236 | } | 236 | } |
237 | 237 | ||
238 | if (request_region (ioaddr, NE_IO_EXTENT, DRV_NAME) == NULL) { | 238 | if (request_region (ioaddr, NE_IO_EXTENT, DRV_NAME) == NULL) { |
239 | printk (KERN_ERR PFX "I/O resource 0x%x @ 0x%lx busy\n", | 239 | dev_err(&pdev->dev, "I/O resource 0x%x @ 0x%lx busy\n", |
240 | NE_IO_EXTENT, ioaddr); | 240 | NE_IO_EXTENT, ioaddr); |
241 | return -EBUSY; | 241 | return -EBUSY; |
242 | } | 242 | } |
@@ -263,7 +263,7 @@ static int __devinit ne2k_pci_init_one (struct pci_dev *pdev, | |||
263 | /* Allocate net_device, dev->priv; fill in 8390 specific dev fields. */ | 263 | /* Allocate net_device, dev->priv; fill in 8390 specific dev fields. */ |
264 | dev = alloc_ei_netdev(); | 264 | dev = alloc_ei_netdev(); |
265 | if (!dev) { | 265 | if (!dev) { |
266 | printk (KERN_ERR PFX "cannot allocate ethernet device\n"); | 266 | dev_err(&pdev->dev, "cannot allocate ethernet device\n"); |
267 | goto err_out_free_res; | 267 | goto err_out_free_res; |
268 | } | 268 | } |
269 | SET_MODULE_OWNER(dev); | 269 | SET_MODULE_OWNER(dev); |
@@ -281,7 +281,8 @@ static int __devinit ne2k_pci_init_one (struct pci_dev *pdev, | |||
281 | while ((inb(ioaddr + EN0_ISR) & ENISR_RESET) == 0) | 281 | while ((inb(ioaddr + EN0_ISR) & ENISR_RESET) == 0) |
282 | /* Limit wait: '2' avoids jiffy roll-over. */ | 282 | /* Limit wait: '2' avoids jiffy roll-over. */ |
283 | if (jiffies - reset_start_time > 2) { | 283 | if (jiffies - reset_start_time > 2) { |
284 | printk(KERN_ERR PFX "Card failure (no reset ack).\n"); | 284 | dev_err(&pdev->dev, |
285 | "Card failure (no reset ack).\n"); | ||
285 | goto err_out_free_netdev; | 286 | goto err_out_free_netdev; |
286 | } | 287 | } |
287 | 288 | ||
diff --git a/drivers/net/ni5010.c b/drivers/net/ni5010.c index a68bf474f6ed..d4be207d321a 100644 --- a/drivers/net/ni5010.c +++ b/drivers/net/ni5010.c | |||
@@ -1,17 +1,12 @@ | |||
1 | /* ni5010.c: A network driver for the MiCom-Interlan NI5010 ethercard. | 1 | /* ni5010.c: A network driver for the MiCom-Interlan NI5010 ethercard. |
2 | * | 2 | * |
3 | * Copyright 1996,1997 Jan-Pascal van Best and Andreas Mohr. | 3 | * Copyright 1996,1997,2006 Jan-Pascal van Best and Andreas Mohr. |
4 | * | 4 | * |
5 | * This software may be used and distributed according to the terms | 5 | * This software may be used and distributed according to the terms |
6 | * of the GNU General Public License, incorporated herein by reference. | 6 | * of the GNU General Public License, incorporated herein by reference. |
7 | * | 7 | * |
8 | * The authors may be reached as: | 8 | * The authors may be reached as: |
9 | * jvbest@wi.leidenuniv.nl a.mohr@mailto.de | 9 | * janpascal@vanbest.org andi@lisas.de |
10 | * or by snail mail as | ||
11 | * Jan-Pascal van Best Andreas Mohr | ||
12 | * Klikspaanweg 58-4 Stauferstr. 6 | ||
13 | * 2324 LZ Leiden D-71272 Renningen | ||
14 | * The Netherlands Germany | ||
15 | * | 10 | * |
16 | * Sources: | 11 | * Sources: |
17 | * Donald Becker's "skeleton.c" | 12 | * Donald Becker's "skeleton.c" |
@@ -27,8 +22,9 @@ | |||
27 | * 970503 v0.93: Fixed auto-irq failure on warm reboot (JB) | 22 | * 970503 v0.93: Fixed auto-irq failure on warm reboot (JB) |
28 | * 970623 v1.00: First kernel version (AM) | 23 | * 970623 v1.00: First kernel version (AM) |
29 | * 970814 v1.01: Added detection of onboard receive buffer size (AM) | 24 | * 970814 v1.01: Added detection of onboard receive buffer size (AM) |
25 | * 060611 v1.02: slight cleanup: email addresses, driver modernization. | ||
30 | * Bugs: | 26 | * Bugs: |
31 | * - None known... | 27 | * - not SMP-safe (no locking of I/O accesses) |
32 | * - Note that you have to patch ifconfig for the new /proc/net/dev | 28 | * - Note that you have to patch ifconfig for the new /proc/net/dev |
33 | * format. It gives incorrect stats otherwise. | 29 | * format. It gives incorrect stats otherwise. |
34 | * | 30 | * |
@@ -39,7 +35,7 @@ | |||
39 | * Complete merge with Andreas' driver | 35 | * Complete merge with Andreas' driver |
40 | * Implement ring buffers (Is this useful? You can't squeeze | 36 | * Implement ring buffers (Is this useful? You can't squeeze |
41 | * too many packet in a 2k buffer!) | 37 | * too many packet in a 2k buffer!) |
42 | * Implement DMA (Again, is this useful? Some docs says DMA is | 38 | * Implement DMA (Again, is this useful? Some docs say DMA is |
43 | * slower than programmed I/O) | 39 | * slower than programmed I/O) |
44 | * | 40 | * |
45 | * Compile with: | 41 | * Compile with: |
@@ -47,7 +43,7 @@ | |||
47 | * -DMODULE -c ni5010.c | 43 | * -DMODULE -c ni5010.c |
48 | * | 44 | * |
49 | * Insert with e.g.: | 45 | * Insert with e.g.: |
50 | * insmod ni5010.o io=0x300 irq=5 | 46 | * insmod ni5010.ko io=0x300 irq=5 |
51 | */ | 47 | */ |
52 | 48 | ||
53 | #include <linux/module.h> | 49 | #include <linux/module.h> |
@@ -69,15 +65,15 @@ | |||
69 | 65 | ||
70 | #include "ni5010.h" | 66 | #include "ni5010.h" |
71 | 67 | ||
72 | static const char *boardname = "NI5010"; | 68 | static const char boardname[] = "NI5010"; |
73 | static char *version = | 69 | static char version[] __initdata = |
74 | "ni5010.c: v1.00 06/23/97 Jan-Pascal van Best and Andreas Mohr\n"; | 70 | "ni5010.c: v1.02 20060611 Jan-Pascal van Best and Andreas Mohr\n"; |
75 | 71 | ||
76 | /* bufsize_rcv == 0 means autoprobing */ | 72 | /* bufsize_rcv == 0 means autoprobing */ |
77 | static unsigned int bufsize_rcv; | 73 | static unsigned int bufsize_rcv; |
78 | 74 | ||
79 | #define jumpered_interrupts /* IRQ line jumpered on board */ | 75 | #define JUMPERED_INTERRUPTS /* IRQ line jumpered on board */ |
80 | #undef jumpered_dma /* No DMA used */ | 76 | #undef JUMPERED_DMA /* No DMA used */ |
81 | #undef FULL_IODETECT /* Only detect in portlist */ | 77 | #undef FULL_IODETECT /* Only detect in portlist */ |
82 | 78 | ||
83 | #ifndef FULL_IODETECT | 79 | #ifndef FULL_IODETECT |
@@ -281,7 +277,7 @@ static int __init ni5010_probe1(struct net_device *dev, int ioaddr) | |||
281 | 277 | ||
282 | PRINTK2((KERN_DEBUG "%s: I/O #4 passed!\n", dev->name)); | 278 | PRINTK2((KERN_DEBUG "%s: I/O #4 passed!\n", dev->name)); |
283 | 279 | ||
284 | #ifdef jumpered_interrupts | 280 | #ifdef JUMPERED_INTERRUPTS |
285 | if (dev->irq == 0xff) | 281 | if (dev->irq == 0xff) |
286 | ; | 282 | ; |
287 | else if (dev->irq < 2) { | 283 | else if (dev->irq < 2) { |
@@ -305,7 +301,7 @@ static int __init ni5010_probe1(struct net_device *dev, int ioaddr) | |||
305 | } else if (dev->irq == 2) { | 301 | } else if (dev->irq == 2) { |
306 | dev->irq = 9; | 302 | dev->irq = 9; |
307 | } | 303 | } |
308 | #endif /* jumpered_irq */ | 304 | #endif /* JUMPERED_INTERRUPTS */ |
309 | PRINTK2((KERN_DEBUG "%s: I/O #9 passed!\n", dev->name)); | 305 | PRINTK2((KERN_DEBUG "%s: I/O #9 passed!\n", dev->name)); |
310 | 306 | ||
311 | /* DMA is not supported (yet?), so no use detecting it */ | 307 | /* DMA is not supported (yet?), so no use detecting it */ |
@@ -334,7 +330,7 @@ static int __init ni5010_probe1(struct net_device *dev, int ioaddr) | |||
334 | outw(0, IE_GP); /* Point GP at start of packet */ | 330 | outw(0, IE_GP); /* Point GP at start of packet */ |
335 | outb(0, IE_RBUF); /* set buffer byte 0 to 0 again */ | 331 | outb(0, IE_RBUF); /* set buffer byte 0 to 0 again */ |
336 | } | 332 | } |
337 | printk("// bufsize rcv/xmt=%d/%d\n", bufsize_rcv, NI5010_BUFSIZE); | 333 | printk("-> bufsize rcv/xmt=%d/%d\n", bufsize_rcv, NI5010_BUFSIZE); |
338 | memset(dev->priv, 0, sizeof(struct ni5010_local)); | 334 | memset(dev->priv, 0, sizeof(struct ni5010_local)); |
339 | 335 | ||
340 | dev->open = ni5010_open; | 336 | dev->open = ni5010_open; |
@@ -354,11 +350,9 @@ static int __init ni5010_probe1(struct net_device *dev, int ioaddr) | |||
354 | outb(0xff, EDLC_XCLR); /* Kill all pending xmt interrupts */ | 350 | outb(0xff, EDLC_XCLR); /* Kill all pending xmt interrupts */ |
355 | 351 | ||
356 | printk(KERN_INFO "%s: NI5010 found at 0x%x, using IRQ %d", dev->name, ioaddr, dev->irq); | 352 | printk(KERN_INFO "%s: NI5010 found at 0x%x, using IRQ %d", dev->name, ioaddr, dev->irq); |
357 | if (dev->dma) printk(" & DMA %d", dev->dma); | 353 | if (dev->dma) |
354 | printk(" & DMA %d", dev->dma); | ||
358 | printk(".\n"); | 355 | printk(".\n"); |
359 | |||
360 | printk(KERN_INFO "Join the NI5010 driver development team!\n"); | ||
361 | printk(KERN_INFO "Mail to a.mohr@mailto.de or jvbest@wi.leidenuniv.nl\n"); | ||
362 | return 0; | 356 | return 0; |
363 | out: | 357 | out: |
364 | release_region(dev->base_addr, NI5010_IO_EXTENT); | 358 | release_region(dev->base_addr, NI5010_IO_EXTENT); |
@@ -371,7 +365,7 @@ out: | |||
371 | * | 365 | * |
372 | * This routine should set everything up anew at each open, even | 366 | * This routine should set everything up anew at each open, even |
373 | * registers that "should" only need to be set once at boot, so that | 367 | * registers that "should" only need to be set once at boot, so that |
374 | * there is non-reboot way to recover if something goes wrong. | 368 | * there is a non-reboot way to recover if something goes wrong. |
375 | */ | 369 | */ |
376 | 370 | ||
377 | static int ni5010_open(struct net_device *dev) | 371 | static int ni5010_open(struct net_device *dev) |
@@ -390,13 +384,13 @@ static int ni5010_open(struct net_device *dev) | |||
390 | * Always allocate the DMA channel after the IRQ, | 384 | * Always allocate the DMA channel after the IRQ, |
391 | * and clean up on failure. | 385 | * and clean up on failure. |
392 | */ | 386 | */ |
393 | #ifdef jumpered_dma | 387 | #ifdef JUMPERED_DMA |
394 | if (request_dma(dev->dma, cardname)) { | 388 | if (request_dma(dev->dma, cardname)) { |
395 | printk(KERN_WARNING "%s: Cannot get dma %#2x\n", dev->name, dev->dma); | 389 | printk(KERN_WARNING "%s: Cannot get dma %#2x\n", dev->name, dev->dma); |
396 | free_irq(dev->irq, NULL); | 390 | free_irq(dev->irq, NULL); |
397 | return -EAGAIN; | 391 | return -EAGAIN; |
398 | } | 392 | } |
399 | #endif /* jumpered_dma */ | 393 | #endif /* JUMPERED_DMA */ |
400 | 394 | ||
401 | PRINTK3((KERN_DEBUG "%s: passed open() #2\n", dev->name)); | 395 | PRINTK3((KERN_DEBUG "%s: passed open() #2\n", dev->name)); |
402 | /* Reset the hardware here. Don't forget to set the station address. */ | 396 | /* Reset the hardware here. Don't forget to set the station address. */ |
@@ -633,7 +627,7 @@ static int ni5010_close(struct net_device *dev) | |||
633 | int ioaddr = dev->base_addr; | 627 | int ioaddr = dev->base_addr; |
634 | 628 | ||
635 | PRINTK2((KERN_DEBUG "%s: entering ni5010_close\n", dev->name)); | 629 | PRINTK2((KERN_DEBUG "%s: entering ni5010_close\n", dev->name)); |
636 | #ifdef jumpered_interrupts | 630 | #ifdef JUMPERED_INTERRUPTS |
637 | free_irq(dev->irq, NULL); | 631 | free_irq(dev->irq, NULL); |
638 | #endif | 632 | #endif |
639 | /* Put card in held-RESET state */ | 633 | /* Put card in held-RESET state */ |
@@ -771,7 +765,7 @@ module_param(irq, int, 0); | |||
771 | MODULE_PARM_DESC(io, "ni5010 I/O base address"); | 765 | MODULE_PARM_DESC(io, "ni5010 I/O base address"); |
772 | MODULE_PARM_DESC(irq, "ni5010 IRQ number"); | 766 | MODULE_PARM_DESC(irq, "ni5010 IRQ number"); |
773 | 767 | ||
774 | int init_module(void) | 768 | static int __init ni5010_init_module(void) |
775 | { | 769 | { |
776 | PRINTK2((KERN_DEBUG "%s: entering init_module\n", boardname)); | 770 | PRINTK2((KERN_DEBUG "%s: entering init_module\n", boardname)); |
777 | /* | 771 | /* |
@@ -792,13 +786,15 @@ int init_module(void) | |||
792 | return 0; | 786 | return 0; |
793 | } | 787 | } |
794 | 788 | ||
795 | void cleanup_module(void) | 789 | static void __exit ni5010_cleanup_module(void) |
796 | { | 790 | { |
797 | PRINTK2((KERN_DEBUG "%s: entering cleanup_module\n", boardname)); | 791 | PRINTK2((KERN_DEBUG "%s: entering cleanup_module\n", boardname)); |
798 | unregister_netdev(dev_ni5010); | 792 | unregister_netdev(dev_ni5010); |
799 | release_region(dev_ni5010->base_addr, NI5010_IO_EXTENT); | 793 | release_region(dev_ni5010->base_addr, NI5010_IO_EXTENT); |
800 | free_netdev(dev_ni5010); | 794 | free_netdev(dev_ni5010); |
801 | } | 795 | } |
796 | module_init(ni5010_init_module); | ||
797 | module_exit(ni5010_cleanup_module); | ||
802 | #endif /* MODULE */ | 798 | #endif /* MODULE */ |
803 | MODULE_LICENSE("GPL"); | 799 | MODULE_LICENSE("GPL"); |
804 | 800 | ||
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c index fa854c8fde75..4d52ecf8af56 100644 --- a/drivers/net/ni52.c +++ b/drivers/net/ni52.c | |||
@@ -1323,7 +1323,7 @@ MODULE_PARM_DESC(irq, "NI5210 IRQ number,required"); | |||
1323 | MODULE_PARM_DESC(memstart, "NI5210 memory base address,required"); | 1323 | MODULE_PARM_DESC(memstart, "NI5210 memory base address,required"); |
1324 | MODULE_PARM_DESC(memend, "NI5210 memory end address,required"); | 1324 | MODULE_PARM_DESC(memend, "NI5210 memory end address,required"); |
1325 | 1325 | ||
1326 | int init_module(void) | 1326 | int __init init_module(void) |
1327 | { | 1327 | { |
1328 | if(io <= 0x0 || !memend || !memstart || irq < 2) { | 1328 | if(io <= 0x0 || !memend || !memstart || irq < 2) { |
1329 | printk("ni52: Autoprobing not allowed for modules.\nni52: Set symbols 'io' 'irq' 'memstart' and 'memend'\n"); | 1329 | printk("ni52: Autoprobing not allowed for modules.\nni52: Set symbols 'io' 'irq' 'memstart' and 'memend'\n"); |
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c index bb42ff218484..810cc572f5f7 100644 --- a/drivers/net/ni65.c +++ b/drivers/net/ni65.c | |||
@@ -1253,7 +1253,7 @@ MODULE_PARM_DESC(irq, "ni6510 IRQ number (ignored for some cards)"); | |||
1253 | MODULE_PARM_DESC(io, "ni6510 I/O base address"); | 1253 | MODULE_PARM_DESC(io, "ni6510 I/O base address"); |
1254 | MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)"); | 1254 | MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)"); |
1255 | 1255 | ||
1256 | int init_module(void) | 1256 | int __init init_module(void) |
1257 | { | 1257 | { |
1258 | dev_ni65 = ni65_probe(-1); | 1258 | dev_ni65 = ni65_probe(-1); |
1259 | return IS_ERR(dev_ni65) ? PTR_ERR(dev_ni65) : 0; | 1259 | return IS_ERR(dev_ni65) ? PTR_ERR(dev_ni65) : 0; |
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c index 70429108c40d..0e76859c90a2 100644 --- a/drivers/net/ns83820.c +++ b/drivers/net/ns83820.c | |||
@@ -803,7 +803,7 @@ static int ns83820_setup_rx(struct net_device *ndev) | |||
803 | 803 | ||
804 | writel(dev->IMR_cache, dev->base + IMR); | 804 | writel(dev->IMR_cache, dev->base + IMR); |
805 | writel(1, dev->base + IER); | 805 | writel(1, dev->base + IER); |
806 | spin_unlock_irq(&dev->misc_lock); | 806 | spin_unlock(&dev->misc_lock); |
807 | 807 | ||
808 | kick_rx(ndev); | 808 | kick_rx(ndev); |
809 | 809 | ||
@@ -1012,8 +1012,6 @@ static void do_tx_done(struct net_device *ndev) | |||
1012 | struct ns83820 *dev = PRIV(ndev); | 1012 | struct ns83820 *dev = PRIV(ndev); |
1013 | u32 cmdsts, tx_done_idx, *desc; | 1013 | u32 cmdsts, tx_done_idx, *desc; |
1014 | 1014 | ||
1015 | spin_lock_irq(&dev->tx_lock); | ||
1016 | |||
1017 | dprintk("do_tx_done(%p)\n", ndev); | 1015 | dprintk("do_tx_done(%p)\n", ndev); |
1018 | tx_done_idx = dev->tx_done_idx; | 1016 | tx_done_idx = dev->tx_done_idx; |
1019 | desc = dev->tx_descs + (tx_done_idx * DESC_SIZE); | 1017 | desc = dev->tx_descs + (tx_done_idx * DESC_SIZE); |
@@ -1069,7 +1067,6 @@ static void do_tx_done(struct net_device *ndev) | |||
1069 | netif_start_queue(ndev); | 1067 | netif_start_queue(ndev); |
1070 | netif_wake_queue(ndev); | 1068 | netif_wake_queue(ndev); |
1071 | } | 1069 | } |
1072 | spin_unlock_irq(&dev->tx_lock); | ||
1073 | } | 1070 | } |
1074 | 1071 | ||
1075 | static void ns83820_cleanup_tx(struct ns83820 *dev) | 1072 | static void ns83820_cleanup_tx(struct ns83820 *dev) |
@@ -1281,11 +1278,13 @@ static struct ethtool_ops ops = { | |||
1281 | .get_link = ns83820_get_link | 1278 | .get_link = ns83820_get_link |
1282 | }; | 1279 | }; |
1283 | 1280 | ||
1281 | /* this function is called in irq context from the ISR */ | ||
1284 | static void ns83820_mib_isr(struct ns83820 *dev) | 1282 | static void ns83820_mib_isr(struct ns83820 *dev) |
1285 | { | 1283 | { |
1286 | spin_lock(&dev->misc_lock); | 1284 | unsigned long flags; |
1285 | spin_lock_irqsave(&dev->misc_lock, flags); | ||
1287 | ns83820_update_stats(dev); | 1286 | ns83820_update_stats(dev); |
1288 | spin_unlock(&dev->misc_lock); | 1287 | spin_unlock_irqrestore(&dev->misc_lock, flags); |
1289 | } | 1288 | } |
1290 | 1289 | ||
1291 | static void ns83820_do_isr(struct net_device *ndev, u32 isr); | 1290 | static void ns83820_do_isr(struct net_device *ndev, u32 isr); |
@@ -1307,6 +1306,8 @@ static irqreturn_t ns83820_irq(int foo, void *data, struct pt_regs *regs) | |||
1307 | static void ns83820_do_isr(struct net_device *ndev, u32 isr) | 1306 | static void ns83820_do_isr(struct net_device *ndev, u32 isr) |
1308 | { | 1307 | { |
1309 | struct ns83820 *dev = PRIV(ndev); | 1308 | struct ns83820 *dev = PRIV(ndev); |
1309 | unsigned long flags; | ||
1310 | |||
1310 | #ifdef DEBUG | 1311 | #ifdef DEBUG |
1311 | if (isr & ~(ISR_PHY | ISR_RXDESC | ISR_RXEARLY | ISR_RXOK | ISR_RXERR | ISR_TXIDLE | ISR_TXOK | ISR_TXDESC)) | 1312 | if (isr & ~(ISR_PHY | ISR_RXDESC | ISR_RXEARLY | ISR_RXOK | ISR_RXERR | ISR_TXIDLE | ISR_TXOK | ISR_TXDESC)) |
1312 | Dprintk("odd isr? 0x%08x\n", isr); | 1313 | Dprintk("odd isr? 0x%08x\n", isr); |
@@ -1321,10 +1322,10 @@ static void ns83820_do_isr(struct net_device *ndev, u32 isr) | |||
1321 | if ((ISR_RXDESC | ISR_RXOK) & isr) { | 1322 | if ((ISR_RXDESC | ISR_RXOK) & isr) { |
1322 | prefetch(dev->rx_info.next_rx_desc); | 1323 | prefetch(dev->rx_info.next_rx_desc); |
1323 | 1324 | ||
1324 | spin_lock_irq(&dev->misc_lock); | 1325 | spin_lock_irqsave(&dev->misc_lock, flags); |
1325 | dev->IMR_cache &= ~(ISR_RXDESC | ISR_RXOK); | 1326 | dev->IMR_cache &= ~(ISR_RXDESC | ISR_RXOK); |
1326 | writel(dev->IMR_cache, dev->base + IMR); | 1327 | writel(dev->IMR_cache, dev->base + IMR); |
1327 | spin_unlock_irq(&dev->misc_lock); | 1328 | spin_unlock_irqrestore(&dev->misc_lock, flags); |
1328 | 1329 | ||
1329 | tasklet_schedule(&dev->rx_tasklet); | 1330 | tasklet_schedule(&dev->rx_tasklet); |
1330 | //rx_irq(ndev); | 1331 | //rx_irq(ndev); |
@@ -1370,16 +1371,18 @@ static void ns83820_do_isr(struct net_device *ndev, u32 isr) | |||
1370 | * work has accumulated | 1371 | * work has accumulated |
1371 | */ | 1372 | */ |
1372 | if ((ISR_TXDESC | ISR_TXIDLE | ISR_TXOK | ISR_TXERR) & isr) { | 1373 | if ((ISR_TXDESC | ISR_TXIDLE | ISR_TXOK | ISR_TXERR) & isr) { |
1374 | spin_lock_irqsave(&dev->tx_lock, flags); | ||
1373 | do_tx_done(ndev); | 1375 | do_tx_done(ndev); |
1376 | spin_unlock_irqrestore(&dev->tx_lock, flags); | ||
1374 | 1377 | ||
1375 | /* Disable TxOk if there are no outstanding tx packets. | 1378 | /* Disable TxOk if there are no outstanding tx packets. |
1376 | */ | 1379 | */ |
1377 | if ((dev->tx_done_idx == dev->tx_free_idx) && | 1380 | if ((dev->tx_done_idx == dev->tx_free_idx) && |
1378 | (dev->IMR_cache & ISR_TXOK)) { | 1381 | (dev->IMR_cache & ISR_TXOK)) { |
1379 | spin_lock_irq(&dev->misc_lock); | 1382 | spin_lock_irqsave(&dev->misc_lock, flags); |
1380 | dev->IMR_cache &= ~ISR_TXOK; | 1383 | dev->IMR_cache &= ~ISR_TXOK; |
1381 | writel(dev->IMR_cache, dev->base + IMR); | 1384 | writel(dev->IMR_cache, dev->base + IMR); |
1382 | spin_unlock_irq(&dev->misc_lock); | 1385 | spin_unlock_irqrestore(&dev->misc_lock, flags); |
1383 | } | 1386 | } |
1384 | } | 1387 | } |
1385 | 1388 | ||
@@ -1390,10 +1393,10 @@ static void ns83820_do_isr(struct net_device *ndev, u32 isr) | |||
1390 | * nature are expected, we must enable TxOk. | 1393 | * nature are expected, we must enable TxOk. |
1391 | */ | 1394 | */ |
1392 | if ((ISR_TXIDLE & isr) && (dev->tx_done_idx != dev->tx_free_idx)) { | 1395 | if ((ISR_TXIDLE & isr) && (dev->tx_done_idx != dev->tx_free_idx)) { |
1393 | spin_lock_irq(&dev->misc_lock); | 1396 | spin_lock_irqsave(&dev->misc_lock, flags); |
1394 | dev->IMR_cache |= ISR_TXOK; | 1397 | dev->IMR_cache |= ISR_TXOK; |
1395 | writel(dev->IMR_cache, dev->base + IMR); | 1398 | writel(dev->IMR_cache, dev->base + IMR); |
1396 | spin_unlock_irq(&dev->misc_lock); | 1399 | spin_unlock_irqrestore(&dev->misc_lock, flags); |
1397 | } | 1400 | } |
1398 | 1401 | ||
1399 | /* MIB interrupt: one of the statistics counters is about to overflow */ | 1402 | /* MIB interrupt: one of the statistics counters is about to overflow */ |
@@ -1455,7 +1458,7 @@ static void ns83820_tx_timeout(struct net_device *ndev) | |||
1455 | u32 tx_done_idx, *desc; | 1458 | u32 tx_done_idx, *desc; |
1456 | unsigned long flags; | 1459 | unsigned long flags; |
1457 | 1460 | ||
1458 | local_irq_save(flags); | 1461 | spin_lock_irqsave(&dev->tx_lock, flags); |
1459 | 1462 | ||
1460 | tx_done_idx = dev->tx_done_idx; | 1463 | tx_done_idx = dev->tx_done_idx; |
1461 | desc = dev->tx_descs + (tx_done_idx * DESC_SIZE); | 1464 | desc = dev->tx_descs + (tx_done_idx * DESC_SIZE); |
@@ -1482,7 +1485,7 @@ static void ns83820_tx_timeout(struct net_device *ndev) | |||
1482 | ndev->name, | 1485 | ndev->name, |
1483 | tx_done_idx, dev->tx_free_idx, le32_to_cpu(desc[DESC_CMDSTS])); | 1486 | tx_done_idx, dev->tx_free_idx, le32_to_cpu(desc[DESC_CMDSTS])); |
1484 | 1487 | ||
1485 | local_irq_restore(flags); | 1488 | spin_unlock_irqrestore(&dev->tx_lock, flags); |
1486 | } | 1489 | } |
1487 | 1490 | ||
1488 | static void ns83820_tx_watch(unsigned long data) | 1491 | static void ns83820_tx_watch(unsigned long data) |
@@ -1832,7 +1835,7 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_ | |||
1832 | } else if (!pci_set_dma_mask(pci_dev, DMA_32BIT_MASK)) { | 1835 | } else if (!pci_set_dma_mask(pci_dev, DMA_32BIT_MASK)) { |
1833 | using_dac = 0; | 1836 | using_dac = 0; |
1834 | } else { | 1837 | } else { |
1835 | printk(KERN_WARNING "ns83820.c: pci_set_dma_mask failed!\n"); | 1838 | dev_warn(&pci_dev->dev, "pci_set_dma_mask failed!\n"); |
1836 | return -ENODEV; | 1839 | return -ENODEV; |
1837 | } | 1840 | } |
1838 | 1841 | ||
@@ -1855,7 +1858,7 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_ | |||
1855 | 1858 | ||
1856 | err = pci_enable_device(pci_dev); | 1859 | err = pci_enable_device(pci_dev); |
1857 | if (err) { | 1860 | if (err) { |
1858 | printk(KERN_INFO "ns83820: pci_enable_dev failed: %d\n", err); | 1861 | dev_info(&pci_dev->dev, "pci_enable_dev failed: %d\n", err); |
1859 | goto out_free; | 1862 | goto out_free; |
1860 | } | 1863 | } |
1861 | 1864 | ||
@@ -1884,8 +1887,8 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_ | |||
1884 | err = request_irq(pci_dev->irq, ns83820_irq, IRQF_SHARED, | 1887 | err = request_irq(pci_dev->irq, ns83820_irq, IRQF_SHARED, |
1885 | DRV_NAME, ndev); | 1888 | DRV_NAME, ndev); |
1886 | if (err) { | 1889 | if (err) { |
1887 | printk(KERN_INFO "ns83820: unable to register irq %d\n", | 1890 | dev_info(&pci_dev->dev, "unable to register irq %d, err %d\n", |
1888 | pci_dev->irq); | 1891 | pci_dev->irq, err); |
1889 | goto out_disable; | 1892 | goto out_disable; |
1890 | } | 1893 | } |
1891 | 1894 | ||
@@ -1899,7 +1902,7 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_ | |||
1899 | rtnl_lock(); | 1902 | rtnl_lock(); |
1900 | err = dev_alloc_name(ndev, ndev->name); | 1903 | err = dev_alloc_name(ndev, ndev->name); |
1901 | if (err < 0) { | 1904 | if (err < 0) { |
1902 | printk(KERN_INFO "ns83820: unable to get netdev name: %d\n", err); | 1905 | dev_info(&pci_dev->dev, "unable to get netdev name: %d\n", err); |
1903 | goto out_free_irq; | 1906 | goto out_free_irq; |
1904 | } | 1907 | } |
1905 | 1908 | ||
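The ns83820 hunks above all follow one rule: once dev->misc_lock and dev->tx_lock can be taken from the ISR as well as from process context, every acquisition moves from the spin_lock_irq()/spin_unlock_irq() pair to the irqsave/irqrestore variants, so a nested use inside the interrupt path does not re-enable interrupts early. A minimal sketch of that pattern, assuming a hypothetical foo_priv structure that is not taken from the driver:

	#include <linux/types.h>
	#include <linux/spinlock.h>

	struct foo_priv {
		spinlock_t misc_lock;
		u32 imr_cache;
	};

	/* Safe from both the ISR and process context: the caller's IRQ state is
	 * saved in 'flags' and restored on unlock, rather than forced back on. */
	static void foo_mask_rx_irq(struct foo_priv *p)
	{
		unsigned long flags;

		spin_lock_irqsave(&p->misc_lock, flags);
		p->imr_cache &= ~0x1;	/* example: mask a single interrupt source */
		spin_unlock_irqrestore(&p->misc_lock, flags);
	}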
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c index 3388ee1313ea..e0e293964042 100644 --- a/drivers/net/pci-skeleton.c +++ b/drivers/net/pci-skeleton.c | |||
@@ -601,7 +601,7 @@ static int __devinit netdrv_init_board (struct pci_dev *pdev, | |||
601 | /* dev zeroed in alloc_etherdev */ | 601 | /* dev zeroed in alloc_etherdev */ |
602 | dev = alloc_etherdev (sizeof (*tp)); | 602 | dev = alloc_etherdev (sizeof (*tp)); |
603 | if (dev == NULL) { | 603 | if (dev == NULL) { |
604 | printk (KERN_ERR PFX "unable to alloc new ethernet\n"); | 604 | dev_err(&pdev->dev, "unable to alloc new ethernet\n"); |
605 | DPRINTK ("EXIT, returning -ENOMEM\n"); | 605 | DPRINTK ("EXIT, returning -ENOMEM\n"); |
606 | return -ENOMEM; | 606 | return -ENOMEM; |
607 | } | 607 | } |
@@ -631,14 +631,14 @@ static int __devinit netdrv_init_board (struct pci_dev *pdev, | |||
631 | 631 | ||
632 | /* make sure PCI base addr 0 is PIO */ | 632 | /* make sure PCI base addr 0 is PIO */ |
633 | if (!(pio_flags & IORESOURCE_IO)) { | 633 | if (!(pio_flags & IORESOURCE_IO)) { |
634 | printk (KERN_ERR PFX "region #0 not a PIO resource, aborting\n"); | 634 | dev_err(&pdev->dev, "region #0 not a PIO resource, aborting\n"); |
635 | rc = -ENODEV; | 635 | rc = -ENODEV; |
636 | goto err_out; | 636 | goto err_out; |
637 | } | 637 | } |
638 | 638 | ||
639 | /* make sure PCI base addr 1 is MMIO */ | 639 | /* make sure PCI base addr 1 is MMIO */ |
640 | if (!(mmio_flags & IORESOURCE_MEM)) { | 640 | if (!(mmio_flags & IORESOURCE_MEM)) { |
641 | printk (KERN_ERR PFX "region #1 not an MMIO resource, aborting\n"); | 641 | dev_err(&pdev->dev, "region #1 not an MMIO resource, aborting\n"); |
642 | rc = -ENODEV; | 642 | rc = -ENODEV; |
643 | goto err_out; | 643 | goto err_out; |
644 | } | 644 | } |
@@ -646,12 +646,12 @@ static int __devinit netdrv_init_board (struct pci_dev *pdev, | |||
646 | /* check for weird/broken PCI region reporting */ | 646 | /* check for weird/broken PCI region reporting */ |
647 | if ((pio_len < NETDRV_MIN_IO_SIZE) || | 647 | if ((pio_len < NETDRV_MIN_IO_SIZE) || |
648 | (mmio_len < NETDRV_MIN_IO_SIZE)) { | 648 | (mmio_len < NETDRV_MIN_IO_SIZE)) { |
649 | printk (KERN_ERR PFX "Invalid PCI region size(s), aborting\n"); | 649 | dev_err(&pdev->dev, "Invalid PCI region size(s), aborting\n"); |
650 | rc = -ENODEV; | 650 | rc = -ENODEV; |
651 | goto err_out; | 651 | goto err_out; |
652 | } | 652 | } |
653 | 653 | ||
654 | rc = pci_request_regions (pdev, "pci-skeleton"); | 654 | rc = pci_request_regions (pdev, MODNAME); |
655 | if (rc) | 655 | if (rc) |
656 | goto err_out; | 656 | goto err_out; |
657 | 657 | ||
@@ -663,7 +663,7 @@ static int __devinit netdrv_init_board (struct pci_dev *pdev, | |||
663 | /* ioremap MMIO region */ | 663 | /* ioremap MMIO region */ |
664 | ioaddr = ioremap (mmio_start, mmio_len); | 664 | ioaddr = ioremap (mmio_start, mmio_len); |
665 | if (ioaddr == NULL) { | 665 | if (ioaddr == NULL) { |
666 | printk (KERN_ERR PFX "cannot remap MMIO, aborting\n"); | 666 | dev_err(&pdev->dev, "cannot remap MMIO, aborting\n"); |
667 | rc = -EIO; | 667 | rc = -EIO; |
668 | goto err_out_free_res; | 668 | goto err_out_free_res; |
669 | } | 669 | } |
@@ -699,9 +699,10 @@ static int __devinit netdrv_init_board (struct pci_dev *pdev, | |||
699 | } | 699 | } |
700 | 700 | ||
701 | /* if unknown chip, assume array element #0, original RTL-8139 in this case */ | 701 | /* if unknown chip, assume array element #0, original RTL-8139 in this case */ |
702 | printk (KERN_DEBUG PFX "PCI device %s: unknown chip version, assuming RTL-8139\n", | 702 | dev_printk (KERN_DEBUG, &pdev->dev, |
703 | pci_name(pdev)); | 703 | "unknown chip version, assuming RTL-8139\n"); |
704 | printk (KERN_DEBUG PFX "PCI device %s: TxConfig = 0x%lx\n", pci_name(pdev), NETDRV_R32 (TxConfig)); | 704 | dev_printk (KERN_DEBUG, &pdev->dev, "TxConfig = 0x%lx\n", |
705 | NETDRV_R32 (TxConfig)); | ||
705 | tp->chipset = 0; | 706 | tp->chipset = 0; |
706 | 707 | ||
707 | match: | 708 | match: |
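The pci-skeleton (and ns83820) hunks above also replace bare printk(KERN_ERR PFX ...) calls with the dev_err()/dev_info()/dev_printk() helpers, which prefix every message with the driver and PCI device name automatically. A minimal sketch of the idiom; the probe function below is illustrative only:

	#include <linux/pci.h>
	#include <linux/device.h>

	static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		int err;

		err = pci_enable_device(pdev);
		if (err) {
			/* dev_err() emits "driver 0000:bb:dd.f: ..." for us */
			dev_err(&pdev->dev, "pci_enable_device failed: %d\n", err);
			return err;
		}
		dev_info(&pdev->dev, "device enabled\n");
		return 0;
	}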
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c index 9bae77ce1314..4122bb46f5ff 100644 --- a/drivers/net/pcmcia/xirc2ps_cs.c +++ b/drivers/net/pcmcia/xirc2ps_cs.c | |||
@@ -345,6 +345,7 @@ typedef struct local_info_t { | |||
345 | void __iomem *dingo_ccr; /* only used for CEM56 cards */ | 345 | void __iomem *dingo_ccr; /* only used for CEM56 cards */ |
346 | unsigned last_ptr_value; /* last packets transmitted value */ | 346 | unsigned last_ptr_value; /* last packets transmitted value */ |
347 | const char *manf_str; | 347 | const char *manf_str; |
348 | struct work_struct tx_timeout_task; | ||
348 | } local_info_t; | 349 | } local_info_t; |
349 | 350 | ||
350 | /**************** | 351 | /**************** |
@@ -352,6 +353,7 @@ typedef struct local_info_t { | |||
352 | */ | 353 | */ |
353 | static int do_start_xmit(struct sk_buff *skb, struct net_device *dev); | 354 | static int do_start_xmit(struct sk_buff *skb, struct net_device *dev); |
354 | static void do_tx_timeout(struct net_device *dev); | 355 | static void do_tx_timeout(struct net_device *dev); |
356 | static void xirc2ps_tx_timeout_task(void *data); | ||
355 | static struct net_device_stats *do_get_stats(struct net_device *dev); | 357 | static struct net_device_stats *do_get_stats(struct net_device *dev); |
356 | static void set_addresses(struct net_device *dev); | 358 | static void set_addresses(struct net_device *dev); |
357 | static void set_multicast_list(struct net_device *dev); | 359 | static void set_multicast_list(struct net_device *dev); |
@@ -589,6 +591,7 @@ xirc2ps_probe(struct pcmcia_device *link) | |||
589 | #ifdef HAVE_TX_TIMEOUT | 591 | #ifdef HAVE_TX_TIMEOUT |
590 | dev->tx_timeout = do_tx_timeout; | 592 | dev->tx_timeout = do_tx_timeout; |
591 | dev->watchdog_timeo = TX_TIMEOUT; | 593 | dev->watchdog_timeo = TX_TIMEOUT; |
594 | INIT_WORK(&local->tx_timeout_task, xirc2ps_tx_timeout_task, dev); | ||
592 | #endif | 595 | #endif |
593 | 596 | ||
594 | return xirc2ps_config(link); | 597 | return xirc2ps_config(link); |
@@ -1341,17 +1344,24 @@ xirc2ps_interrupt(int irq, void *dev_id, struct pt_regs *regs) | |||
1341 | /*====================================================================*/ | 1344 | /*====================================================================*/ |
1342 | 1345 | ||
1343 | static void | 1346 | static void |
1344 | do_tx_timeout(struct net_device *dev) | 1347 | xirc2ps_tx_timeout_task(void *data) |
1345 | { | 1348 | { |
1346 | local_info_t *lp = netdev_priv(dev); | 1349 | struct net_device *dev = data; |
1347 | printk(KERN_NOTICE "%s: transmit timed out\n", dev->name); | ||
1348 | lp->stats.tx_errors++; | ||
1349 | /* reset the card */ | 1350 | /* reset the card */ |
1350 | do_reset(dev,1); | 1351 | do_reset(dev,1); |
1351 | dev->trans_start = jiffies; | 1352 | dev->trans_start = jiffies; |
1352 | netif_wake_queue(dev); | 1353 | netif_wake_queue(dev); |
1353 | } | 1354 | } |
1354 | 1355 | ||
1356 | static void | ||
1357 | do_tx_timeout(struct net_device *dev) | ||
1358 | { | ||
1359 | local_info_t *lp = netdev_priv(dev); | ||
1360 | lp->stats.tx_errors++; | ||
1361 | printk(KERN_NOTICE "%s: transmit timed out\n", dev->name); | ||
1362 | schedule_work(&lp->tx_timeout_task); | ||
1363 | } | ||
1364 | |||
1355 | static int | 1365 | static int |
1356 | do_start_xmit(struct sk_buff *skb, struct net_device *dev) | 1366 | do_start_xmit(struct sk_buff *skb, struct net_device *dev) |
1357 | { | 1367 | { |
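The xirc2ps_cs change above defers the actual card reset out of the netdev watchdog's tx_timeout handler, which runs in atomic context, into a work item that runs in process context and may sleep. A minimal sketch of the same pattern, using the three-argument INIT_WORK() of kernels of this vintage; the xyz_* names are hypothetical:

	#include <linux/netdevice.h>
	#include <linux/workqueue.h>

	struct xyz_priv {
		struct net_device_stats stats;
		struct work_struct tx_timeout_task;
	};

	static void xyz_tx_timeout_task(void *data)	/* process context, may sleep */
	{
		struct net_device *dev = data;

		/* heavy reset/reinit work goes here */
		netif_wake_queue(dev);
	}

	static void xyz_tx_timeout(struct net_device *dev)	/* atomic context */
	{
		struct xyz_priv *lp = netdev_priv(dev);

		lp->stats.tx_errors++;
		schedule_work(&lp->tx_timeout_task);	/* punt to keventd */
	}

	/* at setup time: INIT_WORK(&lp->tx_timeout_task, xyz_tx_timeout_task, dev); */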
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c index d768f3d1ac28..d50bcb89dd28 100644 --- a/drivers/net/pcnet32.c +++ b/drivers/net/pcnet32.c | |||
@@ -58,18 +58,15 @@ static const char *const version = | |||
58 | * PCI device identifiers for "new style" Linux PCI Device Drivers | 58 | * PCI device identifiers for "new style" Linux PCI Device Drivers |
59 | */ | 59 | */ |
60 | static struct pci_device_id pcnet32_pci_tbl[] = { | 60 | static struct pci_device_id pcnet32_pci_tbl[] = { |
61 | { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME, | 61 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME), }, |
62 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, | 62 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), }, |
63 | { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE, | ||
64 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, | ||
65 | 63 | ||
66 | /* | 64 | /* |
67 | * Adapters that were sold with IBM's RS/6000 or pSeries hardware have | 65 | * Adapters that were sold with IBM's RS/6000 or pSeries hardware have |
68 | * the incorrect vendor id. | 66 | * the incorrect vendor id. |
69 | */ | 67 | */ |
70 | { PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE, | 68 | { PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE), |
71 | PCI_ANY_ID, PCI_ANY_ID, | 69 | .class = (PCI_CLASS_NETWORK_ETHERNET << 8), .class_mask = 0xffff00, }, |
72 | PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, 0}, | ||
73 | 70 | ||
74 | { } /* terminate list */ | 71 | { } /* terminate list */ |
75 | }; | 72 | }; |
@@ -188,6 +185,25 @@ static int homepna[MAX_UNITS]; | |||
188 | 185 | ||
189 | #define PCNET32_TOTAL_SIZE 0x20 | 186 | #define PCNET32_TOTAL_SIZE 0x20 |
190 | 187 | ||
188 | #define CSR0 0 | ||
189 | #define CSR0_INIT 0x1 | ||
190 | #define CSR0_START 0x2 | ||
191 | #define CSR0_STOP 0x4 | ||
192 | #define CSR0_TXPOLL 0x8 | ||
193 | #define CSR0_INTEN 0x40 | ||
194 | #define CSR0_IDON 0x0100 | ||
195 | #define CSR0_NORMAL (CSR0_START | CSR0_INTEN) | ||
196 | #define PCNET32_INIT_LOW 1 | ||
197 | #define PCNET32_INIT_HIGH 2 | ||
198 | #define CSR3 3 | ||
199 | #define CSR4 4 | ||
200 | #define CSR5 5 | ||
201 | #define CSR5_SUSPEND 0x0001 | ||
202 | #define CSR15 15 | ||
203 | #define PCNET32_MC_FILTER 8 | ||
204 | |||
205 | #define PCNET32_79C970A 0x2621 | ||
206 | |||
191 | /* The PCNET32 Rx and Tx ring descriptors. */ | 207 | /* The PCNET32 Rx and Tx ring descriptors. */ |
192 | struct pcnet32_rx_head { | 208 | struct pcnet32_rx_head { |
193 | u32 base; | 209 | u32 base; |
@@ -275,9 +291,9 @@ struct pcnet32_private { | |||
275 | 291 | ||
276 | /* each bit indicates an available PHY */ | 292 | /* each bit indicates an available PHY */ |
277 | u32 phymask; | 293 | u32 phymask; |
294 | unsigned short chip_version; /* which variant this is */ | ||
278 | }; | 295 | }; |
279 | 296 | ||
280 | static void pcnet32_probe_vlbus(void); | ||
281 | static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *); | 297 | static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *); |
282 | static int pcnet32_probe1(unsigned long, int, struct pci_dev *); | 298 | static int pcnet32_probe1(unsigned long, int, struct pci_dev *); |
283 | static int pcnet32_open(struct net_device *); | 299 | static int pcnet32_open(struct net_device *); |
@@ -419,6 +435,238 @@ static struct pcnet32_access pcnet32_dwio = { | |||
419 | .reset = pcnet32_dwio_reset | 435 | .reset = pcnet32_dwio_reset |
420 | }; | 436 | }; |
421 | 437 | ||
438 | static void pcnet32_netif_stop(struct net_device *dev) | ||
439 | { | ||
440 | dev->trans_start = jiffies; | ||
441 | netif_poll_disable(dev); | ||
442 | netif_tx_disable(dev); | ||
443 | } | ||
444 | |||
445 | static void pcnet32_netif_start(struct net_device *dev) | ||
446 | { | ||
447 | netif_wake_queue(dev); | ||
448 | netif_poll_enable(dev); | ||
449 | } | ||
450 | |||
451 | /* | ||
452 | * Allocate space for the new sized tx ring. | ||
453 | * Free old resources | ||
454 | * Save new resources. | ||
455 | * Any failure keeps old resources. | ||
456 | * Must be called with lp->lock held. | ||
457 | */ | ||
458 | static void pcnet32_realloc_tx_ring(struct net_device *dev, | ||
459 | struct pcnet32_private *lp, | ||
460 | unsigned int size) | ||
461 | { | ||
462 | dma_addr_t new_ring_dma_addr; | ||
463 | dma_addr_t *new_dma_addr_list; | ||
464 | struct pcnet32_tx_head *new_tx_ring; | ||
465 | struct sk_buff **new_skb_list; | ||
466 | |||
467 | pcnet32_purge_tx_ring(dev); | ||
468 | |||
469 | new_tx_ring = pci_alloc_consistent(lp->pci_dev, | ||
470 | sizeof(struct pcnet32_tx_head) * | ||
471 | (1 << size), | ||
472 | &new_ring_dma_addr); | ||
473 | if (new_tx_ring == NULL) { | ||
474 | if (netif_msg_drv(lp)) | ||
475 | printk("\n" KERN_ERR | ||
476 | "%s: Consistent memory allocation failed.\n", | ||
477 | dev->name); | ||
478 | return; | ||
479 | } | ||
480 | memset(new_tx_ring, 0, sizeof(struct pcnet32_tx_head) * (1 << size)); | ||
481 | |||
482 | new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t), | ||
483 | GFP_ATOMIC); | ||
484 | if (!new_dma_addr_list) { | ||
485 | if (netif_msg_drv(lp)) | ||
486 | printk("\n" KERN_ERR | ||
487 | "%s: Memory allocation failed.\n", dev->name); | ||
488 | goto free_new_tx_ring; | ||
489 | } | ||
490 | |||
491 | new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *), | ||
492 | GFP_ATOMIC); | ||
493 | if (!new_skb_list) { | ||
494 | if (netif_msg_drv(lp)) | ||
495 | printk("\n" KERN_ERR | ||
496 | "%s: Memory allocation failed.\n", dev->name); | ||
497 | goto free_new_lists; | ||
498 | } | ||
499 | |||
500 | kfree(lp->tx_skbuff); | ||
501 | kfree(lp->tx_dma_addr); | ||
502 | pci_free_consistent(lp->pci_dev, | ||
503 | sizeof(struct pcnet32_tx_head) * | ||
504 | lp->tx_ring_size, lp->tx_ring, | ||
505 | lp->tx_ring_dma_addr); | ||
506 | |||
507 | lp->tx_ring_size = (1 << size); | ||
508 | lp->tx_mod_mask = lp->tx_ring_size - 1; | ||
509 | lp->tx_len_bits = (size << 12); | ||
510 | lp->tx_ring = new_tx_ring; | ||
511 | lp->tx_ring_dma_addr = new_ring_dma_addr; | ||
512 | lp->tx_dma_addr = new_dma_addr_list; | ||
513 | lp->tx_skbuff = new_skb_list; | ||
514 | return; | ||
515 | |||
516 | free_new_lists: | ||
517 | kfree(new_dma_addr_list); | ||
518 | free_new_tx_ring: | ||
519 | pci_free_consistent(lp->pci_dev, | ||
520 | sizeof(struct pcnet32_tx_head) * | ||
521 | (1 << size), | ||
522 | new_tx_ring, | ||
523 | new_ring_dma_addr); | ||
524 | return; | ||
525 | } | ||
526 | |||
527 | /* | ||
528 | * Allocate space for the new sized rx ring. | ||
529 | * Re-use old receive buffers. | ||
530 | * alloc extra buffers | ||
531 | * free unneeded buffers | ||
532 | * free unneeded buffers | ||
533 | * Save new resources. | ||
534 | * Any failure keeps old resources. | ||
535 | * Must be called with lp->lock held. | ||
536 | */ | ||
537 | static void pcnet32_realloc_rx_ring(struct net_device *dev, | ||
538 | struct pcnet32_private *lp, | ||
539 | unsigned int size) | ||
540 | { | ||
541 | dma_addr_t new_ring_dma_addr; | ||
542 | dma_addr_t *new_dma_addr_list; | ||
543 | struct pcnet32_rx_head *new_rx_ring; | ||
544 | struct sk_buff **new_skb_list; | ||
545 | int new, overlap; | ||
546 | |||
547 | new_rx_ring = pci_alloc_consistent(lp->pci_dev, | ||
548 | sizeof(struct pcnet32_rx_head) * | ||
549 | (1 << size), | ||
550 | &new_ring_dma_addr); | ||
551 | if (new_rx_ring == NULL) { | ||
552 | if (netif_msg_drv(lp)) | ||
553 | printk("\n" KERN_ERR | ||
554 | "%s: Consistent memory allocation failed.\n", | ||
555 | dev->name); | ||
556 | return; | ||
557 | } | ||
558 | memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size)); | ||
559 | |||
560 | new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t), | ||
561 | GFP_ATOMIC); | ||
562 | if (!new_dma_addr_list) { | ||
563 | if (netif_msg_drv(lp)) | ||
564 | printk("\n" KERN_ERR | ||
565 | "%s: Memory allocation failed.\n", dev->name); | ||
566 | goto free_new_rx_ring; | ||
567 | } | ||
568 | |||
569 | new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *), | ||
570 | GFP_ATOMIC); | ||
571 | if (!new_skb_list) { | ||
572 | if (netif_msg_drv(lp)) | ||
573 | printk("\n" KERN_ERR | ||
574 | "%s: Memory allocation failed.\n", dev->name); | ||
575 | goto free_new_lists; | ||
576 | } | ||
577 | |||
578 | /* first copy the current receive buffers */ | ||
579 | overlap = min(size, lp->rx_ring_size); | ||
580 | for (new = 0; new < overlap; new++) { | ||
581 | new_rx_ring[new] = lp->rx_ring[new]; | ||
582 | new_dma_addr_list[new] = lp->rx_dma_addr[new]; | ||
583 | new_skb_list[new] = lp->rx_skbuff[new]; | ||
584 | } | ||
585 | /* now allocate any new buffers needed */ | ||
586 | for (; new < size; new++ ) { | ||
587 | struct sk_buff *rx_skbuff; | ||
588 | new_skb_list[new] = dev_alloc_skb(PKT_BUF_SZ); | ||
589 | if (!(rx_skbuff = new_skb_list[new])) { | ||
590 | /* keep the original lists and buffers */ | ||
591 | if (netif_msg_drv(lp)) | ||
592 | printk(KERN_ERR | ||
593 | "%s: pcnet32_realloc_rx_ring dev_alloc_skb failed.\n", | ||
594 | dev->name); | ||
595 | goto free_all_new; | ||
596 | } | ||
597 | skb_reserve(rx_skbuff, 2); | ||
598 | |||
599 | new_dma_addr_list[new] = | ||
600 | pci_map_single(lp->pci_dev, rx_skbuff->data, | ||
601 | PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); | ||
602 | new_rx_ring[new].base = (u32) le32_to_cpu(new_dma_addr_list[new]); | ||
603 | new_rx_ring[new].buf_length = le16_to_cpu(2 - PKT_BUF_SZ); | ||
604 | new_rx_ring[new].status = le16_to_cpu(0x8000); | ||
605 | } | ||
606 | /* and free any unneeded buffers */ | ||
607 | for (; new < lp->rx_ring_size; new++) { | ||
608 | if (lp->rx_skbuff[new]) { | ||
609 | pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new], | ||
610 | PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); | ||
611 | dev_kfree_skb(lp->rx_skbuff[new]); | ||
612 | } | ||
613 | } | ||
614 | |||
615 | kfree(lp->rx_skbuff); | ||
616 | kfree(lp->rx_dma_addr); | ||
617 | pci_free_consistent(lp->pci_dev, | ||
618 | sizeof(struct pcnet32_rx_head) * | ||
619 | lp->rx_ring_size, lp->rx_ring, | ||
620 | lp->rx_ring_dma_addr); | ||
621 | |||
622 | lp->rx_ring_size = (1 << size); | ||
623 | lp->rx_mod_mask = lp->rx_ring_size - 1; | ||
624 | lp->rx_len_bits = (size << 4); | ||
625 | lp->rx_ring = new_rx_ring; | ||
626 | lp->rx_ring_dma_addr = new_ring_dma_addr; | ||
627 | lp->rx_dma_addr = new_dma_addr_list; | ||
628 | lp->rx_skbuff = new_skb_list; | ||
629 | return; | ||
630 | |||
631 | free_all_new: | ||
632 | for (; --new >= lp->rx_ring_size; ) { | ||
633 | if (new_skb_list[new]) { | ||
634 | pci_unmap_single(lp->pci_dev, new_dma_addr_list[new], | ||
635 | PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); | ||
636 | dev_kfree_skb(new_skb_list[new]); | ||
637 | } | ||
638 | } | ||
639 | kfree(new_skb_list); | ||
640 | free_new_lists: | ||
641 | kfree(new_dma_addr_list); | ||
642 | free_new_rx_ring: | ||
643 | pci_free_consistent(lp->pci_dev, | ||
644 | sizeof(struct pcnet32_rx_head) * | ||
645 | (1 << size), | ||
646 | new_rx_ring, | ||
647 | new_ring_dma_addr); | ||
648 | return; | ||
649 | } | ||
650 | |||
651 | static void pcnet32_purge_rx_ring(struct net_device *dev) | ||
652 | { | ||
653 | struct pcnet32_private *lp = dev->priv; | ||
654 | int i; | ||
655 | |||
656 | /* free all allocated skbuffs */ | ||
657 | for (i = 0; i < lp->rx_ring_size; i++) { | ||
658 | lp->rx_ring[i].status = 0; /* CPU owns buffer */ | ||
659 | wmb(); /* Make sure adapter sees owner change */ | ||
660 | if (lp->rx_skbuff[i]) { | ||
661 | pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], | ||
662 | PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); | ||
663 | dev_kfree_skb_any(lp->rx_skbuff[i]); | ||
664 | } | ||
665 | lp->rx_skbuff[i] = NULL; | ||
666 | lp->rx_dma_addr[i] = 0; | ||
667 | } | ||
668 | } | ||
669 | |||
422 | #ifdef CONFIG_NET_POLL_CONTROLLER | 670 | #ifdef CONFIG_NET_POLL_CONTROLLER |
423 | static void pcnet32_poll_controller(struct net_device *dev) | 671 | static void pcnet32_poll_controller(struct net_device *dev) |
424 | { | 672 | { |
@@ -479,9 +727,11 @@ static u32 pcnet32_get_link(struct net_device *dev) | |||
479 | spin_lock_irqsave(&lp->lock, flags); | 727 | spin_lock_irqsave(&lp->lock, flags); |
480 | if (lp->mii) { | 728 | if (lp->mii) { |
481 | r = mii_link_ok(&lp->mii_if); | 729 | r = mii_link_ok(&lp->mii_if); |
482 | } else { | 730 | } else if (lp->chip_version >= PCNET32_79C970A) { |
483 | ulong ioaddr = dev->base_addr; /* card base I/O address */ | 731 | ulong ioaddr = dev->base_addr; /* card base I/O address */ |
484 | r = (lp->a.read_bcr(ioaddr, 4) != 0xc0); | 732 | r = (lp->a.read_bcr(ioaddr, 4) != 0xc0); |
733 | } else { /* can not detect link on really old chips */ | ||
734 | r = 1; | ||
485 | } | 735 | } |
486 | spin_unlock_irqrestore(&lp->lock, flags); | 736 | spin_unlock_irqrestore(&lp->lock, flags); |
487 | 737 | ||
@@ -519,10 +769,10 @@ static void pcnet32_get_ringparam(struct net_device *dev, | |||
519 | { | 769 | { |
520 | struct pcnet32_private *lp = dev->priv; | 770 | struct pcnet32_private *lp = dev->priv; |
521 | 771 | ||
522 | ering->tx_max_pending = TX_MAX_RING_SIZE - 1; | 772 | ering->tx_max_pending = TX_MAX_RING_SIZE; |
523 | ering->tx_pending = lp->tx_ring_size - 1; | 773 | ering->tx_pending = lp->tx_ring_size; |
524 | ering->rx_max_pending = RX_MAX_RING_SIZE - 1; | 774 | ering->rx_max_pending = RX_MAX_RING_SIZE; |
525 | ering->rx_pending = lp->rx_ring_size - 1; | 775 | ering->rx_pending = lp->rx_ring_size; |
526 | } | 776 | } |
527 | 777 | ||
528 | static int pcnet32_set_ringparam(struct net_device *dev, | 778 | static int pcnet32_set_ringparam(struct net_device *dev, |
@@ -530,56 +780,53 @@ static int pcnet32_set_ringparam(struct net_device *dev, | |||
530 | { | 780 | { |
531 | struct pcnet32_private *lp = dev->priv; | 781 | struct pcnet32_private *lp = dev->priv; |
532 | unsigned long flags; | 782 | unsigned long flags; |
783 | unsigned int size; | ||
784 | ulong ioaddr = dev->base_addr; | ||
533 | int i; | 785 | int i; |
534 | 786 | ||
535 | if (ering->rx_mini_pending || ering->rx_jumbo_pending) | 787 | if (ering->rx_mini_pending || ering->rx_jumbo_pending) |
536 | return -EINVAL; | 788 | return -EINVAL; |
537 | 789 | ||
538 | if (netif_running(dev)) | 790 | if (netif_running(dev)) |
539 | pcnet32_close(dev); | 791 | pcnet32_netif_stop(dev); |
540 | 792 | ||
541 | spin_lock_irqsave(&lp->lock, flags); | 793 | spin_lock_irqsave(&lp->lock, flags); |
542 | pcnet32_free_ring(dev); | 794 | lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */ |
543 | lp->tx_ring_size = | 795 | |
544 | min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE); | 796 | size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE); |
545 | lp->rx_ring_size = | ||
546 | min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE); | ||
547 | 797 | ||
548 | /* set the minimum ring size to 4, to allow the loopback test to work | 798 | /* set the minimum ring size to 4, to allow the loopback test to work |
549 | * unchanged. | 799 | * unchanged. |
550 | */ | 800 | */ |
551 | for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) { | 801 | for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) { |
552 | if (lp->tx_ring_size <= (1 << i)) | 802 | if (size <= (1 << i)) |
553 | break; | 803 | break; |
554 | } | 804 | } |
555 | lp->tx_ring_size = (1 << i); | 805 | if ((1 << i) != lp->tx_ring_size) |
556 | lp->tx_mod_mask = lp->tx_ring_size - 1; | 806 | pcnet32_realloc_tx_ring(dev, lp, i); |
557 | lp->tx_len_bits = (i << 12); | 807 | |
558 | 808 | size = min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE); | |
559 | for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) { | 809 | for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) { |
560 | if (lp->rx_ring_size <= (1 << i)) | 810 | if (size <= (1 << i)) |
561 | break; | 811 | break; |
562 | } | 812 | } |
563 | lp->rx_ring_size = (1 << i); | 813 | if ((1 << i) != lp->rx_ring_size) |
564 | lp->rx_mod_mask = lp->rx_ring_size - 1; | 814 | pcnet32_realloc_rx_ring(dev, lp, i); |
565 | lp->rx_len_bits = (i << 4); | 815 | |
816 | dev->weight = lp->rx_ring_size / 2; | ||
566 | 817 | ||
567 | if (pcnet32_alloc_ring(dev, dev->name)) { | 818 | if (netif_running(dev)) { |
568 | pcnet32_free_ring(dev); | 819 | pcnet32_netif_start(dev); |
569 | spin_unlock_irqrestore(&lp->lock, flags); | 820 | pcnet32_restart(dev, CSR0_NORMAL); |
570 | return -ENOMEM; | ||
571 | } | 821 | } |
572 | 822 | ||
573 | spin_unlock_irqrestore(&lp->lock, flags); | 823 | spin_unlock_irqrestore(&lp->lock, flags); |
574 | 824 | ||
575 | if (pcnet32_debug & NETIF_MSG_DRV) | 825 | if (netif_msg_drv(lp)) |
576 | printk(KERN_INFO PFX | 826 | printk(KERN_INFO |
577 | "%s: Ring Param Settings: RX: %d, TX: %d\n", dev->name, | 827 | "%s: Ring Param Settings: RX: %d, TX: %d\n", dev->name, |
578 | lp->rx_ring_size, lp->tx_ring_size); | 828 | lp->rx_ring_size, lp->tx_ring_size); |
579 | 829 | ||
580 | if (netif_running(dev)) | ||
581 | pcnet32_open(dev); | ||
582 | |||
583 | return 0; | 830 | return 0; |
584 | } | 831 | } |
585 | 832 | ||
@@ -633,29 +880,27 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) | |||
633 | unsigned long flags; | 880 | unsigned long flags; |
634 | unsigned long ticks; | 881 | unsigned long ticks; |
635 | 882 | ||
636 | *data1 = 1; /* status of test, default to fail */ | ||
637 | rc = 1; /* default to fail */ | 883 | rc = 1; /* default to fail */ |
638 | 884 | ||
639 | if (netif_running(dev)) | 885 | if (netif_running(dev)) |
640 | pcnet32_close(dev); | 886 | pcnet32_close(dev); |
641 | 887 | ||
642 | spin_lock_irqsave(&lp->lock, flags); | 888 | spin_lock_irqsave(&lp->lock, flags); |
889 | lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */ | ||
890 | |||
891 | numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size)); | ||
643 | 892 | ||
644 | /* Reset the PCNET32 */ | 893 | /* Reset the PCNET32 */ |
645 | lp->a.reset(ioaddr); | 894 | lp->a.reset(ioaddr); |
895 | lp->a.write_csr(ioaddr, CSR4, 0x0915); | ||
646 | 896 | ||
647 | /* switch pcnet32 to 32bit mode */ | 897 | /* switch pcnet32 to 32bit mode */ |
648 | lp->a.write_bcr(ioaddr, 20, 2); | 898 | lp->a.write_bcr(ioaddr, 20, 2); |
649 | 899 | ||
650 | lp->init_block.mode = | ||
651 | le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7); | ||
652 | lp->init_block.filter[0] = 0; | ||
653 | lp->init_block.filter[1] = 0; | ||
654 | |||
655 | /* purge & init rings but don't actually restart */ | 900 | /* purge & init rings but don't actually restart */ |
656 | pcnet32_restart(dev, 0x0000); | 901 | pcnet32_restart(dev, 0x0000); |
657 | 902 | ||
658 | lp->a.write_csr(ioaddr, 0, 0x0004); /* Set STOP bit */ | 903 | lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */ |
659 | 904 | ||
660 | /* Initialize Transmit buffers. */ | 905 | /* Initialize Transmit buffers. */ |
661 | size = data_len + 15; | 906 | size = data_len + 15; |
@@ -697,14 +942,15 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) | |||
697 | } | 942 | } |
698 | } | 943 | } |
699 | 944 | ||
700 | x = a->read_bcr(ioaddr, 32); /* set internal loopback in BSR32 */ | 945 | x = a->read_bcr(ioaddr, 32); /* set internal loopback in BCR32 */ |
701 | x = x | 0x0002; | 946 | a->write_bcr(ioaddr, 32, x | 0x0002); |
702 | a->write_bcr(ioaddr, 32, x); | ||
703 | 947 | ||
704 | lp->a.write_csr(ioaddr, 15, 0x0044); /* set int loopback in CSR15 */ | 948 | /* set int loopback in CSR15 */ |
949 | x = a->read_csr(ioaddr, CSR15) & 0xfffc; | ||
950 | lp->a.write_csr(ioaddr, CSR15, x | 0x0044); | ||
705 | 951 | ||
706 | teststatus = le16_to_cpu(0x8000); | 952 | teststatus = le16_to_cpu(0x8000); |
707 | lp->a.write_csr(ioaddr, 0, 0x0002); /* Set STRT bit */ | 953 | lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */ |
708 | 954 | ||
709 | /* Check status of descriptors */ | 955 | /* Check status of descriptors */ |
710 | for (x = 0; x < numbuffs; x++) { | 956 | for (x = 0; x < numbuffs; x++) { |
@@ -712,7 +958,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) | |||
712 | rmb(); | 958 | rmb(); |
713 | while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) { | 959 | while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) { |
714 | spin_unlock_irqrestore(&lp->lock, flags); | 960 | spin_unlock_irqrestore(&lp->lock, flags); |
715 | mdelay(1); | 961 | msleep(1); |
716 | spin_lock_irqsave(&lp->lock, flags); | 962 | spin_lock_irqsave(&lp->lock, flags); |
717 | rmb(); | 963 | rmb(); |
718 | ticks++; | 964 | ticks++; |
@@ -725,7 +971,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) | |||
725 | } | 971 | } |
726 | } | 972 | } |
727 | 973 | ||
728 | lp->a.write_csr(ioaddr, 0, 0x0004); /* Set STOP bit */ | 974 | lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */ |
729 | wmb(); | 975 | wmb(); |
730 | if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) { | 976 | if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) { |
731 | printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name); | 977 | printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name); |
@@ -758,25 +1004,24 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) | |||
758 | } | 1004 | } |
759 | x++; | 1005 | x++; |
760 | } | 1006 | } |
761 | if (!rc) { | ||
762 | *data1 = 0; | ||
763 | } | ||
764 | 1007 | ||
765 | clean_up: | 1008 | clean_up: |
1009 | *data1 = rc; | ||
766 | pcnet32_purge_tx_ring(dev); | 1010 | pcnet32_purge_tx_ring(dev); |
767 | x = a->read_csr(ioaddr, 15) & 0xFFFF; | ||
768 | a->write_csr(ioaddr, 15, (x & ~0x0044)); /* reset bits 6 and 2 */ | ||
769 | 1011 | ||
770 | x = a->read_bcr(ioaddr, 32); /* reset internal loopback */ | 1012 | x = a->read_csr(ioaddr, CSR15); |
771 | x = x & ~0x0002; | 1013 | a->write_csr(ioaddr, CSR15, (x & ~0x0044)); /* reset bits 6 and 2 */ |
772 | a->write_bcr(ioaddr, 32, x); | ||
773 | 1014 | ||
774 | spin_unlock_irqrestore(&lp->lock, flags); | 1015 | x = a->read_bcr(ioaddr, 32); /* reset internal loopback */ |
1016 | a->write_bcr(ioaddr, 32, (x & ~0x0002)); | ||
775 | 1017 | ||
776 | if (netif_running(dev)) { | 1018 | if (netif_running(dev)) { |
1019 | spin_unlock_irqrestore(&lp->lock, flags); | ||
777 | pcnet32_open(dev); | 1020 | pcnet32_open(dev); |
778 | } else { | 1021 | } else { |
1022 | pcnet32_purge_rx_ring(dev); | ||
779 | lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */ | 1023 | lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */ |
1024 | spin_unlock_irqrestore(&lp->lock, flags); | ||
780 | } | 1025 | } |
781 | 1026 | ||
782 | return (rc); | 1027 | return (rc); |
@@ -839,6 +1084,47 @@ static int pcnet32_phys_id(struct net_device *dev, u32 data) | |||
839 | return 0; | 1084 | return 0; |
840 | } | 1085 | } |
841 | 1086 | ||
1087 | /* | ||
1088 | * lp->lock must be held. | ||
1089 | */ | ||
1090 | static int pcnet32_suspend(struct net_device *dev, unsigned long *flags, | ||
1091 | int can_sleep) | ||
1092 | { | ||
1093 | int csr5; | ||
1094 | struct pcnet32_private *lp = dev->priv; | ||
1095 | struct pcnet32_access *a = &lp->a; | ||
1096 | ulong ioaddr = dev->base_addr; | ||
1097 | int ticks; | ||
1098 | |||
1099 | /* really old chips have to be stopped. */ | ||
1100 | if (lp->chip_version < PCNET32_79C970A) | ||
1101 | return 0; | ||
1102 | |||
1103 | /* set SUSPEND (SPND) - CSR5 bit 0 */ | ||
1104 | csr5 = a->read_csr(ioaddr, CSR5); | ||
1105 | a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND); | ||
1106 | |||
1107 | /* poll waiting for bit to be set */ | ||
1108 | ticks = 0; | ||
1109 | while (!(a->read_csr(ioaddr, CSR5) & CSR5_SUSPEND)) { | ||
1110 | spin_unlock_irqrestore(&lp->lock, *flags); | ||
1111 | if (can_sleep) | ||
1112 | msleep(1); | ||
1113 | else | ||
1114 | mdelay(1); | ||
1115 | spin_lock_irqsave(&lp->lock, *flags); | ||
1116 | ticks++; | ||
1117 | if (ticks > 200) { | ||
1118 | if (netif_msg_hw(lp)) | ||
1119 | printk(KERN_DEBUG | ||
1120 | "%s: Error getting into suspend!\n", | ||
1121 | dev->name); | ||
1122 | return 0; | ||
1123 | } | ||
1124 | } | ||
1125 | return 1; | ||
1126 | } | ||
1127 | |||
842 | #define PCNET32_REGS_PER_PHY 32 | 1128 | #define PCNET32_REGS_PER_PHY 32 |
843 | #define PCNET32_MAX_PHYS 32 | 1129 | #define PCNET32_MAX_PHYS 32 |
844 | static int pcnet32_get_regs_len(struct net_device *dev) | 1130 | static int pcnet32_get_regs_len(struct net_device *dev) |
@@ -857,32 +1143,13 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, | |||
857 | struct pcnet32_private *lp = dev->priv; | 1143 | struct pcnet32_private *lp = dev->priv; |
858 | struct pcnet32_access *a = &lp->a; | 1144 | struct pcnet32_access *a = &lp->a; |
859 | ulong ioaddr = dev->base_addr; | 1145 | ulong ioaddr = dev->base_addr; |
860 | int ticks; | ||
861 | unsigned long flags; | 1146 | unsigned long flags; |
862 | 1147 | ||
863 | spin_lock_irqsave(&lp->lock, flags); | 1148 | spin_lock_irqsave(&lp->lock, flags); |
864 | 1149 | ||
865 | csr0 = a->read_csr(ioaddr, 0); | 1150 | csr0 = a->read_csr(ioaddr, CSR0); |
866 | if (!(csr0 & 0x0004)) { /* If not stopped */ | 1151 | if (!(csr0 & CSR0_STOP)) /* If not stopped */ |
867 | /* set SUSPEND (SPND) - CSR5 bit 0 */ | 1152 | pcnet32_suspend(dev, &flags, 1); |
868 | a->write_csr(ioaddr, 5, 0x0001); | ||
869 | |||
870 | /* poll waiting for bit to be set */ | ||
871 | ticks = 0; | ||
872 | while (!(a->read_csr(ioaddr, 5) & 0x0001)) { | ||
873 | spin_unlock_irqrestore(&lp->lock, flags); | ||
874 | mdelay(1); | ||
875 | spin_lock_irqsave(&lp->lock, flags); | ||
876 | ticks++; | ||
877 | if (ticks > 200) { | ||
878 | if (netif_msg_hw(lp)) | ||
879 | printk(KERN_DEBUG | ||
880 | "%s: Error getting into suspend!\n", | ||
881 | dev->name); | ||
882 | break; | ||
883 | } | ||
884 | } | ||
885 | } | ||
886 | 1153 | ||
887 | /* read address PROM */ | 1154 | /* read address PROM */ |
888 | for (i = 0; i < 16; i += 2) | 1155 | for (i = 0; i < 16; i += 2) |
@@ -919,9 +1186,12 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, | |||
919 | } | 1186 | } |
920 | } | 1187 | } |
921 | 1188 | ||
922 | if (!(csr0 & 0x0004)) { /* If not stopped */ | 1189 | if (!(csr0 & CSR0_STOP)) { /* If not stopped */ |
1190 | int csr5; | ||
1191 | |||
923 | /* clear SUSPEND (SPND) - CSR5 bit 0 */ | 1192 | /* clear SUSPEND (SPND) - CSR5 bit 0 */ |
924 | a->write_csr(ioaddr, 5, 0x0000); | 1193 | csr5 = a->read_csr(ioaddr, CSR5); |
1194 | a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND)); | ||
925 | } | 1195 | } |
926 | 1196 | ||
927 | spin_unlock_irqrestore(&lp->lock, flags); | 1197 | spin_unlock_irqrestore(&lp->lock, flags); |
@@ -952,7 +1222,7 @@ static struct ethtool_ops pcnet32_ethtool_ops = { | |||
952 | /* only probes for non-PCI devices, the rest are handled by | 1222 | /* only probes for non-PCI devices, the rest are handled by |
953 | * pci_register_driver via pcnet32_probe_pci */ | 1223 | * pci_register_driver via pcnet32_probe_pci */ |
954 | 1224 | ||
955 | static void __devinit pcnet32_probe_vlbus(void) | 1225 | static void __devinit pcnet32_probe_vlbus(unsigned int *pcnet32_portlist) |
956 | { | 1226 | { |
957 | unsigned int *port, ioaddr; | 1227 | unsigned int *port, ioaddr; |
958 | 1228 | ||
@@ -1268,6 +1538,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) | |||
1268 | lp->mii_if.reg_num_mask = 0x1f; | 1538 | lp->mii_if.reg_num_mask = 0x1f; |
1269 | lp->dxsuflo = dxsuflo; | 1539 | lp->dxsuflo = dxsuflo; |
1270 | lp->mii = mii; | 1540 | lp->mii = mii; |
1541 | lp->chip_version = chip_version; | ||
1271 | lp->msg_enable = pcnet32_debug; | 1542 | lp->msg_enable = pcnet32_debug; |
1272 | if ((cards_found >= MAX_UNITS) | 1543 | if ((cards_found >= MAX_UNITS) |
1273 | || (options[cards_found] > sizeof(options_mapping))) | 1544 | || (options[cards_found] > sizeof(options_mapping))) |
@@ -1436,7 +1707,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, char *name) | |||
1436 | lp->tx_ring_size, | 1707 | lp->tx_ring_size, |
1437 | &lp->tx_ring_dma_addr); | 1708 | &lp->tx_ring_dma_addr); |
1438 | if (lp->tx_ring == NULL) { | 1709 | if (lp->tx_ring == NULL) { |
1439 | if (pcnet32_debug & NETIF_MSG_DRV) | 1710 | if (netif_msg_drv(lp)) |
1440 | printk("\n" KERN_ERR PFX | 1711 | printk("\n" KERN_ERR PFX |
1441 | "%s: Consistent memory allocation failed.\n", | 1712 | "%s: Consistent memory allocation failed.\n", |
1442 | name); | 1713 | name); |
@@ -1448,52 +1719,48 @@ static int pcnet32_alloc_ring(struct net_device *dev, char *name) | |||
1448 | lp->rx_ring_size, | 1719 | lp->rx_ring_size, |
1449 | &lp->rx_ring_dma_addr); | 1720 | &lp->rx_ring_dma_addr); |
1450 | if (lp->rx_ring == NULL) { | 1721 | if (lp->rx_ring == NULL) { |
1451 | if (pcnet32_debug & NETIF_MSG_DRV) | 1722 | if (netif_msg_drv(lp)) |
1452 | printk("\n" KERN_ERR PFX | 1723 | printk("\n" KERN_ERR PFX |
1453 | "%s: Consistent memory allocation failed.\n", | 1724 | "%s: Consistent memory allocation failed.\n", |
1454 | name); | 1725 | name); |
1455 | return -ENOMEM; | 1726 | return -ENOMEM; |
1456 | } | 1727 | } |
1457 | 1728 | ||
1458 | lp->tx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->tx_ring_size, | 1729 | lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t), |
1459 | GFP_ATOMIC); | 1730 | GFP_ATOMIC); |
1460 | if (!lp->tx_dma_addr) { | 1731 | if (!lp->tx_dma_addr) { |
1461 | if (pcnet32_debug & NETIF_MSG_DRV) | 1732 | if (netif_msg_drv(lp)) |
1462 | printk("\n" KERN_ERR PFX | 1733 | printk("\n" KERN_ERR PFX |
1463 | "%s: Memory allocation failed.\n", name); | 1734 | "%s: Memory allocation failed.\n", name); |
1464 | return -ENOMEM; | 1735 | return -ENOMEM; |
1465 | } | 1736 | } |
1466 | memset(lp->tx_dma_addr, 0, sizeof(dma_addr_t) * lp->tx_ring_size); | ||
1467 | 1737 | ||
1468 | lp->rx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->rx_ring_size, | 1738 | lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t), |
1469 | GFP_ATOMIC); | 1739 | GFP_ATOMIC); |
1470 | if (!lp->rx_dma_addr) { | 1740 | if (!lp->rx_dma_addr) { |
1471 | if (pcnet32_debug & NETIF_MSG_DRV) | 1741 | if (netif_msg_drv(lp)) |
1472 | printk("\n" KERN_ERR PFX | 1742 | printk("\n" KERN_ERR PFX |
1473 | "%s: Memory allocation failed.\n", name); | 1743 | "%s: Memory allocation failed.\n", name); |
1474 | return -ENOMEM; | 1744 | return -ENOMEM; |
1475 | } | 1745 | } |
1476 | memset(lp->rx_dma_addr, 0, sizeof(dma_addr_t) * lp->rx_ring_size); | ||
1477 | 1746 | ||
1478 | lp->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->tx_ring_size, | 1747 | lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *), |
1479 | GFP_ATOMIC); | 1748 | GFP_ATOMIC); |
1480 | if (!lp->tx_skbuff) { | 1749 | if (!lp->tx_skbuff) { |
1481 | if (pcnet32_debug & NETIF_MSG_DRV) | 1750 | if (netif_msg_drv(lp)) |
1482 | printk("\n" KERN_ERR PFX | 1751 | printk("\n" KERN_ERR PFX |
1483 | "%s: Memory allocation failed.\n", name); | 1752 | "%s: Memory allocation failed.\n", name); |
1484 | return -ENOMEM; | 1753 | return -ENOMEM; |
1485 | } | 1754 | } |
1486 | memset(lp->tx_skbuff, 0, sizeof(struct sk_buff *) * lp->tx_ring_size); | ||
1487 | 1755 | ||
1488 | lp->rx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->rx_ring_size, | 1756 | lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *), |
1489 | GFP_ATOMIC); | 1757 | GFP_ATOMIC); |
1490 | if (!lp->rx_skbuff) { | 1758 | if (!lp->rx_skbuff) { |
1491 | if (pcnet32_debug & NETIF_MSG_DRV) | 1759 | if (netif_msg_drv(lp)) |
1492 | printk("\n" KERN_ERR PFX | 1760 | printk("\n" KERN_ERR PFX |
1493 | "%s: Memory allocation failed.\n", name); | 1761 | "%s: Memory allocation failed.\n", name); |
1494 | return -ENOMEM; | 1762 | return -ENOMEM; |
1495 | } | 1763 | } |
1496 | memset(lp->rx_skbuff, 0, sizeof(struct sk_buff *) * lp->rx_ring_size); | ||
1497 | 1764 | ||
1498 | return 0; | 1765 | return 0; |
1499 | } | 1766 | } |
@@ -1582,10 +1849,7 @@ static int pcnet32_open(struct net_device *dev) | |||
1582 | val |= 2; | 1849 | val |= 2; |
1583 | } else if (lp->options & PCNET32_PORT_ASEL) { | 1850 | } else if (lp->options & PCNET32_PORT_ASEL) { |
1584 | /* workaround of xSeries250, turn on for 79C975 only */ | 1851 | /* workaround of xSeries250, turn on for 79C975 only */ |
1585 | i = ((lp->a.read_csr(ioaddr, 88) | | 1852 | if (lp->chip_version == 0x2627) |
1586 | (lp->a. | ||
1587 | read_csr(ioaddr, 89) << 16)) >> 12) & 0xffff; | ||
1588 | if (i == 0x2627) | ||
1589 | val |= 3; | 1853 | val |= 3; |
1590 | } | 1854 | } |
1591 | lp->a.write_bcr(ioaddr, 9, val); | 1855 | lp->a.write_bcr(ioaddr, 9, val); |
@@ -1729,9 +1993,11 @@ static int pcnet32_open(struct net_device *dev) | |||
1729 | 1993 | ||
1730 | netif_start_queue(dev); | 1994 | netif_start_queue(dev); |
1731 | 1995 | ||
1732 | /* Print the link status and start the watchdog */ | 1996 | if (lp->chip_version >= PCNET32_79C970A) { |
1733 | pcnet32_check_media(dev, 1); | 1997 | /* Print the link status and start the watchdog */ |
1734 | mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT); | 1998 | pcnet32_check_media(dev, 1); |
1999 | mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT); | ||
2000 | } | ||
1735 | 2001 | ||
1736 | i = 0; | 2002 | i = 0; |
1737 | while (i++ < 100) | 2003 | while (i++ < 100) |
@@ -1757,16 +2023,7 @@ static int pcnet32_open(struct net_device *dev) | |||
1757 | 2023 | ||
1758 | err_free_ring: | 2024 | err_free_ring: |
1759 | /* free any allocated skbuffs */ | 2025 | /* free any allocated skbuffs */ |
1760 | for (i = 0; i < lp->rx_ring_size; i++) { | 2026 | pcnet32_purge_rx_ring(dev); |
1761 | lp->rx_ring[i].status = 0; | ||
1762 | if (lp->rx_skbuff[i]) { | ||
1763 | pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], | ||
1764 | PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); | ||
1765 | dev_kfree_skb(lp->rx_skbuff[i]); | ||
1766 | } | ||
1767 | lp->rx_skbuff[i] = NULL; | ||
1768 | lp->rx_dma_addr[i] = 0; | ||
1769 | } | ||
1770 | 2027 | ||
1771 | /* | 2028 | /* |
1772 | * Switch back to 16bit mode to avoid problems with dumb | 2029 | * Switch back to 16bit mode to avoid problems with dumb |
@@ -2348,7 +2605,6 @@ static int pcnet32_close(struct net_device *dev) | |||
2348 | { | 2605 | { |
2349 | unsigned long ioaddr = dev->base_addr; | 2606 | unsigned long ioaddr = dev->base_addr; |
2350 | struct pcnet32_private *lp = dev->priv; | 2607 | struct pcnet32_private *lp = dev->priv; |
2351 | int i; | ||
2352 | unsigned long flags; | 2608 | unsigned long flags; |
2353 | 2609 | ||
2354 | del_timer_sync(&lp->watchdog_timer); | 2610 | del_timer_sync(&lp->watchdog_timer); |
@@ -2379,31 +2635,8 @@ static int pcnet32_close(struct net_device *dev) | |||
2379 | 2635 | ||
2380 | spin_lock_irqsave(&lp->lock, flags); | 2636 | spin_lock_irqsave(&lp->lock, flags); |
2381 | 2637 | ||
2382 | /* free all allocated skbuffs */ | 2638 | pcnet32_purge_rx_ring(dev); |
2383 | for (i = 0; i < lp->rx_ring_size; i++) { | 2639 | pcnet32_purge_tx_ring(dev); |
2384 | lp->rx_ring[i].status = 0; | ||
2385 | wmb(); /* Make sure adapter sees owner change */ | ||
2386 | if (lp->rx_skbuff[i]) { | ||
2387 | pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], | ||
2388 | PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); | ||
2389 | dev_kfree_skb(lp->rx_skbuff[i]); | ||
2390 | } | ||
2391 | lp->rx_skbuff[i] = NULL; | ||
2392 | lp->rx_dma_addr[i] = 0; | ||
2393 | } | ||
2394 | |||
2395 | for (i = 0; i < lp->tx_ring_size; i++) { | ||
2396 | lp->tx_ring[i].status = 0; /* CPU owns buffer */ | ||
2397 | wmb(); /* Make sure adapter sees owner change */ | ||
2398 | if (lp->tx_skbuff[i]) { | ||
2399 | pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i], | ||
2400 | lp->tx_skbuff[i]->len, | ||
2401 | PCI_DMA_TODEVICE); | ||
2402 | dev_kfree_skb(lp->tx_skbuff[i]); | ||
2403 | } | ||
2404 | lp->tx_skbuff[i] = NULL; | ||
2405 | lp->tx_dma_addr[i] = 0; | ||
2406 | } | ||
2407 | 2640 | ||
2408 | spin_unlock_irqrestore(&lp->lock, flags); | 2641 | spin_unlock_irqrestore(&lp->lock, flags); |
2409 | 2642 | ||
@@ -2433,6 +2666,7 @@ static void pcnet32_load_multicast(struct net_device *dev) | |||
2433 | volatile struct pcnet32_init_block *ib = &lp->init_block; | 2666 | volatile struct pcnet32_init_block *ib = &lp->init_block; |
2434 | volatile u16 *mcast_table = (u16 *) & ib->filter; | 2667 | volatile u16 *mcast_table = (u16 *) & ib->filter; |
2435 | struct dev_mc_list *dmi = dev->mc_list; | 2668 | struct dev_mc_list *dmi = dev->mc_list; |
2669 | unsigned long ioaddr = dev->base_addr; | ||
2436 | char *addrs; | 2670 | char *addrs; |
2437 | int i; | 2671 | int i; |
2438 | u32 crc; | 2672 | u32 crc; |
@@ -2441,6 +2675,10 @@ static void pcnet32_load_multicast(struct net_device *dev) | |||
2441 | if (dev->flags & IFF_ALLMULTI) { | 2675 | if (dev->flags & IFF_ALLMULTI) { |
2442 | ib->filter[0] = 0xffffffff; | 2676 | ib->filter[0] = 0xffffffff; |
2443 | ib->filter[1] = 0xffffffff; | 2677 | ib->filter[1] = 0xffffffff; |
2678 | lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff); | ||
2679 | lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff); | ||
2680 | lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff); | ||
2681 | lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff); | ||
2444 | return; | 2682 | return; |
2445 | } | 2683 | } |
2446 | /* clear the multicast filter */ | 2684 | /* clear the multicast filter */ |
@@ -2462,6 +2700,9 @@ static void pcnet32_load_multicast(struct net_device *dev) | |||
2462 | le16_to_cpu(le16_to_cpu(mcast_table[crc >> 4]) | | 2700 | le16_to_cpu(le16_to_cpu(mcast_table[crc >> 4]) | |
2463 | (1 << (crc & 0xf))); | 2701 | (1 << (crc & 0xf))); |
2464 | } | 2702 | } |
2703 | for (i = 0; i < 4; i++) | ||
2704 | lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i, | ||
2705 | le16_to_cpu(mcast_table[i])); | ||
2465 | return; | 2706 | return; |
2466 | } | 2707 | } |
2467 | 2708 | ||
@@ -2472,8 +2713,11 @@ static void pcnet32_set_multicast_list(struct net_device *dev) | |||
2472 | { | 2713 | { |
2473 | unsigned long ioaddr = dev->base_addr, flags; | 2714 | unsigned long ioaddr = dev->base_addr, flags; |
2474 | struct pcnet32_private *lp = dev->priv; | 2715 | struct pcnet32_private *lp = dev->priv; |
2716 | int csr15, suspended; | ||
2475 | 2717 | ||
2476 | spin_lock_irqsave(&lp->lock, flags); | 2718 | spin_lock_irqsave(&lp->lock, flags); |
2719 | suspended = pcnet32_suspend(dev, &flags, 0); | ||
2720 | csr15 = lp->a.read_csr(ioaddr, CSR15); | ||
2477 | if (dev->flags & IFF_PROMISC) { | 2721 | if (dev->flags & IFF_PROMISC) { |
2478 | /* Log any net taps. */ | 2722 | /* Log any net taps. */ |
2479 | if (netif_msg_hw(lp)) | 2723 | if (netif_msg_hw(lp)) |
@@ -2482,15 +2726,24 @@ static void pcnet32_set_multicast_list(struct net_device *dev) | |||
2482 | lp->init_block.mode = | 2726 | lp->init_block.mode = |
2483 | le16_to_cpu(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) << | 2727 | le16_to_cpu(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) << |
2484 | 7); | 2728 | 7); |
2729 | lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000); | ||
2485 | } else { | 2730 | } else { |
2486 | lp->init_block.mode = | 2731 | lp->init_block.mode = |
2487 | le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7); | 2732 | le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7); |
2733 | lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff); | ||
2488 | pcnet32_load_multicast(dev); | 2734 | pcnet32_load_multicast(dev); |
2489 | } | 2735 | } |
2490 | 2736 | ||
2491 | lp->a.write_csr(ioaddr, 0, 0x0004); /* Temporarily stop the lance. */ | 2737 | if (suspended) { |
2492 | pcnet32_restart(dev, 0x0042); /* Resume normal operation */ | 2738 | int csr5; |
2493 | netif_wake_queue(dev); | 2739 | /* clear SUSPEND (SPND) - CSR5 bit 0 */ |
2740 | csr5 = lp->a.read_csr(ioaddr, CSR5); | ||
2741 | lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND)); | ||
2742 | } else { | ||
2743 | lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); | ||
2744 | pcnet32_restart(dev, CSR0_NORMAL); | ||
2745 | netif_wake_queue(dev); | ||
2746 | } | ||
2494 | 2747 | ||
2495 | spin_unlock_irqrestore(&lp->lock, flags); | 2748 | spin_unlock_irqrestore(&lp->lock, flags); |
2496 | } | 2749 | } |
@@ -2730,7 +2983,7 @@ static int __init pcnet32_init_module(void) | |||
2730 | 2983 | ||
2731 | /* should we find any remaining VLbus devices ? */ | 2984 | /* should we find any remaining VLbus devices ? */ |
2732 | if (pcnet32vlb) | 2985 | if (pcnet32vlb) |
2733 | pcnet32_probe_vlbus(); | 2986 | pcnet32_probe_vlbus(pcnet32_portlist); |
2734 | 2987 | ||
2735 | if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE)) | 2988 | if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE)) |
2736 | printk(KERN_INFO PFX "%d cards_found.\n", cards_found); | 2989 | printk(KERN_INFO PFX "%d cards_found.\n", cards_found); |
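The pcnet32 device table near the top of the diff above is rewritten with the PCI_DEVICE() helper, which fills in the vendor/device fields and wildcards subvendor/subdevice, while class matching is expressed with designated initializers. A minimal sketch of the same table style, with an illustrative table name:

	#include <linux/pci.h>
	#include <linux/module.h>

	static struct pci_device_id example_pci_tbl[] = {
		/* match on vendor/device only; subsystem IDs default to PCI_ANY_ID */
		{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), },
		/* additionally require an Ethernet class code */
		{ PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE),
		  .class = (PCI_CLASS_NETWORK_ETHERNET << 8), .class_mask = 0xffff00, },
		{ }	/* terminate list */
	};
	MODULE_DEVICE_TABLE(pci, example_pci_tbl);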
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 2ba6d3a40e2e..b79ec0d7480f 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig | |||
@@ -56,5 +56,22 @@ config SMSC_PHY | |||
56 | ---help--- | 56 | ---help--- |
57 | Currently supports the LAN83C185 PHY | 57 | Currently supports the LAN83C185 PHY |
58 | 58 | ||
59 | config FIXED_PHY | ||
60 | tristate "Drivers for PHY emulation on fixed speed/link" | ||
61 | depends on PHYLIB | ||
62 | ---help--- | ||
63 | Adds a driver to the PHY layer to cover boards that do not have any PHY bound, | ||
64 | but with the ability to manipulate speed/link in software. The relevant MII | ||
65 | speed/duplex parameters can be handled in a user-specified function. | ||
66 | Currently tested with mpc866ads. | ||
67 | |||
68 | config FIXED_MII_10_FDX | ||
69 | bool "Emulation for 10M Fdx fixed PHY behavior" | ||
70 | depends on FIXED_PHY | ||
71 | |||
72 | config FIXED_MII_100_FDX | ||
73 | bool "Emulation for 100M Fdx fixed PHY behavior" | ||
74 | depends on FIXED_PHY | ||
75 | |||
59 | endmenu | 76 | endmenu |
60 | 77 | ||
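With the Kconfig additions above, a board that needs the emulated PHY enables it together with PHYLIB. A sketch of the relevant .config fragment (option names are taken from the hunk above; the chosen values are only an example):

    # Example only: 100M full-duplex fixed PHY emulation
    CONFIG_PHYLIB=y
    CONFIG_FIXED_PHY=y
    CONFIG_FIXED_MII_100_FDX=y
    # CONFIG_FIXED_MII_10_FDX is not set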
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index a00e61942525..320f8323123f 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile | |||
@@ -10,3 +10,4 @@ obj-$(CONFIG_LXT_PHY) += lxt.o | |||
10 | obj-$(CONFIG_QSEMI_PHY) += qsemi.o | 10 | obj-$(CONFIG_QSEMI_PHY) += qsemi.o |
11 | obj-$(CONFIG_SMSC_PHY) += smsc.o | 11 | obj-$(CONFIG_SMSC_PHY) += smsc.o |
12 | obj-$(CONFIG_VITESSE_PHY) += vitesse.o | 12 | obj-$(CONFIG_VITESSE_PHY) += vitesse.o |
13 | obj-$(CONFIG_FIXED_PHY) += fixed.o | ||
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c index 3efb715c28dc..ae60e6e4107c 100644 --- a/drivers/net/phy/cicada.c +++ b/drivers/net/phy/cicada.c | |||
@@ -103,7 +103,22 @@ static int cis820x_config_intr(struct phy_device *phydev) | |||
103 | return err; | 103 | return err; |
104 | } | 104 | } |
105 | 105 | ||
106 | /* Cicada 820x */ | 106 | /* Cicada 8201, a.k.a Vitesse VSC8201 */ |
107 | static struct phy_driver cis8201_driver = { | ||
108 | .phy_id = 0x000fc410, | ||
109 | .name = "Cicada Cis8201", | ||
110 | .phy_id_mask = 0x000ffff0, | ||
111 | .features = PHY_GBIT_FEATURES, | ||
112 | .flags = PHY_HAS_INTERRUPT, | ||
113 | .config_init = &cis820x_config_init, | ||
114 | .config_aneg = &genphy_config_aneg, | ||
115 | .read_status = &genphy_read_status, | ||
116 | .ack_interrupt = &cis820x_ack_interrupt, | ||
117 | .config_intr = &cis820x_config_intr, | ||
118 | .driver = { .owner = THIS_MODULE,}, | ||
119 | }; | ||
120 | |||
121 | /* Cicada 8204 */ | ||
107 | static struct phy_driver cis8204_driver = { | 122 | static struct phy_driver cis8204_driver = { |
108 | .phy_id = 0x000fc440, | 123 | .phy_id = 0x000fc440, |
109 | .name = "Cicada Cis8204", | 124 | .name = "Cicada Cis8204", |
@@ -118,15 +133,30 @@ static struct phy_driver cis8204_driver = { | |||
118 | .driver = { .owner = THIS_MODULE,}, | 133 | .driver = { .owner = THIS_MODULE,}, |
119 | }; | 134 | }; |
120 | 135 | ||
121 | static int __init cis8204_init(void) | 136 | static int __init cicada_init(void) |
122 | { | 137 | { |
123 | return phy_driver_register(&cis8204_driver); | 138 | int ret; |
139 | |||
140 | ret = phy_driver_register(&cis8204_driver); | ||
141 | if (ret) | ||
142 | goto err1; | ||
143 | |||
144 | ret = phy_driver_register(&cis8201_driver); | ||
145 | if (ret) | ||
146 | goto err2; | ||
147 | return 0; | ||
148 | |||
149 | err2: | ||
150 | phy_driver_unregister(&cis8204_driver); | ||
151 | err1: | ||
152 | return ret; | ||
124 | } | 153 | } |
125 | 154 | ||
126 | static void __exit cis8204_exit(void) | 155 | static void __exit cicada_exit(void) |
127 | { | 156 | { |
128 | phy_driver_unregister(&cis8204_driver); | 157 | phy_driver_unregister(&cis8204_driver); |
158 | phy_driver_unregister(&cis8201_driver); | ||
129 | } | 159 | } |
130 | 160 | ||
131 | module_init(cis8204_init); | 161 | module_init(cicada_init); |
132 | module_exit(cis8204_exit); | 162 | module_exit(cicada_exit); |
diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c new file mode 100644 index 000000000000..341036df4710 --- /dev/null +++ b/drivers/net/phy/fixed.c | |||
@@ -0,0 +1,358 @@ | |||
1 | /* | ||
2 | * drivers/net/phy/fixed.c | ||
3 | * | ||
4 | * Driver for fixed PHYs, where the transceiver operates in a single fixed mode. | ||
5 | * | ||
6 | * Author: Vitaly Bordug | ||
7 | * | ||
8 | * Copyright (c) 2006 MontaVista Software, Inc. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of the GNU General Public License as published by the | ||
12 | * Free Software Foundation; either version 2 of the License, or (at your | ||
13 | * option) any later version. | ||
14 | * | ||
15 | */ | ||
16 | #include <linux/config.h> | ||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/sched.h> | ||
19 | #include <linux/string.h> | ||
20 | #include <linux/errno.h> | ||
21 | #include <linux/unistd.h> | ||
22 | #include <linux/slab.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <linux/delay.h> | ||
26 | #include <linux/netdevice.h> | ||
27 | #include <linux/etherdevice.h> | ||
28 | #include <linux/skbuff.h> | ||
29 | #include <linux/spinlock.h> | ||
30 | #include <linux/mm.h> | ||
31 | #include <linux/module.h> | ||
32 | #include <linux/mii.h> | ||
33 | #include <linux/ethtool.h> | ||
34 | #include <linux/phy.h> | ||
35 | |||
36 | #include <asm/io.h> | ||
37 | #include <asm/irq.h> | ||
38 | #include <asm/uaccess.h> | ||
39 | |||
40 | #define MII_REGS_NUM 7 | ||
41 | |||
42 | /* | ||
43 | The idea is to emulate normal PHY behaviour by responding to MII | ||
44 | register reads with pre-defined values, so that the read_status hook | ||
45 | can pick up all the needed info. | ||
46 | */ | ||
47 | |||
48 | struct fixed_phy_status { | ||
49 | u8 link; | ||
50 | u16 speed; | ||
51 | u8 duplex; | ||
52 | }; | ||
53 | |||
54 | /*----------------------------------------------------------------------------- | ||
55 | * Private information holder for mii_bus | ||
56 | *-----------------------------------------------------------------------------*/ | ||
57 | struct fixed_info { | ||
58 | u16 *regs; | ||
59 | u8 regs_num; | ||
60 | struct fixed_phy_status phy_status; | ||
61 | struct phy_device *phydev; /* pointer to the container */ | ||
62 | /* link & speed cb */ | ||
63 | int(*link_update)(struct net_device*, struct fixed_phy_status*); | ||
64 | |||
65 | }; | ||
66 | |||
67 | /*----------------------------------------------------------------------------- | ||
68 | * If something unusual needs to be done with the link/speed, | ||
69 | * the network driver can assign a function to implement it. | ||
70 | * May be useful for PHYs that need to be software-driven. | ||
71 | *-----------------------------------------------------------------------------*/ | ||
72 | int fixed_mdio_set_link_update(struct phy_device* phydev, | ||
73 | int(*link_update)(struct net_device*, struct fixed_phy_status*)) | ||
74 | { | ||
75 | struct fixed_info *fixed; | ||
76 | |||
77 | if(link_update == NULL) | ||
78 | return -EINVAL; | ||
79 | |||
80 | if(phydev) { | ||
81 | if(phydev->bus) { | ||
82 | fixed = phydev->bus->priv; | ||
83 | fixed->link_update = link_update; | ||
84 | return 0; | ||
85 | } | ||
86 | } | ||
87 | return -EINVAL; | ||
88 | } | ||
89 | EXPORT_SYMBOL(fixed_mdio_set_link_update); | ||
90 | |||
91 | /*----------------------------------------------------------------------------- | ||
92 | * This is used for updating internal mii regs from the status | ||
93 | *-----------------------------------------------------------------------------*/ | ||
94 | static int fixed_mdio_update_regs(struct fixed_info *fixed) | ||
95 | { | ||
96 | u16 *regs = fixed->regs; | ||
97 | u16 bmsr = 0; | ||
98 | u16 bmcr = 0; | ||
99 | |||
100 | if(!regs) { | ||
101 | printk(KERN_ERR "%s: regs not set up\n", __FUNCTION__); | ||
102 | return -EINVAL; | ||
103 | } | ||
104 | |||
105 | if(fixed->phy_status.link) | ||
106 | bmsr |= BMSR_LSTATUS; | ||
107 | |||
108 | if(fixed->phy_status.duplex) { | ||
109 | bmcr |= BMCR_FULLDPLX; | ||
110 | |||
111 | switch ( fixed->phy_status.speed ) { | ||
112 | case 100: | ||
113 | bmsr |= BMSR_100FULL; | ||
114 | bmcr |= BMCR_SPEED100; | ||
115 | break; | ||
116 | |||
117 | case 10: | ||
118 | bmsr |= BMSR_10FULL; | ||
119 | break; | ||
120 | } | ||
121 | } else { | ||
122 | switch ( fixed->phy_status.speed ) { | ||
123 | case 100: | ||
124 | bmsr |= BMSR_100HALF; | ||
125 | bmcr |= BMCR_SPEED100; | ||
126 | break; | ||
127 | |||
128 | case 10: | ||
129 | bmsr |= BMSR_10HALF; | ||
130 | break; | ||
131 | } | ||
132 | } | ||
133 | |||
134 | regs[MII_BMCR] = bmcr; | ||
135 | regs[MII_BMSR] = bmsr | 0x800; /*we are always capable of 10 hdx*/ | ||
136 | |||
137 | return 0; | ||
138 | } | ||
139 | |||
140 | static int fixed_mii_read(struct mii_bus *bus, int phy_id, int location) | ||
141 | { | ||
142 | struct fixed_info *fixed = bus->priv; | ||
143 | |||
144 | /* if user has registered link update callback, use it */ | ||
145 | if(fixed->phydev) | ||
146 | if(fixed->phydev->attached_dev) { | ||
147 | if(fixed->link_update) { | ||
148 | fixed->link_update(fixed->phydev->attached_dev, | ||
149 | &fixed->phy_status); | ||
150 | fixed_mdio_update_regs(fixed); | ||
151 | } | ||
152 | } | ||
153 | |||
154 | if ((unsigned int)location >= fixed->regs_num) | ||
155 | return -1; | ||
156 | return fixed->regs[location]; | ||
157 | } | ||
158 | |||
159 | static int fixed_mii_write(struct mii_bus *bus, int phy_id, int location, u16 val) | ||
160 | { | ||
161 | /* do nothing for now*/ | ||
162 | return 0; | ||
163 | } | ||
164 | |||
165 | static int fixed_mii_reset(struct mii_bus *bus) | ||
166 | { | ||
167 | /*nothing here - no way/need to reset it*/ | ||
168 | return 0; | ||
169 | } | ||
170 | |||
171 | static int fixed_config_aneg(struct phy_device *phydev) | ||
172 | { | ||
173 | /* :TODO:03/13/2006 09:45:37 PM:: | ||
174 | The full autoneg functionality could be emulated, | ||
175 | but there is no need for anything here for now. | ||
176 | */ | ||
177 | return 0; | ||
178 | } | ||
179 | |||
180 | /*----------------------------------------------------------------------------- | ||
181 | * the manual bind will do the magic - with phy_id_mask == 0 | ||
182 | * match will never return true... | ||
183 | *-----------------------------------------------------------------------------*/ | ||
184 | static struct phy_driver fixed_mdio_driver = { | ||
185 | .name = "Fixed PHY", | ||
186 | .features = PHY_BASIC_FEATURES, | ||
187 | .config_aneg = fixed_config_aneg, | ||
188 | .read_status = genphy_read_status, | ||
189 | .driver = { .owner = THIS_MODULE,}, | ||
190 | }; | ||
191 | |||
192 | /*----------------------------------------------------------------------------- | ||
193 | * This function creates all the necessary structures, binds | ||
194 | * the fixed PHY driver and registers it all on the mdio_bus_type. | ||
195 | * speed is either 10 or 100, duplex is boolean. | ||
196 | * number is used to create multiple fixed PHYs, so that several devices can | ||
197 | * utilize them simultaneously. | ||
198 | *-----------------------------------------------------------------------------*/ | ||
199 | static int fixed_mdio_register_device(int number, int speed, int duplex) | ||
200 | { | ||
201 | struct mii_bus *new_bus; | ||
202 | struct fixed_info *fixed; | ||
203 | struct phy_device *phydev; | ||
204 | int err = 0; | ||
205 | |||
206 | struct device* dev = kzalloc(sizeof(struct device), GFP_KERNEL); | ||
207 | |||
208 | if (NULL == dev) | ||
209 | return -ENOMEM; | ||
210 | |||
211 | new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL); | ||
212 | |||
213 | if (NULL == new_bus) { | ||
214 | kfree(dev); | ||
215 | return -ENOMEM; | ||
216 | } | ||
217 | fixed = kzalloc(sizeof(struct fixed_info), GFP_KERNEL); | ||
218 | |||
219 | if (NULL == fixed) { | ||
220 | kfree(dev); | ||
221 | kfree(new_bus); | ||
222 | return -ENOMEM; | ||
223 | } | ||
224 | |||
225 | fixed->regs = kzalloc(MII_REGS_NUM*sizeof(int), GFP_KERNEL); | ||
226 | fixed->regs_num = MII_REGS_NUM; | ||
227 | fixed->phy_status.speed = speed; | ||
228 | fixed->phy_status.duplex = duplex; | ||
229 | fixed->phy_status.link = 1; | ||
230 | |||
231 | new_bus->name = "Fixed MII Bus"; | ||
232 | new_bus->read = &fixed_mii_read; | ||
233 | new_bus->write = &fixed_mii_write; | ||
234 | new_bus->reset = &fixed_mii_reset; | ||
235 | |||
236 | /*set up workspace*/ | ||
237 | fixed_mdio_update_regs(fixed); | ||
238 | new_bus->priv = fixed; | ||
239 | |||
240 | new_bus->dev = dev; | ||
241 | dev_set_drvdata(dev, new_bus); | ||
242 | |||
243 | /* create phy_device and register it on the mdio bus */ | ||
244 | phydev = phy_device_create(new_bus, 0, 0); | ||
245 | |||
246 | /* | ||
247 | Put the phydev pointer into the fixed_info so that the bus read/write code | ||
248 | can access, for instance, the attached netdev. It only has to do so when a | ||
249 | user-specified link_update callback is being used... | ||
250 | */ | ||
251 | fixed->phydev = phydev; | ||
252 | |||
253 | if(NULL == phydev) { | ||
254 | err = -ENOMEM; | ||
255 | goto device_create_fail; | ||
256 | } | ||
257 | |||
258 | phydev->irq = -1; | ||
259 | phydev->dev.bus = &mdio_bus_type; | ||
260 | |||
261 | if(number) | ||
262 | snprintf(phydev->dev.bus_id, BUS_ID_SIZE, | ||
263 | "fixed_%d@%d:%d", number, speed, duplex); | ||
264 | else | ||
265 | snprintf(phydev->dev.bus_id, BUS_ID_SIZE, | ||
266 | "fixed@%d:%d", speed, duplex); | ||
267 | phydev->bus = new_bus; | ||
268 | |||
269 | err = device_register(&phydev->dev); | ||
270 | if(err) { | ||
271 | printk(KERN_ERR "Phy %s failed to register\n", | ||
272 | phydev->dev.bus_id); | ||
273 | goto bus_register_fail; | ||
274 | } | ||
275 | |||
276 | /* | ||
277 | the mdio bus matches drivers by phy_id... Rather than faking | ||
278 | a match, we bind the driver here by hand; it will be the same | ||
279 | for all the fixed PHYs anyway. | ||
280 | */ | ||
281 | down_write(&phydev->dev.bus->subsys.rwsem); | ||
282 | |||
283 | phydev->dev.driver = &fixed_mdio_driver.driver; | ||
284 | |||
285 | err = phydev->dev.driver->probe(&phydev->dev); | ||
286 | if(err < 0) { | ||
287 | printk(KERN_ERR "Phy %s: problems with fixed driver\n",phydev->dev.bus_id); | ||
288 | up_write(&phydev->dev.bus->subsys.rwsem); | ||
289 | goto probe_fail; | ||
290 | } | ||
291 | |||
292 | device_bind_driver(&phydev->dev); | ||
293 | up_write(&phydev->dev.bus->subsys.rwsem); | ||
294 | |||
295 | return 0; | ||
296 | |||
297 | probe_fail: | ||
298 | device_unregister(&phydev->dev); | ||
299 | bus_register_fail: | ||
300 | kfree(phydev); | ||
301 | device_create_fail: | ||
302 | kfree(dev); | ||
303 | kfree(new_bus); | ||
304 | kfree(fixed); | ||
305 | |||
306 | return err; | ||
307 | } | ||
308 | |||
309 | |||
310 | MODULE_DESCRIPTION("Fixed PHY device & driver for PAL"); | ||
311 | MODULE_AUTHOR("Vitaly Bordug"); | ||
312 | MODULE_LICENSE("GPL"); | ||
313 | |||
314 | static int __init fixed_init(void) | ||
315 | { | ||
316 | int ret; | ||
317 | int duplex = 0; | ||
318 | |||
319 | /* register on the bus... Not expected to be matched with anything there... */ | ||
320 | phy_driver_register(&fixed_mdio_driver); | ||
321 | |||
322 | /* So let the fun begin... | ||
323 | We will create several mdio devices here, and will bind the upper | ||
324 | driver to them. | ||
325 | |||
326 | External software can then look up the PHY on the bus by searching | ||
327 | for fixed@<speed>:<duplex>, e.g. fixed@100:1, to be connected to the | ||
328 | virtual 100M full-duplex PHY. | ||
329 | |||
330 | In case several virtual PHYs are required, the bus_id will be of the form | ||
331 | fixed_<num>@<speed>:<duplex>, which also makes it possible to define a | ||
332 | driver-specific link control callback, if for instance the PHY is completely | ||
333 | SW-driven. | ||
334 | |||
335 | */ | ||
336 | |||
337 | #ifdef CONFIG_FIXED_MII_DUPLEX | ||
338 | duplex = 1; | ||
339 | #endif | ||
340 | |||
341 | #ifdef CONFIG_FIXED_MII_100_FDX | ||
342 | fixed_mdio_register_device(0, 100, 1); | ||
343 | #endif | ||
344 | |||
345 | #ifdef CONFIG_FIXED_MII_10_FDX | ||
346 | fixed_mdio_register_device(0, 10, 1); | ||
347 | #endif | ||
348 | return 0; | ||
349 | } | ||
350 | |||
351 | static void __exit fixed_exit(void) | ||
352 | { | ||
353 | phy_driver_unregister(&fixed_mdio_driver); | ||
354 | /* :WARNING:02/18/2006 04:32:40 AM:: Cleanup all the created stuff */ | ||
355 | } | ||
356 | |||
357 | module_init(fixed_init); | ||
358 | module_exit(fixed_exit); | ||
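The comments in fixed.c describe the intended usage: the MAC driver looks the emulated PHY up by its bus_id (fixed@<speed>:<duplex>, or fixed_<num>@<speed>:<duplex>) and may install a link_update callback so the emulated BMCR/BMSR track a software-controlled link. A hedged sketch of such a callback, assuming struct fixed_phy_status and fixed_mdio_set_link_update() are made visible to the MAC driver via a header (not part of this patch) and that priv->phydev is the phy_device obtained from phy_connect():

    /* Called from fixed_mii_read() before the emulated registers are
     * rebuilt, so whatever this writes into *status is what the PHY
     * state machine will see on the next poll. */
    static int myboard_fixed_link_update(struct net_device *ndev,
                                         struct fixed_phy_status *status)
    {
            status->link = 1;       /* e.g. sample a GPIO or switch register */
            status->speed = 100;
            status->duplex = 1;
            return 0;
    }

    /* somewhere in the MAC driver's open path */
    fixed_mdio_set_link_update(priv->phydev, myboard_fixed_link_update);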
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 1dde390c164d..cf6660c93ffa 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c | |||
@@ -159,6 +159,7 @@ struct bus_type mdio_bus_type = { | |||
159 | .suspend = mdio_bus_suspend, | 159 | .suspend = mdio_bus_suspend, |
160 | .resume = mdio_bus_resume, | 160 | .resume = mdio_bus_resume, |
161 | }; | 161 | }; |
162 | EXPORT_SYMBOL(mdio_bus_type); | ||
162 | 163 | ||
163 | int __init mdio_bus_init(void) | 164 | int __init mdio_bus_init(void) |
164 | { | 165 | { |
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 7d5c2233c252..f5aad77288f9 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
@@ -419,9 +419,8 @@ void phy_start_machine(struct phy_device *phydev, | |||
419 | 419 | ||
420 | /* phy_stop_machine | 420 | /* phy_stop_machine |
421 | * | 421 | * |
422 | * description: Stops the state machine timer, sets the state to | 422 | * description: Stops the state machine timer, sets the state to UP |
423 | * UP (unless it wasn't up yet), and then frees the interrupt, | 423 | * (unless it wasn't up yet). This function must be called BEFORE |
424 | * if it is in use. This function must be called BEFORE | ||
425 | * phy_detach. | 424 | * phy_detach. |
426 | */ | 425 | */ |
427 | void phy_stop_machine(struct phy_device *phydev) | 426 | void phy_stop_machine(struct phy_device *phydev) |
@@ -433,9 +432,6 @@ void phy_stop_machine(struct phy_device *phydev) | |||
433 | phydev->state = PHY_UP; | 432 | phydev->state = PHY_UP; |
434 | spin_unlock(&phydev->lock); | 433 | spin_unlock(&phydev->lock); |
435 | 434 | ||
436 | if (phydev->irq != PHY_POLL) | ||
437 | phy_stop_interrupts(phydev); | ||
438 | |||
439 | phydev->adjust_state = NULL; | 435 | phydev->adjust_state = NULL; |
440 | } | 436 | } |
441 | 437 | ||
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 1bc1e032c5d6..2d1ecfdc80db 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c | |||
@@ -45,6 +45,35 @@ static struct phy_driver genphy_driver; | |||
45 | extern int mdio_bus_init(void); | 45 | extern int mdio_bus_init(void); |
46 | extern void mdio_bus_exit(void); | 46 | extern void mdio_bus_exit(void); |
47 | 47 | ||
48 | struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id) | ||
49 | { | ||
50 | struct phy_device *dev; | ||
51 | /* We allocate the device, and initialize the | ||
52 | * default values */ | ||
53 | dev = kcalloc(1, sizeof(*dev), GFP_KERNEL); | ||
54 | |||
55 | if (NULL == dev) | ||
56 | return ERR_PTR(-ENOMEM); | ||
57 | |||
58 | dev->speed = 0; | ||
59 | dev->duplex = -1; | ||
60 | dev->pause = dev->asym_pause = 0; | ||
61 | dev->link = 1; | ||
62 | |||
63 | dev->autoneg = AUTONEG_ENABLE; | ||
64 | |||
65 | dev->addr = addr; | ||
66 | dev->phy_id = phy_id; | ||
67 | dev->bus = bus; | ||
68 | |||
69 | dev->state = PHY_DOWN; | ||
70 | |||
71 | spin_lock_init(&dev->lock); | ||
72 | |||
73 | return dev; | ||
74 | } | ||
75 | EXPORT_SYMBOL(phy_device_create); | ||
76 | |||
48 | /* get_phy_device | 77 | /* get_phy_device |
49 | * | 78 | * |
50 | * description: Reads the ID registers of the PHY at addr on the | 79 | * description: Reads the ID registers of the PHY at addr on the |
@@ -78,27 +107,7 @@ struct phy_device * get_phy_device(struct mii_bus *bus, int addr) | |||
78 | if (0xffffffff == phy_id) | 107 | if (0xffffffff == phy_id) |
79 | return NULL; | 108 | return NULL; |
80 | 109 | ||
81 | /* Otherwise, we allocate the device, and initialize the | 110 | dev = phy_device_create(bus, addr, phy_id); |
82 | * default values */ | ||
83 | dev = kcalloc(1, sizeof(*dev), GFP_KERNEL); | ||
84 | |||
85 | if (NULL == dev) | ||
86 | return ERR_PTR(-ENOMEM); | ||
87 | |||
88 | dev->speed = 0; | ||
89 | dev->duplex = -1; | ||
90 | dev->pause = dev->asym_pause = 0; | ||
91 | dev->link = 1; | ||
92 | |||
93 | dev->autoneg = AUTONEG_ENABLE; | ||
94 | |||
95 | dev->addr = addr; | ||
96 | dev->phy_id = phy_id; | ||
97 | dev->bus = bus; | ||
98 | |||
99 | dev->state = PHY_DOWN; | ||
100 | |||
101 | spin_lock_init(&dev->lock); | ||
102 | 111 | ||
103 | return dev; | 112 | return dev; |
104 | } | 113 | } |
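The phy_device.c change factors the allocation and default initialisation out of get_phy_device() into the exported phy_device_create(), which is what allows fixed.c to instantiate a phy_device without generating any MDIO traffic. A minimal sketch of a caller on a private bus (my_bus and the error handling are illustrative):

    struct phy_device *phydev;

    phydev = phy_device_create(my_bus, 0 /* addr */, 0 /* phy_id */);
    if (phydev == NULL || IS_ERR(phydev))
            return -ENOMEM;         /* note: failure is reported ERR_PTR-style */

    phydev->irq = -1;               /* emulated/polled PHY: no interrupt line */
    phydev->dev.bus = &mdio_bus_type;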
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index 0ec6e9d57b94..c872f7c6cce3 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c | |||
@@ -192,7 +192,7 @@ struct cardmap { | |||
192 | void *ptr[CARDMAP_WIDTH]; | 192 | void *ptr[CARDMAP_WIDTH]; |
193 | }; | 193 | }; |
194 | static void *cardmap_get(struct cardmap *map, unsigned int nr); | 194 | static void *cardmap_get(struct cardmap *map, unsigned int nr); |
195 | static void cardmap_set(struct cardmap **map, unsigned int nr, void *ptr); | 195 | static int cardmap_set(struct cardmap **map, unsigned int nr, void *ptr); |
196 | static unsigned int cardmap_find_first_free(struct cardmap *map); | 196 | static unsigned int cardmap_find_first_free(struct cardmap *map); |
197 | static void cardmap_destroy(struct cardmap **map); | 197 | static void cardmap_destroy(struct cardmap **map); |
198 | 198 | ||
@@ -1995,10 +1995,9 @@ ppp_register_channel(struct ppp_channel *chan) | |||
1995 | { | 1995 | { |
1996 | struct channel *pch; | 1996 | struct channel *pch; |
1997 | 1997 | ||
1998 | pch = kmalloc(sizeof(struct channel), GFP_KERNEL); | 1998 | pch = kzalloc(sizeof(struct channel), GFP_KERNEL); |
1999 | if (pch == 0) | 1999 | if (pch == 0) |
2000 | return -ENOMEM; | 2000 | return -ENOMEM; |
2001 | memset(pch, 0, sizeof(struct channel)); | ||
2002 | pch->ppp = NULL; | 2001 | pch->ppp = NULL; |
2003 | pch->chan = chan; | 2002 | pch->chan = chan; |
2004 | chan->ppp = pch; | 2003 | chan->ppp = pch; |
@@ -2408,13 +2407,12 @@ ppp_create_interface(int unit, int *retp) | |||
2408 | int ret = -ENOMEM; | 2407 | int ret = -ENOMEM; |
2409 | int i; | 2408 | int i; |
2410 | 2409 | ||
2411 | ppp = kmalloc(sizeof(struct ppp), GFP_KERNEL); | 2410 | ppp = kzalloc(sizeof(struct ppp), GFP_KERNEL); |
2412 | if (!ppp) | 2411 | if (!ppp) |
2413 | goto out; | 2412 | goto out; |
2414 | dev = alloc_netdev(0, "", ppp_setup); | 2413 | dev = alloc_netdev(0, "", ppp_setup); |
2415 | if (!dev) | 2414 | if (!dev) |
2416 | goto out1; | 2415 | goto out1; |
2417 | memset(ppp, 0, sizeof(struct ppp)); | ||
2418 | 2416 | ||
2419 | ppp->mru = PPP_MRU; | 2417 | ppp->mru = PPP_MRU; |
2420 | init_ppp_file(&ppp->file, INTERFACE); | 2418 | init_ppp_file(&ppp->file, INTERFACE); |
@@ -2454,11 +2452,16 @@ ppp_create_interface(int unit, int *retp) | |||
2454 | } | 2452 | } |
2455 | 2453 | ||
2456 | atomic_inc(&ppp_unit_count); | 2454 | atomic_inc(&ppp_unit_count); |
2457 | cardmap_set(&all_ppp_units, unit, ppp); | 2455 | ret = cardmap_set(&all_ppp_units, unit, ppp); |
2456 | if (ret != 0) | ||
2457 | goto out3; | ||
2458 | |||
2458 | mutex_unlock(&all_ppp_mutex); | 2459 | mutex_unlock(&all_ppp_mutex); |
2459 | *retp = 0; | 2460 | *retp = 0; |
2460 | return ppp; | 2461 | return ppp; |
2461 | 2462 | ||
2463 | out3: | ||
2464 | atomic_dec(&ppp_unit_count); | ||
2462 | out2: | 2465 | out2: |
2463 | mutex_unlock(&all_ppp_mutex); | 2466 | mutex_unlock(&all_ppp_mutex); |
2464 | free_netdev(dev); | 2467 | free_netdev(dev); |
@@ -2695,7 +2698,7 @@ static void *cardmap_get(struct cardmap *map, unsigned int nr) | |||
2695 | return NULL; | 2698 | return NULL; |
2696 | } | 2699 | } |
2697 | 2700 | ||
2698 | static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr) | 2701 | static int cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr) |
2699 | { | 2702 | { |
2700 | struct cardmap *p; | 2703 | struct cardmap *p; |
2701 | int i; | 2704 | int i; |
@@ -2704,8 +2707,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr) | |||
2704 | if (p == NULL || (nr >> p->shift) >= CARDMAP_WIDTH) { | 2707 | if (p == NULL || (nr >> p->shift) >= CARDMAP_WIDTH) { |
2705 | do { | 2708 | do { |
2706 | /* need a new top level */ | 2709 | /* need a new top level */ |
2707 | struct cardmap *np = kmalloc(sizeof(*np), GFP_KERNEL); | 2710 | struct cardmap *np = kzalloc(sizeof(*np), GFP_KERNEL); |
2708 | memset(np, 0, sizeof(*np)); | 2711 | if (!np) |
2712 | goto enomem; | ||
2709 | np->ptr[0] = p; | 2713 | np->ptr[0] = p; |
2710 | if (p != NULL) { | 2714 | if (p != NULL) { |
2711 | np->shift = p->shift + CARDMAP_ORDER; | 2715 | np->shift = p->shift + CARDMAP_ORDER; |
@@ -2719,8 +2723,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr) | |||
2719 | while (p->shift > 0) { | 2723 | while (p->shift > 0) { |
2720 | i = (nr >> p->shift) & CARDMAP_MASK; | 2724 | i = (nr >> p->shift) & CARDMAP_MASK; |
2721 | if (p->ptr[i] == NULL) { | 2725 | if (p->ptr[i] == NULL) { |
2722 | struct cardmap *np = kmalloc(sizeof(*np), GFP_KERNEL); | 2726 | struct cardmap *np = kzalloc(sizeof(*np), GFP_KERNEL); |
2723 | memset(np, 0, sizeof(*np)); | 2727 | if (!np) |
2728 | goto enomem; | ||
2724 | np->shift = p->shift - CARDMAP_ORDER; | 2729 | np->shift = p->shift - CARDMAP_ORDER; |
2725 | np->parent = p; | 2730 | np->parent = p; |
2726 | p->ptr[i] = np; | 2731 | p->ptr[i] = np; |
@@ -2735,6 +2740,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr) | |||
2735 | set_bit(i, &p->inuse); | 2740 | set_bit(i, &p->inuse); |
2736 | else | 2741 | else |
2737 | clear_bit(i, &p->inuse); | 2742 | clear_bit(i, &p->inuse); |
2743 | return 0; | ||
2744 | enomem: | ||
2745 | return -ENOMEM; | ||
2738 | } | 2746 | } |
2739 | 2747 | ||
2740 | static unsigned int cardmap_find_first_free(struct cardmap *map) | 2748 | static unsigned int cardmap_find_first_free(struct cardmap *map) |
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 16a0ef1b1369..4c2f575faad7 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -1406,7 +1406,7 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out, | |||
1406 | dev = alloc_etherdev(sizeof (*tp)); | 1406 | dev = alloc_etherdev(sizeof (*tp)); |
1407 | if (dev == NULL) { | 1407 | if (dev == NULL) { |
1408 | if (netif_msg_drv(&debug)) | 1408 | if (netif_msg_drv(&debug)) |
1409 | printk(KERN_ERR PFX "unable to alloc new ethernet\n"); | 1409 | dev_err(&pdev->dev, "unable to alloc new ethernet\n"); |
1410 | goto err_out; | 1410 | goto err_out; |
1411 | } | 1411 | } |
1412 | 1412 | ||
@@ -1418,10 +1418,8 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out, | |||
1418 | /* enable device (incl. PCI PM wakeup and hotplug setup) */ | 1418 | /* enable device (incl. PCI PM wakeup and hotplug setup) */ |
1419 | rc = pci_enable_device(pdev); | 1419 | rc = pci_enable_device(pdev); |
1420 | if (rc < 0) { | 1420 | if (rc < 0) { |
1421 | if (netif_msg_probe(tp)) { | 1421 | if (netif_msg_probe(tp)) |
1422 | printk(KERN_ERR PFX "%s: enable failure\n", | 1422 | dev_err(&pdev->dev, "enable failure\n"); |
1423 | pci_name(pdev)); | ||
1424 | } | ||
1425 | goto err_out_free_dev; | 1423 | goto err_out_free_dev; |
1426 | } | 1424 | } |
1427 | 1425 | ||
@@ -1437,37 +1435,32 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out, | |||
1437 | pci_read_config_word(pdev, pm_cap + PCI_PM_CTRL, &pwr_command); | 1435 | pci_read_config_word(pdev, pm_cap + PCI_PM_CTRL, &pwr_command); |
1438 | acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK; | 1436 | acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK; |
1439 | } else { | 1437 | } else { |
1440 | if (netif_msg_probe(tp)) { | 1438 | if (netif_msg_probe(tp)) |
1441 | printk(KERN_ERR PFX | 1439 | dev_err(&pdev->dev, |
1442 | "PowerManagement capability not found.\n"); | 1440 | "PowerManagement capability not found.\n"); |
1443 | } | ||
1444 | } | 1441 | } |
1445 | 1442 | ||
1446 | /* make sure PCI base addr 1 is MMIO */ | 1443 | /* make sure PCI base addr 1 is MMIO */ |
1447 | if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { | 1444 | if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { |
1448 | if (netif_msg_probe(tp)) { | 1445 | if (netif_msg_probe(tp)) |
1449 | printk(KERN_ERR PFX | 1446 | dev_err(&pdev->dev, |
1450 | "region #1 not an MMIO resource, aborting\n"); | 1447 | "region #1 not an MMIO resource, aborting\n"); |
1451 | } | ||
1452 | rc = -ENODEV; | 1448 | rc = -ENODEV; |
1453 | goto err_out_mwi; | 1449 | goto err_out_mwi; |
1454 | } | 1450 | } |
1455 | /* check for weird/broken PCI region reporting */ | 1451 | /* check for weird/broken PCI region reporting */ |
1456 | if (pci_resource_len(pdev, 1) < R8169_REGS_SIZE) { | 1452 | if (pci_resource_len(pdev, 1) < R8169_REGS_SIZE) { |
1457 | if (netif_msg_probe(tp)) { | 1453 | if (netif_msg_probe(tp)) |
1458 | printk(KERN_ERR PFX | 1454 | dev_err(&pdev->dev, |
1459 | "Invalid PCI region size(s), aborting\n"); | 1455 | "Invalid PCI region size(s), aborting\n"); |
1460 | } | ||
1461 | rc = -ENODEV; | 1456 | rc = -ENODEV; |
1462 | goto err_out_mwi; | 1457 | goto err_out_mwi; |
1463 | } | 1458 | } |
1464 | 1459 | ||
1465 | rc = pci_request_regions(pdev, MODULENAME); | 1460 | rc = pci_request_regions(pdev, MODULENAME); |
1466 | if (rc < 0) { | 1461 | if (rc < 0) { |
1467 | if (netif_msg_probe(tp)) { | 1462 | if (netif_msg_probe(tp)) |
1468 | printk(KERN_ERR PFX "%s: could not request regions.\n", | 1463 | dev_err(&pdev->dev, "could not request regions.\n"); |
1469 | pci_name(pdev)); | ||
1470 | } | ||
1471 | goto err_out_mwi; | 1464 | goto err_out_mwi; |
1472 | } | 1465 | } |
1473 | 1466 | ||
@@ -1480,10 +1473,9 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out, | |||
1480 | } else { | 1473 | } else { |
1481 | rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | 1474 | rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); |
1482 | if (rc < 0) { | 1475 | if (rc < 0) { |
1483 | if (netif_msg_probe(tp)) { | 1476 | if (netif_msg_probe(tp)) |
1484 | printk(KERN_ERR PFX | 1477 | dev_err(&pdev->dev, |
1485 | "DMA configuration failed.\n"); | 1478 | "DMA configuration failed.\n"); |
1486 | } | ||
1487 | goto err_out_free_res; | 1479 | goto err_out_free_res; |
1488 | } | 1480 | } |
1489 | } | 1481 | } |
@@ -1494,7 +1486,7 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out, | |||
1494 | ioaddr = ioremap(pci_resource_start(pdev, 1), R8169_REGS_SIZE); | 1486 | ioaddr = ioremap(pci_resource_start(pdev, 1), R8169_REGS_SIZE); |
1495 | if (ioaddr == NULL) { | 1487 | if (ioaddr == NULL) { |
1496 | if (netif_msg_probe(tp)) | 1488 | if (netif_msg_probe(tp)) |
1497 | printk(KERN_ERR PFX "cannot remap MMIO, aborting\n"); | 1489 | dev_err(&pdev->dev, "cannot remap MMIO, aborting\n"); |
1498 | rc = -EIO; | 1490 | rc = -EIO; |
1499 | goto err_out_free_res; | 1491 | goto err_out_free_res; |
1500 | } | 1492 | } |
@@ -1526,9 +1518,9 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out, | |||
1526 | if (i < 0) { | 1518 | if (i < 0) { |
1527 | /* Unknown chip: assume array element #0, original RTL-8169 */ | 1519 | /* Unknown chip: assume array element #0, original RTL-8169 */ |
1528 | if (netif_msg_probe(tp)) { | 1520 | if (netif_msg_probe(tp)) { |
1529 | printk(KERN_DEBUG PFX "PCI device %s: " | 1521 | dev_printk(KERN_DEBUG, &pdev->dev, |
1530 | "unknown chip version, assuming %s\n", | 1522 | "unknown chip version, assuming %s\n", |
1531 | pci_name(pdev), rtl_chip_info[0].name); | 1523 | rtl_chip_info[0].name); |
1532 | } | 1524 | } |
1533 | i++; | 1525 | i++; |
1534 | } | 1526 | } |
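The r8169 hunks are a mechanical conversion of probe-time error messages from raw printk() plus pci_name() to dev_err()/dev_printk(), which derive the message prefix from the device model. The per-call-site shape of the change is simply:

    /* before */
    printk(KERN_ERR PFX "%s: could not request regions.\n", pci_name(pdev));

    /* after: driver/device prefix is added by the device core */
    dev_err(&pdev->dev, "could not request regions.\n");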
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index c6b77acb35ef..e72e0e099060 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c | |||
@@ -71,12 +71,13 @@ | |||
71 | #include <asm/uaccess.h> | 71 | #include <asm/uaccess.h> |
72 | #include <asm/io.h> | 72 | #include <asm/io.h> |
73 | #include <asm/div64.h> | 73 | #include <asm/div64.h> |
74 | #include <asm/irq.h> | ||
74 | 75 | ||
75 | /* local include */ | 76 | /* local include */ |
76 | #include "s2io.h" | 77 | #include "s2io.h" |
77 | #include "s2io-regs.h" | 78 | #include "s2io-regs.h" |
78 | 79 | ||
79 | #define DRV_VERSION "2.0.14.2" | 80 | #define DRV_VERSION "2.0.15.2" |
80 | 81 | ||
81 | /* S2io Driver name & version. */ | 82 | /* S2io Driver name & version. */ |
82 | static char s2io_driver_name[] = "Neterion"; | 83 | static char s2io_driver_name[] = "Neterion"; |
@@ -370,38 +371,50 @@ static const u64 fix_mac[] = { | |||
370 | END_SIGN | 371 | END_SIGN |
371 | }; | 372 | }; |
372 | 373 | ||
374 | MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>"); | ||
375 | MODULE_LICENSE("GPL"); | ||
376 | MODULE_VERSION(DRV_VERSION); | ||
377 | |||
378 | |||
373 | /* Module Loadable parameters. */ | 379 | /* Module Loadable parameters. */ |
374 | static unsigned int tx_fifo_num = 1; | 380 | S2IO_PARM_INT(tx_fifo_num, 1); |
375 | static unsigned int tx_fifo_len[MAX_TX_FIFOS] = | 381 | S2IO_PARM_INT(rx_ring_num, 1); |
376 | {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN}; | 382 | |
377 | static unsigned int rx_ring_num = 1; | 383 | |
378 | static unsigned int rx_ring_sz[MAX_RX_RINGS] = | 384 | S2IO_PARM_INT(rx_ring_mode, 1); |
379 | {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT}; | 385 | S2IO_PARM_INT(use_continuous_tx_intrs, 1); |
380 | static unsigned int rts_frm_len[MAX_RX_RINGS] = | 386 | S2IO_PARM_INT(rmac_pause_time, 0x100); |
381 | {[0 ...(MAX_RX_RINGS - 1)] = 0 }; | 387 | S2IO_PARM_INT(mc_pause_threshold_q0q3, 187); |
382 | static unsigned int rx_ring_mode = 1; | 388 | S2IO_PARM_INT(mc_pause_threshold_q4q7, 187); |
383 | static unsigned int use_continuous_tx_intrs = 1; | 389 | S2IO_PARM_INT(shared_splits, 0); |
384 | static unsigned int rmac_pause_time = 0x100; | 390 | S2IO_PARM_INT(tmac_util_period, 5); |
385 | static unsigned int mc_pause_threshold_q0q3 = 187; | 391 | S2IO_PARM_INT(rmac_util_period, 5); |
386 | static unsigned int mc_pause_threshold_q4q7 = 187; | 392 | S2IO_PARM_INT(bimodal, 0); |
387 | static unsigned int shared_splits; | 393 | S2IO_PARM_INT(l3l4hdr_size, 128); |
388 | static unsigned int tmac_util_period = 5; | ||
389 | static unsigned int rmac_util_period = 5; | ||
390 | static unsigned int bimodal = 0; | ||
391 | static unsigned int l3l4hdr_size = 128; | ||
392 | #ifndef CONFIG_S2IO_NAPI | ||
393 | static unsigned int indicate_max_pkts; | ||
394 | #endif | ||
395 | /* Frequency of Rx desc syncs expressed as power of 2 */ | 394 | /* Frequency of Rx desc syncs expressed as power of 2 */ |
396 | static unsigned int rxsync_frequency = 3; | 395 | S2IO_PARM_INT(rxsync_frequency, 3); |
397 | /* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */ | 396 | /* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */ |
398 | static unsigned int intr_type = 0; | 397 | S2IO_PARM_INT(intr_type, 0); |
399 | /* Large receive offload feature */ | 398 | /* Large receive offload feature */ |
400 | static unsigned int lro = 0; | 399 | S2IO_PARM_INT(lro, 0); |
401 | /* Max pkts to be aggregated by LRO at one time. If not specified, | 400 | /* Max pkts to be aggregated by LRO at one time. If not specified, |
402 | * aggregation happens until we hit max IP pkt size(64K) | 401 | * aggregation happens until we hit max IP pkt size(64K) |
403 | */ | 402 | */ |
404 | static unsigned int lro_max_pkts = 0xFFFF; | 403 | S2IO_PARM_INT(lro_max_pkts, 0xFFFF); |
404 | #ifndef CONFIG_S2IO_NAPI | ||
405 | S2IO_PARM_INT(indicate_max_pkts, 0); | ||
406 | #endif | ||
407 | |||
408 | static unsigned int tx_fifo_len[MAX_TX_FIFOS] = | ||
409 | {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN}; | ||
410 | static unsigned int rx_ring_sz[MAX_RX_RINGS] = | ||
411 | {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT}; | ||
412 | static unsigned int rts_frm_len[MAX_RX_RINGS] = | ||
413 | {[0 ...(MAX_RX_RINGS - 1)] = 0 }; | ||
414 | |||
415 | module_param_array(tx_fifo_len, uint, NULL, 0); | ||
416 | module_param_array(rx_ring_sz, uint, NULL, 0); | ||
417 | module_param_array(rts_frm_len, uint, NULL, 0); | ||
405 | 418 | ||
406 | /* | 419 | /* |
407 | * S2IO device table. | 420 | * S2IO device table. |
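S2IO_PARM_INT() itself is defined in s2io.h and is not shown in this hunk; as an assumption, for readability only, it presumably expands to the old static variable plus the module_param() declaration that used to accompany it, along these lines:

    /* Assumed expansion, for illustration only -- see s2io.h for the real macro */
    #define S2IO_PARM_INT(X, def_val) \
            static unsigned int X = def_val; \
            module_param(X, uint, 0)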
@@ -464,10 +477,9 @@ static int init_shared_mem(struct s2io_nic *nic) | |||
464 | size += config->tx_cfg[i].fifo_len; | 477 | size += config->tx_cfg[i].fifo_len; |
465 | } | 478 | } |
466 | if (size > MAX_AVAILABLE_TXDS) { | 479 | if (size > MAX_AVAILABLE_TXDS) { |
467 | DBG_PRINT(ERR_DBG, "%s: Requested TxDs too high, ", | 480 | DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, "); |
468 | __FUNCTION__); | ||
469 | DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size); | 481 | DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size); |
470 | return FAILURE; | 482 | return -EINVAL; |
471 | } | 483 | } |
472 | 484 | ||
473 | lst_size = (sizeof(TxD_t) * config->max_txds); | 485 | lst_size = (sizeof(TxD_t) * config->max_txds); |
@@ -547,6 +559,7 @@ static int init_shared_mem(struct s2io_nic *nic) | |||
547 | nic->ufo_in_band_v = kmalloc((sizeof(u64) * size), GFP_KERNEL); | 559 | nic->ufo_in_band_v = kmalloc((sizeof(u64) * size), GFP_KERNEL); |
548 | if (!nic->ufo_in_band_v) | 560 | if (!nic->ufo_in_band_v) |
549 | return -ENOMEM; | 561 | return -ENOMEM; |
562 | memset(nic->ufo_in_band_v, 0, size); | ||
550 | 563 | ||
551 | /* Allocation and initialization of RXDs in Rings */ | 564 | /* Allocation and initialization of RXDs in Rings */ |
552 | size = 0; | 565 | size = 0; |
@@ -1213,7 +1226,7 @@ static int init_nic(struct s2io_nic *nic) | |||
1213 | break; | 1226 | break; |
1214 | } | 1227 | } |
1215 | 1228 | ||
1216 | /* Enable Tx FIFO partition 0. */ | 1229 | /* Enable all configured Tx FIFO partitions */ |
1217 | val64 = readq(&bar0->tx_fifo_partition_0); | 1230 | val64 = readq(&bar0->tx_fifo_partition_0); |
1218 | val64 |= (TX_FIFO_PARTITION_EN); | 1231 | val64 |= (TX_FIFO_PARTITION_EN); |
1219 | writeq(val64, &bar0->tx_fifo_partition_0); | 1232 | writeq(val64, &bar0->tx_fifo_partition_0); |
@@ -1650,7 +1663,7 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag) | |||
1650 | writeq(temp64, &bar0->general_int_mask); | 1663 | writeq(temp64, &bar0->general_int_mask); |
1651 | /* | 1664 | /* |
1652 | * If Hercules adapter enable GPIO otherwise | 1665 | * If Hercules adapter enable GPIO otherwise |
1653 | * disabled all PCIX, Flash, MDIO, IIC and GPIO | 1666 | * disable all PCIX, Flash, MDIO, IIC and GPIO |
1654 | * interrupts for now. | 1667 | * interrupts for now. |
1655 | * TODO | 1668 | * TODO |
1656 | */ | 1669 | */ |
@@ -1976,7 +1989,6 @@ static int start_nic(struct s2io_nic *nic) | |||
1976 | XENA_dev_config_t __iomem *bar0 = nic->bar0; | 1989 | XENA_dev_config_t __iomem *bar0 = nic->bar0; |
1977 | struct net_device *dev = nic->dev; | 1990 | struct net_device *dev = nic->dev; |
1978 | register u64 val64 = 0; | 1991 | register u64 val64 = 0; |
1979 | u16 interruptible; | ||
1980 | u16 subid, i; | 1992 | u16 subid, i; |
1981 | mac_info_t *mac_control; | 1993 | mac_info_t *mac_control; |
1982 | struct config_param *config; | 1994 | struct config_param *config; |
@@ -2047,16 +2059,6 @@ static int start_nic(struct s2io_nic *nic) | |||
2047 | return FAILURE; | 2059 | return FAILURE; |
2048 | } | 2060 | } |
2049 | 2061 | ||
2050 | /* Enable select interrupts */ | ||
2051 | if (nic->intr_type != INTA) | ||
2052 | en_dis_able_nic_intrs(nic, ENA_ALL_INTRS, DISABLE_INTRS); | ||
2053 | else { | ||
2054 | interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR; | ||
2055 | interruptible |= TX_PIC_INTR | RX_PIC_INTR; | ||
2056 | interruptible |= TX_MAC_INTR | RX_MAC_INTR; | ||
2057 | en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS); | ||
2058 | } | ||
2059 | |||
2060 | /* | 2062 | /* |
2061 | * With some switches, link might be already up at this point. | 2063 | * With some switches, link might be already up at this point. |
2062 | * Because of this weird behavior, when we enable laser, | 2064 | * Because of this weird behavior, when we enable laser, |
@@ -2130,7 +2132,7 @@ static struct sk_buff *s2io_txdl_getskb(fifo_info_t *fifo_data, TxD_t *txdlp, in | |||
2130 | frag->size, PCI_DMA_TODEVICE); | 2132 | frag->size, PCI_DMA_TODEVICE); |
2131 | } | 2133 | } |
2132 | } | 2134 | } |
2133 | txdlp->Host_Control = 0; | 2135 | memset(txdlp,0, (sizeof(TxD_t) * fifo_data->max_txds)); |
2134 | return(skb); | 2136 | return(skb); |
2135 | } | 2137 | } |
2136 | 2138 | ||
@@ -2382,9 +2384,14 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) | |||
2382 | skb->data = (void *) (unsigned long)tmp; | 2384 | skb->data = (void *) (unsigned long)tmp; |
2383 | skb->tail = (void *) (unsigned long)tmp; | 2385 | skb->tail = (void *) (unsigned long)tmp; |
2384 | 2386 | ||
2385 | ((RxD3_t*)rxdp)->Buffer0_ptr = | 2387 | if (!(((RxD3_t*)rxdp)->Buffer0_ptr)) |
2386 | pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN, | 2388 | ((RxD3_t*)rxdp)->Buffer0_ptr = |
2389 | pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN, | ||
2387 | PCI_DMA_FROMDEVICE); | 2390 | PCI_DMA_FROMDEVICE); |
2391 | else | ||
2392 | pci_dma_sync_single_for_device(nic->pdev, | ||
2393 | (dma_addr_t) ((RxD3_t*)rxdp)->Buffer0_ptr, | ||
2394 | BUF0_LEN, PCI_DMA_FROMDEVICE); | ||
2388 | rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); | 2395 | rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); |
2389 | if (nic->rxd_mode == RXD_MODE_3B) { | 2396 | if (nic->rxd_mode == RXD_MODE_3B) { |
2390 | /* Two buffer mode */ | 2397 | /* Two buffer mode */ |
@@ -2397,10 +2404,13 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) | |||
2397 | (nic->pdev, skb->data, dev->mtu + 4, | 2404 | (nic->pdev, skb->data, dev->mtu + 4, |
2398 | PCI_DMA_FROMDEVICE); | 2405 | PCI_DMA_FROMDEVICE); |
2399 | 2406 | ||
2400 | /* Buffer-1 will be dummy buffer not used */ | 2407 | /* Buffer-1 will be dummy buffer. Not used */ |
2401 | ((RxD3_t*)rxdp)->Buffer1_ptr = | 2408 | if (!(((RxD3_t*)rxdp)->Buffer1_ptr)) { |
2402 | pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN, | 2409 | ((RxD3_t*)rxdp)->Buffer1_ptr = |
2403 | PCI_DMA_FROMDEVICE); | 2410 | pci_map_single(nic->pdev, |
2411 | ba->ba_1, BUF1_LEN, | ||
2412 | PCI_DMA_FROMDEVICE); | ||
2413 | } | ||
2404 | rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); | 2414 | rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); |
2405 | rxdp->Control_2 |= SET_BUFFER2_SIZE_3 | 2415 | rxdp->Control_2 |= SET_BUFFER2_SIZE_3 |
2406 | (dev->mtu + 4); | 2416 | (dev->mtu + 4); |
@@ -2625,23 +2635,23 @@ no_rx: | |||
2625 | } | 2635 | } |
2626 | #endif | 2636 | #endif |
2627 | 2637 | ||
2638 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
2628 | /** | 2639 | /** |
2629 | * s2io_netpoll - Rx interrupt service handler for netpoll support | 2640 | * s2io_netpoll - netpoll event handler entry point |
2630 | * @dev : pointer to the device structure. | 2641 | * @dev : pointer to the device structure. |
2631 | * Description: | 2642 | * Description: |
2632 | * Polling 'interrupt' - used by things like netconsole to send skbs | 2643 | * This function will be called by the upper layer to check for events on the |
2633 | * without having to re-enable interrupts. It's not called while | 2644 | * interface in situations where interrupts are disabled. It is used for |
2634 | * the interrupt routine is executing. | 2645 | * specific in-kernel networking tasks, such as remote consoles and kernel |
2646 | * debugging over the network (for example netdump in Red Hat). | ||
2635 | */ | 2647 | */ |
2636 | |||
2637 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
2638 | static void s2io_netpoll(struct net_device *dev) | 2648 | static void s2io_netpoll(struct net_device *dev) |
2639 | { | 2649 | { |
2640 | nic_t *nic = dev->priv; | 2650 | nic_t *nic = dev->priv; |
2641 | mac_info_t *mac_control; | 2651 | mac_info_t *mac_control; |
2642 | struct config_param *config; | 2652 | struct config_param *config; |
2643 | XENA_dev_config_t __iomem *bar0 = nic->bar0; | 2653 | XENA_dev_config_t __iomem *bar0 = nic->bar0; |
2644 | u64 val64; | 2654 | u64 val64 = 0xFFFFFFFFFFFFFFFFULL; |
2645 | int i; | 2655 | int i; |
2646 | 2656 | ||
2647 | disable_irq(dev->irq); | 2657 | disable_irq(dev->irq); |
@@ -2650,9 +2660,17 @@ static void s2io_netpoll(struct net_device *dev) | |||
2650 | mac_control = &nic->mac_control; | 2660 | mac_control = &nic->mac_control; |
2651 | config = &nic->config; | 2661 | config = &nic->config; |
2652 | 2662 | ||
2653 | val64 = readq(&bar0->rx_traffic_int); | ||
2654 | writeq(val64, &bar0->rx_traffic_int); | 2663 | writeq(val64, &bar0->rx_traffic_int); |
2664 | writeq(val64, &bar0->tx_traffic_int); | ||
2655 | 2665 | ||
2666 | /* we need to free up the transmitted skbs, or else netpoll will | ||
2667 | * run out of skbs and eventually the netpoll application, such | ||
2668 | * as netdump, will fail. | ||
2669 | */ | ||
2670 | for (i = 0; i < config->tx_fifo_num; i++) | ||
2671 | tx_intr_handler(&mac_control->fifos[i]); | ||
2672 | |||
2673 | /* check for received packet and indicate up to network */ | ||
2656 | for (i = 0; i < config->rx_ring_num; i++) | 2674 | for (i = 0; i < config->rx_ring_num; i++) |
2657 | rx_intr_handler(&mac_control->rings[i]); | 2675 | rx_intr_handler(&mac_control->rings[i]); |
2658 | 2676 | ||
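s2io_netpoll() is reached only through the netpoll infrastructure; the hookup is not part of this hunk, but in this kernel it amounts to assigning the handler to the net_device's poll_controller method at probe time, roughly:

    #ifdef CONFIG_NET_POLL_CONTROLLER
            /* netconsole/netdump call this with interrupts disabled */
            dev->poll_controller = s2io_netpoll;
    #endif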
@@ -2719,7 +2737,7 @@ static void rx_intr_handler(ring_info_t *ring_data) | |||
2719 | /* If you are next to the put index then it's a FIFO full condition */ | 2737 |
2720 | if ((get_block == put_block) && | 2738 | if ((get_block == put_block) && |
2721 | (get_info.offset + 1) == put_info.offset) { | 2739 | (get_info.offset + 1) == put_info.offset) { |
2722 | DBG_PRINT(ERR_DBG, "%s: Ring Full\n",dev->name); | 2740 | DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name); |
2723 | break; | 2741 | break; |
2724 | } | 2742 | } |
2725 | skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control); | 2743 | skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control); |
@@ -2739,18 +2757,15 @@ static void rx_intr_handler(ring_info_t *ring_data) | |||
2739 | HEADER_SNAP_SIZE, | 2757 | HEADER_SNAP_SIZE, |
2740 | PCI_DMA_FROMDEVICE); | 2758 | PCI_DMA_FROMDEVICE); |
2741 | } else if (nic->rxd_mode == RXD_MODE_3B) { | 2759 | } else if (nic->rxd_mode == RXD_MODE_3B) { |
2742 | pci_unmap_single(nic->pdev, (dma_addr_t) | 2760 | pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t) |
2743 | ((RxD3_t*)rxdp)->Buffer0_ptr, | 2761 | ((RxD3_t*)rxdp)->Buffer0_ptr, |
2744 | BUF0_LEN, PCI_DMA_FROMDEVICE); | 2762 | BUF0_LEN, PCI_DMA_FROMDEVICE); |
2745 | pci_unmap_single(nic->pdev, (dma_addr_t) | 2763 | pci_unmap_single(nic->pdev, (dma_addr_t) |
2746 | ((RxD3_t*)rxdp)->Buffer1_ptr, | ||
2747 | BUF1_LEN, PCI_DMA_FROMDEVICE); | ||
2748 | pci_unmap_single(nic->pdev, (dma_addr_t) | ||
2749 | ((RxD3_t*)rxdp)->Buffer2_ptr, | 2764 | ((RxD3_t*)rxdp)->Buffer2_ptr, |
2750 | dev->mtu + 4, | 2765 | dev->mtu + 4, |
2751 | PCI_DMA_FROMDEVICE); | 2766 | PCI_DMA_FROMDEVICE); |
2752 | } else { | 2767 | } else { |
2753 | pci_unmap_single(nic->pdev, (dma_addr_t) | 2768 | pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t) |
2754 | ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN, | 2769 | ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN, |
2755 | PCI_DMA_FROMDEVICE); | 2770 | PCI_DMA_FROMDEVICE); |
2756 | pci_unmap_single(nic->pdev, (dma_addr_t) | 2771 | pci_unmap_single(nic->pdev, (dma_addr_t) |
@@ -3338,7 +3353,7 @@ static void s2io_reset(nic_t * sp) | |||
3338 | 3353 | ||
3339 | /* Clear certain PCI/PCI-X fields after reset */ | 3354 | /* Clear certain PCI/PCI-X fields after reset */ |
3340 | if (sp->device_type == XFRAME_II_DEVICE) { | 3355 | if (sp->device_type == XFRAME_II_DEVICE) { |
3341 | /* Clear parity err detect bit */ | 3356 | /* Clear "detected parity error" bit */ |
3342 | pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000); | 3357 | pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000); |
3343 | 3358 | ||
3344 | /* Clearing PCIX Ecc status register */ | 3359 | /* Clearing PCIX Ecc status register */ |
@@ -3539,7 +3554,7 @@ static void restore_xmsi_data(nic_t *nic) | |||
3539 | u64 val64; | 3554 | u64 val64; |
3540 | int i; | 3555 | int i; |
3541 | 3556 | ||
3542 | for (i=0; i< nic->avail_msix_vectors; i++) { | 3557 | for (i=0; i < MAX_REQUESTED_MSI_X; i++) { |
3543 | writeq(nic->msix_info[i].addr, &bar0->xmsi_address); | 3558 | writeq(nic->msix_info[i].addr, &bar0->xmsi_address); |
3544 | writeq(nic->msix_info[i].data, &bar0->xmsi_data); | 3559 | writeq(nic->msix_info[i].data, &bar0->xmsi_data); |
3545 | val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6)); | 3560 | val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6)); |
@@ -3558,7 +3573,7 @@ static void store_xmsi_data(nic_t *nic) | |||
3558 | int i; | 3573 | int i; |
3559 | 3574 | ||
3560 | /* Store and display */ | 3575 | /* Store and display */ |
3561 | for (i=0; i< nic->avail_msix_vectors; i++) { | 3576 | for (i=0; i < MAX_REQUESTED_MSI_X; i++) { |
3562 | val64 = (BIT(15) | vBIT(i, 26, 6)); | 3577 | val64 = (BIT(15) | vBIT(i, 26, 6)); |
3563 | writeq(val64, &bar0->xmsi_access); | 3578 | writeq(val64, &bar0->xmsi_access); |
3564 | if (wait_for_msix_trans(nic, i)) { | 3579 | if (wait_for_msix_trans(nic, i)) { |
@@ -3749,101 +3764,19 @@ static int s2io_open(struct net_device *dev) | |||
3749 | if (err) { | 3764 | if (err) { |
3750 | DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n", | 3765 | DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n", |
3751 | dev->name); | 3766 | dev->name); |
3752 | if (err == -ENODEV) | 3767 | goto hw_init_failed; |
3753 | goto hw_init_failed; | ||
3754 | else | ||
3755 | goto hw_enable_failed; | ||
3756 | } | ||
3757 | |||
3758 | /* Store the values of the MSIX table in the nic_t structure */ | ||
3759 | store_xmsi_data(sp); | ||
3760 | |||
3761 | /* After proper initialization of H/W, register ISR */ | ||
3762 | if (sp->intr_type == MSI) { | ||
3763 | err = request_irq((int) sp->pdev->irq, s2io_msi_handle, | ||
3764 | IRQF_SHARED, sp->name, dev); | ||
3765 | if (err) { | ||
3766 | DBG_PRINT(ERR_DBG, "%s: MSI registration \ | ||
3767 | failed\n", dev->name); | ||
3768 | goto isr_registration_failed; | ||
3769 | } | ||
3770 | } | ||
3771 | if (sp->intr_type == MSI_X) { | ||
3772 | int i; | ||
3773 | |||
3774 | for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) { | ||
3775 | if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) { | ||
3776 | sprintf(sp->desc1, "%s:MSI-X-%d-TX", | ||
3777 | dev->name, i); | ||
3778 | err = request_irq(sp->entries[i].vector, | ||
3779 | s2io_msix_fifo_handle, 0, sp->desc1, | ||
3780 | sp->s2io_entries[i].arg); | ||
3781 | DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc1, | ||
3782 | (unsigned long long)sp->msix_info[i].addr); | ||
3783 | } else { | ||
3784 | sprintf(sp->desc2, "%s:MSI-X-%d-RX", | ||
3785 | dev->name, i); | ||
3786 | err = request_irq(sp->entries[i].vector, | ||
3787 | s2io_msix_ring_handle, 0, sp->desc2, | ||
3788 | sp->s2io_entries[i].arg); | ||
3789 | DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc2, | ||
3790 | (unsigned long long)sp->msix_info[i].addr); | ||
3791 | } | ||
3792 | if (err) { | ||
3793 | DBG_PRINT(ERR_DBG, "%s: MSI-X-%d registration \ | ||
3794 | failed\n", dev->name, i); | ||
3795 | DBG_PRINT(ERR_DBG, "Returned: %d\n", err); | ||
3796 | goto isr_registration_failed; | ||
3797 | } | ||
3798 | sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS; | ||
3799 | } | ||
3800 | } | ||
3801 | if (sp->intr_type == INTA) { | ||
3802 | err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED, | ||
3803 | sp->name, dev); | ||
3804 | if (err) { | ||
3805 | DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n", | ||
3806 | dev->name); | ||
3807 | goto isr_registration_failed; | ||
3808 | } | ||
3809 | } | 3768 | } |
3810 | 3769 | ||
3811 | if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) { | 3770 | if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) { |
3812 | DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n"); | 3771 | DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n"); |
3772 | s2io_card_down(sp); | ||
3813 | err = -ENODEV; | 3773 | err = -ENODEV; |
3814 | goto setting_mac_address_failed; | 3774 | goto hw_init_failed; |
3815 | } | 3775 | } |
3816 | 3776 | ||
3817 | netif_start_queue(dev); | 3777 | netif_start_queue(dev); |
3818 | return 0; | 3778 | return 0; |
3819 | 3779 | ||
3820 | setting_mac_address_failed: | ||
3821 | if (sp->intr_type != MSI_X) | ||
3822 | free_irq(sp->pdev->irq, dev); | ||
3823 | isr_registration_failed: | ||
3824 | del_timer_sync(&sp->alarm_timer); | ||
3825 | if (sp->intr_type == MSI_X) { | ||
3826 | int i; | ||
3827 | u16 msi_control; /* Temp variable */ | ||
3828 | |||
3829 | for (i=1; (sp->s2io_entries[i].in_use == | ||
3830 | MSIX_REGISTERED_SUCCESS); i++) { | ||
3831 | int vector = sp->entries[i].vector; | ||
3832 | void *arg = sp->s2io_entries[i].arg; | ||
3833 | |||
3834 | free_irq(vector, arg); | ||
3835 | } | ||
3836 | pci_disable_msix(sp->pdev); | ||
3837 | |||
3838 | /* Temp */ | ||
3839 | pci_read_config_word(sp->pdev, 0x42, &msi_control); | ||
3840 | msi_control &= 0xFFFE; /* Disable MSI */ | ||
3841 | pci_write_config_word(sp->pdev, 0x42, msi_control); | ||
3842 | } | ||
3843 | else if (sp->intr_type == MSI) | ||
3844 | pci_disable_msi(sp->pdev); | ||
3845 | hw_enable_failed: | ||
3846 | s2io_reset(sp); | ||
3847 | hw_init_failed: | 3780 | hw_init_failed: |
3848 | if (sp->intr_type == MSI_X) { | 3781 | if (sp->intr_type == MSI_X) { |
3849 | if (sp->entries) | 3782 | if (sp->entries) |
@@ -3874,7 +3807,7 @@ static int s2io_close(struct net_device *dev) | |||
3874 | flush_scheduled_work(); | 3807 | flush_scheduled_work(); |
3875 | netif_stop_queue(dev); | 3808 | netif_stop_queue(dev); |
3876 | /* Reset card, kill tasklet and free Tx and Rx buffers. */ | 3809 | /* Reset card, kill tasklet and free Tx and Rx buffers. */ |
3877 | s2io_card_down(sp, 1); | 3810 | s2io_card_down(sp); |
3878 | 3811 | ||
3879 | sp->device_close_flag = TRUE; /* Device is shut down. */ | 3812 | sp->device_close_flag = TRUE; /* Device is shut down. */ |
3880 | return 0; | 3813 | return 0; |
@@ -3901,13 +3834,11 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3901 | TxD_t *txdp; | 3834 | TxD_t *txdp; |
3902 | TxFIFO_element_t __iomem *tx_fifo; | 3835 | TxFIFO_element_t __iomem *tx_fifo; |
3903 | unsigned long flags; | 3836 | unsigned long flags; |
3904 | #ifdef NETIF_F_TSO | ||
3905 | int mss; | ||
3906 | #endif | ||
3907 | u16 vlan_tag = 0; | 3837 | u16 vlan_tag = 0; |
3908 | int vlan_priority = 0; | 3838 | int vlan_priority = 0; |
3909 | mac_info_t *mac_control; | 3839 | mac_info_t *mac_control; |
3910 | struct config_param *config; | 3840 | struct config_param *config; |
3841 | int offload_type; | ||
3911 | 3842 | ||
3912 | mac_control = &sp->mac_control; | 3843 | mac_control = &sp->mac_control; |
3913 | config = &sp->config; | 3844 | config = &sp->config; |
@@ -3955,13 +3886,11 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3955 | return 0; | 3886 | return 0; |
3956 | } | 3887 | } |
3957 | 3888 | ||
3958 | txdp->Control_1 = 0; | 3889 | offload_type = s2io_offload_type(skb); |
3959 | txdp->Control_2 = 0; | ||
3960 | #ifdef NETIF_F_TSO | 3890 | #ifdef NETIF_F_TSO |
3961 | mss = skb_shinfo(skb)->gso_size; | 3891 | if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { |
3962 | if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { | ||
3963 | txdp->Control_1 |= TXD_TCP_LSO_EN; | 3892 | txdp->Control_1 |= TXD_TCP_LSO_EN; |
3964 | txdp->Control_1 |= TXD_TCP_LSO_MSS(mss); | 3893 | txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb)); |
3965 | } | 3894 | } |
3966 | #endif | 3895 | #endif |
3967 | if (skb->ip_summed == CHECKSUM_HW) { | 3896 | if (skb->ip_summed == CHECKSUM_HW) { |
@@ -3979,10 +3908,10 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3979 | } | 3908 | } |
3980 | 3909 | ||
3981 | frg_len = skb->len - skb->data_len; | 3910 | frg_len = skb->len - skb->data_len; |
3982 | if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) { | 3911 | if (offload_type == SKB_GSO_UDP) { |
3983 | int ufo_size; | 3912 | int ufo_size; |
3984 | 3913 | ||
3985 | ufo_size = skb_shinfo(skb)->gso_size; | 3914 | ufo_size = s2io_udp_mss(skb); |
3986 | ufo_size &= ~7; | 3915 | ufo_size &= ~7; |
3987 | txdp->Control_1 |= TXD_UFO_EN; | 3916 | txdp->Control_1 |= TXD_UFO_EN; |
3988 | txdp->Control_1 |= TXD_UFO_MSS(ufo_size); | 3917 | txdp->Control_1 |= TXD_UFO_MSS(ufo_size); |
@@ -3999,16 +3928,13 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3999 | sp->ufo_in_band_v, | 3928 | sp->ufo_in_band_v, |
4000 | sizeof(u64), PCI_DMA_TODEVICE); | 3929 | sizeof(u64), PCI_DMA_TODEVICE); |
4001 | txdp++; | 3930 | txdp++; |
4002 | txdp->Control_1 = 0; | ||
4003 | txdp->Control_2 = 0; | ||
4004 | } | 3931 | } |
4005 | 3932 | ||
4006 | txdp->Buffer_Pointer = pci_map_single | 3933 | txdp->Buffer_Pointer = pci_map_single |
4007 | (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE); | 3934 | (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE); |
4008 | txdp->Host_Control = (unsigned long) skb; | 3935 | txdp->Host_Control = (unsigned long) skb; |
4009 | txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len); | 3936 | txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len); |
4010 | 3937 | if (offload_type == SKB_GSO_UDP) | |
4011 | if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) | ||
4012 | txdp->Control_1 |= TXD_UFO_EN; | 3938 | txdp->Control_1 |= TXD_UFO_EN; |
4013 | 3939 | ||
4014 | frg_cnt = skb_shinfo(skb)->nr_frags; | 3940 | frg_cnt = skb_shinfo(skb)->nr_frags; |
@@ -4023,12 +3949,12 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) | |||
4023 | (sp->pdev, frag->page, frag->page_offset, | 3949 | (sp->pdev, frag->page, frag->page_offset, |
4024 | frag->size, PCI_DMA_TODEVICE); | 3950 | frag->size, PCI_DMA_TODEVICE); |
4025 | txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size); | 3951 | txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size); |
4026 | if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) | 3952 | if (offload_type == SKB_GSO_UDP) |
4027 | txdp->Control_1 |= TXD_UFO_EN; | 3953 | txdp->Control_1 |= TXD_UFO_EN; |
4028 | } | 3954 | } |
4029 | txdp->Control_1 |= TXD_GATHER_CODE_LAST; | 3955 | txdp->Control_1 |= TXD_GATHER_CODE_LAST; |
4030 | 3956 | ||
4031 | if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) | 3957 | if (offload_type == SKB_GSO_UDP) |
4032 | frg_cnt++; /* as Txd0 was used for inband header */ | 3958 | frg_cnt++; /* as Txd0 was used for inband header */ |
4033 | 3959 | ||
4034 | tx_fifo = mac_control->tx_FIFO_start[queue]; | 3960 | tx_fifo = mac_control->tx_FIFO_start[queue]; |
@@ -4037,13 +3963,9 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) | |||
4037 | 3963 | ||
4038 | val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST | | 3964 | val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST | |
4039 | TX_FIFO_LAST_LIST); | 3965 | TX_FIFO_LAST_LIST); |
4040 | 3966 | if (offload_type) | |
4041 | #ifdef NETIF_F_TSO | ||
4042 | if (mss) | ||
4043 | val64 |= TX_FIFO_SPECIAL_FUNC; | ||
4044 | #endif | ||
4045 | if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) | ||
4046 | val64 |= TX_FIFO_SPECIAL_FUNC; | 3967 | val64 |= TX_FIFO_SPECIAL_FUNC; |
3968 | |||
4047 | writeq(val64, &tx_fifo->List_Control); | 3969 | writeq(val64, &tx_fifo->List_Control); |
4048 | 3970 | ||
4049 | mmiowb(); | 3971 | mmiowb(); |
@@ -4077,13 +3999,41 @@ s2io_alarm_handle(unsigned long data) | |||
4077 | mod_timer(&sp->alarm_timer, jiffies + HZ / 2); | 3999 | mod_timer(&sp->alarm_timer, jiffies + HZ / 2); |
4078 | } | 4000 | } |
4079 | 4001 | ||
4002 | static int s2io_chk_rx_buffers(nic_t *sp, int rng_n) | ||
4003 | { | ||
4004 | int rxb_size, level; | ||
4005 | |||
4006 | if (!sp->lro) { | ||
4007 | rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]); | ||
4008 | level = rx_buffer_level(sp, rxb_size, rng_n); | ||
4009 | |||
4010 | if ((level == PANIC) && (!TASKLET_IN_USE)) { | ||
4011 | int ret; | ||
4012 | DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__); | ||
4013 | DBG_PRINT(INTR_DBG, "PANIC levels\n"); | ||
4014 | if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) { | ||
4015 | DBG_PRINT(ERR_DBG, "Out of memory in %s", | ||
4016 | __FUNCTION__); | ||
4017 | clear_bit(0, (&sp->tasklet_status)); | ||
4018 | return -1; | ||
4019 | } | ||
4020 | clear_bit(0, (&sp->tasklet_status)); | ||
4021 | } else if (level == LOW) | ||
4022 | tasklet_schedule(&sp->task); | ||
4023 | |||
4024 | } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) { | ||
4025 | DBG_PRINT(ERR_DBG, "%s:Out of memory", sp->dev->name); | ||
4026 | DBG_PRINT(ERR_DBG, " in Rx Intr!!\n"); | ||
4027 | } | ||
4028 | return 0; | ||
4029 | } | ||
4030 | |||
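The s2io_chk_rx_buffers() helper added above centralizes the replenish logic that the MSI, MSI-X ring and INTA interrupt handlers each used to carry a copy of: when a ring drops to a critical level it is refilled on the spot, when it is merely low a tasklet is scheduled to do the refill later. The sketch below shows the same refill-on-threshold pattern as standalone C; the ring structure, level thresholds and refill policy are illustrative stand-ins, not s2io symbols.

    /* Standalone sketch of the refill-on-threshold pattern; names,
     * thresholds and refill policy are illustrative only. */
    #include <stdio.h>

    enum fill_level { LEVEL_OK, LEVEL_LOW, LEVEL_PANIC };

    struct ring {
        int bufs_left;
        int size;
    };

    static enum fill_level ring_fill_level(const struct ring *r)
    {
        if (r->bufs_left < r->size / 8)
            return LEVEL_PANIC;     /* critically low: refill now */
        if (r->bufs_left < r->size / 2)
            return LEVEL_LOW;       /* low: defer the refill */
        return LEVEL_OK;
    }

    static int ring_refill(struct ring *r)
    {
        r->bufs_left = r->size;     /* pretend allocation succeeded */
        return 0;
    }

    static int check_rx_buffers(struct ring *r)
    {
        switch (ring_fill_level(r)) {
        case LEVEL_PANIC:
            if (ring_refill(r))
                return -1;          /* out of memory */
            break;
        case LEVEL_LOW:
            printf("would schedule a deferred refill\n");
            break;
        default:
            break;
        }
        return 0;
    }

    int main(void)
    {
        struct ring r = { .bufs_left = 3, .size = 128 };

        return check_rx_buffers(&r);
    }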
4080 | static irqreturn_t | 4031 | static irqreturn_t |
4081 | s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs) | 4032 | s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs) |
4082 | { | 4033 | { |
4083 | struct net_device *dev = (struct net_device *) dev_id; | 4034 | struct net_device *dev = (struct net_device *) dev_id; |
4084 | nic_t *sp = dev->priv; | 4035 | nic_t *sp = dev->priv; |
4085 | int i; | 4036 | int i; |
4086 | int ret; | ||
4087 | mac_info_t *mac_control; | 4037 | mac_info_t *mac_control; |
4088 | struct config_param *config; | 4038 | struct config_param *config; |
4089 | 4039 | ||
@@ -4105,35 +4055,8 @@ s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs) | |||
4105 | * reallocate the buffers from the interrupt handler itself, | 4055 | * reallocate the buffers from the interrupt handler itself, |
4106 | * else schedule a tasklet to reallocate the buffers. | 4056 | * else schedule a tasklet to reallocate the buffers. |
4107 | */ | 4057 | */ |
4108 | for (i = 0; i < config->rx_ring_num; i++) { | 4058 | for (i = 0; i < config->rx_ring_num; i++) |
4109 | if (!sp->lro) { | 4059 | s2io_chk_rx_buffers(sp, i); |
4110 | int rxb_size = atomic_read(&sp->rx_bufs_left[i]); | ||
4111 | int level = rx_buffer_level(sp, rxb_size, i); | ||
4112 | |||
4113 | if ((level == PANIC) && (!TASKLET_IN_USE)) { | ||
4114 | DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", | ||
4115 | dev->name); | ||
4116 | DBG_PRINT(INTR_DBG, "PANIC levels\n"); | ||
4117 | if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) { | ||
4118 | DBG_PRINT(ERR_DBG, "%s:Out of memory", | ||
4119 | dev->name); | ||
4120 | DBG_PRINT(ERR_DBG, " in ISR!!\n"); | ||
4121 | clear_bit(0, (&sp->tasklet_status)); | ||
4122 | atomic_dec(&sp->isr_cnt); | ||
4123 | return IRQ_HANDLED; | ||
4124 | } | ||
4125 | clear_bit(0, (&sp->tasklet_status)); | ||
4126 | } else if (level == LOW) { | ||
4127 | tasklet_schedule(&sp->task); | ||
4128 | } | ||
4129 | } | ||
4130 | else if (fill_rx_buffers(sp, i) == -ENOMEM) { | ||
4131 | DBG_PRINT(ERR_DBG, "%s:Out of memory", | ||
4132 | dev->name); | ||
4133 | DBG_PRINT(ERR_DBG, " in Rx Intr!!\n"); | ||
4134 | break; | ||
4135 | } | ||
4136 | } | ||
4137 | 4060 | ||
4138 | atomic_dec(&sp->isr_cnt); | 4061 | atomic_dec(&sp->isr_cnt); |
4139 | return IRQ_HANDLED; | 4062 | return IRQ_HANDLED; |
@@ -4144,39 +4067,13 @@ s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs) | |||
4144 | { | 4067 | { |
4145 | ring_info_t *ring = (ring_info_t *)dev_id; | 4068 | ring_info_t *ring = (ring_info_t *)dev_id; |
4146 | nic_t *sp = ring->nic; | 4069 | nic_t *sp = ring->nic; |
4147 | struct net_device *dev = (struct net_device *) dev_id; | ||
4148 | int rxb_size, level, rng_n; | ||
4149 | 4070 | ||
4150 | atomic_inc(&sp->isr_cnt); | 4071 | atomic_inc(&sp->isr_cnt); |
4151 | rx_intr_handler(ring); | ||
4152 | 4072 | ||
4153 | rng_n = ring->ring_no; | 4073 | rx_intr_handler(ring); |
4154 | if (!sp->lro) { | 4074 | s2io_chk_rx_buffers(sp, ring->ring_no); |
4155 | rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]); | ||
4156 | level = rx_buffer_level(sp, rxb_size, rng_n); | ||
4157 | |||
4158 | if ((level == PANIC) && (!TASKLET_IN_USE)) { | ||
4159 | int ret; | ||
4160 | DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__); | ||
4161 | DBG_PRINT(INTR_DBG, "PANIC levels\n"); | ||
4162 | if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) { | ||
4163 | DBG_PRINT(ERR_DBG, "Out of memory in %s", | ||
4164 | __FUNCTION__); | ||
4165 | clear_bit(0, (&sp->tasklet_status)); | ||
4166 | return IRQ_HANDLED; | ||
4167 | } | ||
4168 | clear_bit(0, (&sp->tasklet_status)); | ||
4169 | } else if (level == LOW) { | ||
4170 | tasklet_schedule(&sp->task); | ||
4171 | } | ||
4172 | } | ||
4173 | else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) { | ||
4174 | DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name); | ||
4175 | DBG_PRINT(ERR_DBG, " in Rx Intr!!\n"); | ||
4176 | } | ||
4177 | 4075 | ||
4178 | atomic_dec(&sp->isr_cnt); | 4076 | atomic_dec(&sp->isr_cnt); |
4179 | |||
4180 | return IRQ_HANDLED; | 4077 | return IRQ_HANDLED; |
4181 | } | 4078 | } |
4182 | 4079 | ||
@@ -4341,37 +4238,8 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs) | |||
4341 | * else schedule a tasklet to reallocate the buffers. | 4238 | * else schedule a tasklet to reallocate the buffers. |
4342 | */ | 4239 | */ |
4343 | #ifndef CONFIG_S2IO_NAPI | 4240 | #ifndef CONFIG_S2IO_NAPI |
4344 | for (i = 0; i < config->rx_ring_num; i++) { | 4241 | for (i = 0; i < config->rx_ring_num; i++) |
4345 | if (!sp->lro) { | 4242 | s2io_chk_rx_buffers(sp, i); |
4346 | int ret; | ||
4347 | int rxb_size = atomic_read(&sp->rx_bufs_left[i]); | ||
4348 | int level = rx_buffer_level(sp, rxb_size, i); | ||
4349 | |||
4350 | if ((level == PANIC) && (!TASKLET_IN_USE)) { | ||
4351 | DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", | ||
4352 | dev->name); | ||
4353 | DBG_PRINT(INTR_DBG, "PANIC levels\n"); | ||
4354 | if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) { | ||
4355 | DBG_PRINT(ERR_DBG, "%s:Out of memory", | ||
4356 | dev->name); | ||
4357 | DBG_PRINT(ERR_DBG, " in ISR!!\n"); | ||
4358 | clear_bit(0, (&sp->tasklet_status)); | ||
4359 | atomic_dec(&sp->isr_cnt); | ||
4360 | writeq(org_mask, &bar0->general_int_mask); | ||
4361 | return IRQ_HANDLED; | ||
4362 | } | ||
4363 | clear_bit(0, (&sp->tasklet_status)); | ||
4364 | } else if (level == LOW) { | ||
4365 | tasklet_schedule(&sp->task); | ||
4366 | } | ||
4367 | } | ||
4368 | else if (fill_rx_buffers(sp, i) == -ENOMEM) { | ||
4369 | DBG_PRINT(ERR_DBG, "%s:Out of memory", | ||
4370 | dev->name); | ||
4371 | DBG_PRINT(ERR_DBG, " in Rx intr!!\n"); | ||
4372 | break; | ||
4373 | } | ||
4374 | } | ||
4375 | #endif | 4243 | #endif |
4376 | writeq(org_mask, &bar0->general_int_mask); | 4244 | writeq(org_mask, &bar0->general_int_mask); |
4377 | atomic_dec(&sp->isr_cnt); | 4245 | atomic_dec(&sp->isr_cnt); |
@@ -4401,6 +4269,8 @@ static void s2io_updt_stats(nic_t *sp) | |||
4401 | if (cnt == 5) | 4269 | if (cnt == 5) |
4402 | break; /* Updt failed */ | 4270 | break; /* Updt failed */ |
4403 | } while(1); | 4271 | } while(1); |
4272 | } else { | ||
4273 | memset(sp->mac_control.stats_info, 0, sizeof(StatInfo_t)); | ||
4404 | } | 4274 | } |
4405 | } | 4275 | } |
4406 | 4276 | ||
@@ -5035,7 +4905,8 @@ static int write_eeprom(nic_t * sp, int off, u64 data, int cnt) | |||
5035 | } | 4905 | } |
5036 | static void s2io_vpd_read(nic_t *nic) | 4906 | static void s2io_vpd_read(nic_t *nic) |
5037 | { | 4907 | { |
5038 | u8 vpd_data[256],data; | 4908 | u8 *vpd_data; |
4909 | u8 data; | ||
5039 | int i=0, cnt, fail = 0; | 4910 | int i=0, cnt, fail = 0; |
5040 | int vpd_addr = 0x80; | 4911 | int vpd_addr = 0x80; |
5041 | 4912 | ||
@@ -5048,6 +4919,10 @@ static void s2io_vpd_read(nic_t *nic) | |||
5048 | vpd_addr = 0x50; | 4919 | vpd_addr = 0x50; |
5049 | } | 4920 | } |
5050 | 4921 | ||
4922 | vpd_data = kmalloc(256, GFP_KERNEL); | ||
4923 | if (!vpd_data) | ||
4924 | return; | ||
4925 | |||
5051 | for (i = 0; i < 256; i +=4 ) { | 4926 | for (i = 0; i < 256; i +=4 ) { |
5052 | pci_write_config_byte(nic->pdev, (vpd_addr + 2), i); | 4927 | pci_write_config_byte(nic->pdev, (vpd_addr + 2), i); |
5053 | pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data); | 4928 | pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data); |
@@ -5070,6 +4945,7 @@ static void s2io_vpd_read(nic_t *nic) | |||
5070 | memset(nic->product_name, 0, vpd_data[1]); | 4945 | memset(nic->product_name, 0, vpd_data[1]); |
5071 | memcpy(nic->product_name, &vpd_data[3], vpd_data[1]); | 4946 | memcpy(nic->product_name, &vpd_data[3], vpd_data[1]); |
5072 | } | 4947 | } |
4948 | kfree(vpd_data); | ||
5073 | } | 4949 | } |
5074 | 4950 | ||
5075 | /** | 4951 | /** |
@@ -5388,7 +5264,7 @@ static int s2io_link_test(nic_t * sp, uint64_t * data) | |||
5388 | else | 5264 | else |
5389 | *data = 0; | 5265 | *data = 0; |
5390 | 5266 | ||
5391 | return 0; | 5267 | return *data; |
5392 | } | 5268 | } |
5393 | 5269 | ||
5394 | /** | 5270 | /** |
@@ -5846,6 +5722,19 @@ static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data) | |||
5846 | return 0; | 5722 | return 0; |
5847 | } | 5723 | } |
5848 | 5724 | ||
5725 | static u32 s2io_ethtool_op_get_tso(struct net_device *dev) | ||
5726 | { | ||
5727 | return (dev->features & NETIF_F_TSO) != 0; | ||
5728 | } | ||
5729 | static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data) | ||
5730 | { | ||
5731 | if (data) | ||
5732 | dev->features |= (NETIF_F_TSO | NETIF_F_TSO6); | ||
5733 | else | ||
5734 | dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); | ||
5735 | |||
5736 | return 0; | ||
5737 | } | ||
5849 | 5738 | ||
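The s2io_ethtool_op_get_tso()/s2io_ethtool_op_set_tso() callbacks added above read and toggle TSO through dev->features; enabling sets both NETIF_F_TSO and NETIF_F_TSO6 and disabling clears both, so a single ethtool TSO toggle flips IPv4 and IPv6 segmentation together for this driver. The underlying flag-mask idiom as a self-contained sketch; the FEAT_* values are made-up stand-ins for the NETIF_F_* bits.

    /* Feature-flag toggle sketch; FEAT_* are illustrative bit values. */
    #include <stdio.h>

    #define FEAT_TSO   (1u << 0)
    #define FEAT_TSO6  (1u << 1)

    static unsigned int features;

    static unsigned int get_tso(void)
    {
        return (features & FEAT_TSO) != 0;
    }

    static void set_tso(int enable)
    {
        if (enable)
            features |= (FEAT_TSO | FEAT_TSO6);
        else
            features &= ~(FEAT_TSO | FEAT_TSO6);
    }

    int main(void)
    {
        set_tso(1);
        printf("tso=%u features=0x%x\n", get_tso(), features);
        set_tso(0);
        printf("tso=%u features=0x%x\n", get_tso(), features);
        return 0;
    }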
5850 | static struct ethtool_ops netdev_ethtool_ops = { | 5739 | static struct ethtool_ops netdev_ethtool_ops = { |
5851 | .get_settings = s2io_ethtool_gset, | 5740 | .get_settings = s2io_ethtool_gset, |
@@ -5866,8 +5755,8 @@ static struct ethtool_ops netdev_ethtool_ops = { | |||
5866 | .get_sg = ethtool_op_get_sg, | 5755 | .get_sg = ethtool_op_get_sg, |
5867 | .set_sg = ethtool_op_set_sg, | 5756 | .set_sg = ethtool_op_set_sg, |
5868 | #ifdef NETIF_F_TSO | 5757 | #ifdef NETIF_F_TSO |
5869 | .get_tso = ethtool_op_get_tso, | 5758 | .get_tso = s2io_ethtool_op_get_tso, |
5870 | .set_tso = ethtool_op_set_tso, | 5759 | .set_tso = s2io_ethtool_op_set_tso, |
5871 | #endif | 5760 | #endif |
5872 | .get_ufo = ethtool_op_get_ufo, | 5761 | .get_ufo = ethtool_op_get_ufo, |
5873 | .set_ufo = ethtool_op_set_ufo, | 5762 | .set_ufo = ethtool_op_set_ufo, |
@@ -5919,7 +5808,7 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu) | |||
5919 | 5808 | ||
5920 | dev->mtu = new_mtu; | 5809 | dev->mtu = new_mtu; |
5921 | if (netif_running(dev)) { | 5810 | if (netif_running(dev)) { |
5922 | s2io_card_down(sp, 0); | 5811 | s2io_card_down(sp); |
5923 | netif_stop_queue(dev); | 5812 | netif_stop_queue(dev); |
5924 | if (s2io_card_up(sp)) { | 5813 | if (s2io_card_up(sp)) { |
5925 | DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", | 5814 | DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", |
@@ -6216,43 +6105,106 @@ static int rxd_owner_bit_reset(nic_t *sp) | |||
6216 | 6105 | ||
6217 | } | 6106 | } |
6218 | 6107 | ||
6219 | static void s2io_card_down(nic_t * sp, int flag) | 6108 | static int s2io_add_isr(nic_t * sp) |
6220 | { | 6109 | { |
6221 | int cnt = 0; | 6110 | int ret = 0; |
6222 | XENA_dev_config_t __iomem *bar0 = sp->bar0; | ||
6223 | unsigned long flags; | ||
6224 | register u64 val64 = 0; | ||
6225 | struct net_device *dev = sp->dev; | 6111 | struct net_device *dev = sp->dev; |
6112 | int err = 0; | ||
6226 | 6113 | ||
6227 | del_timer_sync(&sp->alarm_timer); | 6114 | if (sp->intr_type == MSI) |
6228 | /* If s2io_set_link task is executing, wait till it completes. */ | 6115 | ret = s2io_enable_msi(sp); |
6229 | while (test_and_set_bit(0, &(sp->link_state))) { | 6116 | else if (sp->intr_type == MSI_X) |
6230 | msleep(50); | 6117 | ret = s2io_enable_msi_x(sp); |
6118 | if (ret) { | ||
6119 | DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name); | ||
6120 | sp->intr_type = INTA; | ||
6231 | } | 6121 | } |
6232 | atomic_set(&sp->card_state, CARD_DOWN); | ||
6233 | 6122 | ||
6234 | /* disable Tx and Rx traffic on the NIC */ | 6123 | /* Store the values of the MSIX table in the nic_t structure */ |
6235 | stop_nic(sp); | 6124 | store_xmsi_data(sp); |
6236 | if (flag) { | ||
6237 | if (sp->intr_type == MSI_X) { | ||
6238 | int i; | ||
6239 | u16 msi_control; | ||
6240 | 6125 | ||
6241 | for (i=1; (sp->s2io_entries[i].in_use == | 6126 | /* After proper initialization of H/W, register ISR */ |
6242 | MSIX_REGISTERED_SUCCESS); i++) { | 6127 | if (sp->intr_type == MSI) { |
6243 | int vector = sp->entries[i].vector; | 6128 | err = request_irq((int) sp->pdev->irq, s2io_msi_handle, |
6244 | void *arg = sp->s2io_entries[i].arg; | 6129 | IRQF_SHARED, sp->name, dev); |
6130 | if (err) { | ||
6131 | pci_disable_msi(sp->pdev); | ||
6132 | DBG_PRINT(ERR_DBG, "%s: MSI registration failed\n", | ||
6133 | dev->name); | ||
6134 | return -1; | ||
6135 | } | ||
6136 | } | ||
6137 | if (sp->intr_type == MSI_X) { | ||
6138 | int i; | ||
6245 | 6139 | ||
6246 | free_irq(vector, arg); | 6140 | for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) { |
6141 | if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) { | ||
6142 | sprintf(sp->desc[i], "%s:MSI-X-%d-TX", | ||
6143 | dev->name, i); | ||
6144 | err = request_irq(sp->entries[i].vector, | ||
6145 | s2io_msix_fifo_handle, 0, sp->desc[i], | ||
6146 | sp->s2io_entries[i].arg); | ||
6147 | DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc[i], | ||
6148 | (unsigned long long)sp->msix_info[i].addr); | ||
6149 | } else { | ||
6150 | sprintf(sp->desc[i], "%s:MSI-X-%d-RX", | ||
6151 | dev->name, i); | ||
6152 | err = request_irq(sp->entries[i].vector, | ||
6153 | s2io_msix_ring_handle, 0, sp->desc[i], | ||
6154 | sp->s2io_entries[i].arg); | ||
6155 | DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc[i], | ||
6156 | (unsigned long long)sp->msix_info[i].addr); | ||
6247 | } | 6157 | } |
6248 | pci_read_config_word(sp->pdev, 0x42, &msi_control); | 6158 | if (err) { |
6249 | msi_control &= 0xFFFE; /* Disable MSI */ | 6159 | DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration " |
6250 | pci_write_config_word(sp->pdev, 0x42, msi_control); | 6160 | "failed\n", dev->name, i); |
6251 | pci_disable_msix(sp->pdev); | 6161 | DBG_PRINT(ERR_DBG, "Returned: %d\n", err); |
6252 | } else { | 6162 | return -1; |
6253 | free_irq(sp->pdev->irq, dev); | 6163 | } |
6254 | if (sp->intr_type == MSI) | 6164 | sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS; |
6255 | pci_disable_msi(sp->pdev); | 6165 | } |
6166 | } | ||
6167 | if (sp->intr_type == INTA) { | ||
6168 | err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED, | ||
6169 | sp->name, dev); | ||
6170 | if (err) { | ||
6171 | DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n", | ||
6172 | dev->name); | ||
6173 | return -1; | ||
6174 | } | ||
6175 | } | ||
6176 | return 0; | ||
6177 | } | ||
6178 | static void s2io_rem_isr(nic_t * sp) | ||
6179 | { | ||
6180 | int cnt = 0; | ||
6181 | struct net_device *dev = sp->dev; | ||
6182 | |||
6183 | if (sp->intr_type == MSI_X) { | ||
6184 | int i; | ||
6185 | u16 msi_control; | ||
6186 | |||
6187 | for (i=1; (sp->s2io_entries[i].in_use == | ||
6188 | MSIX_REGISTERED_SUCCESS); i++) { | ||
6189 | int vector = sp->entries[i].vector; | ||
6190 | void *arg = sp->s2io_entries[i].arg; | ||
6191 | |||
6192 | free_irq(vector, arg); | ||
6193 | } | ||
6194 | pci_read_config_word(sp->pdev, 0x42, &msi_control); | ||
6195 | msi_control &= 0xFFFE; /* Disable MSI */ | ||
6196 | pci_write_config_word(sp->pdev, 0x42, msi_control); | ||
6197 | |||
6198 | pci_disable_msix(sp->pdev); | ||
6199 | } else { | ||
6200 | free_irq(sp->pdev->irq, dev); | ||
6201 | if (sp->intr_type == MSI) { | ||
6202 | u16 val; | ||
6203 | |||
6204 | pci_disable_msi(sp->pdev); | ||
6205 | pci_read_config_word(sp->pdev, 0x4c, &val); | ||
6206 | val ^= 0x1; | ||
6207 | pci_write_config_word(sp->pdev, 0x4c, val); | ||
6256 | } | 6208 | } |
6257 | } | 6209 | } |
6258 | /* Waiting till all Interrupt handlers are complete */ | 6210 | /* Waiting till all Interrupt handlers are complete */ |
@@ -6263,6 +6215,26 @@ static void s2io_card_down(nic_t * sp, int flag) | |||
6263 | break; | 6215 | break; |
6264 | cnt++; | 6216 | cnt++; |
6265 | } while(cnt < 5); | 6217 | } while(cnt < 5); |
6218 | } | ||
6219 | |||
6220 | static void s2io_card_down(nic_t * sp) | ||
6221 | { | ||
6222 | int cnt = 0; | ||
6223 | XENA_dev_config_t __iomem *bar0 = sp->bar0; | ||
6224 | unsigned long flags; | ||
6225 | register u64 val64 = 0; | ||
6226 | |||
6227 | del_timer_sync(&sp->alarm_timer); | ||
6228 | /* If s2io_set_link task is executing, wait till it completes. */ | ||
6229 | while (test_and_set_bit(0, &(sp->link_state))) { | ||
6230 | msleep(50); | ||
6231 | } | ||
6232 | atomic_set(&sp->card_state, CARD_DOWN); | ||
6233 | |||
6234 | /* disable Tx and Rx traffic on the NIC */ | ||
6235 | stop_nic(sp); | ||
6236 | |||
6237 | s2io_rem_isr(sp); | ||
6266 | 6238 | ||
6267 | /* Kill tasklet. */ | 6239 | /* Kill tasklet. */ |
6268 | tasklet_kill(&sp->task); | 6240 | tasklet_kill(&sp->task); |
@@ -6314,23 +6286,16 @@ static int s2io_card_up(nic_t * sp) | |||
6314 | mac_info_t *mac_control; | 6286 | mac_info_t *mac_control; |
6315 | struct config_param *config; | 6287 | struct config_param *config; |
6316 | struct net_device *dev = (struct net_device *) sp->dev; | 6288 | struct net_device *dev = (struct net_device *) sp->dev; |
6289 | u16 interruptible; | ||
6317 | 6290 | ||
6318 | /* Initialize the H/W I/O registers */ | 6291 | /* Initialize the H/W I/O registers */ |
6319 | if (init_nic(sp) != 0) { | 6292 | if (init_nic(sp) != 0) { |
6320 | DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n", | 6293 | DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n", |
6321 | dev->name); | 6294 | dev->name); |
6295 | s2io_reset(sp); | ||
6322 | return -ENODEV; | 6296 | return -ENODEV; |
6323 | } | 6297 | } |
6324 | 6298 | ||
6325 | if (sp->intr_type == MSI) | ||
6326 | ret = s2io_enable_msi(sp); | ||
6327 | else if (sp->intr_type == MSI_X) | ||
6328 | ret = s2io_enable_msi_x(sp); | ||
6329 | if (ret) { | ||
6330 | DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name); | ||
6331 | sp->intr_type = INTA; | ||
6332 | } | ||
6333 | |||
6334 | /* | 6299 | /* |
6335 | * Initializing the Rx buffers. For now we are considering only 1 | 6300 | * Initializing the Rx buffers. For now we are considering only 1 |
6336 | * Rx ring and initializing buffers into 30 Rx blocks | 6301 | * Rx ring and initializing buffers into 30 Rx blocks |
@@ -6354,28 +6319,46 @@ static int s2io_card_up(nic_t * sp) | |||
6354 | s2io_set_multicast(dev); | 6319 | s2io_set_multicast(dev); |
6355 | 6320 | ||
6356 | if (sp->lro) { | 6321 | if (sp->lro) { |
6357 | /* Initialize max aggregatable pkts based on MTU */ | 6322 | /* Initialize max aggregatable pkts per session based on MTU */ |
6358 | sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu; | 6323 | sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu; |
6359 | /* Check if we can use (if specified) user-provided value */ | 6324 | /* Check if we can use (if specified) user-provided value */ |
6360 | if (lro_max_pkts < sp->lro_max_aggr_per_sess) | 6325 | if (lro_max_pkts < sp->lro_max_aggr_per_sess) |
6361 | sp->lro_max_aggr_per_sess = lro_max_pkts; | 6326 | sp->lro_max_aggr_per_sess = lro_max_pkts; |
6362 | } | 6327 | } |
6363 | 6328 | ||
6364 | /* Enable tasklet for the device */ | ||
6365 | tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev); | ||
6366 | |||
6367 | /* Enable Rx Traffic and interrupts on the NIC */ | 6329 | /* Enable Rx Traffic and interrupts on the NIC */ |
6368 | if (start_nic(sp)) { | 6330 | if (start_nic(sp)) { |
6369 | DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name); | 6331 | DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name); |
6370 | tasklet_kill(&sp->task); | ||
6371 | s2io_reset(sp); | 6332 | s2io_reset(sp); |
6372 | free_irq(dev->irq, dev); | 6333 | free_rx_buffers(sp); |
6334 | return -ENODEV; | ||
6335 | } | ||
6336 | |||
6337 | /* Add interrupt service routine */ | ||
6338 | if (s2io_add_isr(sp) != 0) { | ||
6339 | if (sp->intr_type == MSI_X) | ||
6340 | s2io_rem_isr(sp); | ||
6341 | s2io_reset(sp); | ||
6373 | free_rx_buffers(sp); | 6342 | free_rx_buffers(sp); |
6374 | return -ENODEV; | 6343 | return -ENODEV; |
6375 | } | 6344 | } |
6376 | 6345 | ||
6377 | S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2)); | 6346 | S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2)); |
6378 | 6347 | ||
6348 | /* Enable tasklet for the device */ | ||
6349 | tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev); | ||
6350 | |||
6351 | /* Enable select interrupts */ | ||
6352 | if (sp->intr_type != INTA) | ||
6353 | en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS); | ||
6354 | else { | ||
6355 | interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR; | ||
6356 | interruptible |= TX_PIC_INTR | RX_PIC_INTR; | ||
6357 | interruptible |= TX_MAC_INTR | RX_MAC_INTR; | ||
6358 | en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS); | ||
6359 | } | ||
6360 | |||
6361 | |||
6379 | atomic_set(&sp->card_state, CARD_UP); | 6362 | atomic_set(&sp->card_state, CARD_UP); |
6380 | return 0; | 6363 | return 0; |
6381 | } | 6364 | } |
@@ -6395,7 +6378,7 @@ static void s2io_restart_nic(unsigned long data) | |||
6395 | struct net_device *dev = (struct net_device *) data; | 6378 | struct net_device *dev = (struct net_device *) data; |
6396 | nic_t *sp = dev->priv; | 6379 | nic_t *sp = dev->priv; |
6397 | 6380 | ||
6398 | s2io_card_down(sp, 0); | 6381 | s2io_card_down(sp); |
6399 | if (s2io_card_up(sp)) { | 6382 | if (s2io_card_up(sp)) { |
6400 | DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", | 6383 | DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", |
6401 | dev->name); | 6384 | dev->name); |
@@ -6437,7 +6420,7 @@ static void s2io_tx_watchdog(struct net_device *dev) | |||
6437 | * @cksum : FCS checksum of the frame. | 6420 | * @cksum : FCS checksum of the frame. |
6438 | * @ring_no : the ring from which this RxD was extracted. | 6421 | * @ring_no : the ring from which this RxD was extracted. |
6439 | * Description: | 6422 | * Description: |
6440 | * This function is called by the Tx interrupt service routine to perform | 6423 | * This function is called by the Rx interrupt service routine to perform |
6441 | * some OS related operations on the SKB before passing it to the upper | 6424 | * some OS related operations on the SKB before passing it to the upper |
6442 | * layers. It mainly checks if the checksum is OK, if so adds it to the | 6425 | * layers. It mainly checks if the checksum is OK, if so adds it to the |
6443 | * SKBs cksum variable, increments the Rx packet count and passes the SKB | 6426 | * SKBs cksum variable, increments the Rx packet count and passes the SKB |
@@ -6697,33 +6680,6 @@ static void s2io_init_pci(nic_t * sp) | |||
6697 | pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd); | 6680 | pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd); |
6698 | } | 6681 | } |
6699 | 6682 | ||
6700 | MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>"); | ||
6701 | MODULE_LICENSE("GPL"); | ||
6702 | MODULE_VERSION(DRV_VERSION); | ||
6703 | |||
6704 | module_param(tx_fifo_num, int, 0); | ||
6705 | module_param(rx_ring_num, int, 0); | ||
6706 | module_param(rx_ring_mode, int, 0); | ||
6707 | module_param_array(tx_fifo_len, uint, NULL, 0); | ||
6708 | module_param_array(rx_ring_sz, uint, NULL, 0); | ||
6709 | module_param_array(rts_frm_len, uint, NULL, 0); | ||
6710 | module_param(use_continuous_tx_intrs, int, 1); | ||
6711 | module_param(rmac_pause_time, int, 0); | ||
6712 | module_param(mc_pause_threshold_q0q3, int, 0); | ||
6713 | module_param(mc_pause_threshold_q4q7, int, 0); | ||
6714 | module_param(shared_splits, int, 0); | ||
6715 | module_param(tmac_util_period, int, 0); | ||
6716 | module_param(rmac_util_period, int, 0); | ||
6717 | module_param(bimodal, bool, 0); | ||
6718 | module_param(l3l4hdr_size, int , 0); | ||
6719 | #ifndef CONFIG_S2IO_NAPI | ||
6720 | module_param(indicate_max_pkts, int, 0); | ||
6721 | #endif | ||
6722 | module_param(rxsync_frequency, int, 0); | ||
6723 | module_param(intr_type, int, 0); | ||
6724 | module_param(lro, int, 0); | ||
6725 | module_param(lro_max_pkts, int, 0); | ||
6726 | |||
6727 | static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type) | 6683 | static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type) |
6728 | { | 6684 | { |
6729 | if ( tx_fifo_num > 8) { | 6685 | if ( tx_fifo_num > 8) { |
@@ -6831,8 +6787,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
6831 | } | 6787 | } |
6832 | if (dev_intr_type != MSI_X) { | 6788 | if (dev_intr_type != MSI_X) { |
6833 | if (pci_request_regions(pdev, s2io_driver_name)) { | 6789 | if (pci_request_regions(pdev, s2io_driver_name)) { |
6834 | DBG_PRINT(ERR_DBG, "Request Regions failed\n"), | 6790 | DBG_PRINT(ERR_DBG, "Request Regions failed\n"); |
6835 | pci_disable_device(pdev); | 6791 | pci_disable_device(pdev); |
6836 | return -ENODEV; | 6792 | return -ENODEV; |
6837 | } | 6793 | } |
6838 | } | 6794 | } |
@@ -6956,7 +6912,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
6956 | /* initialize the shared memory used by the NIC and the host */ | 6912 | /* initialize the shared memory used by the NIC and the host */ |
6957 | if (init_shared_mem(sp)) { | 6913 | if (init_shared_mem(sp)) { |
6958 | DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", | 6914 | DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", |
6959 | __FUNCTION__); | 6915 | dev->name); |
6960 | ret = -ENOMEM; | 6916 | ret = -ENOMEM; |
6961 | goto mem_alloc_failed; | 6917 | goto mem_alloc_failed; |
6962 | } | 6918 | } |
@@ -7093,6 +7049,9 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
7093 | dev->addr_len = ETH_ALEN; | 7049 | dev->addr_len = ETH_ALEN; |
7094 | memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN); | 7050 | memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN); |
7095 | 7051 | ||
7052 | /* reset Nic and bring it to known state */ | ||
7053 | s2io_reset(sp); | ||
7054 | |||
7096 | /* | 7055 | /* |
7097 | * Initialize the tasklet status and link state flags | 7056 | * Initialize the tasklet status and link state flags |
7098 | * and the card state parameter | 7057 | * and the card state parameter |
@@ -7130,11 +7089,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
7130 | goto register_failed; | 7089 | goto register_failed; |
7131 | } | 7090 | } |
7132 | s2io_vpd_read(sp); | 7091 | s2io_vpd_read(sp); |
7133 | DBG_PRINT(ERR_DBG, "%s: Neterion %s",dev->name, sp->product_name); | ||
7134 | DBG_PRINT(ERR_DBG, "(rev %d), Driver version %s\n", | ||
7135 | get_xena_rev_id(sp->pdev), | ||
7136 | s2io_driver_version); | ||
7137 | DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2005 Neterion Inc.\n"); | 7092 | DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2005 Neterion Inc.\n"); |
7093 | DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name, | ||
7094 | sp->product_name, get_xena_rev_id(sp->pdev)); | ||
7095 | DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name, | ||
7096 | s2io_driver_version); | ||
7138 | DBG_PRINT(ERR_DBG, "%s: MAC ADDR: " | 7097 | DBG_PRINT(ERR_DBG, "%s: MAC ADDR: " |
7139 | "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name, | 7098 | "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name, |
7140 | sp->def_mac_addr[0].mac_addr[0], | 7099 | sp->def_mac_addr[0].mac_addr[0], |
@@ -7435,8 +7394,13 @@ static int verify_l3_l4_lro_capable(lro_t *l_lro, struct iphdr *ip, | |||
7435 | if (ip->ihl != 5) /* IP has options */ | 7394 | if (ip->ihl != 5) /* IP has options */ |
7436 | return -1; | 7395 | return -1; |
7437 | 7396 | ||
7397 | /* If we see CE codepoint in IP header, packet is not mergeable */ | ||
7398 | if (INET_ECN_is_ce(ipv4_get_dsfield(ip))) | ||
7399 | return -1; | ||
7400 | |||
7401 | /* If we see ECE or CWR flags in TCP header, packet is not mergeable */ | ||
7438 | if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin || | 7402 | if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin || |
7439 | !tcp->ack) { | 7403 | tcp->ece || tcp->cwr || !tcp->ack) { |
7440 | /* | 7404 | /* |
7441 | * Currently recognize only the ack control word and | 7405 | * Currently recognize only the ack control word and |
7442 | * any other control field being set would result in | 7406 | * any other control field being set would result in |
@@ -7590,18 +7554,16 @@ static void queue_rx_frame(struct sk_buff *skb) | |||
7590 | static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, | 7554 | static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, |
7591 | u32 tcp_len) | 7555 | u32 tcp_len) |
7592 | { | 7556 | { |
7593 | struct sk_buff *tmp, *first = lro->parent; | 7557 | struct sk_buff *first = lro->parent; |
7594 | 7558 | ||
7595 | first->len += tcp_len; | 7559 | first->len += tcp_len; |
7596 | first->data_len = lro->frags_len; | 7560 | first->data_len = lro->frags_len; |
7597 | skb_pull(skb, (skb->len - tcp_len)); | 7561 | skb_pull(skb, (skb->len - tcp_len)); |
7598 | if ((tmp = skb_shinfo(first)->frag_list)) { | 7562 | if (skb_shinfo(first)->frag_list) |
7599 | while (tmp->next) | 7563 | lro->last_frag->next = skb; |
7600 | tmp = tmp->next; | ||
7601 | tmp->next = skb; | ||
7602 | } | ||
7603 | else | 7564 | else |
7604 | skb_shinfo(first)->frag_list = skb; | 7565 | skb_shinfo(first)->frag_list = skb; |
7566 | lro->last_frag = skb; | ||
7605 | sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++; | 7567 | sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++; |
7606 | return; | 7568 | return; |
7607 | } | 7569 | } |
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h index c43f52179708..5ed49c3be1e9 100644 --- a/drivers/net/s2io.h +++ b/drivers/net/s2io.h | |||
@@ -719,6 +719,7 @@ struct msix_info_st { | |||
719 | /* Data structure to represent a LRO session */ | 719 | /* Data structure to represent a LRO session */ |
720 | typedef struct lro { | 720 | typedef struct lro { |
721 | struct sk_buff *parent; | 721 | struct sk_buff *parent; |
722 | struct sk_buff *last_frag; | ||
722 | u8 *l2h; | 723 | u8 *l2h; |
723 | struct iphdr *iph; | 724 | struct iphdr *iph; |
724 | struct tcphdr *tcph; | 725 | struct tcphdr *tcph; |
@@ -829,8 +830,7 @@ struct s2io_nic { | |||
829 | #define MSIX_FLG 0xA5 | 830 | #define MSIX_FLG 0xA5 |
830 | struct msix_entry *entries; | 831 | struct msix_entry *entries; |
831 | struct s2io_msix_entry *s2io_entries; | 832 | struct s2io_msix_entry *s2io_entries; |
832 | char desc1[35]; | 833 | char desc[MAX_REQUESTED_MSI_X][25]; |
833 | char desc2[35]; | ||
834 | 834 | ||
835 | int avail_msix_vectors; /* No. of MSI-X vectors granted by system */ | 835 | int avail_msix_vectors; /* No. of MSI-X vectors granted by system */ |
836 | 836 | ||
@@ -1002,7 +1002,7 @@ static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag); | |||
1002 | static struct ethtool_ops netdev_ethtool_ops; | 1002 | static struct ethtool_ops netdev_ethtool_ops; |
1003 | static void s2io_set_link(unsigned long data); | 1003 | static void s2io_set_link(unsigned long data); |
1004 | static int s2io_set_swapper(nic_t * sp); | 1004 | static int s2io_set_swapper(nic_t * sp); |
1005 | static void s2io_card_down(nic_t *nic, int flag); | 1005 | static void s2io_card_down(nic_t *nic); |
1006 | static int s2io_card_up(nic_t *nic); | 1006 | static int s2io_card_up(nic_t *nic); |
1007 | static int get_xena_rev_id(struct pci_dev *pdev); | 1007 | static int get_xena_rev_id(struct pci_dev *pdev); |
1008 | static void restore_xmsi_data(nic_t *nic); | 1008 | static void restore_xmsi_data(nic_t *nic); |
@@ -1012,4 +1012,13 @@ static void clear_lro_session(lro_t *lro); | |||
1012 | static void queue_rx_frame(struct sk_buff *skb); | 1012 | static void queue_rx_frame(struct sk_buff *skb); |
1013 | static void update_L3L4_header(nic_t *sp, lro_t *lro); | 1013 | static void update_L3L4_header(nic_t *sp, lro_t *lro); |
1014 | static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, u32 tcp_len); | 1014 | static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, u32 tcp_len); |
1015 | |||
1016 | #define s2io_tcp_mss(skb) skb_shinfo(skb)->gso_size | ||
1017 | #define s2io_udp_mss(skb) skb_shinfo(skb)->gso_size | ||
1018 | #define s2io_offload_type(skb) skb_shinfo(skb)->gso_type | ||
1019 | |||
1020 | #define S2IO_PARM_INT(X, def_val) \ | ||
1021 | static unsigned int X = def_val;\ | ||
1022 | module_param(X , uint, 0); | ||
1023 | |||
1015 | #endif /* _S2IO_H */ | 1024 | #endif /* _S2IO_H */ |
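The S2IO_PARM_INT() macro added at the end of s2io.h replaces the long run of module_param() calls deleted from s2io.c above: one invocation both defines the tunable with its default value and registers it as a module parameter. For illustration only (the parameter name and default below are hypothetical; the real ones live in s2io.c), an invocation such as S2IO_PARM_INT(tx_fifo_num, 1) expands roughly to:

    /* Hypothetical invocation: */
    S2IO_PARM_INT(tx_fifo_num, 1);

    /* ...which the preprocessor turns into: */
    static unsigned int tx_fifo_num = 1;
    module_param(tx_fifo_num , uint, 0);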
diff --git a/drivers/net/seeq8005.c b/drivers/net/seeq8005.c index efd0f235020f..01392bca0223 100644 --- a/drivers/net/seeq8005.c +++ b/drivers/net/seeq8005.c | |||
@@ -742,7 +742,7 @@ module_param(irq, int, 0); | |||
742 | MODULE_PARM_DESC(io, "SEEQ 8005 I/O base address"); | 742 | MODULE_PARM_DESC(io, "SEEQ 8005 I/O base address"); |
743 | MODULE_PARM_DESC(irq, "SEEQ 8005 IRQ number"); | 743 | MODULE_PARM_DESC(irq, "SEEQ 8005 IRQ number"); |
744 | 744 | ||
745 | int init_module(void) | 745 | int __init init_module(void) |
746 | { | 746 | { |
747 | dev_seeq = seeq8005_probe(-1); | 747 | dev_seeq = seeq8005_probe(-1); |
748 | if (IS_ERR(dev_seeq)) | 748 | if (IS_ERR(dev_seeq)) |
diff --git a/drivers/net/sk98lin/h/xmac_ii.h b/drivers/net/sk98lin/h/xmac_ii.h index 2b19f8ad0318..7f8e6d0084c7 100644 --- a/drivers/net/sk98lin/h/xmac_ii.h +++ b/drivers/net/sk98lin/h/xmac_ii.h | |||
@@ -1473,7 +1473,7 @@ extern "C" { | |||
1473 | #define GM_TXCR_FORCE_JAM (1<<15) /* Bit 15: Force Jam / Flow-Control */ | 1473 | #define GM_TXCR_FORCE_JAM (1<<15) /* Bit 15: Force Jam / Flow-Control */ |
1474 | #define GM_TXCR_CRC_DIS (1<<14) /* Bit 14: Disable insertion of CRC */ | 1474 | #define GM_TXCR_CRC_DIS (1<<14) /* Bit 14: Disable insertion of CRC */ |
1475 | #define GM_TXCR_PAD_DIS (1<<13) /* Bit 13: Disable padding of packets */ | 1475 | #define GM_TXCR_PAD_DIS (1<<13) /* Bit 13: Disable padding of packets */ |
1476 | #define GM_TXCR_COL_THR_MSK (1<<10) /* Bit 12..10: Collision Threshold */ | 1476 | #define GM_TXCR_COL_THR_MSK (7<<10) /* Bit 12..10: Collision Threshold */ |
1477 | 1477 | ||
1478 | #define TX_COL_THR(x) (SHIFT10(x) & GM_TXCR_COL_THR_MSK) | 1478 | #define TX_COL_THR(x) (SHIFT10(x) & GM_TXCR_COL_THR_MSK) |
1479 | 1479 | ||
diff --git a/drivers/net/skge.c b/drivers/net/skge.c index 82200bfaa8ed..ad878dfddef4 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c | |||
@@ -516,10 +516,7 @@ static int skge_set_pauseparam(struct net_device *dev, | |||
516 | /* Chip internal frequency for clock calculations */ | 516 | /* Chip internal frequency for clock calculations */ |
517 | static inline u32 hwkhz(const struct skge_hw *hw) | 517 | static inline u32 hwkhz(const struct skge_hw *hw) |
518 | { | 518 | { |
519 | if (hw->chip_id == CHIP_ID_GENESIS) | 519 | return (hw->chip_id == CHIP_ID_GENESIS) ? 53125 : 78125; |
520 | return 53215; /* or: 53.125 MHz */ | ||
521 | else | ||
522 | return 78215; /* or: 78.125 MHz */ | ||
523 | } | 520 | } |
524 | 521 | ||
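The hwkhz() change above fixes the clock constants (53.125 MHz for Genesis, 78.125 MHz otherwise, expressed in kHz, where the old code had 53215/78215) and collapses the branch into a ternary. Those kHz values feed the chip's time/clock conversions; a small standalone sketch of a microseconds-to-ticks helper built on them, with function names that are illustrative rather than skge's:

    /* Clock conversion sketch; chip_khz() mirrors the corrected constants,
     * usecs_to_clk() is an illustrative helper, not the driver's. */
    #include <stdio.h>

    static unsigned int chip_khz(int is_genesis)
    {
        return is_genesis ? 53125 : 78125;  /* 53.125 MHz / 78.125 MHz */
    }

    static unsigned int usecs_to_clk(int is_genesis, unsigned int usec)
    {
        return chip_khz(is_genesis) * usec / 1000;
    }

    int main(void)
    {
        printf("100 us on Genesis     = %u ticks\n", usecs_to_clk(1, 100));
        printf("100 us on other chips = %u ticks\n", usecs_to_clk(0, 100));
        return 0;
    }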
525 | /* Chip HZ to microseconds */ | 522 | /* Chip HZ to microseconds */ |
@@ -2214,6 +2211,7 @@ static int skge_up(struct net_device *dev) | |||
2214 | skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F); | 2211 | skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F); |
2215 | skge_led(skge, LED_MODE_ON); | 2212 | skge_led(skge, LED_MODE_ON); |
2216 | 2213 | ||
2214 | netif_poll_enable(dev); | ||
2217 | return 0; | 2215 | return 0; |
2218 | 2216 | ||
2219 | free_rx_ring: | 2217 | free_rx_ring: |
@@ -2282,6 +2280,7 @@ static int skge_down(struct net_device *dev) | |||
2282 | 2280 | ||
2283 | skge_led(skge, LED_MODE_OFF); | 2281 | skge_led(skge, LED_MODE_OFF); |
2284 | 2282 | ||
2283 | netif_poll_disable(dev); | ||
2285 | skge_tx_clean(skge); | 2284 | skge_tx_clean(skge); |
2286 | skge_rx_clean(skge); | 2285 | skge_rx_clean(skge); |
2287 | 2286 | ||
diff --git a/drivers/net/skge.h b/drivers/net/skge.h index ed19ff47ce11..593387b3c0dd 100644 --- a/drivers/net/skge.h +++ b/drivers/net/skge.h | |||
@@ -1734,11 +1734,11 @@ enum { | |||
1734 | GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */ | 1734 | GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */ |
1735 | GM_TXCR_CRC_DIS = 1<<14, /* Bit 14: Disable insertion of CRC */ | 1735 | GM_TXCR_CRC_DIS = 1<<14, /* Bit 14: Disable insertion of CRC */ |
1736 | GM_TXCR_PAD_DIS = 1<<13, /* Bit 13: Disable padding of packets */ | 1736 | GM_TXCR_PAD_DIS = 1<<13, /* Bit 13: Disable padding of packets */ |
1737 | GM_TXCR_COL_THR_MSK = 1<<10, /* Bit 12..10: Collision Threshold */ | 1737 | GM_TXCR_COL_THR_MSK = 7<<10, /* Bit 12..10: Collision Threshold */ |
1738 | }; | 1738 | }; |
1739 | 1739 | ||
1740 | #define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK) | 1740 | #define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK) |
1741 | #define TX_COL_DEF 0x04 | 1741 | #define TX_COL_DEF 0x04 /* late collision after 64 bytes */ |
1742 | 1742 | ||
1743 | /* GM_RX_CTRL 16 bit r/w Receive Control Register */ | 1743 | /* GM_RX_CTRL 16 bit r/w Receive Control Register */ |
1744 | enum { | 1744 | enum { |
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 418f169a6a31..933e87f1cc68 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c | |||
@@ -50,7 +50,7 @@ | |||
50 | #include "sky2.h" | 50 | #include "sky2.h" |
51 | 51 | ||
52 | #define DRV_NAME "sky2" | 52 | #define DRV_NAME "sky2" |
53 | #define DRV_VERSION "1.4" | 53 | #define DRV_VERSION "1.5" |
54 | #define PFX DRV_NAME " " | 54 | #define PFX DRV_NAME " " |
55 | 55 | ||
56 | /* | 56 | /* |
@@ -65,6 +65,7 @@ | |||
65 | #define RX_MAX_PENDING (RX_LE_SIZE/2 - 2) | 65 | #define RX_MAX_PENDING (RX_LE_SIZE/2 - 2) |
66 | #define RX_DEF_PENDING RX_MAX_PENDING | 66 | #define RX_DEF_PENDING RX_MAX_PENDING |
67 | #define RX_SKB_ALIGN 8 | 67 | #define RX_SKB_ALIGN 8 |
68 | #define RX_BUF_WRITE 16 | ||
68 | 69 | ||
69 | #define TX_RING_SIZE 512 | 70 | #define TX_RING_SIZE 512 |
70 | #define TX_DEF_PENDING (TX_RING_SIZE - 1) | 71 | #define TX_DEF_PENDING (TX_RING_SIZE - 1) |
@@ -232,9 +233,10 @@ static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state) | |||
232 | if (hw->ports > 1) | 233 | if (hw->ports > 1) |
233 | reg1 |= PCI_Y2_PHY2_COMA; | 234 | reg1 |= PCI_Y2_PHY2_COMA; |
234 | } | 235 | } |
236 | sky2_pci_write32(hw, PCI_DEV_REG1, reg1); | ||
237 | udelay(100); | ||
235 | 238 | ||
236 | if (hw->chip_id == CHIP_ID_YUKON_EC_U) { | 239 | if (hw->chip_id == CHIP_ID_YUKON_EC_U) { |
237 | sky2_write16(hw, B0_CTST, Y2_HW_WOL_ON); | ||
238 | sky2_pci_write32(hw, PCI_DEV_REG3, 0); | 240 | sky2_pci_write32(hw, PCI_DEV_REG3, 0); |
239 | reg1 = sky2_pci_read32(hw, PCI_DEV_REG4); | 241 | reg1 = sky2_pci_read32(hw, PCI_DEV_REG4); |
240 | reg1 &= P_ASPM_CONTROL_MSK; | 242 | reg1 &= P_ASPM_CONTROL_MSK; |
@@ -242,8 +244,6 @@ static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state) | |||
242 | sky2_pci_write32(hw, PCI_DEV_REG5, 0); | 244 | sky2_pci_write32(hw, PCI_DEV_REG5, 0); |
243 | } | 245 | } |
244 | 246 | ||
245 | sky2_pci_write32(hw, PCI_DEV_REG1, reg1); | ||
246 | |||
247 | break; | 247 | break; |
248 | 248 | ||
249 | case PCI_D3hot: | 249 | case PCI_D3hot: |
@@ -255,6 +255,7 @@ static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state) | |||
255 | else | 255 | else |
256 | reg1 |= (PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD); | 256 | reg1 |= (PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD); |
257 | sky2_pci_write32(hw, PCI_DEV_REG1, reg1); | 257 | sky2_pci_write32(hw, PCI_DEV_REG1, reg1); |
258 | udelay(100); | ||
258 | 259 | ||
259 | if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) | 260 | if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) |
260 | sky2_write8(hw, B2_Y2_CLK_GATE, 0); | 261 | sky2_write8(hw, B2_Y2_CLK_GATE, 0); |
@@ -1159,7 +1160,7 @@ static unsigned tx_le_req(const struct sk_buff *skb) | |||
1159 | count = sizeof(dma_addr_t) / sizeof(u32); | 1160 | count = sizeof(dma_addr_t) / sizeof(u32); |
1160 | count += skb_shinfo(skb)->nr_frags * count; | 1161 | count += skb_shinfo(skb)->nr_frags * count; |
1161 | 1162 | ||
1162 | if (skb_shinfo(skb)->gso_size) | 1163 | if (skb_is_gso(skb)) |
1163 | ++count; | 1164 | ++count; |
1164 | 1165 | ||
1165 | if (skb->ip_summed == CHECKSUM_HW) | 1166 | if (skb->ip_summed == CHECKSUM_HW) |
@@ -1389,7 +1390,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done) | |||
1389 | } | 1390 | } |
1390 | 1391 | ||
1391 | sky2->tx_cons = put; | 1392 | sky2->tx_cons = put; |
1392 | if (tx_avail(sky2) > MAX_SKB_TX_LE) | 1393 | if (tx_avail(sky2) > MAX_SKB_TX_LE + 4) |
1393 | netif_wake_queue(dev); | 1394 | netif_wake_queue(dev); |
1394 | } | 1395 | } |
1395 | 1396 | ||
@@ -1888,9 +1889,6 @@ resubmit: | |||
1888 | re->skb->ip_summed = CHECKSUM_NONE; | 1889 | re->skb->ip_summed = CHECKSUM_NONE; |
1889 | sky2_rx_add(sky2, re->mapaddr); | 1890 | sky2_rx_add(sky2, re->mapaddr); |
1890 | 1891 | ||
1891 | /* Tell receiver about new buffers. */ | ||
1892 | sky2_put_idx(sky2->hw, rxqaddr[sky2->port], sky2->rx_put); | ||
1893 | |||
1894 | return skb; | 1892 | return skb; |
1895 | 1893 | ||
1896 | oversize: | 1894 | oversize: |
@@ -1937,7 +1935,9 @@ static inline int sky2_more_work(const struct sky2_hw *hw) | |||
1937 | /* Process status response ring */ | 1935 | /* Process status response ring */ |
1938 | static int sky2_status_intr(struct sky2_hw *hw, int to_do) | 1936 | static int sky2_status_intr(struct sky2_hw *hw, int to_do) |
1939 | { | 1937 | { |
1938 | struct sky2_port *sky2; | ||
1940 | int work_done = 0; | 1939 | int work_done = 0; |
1940 | unsigned buf_write[2] = { 0, 0 }; | ||
1941 | u16 hwidx = sky2_read16(hw, STAT_PUT_IDX); | 1941 | u16 hwidx = sky2_read16(hw, STAT_PUT_IDX); |
1942 | 1942 | ||
1943 | rmb(); | 1943 | rmb(); |
@@ -1945,7 +1945,6 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do) | |||
1945 | while (hw->st_idx != hwidx) { | 1945 | while (hw->st_idx != hwidx) { |
1946 | struct sky2_status_le *le = hw->st_le + hw->st_idx; | 1946 | struct sky2_status_le *le = hw->st_le + hw->st_idx; |
1947 | struct net_device *dev; | 1947 | struct net_device *dev; |
1948 | struct sky2_port *sky2; | ||
1949 | struct sk_buff *skb; | 1948 | struct sk_buff *skb; |
1950 | u32 status; | 1949 | u32 status; |
1951 | u16 length; | 1950 | u16 length; |
@@ -1978,6 +1977,14 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do) | |||
1978 | #endif | 1977 | #endif |
1979 | netif_receive_skb(skb); | 1978 | netif_receive_skb(skb); |
1980 | 1979 | ||
1980 | /* Update receiver after 16 frames */ | ||
1981 | if (++buf_write[le->link] == RX_BUF_WRITE) { | ||
1982 | sky2_put_idx(hw, rxqaddr[le->link], | ||
1983 | sky2->rx_put); | ||
1984 | buf_write[le->link] = 0; | ||
1985 | } | ||
1986 | |||
1987 | /* Stop after net poll weight */ | ||
1981 | if (++work_done >= to_do) | 1988 | if (++work_done >= to_do) |
1982 | goto exit_loop; | 1989 | goto exit_loop; |
1983 | break; | 1990 | break; |
@@ -2016,6 +2023,16 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do) | |||
2016 | } | 2023 | } |
2017 | 2024 | ||
2018 | exit_loop: | 2025 | exit_loop: |
2026 | if (buf_write[0]) { | ||
2027 | sky2 = netdev_priv(hw->dev[0]); | ||
2028 | sky2_put_idx(hw, Q_R1, sky2->rx_put); | ||
2029 | } | ||
2030 | |||
2031 | if (buf_write[1]) { | ||
2032 | sky2 = netdev_priv(hw->dev[1]); | ||
2033 | sky2_put_idx(hw, Q_R2, sky2->rx_put); | ||
2034 | } | ||
2035 | |||
2019 | return work_done; | 2036 | return work_done; |
2020 | } | 2037 | } |
2021 | 2038 | ||
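Taken together, the sky2 hunks above stop publishing the Rx put index to the chip after every received frame: resubmitted buffers are now announced in batches of RX_BUF_WRITE (16) per port inside the status loop, with a final sky2_put_idx() flush at exit_loop for any remainder. The same batched-doorbell idea in a self-contained sketch, where doorbell_write() is an illustrative stand-in for the MMIO write:

    /* Batched doorbell writes: publish the producer index to the device
     * every BATCH frames plus one final flush, instead of per frame. */
    #include <stdio.h>

    #define BATCH 16

    static void doorbell_write(unsigned int put_idx)
    {
        printf("MMIO write: put index = %u\n", put_idx);
    }

    static void process_frames(unsigned int nframes)
    {
        unsigned int put = 0, pending = 0, i;

        for (i = 0; i < nframes; i++) {
            put++;                      /* buffer resubmitted to the ring */
            if (++pending == BATCH) {   /* publish every BATCH frames */
                doorbell_write(put);
                pending = 0;
            }
        }
        if (pending)                    /* flush the remainder */
            doorbell_write(put);
    }

    int main(void)
    {
        process_frames(40);             /* expect writes at 16, 32 and 40 */
        return 0;
    }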
@@ -2186,9 +2203,6 @@ static int sky2_poll(struct net_device *dev0, int *budget) | |||
2186 | int work_done = 0; | 2203 | int work_done = 0; |
2187 | u32 status = sky2_read32(hw, B0_Y2_SP_EISR); | 2204 | u32 status = sky2_read32(hw, B0_Y2_SP_EISR); |
2188 | 2205 | ||
2189 | if (!~status) | ||
2190 | goto out; | ||
2191 | |||
2192 | if (status & Y2_IS_HW_ERR) | 2206 | if (status & Y2_IS_HW_ERR) |
2193 | sky2_hw_intr(hw); | 2207 | sky2_hw_intr(hw); |
2194 | 2208 | ||
@@ -2225,7 +2239,7 @@ static int sky2_poll(struct net_device *dev0, int *budget) | |||
2225 | 2239 | ||
2226 | if (sky2_more_work(hw)) | 2240 | if (sky2_more_work(hw)) |
2227 | return 1; | 2241 | return 1; |
2228 | out: | 2242 | |
2229 | netif_rx_complete(dev0); | 2243 | netif_rx_complete(dev0); |
2230 | 2244 | ||
2231 | sky2_read32(hw, B0_Y2_SP_LISR); | 2245 | sky2_read32(hw, B0_Y2_SP_LISR); |
@@ -2286,7 +2300,7 @@ static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk) | |||
2286 | } | 2300 | } |
2287 | 2301 | ||
2288 | 2302 | ||
2289 | static int __devinit sky2_reset(struct sky2_hw *hw) | 2303 | static int sky2_reset(struct sky2_hw *hw) |
2290 | { | 2304 | { |
2291 | u16 status; | 2305 | u16 status; |
2292 | u8 t8, pmd_type; | 2306 | u8 t8, pmd_type; |
@@ -3437,17 +3451,14 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state) | |||
3437 | return -EINVAL; | 3451 | return -EINVAL; |
3438 | 3452 | ||
3439 | del_timer_sync(&hw->idle_timer); | 3453 | del_timer_sync(&hw->idle_timer); |
3454 | netif_poll_disable(hw->dev[0]); | ||
3440 | 3455 | ||
3441 | for (i = 0; i < hw->ports; i++) { | 3456 | for (i = 0; i < hw->ports; i++) { |
3442 | struct net_device *dev = hw->dev[i]; | 3457 | struct net_device *dev = hw->dev[i]; |
3443 | 3458 | ||
3444 | if (dev) { | 3459 | if (netif_running(dev)) { |
3445 | if (!netif_running(dev)) | ||
3446 | continue; | ||
3447 | |||
3448 | sky2_down(dev); | 3460 | sky2_down(dev); |
3449 | netif_device_detach(dev); | 3461 | netif_device_detach(dev); |
3450 | netif_poll_disable(dev); | ||
3451 | } | 3462 | } |
3452 | } | 3463 | } |
3453 | 3464 | ||
@@ -3474,9 +3485,8 @@ static int sky2_resume(struct pci_dev *pdev) | |||
3474 | 3485 | ||
3475 | for (i = 0; i < hw->ports; i++) { | 3486 | for (i = 0; i < hw->ports; i++) { |
3476 | struct net_device *dev = hw->dev[i]; | 3487 | struct net_device *dev = hw->dev[i]; |
3477 | if (dev && netif_running(dev)) { | 3488 | if (netif_running(dev)) { |
3478 | netif_device_attach(dev); | 3489 | netif_device_attach(dev); |
3479 | netif_poll_enable(dev); | ||
3480 | 3490 | ||
3481 | err = sky2_up(dev); | 3491 | err = sky2_up(dev); |
3482 | if (err) { | 3492 | if (err) { |
@@ -3488,6 +3498,7 @@ static int sky2_resume(struct pci_dev *pdev) | |||
3488 | } | 3498 | } |
3489 | } | 3499 | } |
3490 | 3500 | ||
3501 | netif_poll_enable(hw->dev[0]); | ||
3491 | sky2_idle_start(hw); | 3502 | sky2_idle_start(hw); |
3492 | out: | 3503 | out: |
3493 | return err; | 3504 | return err; |
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h index 8a0bc5525f0a..2db8d19b22d1 100644 --- a/drivers/net/sky2.h +++ b/drivers/net/sky2.h | |||
@@ -1480,7 +1480,7 @@ enum { | |||
1480 | GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */ | 1480 | GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */ |
1481 | GM_TXCR_CRC_DIS = 1<<14, /* Bit 14: Disable insertion of CRC */ | 1481 | GM_TXCR_CRC_DIS = 1<<14, /* Bit 14: Disable insertion of CRC */ |
1482 | GM_TXCR_PAD_DIS = 1<<13, /* Bit 13: Disable padding of packets */ | 1482 | GM_TXCR_PAD_DIS = 1<<13, /* Bit 13: Disable padding of packets */ |
1483 | GM_TXCR_COL_THR_MSK = 1<<10, /* Bit 12..10: Collision Threshold */ | 1483 | GM_TXCR_COL_THR_MSK = 7<<10, /* Bit 12..10: Collision Threshold */ |
1484 | }; | 1484 | }; |
1485 | 1485 | ||
1486 | #define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK) | 1486 | #define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK) |
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c index d37bd860b336..0b15290df278 100644 --- a/drivers/net/smc911x.c +++ b/drivers/net/smc911x.c | |||
@@ -1092,6 +1092,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id, struct pt_regs *regs | |||
1092 | /* Spurious interrupt check */ | 1092 | /* Spurious interrupt check */ |
1093 | if ((SMC_GET_IRQ_CFG() & (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) != | 1093 | if ((SMC_GET_IRQ_CFG() & (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) != |
1094 | (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) { | 1094 | (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) { |
1095 | spin_unlock_irqrestore(&lp->lock, flags); | ||
1095 | return IRQ_NONE; | 1096 | return IRQ_NONE; |
1096 | } | 1097 | } |
1097 | 1098 | ||
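The smc911x change above releases lp->lock before the spurious-interrupt early return; previously that path left the handler with the spinlock still held, hanging the next acquirer. The general rule, drop the lock on every exit path, in a small pthread-based sketch (an ordinary userspace mutex, not the driver's spinlock):

    /* Every exit path must release the lock it took. */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int device_asserted_irq;     /* 0: spurious interrupt */

    static int handle_irq(void)
    {
        pthread_mutex_lock(&lock);

        if (!device_asserted_irq) {
            pthread_mutex_unlock(&lock);    /* don't leak the lock */
            return 0;                       /* like IRQ_NONE */
        }

        /* ... normal handling under the lock ... */
        pthread_mutex_unlock(&lock);
        return 1;                           /* like IRQ_HANDLED */
    }

    int main(void)
    {
        printf("spurious: handled=%d\n", handle_irq());
        device_asserted_irq = 1;
        printf("real:     handled=%d\n", handle_irq());
        return 0;
    }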
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c index 3d8dcb6c8758..cf62373b808b 100644 --- a/drivers/net/smc91x.c +++ b/drivers/net/smc91x.c | |||
@@ -321,12 +321,12 @@ static void smc_reset(struct net_device *dev) | |||
321 | DBG(2, "%s: %s\n", dev->name, __FUNCTION__); | 321 | DBG(2, "%s: %s\n", dev->name, __FUNCTION__); |
322 | 322 | ||
323 | /* Disable all interrupts, block TX tasklet */ | 323 | /* Disable all interrupts, block TX tasklet */ |
324 | spin_lock(&lp->lock); | 324 | spin_lock_irq(&lp->lock); |
325 | SMC_SELECT_BANK(2); | 325 | SMC_SELECT_BANK(2); |
326 | SMC_SET_INT_MASK(0); | 326 | SMC_SET_INT_MASK(0); |
327 | pending_skb = lp->pending_tx_skb; | 327 | pending_skb = lp->pending_tx_skb; |
328 | lp->pending_tx_skb = NULL; | 328 | lp->pending_tx_skb = NULL; |
329 | spin_unlock(&lp->lock); | 329 | spin_unlock_irq(&lp->lock); |
330 | 330 | ||
331 | /* free any pending tx skb */ | 331 | /* free any pending tx skb */ |
332 | if (pending_skb) { | 332 | if (pending_skb) { |
@@ -448,12 +448,12 @@ static void smc_shutdown(struct net_device *dev) | |||
448 | DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__); | 448 | DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__); |
449 | 449 | ||
450 | /* no more interrupts for me */ | 450 | /* no more interrupts for me */ |
451 | spin_lock(&lp->lock); | 451 | spin_lock_irq(&lp->lock); |
452 | SMC_SELECT_BANK(2); | 452 | SMC_SELECT_BANK(2); |
453 | SMC_SET_INT_MASK(0); | 453 | SMC_SET_INT_MASK(0); |
454 | pending_skb = lp->pending_tx_skb; | 454 | pending_skb = lp->pending_tx_skb; |
455 | lp->pending_tx_skb = NULL; | 455 | lp->pending_tx_skb = NULL; |
456 | spin_unlock(&lp->lock); | 456 | spin_unlock_irq(&lp->lock); |
457 | if (pending_skb) | 457 | if (pending_skb) |
458 | dev_kfree_skb(pending_skb); | 458 | dev_kfree_skb(pending_skb); |
459 | 459 | ||
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h index b4028049ed76..7aa7fbac8224 100644 --- a/drivers/net/smc91x.h +++ b/drivers/net/smc91x.h | |||
@@ -136,14 +136,9 @@ | |||
136 | #define SMC_CAN_USE_32BIT 0 | 136 | #define SMC_CAN_USE_32BIT 0 |
137 | #define SMC_IO_SHIFT 0 | 137 | #define SMC_IO_SHIFT 0 |
138 | #define SMC_NOWAIT 1 | 138 | #define SMC_NOWAIT 1 |
139 | #define SMC_USE_PXA_DMA 1 | ||
140 | 139 | ||
141 | #define SMC_inb(a, r) readb((a) + (r)) | ||
142 | #define SMC_inw(a, r) readw((a) + (r)) | 140 | #define SMC_inw(a, r) readw((a) + (r)) |
143 | #define SMC_inl(a, r) readl((a) + (r)) | ||
144 | #define SMC_outb(v, a, r) writeb(v, (a) + (r)) | ||
145 | #define SMC_outw(v, a, r) writew(v, (a) + (r)) | 141 | #define SMC_outw(v, a, r) writew(v, (a) + (r)) |
146 | #define SMC_outl(v, a, r) writel(v, (a) + (r)) | ||
147 | #define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) | 142 | #define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) |
148 | #define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) | 143 | #define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) |
149 | 144 | ||
@@ -189,16 +184,10 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg) | |||
189 | #define SMC_IO_SHIFT 0 | 184 | #define SMC_IO_SHIFT 0 |
190 | #define SMC_NOWAIT 1 | 185 | #define SMC_NOWAIT 1 |
191 | 186 | ||
192 | #define SMC_inb(a, r) readb((a) + (r)) | ||
193 | #define SMC_outb(v, a, r) writeb(v, (a) + (r)) | ||
194 | #define SMC_inw(a, r) readw((a) + (r)) | 187 | #define SMC_inw(a, r) readw((a) + (r)) |
195 | #define SMC_outw(v, a, r) writew(v, (a) + (r)) | 188 | #define SMC_outw(v, a, r) writew(v, (a) + (r)) |
196 | #define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) | 189 | #define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) |
197 | #define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) | 190 | #define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) |
198 | #define SMC_inl(a, r) readl((a) + (r)) | ||
199 | #define SMC_outl(v, a, r) writel(v, (a) + (r)) | ||
200 | #define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) | ||
201 | #define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) | ||
202 | 191 | ||
203 | #include <asm/mach-types.h> | 192 | #include <asm/mach-types.h> |
204 | #include <asm/arch/cpu.h> | 193 | #include <asm/arch/cpu.h> |
@@ -354,6 +343,42 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r, | |||
354 | 343 | ||
355 | #define SMC_IRQ_FLAGS (0) | 344 | #define SMC_IRQ_FLAGS (0) |
356 | 345 | ||
346 | #elif defined(CONFIG_ARCH_VERSATILE) | ||
347 | |||
348 | #define SMC_CAN_USE_8BIT 1 | ||
349 | #define SMC_CAN_USE_16BIT 1 | ||
350 | #define SMC_CAN_USE_32BIT 1 | ||
351 | #define SMC_NOWAIT 1 | ||
352 | |||
353 | #define SMC_inb(a, r) readb((a) + (r)) | ||
354 | #define SMC_inw(a, r) readw((a) + (r)) | ||
355 | #define SMC_inl(a, r) readl((a) + (r)) | ||
356 | #define SMC_outb(v, a, r) writeb(v, (a) + (r)) | ||
357 | #define SMC_outw(v, a, r) writew(v, (a) + (r)) | ||
358 | #define SMC_outl(v, a, r) writel(v, (a) + (r)) | ||
359 | #define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) | ||
360 | #define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) | ||
361 | |||
362 | #define SMC_IRQ_FLAGS (0) | ||
363 | |||
357 | #else | 382 | #else |
358 | 383 | ||
359 | #define SMC_CAN_USE_8BIT 1 | 384 | #define SMC_CAN_USE_8BIT 1 |
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c index fb1d5a8a45cf..88907218457a 100644 --- a/drivers/net/spider_net.c +++ b/drivers/net/spider_net.c | |||
@@ -84,7 +84,7 @@ MODULE_DEVICE_TABLE(pci, spider_net_pci_tbl); | |||
84 | * | 84 | * |
85 | * returns the content of the specified SMMIO register. | 85 | * returns the content of the specified SMMIO register. |
86 | */ | 86 | */ |
87 | static u32 | 87 | static inline u32 |
88 | spider_net_read_reg(struct spider_net_card *card, u32 reg) | 88 | spider_net_read_reg(struct spider_net_card *card, u32 reg) |
89 | { | 89 | { |
90 | u32 value; | 90 | u32 value; |
@@ -101,7 +101,7 @@ spider_net_read_reg(struct spider_net_card *card, u32 reg) | |||
101 | * @reg: register to write to | 101 | * @reg: register to write to |
102 | * @value: value to write into the specified SMMIO register | 102 | * @value: value to write into the specified SMMIO register |
103 | */ | 103 | */ |
104 | static void | 104 | static inline void |
105 | spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value) | 105 | spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value) |
106 | { | 106 | { |
107 | value = cpu_to_le32(value); | 107 | value = cpu_to_le32(value); |
@@ -259,39 +259,10 @@ spider_net_get_mac_address(struct net_device *netdev) | |||
259 | * | 259 | * |
260 | * returns the status as in the dmac_cmd_status field of the descriptor | 260 | * returns the status as in the dmac_cmd_status field of the descriptor |
261 | */ | 261 | */ |
262 | static enum spider_net_descr_status | 262 | static inline int |
263 | spider_net_get_descr_status(struct spider_net_descr *descr) | 263 | spider_net_get_descr_status(struct spider_net_descr *descr) |
264 | { | 264 | { |
265 | u32 cmd_status; | 265 | return descr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK; |
266 | |||
267 | cmd_status = descr->dmac_cmd_status; | ||
268 | cmd_status >>= SPIDER_NET_DESCR_IND_PROC_SHIFT; | ||
269 | /* no need to mask out any bits, as cmd_status is 32 bits wide only | ||
270 | * (and unsigned) */ | ||
271 | return cmd_status; | ||
272 | } | ||
273 | |||
274 | /** | ||
275 | * spider_net_set_descr_status -- sets the status of a descriptor | ||
276 | * @descr: descriptor to change | ||
277 | * @status: status to set in the descriptor | ||
278 | * | ||
279 | * changes the status to the specified value. Doesn't change other bits | ||
280 | * in the status | ||
281 | */ | ||
282 | static void | ||
283 | spider_net_set_descr_status(struct spider_net_descr *descr, | ||
284 | enum spider_net_descr_status status) | ||
285 | { | ||
286 | u32 cmd_status; | ||
287 | /* read the status */ | ||
288 | cmd_status = descr->dmac_cmd_status; | ||
289 | /* clean the upper 4 bits */ | ||
290 | cmd_status &= SPIDER_NET_DESCR_IND_PROC_MASKO; | ||
291 | /* add the status to it */ | ||
292 | cmd_status |= ((u32)status)<<SPIDER_NET_DESCR_IND_PROC_SHIFT; | ||
293 | /* and write it back */ | ||
294 | descr->dmac_cmd_status = cmd_status; | ||
295 | } | 266 | } |
296 | 267 | ||
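The hunk above collapses the descriptor status helpers: instead of shifting the top nibble of dmac_cmd_status down and comparing against a 4-bit enum (with a separate setter writing it back), the new code masks the word in place and compares against full 32-bit patterns, which are introduced in the spider_net.h hunk later in this diff. A small standalone sketch of the two styles:

#include <stdint.h>

#define IND_PROC_SHIFT 28
#define IND_PROC_MASK  0xF0000000u

/* old scheme: shift the top nibble down and compare against 4-bit codes */
static uint32_t get_status_old(uint32_t cmd_status)
{
        return cmd_status >> IND_PROC_SHIFT;
}

/* new scheme: mask in place and compare against full 32-bit patterns
 * such as 0xF0000000 (NOT_IN_USE) or 0xA0000000 (CARDOWNED) */
static uint32_t get_status_new(uint32_t cmd_status)
{
        return cmd_status & IND_PROC_MASK;
}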
297 | /** | 268 | /** |
@@ -328,24 +299,23 @@ spider_net_free_chain(struct spider_net_card *card, | |||
328 | static int | 299 | static int |
329 | spider_net_init_chain(struct spider_net_card *card, | 300 | spider_net_init_chain(struct spider_net_card *card, |
330 | struct spider_net_descr_chain *chain, | 301 | struct spider_net_descr_chain *chain, |
331 | struct spider_net_descr *start_descr, int no) | 302 | struct spider_net_descr *start_descr, |
303 | int direction, int no) | ||
332 | { | 304 | { |
333 | int i; | 305 | int i; |
334 | struct spider_net_descr *descr; | 306 | struct spider_net_descr *descr; |
335 | dma_addr_t buf; | 307 | dma_addr_t buf; |
336 | 308 | ||
337 | atomic_set(&card->rx_chain_refill,0); | ||
338 | |||
339 | descr = start_descr; | 309 | descr = start_descr; |
340 | memset(descr, 0, sizeof(*descr) * no); | 310 | memset(descr, 0, sizeof(*descr) * no); |
341 | 311 | ||
342 | /* set up the hardware pointers in each descriptor */ | 312 | /* set up the hardware pointers in each descriptor */ |
343 | for (i=0; i<no; i++, descr++) { | 313 | for (i=0; i<no; i++, descr++) { |
344 | spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); | 314 | descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; |
345 | 315 | ||
346 | buf = pci_map_single(card->pdev, descr, | 316 | buf = pci_map_single(card->pdev, descr, |
347 | SPIDER_NET_DESCR_SIZE, | 317 | SPIDER_NET_DESCR_SIZE, |
348 | PCI_DMA_BIDIRECTIONAL); | 318 | direction); |
349 | 319 | ||
350 | if (buf == DMA_ERROR_CODE) | 320 | if (buf == DMA_ERROR_CODE) |
351 | goto iommu_error; | 321 | goto iommu_error; |
@@ -360,10 +330,11 @@ spider_net_init_chain(struct spider_net_card *card, | |||
360 | start_descr->prev = descr-1; | 330 | start_descr->prev = descr-1; |
361 | 331 | ||
362 | descr = start_descr; | 332 | descr = start_descr; |
363 | for (i=0; i < no; i++, descr++) { | 333 | if (direction == PCI_DMA_FROMDEVICE) |
364 | descr->next_descr_addr = descr->next->bus_addr; | 334 | for (i=0; i < no; i++, descr++) |
365 | } | 335 | descr->next_descr_addr = descr->next->bus_addr; |
366 | 336 | ||
337 | spin_lock_init(&chain->lock); | ||
367 | chain->head = start_descr; | 338 | chain->head = start_descr; |
368 | chain->tail = start_descr; | 339 | chain->tail = start_descr; |
369 | 340 | ||
@@ -375,7 +346,7 @@ iommu_error: | |||
375 | if (descr->bus_addr) | 346 | if (descr->bus_addr) |
376 | pci_unmap_single(card->pdev, descr->bus_addr, | 347 | pci_unmap_single(card->pdev, descr->bus_addr, |
377 | SPIDER_NET_DESCR_SIZE, | 348 | SPIDER_NET_DESCR_SIZE, |
378 | PCI_DMA_BIDIRECTIONAL); | 349 | direction); |
379 | return -ENOMEM; | 350 | return -ENOMEM; |
380 | } | 351 | } |
381 | 352 | ||
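spider_net_init_chain now takes the DMA direction, so the TX ring can be mapped PCI_DMA_TODEVICE and the RX ring PCI_DMA_FROMDEVICE; it also initializes the new per-chain spinlock and only pre-links the hardware next_descr_addr pointers for the RX ring (TX descriptors are linked as they are handed to the card). Ignoring the DMA mapping entirely, the ring construction itself reduces to something like the following sketch:

struct model_descr {
        struct model_descr *next;
        struct model_descr *prev;
};

/* Link 'no' descriptors into a circular ring; the real function also
 * DMA-maps each descriptor and, for the RX chain only, writes the
 * hardware next_descr_addr links up front. */
static void model_init_ring(struct model_descr *start, int no)
{
        for (int i = 0; i < no; i++) {
                start[i].next = &start[(i + 1) % no];
                start[i].prev = &start[(i + no - 1) % no];
        }
}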
@@ -396,7 +367,7 @@ spider_net_free_rx_chain_contents(struct spider_net_card *card) | |||
396 | dev_kfree_skb(descr->skb); | 367 | dev_kfree_skb(descr->skb); |
397 | pci_unmap_single(card->pdev, descr->buf_addr, | 368 | pci_unmap_single(card->pdev, descr->buf_addr, |
398 | SPIDER_NET_MAX_FRAME, | 369 | SPIDER_NET_MAX_FRAME, |
399 | PCI_DMA_BIDIRECTIONAL); | 370 | PCI_DMA_FROMDEVICE); |
400 | } | 371 | } |
401 | descr = descr->next; | 372 | descr = descr->next; |
402 | } | 373 | } |
@@ -446,15 +417,16 @@ spider_net_prepare_rx_descr(struct spider_net_card *card, | |||
446 | skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset); | 417 | skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset); |
447 | /* io-mmu-map the skb */ | 418 | /* io-mmu-map the skb */ |
448 | buf = pci_map_single(card->pdev, descr->skb->data, | 419 | buf = pci_map_single(card->pdev, descr->skb->data, |
449 | SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL); | 420 | SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); |
450 | descr->buf_addr = buf; | 421 | descr->buf_addr = buf; |
451 | if (buf == DMA_ERROR_CODE) { | 422 | if (buf == DMA_ERROR_CODE) { |
452 | dev_kfree_skb_any(descr->skb); | 423 | dev_kfree_skb_any(descr->skb); |
453 | if (netif_msg_rx_err(card) && net_ratelimit()) | 424 | if (netif_msg_rx_err(card) && net_ratelimit()) |
454 | pr_err("Could not iommu-map rx buffer\n"); | 425 | pr_err("Could not iommu-map rx buffer\n"); |
455 | spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); | 426 | descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; |
456 | } else { | 427 | } else { |
457 | descr->dmac_cmd_status = SPIDER_NET_DMAC_RX_CARDOWNED; | 428 | descr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED | |
429 | SPIDER_NET_DMAC_NOINTR_COMPLETE; | ||
458 | } | 430 | } |
459 | 431 | ||
460 | return error; | 432 | return error; |
@@ -468,7 +440,7 @@ spider_net_prepare_rx_descr(struct spider_net_card *card, | |||
468 | * chip by writing to the appropriate register. DMA is enabled in | 440 | * chip by writing to the appropriate register. DMA is enabled in |
469 | * spider_net_enable_rxdmac. | 441 | * spider_net_enable_rxdmac. |
470 | */ | 442 | */ |
471 | static void | 443 | static inline void |
472 | spider_net_enable_rxchtails(struct spider_net_card *card) | 444 | spider_net_enable_rxchtails(struct spider_net_card *card) |
473 | { | 445 | { |
474 | /* assume chain is aligned correctly */ | 446 | /* assume chain is aligned correctly */ |
@@ -483,7 +455,7 @@ spider_net_enable_rxchtails(struct spider_net_card *card) | |||
483 | * spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN | 455 | * spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN |
484 | * in the GDADMACCNTR register | 456 | * in the GDADMACCNTR register |
485 | */ | 457 | */ |
486 | static void | 458 | static inline void |
487 | spider_net_enable_rxdmac(struct spider_net_card *card) | 459 | spider_net_enable_rxdmac(struct spider_net_card *card) |
488 | { | 460 | { |
489 | wmb(); | 461 | wmb(); |
@@ -500,23 +472,24 @@ spider_net_enable_rxdmac(struct spider_net_card *card) | |||
500 | static void | 472 | static void |
501 | spider_net_refill_rx_chain(struct spider_net_card *card) | 473 | spider_net_refill_rx_chain(struct spider_net_card *card) |
502 | { | 474 | { |
503 | struct spider_net_descr_chain *chain; | 475 | struct spider_net_descr_chain *chain = &card->rx_chain; |
504 | 476 | unsigned long flags; | |
505 | chain = &card->rx_chain; | ||
506 | 477 | ||
507 | /* one context doing the refill (and a second context seeing that | 478 | /* one context doing the refill (and a second context seeing that |
508 | * and omitting it) is ok. If called by NAPI, we'll be called again | 479 | * and omitting it) is ok. If called by NAPI, we'll be called again |
509 | * as spider_net_decode_one_descr is called several times. If some | 480 | * as spider_net_decode_one_descr is called several times. If some |
510 | * interrupt calls us, the NAPI is about to clean up anyway. */ | 481 | * interrupt calls us, the NAPI is about to clean up anyway. */ |
511 | if (atomic_inc_return(&card->rx_chain_refill) == 1) | 482 | if (!spin_trylock_irqsave(&chain->lock, flags)) |
512 | while (spider_net_get_descr_status(chain->head) == | 483 | return; |
513 | SPIDER_NET_DESCR_NOT_IN_USE) { | 484 | |
514 | if (spider_net_prepare_rx_descr(card, chain->head)) | 485 | while (spider_net_get_descr_status(chain->head) == |
515 | break; | 486 | SPIDER_NET_DESCR_NOT_IN_USE) { |
516 | chain->head = chain->head->next; | 487 | if (spider_net_prepare_rx_descr(card, chain->head)) |
517 | } | 488 | break; |
489 | chain->head = chain->head->next; | ||
490 | } | ||
518 | 491 | ||
519 | atomic_dec(&card->rx_chain_refill); | 492 | spin_unlock_irqrestore(&chain->lock, flags); |
520 | } | 493 | } |
521 | 494 | ||
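The refill path above swaps the rx_chain_refill atomic counter for spin_trylock_irqsave on the new chain->lock: a caller that fails the trylock knows another context is already refilling and simply returns. A user-space sketch of the same at-most-one-refiller idea, using a C11 atomic flag as a stand-in for the kernel trylock:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_flag refill_busy = ATOMIC_FLAG_INIT;

/* stand-in for spider_net_prepare_rx_descr(); returns false once no
 * more descriptors can be prepared */
static bool prepare_one_descr(void) { return false; }

static void refill_ring(void)
{
        if (atomic_flag_test_and_set(&refill_busy))
                return;                 /* another context is already refilling */

        while (prepare_one_descr())
                ;                       /* the driver also advances chain->head */

        atomic_flag_clear(&refill_busy);
}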
522 | /** | 495 | /** |
@@ -554,111 +527,6 @@ error: | |||
554 | } | 527 | } |
555 | 528 | ||
556 | /** | 529 | /** |
557 | * spider_net_release_tx_descr - processes a used tx descriptor | ||
558 | * @card: card structure | ||
559 | * @descr: descriptor to release | ||
560 | * | ||
561 | * releases a used tx descriptor (unmapping, freeing of skb) | ||
562 | */ | ||
563 | static void | ||
564 | spider_net_release_tx_descr(struct spider_net_card *card, | ||
565 | struct spider_net_descr *descr) | ||
566 | { | ||
567 | struct sk_buff *skb; | ||
568 | |||
569 | /* unmap the skb */ | ||
570 | skb = descr->skb; | ||
571 | pci_unmap_single(card->pdev, descr->buf_addr, skb->len, | ||
572 | PCI_DMA_BIDIRECTIONAL); | ||
573 | |||
574 | dev_kfree_skb_any(skb); | ||
575 | |||
576 | /* set status to not used */ | ||
577 | spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); | ||
578 | } | ||
579 | |||
580 | /** | ||
581 | * spider_net_release_tx_chain - processes sent tx descriptors | ||
582 | * @card: adapter structure | ||
583 | * @brutal: if set, don't care about whether descriptor seems to be in use | ||
584 | * | ||
585 | * returns 0 if the tx ring is empty, otherwise 1. | ||
586 | * | ||
587 | * spider_net_release_tx_chain releases the tx descriptors that spider has | ||
588 | * finished with (if non-brutal) or simply release tx descriptors (if brutal). | ||
589 | * If some other context is calling this function, we return 1 so that we're | ||
590 | * scheduled again (if we were scheduled) and will not lose initiative. | ||
591 | */ | ||
592 | static int | ||
593 | spider_net_release_tx_chain(struct spider_net_card *card, int brutal) | ||
594 | { | ||
595 | struct spider_net_descr_chain *tx_chain = &card->tx_chain; | ||
596 | enum spider_net_descr_status status; | ||
597 | |||
598 | if (atomic_inc_return(&card->tx_chain_release) != 1) { | ||
599 | atomic_dec(&card->tx_chain_release); | ||
600 | return 1; | ||
601 | } | ||
602 | |||
603 | for (;;) { | ||
604 | status = spider_net_get_descr_status(tx_chain->tail); | ||
605 | switch (status) { | ||
606 | case SPIDER_NET_DESCR_CARDOWNED: | ||
607 | if (!brutal) | ||
608 | goto out; | ||
609 | /* fallthrough, if we release the descriptors | ||
610 | * brutally (then we don't care about | ||
611 | * SPIDER_NET_DESCR_CARDOWNED) */ | ||
612 | case SPIDER_NET_DESCR_RESPONSE_ERROR: | ||
613 | case SPIDER_NET_DESCR_PROTECTION_ERROR: | ||
614 | case SPIDER_NET_DESCR_FORCE_END: | ||
615 | if (netif_msg_tx_err(card)) | ||
616 | pr_err("%s: forcing end of tx descriptor " | ||
617 | "with status x%02x\n", | ||
618 | card->netdev->name, status); | ||
619 | card->netdev_stats.tx_dropped++; | ||
620 | break; | ||
621 | |||
622 | case SPIDER_NET_DESCR_COMPLETE: | ||
623 | card->netdev_stats.tx_packets++; | ||
624 | card->netdev_stats.tx_bytes += | ||
625 | tx_chain->tail->skb->len; | ||
626 | break; | ||
627 | |||
628 | default: /* any other value (== SPIDER_NET_DESCR_NOT_IN_USE) */ | ||
629 | goto out; | ||
630 | } | ||
631 | spider_net_release_tx_descr(card, tx_chain->tail); | ||
632 | tx_chain->tail = tx_chain->tail->next; | ||
633 | } | ||
634 | out: | ||
635 | atomic_dec(&card->tx_chain_release); | ||
636 | |||
637 | netif_wake_queue(card->netdev); | ||
638 | |||
639 | if (status == SPIDER_NET_DESCR_CARDOWNED) | ||
640 | return 1; | ||
641 | return 0; | ||
642 | } | ||
643 | |||
644 | /** | ||
645 | * spider_net_cleanup_tx_ring - cleans up the TX ring | ||
646 | * @card: card structure | ||
647 | * | ||
648 | * spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use | ||
649 | * interrupts to cleanup our TX ring) and returns sent packets to the stack | ||
650 | * by freeing them | ||
651 | */ | ||
652 | static void | ||
653 | spider_net_cleanup_tx_ring(struct spider_net_card *card) | ||
654 | { | ||
655 | if ( (spider_net_release_tx_chain(card, 0)) && | ||
656 | (card->netdev->flags & IFF_UP) ) { | ||
657 | mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER); | ||
658 | } | ||
659 | } | ||
660 | |||
661 | /** | ||
662 | * spider_net_get_multicast_hash - generates hash for multicast filter table | 530 | * spider_net_get_multicast_hash - generates hash for multicast filter table |
663 | * @addr: multicast address | 531 | * @addr: multicast address |
664 | * | 532 | * |
@@ -761,97 +629,6 @@ spider_net_disable_rxdmac(struct spider_net_card *card) | |||
761 | } | 629 | } |
762 | 630 | ||
763 | /** | 631 | /** |
764 | * spider_net_stop - called upon ifconfig down | ||
765 | * @netdev: interface device structure | ||
766 | * | ||
767 | * always returns 0 | ||
768 | */ | ||
769 | int | ||
770 | spider_net_stop(struct net_device *netdev) | ||
771 | { | ||
772 | struct spider_net_card *card = netdev_priv(netdev); | ||
773 | |||
774 | tasklet_kill(&card->rxram_full_tl); | ||
775 | netif_poll_disable(netdev); | ||
776 | netif_carrier_off(netdev); | ||
777 | netif_stop_queue(netdev); | ||
778 | del_timer_sync(&card->tx_timer); | ||
779 | |||
780 | /* disable/mask all interrupts */ | ||
781 | spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0); | ||
782 | spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0); | ||
783 | spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0); | ||
784 | |||
785 | /* free_irq(netdev->irq, netdev);*/ | ||
786 | free_irq(to_pci_dev(netdev->class_dev.dev)->irq, netdev); | ||
787 | |||
788 | spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, | ||
789 | SPIDER_NET_DMA_TX_FEND_VALUE); | ||
790 | |||
791 | /* turn off DMA, force end */ | ||
792 | spider_net_disable_rxdmac(card); | ||
793 | |||
794 | /* release chains */ | ||
795 | spider_net_release_tx_chain(card, 1); | ||
796 | |||
797 | spider_net_free_chain(card, &card->tx_chain); | ||
798 | spider_net_free_chain(card, &card->rx_chain); | ||
799 | |||
800 | return 0; | ||
801 | } | ||
802 | |||
803 | /** | ||
804 | * spider_net_get_next_tx_descr - returns the next available tx descriptor | ||
805 | * @card: device structure to get descriptor from | ||
806 | * | ||
807 | * returns the address of the next descriptor, or NULL if not available. | ||
808 | */ | ||
809 | static struct spider_net_descr * | ||
810 | spider_net_get_next_tx_descr(struct spider_net_card *card) | ||
811 | { | ||
812 | /* check, if head points to not-in-use descr */ | ||
813 | if ( spider_net_get_descr_status(card->tx_chain.head) == | ||
814 | SPIDER_NET_DESCR_NOT_IN_USE ) { | ||
815 | return card->tx_chain.head; | ||
816 | } else { | ||
817 | return NULL; | ||
818 | } | ||
819 | } | ||
820 | |||
821 | /** | ||
822 | * spider_net_set_txdescr_cmdstat - sets the tx descriptor command field | ||
823 | * @descr: descriptor structure to fill out | ||
824 | * @skb: packet to consider | ||
825 | * | ||
826 | * fills out the command and status field of the descriptor structure, | ||
827 | * depending on hardware checksum settings. | ||
828 | */ | ||
829 | static void | ||
830 | spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr, | ||
831 | struct sk_buff *skb) | ||
832 | { | ||
833 | /* make sure the other fields in the descriptor are written */ | ||
834 | wmb(); | ||
835 | |||
836 | if (skb->ip_summed != CHECKSUM_HW) { | ||
837 | descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS; | ||
838 | return; | ||
839 | } | ||
840 | |||
841 | /* is packet ip? | ||
842 | * if yes: tcp? udp? */ | ||
843 | if (skb->protocol == htons(ETH_P_IP)) { | ||
844 | if (skb->nh.iph->protocol == IPPROTO_TCP) | ||
845 | descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_TCPCS; | ||
846 | else if (skb->nh.iph->protocol == IPPROTO_UDP) | ||
847 | descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_UDPCS; | ||
848 | else /* the stack should checksum non-tcp and non-udp | ||
849 | packets on its own: NETIF_F_IP_CSUM */ | ||
850 | descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS; | ||
851 | } | ||
852 | } | ||
853 | |||
854 | /** | ||
855 | * spider_net_prepare_tx_descr - fill tx descriptor with skb data | 632 | * spider_net_prepare_tx_descr - fill tx descriptor with skb data |
856 | * @card: card structure | 633 | * @card: card structure |
857 | * @descr: descriptor structure to fill out | 634 | * @descr: descriptor structure to fill out |
@@ -864,13 +641,12 @@ spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr, | |||
864 | */ | 641 | */ |
865 | static int | 642 | static int |
866 | spider_net_prepare_tx_descr(struct spider_net_card *card, | 643 | spider_net_prepare_tx_descr(struct spider_net_card *card, |
867 | struct spider_net_descr *descr, | ||
868 | struct sk_buff *skb) | 644 | struct sk_buff *skb) |
869 | { | 645 | { |
646 | struct spider_net_descr *descr = card->tx_chain.head; | ||
870 | dma_addr_t buf; | 647 | dma_addr_t buf; |
871 | 648 | ||
872 | buf = pci_map_single(card->pdev, skb->data, | 649 | buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); |
873 | skb->len, PCI_DMA_BIDIRECTIONAL); | ||
874 | if (buf == DMA_ERROR_CODE) { | 650 | if (buf == DMA_ERROR_CODE) { |
875 | if (netif_msg_tx_err(card) && net_ratelimit()) | 651 | if (netif_msg_tx_err(card) && net_ratelimit()) |
876 | pr_err("could not iommu-map packet (%p, %i). " | 652 | pr_err("could not iommu-map packet (%p, %i). " |
@@ -880,10 +656,101 @@ spider_net_prepare_tx_descr(struct spider_net_card *card, | |||
880 | 656 | ||
881 | descr->buf_addr = buf; | 657 | descr->buf_addr = buf; |
882 | descr->buf_size = skb->len; | 658 | descr->buf_size = skb->len; |
659 | descr->next_descr_addr = 0; | ||
883 | descr->skb = skb; | 660 | descr->skb = skb; |
884 | descr->data_status = 0; | 661 | descr->data_status = 0; |
885 | 662 | ||
886 | spider_net_set_txdescr_cmdstat(descr,skb); | 663 | descr->dmac_cmd_status = |
664 | SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS; | ||
665 | if (skb->protocol == htons(ETH_P_IP)) | ||
666 | switch (skb->nh.iph->protocol) { | ||
667 | case IPPROTO_TCP: | ||
668 | descr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP; | ||
669 | break; | ||
670 | case IPPROTO_UDP: | ||
671 | descr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP; | ||
672 | break; | ||
673 | } | ||
674 | |||
675 | descr->prev->next_descr_addr = descr->bus_addr; | ||
676 | |||
677 | return 0; | ||
678 | } | ||
679 | |||
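spider_net_prepare_tx_descr now builds dmac_cmd_status inline: it starts from CARDOWNED | NOCS and, for IPv4 TCP or UDP frames, ORs in the corresponding checksum bits, replacing the removed spider_net_set_txdescr_cmdstat helper. The flag selection reduces to the following sketch (constants as in the spider_net.h hunk later in this diff; is_ipv4 and l4_proto are stand-ins for fields the driver reads from the skb):

#include <stdint.h>

#define DESCR_CARDOWNED 0xA0000000u
#define DMAC_NOCS       0x00040000u
#define DMAC_TCP        0x00020000u
#define DMAC_UDP        0x00030000u

static uint32_t tx_cmd_status(int is_ipv4, int l4_proto)
{
        uint32_t cmd = DESCR_CARDOWNED | DMAC_NOCS;

        if (is_ipv4) {
                if (l4_proto == 6)              /* IPPROTO_TCP */
                        cmd |= DMAC_TCP;
                else if (l4_proto == 17)        /* IPPROTO_UDP */
                        cmd |= DMAC_UDP;
        }
        return cmd;
}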
680 | /** | ||
681 | * spider_net_release_tx_descr - processes a used tx descriptor | ||
682 | * @card: card structure | ||
683 | * @descr: descriptor to release | ||
684 | * | ||
685 | * releases a used tx descriptor (unmapping, freeing of skb) | ||
686 | */ | ||
687 | static inline void | ||
688 | spider_net_release_tx_descr(struct spider_net_card *card) | ||
689 | { | ||
690 | struct spider_net_descr *descr = card->tx_chain.tail; | ||
691 | struct sk_buff *skb; | ||
692 | |||
693 | card->tx_chain.tail = card->tx_chain.tail->next; | ||
694 | descr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE; | ||
695 | |||
696 | /* unmap the skb */ | ||
697 | skb = descr->skb; | ||
698 | pci_unmap_single(card->pdev, descr->buf_addr, skb->len, | ||
699 | PCI_DMA_TODEVICE); | ||
700 | dev_kfree_skb_any(skb); | ||
701 | } | ||
702 | |||
703 | /** | ||
704 | * spider_net_release_tx_chain - processes sent tx descriptors | ||
705 | * @card: adapter structure | ||
706 | * @brutal: if set, don't care about whether descriptor seems to be in use | ||
707 | * | ||
708 | * returns 0 if the tx ring is empty, otherwise 1. | ||
709 | * | ||
710 | * spider_net_release_tx_chain releases the tx descriptors that spider has | ||
711 | * finished with (if non-brutal) or simply release tx descriptors (if brutal). | ||
712 | * If some other context is calling this function, we return 1 so that we're | ||
713 | * scheduled again (if we were scheduled) and will not lose initiative. | ||
714 | */ | ||
715 | static int | ||
716 | spider_net_release_tx_chain(struct spider_net_card *card, int brutal) | ||
717 | { | ||
718 | struct spider_net_descr_chain *chain = &card->tx_chain; | ||
719 | int status; | ||
720 | |||
721 | spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR); | ||
722 | |||
723 | while (chain->tail != chain->head) { | ||
724 | status = spider_net_get_descr_status(chain->tail); | ||
725 | switch (status) { | ||
726 | case SPIDER_NET_DESCR_COMPLETE: | ||
727 | card->netdev_stats.tx_packets++; | ||
728 | card->netdev_stats.tx_bytes += chain->tail->skb->len; | ||
729 | break; | ||
730 | |||
731 | case SPIDER_NET_DESCR_CARDOWNED: | ||
732 | if (!brutal) | ||
733 | return 1; | ||
734 | /* fallthrough, if we release the descriptors | ||
735 | * brutally (then we don't care about | ||
736 | * SPIDER_NET_DESCR_CARDOWNED) */ | ||
737 | |||
738 | case SPIDER_NET_DESCR_RESPONSE_ERROR: | ||
739 | case SPIDER_NET_DESCR_PROTECTION_ERROR: | ||
740 | case SPIDER_NET_DESCR_FORCE_END: | ||
741 | if (netif_msg_tx_err(card)) | ||
742 | pr_err("%s: forcing end of tx descriptor " | ||
743 | "with status x%02x\n", | ||
744 | card->netdev->name, status); | ||
745 | card->netdev_stats.tx_errors++; | ||
746 | break; | ||
747 | |||
748 | default: | ||
749 | card->netdev_stats.tx_dropped++; | ||
750 | return 1; | ||
751 | } | ||
752 | spider_net_release_tx_descr(card); | ||
753 | } | ||
887 | 754 | ||
888 | return 0; | 755 | return 0; |
889 | } | 756 | } |
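The rewritten spider_net_release_tx_chain walks from the chain tail towards the head, retiring completed descriptors into tx_packets/tx_bytes, stopping at a card-owned descriptor unless the release is "brutal", and counting the error states as tx_errors. A compact stand-alone model of that walk over an array of status codes (the values mirror the top nibble of the new spider_net.h constants):

#include <stdint.h>

enum { ST_COMPLETE = 0x0, ST_RESPONSE_ERR = 0x1, ST_PROTECTION_ERR = 0x2,
       ST_FORCE_END = 0x5, ST_CARDOWNED = 0xA, ST_NOT_IN_USE = 0xF };

struct model_stats { unsigned tx_packets, tx_errors, tx_dropped; };

/* Walk ring[*tail..head) and retire descriptors; returns 1 if work remains. */
static int release_chain_model(const uint8_t *ring, int *tail, int head,
                               int brutal, struct model_stats *st)
{
        while (*tail != head) {
                switch (ring[*tail]) {
                case ST_COMPLETE:
                        st->tx_packets++;
                        break;
                case ST_CARDOWNED:
                        if (!brutal)
                                return 1;       /* hardware still owns it */
                        /* fall through */
                case ST_RESPONSE_ERR:
                case ST_PROTECTION_ERR:
                case ST_FORCE_END:
                        st->tx_errors++;
                        break;
                default:
                        st->tx_dropped++;
                        return 1;
                }
                (*tail)++;                      /* next descriptor */
        }
        return 0;
}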
@@ -896,18 +763,32 @@ spider_net_prepare_tx_descr(struct spider_net_card *card, | |||
896 | * spider_net_kick_tx_dma writes the current tx chain head as start address | 763 | * spider_net_kick_tx_dma writes the current tx chain head as start address |
897 | * of the tx descriptor chain and enables the transmission DMA engine | 764 | * of the tx descriptor chain and enables the transmission DMA engine |
898 | */ | 765 | */ |
899 | static void | 766 | static inline void |
900 | spider_net_kick_tx_dma(struct spider_net_card *card, | 767 | spider_net_kick_tx_dma(struct spider_net_card *card) |
901 | struct spider_net_descr *descr) | ||
902 | { | 768 | { |
903 | /* this is the only descriptor in the output chain. | 769 | struct spider_net_descr *descr; |
904 | * Enable TX DMA */ | ||
905 | 770 | ||
906 | spider_net_write_reg(card, SPIDER_NET_GDTDCHA, | 771 | if (spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR) & |
907 | descr->bus_addr); | 772 | SPIDER_NET_TX_DMA_EN) |
773 | goto out; | ||
908 | 774 | ||
909 | spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, | 775 | descr = card->tx_chain.tail; |
910 | SPIDER_NET_DMA_TX_VALUE); | 776 | for (;;) { |
777 | if (spider_net_get_descr_status(descr) == | ||
778 | SPIDER_NET_DESCR_CARDOWNED) { | ||
779 | spider_net_write_reg(card, SPIDER_NET_GDTDCHA, | ||
780 | descr->bus_addr); | ||
781 | spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, | ||
782 | SPIDER_NET_DMA_TX_VALUE); | ||
783 | break; | ||
784 | } | ||
785 | if (descr == card->tx_chain.head) | ||
786 | break; | ||
787 | descr = descr->next; | ||
788 | } | ||
789 | |||
790 | out: | ||
791 | mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER); | ||
911 | } | 792 | } |
912 | 793 | ||
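spider_net_kick_tx_dma no longer takes a descriptor argument: it returns early if TX DMA is already enabled, otherwise scans from the chain tail for the first card-owned descriptor, points GDTDCHA at it and writes GDTDMACCNTR, and in every case re-arms the cleanup timer. A rough, compilable model with stubbed-out helpers (the helper names are invented, not the driver's):

#include <stdint.h>

#define TX_DMA_EN     0x80000000u
#define DMA_TX_VALUE  0x80000002u       /* TX_DMA_EN | GDTDCEIDIS, per spider_net.h */

struct model_tx_descr { struct model_tx_descr *next; uint32_t bus_addr; int cardowned; };

/* Hypothetical stand-ins for the driver's register and timer helpers. */
static uint32_t read_tx_dma_ctrl(void)           { return 0; }
static void write_chain_addr(uint32_t addr)      { (void)addr; }
static void write_tx_dma_ctrl(uint32_t val)      { (void)val; }
static void rearm_cleanup_timer(void)            { }

static void kick_tx_dma_model(struct model_tx_descr *tail, struct model_tx_descr *head)
{
        struct model_tx_descr *descr;

        if (read_tx_dma_ctrl() & TX_DMA_EN)
                goto out;                       /* engine already running */

        for (descr = tail; ; descr = descr->next) {
                if (descr->cardowned) {
                        write_chain_addr(descr->bus_addr);      /* chain start */
                        write_tx_dma_ctrl(DMA_TX_VALUE);        /* enable DMA */
                        break;
                }
                if (descr == head)
                        break;                  /* nothing pending */
        }
out:
        rearm_cleanup_timer();
}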
913 | /** | 794 | /** |
@@ -915,47 +796,69 @@ spider_net_kick_tx_dma(struct spider_net_card *card, | |||
915 | * @skb: packet to send out | 796 | * @skb: packet to send out |
916 | * @netdev: interface device structure | 797 | * @netdev: interface device structure |
917 | * | 798 | * |
918 | * returns 0 on success, <0 on failure | 799 | * returns 0 on success, !0 on failure |
919 | */ | 800 | */ |
920 | static int | 801 | static int |
921 | spider_net_xmit(struct sk_buff *skb, struct net_device *netdev) | 802 | spider_net_xmit(struct sk_buff *skb, struct net_device *netdev) |
922 | { | 803 | { |
923 | struct spider_net_card *card = netdev_priv(netdev); | 804 | struct spider_net_card *card = netdev_priv(netdev); |
924 | struct spider_net_descr *descr; | 805 | struct spider_net_descr_chain *chain = &card->tx_chain; |
806 | struct spider_net_descr *descr = chain->head; | ||
807 | unsigned long flags; | ||
925 | int result; | 808 | int result; |
926 | 809 | ||
810 | spin_lock_irqsave(&chain->lock, flags); | ||
811 | |||
927 | spider_net_release_tx_chain(card, 0); | 812 | spider_net_release_tx_chain(card, 0); |
928 | 813 | ||
929 | descr = spider_net_get_next_tx_descr(card); | 814 | if (chain->head->next == chain->tail->prev) { |
815 | card->netdev_stats.tx_dropped++; | ||
816 | result = NETDEV_TX_LOCKED; | ||
817 | goto out; | ||
818 | } | ||
930 | 819 | ||
931 | if (!descr) | 820 | if (spider_net_get_descr_status(descr) != SPIDER_NET_DESCR_NOT_IN_USE) { |
932 | goto error; | 821 | result = NETDEV_TX_LOCKED; |
822 | goto out; | ||
823 | } | ||
933 | 824 | ||
934 | result = spider_net_prepare_tx_descr(card, descr, skb); | 825 | if (spider_net_prepare_tx_descr(card, skb) != 0) { |
935 | if (result) | 826 | card->netdev_stats.tx_dropped++; |
936 | goto error; | 827 | result = NETDEV_TX_BUSY; |
828 | goto out; | ||
829 | } | ||
937 | 830 | ||
831 | result = NETDEV_TX_OK; | ||
832 | |||
833 | spider_net_kick_tx_dma(card); | ||
938 | card->tx_chain.head = card->tx_chain.head->next; | 834 | card->tx_chain.head = card->tx_chain.head->next; |
939 | 835 | ||
940 | if (spider_net_get_descr_status(descr->prev) != | 836 | out: |
941 | SPIDER_NET_DESCR_CARDOWNED) { | 837 | spin_unlock_irqrestore(&chain->lock, flags); |
942 | /* make sure the current descriptor is in memory. Then | 838 | netif_wake_queue(netdev); |
943 | * kicking it on again makes sense, if the previous is not | 839 | return result; |
944 | * card-owned anymore. Check the previous descriptor twice | 840 | } |
945 | * to omit an mb() in heavy traffic cases */ | ||
946 | mb(); | ||
947 | if (spider_net_get_descr_status(descr->prev) != | ||
948 | SPIDER_NET_DESCR_CARDOWNED) | ||
949 | spider_net_kick_tx_dma(card, descr); | ||
950 | } | ||
951 | 841 | ||
952 | mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER); | 842 | /** |
843 | * spider_net_cleanup_tx_ring - cleans up the TX ring | ||
844 | * @card: card structure | ||
845 | * | ||
846 | * spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use | ||
847 | * interrupts to cleanup our TX ring) and returns sent packets to the stack | ||
848 | * by freeing them | ||
849 | */ | ||
850 | static void | ||
851 | spider_net_cleanup_tx_ring(struct spider_net_card *card) | ||
852 | { | ||
853 | unsigned long flags; | ||
953 | 854 | ||
954 | return NETDEV_TX_OK; | 855 | spin_lock_irqsave(&card->tx_chain.lock, flags); |
955 | 856 | ||
956 | error: | 857 | if ((spider_net_release_tx_chain(card, 0) != 0) && |
957 | card->netdev_stats.tx_dropped++; | 858 | (card->netdev->flags & IFF_UP)) |
958 | return NETDEV_TX_BUSY; | 859 | spider_net_kick_tx_dma(card); |
860 | |||
861 | spin_unlock_irqrestore(&card->tx_chain.lock, flags); | ||
959 | } | 862 | } |
960 | 863 | ||
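Together with the NETIF_F_LLTX flag added further down in this diff, spider_net_xmit now takes the chain lock itself and reports the outcome through NETDEV_TX_* codes: LOCKED when the ring is full or the head descriptor is still busy, BUSY when the skb could not be DMA-mapped, OK when the packet was queued and DMA kicked. The decision logic, modelled with illustrative values rather than the kernel's definitions:

/* Outcome model for the rewritten xmit path (values are illustrative,
 * not the kernel's actual NETDEV_TX_* definitions). */
enum model_tx_result { MODEL_TX_OK = 0, MODEL_TX_BUSY = 1, MODEL_TX_LOCKED = 2 };

static enum model_tx_result xmit_model(int ring_full, int head_in_use, int map_failed)
{
        if (ring_full || head_in_use)
                return MODEL_TX_LOCKED; /* dropped / try again later */
        if (map_failed)
                return MODEL_TX_BUSY;   /* could not DMA-map the skb */
        return MODEL_TX_OK;             /* queued, DMA kicked */
}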
961 | /** | 864 | /** |
@@ -1002,7 +905,7 @@ spider_net_pass_skb_up(struct spider_net_descr *descr, | |||
1002 | 905 | ||
1003 | /* unmap descriptor */ | 906 | /* unmap descriptor */ |
1004 | pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_FRAME, | 907 | pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_FRAME, |
1005 | PCI_DMA_BIDIRECTIONAL); | 908 | PCI_DMA_FROMDEVICE); |
1006 | 909 | ||
1007 | /* the cases we'll throw away the packet immediately */ | 910 | /* the cases we'll throw away the packet immediately */ |
1008 | if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) { | 911 | if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) { |
@@ -1067,14 +970,11 @@ spider_net_pass_skb_up(struct spider_net_descr *descr, | |||
1067 | static int | 970 | static int |
1068 | spider_net_decode_one_descr(struct spider_net_card *card, int napi) | 971 | spider_net_decode_one_descr(struct spider_net_card *card, int napi) |
1069 | { | 972 | { |
1070 | enum spider_net_descr_status status; | 973 | struct spider_net_descr_chain *chain = &card->rx_chain; |
1071 | struct spider_net_descr *descr; | 974 | struct spider_net_descr *descr = chain->tail; |
1072 | struct spider_net_descr_chain *chain; | 975 | int status; |
1073 | int result; | 976 | int result; |
1074 | 977 | ||
1075 | chain = &card->rx_chain; | ||
1076 | descr = chain->tail; | ||
1077 | |||
1078 | status = spider_net_get_descr_status(descr); | 978 | status = spider_net_get_descr_status(descr); |
1079 | 979 | ||
1080 | if (status == SPIDER_NET_DESCR_CARDOWNED) { | 980 | if (status == SPIDER_NET_DESCR_CARDOWNED) { |
@@ -1103,7 +1003,7 @@ spider_net_decode_one_descr(struct spider_net_card *card, int napi) | |||
1103 | card->netdev->name, status); | 1003 | card->netdev->name, status); |
1104 | card->netdev_stats.rx_dropped++; | 1004 | card->netdev_stats.rx_dropped++; |
1105 | pci_unmap_single(card->pdev, descr->buf_addr, | 1005 | pci_unmap_single(card->pdev, descr->buf_addr, |
1106 | SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL); | 1006 | SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); |
1107 | dev_kfree_skb_irq(descr->skb); | 1007 | dev_kfree_skb_irq(descr->skb); |
1108 | goto refill; | 1008 | goto refill; |
1109 | } | 1009 | } |
@@ -1119,7 +1019,7 @@ spider_net_decode_one_descr(struct spider_net_card *card, int napi) | |||
1119 | /* ok, we've got a packet in descr */ | 1019 | /* ok, we've got a packet in descr */ |
1120 | result = spider_net_pass_skb_up(descr, card, napi); | 1020 | result = spider_net_pass_skb_up(descr, card, napi); |
1121 | refill: | 1021 | refill: |
1122 | spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); | 1022 | descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; |
1123 | /* change the descriptor state: */ | 1023 | /* change the descriptor state: */ |
1124 | if (!napi) | 1024 | if (!napi) |
1125 | spider_net_refill_rx_chain(card); | 1025 | spider_net_refill_rx_chain(card); |
@@ -1291,21 +1191,6 @@ spider_net_set_mac(struct net_device *netdev, void *p) | |||
1291 | } | 1191 | } |
1292 | 1192 | ||
1293 | /** | 1193 | /** |
1294 | * spider_net_enable_txdmac - enables a TX DMA controller | ||
1295 | * @card: card structure | ||
1296 | * | ||
1297 | * spider_net_enable_txdmac enables the TX DMA controller by setting the | ||
1298 | * descriptor chain tail address | ||
1299 | */ | ||
1300 | static void | ||
1301 | spider_net_enable_txdmac(struct spider_net_card *card) | ||
1302 | { | ||
1303 | /* assume chain is aligned correctly */ | ||
1304 | spider_net_write_reg(card, SPIDER_NET_GDTDCHA, | ||
1305 | card->tx_chain.tail->bus_addr); | ||
1306 | } | ||
1307 | |||
1308 | /** | ||
1309 | * spider_net_handle_rxram_full - cleans up RX ring upon RX RAM full interrupt | 1194 | * spider_net_handle_rxram_full - cleans up RX ring upon RX RAM full interrupt |
1310 | * @card: card structure | 1195 | * @card: card structure |
1311 | * | 1196 | * |
@@ -1653,7 +1538,6 @@ spider_net_enable_card(struct spider_net_card *card) | |||
1653 | { SPIDER_NET_GMRWOLCTRL, 0 }, | 1538 | { SPIDER_NET_GMRWOLCTRL, 0 }, |
1654 | { SPIDER_NET_GTESTMD, 0x10000000 }, | 1539 | { SPIDER_NET_GTESTMD, 0x10000000 }, |
1655 | { SPIDER_NET_GTTQMSK, 0x00400040 }, | 1540 | { SPIDER_NET_GTTQMSK, 0x00400040 }, |
1656 | { SPIDER_NET_GTESTMD, 0 }, | ||
1657 | 1541 | ||
1658 | { SPIDER_NET_GMACINTEN, 0 }, | 1542 | { SPIDER_NET_GMACINTEN, 0 }, |
1659 | 1543 | ||
@@ -1692,9 +1576,6 @@ spider_net_enable_card(struct spider_net_card *card) | |||
1692 | 1576 | ||
1693 | spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE); | 1577 | spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE); |
1694 | 1578 | ||
1695 | /* set chain tail address for TX chain */ | ||
1696 | spider_net_enable_txdmac(card); | ||
1697 | |||
1698 | spider_net_write_reg(card, SPIDER_NET_GMACLENLMT, | 1579 | spider_net_write_reg(card, SPIDER_NET_GMACLENLMT, |
1699 | SPIDER_NET_LENLMT_VALUE); | 1580 | SPIDER_NET_LENLMT_VALUE); |
1700 | spider_net_write_reg(card, SPIDER_NET_GMACMODE, | 1581 | spider_net_write_reg(card, SPIDER_NET_GMACMODE, |
@@ -1709,6 +1590,9 @@ spider_net_enable_card(struct spider_net_card *card) | |||
1709 | SPIDER_NET_INT1_MASK_VALUE); | 1590 | SPIDER_NET_INT1_MASK_VALUE); |
1710 | spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, | 1591 | spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, |
1711 | SPIDER_NET_INT2_MASK_VALUE); | 1592 | SPIDER_NET_INT2_MASK_VALUE); |
1593 | |||
1594 | spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, | ||
1595 | SPIDER_NET_GDTDCEIDIS); | ||
1712 | } | 1596 | } |
1713 | 1597 | ||
1714 | /** | 1598 | /** |
@@ -1727,11 +1611,12 @@ spider_net_open(struct net_device *netdev) | |||
1727 | int result; | 1611 | int result; |
1728 | 1612 | ||
1729 | result = -ENOMEM; | 1613 | result = -ENOMEM; |
1730 | if (spider_net_init_chain(card, &card->tx_chain, | 1614 | if (spider_net_init_chain(card, &card->tx_chain, card->descr, |
1731 | card->descr, tx_descriptors)) | 1615 | PCI_DMA_TODEVICE, card->tx_desc)) |
1732 | goto alloc_tx_failed; | 1616 | goto alloc_tx_failed; |
1733 | if (spider_net_init_chain(card, &card->rx_chain, | 1617 | if (spider_net_init_chain(card, &card->rx_chain, |
1734 | card->descr + tx_descriptors, rx_descriptors)) | 1618 | card->descr + card->rx_desc, |
1619 | PCI_DMA_FROMDEVICE, card->rx_desc)) | ||
1735 | goto alloc_rx_failed; | 1620 | goto alloc_rx_failed; |
1736 | 1621 | ||
1737 | /* allocate rx skbs */ | 1622 | /* allocate rx skbs */ |
@@ -1938,7 +1823,7 @@ spider_net_workaround_rxramfull(struct spider_net_card *card) | |||
1938 | /* empty sequencer data */ | 1823 | /* empty sequencer data */ |
1939 | for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS; | 1824 | for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS; |
1940 | sequencer++) { | 1825 | sequencer++) { |
1941 | spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + | 1826 | spider_net_write_reg(card, SPIDER_NET_GSnPRGADR + |
1942 | sequencer * 8, 0x0); | 1827 | sequencer * 8, 0x0); |
1943 | for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) { | 1828 | for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) { |
1944 | spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + | 1829 | spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + |
@@ -1955,6 +1840,49 @@ spider_net_workaround_rxramfull(struct spider_net_card *card) | |||
1955 | } | 1840 | } |
1956 | 1841 | ||
1957 | /** | 1842 | /** |
1843 | * spider_net_stop - called upon ifconfig down | ||
1844 | * @netdev: interface device structure | ||
1845 | * | ||
1846 | * always returns 0 | ||
1847 | */ | ||
1848 | int | ||
1849 | spider_net_stop(struct net_device *netdev) | ||
1850 | { | ||
1851 | struct spider_net_card *card = netdev_priv(netdev); | ||
1852 | |||
1853 | tasklet_kill(&card->rxram_full_tl); | ||
1854 | netif_poll_disable(netdev); | ||
1855 | netif_carrier_off(netdev); | ||
1856 | netif_stop_queue(netdev); | ||
1857 | del_timer_sync(&card->tx_timer); | ||
1858 | |||
1859 | /* disable/mask all interrupts */ | ||
1860 | spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0); | ||
1861 | spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0); | ||
1862 | spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0); | ||
1863 | |||
1864 | /* free_irq(netdev->irq, netdev);*/ | ||
1865 | free_irq(to_pci_dev(netdev->class_dev.dev)->irq, netdev); | ||
1866 | |||
1867 | spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, | ||
1868 | SPIDER_NET_DMA_TX_FEND_VALUE); | ||
1869 | |||
1870 | /* turn off DMA, force end */ | ||
1871 | spider_net_disable_rxdmac(card); | ||
1872 | |||
1873 | /* release chains */ | ||
1874 | if (spin_trylock(&card->tx_chain.lock)) { | ||
1875 | spider_net_release_tx_chain(card, 1); | ||
1876 | spin_unlock(&card->tx_chain.lock); | ||
1877 | } | ||
1878 | |||
1879 | spider_net_free_chain(card, &card->tx_chain); | ||
1880 | spider_net_free_chain(card, &card->rx_chain); | ||
1881 | |||
1882 | return 0; | ||
1883 | } | ||
1884 | |||
1885 | /** | ||
1958 | * spider_net_tx_timeout_task - task scheduled by the watchdog timeout | 1886 | * spider_net_tx_timeout_task - task scheduled by the watchdog timeout |
1959 | * function (to be called not under interrupt status) | 1887 | * function (to be called not under interrupt status) |
1960 | * @data: data, is interface device structure | 1888 | * @data: data, is interface device structure |
@@ -1982,7 +1910,7 @@ spider_net_tx_timeout_task(void *data) | |||
1982 | goto out; | 1910 | goto out; |
1983 | 1911 | ||
1984 | spider_net_open(netdev); | 1912 | spider_net_open(netdev); |
1985 | spider_net_kick_tx_dma(card, card->tx_chain.head); | 1913 | spider_net_kick_tx_dma(card); |
1986 | netif_device_attach(netdev); | 1914 | netif_device_attach(netdev); |
1987 | 1915 | ||
1988 | out: | 1916 | out: |
@@ -2065,7 +1993,6 @@ spider_net_setup_netdev(struct spider_net_card *card) | |||
2065 | 1993 | ||
2066 | pci_set_drvdata(card->pdev, netdev); | 1994 | pci_set_drvdata(card->pdev, netdev); |
2067 | 1995 | ||
2068 | atomic_set(&card->tx_chain_release,0); | ||
2069 | card->rxram_full_tl.data = (unsigned long) card; | 1996 | card->rxram_full_tl.data = (unsigned long) card; |
2070 | card->rxram_full_tl.func = | 1997 | card->rxram_full_tl.func = |
2071 | (void (*)(unsigned long)) spider_net_handle_rxram_full; | 1998 | (void (*)(unsigned long)) spider_net_handle_rxram_full; |
@@ -2077,9 +2004,12 @@ spider_net_setup_netdev(struct spider_net_card *card) | |||
2077 | 2004 | ||
2078 | card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT; | 2005 | card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT; |
2079 | 2006 | ||
2007 | card->tx_desc = tx_descriptors; | ||
2008 | card->rx_desc = rx_descriptors; | ||
2009 | |||
2080 | spider_net_setup_netdev_ops(netdev); | 2010 | spider_net_setup_netdev_ops(netdev); |
2081 | 2011 | ||
2082 | netdev->features = NETIF_F_HW_CSUM; | 2012 | netdev->features = NETIF_F_HW_CSUM | NETIF_F_LLTX; |
2083 | /* some time: NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | | 2013 | /* some time: NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | |
2084 | * NETIF_F_HW_VLAN_FILTER */ | 2014 | * NETIF_F_HW_VLAN_FILTER */ |
2085 | 2015 | ||
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h index 3b8d951cf73c..30407cdf0892 100644 --- a/drivers/net/spider_net.h +++ b/drivers/net/spider_net.h | |||
@@ -208,7 +208,10 @@ extern char spider_net_driver_name[]; | |||
208 | #define SPIDER_NET_DMA_RX_VALUE 0x80000000 | 208 | #define SPIDER_NET_DMA_RX_VALUE 0x80000000 |
209 | #define SPIDER_NET_DMA_RX_FEND_VALUE 0x00030003 | 209 | #define SPIDER_NET_DMA_RX_FEND_VALUE 0x00030003 |
210 | /* to set TX_DMA_EN */ | 210 | /* to set TX_DMA_EN */ |
211 | #define SPIDER_NET_DMA_TX_VALUE 0x80000000 | 211 | #define SPIDER_NET_TX_DMA_EN 0x80000000 |
212 | #define SPIDER_NET_GDTDCEIDIS 0x00000002 | ||
213 | #define SPIDER_NET_DMA_TX_VALUE SPIDER_NET_TX_DMA_EN | \ | ||
214 | SPIDER_NET_GDTDCEIDIS | ||
212 | #define SPIDER_NET_DMA_TX_FEND_VALUE 0x00030003 | 215 | #define SPIDER_NET_DMA_TX_FEND_VALUE 0x00030003 |
213 | 216 | ||
214 | /* SPIDER_NET_UA_DESCR_VALUE is OR'ed with the unicast address */ | 217 | /* SPIDER_NET_UA_DESCR_VALUE is OR'ed with the unicast address */ |
@@ -329,55 +332,23 @@ enum spider_net_int2_status { | |||
329 | (~SPIDER_NET_TXINT) & \ | 332 | (~SPIDER_NET_TXINT) & \ |
330 | (~SPIDER_NET_RXINT) ) | 333 | (~SPIDER_NET_RXINT) ) |
331 | 334 | ||
332 | #define SPIDER_NET_GPREXEC 0x80000000 | 335 | #define SPIDER_NET_GPREXEC 0x80000000 |
333 | #define SPIDER_NET_GPRDAT_MASK 0x0000ffff | 336 | #define SPIDER_NET_GPRDAT_MASK 0x0000ffff |
334 | 337 | ||
335 | /* descriptor bits | 338 | #define SPIDER_NET_DMAC_NOINTR_COMPLETE 0x00800000 |
336 | * | 339 | #define SPIDER_NET_DMAC_NOCS 0x00040000 |
337 | * 1010 descriptor ready | 340 | #define SPIDER_NET_DMAC_TCP 0x00020000 |
338 | * 0 descr in middle of chain | 341 | #define SPIDER_NET_DMAC_UDP 0x00030000 |
339 | * 000 fixed to 0 | 342 | #define SPIDER_NET_TXDCEST 0x08000000 |
340 | * | 343 | |
341 | * 0 no interrupt on completion | 344 | #define SPIDER_NET_DESCR_IND_PROC_MASK 0xF0000000 |
342 | * 000 fixed to 0 | 345 | #define SPIDER_NET_DESCR_COMPLETE 0x00000000 /* used in rx and tx */ |
343 | * 1 no ipsec processing | 346 | #define SPIDER_NET_DESCR_RESPONSE_ERROR 0x10000000 /* used in rx and tx */ |
344 | * 1 last descriptor for this frame | 347 | #define SPIDER_NET_DESCR_PROTECTION_ERROR 0x20000000 /* used in rx and tx */ |
345 | * 00 no checksum | 348 | #define SPIDER_NET_DESCR_FRAME_END 0x40000000 /* used in rx */ |
346 | * 10 tcp checksum | 349 | #define SPIDER_NET_DESCR_FORCE_END 0x50000000 /* used in rx and tx */ |
347 | * 11 udp checksum | 350 | #define SPIDER_NET_DESCR_CARDOWNED 0xA0000000 /* used in rx and tx */ |
348 | * | 351 | #define SPIDER_NET_DESCR_NOT_IN_USE 0xF0000000 |
349 | * 00 fixed to 0 | ||
350 | * 0 fixed to 0 | ||
351 | * 0 no interrupt on response errors | ||
352 | * 0 no interrupt on invalid descr | ||
353 | * 0 no interrupt on dma process termination | ||
354 | * 0 no interrupt on descr chain end | ||
355 | * 0 no interrupt on descr complete | ||
356 | * | ||
357 | * 000 fixed to 0 | ||
358 | * 0 response error interrupt status | ||
359 | * 0 invalid descr status | ||
360 | * 0 dma termination status | ||
361 | * 0 descr chain end status | ||
362 | * 0 descr complete status */ | ||
363 | #define SPIDER_NET_DMAC_CMDSTAT_NOCS 0xa00c0000 | ||
364 | #define SPIDER_NET_DMAC_CMDSTAT_TCPCS 0xa00e0000 | ||
365 | #define SPIDER_NET_DMAC_CMDSTAT_UDPCS 0xa00f0000 | ||
366 | #define SPIDER_NET_DESCR_IND_PROC_SHIFT 28 | ||
367 | #define SPIDER_NET_DESCR_IND_PROC_MASKO 0x0fffffff | ||
368 | |||
369 | /* descr ready, descr is in middle of chain, get interrupt on completion */ | ||
370 | #define SPIDER_NET_DMAC_RX_CARDOWNED 0xa0800000 | ||
371 | |||
372 | enum spider_net_descr_status { | ||
373 | SPIDER_NET_DESCR_COMPLETE = 0x00, /* used in rx and tx */ | ||
374 | SPIDER_NET_DESCR_RESPONSE_ERROR = 0x01, /* used in rx and tx */ | ||
375 | SPIDER_NET_DESCR_PROTECTION_ERROR = 0x02, /* used in rx and tx */ | ||
376 | SPIDER_NET_DESCR_FRAME_END = 0x04, /* used in rx */ | ||
377 | SPIDER_NET_DESCR_FORCE_END = 0x05, /* used in rx and tx */ | ||
378 | SPIDER_NET_DESCR_CARDOWNED = 0x0a, /* used in rx and tx */ | ||
379 | SPIDER_NET_DESCR_NOT_IN_USE /* any other value */ | ||
380 | }; | ||
381 | 352 | ||
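The header hunk above replaces the 4-bit spider_net_descr_status enum and its shift/mask helpers with full-word patterns that live in the top nibble of dmac_cmd_status; each new constant is simply the old enum value shifted left by 28. A two-line check of that relationship:

#include <assert.h>
#include <stdint.h>

#define OLD_CARDOWNED   0x0Au           /* old enum value */
#define NEW_CARDOWNED   0xA0000000u     /* new full-word pattern */
#define IND_PROC_SHIFT  28

int main(void)
{
        assert(((uint32_t)OLD_CARDOWNED << IND_PROC_SHIFT) == NEW_CARDOWNED);
        return 0;
}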
382 | struct spider_net_descr { | 353 | struct spider_net_descr { |
383 | /* as defined by the hardware */ | 354 | /* as defined by the hardware */ |
@@ -398,7 +369,7 @@ struct spider_net_descr { | |||
398 | } __attribute__((aligned(32))); | 369 | } __attribute__((aligned(32))); |
399 | 370 | ||
400 | struct spider_net_descr_chain { | 371 | struct spider_net_descr_chain { |
401 | /* we walk from tail to head */ | 372 | spinlock_t lock; |
402 | struct spider_net_descr *head; | 373 | struct spider_net_descr *head; |
403 | struct spider_net_descr *tail; | 374 | struct spider_net_descr *tail; |
404 | }; | 375 | }; |
@@ -453,8 +424,6 @@ struct spider_net_card { | |||
453 | 424 | ||
454 | struct spider_net_descr_chain tx_chain; | 425 | struct spider_net_descr_chain tx_chain; |
455 | struct spider_net_descr_chain rx_chain; | 426 | struct spider_net_descr_chain rx_chain; |
456 | atomic_t rx_chain_refill; | ||
457 | atomic_t tx_chain_release; | ||
458 | 427 | ||
459 | struct net_device_stats netdev_stats; | 428 | struct net_device_stats netdev_stats; |
460 | 429 | ||
@@ -471,6 +440,9 @@ struct spider_net_card { | |||
471 | /* for ethtool */ | 440 | /* for ethtool */ |
472 | int msg_enable; | 441 | int msg_enable; |
473 | 442 | ||
443 | int rx_desc; | ||
444 | int tx_desc; | ||
445 | |||
474 | struct spider_net_descr descr[0]; | 446 | struct spider_net_descr descr[0]; |
475 | }; | 447 | }; |
476 | 448 | ||
diff --git a/drivers/net/spider_net_ethtool.c b/drivers/net/spider_net_ethtool.c index a5bb0b7633af..02209222b8c9 100644 --- a/drivers/net/spider_net_ethtool.c +++ b/drivers/net/spider_net_ethtool.c | |||
@@ -130,6 +130,18 @@ spider_net_ethtool_set_tx_csum(struct net_device *netdev, uint32_t data) | |||
130 | return 0; | 130 | return 0; |
131 | } | 131 | } |
132 | 132 | ||
133 | static void | ||
134 | spider_net_ethtool_get_ringparam(struct net_device *netdev, | ||
135 | struct ethtool_ringparam *ering) | ||
136 | { | ||
137 | struct spider_net_card *card = netdev->priv; | ||
138 | |||
139 | ering->tx_max_pending = SPIDER_NET_TX_DESCRIPTORS_MAX; | ||
140 | ering->tx_pending = card->tx_desc; | ||
141 | ering->rx_max_pending = SPIDER_NET_RX_DESCRIPTORS_MAX; | ||
142 | ering->rx_pending = card->rx_desc; | ||
143 | } | ||
144 | |||
133 | struct ethtool_ops spider_net_ethtool_ops = { | 145 | struct ethtool_ops spider_net_ethtool_ops = { |
134 | .get_settings = spider_net_ethtool_get_settings, | 146 | .get_settings = spider_net_ethtool_get_settings, |
135 | .get_drvinfo = spider_net_ethtool_get_drvinfo, | 147 | .get_drvinfo = spider_net_ethtool_get_drvinfo, |
@@ -141,5 +153,6 @@ struct ethtool_ops spider_net_ethtool_ops = { | |||
141 | .set_rx_csum = spider_net_ethtool_set_rx_csum, | 153 | .set_rx_csum = spider_net_ethtool_set_rx_csum, |
142 | .get_tx_csum = spider_net_ethtool_get_tx_csum, | 154 | .get_tx_csum = spider_net_ethtool_get_tx_csum, |
143 | .set_tx_csum = spider_net_ethtool_set_tx_csum, | 155 | .set_tx_csum = spider_net_ethtool_set_tx_csum, |
156 | .get_ringparam = spider_net_ethtool_get_ringparam, | ||
144 | }; | 157 | }; |
145 | 158 | ||
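The new get_ringparam hook reports the configured and maximum descriptor counts now stored in card->rx_desc and card->tx_desc, so ethtool -g on the interface shows the ring sizes. A minimal model of what it fills in (256 is a placeholder, not the driver's real SPIDER_NET_*_DESCRIPTORS_MAX values):

#include <stdint.h>
#include <stdio.h>

struct model_ringparam { uint32_t rx_max, rx, tx_max, tx; };

static void get_ringparam_model(struct model_ringparam *e, int rx_desc, int tx_desc)
{
        e->rx_max = 256;                /* placeholder maximums */
        e->tx_max = 256;
        e->rx = (uint32_t)rx_desc;      /* currently configured ring sizes */
        e->tx = (uint32_t)tx_desc;
}

int main(void)
{
        struct model_ringparam e;
        get_ringparam_model(&e, 64, 64);
        printf("rx %u/%u tx %u/%u\n", (unsigned)e.rx, (unsigned)e.rx_max,
               (unsigned)e.tx, (unsigned)e.tx_max);
        return 0;
}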
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c index ed1f59901ff4..c0a62b00ffc8 100644 --- a/drivers/net/starfire.c +++ b/drivers/net/starfire.c | |||
@@ -22,129 +22,13 @@ | |||
22 | 22 | ||
23 | Support and updates available at | 23 | Support and updates available at |
24 | http://www.scyld.com/network/starfire.html | 24 | http://www.scyld.com/network/starfire.html |
25 | [link no longer provides useful info -jgarzik] | ||
25 | 26 | ||
26 | ----------------------------------------------------------- | ||
27 | |||
28 | Linux kernel-specific changes: | ||
29 | |||
30 | LK1.1.1 (jgarzik): | ||
31 | - Use PCI driver interface | ||
32 | - Fix MOD_xxx races | ||
33 | - softnet fixups | ||
34 | |||
35 | LK1.1.2 (jgarzik): | ||
36 | - Merge Becker version 0.15 | ||
37 | |||
38 | LK1.1.3 (Andrew Morton) | ||
39 | - Timer cleanups | ||
40 | |||
41 | LK1.1.4 (jgarzik): | ||
42 | - Merge Becker version 1.03 | ||
43 | |||
44 | LK1.2.1 (Ion Badulescu <ionut@cs.columbia.edu>) | ||
45 | - Support hardware Rx/Tx checksumming | ||
46 | - Use the GFP firmware taken from Adaptec's Netware driver | ||
47 | |||
48 | LK1.2.2 (Ion Badulescu) | ||
49 | - Backported to 2.2.x | ||
50 | |||
51 | LK1.2.3 (Ion Badulescu) | ||
52 | - Fix the flaky mdio interface | ||
53 | - More compat clean-ups | ||
54 | |||
55 | LK1.2.4 (Ion Badulescu) | ||
56 | - More 2.2.x initialization fixes | ||
57 | |||
58 | LK1.2.5 (Ion Badulescu) | ||
59 | - Several fixes from Manfred Spraul | ||
60 | |||
61 | LK1.2.6 (Ion Badulescu) | ||
62 | - Fixed ifup/ifdown/ifup problem in 2.4.x | ||
63 | |||
64 | LK1.2.7 (Ion Badulescu) | ||
65 | - Removed unused code | ||
66 | - Made more functions static and __init | ||
67 | |||
68 | LK1.2.8 (Ion Badulescu) | ||
69 | - Quell bogus error messages, inform about the Tx threshold | ||
70 | - Removed #ifdef CONFIG_PCI, this driver is PCI only | ||
71 | |||
72 | LK1.2.9 (Ion Badulescu) | ||
73 | - Merged Jeff Garzik's changes from 2.4.4-pre5 | ||
74 | - Added 2.2.x compatibility stuff required by the above changes | ||
75 | |||
76 | LK1.2.9a (Ion Badulescu) | ||
77 | - More updates from Jeff Garzik | ||
78 | |||
79 | LK1.3.0 (Ion Badulescu) | ||
80 | - Merged zerocopy support | ||
81 | |||
82 | LK1.3.1 (Ion Badulescu) | ||
83 | - Added ethtool support | ||
84 | - Added GPIO (media change) interrupt support | ||
85 | |||
86 | LK1.3.2 (Ion Badulescu) | ||
87 | - Fixed 2.2.x compatibility issues introduced in 1.3.1 | ||
88 | - Fixed ethtool ioctl returning uninitialized memory | ||
89 | |||
90 | LK1.3.3 (Ion Badulescu) | ||
91 | - Initialize the TxMode register properly | ||
92 | - Don't dereference dev->priv after freeing it | ||
93 | |||
94 | LK1.3.4 (Ion Badulescu) | ||
95 | - Fixed initialization timing problems | ||
96 | - Fixed interrupt mask definitions | ||
97 | |||
98 | LK1.3.5 (jgarzik) | ||
99 | - ethtool NWAY_RST, GLINK, [GS]MSGLVL support | ||
100 | |||
101 | LK1.3.6: | ||
102 | - Sparc64 support and fixes (Ion Badulescu) | ||
103 | - Better stats and error handling (Ion Badulescu) | ||
104 | - Use new pci_set_mwi() PCI API function (jgarzik) | ||
105 | |||
106 | LK1.3.7 (Ion Badulescu) | ||
107 | - minimal implementation of tx_timeout() | ||
108 | - correctly shutdown the Rx/Tx engines in netdev_close() | ||
109 | - added calls to netif_carrier_on/off | ||
110 | (patch from Stefan Rompf <srompf@isg.de>) | ||
111 | - VLAN support | ||
112 | |||
113 | LK1.3.8 (Ion Badulescu) | ||
114 | - adjust DMA burst size on sparc64 | ||
115 | - 64-bit support | ||
116 | - reworked zerocopy support for 64-bit buffers | ||
117 | - working and usable interrupt mitigation/latency | ||
118 | - reduced Tx interrupt frequency for lower interrupt overhead | ||
119 | |||
120 | LK1.3.9 (Ion Badulescu) | ||
121 | - bugfix for mcast filter | ||
122 | - enable the right kind of Tx interrupts (TxDMADone, not TxDone) | ||
123 | |||
124 | LK1.4.0 (Ion Badulescu) | ||
125 | - NAPI support | ||
126 | |||
127 | LK1.4.1 (Ion Badulescu) | ||
128 | - flush PCI posting buffers after disabling Rx interrupts | ||
129 | - put the chip to a D3 slumber on driver unload | ||
130 | - added config option to enable/disable NAPI | ||
131 | |||
132 | LK1.4.2 (Ion Badulescu) | ||
133 | - finally added firmware (GPL'ed by Adaptec) | ||
134 | - removed compatibility code for 2.2.x | ||
135 | |||
136 | LK1.4.2.1 (Ion Badulescu) | ||
137 | - fixed 32/64 bit issues on i386 + CONFIG_HIGHMEM | ||
138 | - added 32-bit padding to outgoing skb's, removed previous workaround | ||
139 | |||
140 | TODO: - fix forced speed/duplexing code (broken a long time ago, when | ||
141 | somebody converted the driver to use the generic MII code) | ||
142 | - fix VLAN support | ||
143 | */ | 27 | */ |
144 | 28 | ||
145 | #define DRV_NAME "starfire" | 29 | #define DRV_NAME "starfire" |
146 | #define DRV_VERSION "1.03+LK1.4.2.1" | 30 | #define DRV_VERSION "2.0" |
147 | #define DRV_RELDATE "October 3, 2005" | 31 | #define DRV_RELDATE "June 27, 2006" |
148 | 32 | ||
149 | #include <linux/module.h> | 33 | #include <linux/module.h> |
150 | #include <linux/kernel.h> | 34 | #include <linux/kernel.h> |
@@ -846,7 +730,6 @@ static int __devinit starfire_init_one(struct pci_dev *pdev, | |||
846 | goto err_out_free_netdev; | 730 | goto err_out_free_netdev; |
847 | } | 731 | } |
848 | 732 | ||
849 | /* ioremap is borken in Linux-2.2.x/sparc64 */ | ||
850 | base = ioremap(ioaddr, io_size); | 733 | base = ioremap(ioaddr, io_size); |
851 | if (!base) { | 734 | if (!base) { |
852 | printk(KERN_ERR DRV_NAME " %d: cannot remap %#x @ %#lx, aborting\n", | 735 | printk(KERN_ERR DRV_NAME " %d: cannot remap %#x @ %#lx, aborting\n", |
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c index 643fceae3db5..698568e751da 100644 --- a/drivers/net/sundance.c +++ b/drivers/net/sundance.c | |||
@@ -16,91 +16,13 @@ | |||
16 | 16 | ||
17 | Support and updates available at | 17 | Support and updates available at |
18 | http://www.scyld.com/network/sundance.html | 18 | http://www.scyld.com/network/sundance.html |
19 | [link no longer provides useful info -jgarzik] | ||
19 | 20 | ||
20 | |||
21 | Version LK1.01a (jgarzik): | ||
22 | - Replace some MII-related magic numbers with constants | ||
23 | |||
24 | Version LK1.02 (D-Link): | ||
25 | - Add new board to PCI ID list | ||
26 | - Fix multicast bug | ||
27 | |||
28 | Version LK1.03 (D-Link): | ||
29 | - New Rx scheme, reduce Rx congestion | ||
30 | - Option to disable flow control | ||
31 | |||
32 | Version LK1.04 (D-Link): | ||
33 | - Tx timeout recovery | ||
34 | - More support for ethtool. | ||
35 | |||
36 | Version LK1.04a: | ||
37 | - Remove unused/constant members from struct pci_id_info | ||
38 | (which then allows removal of 'drv_flags' from private struct) | ||
39 | (jgarzik) | ||
40 | - If no phy is found, fail to load that board (jgarzik) | ||
41 | - Always start phy id scan at id 1 to avoid problems (Donald Becker) | ||
42 | - Autodetect where mii_preamble_required is needed, | ||
43 | default to not needed. (Donald Becker) | ||
44 | |||
45 | Version LK1.04b: | ||
46 | - Remove mii_preamble_required module parameter (Donald Becker) | ||
47 | - Add per-interface mii_preamble_required (setting is autodetected) | ||
48 | (Donald Becker) | ||
49 | - Remove unnecessary cast from void pointer (jgarzik) | ||
50 | - Re-align comments in private struct (jgarzik) | ||
51 | |||
52 | Version LK1.04c (jgarzik): | ||
53 | - Support bitmapped message levels (NETIF_MSG_xxx), and the | ||
54 | two ethtool ioctls that get/set them | ||
55 | - Don't hand-code MII ethtool support, use standard API/lib | ||
56 | |||
57 | Version LK1.04d: | ||
58 | - Merge from Donald Becker's sundance.c: (Jason Lunz) | ||
59 | * proper support for variably-sized MTUs | ||
60 | * default to PIO, to fix chip bugs | ||
61 | - Add missing unregister_netdev (Jason Lunz) | ||
62 | - Add CONFIG_SUNDANCE_MMIO config option (jgarzik) | ||
63 | - Better rx buf size calculation (Donald Becker) | ||
64 | |||
65 | Version LK1.05 (D-Link): | ||
66 | - Fix DFE-580TX packet drop issue (for DL10050C) | ||
67 | - Fix reset_tx logic | ||
68 | |||
69 | Version LK1.06 (D-Link): | ||
70 | - Fix crash while unloading driver | ||
71 | |||
72 | Version LK1.06b (D-Link): | ||
73 | - New tx scheme, adaptive tx_coalesce | ||
74 | |||
75 | Version LK1.07 (D-Link): | ||
76 | - Fix tx bugs in big-endian machines | ||
77 | - Remove unused max_interrupt_work module parameter, the new | ||
78 | NAPI-like rx scheme doesn't need it. | ||
79 | - Remove redundant get_stats() in intr_handler(); those | ||
80 | I/O accesses could affect performance on ARM-based systems | ||
81 | - Add Linux software VLAN support | ||
82 | |||
83 | Version LK1.08 (Philippe De Muyter phdm@macqel.be): | ||
84 | - Fix bug of custom mac address | ||
85 | (StationAddr register only accept word write) | ||
86 | |||
87 | Version LK1.09 (D-Link): | ||
88 | - Fix the flowctrl bug. | ||
89 | - Set Pause bit in MII ANAR if flow control enabled. | ||
90 | |||
91 | Version LK1.09a (ICPlus): | ||
92 | - Add the delay time in reading the contents of EEPROM | ||
93 | |||
94 | Version LK1.10 (Philippe De Muyter phdm@macqel.be): | ||
95 | - Make 'unblock interface after Tx underrun' work | ||
96 | |||
97 | Version LK1.11 (Pedro Alejandro Lopez-Valencia palopezv at gmail.com): | ||
98 | - Add support for IC Plus Corporation IP100A chipset | ||
99 | */ | 21 | */ |
100 | 22 | ||
101 | #define DRV_NAME "sundance" | 23 | #define DRV_NAME "sundance" |
102 | #define DRV_VERSION "1.01+LK1.11" | 24 | #define DRV_VERSION "1.1" |
103 | #define DRV_RELDATE "14-Jun-2006" | 25 | #define DRV_RELDATE "27-Jun-2006" |
104 | 26 | ||
105 | 27 | ||
106 | /* The user-configurable values. | 28 | /* The user-configurable values. |
@@ -185,7 +107,7 @@ static char *media[MAX_UNITS]; | |||
185 | #endif | 107 | #endif |
186 | 108 | ||
187 | /* These identify the driver base version and may not be removed. */ | 109 | /* These identify the driver base version and may not be removed. */ |
188 | static char version[] __devinitdata = | 110 | static char version[] = |
189 | KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n" | 111 | KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n" |
190 | KERN_INFO " http://www.scyld.com/network/sundance.html\n"; | 112 | KERN_INFO " http://www.scyld.com/network/sundance.html\n"; |
191 | 113 | ||
@@ -282,15 +204,15 @@ IVc. Errata | |||
282 | #define USE_IO_OPS 1 | 204 | #define USE_IO_OPS 1 |
283 | #endif | 205 | #endif |
284 | 206 | ||
285 | static struct pci_device_id sundance_pci_tbl[] = { | 207 | static const struct pci_device_id sundance_pci_tbl[] = { |
286 | {0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0}, | 208 | { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 }, |
287 | {0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1}, | 209 | { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 }, |
288 | {0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2}, | 210 | { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 }, |
289 | {0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3}, | 211 | { 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 }, |
290 | {0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4}, | 212 | { 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 }, |
291 | {0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5}, | 213 | { 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 }, |
292 | {0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6}, | 214 | { 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 }, |
293 | {0,} | 215 | { } |
294 | }; | 216 | }; |
295 | MODULE_DEVICE_TABLE(pci, sundance_pci_tbl); | 217 | MODULE_DEVICE_TABLE(pci, sundance_pci_tbl); |
296 | 218 | ||
@@ -301,7 +223,7 @@ enum { | |||
301 | struct pci_id_info { | 223 | struct pci_id_info { |
302 | const char *name; | 224 | const char *name; |
303 | }; | 225 | }; |
304 | static const struct pci_id_info pci_id_tbl[] = { | 226 | static const struct pci_id_info pci_id_tbl[] __devinitdata = { |
305 | {"D-Link DFE-550TX FAST Ethernet Adapter"}, | 227 | {"D-Link DFE-550TX FAST Ethernet Adapter"}, |
306 | {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"}, | 228 | {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"}, |
307 | {"D-Link DFE-580TX 4 port Server Adapter"}, | 229 | {"D-Link DFE-580TX 4 port Server Adapter"}, |
@@ -309,7 +231,7 @@ static const struct pci_id_info pci_id_tbl[] = { | |||
309 | {"D-Link DL10050-based FAST Ethernet Adapter"}, | 231 | {"D-Link DL10050-based FAST Ethernet Adapter"}, |
310 | {"Sundance Technology Alta"}, | 232 | {"Sundance Technology Alta"}, |
311 | {"IC Plus Corporation IP100A FAST Ethernet Adapter"}, | 233 | {"IC Plus Corporation IP100A FAST Ethernet Adapter"}, |
312 | {NULL,}, /* 0 terminated list. */ | 234 | { } /* terminate list. */ |
313 | }; | 235 | }; |
314 | 236 | ||
315 | /* This driver was written to use PCI memory space, however x86-oriented | 237 | /* This driver was written to use PCI memory space, however x86-oriented |
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c index 8673fd4c08c7..c6f5bc3c042f 100644 --- a/drivers/net/sunhme.c +++ b/drivers/net/sunhme.c | |||
@@ -3255,12 +3255,7 @@ static void __devexit happy_meal_pci_remove(struct pci_dev *pdev) | |||
3255 | } | 3255 | } |
3256 | 3256 | ||
3257 | static struct pci_device_id happymeal_pci_ids[] = { | 3257 | static struct pci_device_id happymeal_pci_ids[] = { |
3258 | { | 3258 | { PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) }, |
3259 | .vendor = PCI_VENDOR_ID_SUN, | ||
3260 | .device = PCI_DEVICE_ID_SUN_HAPPYMEAL, | ||
3261 | .subvendor = PCI_ANY_ID, | ||
3262 | .subdevice = PCI_ANY_ID, | ||
3263 | }, | ||
3264 | { } /* Terminating entry */ | 3259 | { } /* Terminating entry */ |
3265 | }; | 3260 | }; |
3266 | 3261 | ||
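For readers unfamiliar with the helper: the PCI_DEVICE() macro used in the new table is shorthand for the four explicit fields it replaces, matching any subvendor/subdevice. A minimal sketch of the equivalence, as the removed lines above show:

    /* { PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) } is,
     * in effect, the same table entry as:
     */
    {
        .vendor    = PCI_VENDOR_ID_SUN,
        .device    = PCI_DEVICE_ID_SUN_HAPPYMEAL,
        .subvendor = PCI_ANY_ID,
        .subdevice = PCI_ANY_ID,
    },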
@@ -3275,7 +3270,7 @@ static struct pci_driver hme_pci_driver = { | |||
3275 | 3270 | ||
3276 | static int __init happy_meal_pci_init(void) | 3271 | static int __init happy_meal_pci_init(void) |
3277 | { | 3272 | { |
3278 | return pci_module_init(&hme_pci_driver); | 3273 | return pci_register_driver(&hme_pci_driver); |
3279 | } | 3274 | } |
3280 | 3275 | ||
3281 | static void happy_meal_pci_exit(void) | 3276 | static void happy_meal_pci_exit(void) |
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c index 1ef9fd39a79a..0e3fdf7c6dd3 100644 --- a/drivers/net/sunlance.c +++ b/drivers/net/sunlance.c | |||
@@ -1537,7 +1537,7 @@ static int __init sparc_lance_init(void) | |||
1537 | { | 1537 | { |
1538 | if ((idprom->id_machtype == (SM_SUN4|SM_4_330)) || | 1538 | if ((idprom->id_machtype == (SM_SUN4|SM_4_330)) || |
1539 | (idprom->id_machtype == (SM_SUN4|SM_4_470))) { | 1539 | (idprom->id_machtype == (SM_SUN4|SM_4_470))) { |
1540 | memset(&sun4_sdev, 0, sizeof(sdev)); | 1540 | memset(&sun4_sdev, 0, sizeof(struct sbus_dev)); |
1541 | sun4_sdev.reg_addrs[0].phys_addr = sun4_eth_physaddr; | 1541 | sun4_sdev.reg_addrs[0].phys_addr = sun4_eth_physaddr; |
1542 | sun4_sdev.irqs[0] = 6; | 1542 | sun4_sdev.irqs[0] = 6; |
1543 | return sparc_lance_probe_one(&sun4_sdev, NULL, NULL); | 1543 | return sparc_lance_probe_one(&sun4_sdev, NULL, NULL); |
@@ -1547,16 +1547,16 @@ static int __init sparc_lance_init(void) | |||
1547 | 1547 | ||
1548 | static int __exit sunlance_sun4_remove(void) | 1548 | static int __exit sunlance_sun4_remove(void) |
1549 | { | 1549 | { |
1550 | struct lance_private *lp = dev_get_drvdata(&sun4_sdev->dev); | 1550 | struct lance_private *lp = dev_get_drvdata(&sun4_sdev.ofdev.dev); |
1551 | struct net_device *net_dev = lp->dev; | 1551 | struct net_device *net_dev = lp->dev; |
1552 | 1552 | ||
1553 | unregister_netdevice(net_dev); | 1553 | unregister_netdevice(net_dev); |
1554 | 1554 | ||
1555 | lance_free_hwresources(root_lance_dev); | 1555 | lance_free_hwresources(lp); |
1556 | 1556 | ||
1557 | free_netdev(net_dev); | 1557 | free_netdev(net_dev); |
1558 | 1558 | ||
1559 | dev_set_drvdata(&sun4_sdev->dev, NULL); | 1559 | dev_set_drvdata(&sun4_sdev.ofdev.dev, NULL); |
1560 | 1560 | ||
1561 | return 0; | 1561 | return 0; |
1562 | } | 1562 | } |
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index f645921aff8b..eafabb253f08 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -68,8 +68,8 @@ | |||
68 | 68 | ||
69 | #define DRV_MODULE_NAME "tg3" | 69 | #define DRV_MODULE_NAME "tg3" |
70 | #define PFX DRV_MODULE_NAME ": " | 70 | #define PFX DRV_MODULE_NAME ": " |
71 | #define DRV_MODULE_VERSION "3.62" | 71 | #define DRV_MODULE_VERSION "3.65" |
72 | #define DRV_MODULE_RELDATE "June 30, 2006" | 72 | #define DRV_MODULE_RELDATE "August 07, 2006" |
73 | 73 | ||
74 | #define TG3_DEF_MAC_MODE 0 | 74 | #define TG3_DEF_MAC_MODE 0 |
75 | #define TG3_DEF_RX_MODE 0 | 75 | #define TG3_DEF_RX_MODE 0 |
@@ -123,9 +123,6 @@ | |||
123 | TG3_RX_RCB_RING_SIZE(tp)) | 123 | TG3_RX_RCB_RING_SIZE(tp)) |
124 | #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \ | 124 | #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \ |
125 | TG3_TX_RING_SIZE) | 125 | TG3_TX_RING_SIZE) |
126 | #define TX_BUFFS_AVAIL(TP) \ | ||
127 | ((TP)->tx_pending - \ | ||
128 | (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1))) | ||
129 | #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1)) | 126 | #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1)) |
130 | 127 | ||
131 | #define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64) | 128 | #define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64) |
@@ -2987,6 +2984,13 @@ static void tg3_tx_recover(struct tg3 *tp) | |||
2987 | spin_unlock(&tp->lock); | 2984 | spin_unlock(&tp->lock); |
2988 | } | 2985 | } |
2989 | 2986 | ||
2987 | static inline u32 tg3_tx_avail(struct tg3 *tp) | ||
2988 | { | ||
2989 | smp_mb(); | ||
2990 | return (tp->tx_pending - | ||
2991 | ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1))); | ||
2992 | } | ||
2993 | |||
2990 | /* Tigon3 never reports partial packet sends. So we do not | 2994 | /* Tigon3 never reports partial packet sends. So we do not |
2991 | * need special logic to handle SKBs that have not had all | 2995 | * need special logic to handle SKBs that have not had all |
2992 | * of their frags sent yet, like SunGEM does. | 2996 | * of their frags sent yet, like SunGEM does. |
@@ -3038,12 +3042,20 @@ static void tg3_tx(struct tg3 *tp) | |||
3038 | 3042 | ||
3039 | tp->tx_cons = sw_idx; | 3043 | tp->tx_cons = sw_idx; |
3040 | 3044 | ||
3041 | if (unlikely(netif_queue_stopped(tp->dev))) { | 3045 | /* Need to make the tx_cons update visible to tg3_start_xmit() |
3042 | spin_lock(&tp->tx_lock); | 3046 | * before checking for netif_queue_stopped(). Without the |
3047 | * memory barrier, there is a small possibility that tg3_start_xmit() | ||
3048 | * will miss it and cause the queue to be stopped forever. | ||
3049 | */ | ||
3050 | smp_mb(); | ||
3051 | |||
3052 | if (unlikely(netif_queue_stopped(tp->dev) && | ||
3053 | (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))) { | ||
3054 | netif_tx_lock(tp->dev); | ||
3043 | if (netif_queue_stopped(tp->dev) && | 3055 | if (netif_queue_stopped(tp->dev) && |
3044 | (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)) | 3056 | (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)) |
3045 | netif_wake_queue(tp->dev); | 3057 | netif_wake_queue(tp->dev); |
3046 | spin_unlock(&tp->tx_lock); | 3058 | netif_tx_unlock(tp->dev); |
3047 | } | 3059 | } |
3048 | } | 3060 | } |
3049 | 3061 | ||
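The comment added above describes a lockless producer/consumer handshake; a condensed sketch of the ordering, using only names that appear in this patch (tg3_tx_avail() issues its own smp_mb() before reading tx_cons):

    /* Completion side (tg3_tx): publish the new consumer index first... */
    tp->tx_cons = sw_idx;
    smp_mb();                          /* ...then check whether the queue needs waking */
    if (netif_queue_stopped(tp->dev) &&
        tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH) {
        netif_tx_lock(tp->dev);        /* recheck under the lock before waking */
        if (netif_queue_stopped(tp->dev) &&
            tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
            netif_wake_queue(tp->dev);
        netif_tx_unlock(tp->dev);
    }

    /* Submission side (tg3_start_xmit): stop the queue when the ring is
     * nearly full; the barrier inside tg3_tx_avail() keeps this check
     * from missing a concurrent tx_cons update.
     */
    if (unlikely(tg3_tx_avail(tp) <= MAX_SKB_FRAGS + 1))
        netif_stop_queue(dev);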
@@ -3097,11 +3109,10 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key, | |||
3097 | * Callers depend upon this behavior and assume that | 3109 | * Callers depend upon this behavior and assume that |
3098 | * we leave everything unchanged if we fail. | 3110 | * we leave everything unchanged if we fail. |
3099 | */ | 3111 | */ |
3100 | skb = dev_alloc_skb(skb_size); | 3112 | skb = netdev_alloc_skb(tp->dev, skb_size); |
3101 | if (skb == NULL) | 3113 | if (skb == NULL) |
3102 | return -ENOMEM; | 3114 | return -ENOMEM; |
3103 | 3115 | ||
3104 | skb->dev = tp->dev; | ||
3105 | skb_reserve(skb, tp->rx_offset); | 3116 | skb_reserve(skb, tp->rx_offset); |
3106 | 3117 | ||
3107 | mapping = pci_map_single(tp->pdev, skb->data, | 3118 | mapping = pci_map_single(tp->pdev, skb->data, |
@@ -3270,11 +3281,10 @@ static int tg3_rx(struct tg3 *tp, int budget) | |||
3270 | tg3_recycle_rx(tp, opaque_key, | 3281 | tg3_recycle_rx(tp, opaque_key, |
3271 | desc_idx, *post_ptr); | 3282 | desc_idx, *post_ptr); |
3272 | 3283 | ||
3273 | copy_skb = dev_alloc_skb(len + 2); | 3284 | copy_skb = netdev_alloc_skb(tp->dev, len + 2); |
3274 | if (copy_skb == NULL) | 3285 | if (copy_skb == NULL) |
3275 | goto drop_it_no_recycle; | 3286 | goto drop_it_no_recycle; |
3276 | 3287 | ||
3277 | copy_skb->dev = tp->dev; | ||
3278 | skb_reserve(copy_skb, 2); | 3288 | skb_reserve(copy_skb, 2); |
3279 | skb_put(copy_skb, len); | 3289 | skb_put(copy_skb, len); |
3280 | pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); | 3290 | pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); |
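Both allocation hunks above make the same substitution; a minimal before/after sketch of the pattern (netdev_alloc_skb() records the owning device itself, so the explicit skb->dev assignment is dropped):

    /* before */
    skb = dev_alloc_skb(len + 2);
    if (skb)
        skb->dev = tp->dev;

    /* after */
    skb = netdev_alloc_skb(tp->dev, len + 2);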
@@ -3590,6 +3600,28 @@ static irqreturn_t tg3_test_isr(int irq, void *dev_id, | |||
3590 | static int tg3_init_hw(struct tg3 *, int); | 3600 | static int tg3_init_hw(struct tg3 *, int); |
3591 | static int tg3_halt(struct tg3 *, int, int); | 3601 | static int tg3_halt(struct tg3 *, int, int); |
3592 | 3602 | ||
3603 | /* Restart hardware after configuration changes, self-test, etc. | ||
3604 | * Invoked with tp->lock held. | ||
3605 | */ | ||
3606 | static int tg3_restart_hw(struct tg3 *tp, int reset_phy) | ||
3607 | { | ||
3608 | int err; | ||
3609 | |||
3610 | err = tg3_init_hw(tp, reset_phy); | ||
3611 | if (err) { | ||
3612 | printk(KERN_ERR PFX "%s: Failed to re-initialize device, " | ||
3613 | "aborting.\n", tp->dev->name); | ||
3614 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | ||
3615 | tg3_full_unlock(tp); | ||
3616 | del_timer_sync(&tp->timer); | ||
3617 | tp->irq_sync = 0; | ||
3618 | netif_poll_enable(tp->dev); | ||
3619 | dev_close(tp->dev); | ||
3620 | tg3_full_lock(tp, 0); | ||
3621 | } | ||
3622 | return err; | ||
3623 | } | ||
3624 | |||
3593 | #ifdef CONFIG_NET_POLL_CONTROLLER | 3625 | #ifdef CONFIG_NET_POLL_CONTROLLER |
3594 | static void tg3_poll_controller(struct net_device *dev) | 3626 | static void tg3_poll_controller(struct net_device *dev) |
3595 | { | 3627 | { |
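The hunks that follow convert callers of tg3_init_hw() to this helper; a minimal sketch of the intended calling pattern, with tp->lock held as the comment above requires:

    tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
    err = tg3_restart_hw(tp, 1);   /* on failure the helper shuts the device down */
    if (!err)
        tg3_netif_start(tp);       /* only restart the queues if re-init succeeded */

    tg3_full_unlock(tp);
    return err;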
@@ -3630,13 +3662,15 @@ static void tg3_reset_task(void *_data) | |||
3630 | } | 3662 | } |
3631 | 3663 | ||
3632 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); | 3664 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); |
3633 | tg3_init_hw(tp, 1); | 3665 | if (tg3_init_hw(tp, 1)) |
3666 | goto out; | ||
3634 | 3667 | ||
3635 | tg3_netif_start(tp); | 3668 | tg3_netif_start(tp); |
3636 | 3669 | ||
3637 | if (restart_timer) | 3670 | if (restart_timer) |
3638 | mod_timer(&tp->timer, jiffies + 1); | 3671 | mod_timer(&tp->timer, jiffies + 1); |
3639 | 3672 | ||
3673 | out: | ||
3640 | tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK; | 3674 | tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK; |
3641 | 3675 | ||
3642 | tg3_full_unlock(tp); | 3676 | tg3_full_unlock(tp); |
@@ -3773,7 +3807,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3773 | * interrupt. Furthermore, IRQ processing runs lockless so we have | 3807 | * interrupt. Furthermore, IRQ processing runs lockless so we have |
3774 | * no IRQ context deadlocks to worry about either. Rejoice! | 3808 | * no IRQ context deadlocks to worry about either. Rejoice! |
3775 | */ | 3809 | */ |
3776 | if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { | 3810 | if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { |
3777 | if (!netif_queue_stopped(dev)) { | 3811 | if (!netif_queue_stopped(dev)) { |
3778 | netif_stop_queue(dev); | 3812 | netif_stop_queue(dev); |
3779 | 3813 | ||
@@ -3869,12 +3903,10 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3869 | tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry); | 3903 | tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry); |
3870 | 3904 | ||
3871 | tp->tx_prod = entry; | 3905 | tp->tx_prod = entry; |
3872 | if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) { | 3906 | if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) { |
3873 | spin_lock(&tp->tx_lock); | ||
3874 | netif_stop_queue(dev); | 3907 | netif_stop_queue(dev); |
3875 | if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH) | 3908 | if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH) |
3876 | netif_wake_queue(tp->dev); | 3909 | netif_wake_queue(tp->dev); |
3877 | spin_unlock(&tp->tx_lock); | ||
3878 | } | 3910 | } |
3879 | 3911 | ||
3880 | out_unlock: | 3912 | out_unlock: |
@@ -3896,7 +3928,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb) | |||
3896 | struct sk_buff *segs, *nskb; | 3928 | struct sk_buff *segs, *nskb; |
3897 | 3929 | ||
3898 | /* Estimate the number of fragments in the worst case */ | 3930 | /* Estimate the number of fragments in the worst case */ |
3899 | if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->gso_segs * 3))) { | 3931 | if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) { |
3900 | netif_stop_queue(tp->dev); | 3932 | netif_stop_queue(tp->dev); |
3901 | return NETDEV_TX_BUSY; | 3933 | return NETDEV_TX_BUSY; |
3902 | } | 3934 | } |
@@ -3936,7 +3968,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) | |||
3936 | * interrupt. Furthermore, IRQ processing runs lockless so we have | 3968 | * interrupt. Furthermore, IRQ processing runs lockless so we have |
3937 | * no IRQ context deadlocks to worry about either. Rejoice! | 3969 | * no IRQ context deadlocks to worry about either. Rejoice! |
3938 | */ | 3970 | */ |
3939 | if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { | 3971 | if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { |
3940 | if (!netif_queue_stopped(dev)) { | 3972 | if (!netif_queue_stopped(dev)) { |
3941 | netif_stop_queue(dev); | 3973 | netif_stop_queue(dev); |
3942 | 3974 | ||
@@ -4086,12 +4118,10 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) | |||
4086 | tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry); | 4118 | tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry); |
4087 | 4119 | ||
4088 | tp->tx_prod = entry; | 4120 | tp->tx_prod = entry; |
4089 | if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) { | 4121 | if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) { |
4090 | spin_lock(&tp->tx_lock); | ||
4091 | netif_stop_queue(dev); | 4122 | netif_stop_queue(dev); |
4092 | if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH) | 4123 | if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH) |
4093 | netif_wake_queue(tp->dev); | 4124 | netif_wake_queue(tp->dev); |
4094 | spin_unlock(&tp->tx_lock); | ||
4095 | } | 4125 | } |
4096 | 4126 | ||
4097 | out_unlock: | 4127 | out_unlock: |
@@ -4124,6 +4154,7 @@ static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp, | |||
4124 | static int tg3_change_mtu(struct net_device *dev, int new_mtu) | 4154 | static int tg3_change_mtu(struct net_device *dev, int new_mtu) |
4125 | { | 4155 | { |
4126 | struct tg3 *tp = netdev_priv(dev); | 4156 | struct tg3 *tp = netdev_priv(dev); |
4157 | int err; | ||
4127 | 4158 | ||
4128 | if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp)) | 4159 | if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp)) |
4129 | return -EINVAL; | 4160 | return -EINVAL; |
@@ -4144,13 +4175,14 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu) | |||
4144 | 4175 | ||
4145 | tg3_set_mtu(dev, tp, new_mtu); | 4176 | tg3_set_mtu(dev, tp, new_mtu); |
4146 | 4177 | ||
4147 | tg3_init_hw(tp, 0); | 4178 | err = tg3_restart_hw(tp, 0); |
4148 | 4179 | ||
4149 | tg3_netif_start(tp); | 4180 | if (!err) |
4181 | tg3_netif_start(tp); | ||
4150 | 4182 | ||
4151 | tg3_full_unlock(tp); | 4183 | tg3_full_unlock(tp); |
4152 | 4184 | ||
4153 | return 0; | 4185 | return err; |
4154 | } | 4186 | } |
4155 | 4187 | ||
4156 | /* Free up pending packets in all rx/tx rings. | 4188 | /* Free up pending packets in all rx/tx rings. |
@@ -4232,7 +4264,7 @@ static void tg3_free_rings(struct tg3 *tp) | |||
4232 | * end up in the driver. tp->{tx,}lock are held and thus | 4264 | * end up in the driver. tp->{tx,}lock are held and thus |
4233 | * we may not sleep. | 4265 | * we may not sleep. |
4234 | */ | 4266 | */ |
4235 | static void tg3_init_rings(struct tg3 *tp) | 4267 | static int tg3_init_rings(struct tg3 *tp) |
4236 | { | 4268 | { |
4237 | u32 i; | 4269 | u32 i; |
4238 | 4270 | ||
@@ -4281,18 +4313,38 @@ static void tg3_init_rings(struct tg3 *tp) | |||
4281 | 4313 | ||
4282 | /* Now allocate fresh SKBs for each rx ring. */ | 4314 | /* Now allocate fresh SKBs for each rx ring. */ |
4283 | for (i = 0; i < tp->rx_pending; i++) { | 4315 | for (i = 0; i < tp->rx_pending; i++) { |
4284 | if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, | 4316 | if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) { |
4285 | -1, i) < 0) | 4317 | printk(KERN_WARNING PFX |
4318 | "%s: Using a smaller RX standard ring, " | ||
4319 | "only %d out of %d buffers were allocated " | ||
4320 | "successfully.\n", | ||
4321 | tp->dev->name, i, tp->rx_pending); | ||
4322 | if (i == 0) | ||
4323 | return -ENOMEM; | ||
4324 | tp->rx_pending = i; | ||
4286 | break; | 4325 | break; |
4326 | } | ||
4287 | } | 4327 | } |
4288 | 4328 | ||
4289 | if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { | 4329 | if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { |
4290 | for (i = 0; i < tp->rx_jumbo_pending; i++) { | 4330 | for (i = 0; i < tp->rx_jumbo_pending; i++) { |
4291 | if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO, | 4331 | if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO, |
4292 | -1, i) < 0) | 4332 | -1, i) < 0) { |
4333 | printk(KERN_WARNING PFX | ||
4334 | "%s: Using a smaller RX jumbo ring, " | ||
4335 | "only %d out of %d buffers were " | ||
4336 | "allocated successfully.\n", | ||
4337 | tp->dev->name, i, tp->rx_jumbo_pending); | ||
4338 | if (i == 0) { | ||
4339 | tg3_free_rings(tp); | ||
4340 | return -ENOMEM; | ||
4341 | } | ||
4342 | tp->rx_jumbo_pending = i; | ||
4293 | break; | 4343 | break; |
4344 | } | ||
4294 | } | 4345 | } |
4295 | } | 4346 | } |
4347 | return 0; | ||
4296 | } | 4348 | } |
4297 | 4349 | ||
4298 | /* | 4350 | /* |
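The ring setup above no longer fails silently on a partial allocation; a condensed sketch of the allocate-and-shrink pattern it now follows (names as in tg3_init_rings()):

    for (i = 0; i < tp->rx_pending; i++) {
        if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
            if (i == 0)
                return -ENOMEM;    /* nothing allocated: hard failure */
            tp->rx_pending = i;    /* partial success: run with a smaller ring */
            break;
        }
    }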
@@ -5815,6 +5867,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p) | |||
5815 | { | 5867 | { |
5816 | struct tg3 *tp = netdev_priv(dev); | 5868 | struct tg3 *tp = netdev_priv(dev); |
5817 | struct sockaddr *addr = p; | 5869 | struct sockaddr *addr = p; |
5870 | int err = 0; | ||
5818 | 5871 | ||
5819 | if (!is_valid_ether_addr(addr->sa_data)) | 5872 | if (!is_valid_ether_addr(addr->sa_data)) |
5820 | return -EINVAL; | 5873 | return -EINVAL; |
@@ -5832,9 +5885,9 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p) | |||
5832 | tg3_full_lock(tp, 1); | 5885 | tg3_full_lock(tp, 1); |
5833 | 5886 | ||
5834 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 5887 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); |
5835 | tg3_init_hw(tp, 0); | 5888 | err = tg3_restart_hw(tp, 0); |
5836 | 5889 | if (!err) | |
5837 | tg3_netif_start(tp); | 5890 | tg3_netif_start(tp); |
5838 | tg3_full_unlock(tp); | 5891 | tg3_full_unlock(tp); |
5839 | } else { | 5892 | } else { |
5840 | spin_lock_bh(&tp->lock); | 5893 | spin_lock_bh(&tp->lock); |
@@ -5842,7 +5895,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p) | |||
5842 | spin_unlock_bh(&tp->lock); | 5895 | spin_unlock_bh(&tp->lock); |
5843 | } | 5896 | } |
5844 | 5897 | ||
5845 | return 0; | 5898 | return err; |
5846 | } | 5899 | } |
5847 | 5900 | ||
5848 | /* tp->lock is held. */ | 5901 | /* tp->lock is held. */ |
@@ -5942,7 +5995,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
5942 | * can only do this after the hardware has been | 5995 | * can only do this after the hardware has been |
5943 | * successfully reset. | 5996 | * successfully reset. |
5944 | */ | 5997 | */ |
5945 | tg3_init_rings(tp); | 5998 | err = tg3_init_rings(tp); |
5999 | if (err) | ||
6000 | return err; | ||
5946 | 6001 | ||
5947 | /* This value is determined during the probe time DMA | 6002 | /* This value is determined during the probe time DMA |
5948 | * engine test, tg3_test_dma. | 6003 | * engine test, tg3_test_dma. |
@@ -7956,7 +8011,7 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam * | |||
7956 | static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) | 8011 | static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) |
7957 | { | 8012 | { |
7958 | struct tg3 *tp = netdev_priv(dev); | 8013 | struct tg3 *tp = netdev_priv(dev); |
7959 | int irq_sync = 0; | 8014 | int irq_sync = 0, err = 0; |
7960 | 8015 | ||
7961 | if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) || | 8016 | if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) || |
7962 | (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) || | 8017 | (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) || |
@@ -7980,13 +8035,14 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e | |||
7980 | 8035 | ||
7981 | if (netif_running(dev)) { | 8036 | if (netif_running(dev)) { |
7982 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 8037 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); |
7983 | tg3_init_hw(tp, 1); | 8038 | err = tg3_restart_hw(tp, 1); |
7984 | tg3_netif_start(tp); | 8039 | if (!err) |
8040 | tg3_netif_start(tp); | ||
7985 | } | 8041 | } |
7986 | 8042 | ||
7987 | tg3_full_unlock(tp); | 8043 | tg3_full_unlock(tp); |
7988 | 8044 | ||
7989 | return 0; | 8045 | return err; |
7990 | } | 8046 | } |
7991 | 8047 | ||
7992 | static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) | 8048 | static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) |
@@ -8001,7 +8057,7 @@ static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam | |||
8001 | static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) | 8057 | static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) |
8002 | { | 8058 | { |
8003 | struct tg3 *tp = netdev_priv(dev); | 8059 | struct tg3 *tp = netdev_priv(dev); |
8004 | int irq_sync = 0; | 8060 | int irq_sync = 0, err = 0; |
8005 | 8061 | ||
8006 | if (netif_running(dev)) { | 8062 | if (netif_running(dev)) { |
8007 | tg3_netif_stop(tp); | 8063 | tg3_netif_stop(tp); |
@@ -8025,13 +8081,14 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam | |||
8025 | 8081 | ||
8026 | if (netif_running(dev)) { | 8082 | if (netif_running(dev)) { |
8027 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 8083 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); |
8028 | tg3_init_hw(tp, 1); | 8084 | err = tg3_restart_hw(tp, 1); |
8029 | tg3_netif_start(tp); | 8085 | if (!err) |
8086 | tg3_netif_start(tp); | ||
8030 | } | 8087 | } |
8031 | 8088 | ||
8032 | tg3_full_unlock(tp); | 8089 | tg3_full_unlock(tp); |
8033 | 8090 | ||
8034 | return 0; | 8091 | return err; |
8035 | } | 8092 | } |
8036 | 8093 | ||
8037 | static u32 tg3_get_rx_csum(struct net_device *dev) | 8094 | static u32 tg3_get_rx_csum(struct net_device *dev) |
@@ -8567,7 +8624,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) | |||
8567 | err = -EIO; | 8624 | err = -EIO; |
8568 | 8625 | ||
8569 | tx_len = 1514; | 8626 | tx_len = 1514; |
8570 | skb = dev_alloc_skb(tx_len); | 8627 | skb = netdev_alloc_skb(tp->dev, tx_len); |
8571 | if (!skb) | 8628 | if (!skb) |
8572 | return -ENOMEM; | 8629 | return -ENOMEM; |
8573 | 8630 | ||
@@ -8666,7 +8723,9 @@ static int tg3_test_loopback(struct tg3 *tp) | |||
8666 | if (!netif_running(tp->dev)) | 8723 | if (!netif_running(tp->dev)) |
8667 | return TG3_LOOPBACK_FAILED; | 8724 | return TG3_LOOPBACK_FAILED; |
8668 | 8725 | ||
8669 | tg3_reset_hw(tp, 1); | 8726 | err = tg3_reset_hw(tp, 1); |
8727 | if (err) | ||
8728 | return TG3_LOOPBACK_FAILED; | ||
8670 | 8729 | ||
8671 | if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK)) | 8730 | if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK)) |
8672 | err |= TG3_MAC_LOOPBACK_FAILED; | 8731 | err |= TG3_MAC_LOOPBACK_FAILED; |
@@ -8740,8 +8799,8 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, | |||
8740 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 8799 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); |
8741 | if (netif_running(dev)) { | 8800 | if (netif_running(dev)) { |
8742 | tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; | 8801 | tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; |
8743 | tg3_init_hw(tp, 1); | 8802 | if (!tg3_restart_hw(tp, 1)) |
8744 | tg3_netif_start(tp); | 8803 | tg3_netif_start(tp); |
8745 | } | 8804 | } |
8746 | 8805 | ||
8747 | tg3_full_unlock(tp); | 8806 | tg3_full_unlock(tp); |
@@ -10078,6 +10137,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
10078 | static struct pci_device_id write_reorder_chipsets[] = { | 10137 | static struct pci_device_id write_reorder_chipsets[] = { |
10079 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, | 10138 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, |
10080 | PCI_DEVICE_ID_AMD_FE_GATE_700C) }, | 10139 | PCI_DEVICE_ID_AMD_FE_GATE_700C) }, |
10140 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, | ||
10141 | PCI_DEVICE_ID_AMD_8131_BRIDGE) }, | ||
10081 | { PCI_DEVICE(PCI_VENDOR_ID_VIA, | 10142 | { PCI_DEVICE(PCI_VENDOR_ID_VIA, |
10082 | PCI_DEVICE_ID_VIA_8385_0) }, | 10143 | PCI_DEVICE_ID_VIA_8385_0) }, |
10083 | { }, | 10144 | { }, |
@@ -11419,7 +11480,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
11419 | tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA; | 11480 | tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA; |
11420 | #endif | 11481 | #endif |
11421 | spin_lock_init(&tp->lock); | 11482 | spin_lock_init(&tp->lock); |
11422 | spin_lock_init(&tp->tx_lock); | ||
11423 | spin_lock_init(&tp->indirect_lock); | 11483 | spin_lock_init(&tp->indirect_lock); |
11424 | INIT_WORK(&tp->reset_task, tg3_reset_task, tp); | 11484 | INIT_WORK(&tp->reset_task, tg3_reset_task, tp); |
11425 | 11485 | ||
@@ -11697,7 +11757,8 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state) | |||
11697 | tg3_full_lock(tp, 0); | 11757 | tg3_full_lock(tp, 0); |
11698 | 11758 | ||
11699 | tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; | 11759 | tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; |
11700 | tg3_init_hw(tp, 1); | 11760 | if (tg3_restart_hw(tp, 1)) |
11761 | goto out; | ||
11701 | 11762 | ||
11702 | tp->timer.expires = jiffies + tp->timer_offset; | 11763 | tp->timer.expires = jiffies + tp->timer_offset; |
11703 | add_timer(&tp->timer); | 11764 | add_timer(&tp->timer); |
@@ -11705,6 +11766,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state) | |||
11705 | netif_device_attach(dev); | 11766 | netif_device_attach(dev); |
11706 | tg3_netif_start(tp); | 11767 | tg3_netif_start(tp); |
11707 | 11768 | ||
11769 | out: | ||
11708 | tg3_full_unlock(tp); | 11770 | tg3_full_unlock(tp); |
11709 | } | 11771 | } |
11710 | 11772 | ||
@@ -11731,16 +11793,19 @@ static int tg3_resume(struct pci_dev *pdev) | |||
11731 | tg3_full_lock(tp, 0); | 11793 | tg3_full_lock(tp, 0); |
11732 | 11794 | ||
11733 | tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; | 11795 | tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; |
11734 | tg3_init_hw(tp, 1); | 11796 | err = tg3_restart_hw(tp, 1); |
11797 | if (err) | ||
11798 | goto out; | ||
11735 | 11799 | ||
11736 | tp->timer.expires = jiffies + tp->timer_offset; | 11800 | tp->timer.expires = jiffies + tp->timer_offset; |
11737 | add_timer(&tp->timer); | 11801 | add_timer(&tp->timer); |
11738 | 11802 | ||
11739 | tg3_netif_start(tp); | 11803 | tg3_netif_start(tp); |
11740 | 11804 | ||
11805 | out: | ||
11741 | tg3_full_unlock(tp); | 11806 | tg3_full_unlock(tp); |
11742 | 11807 | ||
11743 | return 0; | 11808 | return err; |
11744 | } | 11809 | } |
11745 | 11810 | ||
11746 | static struct pci_driver tg3_driver = { | 11811 | static struct pci_driver tg3_driver = { |
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h index ba2c98711c88..3ecf356cfb08 100644 --- a/drivers/net/tg3.h +++ b/drivers/net/tg3.h | |||
@@ -2079,9 +2079,9 @@ struct tg3 { | |||
2079 | * lock: Held during reset, PHY access, timer, and when | 2079 | * lock: Held during reset, PHY access, timer, and when |
2080 | * updating tg3_flags and tg3_flags2. | 2080 | * updating tg3_flags and tg3_flags2. |
2081 | * | 2081 | * |
2082 | * tx_lock: Held during tg3_start_xmit and tg3_tx only | 2082 | * netif_tx_lock: Held during tg3_start_xmit. tg3_tx holds |
2083 | * when calling netif_[start|stop]_queue. | 2083 | * netif_tx_lock when it needs to call |
2084 | * tg3_start_xmit is protected by netif_tx_lock. | 2084 | * netif_wake_queue. |
2085 | * | 2085 | * |
2086 | * Both of these locks are to be held with BH safety. | 2086 | * Both of these locks are to be held with BH safety. |
2087 | * | 2087 | * |
@@ -2118,8 +2118,6 @@ struct tg3 { | |||
2118 | u32 tx_cons; | 2118 | u32 tx_cons; |
2119 | u32 tx_pending; | 2119 | u32 tx_pending; |
2120 | 2120 | ||
2121 | spinlock_t tx_lock; | ||
2122 | |||
2123 | struct tg3_tx_buffer_desc *tx_ring; | 2121 | struct tg3_tx_buffer_desc *tx_ring; |
2124 | struct tx_ring_info *tx_buffers; | 2122 | struct tx_ring_info *tx_buffers; |
2125 | dma_addr_t tx_desc_mapping; | 2123 | dma_addr_t tx_desc_mapping; |
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c index 9f491563944e..4470025ff7f8 100644 --- a/drivers/net/tokenring/ibmtr.c +++ b/drivers/net/tokenring/ibmtr.c | |||
@@ -140,7 +140,7 @@ in the event that chatty debug messages are desired - jjs 12/30/98 */ | |||
140 | 140 | ||
141 | /* version and credits */ | 141 | /* version and credits */ |
142 | #ifndef PCMCIA | 142 | #ifndef PCMCIA |
143 | static char version[] __initdata = | 143 | static char version[] __devinitdata = |
144 | "\nibmtr.c: v1.3.57 8/ 7/94 Peter De Schrijver and Mark Swanson\n" | 144 | "\nibmtr.c: v1.3.57 8/ 7/94 Peter De Schrijver and Mark Swanson\n" |
145 | " v2.1.125 10/20/98 Paul Norton <pnorton@ieee.org>\n" | 145 | " v2.1.125 10/20/98 Paul Norton <pnorton@ieee.org>\n" |
146 | " v2.2.0 12/30/98 Joel Sloan <jjs@c-me.com>\n" | 146 | " v2.2.0 12/30/98 Joel Sloan <jjs@c-me.com>\n" |
@@ -216,7 +216,7 @@ static int __devinitdata turbo_irq[IBMTR_MAX_ADAPTERS] = {0}; | |||
216 | static int __devinitdata turbo_searched = 0; | 216 | static int __devinitdata turbo_searched = 0; |
217 | 217 | ||
218 | #ifndef PCMCIA | 218 | #ifndef PCMCIA |
219 | static __u32 ibmtr_mem_base __initdata = 0xd0000; | 219 | static __u32 ibmtr_mem_base __devinitdata = 0xd0000; |
220 | #endif | 220 | #endif |
221 | 221 | ||
222 | static void __devinit PrtChanID(char *pcid, short stride) | 222 | static void __devinit PrtChanID(char *pcid, short stride) |
diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c index cd2e0251e2bc..85a7f797d343 100644 --- a/drivers/net/tokenring/smctr.c +++ b/drivers/net/tokenring/smctr.c | |||
@@ -5666,7 +5666,7 @@ module_param_array(io, int, NULL, 0); | |||
5666 | module_param_array(irq, int, NULL, 0); | 5666 | module_param_array(irq, int, NULL, 0); |
5667 | module_param(ringspeed, int, 0); | 5667 | module_param(ringspeed, int, 0); |
5668 | 5668 | ||
5669 | static struct net_device *setup_card(int n) | 5669 | static struct net_device * __init setup_card(int n) |
5670 | { | 5670 | { |
5671 | struct net_device *dev = alloc_trdev(sizeof(struct net_local)); | 5671 | struct net_device *dev = alloc_trdev(sizeof(struct net_local)); |
5672 | int err; | 5672 | int err; |
@@ -5696,9 +5696,8 @@ out: | |||
5696 | free_netdev(dev); | 5696 | free_netdev(dev); |
5697 | return ERR_PTR(err); | 5697 | return ERR_PTR(err); |
5698 | } | 5698 | } |
5699 | |||
5700 | 5699 | ||
5701 | int init_module(void) | 5700 | int __init init_module(void) |
5702 | { | 5701 | { |
5703 | int i, found = 0; | 5702 | int i, found = 0; |
5704 | struct net_device *dev; | 5703 | struct net_device *dev; |
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c index b4c0d101a7d7..eba9083da146 100644 --- a/drivers/net/tulip/winbond-840.c +++ b/drivers/net/tulip/winbond-840.c | |||
@@ -138,7 +138,7 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; | |||
138 | #include <asm/irq.h> | 138 | #include <asm/irq.h> |
139 | 139 | ||
140 | /* These identify the driver base version and may not be removed. */ | 140 | /* These identify the driver base version and may not be removed. */ |
141 | static char version[] __devinitdata = | 141 | static char version[] = |
142 | KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) " DRV_RELDATE " Donald Becker <becker@scyld.com>\n" | 142 | KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) " DRV_RELDATE " Donald Becker <becker@scyld.com>\n" |
143 | KERN_INFO " http://www.scyld.com/network/drivers.html\n"; | 143 | KERN_INFO " http://www.scyld.com/network/drivers.html\n"; |
144 | 144 | ||
@@ -224,24 +224,21 @@ static const struct pci_device_id w840_pci_tbl[] = { | |||
224 | }; | 224 | }; |
225 | MODULE_DEVICE_TABLE(pci, w840_pci_tbl); | 225 | MODULE_DEVICE_TABLE(pci, w840_pci_tbl); |
226 | 226 | ||
227 | enum { | ||
228 | netdev_res_size = 128, /* size of PCI BAR resource */ | ||
229 | }; | ||
230 | |||
227 | struct pci_id_info { | 231 | struct pci_id_info { |
228 | const char *name; | 232 | const char *name; |
229 | struct match_info { | 233 | int drv_flags; /* Driver use, intended as capability flags. */ |
230 | int pci, pci_mask, subsystem, subsystem_mask; | ||
231 | int revision, revision_mask; /* Only 8 bits. */ | ||
232 | } id; | ||
233 | int io_size; /* Needed for I/O region check or ioremap(). */ | ||
234 | int drv_flags; /* Driver use, intended as capability flags. */ | ||
235 | }; | 234 | }; |
236 | static struct pci_id_info pci_id_tbl[] = { | 235 | |
237 | {"Winbond W89c840", /* Sometime a Level-One switch card. */ | 236 | static const struct pci_id_info pci_id_tbl[] __devinitdata = { |
238 | { 0x08401050, 0xffffffff, 0x81530000, 0xffff0000 }, | 237 | { /* Sometime a Level-One switch card. */ |
239 | 128, CanHaveMII | HasBrokenTx | FDXOnNoMII}, | 238 | "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII}, |
240 | {"Winbond W89c840", { 0x08401050, 0xffffffff, }, | 239 | { "Winbond W89c840", CanHaveMII | HasBrokenTx}, |
241 | 128, CanHaveMII | HasBrokenTx}, | 240 | { "Compex RL100-ATX", CanHaveMII | HasBrokenTx}, |
242 | {"Compex RL100-ATX", { 0x201111F6, 0xffffffff,}, | 241 | { } /* terminate list. */ |
243 | 128, CanHaveMII | HasBrokenTx}, | ||
244 | {NULL,}, /* 0 terminated list. */ | ||
245 | }; | 242 | }; |
246 | 243 | ||
247 | /* This driver was written to use PCI memory space, however some x86 systems | 244 | /* This driver was written to use PCI memory space, however some x86 systems |
@@ -399,7 +396,7 @@ static int __devinit w840_probe1 (struct pci_dev *pdev, | |||
399 | #ifdef USE_IO_OPS | 396 | #ifdef USE_IO_OPS |
400 | bar = 0; | 397 | bar = 0; |
401 | #endif | 398 | #endif |
402 | ioaddr = pci_iomap(pdev, bar, pci_id_tbl[chip_idx].io_size); | 399 | ioaddr = pci_iomap(pdev, bar, netdev_res_size); |
403 | if (!ioaddr) | 400 | if (!ioaddr) |
404 | goto err_out_free_res; | 401 | goto err_out_free_res; |
405 | 402 | ||
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c index f874e4f6ccf6..cf43390d2c80 100644 --- a/drivers/net/tulip/xircom_cb.c +++ b/drivers/net/tulip/xircom_cb.c | |||
@@ -1264,8 +1264,7 @@ static void investigate_write_descriptor(struct net_device *dev, struct xircom_p | |||
1264 | 1264 | ||
1265 | static int __init xircom_init(void) | 1265 | static int __init xircom_init(void) |
1266 | { | 1266 | { |
1267 | pci_register_driver(&xircom_ops); | 1267 | return pci_register_driver(&xircom_ops); |
1268 | return 0; | ||
1269 | } | 1268 | } |
1270 | 1269 | ||
1271 | static void __exit xircom_exit(void) | 1270 | static void __exit xircom_exit(void) |
diff --git a/drivers/net/tulip/xircom_tulip_cb.c b/drivers/net/tulip/xircom_tulip_cb.c index 091ebb7a62f6..17ca7dc42e6f 100644 --- a/drivers/net/tulip/xircom_tulip_cb.c +++ b/drivers/net/tulip/xircom_tulip_cb.c | |||
@@ -10,26 +10,11 @@ | |||
10 | 410 Severn Ave., Suite 210 | 10 | 410 Severn Ave., Suite 210 |
11 | Annapolis MD 21403 | 11 | Annapolis MD 21403 |
12 | 12 | ||
13 | ----------------------------------------------------------- | ||
14 | |||
15 | Linux kernel-specific changes: | ||
16 | |||
17 | LK1.0 (Ion Badulescu) | ||
18 | - Major cleanup | ||
19 | - Use 2.4 PCI API | ||
20 | - Support ethtool | ||
21 | - Rewrite perfect filter/hash code | ||
22 | - Use interrupts for media changes | ||
23 | |||
24 | LK1.1 (Ion Badulescu) | ||
25 | - Disallow negotiation of unsupported full-duplex modes | ||
26 | */ | 13 | */ |
27 | 14 | ||
28 | #define DRV_NAME "xircom_tulip_cb" | 15 | #define DRV_NAME "xircom_tulip_cb" |
29 | #define DRV_VERSION "0.91+LK1.1" | 16 | #define DRV_VERSION "0.92" |
30 | #define DRV_RELDATE "October 11, 2001" | 17 | #define DRV_RELDATE "June 27, 2006" |
31 | |||
32 | #define CARDBUS 1 | ||
33 | 18 | ||
34 | /* A few user-configurable values. */ | 19 | /* A few user-configurable values. */ |
35 | 20 | ||
@@ -306,10 +291,10 @@ struct xircom_private { | |||
306 | struct xircom_tx_desc tx_ring[TX_RING_SIZE]; | 291 | struct xircom_tx_desc tx_ring[TX_RING_SIZE]; |
307 | /* The saved address of a sent-in-place packet/buffer, for skfree(). */ | 292 | /* The saved address of a sent-in-place packet/buffer, for skfree(). */ |
308 | struct sk_buff* tx_skbuff[TX_RING_SIZE]; | 293 | struct sk_buff* tx_skbuff[TX_RING_SIZE]; |
309 | #ifdef CARDBUS | 294 | |
310 | /* The X3201-3 requires 4-byte aligned tx bufs */ | 295 | /* The X3201-3 requires 4-byte aligned tx bufs */ |
311 | struct sk_buff* tx_aligned_skbuff[TX_RING_SIZE]; | 296 | struct sk_buff* tx_aligned_skbuff[TX_RING_SIZE]; |
312 | #endif | 297 | |
313 | /* The addresses of receive-in-place skbuffs. */ | 298 | /* The addresses of receive-in-place skbuffs. */ |
314 | struct sk_buff* rx_skbuff[RX_RING_SIZE]; | 299 | struct sk_buff* rx_skbuff[RX_RING_SIZE]; |
315 | u16 setup_frame[PKT_SETUP_SZ / sizeof(u16)]; /* Pseudo-Tx frame to init address table. */ | 300 | u16 setup_frame[PKT_SETUP_SZ / sizeof(u16)]; /* Pseudo-Tx frame to init address table. */ |
@@ -908,10 +893,8 @@ static void xircom_init_ring(struct net_device *dev) | |||
908 | tp->tx_skbuff[i] = NULL; | 893 | tp->tx_skbuff[i] = NULL; |
909 | tp->tx_ring[i].status = 0; | 894 | tp->tx_ring[i].status = 0; |
910 | tp->tx_ring[i].buffer2 = virt_to_bus(&tp->tx_ring[i+1]); | 895 | tp->tx_ring[i].buffer2 = virt_to_bus(&tp->tx_ring[i+1]); |
911 | #ifdef CARDBUS | ||
912 | if (tp->chip_id == X3201_3) | 896 | if (tp->chip_id == X3201_3) |
913 | tp->tx_aligned_skbuff[i] = dev_alloc_skb(PKT_BUF_SZ); | 897 | tp->tx_aligned_skbuff[i] = dev_alloc_skb(PKT_BUF_SZ); |
914 | #endif /* CARDBUS */ | ||
915 | } | 898 | } |
916 | tp->tx_ring[i-1].buffer2 = virt_to_bus(&tp->tx_ring[0]); | 899 | tp->tx_ring[i-1].buffer2 = virt_to_bus(&tp->tx_ring[0]); |
917 | } | 900 | } |
@@ -931,12 +914,10 @@ xircom_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
931 | entry = tp->cur_tx % TX_RING_SIZE; | 914 | entry = tp->cur_tx % TX_RING_SIZE; |
932 | 915 | ||
933 | tp->tx_skbuff[entry] = skb; | 916 | tp->tx_skbuff[entry] = skb; |
934 | #ifdef CARDBUS | ||
935 | if (tp->chip_id == X3201_3) { | 917 | if (tp->chip_id == X3201_3) { |
936 | memcpy(tp->tx_aligned_skbuff[entry]->data,skb->data,skb->len); | 918 | memcpy(tp->tx_aligned_skbuff[entry]->data,skb->data,skb->len); |
937 | tp->tx_ring[entry].buffer1 = virt_to_bus(tp->tx_aligned_skbuff[entry]->data); | 919 | tp->tx_ring[entry].buffer1 = virt_to_bus(tp->tx_aligned_skbuff[entry]->data); |
938 | } else | 920 | } else |
939 | #endif | ||
940 | tp->tx_ring[entry].buffer1 = virt_to_bus(skb->data); | 921 | tp->tx_ring[entry].buffer1 = virt_to_bus(skb->data); |
941 | 922 | ||
942 | if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */ | 923 | if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */ |
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c index 063816f2b11e..4103c37172f9 100644 --- a/drivers/net/typhoon.c +++ b/drivers/net/typhoon.c | |||
@@ -805,7 +805,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev) | |||
805 | * If problems develop with TSO, check this first. | 805 | * If problems develop with TSO, check this first. |
806 | */ | 806 | */ |
807 | numDesc = skb_shinfo(skb)->nr_frags + 1; | 807 | numDesc = skb_shinfo(skb)->nr_frags + 1; |
808 | if(skb_tso_size(skb)) | 808 | if (skb_is_gso(skb)) |
809 | numDesc++; | 809 | numDesc++; |
810 | 810 | ||
811 | /* When checking for free space in the ring, we need to also | 811 | /* When checking for free space in the ring, we need to also |
@@ -845,7 +845,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev) | |||
845 | TYPHOON_TX_PF_VLAN_TAG_SHIFT); | 845 | TYPHOON_TX_PF_VLAN_TAG_SHIFT); |
846 | } | 846 | } |
847 | 847 | ||
848 | if(skb_tso_size(skb)) { | 848 | if (skb_is_gso(skb)) { |
849 | first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT; | 849 | first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT; |
850 | first_txd->numDesc++; | 850 | first_txd->numDesc++; |
851 | 851 | ||
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c new file mode 100644 index 000000000000..47f49ef72bdc --- /dev/null +++ b/drivers/net/ucc_geth.c | |||
@@ -0,0 +1,4278 @@ | |||
1 | /* | ||
2 | * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved. | ||
3 | * | ||
4 | * Author: Shlomi Gridish <gridish@freescale.com> | ||
5 | * | ||
6 | * Description: | ||
7 | * QE UCC Gigabit Ethernet Driver | ||
8 | * | ||
9 | * Changelog: | ||
10 | * Jul 6, 2006 Li Yang <LeoLi@freescale.com> | ||
11 | * - Rearrange code and style fixes | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or modify it | ||
14 | * under the terms of the GNU General Public License as published by the | ||
15 | * Free Software Foundation; either version 2 of the License, or (at your | ||
16 | * option) any later version. | ||
17 | */ | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/errno.h> | ||
21 | #include <linux/slab.h> | ||
22 | #include <linux/stddef.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/netdevice.h> | ||
25 | #include <linux/etherdevice.h> | ||
26 | #include <linux/skbuff.h> | ||
27 | #include <linux/spinlock.h> | ||
28 | #include <linux/mm.h> | ||
29 | #include <linux/ethtool.h> | ||
30 | #include <linux/delay.h> | ||
31 | #include <linux/dma-mapping.h> | ||
32 | #include <linux/fsl_devices.h> | ||
33 | #include <linux/ethtool.h> | ||
34 | #include <linux/platform_device.h> | ||
35 | #include <linux/mii.h> | ||
36 | |||
37 | #include <asm/uaccess.h> | ||
38 | #include <asm/irq.h> | ||
39 | #include <asm/io.h> | ||
40 | #include <asm/immap_qe.h> | ||
41 | #include <asm/qe.h> | ||
42 | #include <asm/ucc.h> | ||
43 | #include <asm/ucc_fast.h> | ||
44 | |||
45 | #include "ucc_geth.h" | ||
46 | #include "ucc_geth_phy.h" | ||
47 | |||
48 | #undef DEBUG | ||
49 | |||
50 | #define DRV_DESC "QE UCC Gigabit Ethernet Controller version: June 20, 2006" | ||
51 | #define DRV_NAME "ucc_geth" | ||
52 | |||
53 | #define ugeth_printk(level, format, arg...) \ | ||
54 | printk(level format "\n", ## arg) | ||
55 | |||
56 | #define ugeth_dbg(format, arg...) \ | ||
57 | ugeth_printk(KERN_DEBUG , format , ## arg) | ||
58 | #define ugeth_err(format, arg...) \ | ||
59 | ugeth_printk(KERN_ERR , format , ## arg) | ||
60 | #define ugeth_info(format, arg...) \ | ||
61 | ugeth_printk(KERN_INFO , format , ## arg) | ||
62 | #define ugeth_warn(format, arg...) \ | ||
63 | ugeth_printk(KERN_WARNING , format , ## arg) | ||
64 | |||
65 | #ifdef UGETH_VERBOSE_DEBUG | ||
66 | #define ugeth_vdbg ugeth_dbg | ||
67 | #else | ||
68 | #define ugeth_vdbg(fmt, args...) do { } while (0) | ||
69 | #endif /* UGETH_VERBOSE_DEBUG */ | ||
70 | |||
71 | static DEFINE_SPINLOCK(ugeth_lock); | ||
72 | |||
73 | static ucc_geth_info_t ugeth_primary_info = { | ||
74 | .uf_info = { | ||
75 | .bd_mem_part = MEM_PART_SYSTEM, | ||
76 | .rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES, | ||
77 | .max_rx_buf_length = 1536, | ||
78 | /* FIXME: should be changed in run time for 1G and 100M */ | ||
79 | #ifdef CONFIG_UGETH_HAS_GIGA | ||
80 | .urfs = UCC_GETH_URFS_GIGA_INIT, | ||
81 | .urfet = UCC_GETH_URFET_GIGA_INIT, | ||
82 | .urfset = UCC_GETH_URFSET_GIGA_INIT, | ||
83 | .utfs = UCC_GETH_UTFS_GIGA_INIT, | ||
84 | .utfet = UCC_GETH_UTFET_GIGA_INIT, | ||
85 | .utftt = UCC_GETH_UTFTT_GIGA_INIT, | ||
86 | #else | ||
87 | .urfs = UCC_GETH_URFS_INIT, | ||
88 | .urfet = UCC_GETH_URFET_INIT, | ||
89 | .urfset = UCC_GETH_URFSET_INIT, | ||
90 | .utfs = UCC_GETH_UTFS_INIT, | ||
91 | .utfet = UCC_GETH_UTFET_INIT, | ||
92 | .utftt = UCC_GETH_UTFTT_INIT, | ||
93 | #endif | ||
94 | .ufpt = 256, | ||
95 | .mode = UCC_FAST_PROTOCOL_MODE_ETHERNET, | ||
96 | .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL, | ||
97 | .tenc = UCC_FAST_TX_ENCODING_NRZ, | ||
98 | .renc = UCC_FAST_RX_ENCODING_NRZ, | ||
99 | .tcrc = UCC_FAST_16_BIT_CRC, | ||
100 | .synl = UCC_FAST_SYNC_LEN_NOT_USED, | ||
101 | }, | ||
102 | .numQueuesTx = 1, | ||
103 | .numQueuesRx = 1, | ||
104 | .extendedFilteringChainPointer = ((uint32_t) NULL), | ||
105 | .typeorlen = 3072 /*1536 */ , | ||
106 | .nonBackToBackIfgPart1 = 0x40, | ||
107 | .nonBackToBackIfgPart2 = 0x60, | ||
108 | .miminumInterFrameGapEnforcement = 0x50, | ||
109 | .backToBackInterFrameGap = 0x60, | ||
110 | .mblinterval = 128, | ||
111 | .nortsrbytetime = 5, | ||
112 | .fracsiz = 1, | ||
113 | .strictpriorityq = 0xff, | ||
114 | .altBebTruncation = 0xa, | ||
115 | .excessDefer = 1, | ||
116 | .maxRetransmission = 0xf, | ||
117 | .collisionWindow = 0x37, | ||
118 | .receiveFlowControl = 1, | ||
119 | .maxGroupAddrInHash = 4, | ||
120 | .maxIndAddrInHash = 4, | ||
121 | .prel = 7, | ||
122 | .maxFrameLength = 1518, | ||
123 | .minFrameLength = 64, | ||
124 | .maxD1Length = 1520, | ||
125 | .maxD2Length = 1520, | ||
126 | .vlantype = 0x8100, | ||
127 | .ecamptr = ((uint32_t) NULL), | ||
128 | .eventRegMask = UCCE_OTHER, | ||
129 | .pausePeriod = 0xf000, | ||
130 | .interruptcoalescingmaxvalue = {1, 1, 1, 1, 1, 1, 1, 1}, | ||
131 | .bdRingLenTx = { | ||
132 | TX_BD_RING_LEN, | ||
133 | TX_BD_RING_LEN, | ||
134 | TX_BD_RING_LEN, | ||
135 | TX_BD_RING_LEN, | ||
136 | TX_BD_RING_LEN, | ||
137 | TX_BD_RING_LEN, | ||
138 | TX_BD_RING_LEN, | ||
139 | TX_BD_RING_LEN}, | ||
140 | |||
141 | .bdRingLenRx = { | ||
142 | RX_BD_RING_LEN, | ||
143 | RX_BD_RING_LEN, | ||
144 | RX_BD_RING_LEN, | ||
145 | RX_BD_RING_LEN, | ||
146 | RX_BD_RING_LEN, | ||
147 | RX_BD_RING_LEN, | ||
148 | RX_BD_RING_LEN, | ||
149 | RX_BD_RING_LEN}, | ||
150 | |||
151 | .numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1, | ||
152 | .largestexternallookupkeysize = | ||
153 | QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE, | ||
154 | .statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_NONE, | ||
155 | .vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP, | ||
156 | .vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP, | ||
157 | .rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT, | ||
158 | .aufc = UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE, | ||
159 | .padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC, | ||
160 | .numThreadsTx = UCC_GETH_NUM_OF_THREADS_4, | ||
161 | .numThreadsRx = UCC_GETH_NUM_OF_THREADS_4, | ||
162 | .riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2, | ||
163 | .riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2, | ||
164 | }; | ||
165 | |||
166 | static ucc_geth_info_t ugeth_info[8]; | ||
167 | |||
168 | #ifdef DEBUG | ||
169 | static void mem_disp(u8 *addr, int size) | ||
170 | { | ||
171 | u8 *i; | ||
172 | int size16Aling = (size >> 4) << 4; | ||
173 | int size4Aling = (size >> 2) << 2; | ||
174 | int notAlign = 0; | ||
175 | if (size % 16) | ||
176 | notAlign = 1; | ||
177 | |||
178 | for (i = addr; (u32) i < (u32) addr + size16Aling; i += 16) | ||
179 | printk("0x%08x: %08x %08x %08x %08x\r\n", | ||
180 | (u32) i, | ||
181 | *((u32 *) (i)), | ||
182 | *((u32 *) (i + 4)), | ||
183 | *((u32 *) (i + 8)), *((u32 *) (i + 12))); | ||
184 | if (notAlign == 1) | ||
185 | printk("0x%08x: ", (u32) i); | ||
186 | for (; (u32) i < (u32) addr + size4Aling; i += 4) | ||
187 | printk("%08x ", *((u32 *) (i))); | ||
188 | for (; (u32) i < (u32) addr + size; i++) | ||
189 | printk("%02x", *((u8 *) (i))); | ||
190 | if (notAlign == 1) | ||
191 | printk("\r\n"); | ||
192 | } | ||
193 | #endif /* DEBUG */ | ||
194 | |||
195 | #ifdef CONFIG_UGETH_FILTERING | ||
196 | static void enqueue(struct list_head *node, struct list_head *lh) | ||
197 | { | ||
198 | unsigned long flags; | ||
199 | |||
200 | spin_lock_irqsave(ugeth_lock, flags); | ||
201 | list_add_tail(node, lh); | ||
202 | spin_unlock_irqrestore(ugeth_lock, flags); | ||
203 | } | ||
204 | #endif /* CONFIG_UGETH_FILTERING */ | ||
205 | |||
206 | static struct list_head *dequeue(struct list_head *lh) | ||
207 | { | ||
208 | unsigned long flags; | ||
209 | |||
210 | spin_lock_irqsave(ugeth_lock, flags); | ||
211 | if (!list_empty(lh)) { | ||
212 | struct list_head *node = lh->next; | ||
213 | list_del(node); | ||
214 | spin_unlock_irqrestore(ugeth_lock, flags); | ||
215 | return node; | ||
216 | } else { | ||
217 | spin_unlock_irqrestore(ugeth_lock, flags); | ||
218 | return NULL; | ||
219 | } | ||
220 | } | ||
221 | |||
222 | static int get_interface_details(enet_interface_e enet_interface, | ||
223 | enet_speed_e *speed, | ||
224 | int *r10m, | ||
225 | int *rmm, | ||
226 | int *rpm, | ||
227 | int *tbi, int *limited_to_full_duplex) | ||
228 | { | ||
229 | /* Analyze enet_interface according to Interface Mode | ||
230 | Configuration table */ | ||
231 | switch (enet_interface) { | ||
232 | case ENET_10_MII: | ||
233 | *speed = ENET_SPEED_10BT; | ||
234 | break; | ||
235 | case ENET_10_RMII: | ||
236 | *speed = ENET_SPEED_10BT; | ||
237 | *r10m = 1; | ||
238 | *rmm = 1; | ||
239 | break; | ||
240 | case ENET_10_RGMII: | ||
241 | *speed = ENET_SPEED_10BT; | ||
242 | *rpm = 1; | ||
243 | *r10m = 1; | ||
244 | *limited_to_full_duplex = 1; | ||
245 | break; | ||
246 | case ENET_100_MII: | ||
247 | *speed = ENET_SPEED_100BT; | ||
248 | break; | ||
249 | case ENET_100_RMII: | ||
250 | *speed = ENET_SPEED_100BT; | ||
251 | *rmm = 1; | ||
252 | break; | ||
253 | case ENET_100_RGMII: | ||
254 | *speed = ENET_SPEED_100BT; | ||
255 | *rpm = 1; | ||
256 | *limited_to_full_duplex = 1; | ||
257 | break; | ||
258 | case ENET_1000_GMII: | ||
259 | *speed = ENET_SPEED_1000BT; | ||
260 | *limited_to_full_duplex = 1; | ||
261 | break; | ||
262 | case ENET_1000_RGMII: | ||
263 | *speed = ENET_SPEED_1000BT; | ||
264 | *rpm = 1; | ||
265 | *limited_to_full_duplex = 1; | ||
266 | break; | ||
267 | case ENET_1000_TBI: | ||
268 | *speed = ENET_SPEED_1000BT; | ||
269 | *tbi = 1; | ||
270 | *limited_to_full_duplex = 1; | ||
271 | break; | ||
272 | case ENET_1000_RTBI: | ||
273 | *speed = ENET_SPEED_1000BT; | ||
274 | *rpm = 1; | ||
275 | *tbi = 1; | ||
276 | *limited_to_full_duplex = 1; | ||
277 | break; | ||
278 | default: | ||
279 | return -EINVAL; | ||
280 | break; | ||
281 | } | ||
282 | |||
283 | return 0; | ||
284 | } | ||
285 | |||
286 | static struct sk_buff *get_new_skb(ucc_geth_private_t *ugeth, u8 *bd) | ||
287 | { | ||
288 | struct sk_buff *skb = NULL; | ||
289 | |||
290 | skb = dev_alloc_skb(ugeth->ug_info->uf_info.max_rx_buf_length + | ||
291 | UCC_GETH_RX_DATA_BUF_ALIGNMENT); | ||
292 | |||
293 | if (skb == NULL) | ||
294 | return NULL; | ||
295 | |||
296 | /* We need the data buffer to be aligned properly. We will reserve | ||
297 | * as many bytes as needed to align the data properly | ||
298 | */ | ||
299 | skb_reserve(skb, | ||
300 | UCC_GETH_RX_DATA_BUF_ALIGNMENT - | ||
301 | (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT - | ||
302 | 1))); | ||
303 | |||
304 | skb->dev = ugeth->dev; | ||
305 | |||
306 | BD_BUFFER_SET(bd, | ||
307 | dma_map_single(NULL, | ||
308 | skb->data, | ||
309 | ugeth->ug_info->uf_info.max_rx_buf_length + | ||
310 | UCC_GETH_RX_DATA_BUF_ALIGNMENT, | ||
311 | DMA_FROM_DEVICE)); | ||
312 | |||
313 | BD_STATUS_AND_LENGTH_SET(bd, | ||
314 | (R_E | R_I | | ||
315 | (BD_STATUS_AND_LENGTH(bd) & R_W))); | ||
316 | |||
317 | return skb; | ||
318 | } | ||
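A worked example of the skb_reserve() arithmetic in get_new_skb() above, assuming (purely for illustration) that UCC_GETH_RX_DATA_BUF_ALIGNMENT is 64 and skb->data happens to land at an address ending in 0xD8C6:

    /* Illustration only: the alignment value and the address are assumptions. */
    /* addr & (64 - 1) = 0xD8C6 & 0x3F = 0x06                                  */
    /* reserve 64 - 0x06 = 58 bytes -> data now starts at ...0xD900            */
    /* (an already-aligned buffer still reserves a full 64 bytes)              */
    skb_reserve(skb, UCC_GETH_RX_DATA_BUF_ALIGNMENT -
                     (((unsigned)skb->data) &
                      (UCC_GETH_RX_DATA_BUF_ALIGNMENT - 1)));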
319 | |||
320 | static int rx_bd_buffer_set(ucc_geth_private_t *ugeth, u8 rxQ) | ||
321 | { | ||
322 | u8 *bd; | ||
323 | u32 bd_status; | ||
324 | struct sk_buff *skb; | ||
325 | int i; | ||
326 | |||
327 | bd = ugeth->p_rx_bd_ring[rxQ]; | ||
328 | i = 0; | ||
329 | |||
330 | do { | ||
331 | bd_status = BD_STATUS_AND_LENGTH(bd); | ||
332 | skb = get_new_skb(ugeth, bd); | ||
333 | |||
334 | if (!skb) /* If we cannot allocate a data buffer, | ||
335 | abort; cleanup happens elsewhere */ | ||
336 | return -ENOMEM; | ||
337 | |||
338 | ugeth->rx_skbuff[rxQ][i] = skb; | ||
339 | |||
340 | /* advance the BD pointer */ | ||
341 | bd += UCC_GETH_SIZE_OF_BD; | ||
342 | i++; | ||
343 | } while (!(bd_status & R_W)); | ||
344 | |||
345 | return 0; | ||
346 | } | ||
347 | |||
348 | static int fill_init_enet_entries(ucc_geth_private_t *ugeth, | ||
349 | volatile u32 *p_start, | ||
350 | u8 num_entries, | ||
351 | u32 thread_size, | ||
352 | u32 thread_alignment, | ||
353 | qe_risc_allocation_e risc, | ||
354 | int skip_page_for_first_entry) | ||
355 | { | ||
356 | u32 init_enet_offset; | ||
357 | u8 i; | ||
358 | int snum; | ||
359 | |||
360 | for (i = 0; i < num_entries; i++) { | ||
361 | if ((snum = qe_get_snum()) < 0) { | ||
362 | ugeth_err("fill_init_enet_entries: Can not get SNUM."); | ||
363 | return snum; | ||
364 | } | ||
365 | if ((i == 0) && skip_page_for_first_entry) | ||
366 | /* First entry of Rx does not have page */ | ||
367 | init_enet_offset = 0; | ||
368 | else { | ||
369 | init_enet_offset = | ||
370 | qe_muram_alloc(thread_size, thread_alignment); | ||
371 | if (IS_MURAM_ERR(init_enet_offset)) { | ||
372 | ugeth_err | ||
373 | ("fill_init_enet_entries: Can not allocate DPRAM memory."); | ||
374 | qe_put_snum((u8) snum); | ||
375 | return -ENOMEM; | ||
376 | } | ||
377 | } | ||
378 | *(p_start++) = | ||
379 | ((u8) snum << ENET_INIT_PARAM_SNUM_SHIFT) | init_enet_offset | ||
380 | | risc; | ||
381 | } | ||
382 | |||
383 | return 0; | ||
384 | } | ||
385 | |||
386 | static int return_init_enet_entries(ucc_geth_private_t *ugeth, | ||
387 | volatile u32 *p_start, | ||
388 | u8 num_entries, | ||
389 | qe_risc_allocation_e risc, | ||
390 | int skip_page_for_first_entry) | ||
391 | { | ||
392 | u32 init_enet_offset; | ||
393 | u8 i; | ||
394 | int snum; | ||
395 | |||
396 | for (i = 0; i < num_entries; i++) { | ||
397 | /* Check that this entry was actually valid -- | ||
398 | needed in case allocations failed */ | ||
399 | if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) { | ||
400 | snum = | ||
401 | (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >> | ||
402 | ENET_INIT_PARAM_SNUM_SHIFT; | ||
403 | qe_put_snum((u8) snum); | ||
404 | if (!((i == 0) && skip_page_for_first_entry)) { | ||
405 | /* The first Rx entry does not have a page */ | ||
406 | init_enet_offset = | ||
407 | (in_be32(p_start) & | ||
408 | ENET_INIT_PARAM_PTR_MASK); | ||
409 | qe_muram_free(init_enet_offset); | ||
410 | } | ||
411 | *(p_start++) = 0; /* Just for cosmetics */ | ||
412 | } | ||
413 | } | ||
414 | |||
415 | return 0; | ||
416 | } | ||
417 | |||
418 | #ifdef DEBUG | ||
419 | static int dump_init_enet_entries(ucc_geth_private_t *ugeth, | ||
420 | volatile u32 *p_start, | ||
421 | u8 num_entries, | ||
422 | u32 thread_size, | ||
423 | qe_risc_allocation_e risc, | ||
424 | int skip_page_for_first_entry) | ||
425 | { | ||
426 | u32 init_enet_offset; | ||
427 | u8 i; | ||
428 | int snum; | ||
429 | |||
430 | for (i = 0; i < num_entries; i++) { | ||
431 | /* Check that this entry was actually valid -- | ||
432 | needed in case allocations failed */ | ||
433 | if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) { | ||
434 | snum = | ||
435 | (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >> | ||
436 | ENET_INIT_PARAM_SNUM_SHIFT; | ||
437 | qe_put_snum((u8) snum); | ||
438 | if (!((i == 0) && skip_page_for_first_entry)) { | ||
439 | /* The first Rx entry does not have a page */ | ||
440 | init_enet_offset = | ||
441 | (in_be32(p_start) & | ||
442 | ENET_INIT_PARAM_PTR_MASK); | ||
443 | ugeth_info("Init enet entry %d:", i); | ||
444 | ugeth_info("Base address: 0x%08x", | ||
445 | (u32) | ||
446 | qe_muram_addr(init_enet_offset)); | ||
447 | mem_disp(qe_muram_addr(init_enet_offset), | ||
448 | thread_size); | ||
449 | } | ||
450 | p_start++; | ||
451 | } | ||
452 | } | ||
453 | |||
454 | return 0; | ||
455 | } | ||
456 | #endif | ||
457 | |||
458 | #ifdef CONFIG_UGETH_FILTERING | ||
459 | static enet_addr_container_t *get_enet_addr_container(void) | ||
460 | { | ||
461 | enet_addr_container_t *enet_addr_cont; | ||
462 | |||
463 | /* allocate memory */ | ||
464 | enet_addr_cont = kmalloc(sizeof(enet_addr_container_t), GFP_KERNEL); | ||
465 | if (!enet_addr_cont) { | ||
466 | ugeth_err("%s: No memory for enet_addr_container_t object.", | ||
467 | __FUNCTION__); | ||
468 | return NULL; | ||
469 | } | ||
470 | |||
471 | return enet_addr_cont; | ||
472 | } | ||
473 | #endif /* CONFIG_UGETH_FILTERING */ | ||
474 | |||
475 | static void put_enet_addr_container(enet_addr_container_t *enet_addr_cont) | ||
476 | { | ||
477 | kfree(enet_addr_cont); | ||
478 | } | ||
479 | |||
480 | #ifdef CONFIG_UGETH_FILTERING | ||
481 | static int hw_add_addr_in_paddr(ucc_geth_private_t *ugeth, | ||
482 | enet_addr_t *p_enet_addr, u8 paddr_num) | ||
483 | { | ||
484 | ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt; | ||
485 | |||
486 | if (!(paddr_num < NUM_OF_PADDRS)) { | ||
487 | ugeth_warn("%s: Illegal paddr_num.", __FUNCTION__); | ||
488 | return -EINVAL; | ||
489 | } | ||
490 | |||
491 | p_82xx_addr_filt = | ||
492 | (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram-> | ||
493 | addressfiltering; | ||
494 | |||
495 | /* Ethernet frames are defined in Little Endian mode, */ | ||
496 | /* therefore to insert the address we reverse the bytes. */ | ||
497 | out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, | ||
498 | (u16) (((u16) (((u16) ((*p_enet_addr)[5])) << 8)) | | ||
499 | (u16) (*p_enet_addr)[4])); | ||
500 | out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, | ||
501 | (u16) (((u16) (((u16) ((*p_enet_addr)[3])) << 8)) | | ||
502 | (u16) (*p_enet_addr)[2])); | ||
503 | out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, | ||
504 | (u16) (((u16) (((u16) ((*p_enet_addr)[1])) << 8)) | | ||
505 | (u16) (*p_enet_addr)[0])); | ||
506 | |||
507 | return 0; | ||
508 | } | ||
509 | #endif /* CONFIG_UGETH_FILTERING */ | ||
510 | |||
511 | static int hw_clear_addr_in_paddr(ucc_geth_private_t *ugeth, u8 paddr_num) | ||
512 | { | ||
513 | ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt; | ||
514 | |||
515 | if (!(paddr_num < NUM_OF_PADDRS)) { | ||
516 | ugeth_warn("%s: Illegal paddr_num.", __FUNCTION__); | ||
517 | return -EINVAL; | ||
518 | } | ||
519 | |||
520 | p_82xx_addr_filt = | ||
521 | (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram-> | ||
522 | addressfiltering; | ||
523 | |||
524 | /* Writing address ff.ff.ff.ff.ff.ff disables address | ||
525 | recognition for this register */ | ||
526 | out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff); | ||
527 | out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 0xffff); | ||
528 | out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff); | ||
529 | |||
530 | return 0; | ||
531 | } | ||
532 | |||
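| /* Write the (byte-reversed) address into the address-filtering taddr | ||
| fields and issue a QE_SET_GROUP_ADDRESS command so the microcode adds | ||
| it to the hash table. */ | ||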
533 | static void hw_add_addr_in_hash(ucc_geth_private_t *ugeth, | ||
534 | enet_addr_t *p_enet_addr) | ||
535 | { | ||
536 | ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt; | ||
537 | u32 cecr_subblock; | ||
538 | |||
539 | p_82xx_addr_filt = | ||
540 | (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram-> | ||
541 | addressfiltering; | ||
542 | |||
543 | cecr_subblock = | ||
544 | ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); | ||
545 | |||
546 | /* Ethernet frames are defined in Little Endian mode; therefore, to | ||
547 | insert the address into the hash (Big Endian mode), we reverse | ||
548 | the bytes. */ | ||
549 | out_be16(&p_82xx_addr_filt->taddr.h, | ||
550 | (u16) (((u16) (((u16) ((*p_enet_addr)[5])) << 8)) | | ||
551 | (u16) (*p_enet_addr)[4])); | ||
552 | out_be16(&p_82xx_addr_filt->taddr.m, | ||
553 | (u16) (((u16) (((u16) ((*p_enet_addr)[3])) << 8)) | | ||
554 | (u16) (*p_enet_addr)[2])); | ||
555 | out_be16(&p_82xx_addr_filt->taddr.l, | ||
556 | (u16) (((u16) (((u16) ((*p_enet_addr)[1])) << 8)) | | ||
557 | (u16) (*p_enet_addr)[0])); | ||
558 | |||
559 | qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock, | ||
560 | (u8) QE_CR_PROTOCOL_ETHERNET, 0); | ||
561 | } | ||
562 | |||
563 | #ifdef CONFIG_UGETH_MAGIC_PACKET | ||
564 | static void magic_packet_detection_enable(ucc_geth_private_t *ugeth) | ||
565 | { | ||
566 | ucc_fast_private_t *uccf; | ||
567 | ucc_geth_t *ug_regs; | ||
568 | u32 maccfg2, uccm; | ||
569 | |||
570 | uccf = ugeth->uccf; | ||
571 | ug_regs = ugeth->ug_regs; | ||
572 | |||
573 | /* Enable interrupts for magic packet detection */ | ||
574 | uccm = in_be32(uccf->p_uccm); | ||
575 | uccm |= UCCE_MPD; | ||
576 | out_be32(uccf->p_uccm, uccm); | ||
577 | |||
578 | /* Enable magic packet detection */ | ||
579 | maccfg2 = in_be32(&ug_regs->maccfg2); | ||
580 | maccfg2 |= MACCFG2_MPE; | ||
581 | out_be32(&ug_regs->maccfg2, maccfg2); | ||
582 | } | ||
583 | |||
584 | static void magic_packet_detection_disable(ucc_geth_private_t *ugeth) | ||
585 | { | ||
586 | ucc_fast_private_t *uccf; | ||
587 | ucc_geth_t *ug_regs; | ||
588 | u32 maccfg2, uccm; | ||
589 | |||
590 | uccf = ugeth->uccf; | ||
591 | ug_regs = ugeth->ug_regs; | ||
592 | |||
593 | /* Disable interrupts for magic packet detection */ | ||
594 | uccm = in_be32(uccf->p_uccm); | ||
595 | uccm &= ~UCCE_MPD; | ||
596 | out_be32(uccf->p_uccm, uccm); | ||
597 | |||
598 | /* Disable magic packet detection */ | ||
599 | maccfg2 = in_be32(&ug_regs->maccfg2); | ||
600 | maccfg2 &= ~MACCFG2_MPE; | ||
601 | out_be32(&ug_regs->maccfg2, maccfg2); | ||
602 | } | ||
603 | #endif /* CONFIG_UGETH_MAGIC_PACKET */ | ||
604 | |||
605 | static inline int compare_addr(enet_addr_t *addr1, enet_addr_t *addr2) | ||
606 | { | ||
607 | return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS); | ||
608 | } | ||
609 | |||
610 | #ifdef DEBUG | ||
611 | static void get_statistics(ucc_geth_private_t *ugeth, | ||
612 | ucc_geth_tx_firmware_statistics_t * | ||
613 | tx_firmware_statistics, | ||
614 | ucc_geth_rx_firmware_statistics_t * | ||
615 | rx_firmware_statistics, | ||
616 | ucc_geth_hardware_statistics_t *hardware_statistics) | ||
617 | { | ||
618 | ucc_fast_t *uf_regs; | ||
619 | ucc_geth_t *ug_regs; | ||
620 | ucc_geth_tx_firmware_statistics_pram_t *p_tx_fw_statistics_pram; | ||
621 | ucc_geth_rx_firmware_statistics_pram_t *p_rx_fw_statistics_pram; | ||
622 | |||
623 | ug_regs = ugeth->ug_regs; | ||
624 | uf_regs = (ucc_fast_t *) ug_regs; | ||
625 | p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram; | ||
626 | p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram; | ||
627 | |||
628 | /* Copy Tx firmware statistics only if the user handed a pointer and | ||
629 | the driver actually gathers them */ | ||
630 | if (tx_firmware_statistics && p_tx_fw_statistics_pram) { | ||
631 | tx_firmware_statistics->sicoltx = | ||
632 | in_be32(&p_tx_fw_statistics_pram->sicoltx); | ||
633 | tx_firmware_statistics->mulcoltx = | ||
634 | in_be32(&p_tx_fw_statistics_pram->mulcoltx); | ||
635 | tx_firmware_statistics->latecoltxfr = | ||
636 | in_be32(&p_tx_fw_statistics_pram->latecoltxfr); | ||
637 | tx_firmware_statistics->frabortduecol = | ||
638 | in_be32(&p_tx_fw_statistics_pram->frabortduecol); | ||
639 | tx_firmware_statistics->frlostinmactxer = | ||
640 | in_be32(&p_tx_fw_statistics_pram->frlostinmactxer); | ||
641 | tx_firmware_statistics->carriersenseertx = | ||
642 | in_be32(&p_tx_fw_statistics_pram->carriersenseertx); | ||
643 | tx_firmware_statistics->frtxok = | ||
644 | in_be32(&p_tx_fw_statistics_pram->frtxok); | ||
645 | tx_firmware_statistics->txfrexcessivedefer = | ||
646 | in_be32(&p_tx_fw_statistics_pram->txfrexcessivedefer); | ||
647 | tx_firmware_statistics->txpkts256 = | ||
648 | in_be32(&p_tx_fw_statistics_pram->txpkts256); | ||
649 | tx_firmware_statistics->txpkts512 = | ||
650 | in_be32(&p_tx_fw_statistics_pram->txpkts512); | ||
651 | tx_firmware_statistics->txpkts1024 = | ||
652 | in_be32(&p_tx_fw_statistics_pram->txpkts1024); | ||
653 | tx_firmware_statistics->txpktsjumbo = | ||
654 | in_be32(&p_tx_fw_statistics_pram->txpktsjumbo); | ||
655 | } | ||
656 | |||
657 | /* Copy Rx firmware statistics only if the user handed a pointer and | ||
658 | the driver actually gathers them */ | ||
659 | if (rx_firmware_statistics && p_rx_fw_statistics_pram) { | ||
660 | int i; | ||
661 | rx_firmware_statistics->frrxfcser = | ||
662 | in_be32(&p_rx_fw_statistics_pram->frrxfcser); | ||
663 | rx_firmware_statistics->fraligner = | ||
664 | in_be32(&p_rx_fw_statistics_pram->fraligner); | ||
665 | rx_firmware_statistics->inrangelenrxer = | ||
666 | in_be32(&p_rx_fw_statistics_pram->inrangelenrxer); | ||
667 | rx_firmware_statistics->outrangelenrxer = | ||
668 | in_be32(&p_rx_fw_statistics_pram->outrangelenrxer); | ||
669 | rx_firmware_statistics->frtoolong = | ||
670 | in_be32(&p_rx_fw_statistics_pram->frtoolong); | ||
671 | rx_firmware_statistics->runt = | ||
672 | in_be32(&p_rx_fw_statistics_pram->runt); | ||
673 | rx_firmware_statistics->verylongevent = | ||
674 | in_be32(&p_rx_fw_statistics_pram->verylongevent); | ||
675 | rx_firmware_statistics->symbolerror = | ||
676 | in_be32(&p_rx_fw_statistics_pram->symbolerror); | ||
677 | rx_firmware_statistics->dropbsy = | ||
678 | in_be32(&p_rx_fw_statistics_pram->dropbsy); | ||
679 | for (i = 0; i < 0x8; i++) | ||
680 | rx_firmware_statistics->res0[i] = | ||
681 | p_rx_fw_statistics_pram->res0[i]; | ||
682 | rx_firmware_statistics->mismatchdrop = | ||
683 | in_be32(&p_rx_fw_statistics_pram->mismatchdrop); | ||
684 | rx_firmware_statistics->underpkts = | ||
685 | in_be32(&p_rx_fw_statistics_pram->underpkts); | ||
686 | rx_firmware_statistics->pkts256 = | ||
687 | in_be32(&p_rx_fw_statistics_pram->pkts256); | ||
688 | rx_firmware_statistics->pkts512 = | ||
689 | in_be32(&p_rx_fw_statistics_pram->pkts512); | ||
690 | rx_firmware_statistics->pkts1024 = | ||
691 | in_be32(&p_rx_fw_statistics_pram->pkts1024); | ||
692 | rx_firmware_statistics->pktsjumbo = | ||
693 | in_be32(&p_rx_fw_statistics_pram->pktsjumbo); | ||
694 | rx_firmware_statistics->frlossinmacer = | ||
695 | in_be32(&p_rx_fw_statistics_pram->frlossinmacer); | ||
696 | rx_firmware_statistics->pausefr = | ||
697 | in_be32(&p_rx_fw_statistics_pram->pausefr); | ||
698 | for (i = 0; i < 0x4; i++) | ||
699 | rx_firmware_statistics->res1[i] = | ||
700 | p_rx_fw_statistics_pram->res1[i]; | ||
701 | rx_firmware_statistics->removevlan = | ||
702 | in_be32(&p_rx_fw_statistics_pram->removevlan); | ||
703 | rx_firmware_statistics->replacevlan = | ||
704 | in_be32(&p_rx_fw_statistics_pram->replacevlan); | ||
705 | rx_firmware_statistics->insertvlan = | ||
706 | in_be32(&p_rx_fw_statistics_pram->insertvlan); | ||
707 | } | ||
708 | |||
709 | /* Copy hardware statistics only if the user handed a pointer and the | ||
710 | driver actually gathers them */ | ||
711 | if (hardware_statistics && (in_be32(&uf_regs->upsmr) & UPSMR_HSE)) { | ||
712 | hardware_statistics->tx64 = in_be32(&ug_regs->tx64); | ||
713 | hardware_statistics->tx127 = in_be32(&ug_regs->tx127); | ||
714 | hardware_statistics->tx255 = in_be32(&ug_regs->tx255); | ||
715 | hardware_statistics->rx64 = in_be32(&ug_regs->rx64); | ||
716 | hardware_statistics->rx127 = in_be32(&ug_regs->rx127); | ||
717 | hardware_statistics->rx255 = in_be32(&ug_regs->rx255); | ||
718 | hardware_statistics->txok = in_be32(&ug_regs->txok); | ||
719 | hardware_statistics->txcf = in_be16(&ug_regs->txcf); | ||
720 | hardware_statistics->tmca = in_be32(&ug_regs->tmca); | ||
721 | hardware_statistics->tbca = in_be32(&ug_regs->tbca); | ||
722 | hardware_statistics->rxfok = in_be32(&ug_regs->rxfok); | ||
723 | hardware_statistics->rxbok = in_be32(&ug_regs->rxbok); | ||
724 | hardware_statistics->rbyt = in_be32(&ug_regs->rbyt); | ||
725 | hardware_statistics->rmca = in_be32(&ug_regs->rmca); | ||
726 | hardware_statistics->rbca = in_be32(&ug_regs->rbca); | ||
727 | } | ||
728 | } | ||
729 | |||
730 | static void dump_bds(ucc_geth_private_t *ugeth) | ||
731 | { | ||
732 | int i; | ||
733 | int length; | ||
734 | |||
735 | for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { | ||
736 | if (ugeth->p_tx_bd_ring[i]) { | ||
737 | length = | ||
738 | (ugeth->ug_info->bdRingLenTx[i] * | ||
739 | UCC_GETH_SIZE_OF_BD); | ||
740 | ugeth_info("TX BDs[%d]", i); | ||
741 | mem_disp(ugeth->p_tx_bd_ring[i], length); | ||
742 | } | ||
743 | } | ||
744 | for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { | ||
745 | if (ugeth->p_rx_bd_ring[i]) { | ||
746 | length = | ||
747 | (ugeth->ug_info->bdRingLenRx[i] * | ||
748 | UCC_GETH_SIZE_OF_BD); | ||
749 | ugeth_info("RX BDs[%d]", i); | ||
750 | mem_disp(ugeth->p_rx_bd_ring[i], length); | ||
751 | } | ||
752 | } | ||
753 | } | ||
754 | |||
755 | static void dump_regs(ucc_geth_private_t *ugeth) | ||
756 | { | ||
757 | int i; | ||
758 | |||
759 | ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num); | ||
760 | ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs); | ||
761 | |||
762 | ugeth_info("maccfg1 : addr - 0x%08x, val - 0x%08x", | ||
763 | (u32) & ugeth->ug_regs->maccfg1, | ||
764 | in_be32(&ugeth->ug_regs->maccfg1)); | ||
765 | ugeth_info("maccfg2 : addr - 0x%08x, val - 0x%08x", | ||
766 | (u32) & ugeth->ug_regs->maccfg2, | ||
767 | in_be32(&ugeth->ug_regs->maccfg2)); | ||
768 | ugeth_info("ipgifg : addr - 0x%08x, val - 0x%08x", | ||
769 | (u32) & ugeth->ug_regs->ipgifg, | ||
770 | in_be32(&ugeth->ug_regs->ipgifg)); | ||
771 | ugeth_info("hafdup : addr - 0x%08x, val - 0x%08x", | ||
772 | (u32) & ugeth->ug_regs->hafdup, | ||
773 | in_be32(&ugeth->ug_regs->hafdup)); | ||
774 | ugeth_info("miimcfg : addr - 0x%08x, val - 0x%08x", | ||
775 | (u32) & ugeth->ug_regs->miimng.miimcfg, | ||
776 | in_be32(&ugeth->ug_regs->miimng.miimcfg)); | ||
777 | ugeth_info("miimcom : addr - 0x%08x, val - 0x%08x", | ||
778 | (u32) & ugeth->ug_regs->miimng.miimcom, | ||
779 | in_be32(&ugeth->ug_regs->miimng.miimcom)); | ||
780 | ugeth_info("miimadd : addr - 0x%08x, val - 0x%08x", | ||
781 | (u32) & ugeth->ug_regs->miimng.miimadd, | ||
782 | in_be32(&ugeth->ug_regs->miimng.miimadd)); | ||
783 | ugeth_info("miimcon : addr - 0x%08x, val - 0x%08x", | ||
784 | (u32) & ugeth->ug_regs->miimng.miimcon, | ||
785 | in_be32(&ugeth->ug_regs->miimng.miimcon)); | ||
786 | ugeth_info("miimstat : addr - 0x%08x, val - 0x%08x", | ||
787 | (u32) & ugeth->ug_regs->miimng.miimstat, | ||
788 | in_be32(&ugeth->ug_regs->miimng.miimstat)); | ||
789 | ugeth_info("miimmind : addr - 0x%08x, val - 0x%08x", | ||
790 | (u32) & ugeth->ug_regs->miimng.miimind, | ||
791 | in_be32(&ugeth->ug_regs->miimng.miimind)); | ||
792 | ugeth_info("ifctl : addr - 0x%08x, val - 0x%08x", | ||
793 | (u32) & ugeth->ug_regs->ifctl, | ||
794 | in_be32(&ugeth->ug_regs->ifctl)); | ||
795 | ugeth_info("ifstat : addr - 0x%08x, val - 0x%08x", | ||
796 | (u32) & ugeth->ug_regs->ifstat, | ||
797 | in_be32(&ugeth->ug_regs->ifstat)); | ||
798 | ugeth_info("macstnaddr1: addr - 0x%08x, val - 0x%08x", | ||
799 | (u32) & ugeth->ug_regs->macstnaddr1, | ||
800 | in_be32(&ugeth->ug_regs->macstnaddr1)); | ||
801 | ugeth_info("macstnaddr2: addr - 0x%08x, val - 0x%08x", | ||
802 | (u32) & ugeth->ug_regs->macstnaddr2, | ||
803 | in_be32(&ugeth->ug_regs->macstnaddr2)); | ||
804 | ugeth_info("uempr : addr - 0x%08x, val - 0x%08x", | ||
805 | (u32) & ugeth->ug_regs->uempr, | ||
806 | in_be32(&ugeth->ug_regs->uempr)); | ||
807 | ugeth_info("utbipar : addr - 0x%08x, val - 0x%08x", | ||
808 | (u32) & ugeth->ug_regs->utbipar, | ||
809 | in_be32(&ugeth->ug_regs->utbipar)); | ||
810 | ugeth_info("uescr : addr - 0x%08x, val - 0x%04x", | ||
811 | (u32) & ugeth->ug_regs->uescr, | ||
812 | in_be16(&ugeth->ug_regs->uescr)); | ||
813 | ugeth_info("tx64 : addr - 0x%08x, val - 0x%08x", | ||
814 | (u32) & ugeth->ug_regs->tx64, | ||
815 | in_be32(&ugeth->ug_regs->tx64)); | ||
816 | ugeth_info("tx127 : addr - 0x%08x, val - 0x%08x", | ||
817 | (u32) & ugeth->ug_regs->tx127, | ||
818 | in_be32(&ugeth->ug_regs->tx127)); | ||
819 | ugeth_info("tx255 : addr - 0x%08x, val - 0x%08x", | ||
820 | (u32) & ugeth->ug_regs->tx255, | ||
821 | in_be32(&ugeth->ug_regs->tx255)); | ||
822 | ugeth_info("rx64 : addr - 0x%08x, val - 0x%08x", | ||
823 | (u32) & ugeth->ug_regs->rx64, | ||
824 | in_be32(&ugeth->ug_regs->rx64)); | ||
825 | ugeth_info("rx127 : addr - 0x%08x, val - 0x%08x", | ||
826 | (u32) & ugeth->ug_regs->rx127, | ||
827 | in_be32(&ugeth->ug_regs->rx127)); | ||
828 | ugeth_info("rx255 : addr - 0x%08x, val - 0x%08x", | ||
829 | (u32) & ugeth->ug_regs->rx255, | ||
830 | in_be32(&ugeth->ug_regs->rx255)); | ||
831 | ugeth_info("txok : addr - 0x%08x, val - 0x%08x", | ||
832 | (u32) & ugeth->ug_regs->txok, | ||
833 | in_be32(&ugeth->ug_regs->txok)); | ||
834 | ugeth_info("txcf : addr - 0x%08x, val - 0x%04x", | ||
835 | (u32) & ugeth->ug_regs->txcf, | ||
836 | in_be16(&ugeth->ug_regs->txcf)); | ||
837 | ugeth_info("tmca : addr - 0x%08x, val - 0x%08x", | ||
838 | (u32) & ugeth->ug_regs->tmca, | ||
839 | in_be32(&ugeth->ug_regs->tmca)); | ||
840 | ugeth_info("tbca : addr - 0x%08x, val - 0x%08x", | ||
841 | (u32) & ugeth->ug_regs->tbca, | ||
842 | in_be32(&ugeth->ug_regs->tbca)); | ||
843 | ugeth_info("rxfok : addr - 0x%08x, val - 0x%08x", | ||
844 | (u32) & ugeth->ug_regs->rxfok, | ||
845 | in_be32(&ugeth->ug_regs->rxfok)); | ||
846 | ugeth_info("rxbok : addr - 0x%08x, val - 0x%08x", | ||
847 | (u32) & ugeth->ug_regs->rxbok, | ||
848 | in_be32(&ugeth->ug_regs->rxbok)); | ||
849 | ugeth_info("rbyt : addr - 0x%08x, val - 0x%08x", | ||
850 | (u32) & ugeth->ug_regs->rbyt, | ||
851 | in_be32(&ugeth->ug_regs->rbyt)); | ||
852 | ugeth_info("rmca : addr - 0x%08x, val - 0x%08x", | ||
853 | (u32) & ugeth->ug_regs->rmca, | ||
854 | in_be32(&ugeth->ug_regs->rmca)); | ||
855 | ugeth_info("rbca : addr - 0x%08x, val - 0x%08x", | ||
856 | (u32) & ugeth->ug_regs->rbca, | ||
857 | in_be32(&ugeth->ug_regs->rbca)); | ||
858 | ugeth_info("scar : addr - 0x%08x, val - 0x%08x", | ||
859 | (u32) & ugeth->ug_regs->scar, | ||
860 | in_be32(&ugeth->ug_regs->scar)); | ||
861 | ugeth_info("scam : addr - 0x%08x, val - 0x%08x", | ||
862 | (u32) & ugeth->ug_regs->scam, | ||
863 | in_be32(&ugeth->ug_regs->scam)); | ||
864 | |||
865 | if (ugeth->p_thread_data_tx) { | ||
866 | int numThreadsTxNumerical; | ||
867 | switch (ugeth->ug_info->numThreadsTx) { | ||
868 | case UCC_GETH_NUM_OF_THREADS_1: | ||
869 | numThreadsTxNumerical = 1; | ||
870 | break; | ||
871 | case UCC_GETH_NUM_OF_THREADS_2: | ||
872 | numThreadsTxNumerical = 2; | ||
873 | break; | ||
874 | case UCC_GETH_NUM_OF_THREADS_4: | ||
875 | numThreadsTxNumerical = 4; | ||
876 | break; | ||
877 | case UCC_GETH_NUM_OF_THREADS_6: | ||
878 | numThreadsTxNumerical = 6; | ||
879 | break; | ||
880 | case UCC_GETH_NUM_OF_THREADS_8: | ||
881 | numThreadsTxNumerical = 8; | ||
882 | break; | ||
883 | default: | ||
884 | numThreadsTxNumerical = 0; | ||
885 | break; | ||
886 | } | ||
887 | |||
888 | ugeth_info("Thread data TXs:"); | ||
889 | ugeth_info("Base address: 0x%08x", | ||
890 | (u32) ugeth->p_thread_data_tx); | ||
891 | for (i = 0; i < numThreadsTxNumerical; i++) { | ||
892 | ugeth_info("Thread data TX[%d]:", i); | ||
893 | ugeth_info("Base address: 0x%08x", | ||
894 | (u32) & ugeth->p_thread_data_tx[i]); | ||
895 | mem_disp((u8 *) & ugeth->p_thread_data_tx[i], | ||
896 | sizeof(ucc_geth_thread_data_tx_t)); | ||
897 | } | ||
898 | } | ||
899 | if (ugeth->p_thread_data_rx) { | ||
900 | int numThreadsRxNumerical; | ||
901 | switch (ugeth->ug_info->numThreadsRx) { | ||
902 | case UCC_GETH_NUM_OF_THREADS_1: | ||
903 | numThreadsRxNumerical = 1; | ||
904 | break; | ||
905 | case UCC_GETH_NUM_OF_THREADS_2: | ||
906 | numThreadsRxNumerical = 2; | ||
907 | break; | ||
908 | case UCC_GETH_NUM_OF_THREADS_4: | ||
909 | numThreadsRxNumerical = 4; | ||
910 | break; | ||
911 | case UCC_GETH_NUM_OF_THREADS_6: | ||
912 | numThreadsRxNumerical = 6; | ||
913 | break; | ||
914 | case UCC_GETH_NUM_OF_THREADS_8: | ||
915 | numThreadsRxNumerical = 8; | ||
916 | break; | ||
917 | default: | ||
918 | numThreadsRxNumerical = 0; | ||
919 | break; | ||
920 | } | ||
921 | |||
922 | ugeth_info("Thread data RX:"); | ||
923 | ugeth_info("Base address: 0x%08x", | ||
924 | (u32) ugeth->p_thread_data_rx); | ||
925 | for (i = 0; i < numThreadsRxNumerical; i++) { | ||
926 | ugeth_info("Thread data RX[%d]:", i); | ||
927 | ugeth_info("Base address: 0x%08x", | ||
928 | (u32) & ugeth->p_thread_data_rx[i]); | ||
929 | mem_disp((u8 *) & ugeth->p_thread_data_rx[i], | ||
930 | sizeof(ucc_geth_thread_data_rx_t)); | ||
931 | } | ||
932 | } | ||
933 | if (ugeth->p_exf_glbl_param) { | ||
934 | ugeth_info("EXF global param:"); | ||
935 | ugeth_info("Base address: 0x%08x", | ||
936 | (u32) ugeth->p_exf_glbl_param); | ||
937 | mem_disp((u8 *) ugeth->p_exf_glbl_param, | ||
938 | sizeof(*ugeth->p_exf_glbl_param)); | ||
939 | } | ||
940 | if (ugeth->p_tx_glbl_pram) { | ||
941 | ugeth_info("TX global param:"); | ||
942 | ugeth_info("Base address: 0x%08x", (u32) ugeth->p_tx_glbl_pram); | ||
943 | ugeth_info("temoder : addr - 0x%08x, val - 0x%04x", | ||
944 | (u32) & ugeth->p_tx_glbl_pram->temoder, | ||
945 | in_be16(&ugeth->p_tx_glbl_pram->temoder)); | ||
946 | ugeth_info("sqptr : addr - 0x%08x, val - 0x%08x", | ||
947 | (u32) & ugeth->p_tx_glbl_pram->sqptr, | ||
948 | in_be32(&ugeth->p_tx_glbl_pram->sqptr)); | ||
949 | ugeth_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x", | ||
950 | (u32) & ugeth->p_tx_glbl_pram->schedulerbasepointer, | ||
951 | in_be32(&ugeth->p_tx_glbl_pram-> | ||
952 | schedulerbasepointer)); | ||
953 | ugeth_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x", | ||
954 | (u32) & ugeth->p_tx_glbl_pram->txrmonbaseptr, | ||
955 | in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr)); | ||
956 | ugeth_info("tstate : addr - 0x%08x, val - 0x%08x", | ||
957 | (u32) & ugeth->p_tx_glbl_pram->tstate, | ||
958 | in_be32(&ugeth->p_tx_glbl_pram->tstate)); | ||
959 | ugeth_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x", | ||
960 | (u32) & ugeth->p_tx_glbl_pram->iphoffset[0], | ||
961 | ugeth->p_tx_glbl_pram->iphoffset[0]); | ||
962 | ugeth_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x", | ||
963 | (u32) & ugeth->p_tx_glbl_pram->iphoffset[1], | ||
964 | ugeth->p_tx_glbl_pram->iphoffset[1]); | ||
965 | ugeth_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x", | ||
966 | (u32) & ugeth->p_tx_glbl_pram->iphoffset[2], | ||
967 | ugeth->p_tx_glbl_pram->iphoffset[2]); | ||
968 | ugeth_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x", | ||
969 | (u32) & ugeth->p_tx_glbl_pram->iphoffset[3], | ||
970 | ugeth->p_tx_glbl_pram->iphoffset[3]); | ||
971 | ugeth_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x", | ||
972 | (u32) & ugeth->p_tx_glbl_pram->iphoffset[4], | ||
973 | ugeth->p_tx_glbl_pram->iphoffset[4]); | ||
974 | ugeth_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x", | ||
975 | (u32) & ugeth->p_tx_glbl_pram->iphoffset[5], | ||
976 | ugeth->p_tx_glbl_pram->iphoffset[5]); | ||
977 | ugeth_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x", | ||
978 | (u32) & ugeth->p_tx_glbl_pram->iphoffset[6], | ||
979 | ugeth->p_tx_glbl_pram->iphoffset[6]); | ||
980 | ugeth_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x", | ||
981 | (u32) & ugeth->p_tx_glbl_pram->iphoffset[7], | ||
982 | ugeth->p_tx_glbl_pram->iphoffset[7]); | ||
983 | ugeth_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x", | ||
984 | (u32) & ugeth->p_tx_glbl_pram->vtagtable[0], | ||
985 | in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0])); | ||
986 | ugeth_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x", | ||
987 | (u32) & ugeth->p_tx_glbl_pram->vtagtable[1], | ||
988 | in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1])); | ||
989 | ugeth_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x", | ||
990 | (u32) & ugeth->p_tx_glbl_pram->vtagtable[2], | ||
991 | in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2])); | ||
992 | ugeth_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x", | ||
993 | (u32) & ugeth->p_tx_glbl_pram->vtagtable[3], | ||
994 | in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3])); | ||
995 | ugeth_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x", | ||
996 | (u32) & ugeth->p_tx_glbl_pram->vtagtable[4], | ||
997 | in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4])); | ||
998 | ugeth_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x", | ||
999 | (u32) & ugeth->p_tx_glbl_pram->vtagtable[5], | ||
1000 | in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5])); | ||
1001 | ugeth_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x", | ||
1002 | (u32) & ugeth->p_tx_glbl_pram->vtagtable[6], | ||
1003 | in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6])); | ||
1004 | ugeth_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x", | ||
1005 | (u32) & ugeth->p_tx_glbl_pram->vtagtable[7], | ||
1006 | in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7])); | ||
1007 | ugeth_info("tqptr : addr - 0x%08x, val - 0x%08x", | ||
1008 | (u32) & ugeth->p_tx_glbl_pram->tqptr, | ||
1009 | in_be32(&ugeth->p_tx_glbl_pram->tqptr)); | ||
1010 | } | ||
1011 | if (ugeth->p_rx_glbl_pram) { | ||
1012 | ugeth_info("RX global param:"); | ||
1013 | ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_glbl_pram); | ||
1014 | ugeth_info("remoder : addr - 0x%08x, val - 0x%08x", | ||
1015 | (u32) & ugeth->p_rx_glbl_pram->remoder, | ||
1016 | in_be32(&ugeth->p_rx_glbl_pram->remoder)); | ||
1017 | ugeth_info("rqptr : addr - 0x%08x, val - 0x%08x", | ||
1018 | (u32) & ugeth->p_rx_glbl_pram->rqptr, | ||
1019 | in_be32(&ugeth->p_rx_glbl_pram->rqptr)); | ||
1020 | ugeth_info("typeorlen : addr - 0x%08x, val - 0x%04x", | ||
1021 | (u32) & ugeth->p_rx_glbl_pram->typeorlen, | ||
1022 | in_be16(&ugeth->p_rx_glbl_pram->typeorlen)); | ||
1023 | ugeth_info("rxgstpack : addr - 0x%08x, val - 0x%02x", | ||
1024 | (u32) & ugeth->p_rx_glbl_pram->rxgstpack, | ||
1025 | ugeth->p_rx_glbl_pram->rxgstpack); | ||
1026 | ugeth_info("rxrmonbaseptr : addr - 0x%08x, val - 0x%08x", | ||
1027 | (u32) & ugeth->p_rx_glbl_pram->rxrmonbaseptr, | ||
1028 | in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr)); | ||
1029 | ugeth_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x", | ||
1030 | (u32) & ugeth->p_rx_glbl_pram->intcoalescingptr, | ||
1031 | in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr)); | ||
1032 | ugeth_info("rstate : addr - 0x%08x, val - 0x%02x", | ||
1033 | (u32) & ugeth->p_rx_glbl_pram->rstate, | ||
1034 | ugeth->p_rx_glbl_pram->rstate); | ||
1035 | ugeth_info("mrblr : addr - 0x%08x, val - 0x%04x", | ||
1036 | (u32) & ugeth->p_rx_glbl_pram->mrblr, | ||
1037 | in_be16(&ugeth->p_rx_glbl_pram->mrblr)); | ||
1038 | ugeth_info("rbdqptr : addr - 0x%08x, val - 0x%08x", | ||
1039 | (u32) & ugeth->p_rx_glbl_pram->rbdqptr, | ||
1040 | in_be32(&ugeth->p_rx_glbl_pram->rbdqptr)); | ||
1041 | ugeth_info("mflr : addr - 0x%08x, val - 0x%04x", | ||
1042 | (u32) & ugeth->p_rx_glbl_pram->mflr, | ||
1043 | in_be16(&ugeth->p_rx_glbl_pram->mflr)); | ||
1044 | ugeth_info("minflr : addr - 0x%08x, val - 0x%04x", | ||
1045 | (u32) & ugeth->p_rx_glbl_pram->minflr, | ||
1046 | in_be16(&ugeth->p_rx_glbl_pram->minflr)); | ||
1047 | ugeth_info("maxd1 : addr - 0x%08x, val - 0x%04x", | ||
1048 | (u32) & ugeth->p_rx_glbl_pram->maxd1, | ||
1049 | in_be16(&ugeth->p_rx_glbl_pram->maxd1)); | ||
1050 | ugeth_info("maxd2 : addr - 0x%08x, val - 0x%04x", | ||
1051 | (u32) & ugeth->p_rx_glbl_pram->maxd2, | ||
1052 | in_be16(&ugeth->p_rx_glbl_pram->maxd2)); | ||
1053 | ugeth_info("ecamptr : addr - 0x%08x, val - 0x%08x", | ||
1054 | (u32) & ugeth->p_rx_glbl_pram->ecamptr, | ||
1055 | in_be32(&ugeth->p_rx_glbl_pram->ecamptr)); | ||
1056 | ugeth_info("l2qt : addr - 0x%08x, val - 0x%08x", | ||
1057 | (u32) & ugeth->p_rx_glbl_pram->l2qt, | ||
1058 | in_be32(&ugeth->p_rx_glbl_pram->l2qt)); | ||
1059 | ugeth_info("l3qt[0] : addr - 0x%08x, val - 0x%08x", | ||
1060 | (u32) & ugeth->p_rx_glbl_pram->l3qt[0], | ||
1061 | in_be32(&ugeth->p_rx_glbl_pram->l3qt[0])); | ||
1062 | ugeth_info("l3qt[1] : addr - 0x%08x, val - 0x%08x", | ||
1063 | (u32) & ugeth->p_rx_glbl_pram->l3qt[1], | ||
1064 | in_be32(&ugeth->p_rx_glbl_pram->l3qt[1])); | ||
1065 | ugeth_info("l3qt[2] : addr - 0x%08x, val - 0x%08x", | ||
1066 | (u32) & ugeth->p_rx_glbl_pram->l3qt[2], | ||
1067 | in_be32(&ugeth->p_rx_glbl_pram->l3qt[2])); | ||
1068 | ugeth_info("l3qt[3] : addr - 0x%08x, val - 0x%08x", | ||
1069 | (u32) & ugeth->p_rx_glbl_pram->l3qt[3], | ||
1070 | in_be32(&ugeth->p_rx_glbl_pram->l3qt[3])); | ||
1071 | ugeth_info("l3qt[4] : addr - 0x%08x, val - 0x%08x", | ||
1072 | (u32) & ugeth->p_rx_glbl_pram->l3qt[4], | ||
1073 | in_be32(&ugeth->p_rx_glbl_pram->l3qt[4])); | ||
1074 | ugeth_info("l3qt[5] : addr - 0x%08x, val - 0x%08x", | ||
1075 | (u32) & ugeth->p_rx_glbl_pram->l3qt[5], | ||
1076 | in_be32(&ugeth->p_rx_glbl_pram->l3qt[5])); | ||
1077 | ugeth_info("l3qt[6] : addr - 0x%08x, val - 0x%08x", | ||
1078 | (u32) & ugeth->p_rx_glbl_pram->l3qt[6], | ||
1079 | in_be32(&ugeth->p_rx_glbl_pram->l3qt[6])); | ||
1080 | ugeth_info("l3qt[7] : addr - 0x%08x, val - 0x%08x", | ||
1081 | (u32) & ugeth->p_rx_glbl_pram->l3qt[7], | ||
1082 | in_be32(&ugeth->p_rx_glbl_pram->l3qt[7])); | ||
1083 | ugeth_info("vlantype : addr - 0x%08x, val - 0x%04x", | ||
1084 | (u32) & ugeth->p_rx_glbl_pram->vlantype, | ||
1085 | in_be16(&ugeth->p_rx_glbl_pram->vlantype)); | ||
1086 | ugeth_info("vlantci : addr - 0x%08x, val - 0x%04x", | ||
1087 | (u32) & ugeth->p_rx_glbl_pram->vlantci, | ||
1088 | in_be16(&ugeth->p_rx_glbl_pram->vlantci)); | ||
1089 | for (i = 0; i < 64; i++) | ||
1090 | ugeth_info | ||
1091 | ("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x", | ||
1092 | i, | ||
1093 | (u32) & ugeth->p_rx_glbl_pram->addressfiltering[i], | ||
1094 | ugeth->p_rx_glbl_pram->addressfiltering[i]); | ||
1095 | ugeth_info("exfGlobalParam : addr - 0x%08x, val - 0x%08x", | ||
1096 | (u32) & ugeth->p_rx_glbl_pram->exfGlobalParam, | ||
1097 | in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam)); | ||
1098 | } | ||
1099 | if (ugeth->p_send_q_mem_reg) { | ||
1100 | ugeth_info("Send Q memory registers:"); | ||
1101 | ugeth_info("Base address: 0x%08x", | ||
1102 | (u32) ugeth->p_send_q_mem_reg); | ||
1103 | for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { | ||
1104 | ugeth_info("SQQD[%d]:", i); | ||
1105 | ugeth_info("Base address: 0x%08x", | ||
1106 | (u32) & ugeth->p_send_q_mem_reg->sqqd[i]); | ||
1107 | mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i], | ||
1108 | sizeof(ucc_geth_send_queue_qd_t)); | ||
1109 | } | ||
1110 | } | ||
1111 | if (ugeth->p_scheduler) { | ||
1112 | ugeth_info("Scheduler:"); | ||
1113 | ugeth_info("Base address: 0x%08x", (u32) ugeth->p_scheduler); | ||
1114 | mem_disp((u8 *) ugeth->p_scheduler, | ||
1115 | sizeof(*ugeth->p_scheduler)); | ||
1116 | } | ||
1117 | if (ugeth->p_tx_fw_statistics_pram) { | ||
1118 | ugeth_info("TX FW statistics pram:"); | ||
1119 | ugeth_info("Base address: 0x%08x", | ||
1120 | (u32) ugeth->p_tx_fw_statistics_pram); | ||
1121 | mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram, | ||
1122 | sizeof(*ugeth->p_tx_fw_statistics_pram)); | ||
1123 | } | ||
1124 | if (ugeth->p_rx_fw_statistics_pram) { | ||
1125 | ugeth_info("RX FW statistics pram:"); | ||
1126 | ugeth_info("Base address: 0x%08x", | ||
1127 | (u32) ugeth->p_rx_fw_statistics_pram); | ||
1128 | mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram, | ||
1129 | sizeof(*ugeth->p_rx_fw_statistics_pram)); | ||
1130 | } | ||
1131 | if (ugeth->p_rx_irq_coalescing_tbl) { | ||
1132 | ugeth_info("RX IRQ coalescing tables:"); | ||
1133 | ugeth_info("Base address: 0x%08x", | ||
1134 | (u32) ugeth->p_rx_irq_coalescing_tbl); | ||
1135 | for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { | ||
1136 | ugeth_info("RX IRQ coalescing table entry[%d]:", i); | ||
1137 | ugeth_info("Base address: 0x%08x", | ||
1138 | (u32) & ugeth->p_rx_irq_coalescing_tbl-> | ||
1139 | coalescingentry[i]); | ||
1140 | ugeth_info | ||
1141 | ("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x", | ||
1142 | (u32) & ugeth->p_rx_irq_coalescing_tbl-> | ||
1143 | coalescingentry[i].interruptcoalescingmaxvalue, | ||
1144 | in_be32(&ugeth->p_rx_irq_coalescing_tbl-> | ||
1145 | coalescingentry[i]. | ||
1146 | interruptcoalescingmaxvalue)); | ||
1147 | ugeth_info | ||
1148 | ("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x", | ||
1149 | (u32) & ugeth->p_rx_irq_coalescing_tbl-> | ||
1150 | coalescingentry[i].interruptcoalescingcounter, | ||
1151 | in_be32(&ugeth->p_rx_irq_coalescing_tbl-> | ||
1152 | coalescingentry[i]. | ||
1153 | interruptcoalescingcounter)); | ||
1154 | } | ||
1155 | } | ||
1156 | if (ugeth->p_rx_bd_qs_tbl) { | ||
1157 | ugeth_info("RX BD QS tables:"); | ||
1158 | ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_bd_qs_tbl); | ||
1159 | for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { | ||
1160 | ugeth_info("RX BD QS table[%d]:", i); | ||
1161 | ugeth_info("Base address: 0x%08x", | ||
1162 | (u32) & ugeth->p_rx_bd_qs_tbl[i]); | ||
1163 | ugeth_info | ||
1164 | ("bdbaseptr : addr - 0x%08x, val - 0x%08x", | ||
1165 | (u32) & ugeth->p_rx_bd_qs_tbl[i].bdbaseptr, | ||
1166 | in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr)); | ||
1167 | ugeth_info | ||
1168 | ("bdptr : addr - 0x%08x, val - 0x%08x", | ||
1169 | (u32) & ugeth->p_rx_bd_qs_tbl[i].bdptr, | ||
1170 | in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr)); | ||
1171 | ugeth_info | ||
1172 | ("externalbdbaseptr: addr - 0x%08x, val - 0x%08x", | ||
1173 | (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, | ||
1174 | in_be32(&ugeth->p_rx_bd_qs_tbl[i]. | ||
1175 | externalbdbaseptr)); | ||
1176 | ugeth_info | ||
1177 | ("externalbdptr : addr - 0x%08x, val - 0x%08x", | ||
1178 | (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdptr, | ||
1179 | in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr)); | ||
1180 | ugeth_info("ucode RX Prefetched BDs:"); | ||
1181 | ugeth_info("Base address: 0x%08x", | ||
1182 | (u32) | ||
1183 | qe_muram_addr(in_be32 | ||
1184 | (&ugeth->p_rx_bd_qs_tbl[i]. | ||
1185 | bdbaseptr))); | ||
1186 | mem_disp((u8 *) | ||
1187 | qe_muram_addr(in_be32 | ||
1188 | (&ugeth->p_rx_bd_qs_tbl[i]. | ||
1189 | bdbaseptr)), | ||
1190 | sizeof(ucc_geth_rx_prefetched_bds_t)); | ||
1191 | } | ||
1192 | } | ||
1193 | if (ugeth->p_init_enet_param_shadow) { | ||
1194 | int size; | ||
1195 | ugeth_info("Init enet param shadow:"); | ||
1196 | ugeth_info("Base address: 0x%08x", | ||
1197 | (u32) ugeth->p_init_enet_param_shadow); | ||
1198 | mem_disp((u8 *) ugeth->p_init_enet_param_shadow, | ||
1199 | sizeof(*ugeth->p_init_enet_param_shadow)); | ||
1200 | |||
1201 | size = sizeof(ucc_geth_thread_rx_pram_t); | ||
1202 | if (ugeth->ug_info->rxExtendedFiltering) { | ||
1203 | size += | ||
1204 | THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING; | ||
1205 | if (ugeth->ug_info->largestexternallookupkeysize == | ||
1206 | QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES) | ||
1207 | size += | ||
1208 | THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8; | ||
1209 | if (ugeth->ug_info->largestexternallookupkeysize == | ||
1210 | QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES) | ||
1211 | size += | ||
1212 | THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16; | ||
1213 | } | ||
1214 | |||
1215 | dump_init_enet_entries(ugeth, | ||
1216 | &(ugeth->p_init_enet_param_shadow-> | ||
1217 | txthread[0]), | ||
1218 | ENET_INIT_PARAM_MAX_ENTRIES_TX, | ||
1219 | sizeof(ucc_geth_thread_tx_pram_t), | ||
1220 | ugeth->ug_info->riscTx, 0); | ||
1221 | dump_init_enet_entries(ugeth, | ||
1222 | &(ugeth->p_init_enet_param_shadow-> | ||
1223 | rxthread[0]), | ||
1224 | ENET_INIT_PARAM_MAX_ENTRIES_RX, size, | ||
1225 | ugeth->ug_info->riscRx, 1); | ||
1226 | } | ||
1227 | } | ||
1228 | #endif /* DEBUG */ | ||
1229 | |||
1230 | static void init_default_reg_vals(volatile u32 *upsmr_register, | ||
1231 | volatile u32 *maccfg1_register, | ||
1232 | volatile u32 *maccfg2_register) | ||
1233 | { | ||
1234 | out_be32(upsmr_register, UCC_GETH_UPSMR_INIT); | ||
1235 | out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT); | ||
1236 | out_be32(maccfg2_register, UCC_GETH_MACCFG2_INIT); | ||
1237 | } | ||
1238 | |||
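| /* Pack the half-duplex parameters into the HAFDUP register after range | ||
| checking the alt-BEB truncation, maximum retransmissions and collision | ||
| window values. */ | ||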
1239 | static int init_half_duplex_params(int alt_beb, | ||
1240 | int back_pressure_no_backoff, | ||
1241 | int no_backoff, | ||
1242 | int excess_defer, | ||
1243 | u8 alt_beb_truncation, | ||
1244 | u8 max_retransmissions, | ||
1245 | u8 collision_window, | ||
1246 | volatile u32 *hafdup_register) | ||
1247 | { | ||
1248 | u32 value = 0; | ||
1249 | |||
1250 | if ((alt_beb_truncation > HALFDUP_ALT_BEB_TRUNCATION_MAX) || | ||
1251 | (max_retransmissions > HALFDUP_MAX_RETRANSMISSION_MAX) || | ||
1252 | (collision_window > HALFDUP_COLLISION_WINDOW_MAX)) | ||
1253 | return -EINVAL; | ||
1254 | |||
1255 | value = (u32) (alt_beb_truncation << HALFDUP_ALT_BEB_TRUNCATION_SHIFT); | ||
1256 | |||
1257 | if (alt_beb) | ||
1258 | value |= HALFDUP_ALT_BEB; | ||
1259 | if (back_pressure_no_backoff) | ||
1260 | value |= HALFDUP_BACK_PRESSURE_NO_BACKOFF; | ||
1261 | if (no_backoff) | ||
1262 | value |= HALFDUP_NO_BACKOFF; | ||
1263 | if (excess_defer) | ||
1264 | value |= HALFDUP_EXCESSIVE_DEFER; | ||
1265 | |||
1266 | value |= (max_retransmissions << HALFDUP_MAX_RETRANSMISSION_SHIFT); | ||
1267 | |||
1268 | value |= collision_window; | ||
1269 | |||
1270 | out_be32(hafdup_register, value); | ||
1271 | return 0; | ||
1272 | } | ||
1273 | |||
1274 | static int init_inter_frame_gap_params(u8 non_btb_cs_ipg, | ||
1275 | u8 non_btb_ipg, | ||
1276 | u8 min_ifg, | ||
1277 | u8 btb_ipg, | ||
1278 | volatile u32 *ipgifg_register) | ||
1279 | { | ||
1280 | u32 value = 0; | ||
1281 | |||
1282 | /* Non-Back-to-back IPG part 1 should be <= Non-Back-to-back | ||
1283 | IPG part 2 */ | ||
1284 | if (non_btb_cs_ipg > non_btb_ipg) | ||
1285 | return -EINVAL; | ||
1286 | |||
1287 | if ((non_btb_cs_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX) || | ||
1288 | (non_btb_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX) || | ||
1289 | /*(min_ifg > IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX) || */ | ||
1290 | (btb_ipg > IPGIFG_BACK_TO_BACK_IFG_MAX)) | ||
1291 | return -EINVAL; | ||
1292 | |||
1293 | value |= | ||
1294 | ((non_btb_cs_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT) & | ||
1295 | IPGIFG_NBTB_CS_IPG_MASK); | ||
1296 | value |= | ||
1297 | ((non_btb_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT) & | ||
1298 | IPGIFG_NBTB_IPG_MASK); | ||
1299 | value |= | ||
1300 | ((min_ifg << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT) & | ||
1301 | IPGIFG_MIN_IFG_MASK); | ||
1302 | value |= (btb_ipg & IPGIFG_BTB_IPG_MASK); | ||
1303 | |||
1304 | out_be32(ipgifg_register, value); | ||
1305 | return 0; | ||
1306 | } | ||
1307 | |||
1308 | static int init_flow_control_params(u32 automatic_flow_control_mode, | ||
1309 | int rx_flow_control_enable, | ||
1310 | int tx_flow_control_enable, | ||
1311 | u16 pause_period, | ||
1312 | u16 extension_field, | ||
1313 | volatile u32 *upsmr_register, | ||
1314 | volatile u32 *uempr_register, | ||
1315 | volatile u32 *maccfg1_register) | ||
1316 | { | ||
1317 | u32 value = 0; | ||
1318 | |||
1319 | /* Set UEMPR register */ | ||
1320 | value = (u32) pause_period << UEMPR_PAUSE_TIME_VALUE_SHIFT; | ||
1321 | value |= (u32) extension_field << UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT; | ||
1322 | out_be32(uempr_register, value); | ||
1323 | |||
1324 | /* Set UPSMR register */ | ||
1325 | value = in_be32(upsmr_register); | ||
1326 | value |= automatic_flow_control_mode; | ||
1327 | out_be32(upsmr_register, value); | ||
1328 | |||
1329 | value = in_be32(maccfg1_register); | ||
1330 | if (rx_flow_control_enable) | ||
1331 | value |= MACCFG1_FLOW_RX; | ||
1332 | if (tx_flow_control_enable) | ||
1333 | value |= MACCFG1_FLOW_TX; | ||
1334 | out_be32(maccfg1_register, value); | ||
1335 | |||
1336 | return 0; | ||
1337 | } | ||
1338 | |||
1339 | static int init_hw_statistics_gathering_mode(int enable_hardware_statistics, | ||
1340 | int auto_zero_hardware_statistics, | ||
1341 | volatile u32 *upsmr_register, | ||
1342 | volatile u16 *uescr_register) | ||
1343 | { | ||
1344 | u32 upsmr_value = 0; | ||
1345 | u16 uescr_value = 0; | ||
1346 | /* Enable hardware statistics gathering if requested */ | ||
1347 | if (enable_hardware_statistics) { | ||
1348 | upsmr_value = in_be32(upsmr_register); | ||
1349 | upsmr_value |= UPSMR_HSE; | ||
1350 | out_be32(upsmr_register, upsmr_value); | ||
1351 | } | ||
1352 | |||
1353 | /* Clear hardware statistics counters */ | ||
1354 | uescr_value = in_be16(uescr_register); | ||
1355 | uescr_value |= UESCR_CLRCNT; | ||
1356 | /* Automatically zero hardware statistics counters on read, | ||
1357 | if requested */ | ||
1358 | if (auto_zero_hardware_statistics) | ||
1359 | uescr_value |= UESCR_AUTOZ; | ||
1360 | out_be16(uescr_register, uescr_value); | ||
1361 | |||
1362 | return 0; | ||
1363 | } | ||
1364 | |||
1365 | static int init_firmware_statistics_gathering_mode(int | ||
1366 | enable_tx_firmware_statistics, | ||
1367 | int enable_rx_firmware_statistics, | ||
1368 | volatile u32 *tx_rmon_base_ptr, | ||
1369 | u32 tx_firmware_statistics_structure_address, | ||
1370 | volatile u32 *rx_rmon_base_ptr, | ||
1371 | u32 rx_firmware_statistics_structure_address, | ||
1372 | volatile u16 *temoder_register, | ||
1373 | volatile u32 *remoder_register) | ||
1374 | { | ||
1375 | /* Note: this function does not check if */ | ||
1376 | /* the parameters it receives are NULL */ | ||
1377 | u16 temoder_value; | ||
1378 | u32 remoder_value; | ||
1379 | |||
1380 | if (enable_tx_firmware_statistics) { | ||
1381 | out_be32(tx_rmon_base_ptr, | ||
1382 | tx_firmware_statistics_structure_address); | ||
1383 | temoder_value = in_be16(temoder_register); | ||
1384 | temoder_value |= TEMODER_TX_RMON_STATISTICS_ENABLE; | ||
1385 | out_be16(temoder_register, temoder_value); | ||
1386 | } | ||
1387 | |||
1388 | if (enable_rx_firmware_statistics) { | ||
1389 | out_be32(rx_rmon_base_ptr, | ||
1390 | rx_firmware_statistics_structure_address); | ||
1391 | remoder_value = in_be32(remoder_register); | ||
1392 | remoder_value |= REMODER_RX_RMON_STATISTICS_ENABLE; | ||
1393 | out_be32(remoder_register, remoder_value); | ||
1394 | } | ||
1395 | |||
1396 | return 0; | ||
1397 | } | ||
1398 | |||
1399 | static int init_mac_station_addr_regs(u8 address_byte_0, | ||
1400 | u8 address_byte_1, | ||
1401 | u8 address_byte_2, | ||
1402 | u8 address_byte_3, | ||
1403 | u8 address_byte_4, | ||
1404 | u8 address_byte_5, | ||
1405 | volatile u32 *macstnaddr1_register, | ||
1406 | volatile u32 *macstnaddr2_register) | ||
1407 | { | ||
1408 | u32 value = 0; | ||
1409 | |||
1410 | /* Example: for a station address of 0x12345678ABCD, */ | ||
1411 | /* 0x12 is byte 0, 0x34 is byte 1 and so on and 0xCD is byte 5 */ | ||
1412 | |||
1413 | /* MACSTNADDR1 Register: */ | ||
1414 | |||
1415 | /* 0 7 8 15 */ | ||
1416 | /* station address byte 5 station address byte 4 */ | ||
1417 | /* 16 23 24 31 */ | ||
1418 | /* station address byte 3 station address byte 2 */ | ||
1419 | value |= (u32) ((address_byte_2 << 0) & 0x000000FF); | ||
1420 | value |= (u32) ((address_byte_3 << 8) & 0x0000FF00); | ||
1421 | value |= (u32) ((address_byte_4 << 16) & 0x00FF0000); | ||
1422 | value |= (u32) ((address_byte_5 << 24) & 0xFF000000); | ||
1423 | |||
1424 | out_be32(macstnaddr1_register, value); | ||
1425 | |||
1426 | /* MACSTNADDR2 Register: */ | ||
1427 | |||
1428 | /* 0 7 8 15 */ | ||
1429 | /* station address byte 1 station address byte 0 */ | ||
1430 | /* 16 23 24 31 */ | ||
1431 | /* reserved reserved */ | ||
1432 | value = 0; | ||
1433 | value |= (u32) ((address_byte_0 << 16) & 0x00FF0000); | ||
1434 | value |= (u32) ((address_byte_1 << 24) & 0xFF000000); | ||
1435 | |||
1436 | out_be32(macstnaddr2_register, value); | ||
1437 | |||
1438 | return 0; | ||
1439 | } | ||
1440 | |||
1441 | static int init_mac_duplex_mode(int full_duplex, | ||
1442 | int limited_to_full_duplex, | ||
1443 | volatile u32 *maccfg2_register) | ||
1444 | { | ||
1445 | u32 value = 0; | ||
1446 | |||
1447 | /* some interfaces must work in full duplex mode */ | ||
1448 | if ((full_duplex == 0) && (limited_to_full_duplex == 1)) | ||
1449 | return -EINVAL; | ||
1450 | |||
1451 | value = in_be32(maccfg2_register); | ||
1452 | |||
1453 | if (full_duplex) | ||
1454 | value |= MACCFG2_FDX; | ||
1455 | else | ||
1456 | value &= ~MACCFG2_FDX; | ||
1457 | |||
1458 | out_be32(maccfg2_register, value); | ||
1459 | return 0; | ||
1460 | } | ||
1461 | |||
1462 | static int init_check_frame_length_mode(int length_check, | ||
1463 | volatile u32 *maccfg2_register) | ||
1464 | { | ||
1465 | u32 value = 0; | ||
1466 | |||
1467 | value = in_be32(maccfg2_register); | ||
1468 | |||
1469 | if (length_check) | ||
1470 | value |= MACCFG2_LC; | ||
1471 | else | ||
1472 | value &= ~MACCFG2_LC; | ||
1473 | |||
1474 | out_be32(maccfg2_register, value); | ||
1475 | return 0; | ||
1476 | } | ||
1477 | |||
1478 | static int init_preamble_length(u8 preamble_length, | ||
1479 | volatile u32 *maccfg2_register) | ||
1480 | { | ||
1481 | u32 value = 0; | ||
1482 | |||
1483 | if ((preamble_length < 3) || (preamble_length > 7)) | ||
1484 | return -EINVAL; | ||
1485 | |||
1486 | value = in_be32(maccfg2_register); | ||
1487 | value &= ~MACCFG2_PREL_MASK; | ||
1488 | value |= (preamble_length << MACCFG2_PREL_SHIFT); | ||
1489 | out_be32(maccfg2_register, value); | ||
1490 | return 0; | ||
1491 | } | ||
1492 | |||
1493 | static int init_mii_management_configuration(int reset_mgmt, | ||
1494 | int preamble_suppress, | ||
1495 | volatile u32 *miimcfg_register, | ||
1496 | volatile u32 *miimind_register) | ||
1497 | { | ||
1498 | unsigned int timeout = PHY_INIT_TIMEOUT; | ||
1499 | u32 value = 0; | ||
1500 | |||
1501 | value = in_be32(miimcfg_register); | ||
1502 | if (reset_mgmt) { | ||
1503 | value |= MIIMCFG_RESET_MANAGEMENT; | ||
1504 | out_be32(miimcfg_register, value); | ||
1505 | } | ||
1506 | |||
1507 | value = 0; | ||
1508 | |||
1509 | if (preamble_suppress) | ||
1510 | value |= MIIMCFG_NO_PREAMBLE; | ||
1511 | |||
1512 | value |= UCC_GETH_MIIMCFG_MNGMNT_CLC_DIV_INIT; | ||
1513 | out_be32(miimcfg_register, value); | ||
1514 | |||
1515 | /* Wait until the bus is free */ | ||
1516 | while ((in_be32(miimind_register) & MIIMIND_BUSY) && --timeout) | ||
1517 | cpu_relax(); | ||
1518 | |||
1519 | if (timeout == 0) { | ||
1520 | ugeth_err("%s: The MII Bus is stuck!", __FUNCTION__); | ||
1521 | return -ETIMEDOUT; | ||
1522 | } | ||
1523 | |||
1524 | return 0; | ||
1525 | } | ||
1526 | |||
1527 | static int init_rx_parameters(int reject_broadcast, | ||
1528 | int receive_short_frames, | ||
1529 | int promiscuous, volatile u32 *upsmr_register) | ||
1530 | { | ||
1531 | u32 value = 0; | ||
1532 | |||
1533 | value = in_be32(upsmr_register); | ||
1534 | |||
1535 | if (reject_broadcast) | ||
1536 | value |= UPSMR_BRO; | ||
1537 | else | ||
1538 | value &= ~UPSMR_BRO; | ||
1539 | |||
1540 | if (receive_short_frames) | ||
1541 | value |= UPSMR_RSH; | ||
1542 | else | ||
1543 | value &= ~UPSMR_RSH; | ||
1544 | |||
1545 | if (promiscuous) | ||
1546 | value |= UPSMR_PRO; | ||
1547 | else | ||
1548 | value &= ~UPSMR_PRO; | ||
1549 | |||
1550 | out_be32(upsmr_register, value); | ||
1551 | |||
1552 | return 0; | ||
1553 | } | ||
1554 | |||
1555 | static int init_max_rx_buff_len(u16 max_rx_buf_len, | ||
1556 | volatile u16 *mrblr_register) | ||
1557 | { | ||
1558 | /* max_rx_buf_len must be a nonzero multiple of UCC_GETH_MRBLR_ALIGNMENT (128) */ | ||
1559 | if ((max_rx_buf_len == 0) | ||
1560 | || (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT)) | ||
1561 | return -EINVAL; | ||
1562 | |||
1563 | out_be16(mrblr_register, max_rx_buf_len); | ||
1564 | return 0; | ||
1565 | } | ||
1566 | |||
1567 | static int init_min_frame_len(u16 min_frame_length, | ||
1568 | volatile u16 *minflr_register, | ||
1569 | volatile u16 *mrblr_register) | ||
1570 | { | ||
1571 | u16 mrblr_value = 0; | ||
1572 | |||
1573 | mrblr_value = in_be16(mrblr_register); | ||
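| /* The minimum frame length must be smaller than the maximum Rx buffer | ||
| length; the 4 bytes are presumably reserved for the frame check | ||
| sequence (FCS). */ | ||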
1574 | if (min_frame_length >= (mrblr_value - 4)) | ||
1575 | return -EINVAL; | ||
1576 | |||
1577 | out_be16(minflr_register, min_frame_length); | ||
1578 | return 0; | ||
1579 | } | ||
1580 | |||
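| /* Program MACCFG2, UPSMR and UTBIPAR according to the interface mode | ||
| flags (rpm, r10m, tbi, rmm) and speed derived from | ||
| ug_info->enet_interface; in TBI mode, autonegotiation is also turned | ||
| off in the TBI MII control register. */ | ||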
1581 | static int adjust_enet_interface(ucc_geth_private_t *ugeth) | ||
1582 | { | ||
1583 | ucc_geth_info_t *ug_info; | ||
1584 | ucc_geth_t *ug_regs; | ||
1585 | ucc_fast_t *uf_regs; | ||
1586 | enet_speed_e speed; | ||
1587 | int ret_val, rpm = 0, tbi = 0, r10m = 0, rmm = | ||
1588 | 0, limited_to_full_duplex = 0; | ||
1589 | u32 upsmr, maccfg2, utbipar, tbiBaseAddress; | ||
1590 | u16 value; | ||
1591 | |||
1592 | ugeth_vdbg("%s: IN", __FUNCTION__); | ||
1593 | |||
1594 | ug_info = ugeth->ug_info; | ||
1595 | ug_regs = ugeth->ug_regs; | ||
1596 | uf_regs = ugeth->uccf->uf_regs; | ||
1597 | |||
1598 | /* Analyze enet_interface according to Interface Mode Configuration | ||
1599 | table */ | ||
1600 | ret_val = | ||
1601 | get_interface_details(ug_info->enet_interface, &speed, &r10m, &rmm, | ||
1602 | &rpm, &tbi, &limited_to_full_duplex); | ||
1603 | if (ret_val != 0) { | ||
1604 | ugeth_err | ||
1605 | ("%s: half duplex not supported in requested configuration.", | ||
1606 | __FUNCTION__); | ||
1607 | return ret_val; | ||
1608 | } | ||
1609 | |||
1610 | /* Set MACCFG2 */ | ||
1611 | maccfg2 = in_be32(&ug_regs->maccfg2); | ||
1612 | maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK; | ||
1613 | if ((speed == ENET_SPEED_10BT) || (speed == ENET_SPEED_100BT)) | ||
1614 | maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE; | ||
1615 | else if (speed == ENET_SPEED_1000BT) | ||
1616 | maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE; | ||
1617 | maccfg2 |= ug_info->padAndCrc; | ||
1618 | out_be32(&ug_regs->maccfg2, maccfg2); | ||
1619 | |||
1620 | /* Set UPSMR */ | ||
1621 | upsmr = in_be32(&uf_regs->upsmr); | ||
1622 | upsmr &= ~(UPSMR_RPM | UPSMR_R10M | UPSMR_TBIM | UPSMR_RMM); | ||
1623 | if (rpm) | ||
1624 | upsmr |= UPSMR_RPM; | ||
1625 | if (r10m) | ||
1626 | upsmr |= UPSMR_R10M; | ||
1627 | if (tbi) | ||
1628 | upsmr |= UPSMR_TBIM; | ||
1629 | if (rmm) | ||
1630 | upsmr |= UPSMR_RMM; | ||
1631 | out_be32(&uf_regs->upsmr, upsmr); | ||
1632 | |||
1633 | /* Set UTBIPAR */ | ||
1634 | utbipar = in_be32(&ug_regs->utbipar); | ||
1635 | utbipar &= ~UTBIPAR_PHY_ADDRESS_MASK; | ||
1636 | if (tbi) | ||
1637 | utbipar |= | ||
1638 | (ug_info->phy_address + | ||
1639 | ugeth->ug_info->uf_info. | ||
1640 | ucc_num) << UTBIPAR_PHY_ADDRESS_SHIFT; | ||
1641 | else | ||
1642 | utbipar |= | ||
1643 | (0x10 + | ||
1644 | ugeth->ug_info->uf_info. | ||
1645 | ucc_num) << UTBIPAR_PHY_ADDRESS_SHIFT; | ||
1646 | out_be32(&ug_regs->utbipar, utbipar); | ||
1647 | |||
1648 | /* Disable autonegotiation in tbi mode, because by default it | ||
1649 | comes up in autonegotiation mode. */ | ||
1650 | /* Note that this depends on proper setting in utbipar register. */ | ||
1651 | if (tbi) { | ||
1652 | tbiBaseAddress = in_be32(&ug_regs->utbipar); | ||
1653 | tbiBaseAddress &= UTBIPAR_PHY_ADDRESS_MASK; | ||
1654 | tbiBaseAddress >>= UTBIPAR_PHY_ADDRESS_SHIFT; | ||
1655 | value = | ||
1656 | ugeth->mii_info->mdio_read(ugeth->dev, (u8) tbiBaseAddress, | ||
1657 | ENET_TBI_MII_CR); | ||
1658 | value &= ~0x1000; /* Turn off autonegotiation */ | ||
1659 | ugeth->mii_info->mdio_write(ugeth->dev, (u8) tbiBaseAddress, | ||
1660 | ENET_TBI_MII_CR, value); | ||
1661 | } | ||
1662 | |||
1663 | ret_val = init_mac_duplex_mode(1, | ||
1664 | limited_to_full_duplex, | ||
1665 | &ug_regs->maccfg2); | ||
1666 | if (ret_val != 0) { | ||
1667 | ugeth_err | ||
1668 | ("%s: half duplex not supported in requested configuration.", | ||
1669 | __FUNCTION__); | ||
1670 | return ret_val; | ||
1671 | } | ||
1672 | |||
1673 | init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2); | ||
1674 | |||
1675 | ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2); | ||
1676 | if (ret_val != 0) { | ||
1677 | ugeth_err | ||
1678 | ("%s: Preamble length must be between 3 and 7 inclusive.", | ||
1679 | __FUNCTION__); | ||
1680 | return ret_val; | ||
1681 | } | ||
1682 | |||
1683 | return 0; | ||
1684 | } | ||
1685 | |||
1686 | /* Called every time the controller might need to be made | ||
1687 | * aware of new link state. The PHY code conveys this | ||
1688 | * information through variables in the ugeth structure, and this | ||
1689 | * function converts those variables into the appropriate | ||
1690 | * register values, and can bring down the device if needed. | ||
1691 | */ | ||
1692 | static void adjust_link(struct net_device *dev) | ||
1693 | { | ||
1694 | ucc_geth_private_t *ugeth = netdev_priv(dev); | ||
1695 | ucc_geth_t *ug_regs; | ||
1696 | u32 tempval; | ||
1697 | struct ugeth_mii_info *mii_info = ugeth->mii_info; | ||
1698 | |||
1699 | ug_regs = ugeth->ug_regs; | ||
1700 | |||
1701 | if (mii_info->link) { | ||
1702 | /* Now we make sure that we can be in full duplex mode. | ||
1703 | * If not, we operate in half-duplex mode. */ | ||
1704 | if (mii_info->duplex != ugeth->oldduplex) { | ||
1705 | if (!(mii_info->duplex)) { | ||
1706 | tempval = in_be32(&ug_regs->maccfg2); | ||
1707 | tempval &= ~(MACCFG2_FDX); | ||
1708 | out_be32(&ug_regs->maccfg2, tempval); | ||
1709 | |||
1710 | ugeth_info("%s: Half Duplex", dev->name); | ||
1711 | } else { | ||
1712 | tempval = in_be32(&ug_regs->maccfg2); | ||
1713 | tempval |= MACCFG2_FDX; | ||
1714 | out_be32(&ug_regs->maccfg2, tempval); | ||
1715 | |||
1716 | ugeth_info("%s: Full Duplex", dev->name); | ||
1717 | } | ||
1718 | |||
1719 | ugeth->oldduplex = mii_info->duplex; | ||
1720 | } | ||
1721 | |||
1722 | if (mii_info->speed != ugeth->oldspeed) { | ||
1723 | switch (mii_info->speed) { | ||
1724 | case 1000: | ||
1725 | #ifdef CONFIG_MPC836x | ||
1726 | /* FIXME: This code is a workaround for a 100Mbps bug; | ||
1727 | remove it when the bug is fixed!!! */ | ||
1728 | if (ugeth->ug_info->enet_interface == | ||
1729 | ENET_1000_GMII) | ||
1730 | /* Run the commands which initialize the PHY */ | ||
1731 | { | ||
1732 | tempval = | ||
1733 | (u32) mii_info->mdio_read(ugeth-> | ||
1734 | dev, mii_info->mii_id, 0x1b); | ||
1735 | tempval |= 0x000f; | ||
1736 | mii_info->mdio_write(ugeth->dev, | ||
1737 | mii_info->mii_id, 0x1b, | ||
1738 | (u16) tempval); | ||
1739 | tempval = | ||
1740 | (u32) mii_info->mdio_read(ugeth-> | ||
1741 | dev, mii_info->mii_id, | ||
1742 | MII_BMCR); | ||
1743 | mii_info->mdio_write(ugeth->dev, | ||
1744 | mii_info->mii_id, MII_BMCR, | ||
1745 | (u16) (tempval | BMCR_RESET)); | ||
1746 | } else if (ugeth->ug_info->enet_interface == | ||
1747 | ENET_1000_RGMII) | ||
1748 | /* Run the commands which initialize the PHY */ | ||
1749 | { | ||
1750 | tempval = | ||
1751 | (u32) mii_info->mdio_read(ugeth-> | ||
1752 | dev, mii_info->mii_id, 0x1b); | ||
1753 | tempval = (tempval & ~0x000f) | 0x000b; | ||
1754 | mii_info->mdio_write(ugeth->dev, | ||
1755 | mii_info->mii_id, 0x1b, | ||
1756 | (u16) tempval); | ||
1757 | tempval = | ||
1758 | (u32) mii_info->mdio_read(ugeth-> | ||
1759 | dev, mii_info->mii_id, | ||
1760 | MII_BMCR); | ||
1761 | mii_info->mdio_write(ugeth->dev, | ||
1762 | mii_info->mii_id, MII_BMCR, | ||
1763 | (u16) (tempval | BMCR_RESET)); | ||
1764 | } | ||
1765 | msleep(4000); | ||
1766 | #endif /* CONFIG_MPC836x */ | ||
1767 | adjust_enet_interface(ugeth); | ||
1768 | break; | ||
1769 | case 100: | ||
1770 | case 10: | ||
1771 | #ifdef CONFIG_MPC836x | ||
1772 | /* FIXME: This code works around a 100Mbps bug; | ||
1773 | remove these lines when the bug is fixed. */ | ||
1774 | ugeth->ug_info->enet_interface = ENET_100_RGMII; | ||
1775 | tempval = | ||
1776 | (u32) mii_info->mdio_read(ugeth->dev, | ||
1777 | mii_info->mii_id, | ||
1778 | 0x1b); | ||
1779 | tempval = (tempval & ~0x000f) | 0x000b; | ||
1780 | mii_info->mdio_write(ugeth->dev, | ||
1781 | mii_info->mii_id, 0x1b, | ||
1782 | (u16) tempval); | ||
1783 | tempval = | ||
1784 | (u32) mii_info->mdio_read(ugeth->dev, | ||
1785 | mii_info->mii_id, | ||
1786 | MII_BMCR); | ||
1787 | mii_info->mdio_write(ugeth->dev, | ||
1788 | mii_info->mii_id, MII_BMCR, | ||
1789 | (u16) (tempval | | ||
1790 | BMCR_RESET)); | ||
1791 | msleep(4000); | ||
1792 | #endif /* CONFIG_MPC836x */ | ||
1793 | adjust_enet_interface(ugeth); | ||
1794 | break; | ||
1795 | default: | ||
1796 | ugeth_warn | ||
1797 | ("%s: Ack! Speed (%d) is not 10/100/1000!", | ||
1798 | dev->name, mii_info->speed); | ||
1799 | break; | ||
1800 | } | ||
1801 | |||
1802 | ugeth_info("%s: Speed %dBT", dev->name, | ||
1803 | mii_info->speed); | ||
1804 | |||
1805 | ugeth->oldspeed = mii_info->speed; | ||
1806 | } | ||
1807 | |||
1808 | if (!ugeth->oldlink) { | ||
1809 | ugeth_info("%s: Link is up", dev->name); | ||
1810 | ugeth->oldlink = 1; | ||
1811 | netif_carrier_on(dev); | ||
1812 | netif_schedule(dev); | ||
1813 | } | ||
1814 | } else { | ||
1815 | if (ugeth->oldlink) { | ||
1816 | ugeth_info("%s: Link is down", dev->name); | ||
1817 | ugeth->oldlink = 0; | ||
1818 | ugeth->oldspeed = 0; | ||
1819 | ugeth->oldduplex = -1; | ||
1820 | netif_carrier_off(dev); | ||
1821 | } | ||
1822 | } | ||
1823 | } | ||
1824 | |||
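adjust_link() updates MACCFG2 with the usual read-modify-write sequence (in_be32(), mask, out_be32()). Below is a minimal sketch of that pattern as a standalone helper; the helper name and signature are illustrative only and are not part of this driver.

	/* Sketch only: read-modify-write on a big-endian MMIO register, as done
	 * for MACCFG2_FDX above. clear_bits/set_bits are caller-supplied masks. */
	static inline void ug_clrsetbits_be32(volatile u32 __iomem *reg,
					      u32 clear_bits, u32 set_bits)
	{
		u32 val = in_be32(reg);

		val &= ~clear_bits;
		val |= set_bits;
		out_be32(reg, val);
	}

With such a helper the half-duplex branch above would reduce to a single call, e.g. ug_clrsetbits_be32(&ug_regs->maccfg2, MACCFG2_FDX, 0).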
1825 | /* Configure the PHY for dev. | ||
1826 |  * Returns 0 on success, -1 on failure. | ||
1827 | */ | ||
1828 | static int init_phy(struct net_device *dev) | ||
1829 | { | ||
1830 | ucc_geth_private_t *ugeth = netdev_priv(dev); | ||
1831 | struct phy_info *curphy; | ||
1832 | ucc_mii_mng_t *mii_regs; | ||
1833 | struct ugeth_mii_info *mii_info; | ||
1834 | int err; | ||
1835 | |||
1836 | mii_regs = &ugeth->ug_regs->miimng; | ||
1837 | |||
1838 | ugeth->oldlink = 0; | ||
1839 | ugeth->oldspeed = 0; | ||
1840 | ugeth->oldduplex = -1; | ||
1841 | |||
1842 | mii_info = kmalloc(sizeof(struct ugeth_mii_info), GFP_KERNEL); | ||
1843 | |||
1844 | if (NULL == mii_info) { | ||
1845 | ugeth_err("%s: Could not allocate mii_info", dev->name); | ||
1846 | return -ENOMEM; | ||
1847 | } | ||
1848 | |||
1849 | mii_info->mii_regs = mii_regs; | ||
1850 | mii_info->speed = SPEED_1000; | ||
1851 | mii_info->duplex = DUPLEX_FULL; | ||
1852 | mii_info->pause = 0; | ||
1853 | mii_info->link = 0; | ||
1854 | |||
1855 | mii_info->advertising = (ADVERTISED_10baseT_Half | | ||
1856 | ADVERTISED_10baseT_Full | | ||
1857 | ADVERTISED_100baseT_Half | | ||
1858 | ADVERTISED_100baseT_Full | | ||
1859 | ADVERTISED_1000baseT_Full); | ||
1860 | mii_info->autoneg = 1; | ||
1861 | |||
1862 | mii_info->mii_id = ugeth->ug_info->phy_address; | ||
1863 | |||
1864 | mii_info->dev = dev; | ||
1865 | |||
1866 | mii_info->mdio_read = &read_phy_reg; | ||
1867 | mii_info->mdio_write = &write_phy_reg; | ||
1868 | |||
1869 | ugeth->mii_info = mii_info; | ||
1870 | |||
1871 | spin_lock_irq(&ugeth->lock); | ||
1872 | |||
1873 | /* Set this UCC to be the master of the MII management */ | ||
1874 | ucc_set_qe_mux_mii_mng(ugeth->ug_info->uf_info.ucc_num); | ||
1875 | |||
1876 | if (init_mii_management_configuration(1, | ||
1877 | ugeth->ug_info-> | ||
1878 | miiPreambleSupress, | ||
1879 | &mii_regs->miimcfg, | ||
1880 | &mii_regs->miimind)) { | ||
1881 | ugeth_err("%s: The MII Bus is stuck!", dev->name); | ||
1882 | err = -1; | ||
1883 | goto bus_fail; | ||
1884 | } | ||
1885 | |||
1886 | spin_unlock_irq(&ugeth->lock); | ||
1887 | |||
1888 | /* get info for this PHY */ | ||
1889 | curphy = get_phy_info(ugeth->mii_info); | ||
1890 | |||
1891 | if (curphy == NULL) { | ||
1892 | ugeth_err("%s: No PHY found", dev->name); | ||
1893 | err = -1; | ||
1894 | goto no_phy; | ||
1895 | } | ||
1896 | |||
1897 | mii_info->phyinfo = curphy; | ||
1898 | |||
1899 | /* Run the commands which initialize the PHY */ | ||
1900 | if (curphy->init) { | ||
1901 | err = curphy->init(ugeth->mii_info); | ||
1902 | if (err) | ||
1903 | goto phy_init_fail; | ||
1904 | } | ||
1905 | |||
1906 | return 0; | ||
1907 | |||
1908 | phy_init_fail: | ||
1909 | no_phy: | ||
1910 | bus_fail: | ||
1911 | kfree(mii_info); | ||
1912 | |||
1913 | return err; | ||
1914 | } | ||
1915 | |||
1916 | #ifdef CONFIG_UGETH_TX_ON_DEMOND | ||
1917 | static int ugeth_transmit_on_demand(ucc_geth_private_t *ugeth) | ||
1918 | { | ||
1919 | ucc_fast_transmit_on_demand(ugeth->uccf); | ||
1920 | |||
1921 | return 0; | ||
1922 | } | ||
1923 | #endif | ||
1924 | |||
1925 | static int ugeth_graceful_stop_tx(ucc_geth_private_t *ugeth) | ||
1926 | { | ||
1927 | ucc_fast_private_t *uccf; | ||
1928 | u32 cecr_subblock; | ||
1929 | u32 temp; | ||
1930 | |||
1931 | uccf = ugeth->uccf; | ||
1932 | |||
1933 | /* Mask GRACEFUL STOP TX interrupt bit and clear it */ | ||
1934 | temp = in_be32(uccf->p_uccm); | ||
1935 | temp &= ~UCCE_GRA; | ||
1936 | out_be32(uccf->p_uccm, temp); | ||
1937 | out_be32(uccf->p_ucce, UCCE_GRA); /* clear by writing 1 */ | ||
1938 | |||
1939 | /* Issue host command */ | ||
1940 | cecr_subblock = | ||
1941 | ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); | ||
1942 | qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock, | ||
1943 | (u8) QE_CR_PROTOCOL_ETHERNET, 0); | ||
1944 | |||
1945 | /* Wait for command to complete */ | ||
1946 | do { | ||
1947 | temp = in_be32(uccf->p_ucce); | ||
1948 | } while (!(temp & UCCE_GRA)); | ||
1949 | |||
1950 | uccf->stopped_tx = 1; | ||
1951 | |||
1952 | return 0; | ||
1953 | } | ||
1954 | |||
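The loop above polls UCCE_GRA with no upper bound, so a command that is never acknowledged would spin forever. A bounded variant is sketched below; the retry count and delay are illustrative values, not taken from the driver, and it assumes <linux/delay.h> for udelay().

	/* Sketch only: bounded poll for the graceful-stop-Tx acknowledge. */
	static int ugeth_wait_gra(ucc_fast_private_t *uccf)
	{
		int i;

		for (i = 0; i < 1000; i++) {
			if (in_be32(uccf->p_ucce) & UCCE_GRA)
				return 0;
			udelay(1);
		}
		return -ETIMEDOUT;	/* never acknowledged */
	}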
1955 | static int ugeth_graceful_stop_rx(ucc_geth_private_t * ugeth) | ||
1956 | { | ||
1957 | ucc_fast_private_t *uccf; | ||
1958 | u32 cecr_subblock; | ||
1959 | u8 temp; | ||
1960 | |||
1961 | uccf = ugeth->uccf; | ||
1962 | |||
1963 | /* Clear acknowledge bit */ | ||
1964 | temp = ugeth->p_rx_glbl_pram->rxgstpack; | ||
1965 | temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX; | ||
1966 | ugeth->p_rx_glbl_pram->rxgstpack = temp; | ||
1967 | |||
1968 | /* Keep issuing command and checking acknowledge bit until | ||
1969 | it is asserted, according to spec */ | ||
1970 | do { | ||
1971 | /* Issue host command */ | ||
1972 | cecr_subblock = | ||
1973 | ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info. | ||
1974 | ucc_num); | ||
1975 | qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock, | ||
1976 | (u8) QE_CR_PROTOCOL_ETHERNET, 0); | ||
1977 | |||
1978 | temp = ugeth->p_rx_glbl_pram->rxgstpack; | ||
1979 | } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX)); | ||
1980 | |||
1981 | uccf->stopped_rx = 1; | ||
1982 | |||
1983 | return 0; | ||
1984 | } | ||
1985 | |||
1986 | static int ugeth_restart_tx(ucc_geth_private_t *ugeth) | ||
1987 | { | ||
1988 | ucc_fast_private_t *uccf; | ||
1989 | u32 cecr_subblock; | ||
1990 | |||
1991 | uccf = ugeth->uccf; | ||
1992 | |||
1993 | cecr_subblock = | ||
1994 | ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); | ||
1995 | qe_issue_cmd(QE_RESTART_TX, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET, | ||
1996 | 0); | ||
1997 | uccf->stopped_tx = 0; | ||
1998 | |||
1999 | return 0; | ||
2000 | } | ||
2001 | |||
2002 | static int ugeth_restart_rx(ucc_geth_private_t *ugeth) | ||
2003 | { | ||
2004 | ucc_fast_private_t *uccf; | ||
2005 | u32 cecr_subblock; | ||
2006 | |||
2007 | uccf = ugeth->uccf; | ||
2008 | |||
2009 | cecr_subblock = | ||
2010 | ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); | ||
2011 | qe_issue_cmd(QE_RESTART_RX, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET, | ||
2012 | 0); | ||
2013 | uccf->stopped_rx = 0; | ||
2014 | |||
2015 | return 0; | ||
2016 | } | ||
2017 | |||
2018 | static int ugeth_enable(ucc_geth_private_t *ugeth, comm_dir_e mode) | ||
2019 | { | ||
2020 | ucc_fast_private_t *uccf; | ||
2021 | int enabled_tx, enabled_rx; | ||
2022 | |||
2023 | uccf = ugeth->uccf; | ||
2024 | |||
2025 | /* check if the UCC number is in range. */ | ||
2026 | if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { | ||
2027 | ugeth_err("%s: ucc_num out of range.", __FUNCTION__); | ||
2028 | return -EINVAL; | ||
2029 | } | ||
2030 | |||
2031 | enabled_tx = uccf->enabled_tx; | ||
2032 | enabled_rx = uccf->enabled_rx; | ||
2033 | |||
2034 | /* Get Tx and Rx going again, in case this channel was actively | ||
2035 | disabled. */ | ||
2036 | if ((mode & COMM_DIR_TX) && (!enabled_tx) && uccf->stopped_tx) | ||
2037 | ugeth_restart_tx(ugeth); | ||
2038 | if ((mode & COMM_DIR_RX) && (!enabled_rx) && uccf->stopped_rx) | ||
2039 | ugeth_restart_rx(ugeth); | ||
2040 | |||
2041 | ucc_fast_enable(uccf, mode); /* OK to do even if not disabled */ | ||
2042 | |||
2043 | return 0; | ||
2044 | |||
2045 | } | ||
2046 | |||
2047 | static int ugeth_disable(ucc_geth_private_t * ugeth, comm_dir_e mode) | ||
2048 | { | ||
2049 | ucc_fast_private_t *uccf; | ||
2050 | |||
2051 | uccf = ugeth->uccf; | ||
2052 | |||
2053 | /* check if the UCC number is in range. */ | ||
2054 | if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { | ||
2055 | ugeth_err("%s: ucc_num out of range.", __FUNCTION__); | ||
2056 | return -EINVAL; | ||
2057 | } | ||
2058 | |||
2059 | /* Stop any transmissions */ | ||
2060 | if ((mode & COMM_DIR_TX) && uccf->enabled_tx && !uccf->stopped_tx) | ||
2061 | ugeth_graceful_stop_tx(ugeth); | ||
2062 | |||
2063 | /* Stop any receptions */ | ||
2064 | if ((mode & COMM_DIR_RX) && uccf->enabled_rx && !uccf->stopped_rx) | ||
2065 | ugeth_graceful_stop_rx(ugeth); | ||
2066 | |||
2067 | ucc_fast_disable(ugeth->uccf, mode); /* OK to do even if not enabled */ | ||
2068 | |||
2069 | return 0; | ||
2070 | } | ||
2071 | |||
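ugeth_enable()/ugeth_disable() are meant to bracket reconfiguration of shared state: quiesce whichever directions are currently active, touch the hardware, then restore exactly those directions. The address-filtering helpers below follow this pattern; here is a condensed sketch of the same idiom (the function name is hypothetical).

	/* Sketch only: quiesce/reconfigure/restore, mirroring the pattern used
	 * by the hash-filtering routines later in this file. */
	static void ugeth_example_reconfigure(ucc_geth_private_t *ugeth)
	{
		comm_dir_e comm_dir = 0;

		if (ugeth->uccf->enabled_tx)
			comm_dir |= COMM_DIR_TX;
		if (ugeth->uccf->enabled_rx)
			comm_dir |= COMM_DIR_RX;

		if (comm_dir)
			ugeth_disable(ugeth, comm_dir);

		/* ... modify hash tables or other shared registers here ... */

		if (comm_dir)
			ugeth_enable(ugeth, comm_dir);
	}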
2072 | static void ugeth_dump_regs(ucc_geth_private_t *ugeth) | ||
2073 | { | ||
2074 | #ifdef DEBUG | ||
2075 | ucc_fast_dump_regs(ugeth->uccf); | ||
2076 | dump_regs(ugeth); | ||
2077 | dump_bds(ugeth); | ||
2078 | #endif | ||
2079 | } | ||
2080 | |||
2081 | #ifdef CONFIG_UGETH_FILTERING | ||
2082 | static int ugeth_ext_filtering_serialize_tad(ucc_geth_tad_params_t * | ||
2083 | p_UccGethTadParams, | ||
2084 | qe_fltr_tad_t *qe_fltr_tad) | ||
2085 | { | ||
2086 | u16 temp; | ||
2087 | |||
2088 | /* Zero serialized TAD */ | ||
2089 | memset(qe_fltr_tad, 0, QE_FLTR_TAD_SIZE); | ||
2090 | |||
2091 | qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_V; /* Must have this */ | ||
2092 | if (p_UccGethTadParams->rx_non_dynamic_extended_features_mode || | ||
2093 | (p_UccGethTadParams->vtag_op != UCC_GETH_VLAN_OPERATION_TAGGED_NOP) | ||
2094 | || (p_UccGethTadParams->vnontag_op != | ||
2095 | UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP) | ||
2096 | ) | ||
2097 | qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_EF; | ||
2098 | if (p_UccGethTadParams->reject_frame) | ||
2099 | qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_REJ; | ||
2100 | temp = | ||
2101 | (u16) (((u16) p_UccGethTadParams-> | ||
2102 | vtag_op) << UCC_GETH_TAD_VTAG_OP_SHIFT); | ||
2103 | qe_fltr_tad->serialized[0] |= (u8) (temp >> 8); /* upper bits */ | ||
2104 | |||
2105 | qe_fltr_tad->serialized[1] |= (u8) (temp & 0x00ff); /* lower bits */ | ||
2106 | if (p_UccGethTadParams->vnontag_op == | ||
2107 | UCC_GETH_VLAN_OPERATION_NON_TAGGED_Q_TAG_INSERT) | ||
2108 | qe_fltr_tad->serialized[1] |= UCC_GETH_TAD_V_NON_VTAG_OP; | ||
2109 | qe_fltr_tad->serialized[1] |= | ||
2110 | p_UccGethTadParams->rqos << UCC_GETH_TAD_RQOS_SHIFT; | ||
2111 | |||
2112 | qe_fltr_tad->serialized[2] |= | ||
2113 | p_UccGethTadParams->vpri << UCC_GETH_TAD_V_PRIORITY_SHIFT; | ||
2114 | /* upper bits */ | ||
2115 | qe_fltr_tad->serialized[2] |= (u8) (p_UccGethTadParams->vid >> 8); | ||
2116 | /* lower bits */ | ||
2117 | qe_fltr_tad->serialized[3] |= (u8) (p_UccGethTadParams->vid & 0x00ff); | ||
2118 | |||
2119 | return 0; | ||
2120 | } | ||
2121 | |||
2122 | static enet_addr_container_t | ||
2123 | *ugeth_82xx_filtering_get_match_addr_in_hash(ucc_geth_private_t *ugeth, | ||
2124 | enet_addr_t *p_enet_addr) | ||
2125 | { | ||
2126 | enet_addr_container_t *enet_addr_cont; | ||
2127 | struct list_head *p_lh; | ||
2128 | u16 i, num; | ||
2129 | int32_t j; | ||
2130 | u8 *p_counter; | ||
2131 | |||
2132 | if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) { | ||
2133 | p_lh = &ugeth->group_hash_q; | ||
2134 | p_counter = &(ugeth->numGroupAddrInHash); | ||
2135 | } else { | ||
2136 | p_lh = &ugeth->ind_hash_q; | ||
2137 | p_counter = &(ugeth->numIndAddrInHash); | ||
2138 | } | ||
2139 | |||
2140 | if (!p_lh) | ||
2141 | return NULL; | ||
2142 | |||
2143 | num = *p_counter; | ||
2144 | |||
2145 | for (i = 0; i < num; i++) { | ||
2146 | enet_addr_cont = | ||
2147 | (enet_addr_container_t *) | ||
2148 | ENET_ADDR_CONT_ENTRY(dequeue(p_lh)); | ||
2149 | for (j = ENET_NUM_OCTETS_PER_ADDRESS - 1; j >= 0; j--) { | ||
2150 | if ((*p_enet_addr)[j] != (enet_addr_cont->address)[j]) | ||
2151 | break; | ||
2152 | if (j == 0) | ||
2153 | return enet_addr_cont; /* Found */ | ||
2154 | } | ||
2155 | enqueue(p_lh, &enet_addr_cont->node); /* Put it back */ | ||
2156 | } | ||
2157 | return NULL; | ||
2158 | } | ||
2159 | |||
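The search above dequeues every entry, compares the stored address byte by byte, and re-queues only the entries that do not match, so a hit is returned still detached from the list. The octet loop is equivalent to a memcmp(); a compact sketch of the same scan follows (hypothetical helper, assuming <linux/string.h>).

	/* Sketch only: the same scan expressed with memcmp(); a match stays
	 * dequeued, everything else is put back in order. */
	static enet_addr_container_t *example_find_addr(struct list_head *p_lh,
							u16 num,
							enet_addr_t *p_enet_addr)
	{
		enet_addr_container_t *c;
		u16 i;

		for (i = 0; i < num; i++) {
			c = (enet_addr_container_t *)
			    ENET_ADDR_CONT_ENTRY(dequeue(p_lh));
			if (!memcmp(c->address, *p_enet_addr,
				    ENET_NUM_OCTETS_PER_ADDRESS))
				return c;		/* found */
			enqueue(p_lh, &c->node);	/* no match: put it back */
		}
		return NULL;
	}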
2160 | static int ugeth_82xx_filtering_add_addr_in_hash(ucc_geth_private_t *ugeth, | ||
2161 | enet_addr_t *p_enet_addr) | ||
2162 | { | ||
2163 | ucc_geth_enet_address_recognition_location_e location; | ||
2164 | enet_addr_container_t *enet_addr_cont; | ||
2165 | struct list_head *p_lh; | ||
2166 | u8 i; | ||
2167 | u32 limit; | ||
2168 | u8 *p_counter; | ||
2169 | |||
2170 | if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) { | ||
2171 | p_lh = &ugeth->group_hash_q; | ||
2172 | limit = ugeth->ug_info->maxGroupAddrInHash; | ||
2173 | location = | ||
2174 | UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_GROUP_HASH; | ||
2175 | p_counter = &(ugeth->numGroupAddrInHash); | ||
2176 | } else { | ||
2177 | p_lh = &ugeth->ind_hash_q; | ||
2178 | limit = ugeth->ug_info->maxIndAddrInHash; | ||
2179 | location = | ||
2180 | UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_INDIVIDUAL_HASH; | ||
2181 | p_counter = &(ugeth->numIndAddrInHash); | ||
2182 | } | ||
2183 | |||
2184 | if ((enet_addr_cont = | ||
2185 | ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr))) { | ||
2186 | list_add(p_lh, &enet_addr_cont->node); /* Put it back */ | ||
2187 | return 0; | ||
2188 | } | ||
2189 | if ((!p_lh) || (!(*p_counter < limit))) | ||
2190 | return -EBUSY; | ||
2191 | if (!(enet_addr_cont = get_enet_addr_container())) | ||
2192 | return -ENOMEM; | ||
2193 | for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++) | ||
2194 | (enet_addr_cont->address)[i] = (*p_enet_addr)[i]; | ||
2195 | enet_addr_cont->location = location; | ||
2196 | enqueue(p_lh, &enet_addr_cont->node); /* Put it back */ | ||
2197 | ++(*p_counter); | ||
2198 | |||
2199 | hw_add_addr_in_hash(ugeth, &(enet_addr_cont->address)); | ||
2200 | |||
2201 | return 0; | ||
2202 | } | ||
2203 | |||
2204 | static int ugeth_82xx_filtering_clear_addr_in_hash(ucc_geth_private_t *ugeth, | ||
2205 | enet_addr_t *p_enet_addr) | ||
2206 | { | ||
2207 | ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt; | ||
2208 | enet_addr_container_t *enet_addr_cont; | ||
2209 | ucc_fast_private_t *uccf; | ||
2210 | comm_dir_e comm_dir; | ||
2211 | u16 i, num; | ||
2212 | struct list_head *p_lh; | ||
2213 | u32 *addr_h, *addr_l; | ||
2214 | u8 *p_counter; | ||
2215 | |||
2216 | uccf = ugeth->uccf; | ||
2217 | |||
2218 | p_82xx_addr_filt = | ||
2219 | (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram-> | ||
2220 | addressfiltering; | ||
2221 | |||
2222 | if (! | ||
2223 | (enet_addr_cont = | ||
2224 | ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr))) | ||
2225 | return -ENOENT; | ||
2226 | |||
2227 | /* It's been found and removed from the CQ. */ | ||
2228 | /* Now destroy its container */ | ||
2229 | put_enet_addr_container(enet_addr_cont); | ||
2230 | |||
2231 | if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) { | ||
2232 | addr_h = &(p_82xx_addr_filt->gaddr_h); | ||
2233 | addr_l = &(p_82xx_addr_filt->gaddr_l); | ||
2234 | p_lh = &ugeth->group_hash_q; | ||
2235 | p_counter = &(ugeth->numGroupAddrInHash); | ||
2236 | } else { | ||
2237 | addr_h = &(p_82xx_addr_filt->iaddr_h); | ||
2238 | addr_l = &(p_82xx_addr_filt->iaddr_l); | ||
2239 | p_lh = &ugeth->ind_hash_q; | ||
2240 | p_counter = &(ugeth->numIndAddrInHash); | ||
2241 | } | ||
2242 | |||
2243 | comm_dir = 0; | ||
2244 | if (uccf->enabled_tx) | ||
2245 | comm_dir |= COMM_DIR_TX; | ||
2246 | if (uccf->enabled_rx) | ||
2247 | comm_dir |= COMM_DIR_RX; | ||
2248 | if (comm_dir) | ||
2249 | ugeth_disable(ugeth, comm_dir); | ||
2250 | |||
2251 | /* Clear the hash table. */ | ||
2252 | out_be32(addr_h, 0x00000000); | ||
2253 | out_be32(addr_l, 0x00000000); | ||
2254 | |||
2255 | /* Add all remaining CQ elements back into hash */ | ||
2256 | num = --(*p_counter); | ||
2257 | for (i = 0; i < num; i++) { | ||
2258 | enet_addr_cont = | ||
2259 | (enet_addr_container_t *) | ||
2260 | ENET_ADDR_CONT_ENTRY(dequeue(p_lh)); | ||
2261 | hw_add_addr_in_hash(ugeth, &(enet_addr_cont->address)); | ||
2262 | enqueue(p_lh, &enet_addr_cont->node); /* Put it back */ | ||
2263 | } | ||
2264 | |||
2265 | if (comm_dir) | ||
2266 | ugeth_enable(ugeth, comm_dir); | ||
2267 | |||
2268 | return 0; | ||
2269 | } | ||
2270 | #endif /* CONFIG_UGETH_FILTERING */ | ||
2271 | |||
2272 | static int ugeth_82xx_filtering_clear_all_addr_in_hash(ucc_geth_private_t * | ||
2273 | ugeth, | ||
2274 | enet_addr_type_e | ||
2275 | enet_addr_type) | ||
2276 | { | ||
2277 | ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt; | ||
2278 | ucc_fast_private_t *uccf; | ||
2279 | comm_dir_e comm_dir; | ||
2280 | struct list_head *p_lh; | ||
2281 | u16 i, num; | ||
2282 | u32 *addr_h, *addr_l; | ||
2283 | u8 *p_counter; | ||
2284 | |||
2285 | uccf = ugeth->uccf; | ||
2286 | |||
2287 | p_82xx_addr_filt = | ||
2288 | (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram-> | ||
2289 | addressfiltering; | ||
2290 | |||
2291 | if (enet_addr_type == ENET_ADDR_TYPE_GROUP) { | ||
2292 | addr_h = &(p_82xx_addr_filt->gaddr_h); | ||
2293 | addr_l = &(p_82xx_addr_filt->gaddr_l); | ||
2294 | p_lh = &ugeth->group_hash_q; | ||
2295 | p_counter = &(ugeth->numGroupAddrInHash); | ||
2296 | } else if (enet_addr_type == ENET_ADDR_TYPE_INDIVIDUAL) { | ||
2297 | addr_h = &(p_82xx_addr_filt->iaddr_h); | ||
2298 | addr_l = &(p_82xx_addr_filt->iaddr_l); | ||
2299 | p_lh = &ugeth->ind_hash_q; | ||
2300 | p_counter = &(ugeth->numIndAddrInHash); | ||
2301 | } else | ||
2302 | return -EINVAL; | ||
2303 | |||
2304 | comm_dir = 0; | ||
2305 | if (uccf->enabled_tx) | ||
2306 | comm_dir |= COMM_DIR_TX; | ||
2307 | if (uccf->enabled_rx) | ||
2308 | comm_dir |= COMM_DIR_RX; | ||
2309 | if (comm_dir) | ||
2310 | ugeth_disable(ugeth, comm_dir); | ||
2311 | |||
2312 | /* Clear the hash table. */ | ||
2313 | out_be32(addr_h, 0x00000000); | ||
2314 | out_be32(addr_l, 0x00000000); | ||
2315 | |||
2316 | if (!p_lh) | ||
2317 | return 0; | ||
2318 | |||
2319 | num = *p_counter; | ||
2320 | |||
2321 | /* Delete all remaining CQ elements */ | ||
2322 | for (i = 0; i < num; i++) | ||
2323 | put_enet_addr_container(ENET_ADDR_CONT_ENTRY(dequeue(p_lh))); | ||
2324 | |||
2325 | *p_counter = 0; | ||
2326 | |||
2327 | if (comm_dir) | ||
2328 | ugeth_enable(ugeth, comm_dir); | ||
2329 | |||
2330 | return 0; | ||
2331 | } | ||
2332 | |||
2333 | #ifdef CONFIG_UGETH_FILTERING | ||
2334 | static int ugeth_82xx_filtering_add_addr_in_paddr(ucc_geth_private_t *ugeth, | ||
2335 | enet_addr_t *p_enet_addr, | ||
2336 | u8 paddr_num) | ||
2337 | { | ||
2338 | int i; | ||
2339 | |||
2340 | if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) | ||
2341 | ugeth_warn | ||
2342 | ("%s: multicast address added to paddr will have no " | ||
2343 | "effect - is this what you wanted?", | ||
2344 | __FUNCTION__); | ||
2345 | |||
2346 | ugeth->indAddrRegUsed[paddr_num] = 1; /* mark this paddr as used */ | ||
2347 | /* store address in our database */ | ||
2348 | for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++) | ||
2349 | ugeth->paddr[paddr_num][i] = (*p_enet_addr)[i]; | ||
2350 | /* put in hardware */ | ||
2351 | return hw_add_addr_in_paddr(ugeth, p_enet_addr, paddr_num); | ||
2352 | } | ||
2353 | #endif /* CONFIG_UGETH_FILTERING */ | ||
2354 | |||
2355 | static int ugeth_82xx_filtering_clear_addr_in_paddr(ucc_geth_private_t *ugeth, | ||
2356 | u8 paddr_num) | ||
2357 | { | ||
2358 | ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */ | ||
2359 | return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */ | ||
2360 | } | ||
2361 | |||
2362 | static void ucc_geth_memclean(ucc_geth_private_t *ugeth) | ||
2363 | { | ||
2364 | u16 i, j; | ||
2365 | u8 *bd; | ||
2366 | |||
2367 | if (!ugeth) | ||
2368 | return; | ||
2369 | |||
2370 | if (ugeth->uccf) | ||
2371 | ucc_fast_free(ugeth->uccf); | ||
2372 | |||
2373 | if (ugeth->p_thread_data_tx) { | ||
2374 | qe_muram_free(ugeth->thread_dat_tx_offset); | ||
2375 | ugeth->p_thread_data_tx = NULL; | ||
2376 | } | ||
2377 | if (ugeth->p_thread_data_rx) { | ||
2378 | qe_muram_free(ugeth->thread_dat_rx_offset); | ||
2379 | ugeth->p_thread_data_rx = NULL; | ||
2380 | } | ||
2381 | if (ugeth->p_exf_glbl_param) { | ||
2382 | qe_muram_free(ugeth->exf_glbl_param_offset); | ||
2383 | ugeth->p_exf_glbl_param = NULL; | ||
2384 | } | ||
2385 | if (ugeth->p_rx_glbl_pram) { | ||
2386 | qe_muram_free(ugeth->rx_glbl_pram_offset); | ||
2387 | ugeth->p_rx_glbl_pram = NULL; | ||
2388 | } | ||
2389 | if (ugeth->p_tx_glbl_pram) { | ||
2390 | qe_muram_free(ugeth->tx_glbl_pram_offset); | ||
2391 | ugeth->p_tx_glbl_pram = NULL; | ||
2392 | } | ||
2393 | if (ugeth->p_send_q_mem_reg) { | ||
2394 | qe_muram_free(ugeth->send_q_mem_reg_offset); | ||
2395 | ugeth->p_send_q_mem_reg = NULL; | ||
2396 | } | ||
2397 | if (ugeth->p_scheduler) { | ||
2398 | qe_muram_free(ugeth->scheduler_offset); | ||
2399 | ugeth->p_scheduler = NULL; | ||
2400 | } | ||
2401 | if (ugeth->p_tx_fw_statistics_pram) { | ||
2402 | qe_muram_free(ugeth->tx_fw_statistics_pram_offset); | ||
2403 | ugeth->p_tx_fw_statistics_pram = NULL; | ||
2404 | } | ||
2405 | if (ugeth->p_rx_fw_statistics_pram) { | ||
2406 | qe_muram_free(ugeth->rx_fw_statistics_pram_offset); | ||
2407 | ugeth->p_rx_fw_statistics_pram = NULL; | ||
2408 | } | ||
2409 | if (ugeth->p_rx_irq_coalescing_tbl) { | ||
2410 | qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset); | ||
2411 | ugeth->p_rx_irq_coalescing_tbl = NULL; | ||
2412 | } | ||
2413 | if (ugeth->p_rx_bd_qs_tbl) { | ||
2414 | qe_muram_free(ugeth->rx_bd_qs_tbl_offset); | ||
2415 | ugeth->p_rx_bd_qs_tbl = NULL; | ||
2416 | } | ||
2417 | if (ugeth->p_init_enet_param_shadow) { | ||
2418 | return_init_enet_entries(ugeth, | ||
2419 | &(ugeth->p_init_enet_param_shadow-> | ||
2420 | rxthread[0]), | ||
2421 | ENET_INIT_PARAM_MAX_ENTRIES_RX, | ||
2422 | ugeth->ug_info->riscRx, 1); | ||
2423 | return_init_enet_entries(ugeth, | ||
2424 | &(ugeth->p_init_enet_param_shadow-> | ||
2425 | txthread[0]), | ||
2426 | ENET_INIT_PARAM_MAX_ENTRIES_TX, | ||
2427 | ugeth->ug_info->riscTx, 0); | ||
2428 | kfree(ugeth->p_init_enet_param_shadow); | ||
2429 | ugeth->p_init_enet_param_shadow = NULL; | ||
2430 | } | ||
2431 | for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { | ||
2432 | bd = ugeth->p_tx_bd_ring[i]; | ||
2433 | for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) { | ||
2434 | if (ugeth->tx_skbuff[i][j]) { | ||
2435 | dma_unmap_single(NULL, | ||
2436 | BD_BUFFER_ARG(bd), | ||
2437 | (BD_STATUS_AND_LENGTH(bd) & | ||
2438 | BD_LENGTH_MASK), | ||
2439 | DMA_TO_DEVICE); | ||
2440 | dev_kfree_skb_any(ugeth->tx_skbuff[i][j]); | ||
2441 | ugeth->tx_skbuff[i][j] = NULL; | ||
2442 | } | ||
2443 | } | ||
2444 | |||
2445 | kfree(ugeth->tx_skbuff[i]); | ||
2446 | |||
2447 | if (ugeth->p_tx_bd_ring[i]) { | ||
2448 | if (ugeth->ug_info->uf_info.bd_mem_part == | ||
2449 | MEM_PART_SYSTEM) | ||
2450 | kfree((void *)ugeth->tx_bd_ring_offset[i]); | ||
2451 | else if (ugeth->ug_info->uf_info.bd_mem_part == | ||
2452 | MEM_PART_MURAM) | ||
2453 | qe_muram_free(ugeth->tx_bd_ring_offset[i]); | ||
2454 | ugeth->p_tx_bd_ring[i] = NULL; | ||
2455 | } | ||
2456 | } | ||
2457 | for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { | ||
2458 | if (ugeth->p_rx_bd_ring[i]) { | ||
2459 | /* Return existing data buffers in ring */ | ||
2460 | bd = ugeth->p_rx_bd_ring[i]; | ||
2461 | for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) { | ||
2462 | if (ugeth->rx_skbuff[i][j]) { | ||
2463 | dma_unmap_single(NULL, BD_BUFFER(bd), | ||
2464 | ugeth->ug_info-> | ||
2465 | uf_info. | ||
2466 | max_rx_buf_length + | ||
2467 | UCC_GETH_RX_DATA_BUF_ALIGNMENT, | ||
2468 | DMA_FROM_DEVICE); | ||
2469 | |||
2470 | dev_kfree_skb_any(ugeth-> | ||
2471 | rx_skbuff[i][j]); | ||
2472 | ugeth->rx_skbuff[i][j] = NULL; | ||
2473 | } | ||
2474 | bd += UCC_GETH_SIZE_OF_BD; | ||
2475 | } | ||
2476 | |||
2477 | kfree(ugeth->rx_skbuff[i]); | ||
2478 | |||
2479 | if (ugeth->ug_info->uf_info.bd_mem_part == | ||
2480 | MEM_PART_SYSTEM) | ||
2481 | kfree((void *)ugeth->rx_bd_ring_offset[i]); | ||
2482 | else if (ugeth->ug_info->uf_info.bd_mem_part == | ||
2483 | MEM_PART_MURAM) | ||
2484 | qe_muram_free(ugeth->rx_bd_ring_offset[i]); | ||
2485 | ugeth->p_rx_bd_ring[i] = NULL; | ||
2486 | } | ||
2487 | } | ||
2488 | while (!list_empty(&ugeth->group_hash_q)) | ||
2489 | put_enet_addr_container(ENET_ADDR_CONT_ENTRY | ||
2490 | (dequeue(&ugeth->group_hash_q))); | ||
2491 | while (!list_empty(&ugeth->ind_hash_q)) | ||
2492 | put_enet_addr_container(ENET_ADDR_CONT_ENTRY | ||
2493 | (dequeue(&ugeth->ind_hash_q))); | ||
2494 | |||
2495 | } | ||
2496 | |||
2497 | static void ucc_geth_set_multi(struct net_device *dev) | ||
2498 | { | ||
2499 | ucc_geth_private_t *ugeth; | ||
2500 | struct dev_mc_list *dmi; | ||
2501 | ucc_fast_t *uf_regs; | ||
2502 | ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt; | ||
2503 | enet_addr_t tempaddr; | ||
2504 | u8 *mcptr, *tdptr; | ||
2505 | int i, j; | ||
2506 | |||
2507 | ugeth = netdev_priv(dev); | ||
2508 | |||
2509 | uf_regs = ugeth->uccf->uf_regs; | ||
2510 | |||
2511 | if (dev->flags & IFF_PROMISC) { | ||
2512 | |||
2513 | /* Log any net taps. */ | ||
2514 | printk("%s: Promiscuous mode enabled.\n", dev->name); | ||
2515 | uf_regs->upsmr |= UPSMR_PRO; | ||
2516 | |||
2517 | } else { | ||
2518 | |||
2519 | uf_regs->upsmr &= ~UPSMR_PRO; | ||
2520 | |||
2521 | p_82xx_addr_filt = | ||
2522 | (ucc_geth_82xx_address_filtering_pram_t *) ugeth-> | ||
2523 | p_rx_glbl_pram->addressfiltering; | ||
2524 | |||
2525 | if (dev->flags & IFF_ALLMULTI) { | ||
2526 | /* Catch all multicast addresses, so set the | ||
2527 | * filter to all 1's. | ||
2528 | */ | ||
2529 | out_be32(&p_82xx_addr_filt->gaddr_h, 0xffffffff); | ||
2530 | out_be32(&p_82xx_addr_filt->gaddr_l, 0xffffffff); | ||
2531 | } else { | ||
2532 | /* Clear filter and add the addresses in the list. | ||
2533 | */ | ||
2534 | out_be32(&p_82xx_addr_filt->gaddr_h, 0x0); | ||
2535 | out_be32(&p_82xx_addr_filt->gaddr_l, 0x0); | ||
2536 | |||
2537 | dmi = dev->mc_list; | ||
2538 | |||
2539 | for (i = 0; i < dev->mc_count; i++, dmi = dmi->next) { | ||
2540 | |||
2541 | /* Only support group multicast for now. | ||
2542 | */ | ||
2543 | if (!(dmi->dmi_addr[0] & 1)) | ||
2544 | continue; | ||
2545 | |||
2546 | /* The address in dmi_addr is LSB first, | ||
2547 |  * and tempaddr is MSB first. We have to | ||
2548 | * copy bytes MSB first from dmi_addr. | ||
2549 | */ | ||
2550 | mcptr = (u8 *) dmi->dmi_addr + 5; | ||
2551 | tdptr = (u8 *) & tempaddr; | ||
2552 | for (j = 0; j < 6; j++) | ||
2553 | *tdptr++ = *mcptr--; | ||
2554 | |||
2555 | /* Ask CPM to run CRC and set bit in | ||
2556 | * filter mask. | ||
2557 | */ | ||
2558 | hw_add_addr_in_hash(ugeth, &tempaddr); | ||
2559 | |||
2560 | } | ||
2561 | } | ||
2562 | } | ||
2563 | } | ||
2564 | |||
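As the comment in the loop above notes, dmi_addr stores the MAC address LSB first while the hash hardware expects MSB first, so the copy walks the source backwards. A small worked example of the same reversal (hypothetical helper; the sample address is illustrative):

	/* Sketch only: reverse a 6-byte address as the mcptr/tdptr loop does.
	 * in  = {0x01, 0x00, 0x5e, 0x00, 0x00, 0x01}
	 * out = {0x01, 0x00, 0x00, 0x5e, 0x00, 0x01} */
	static void example_reverse_mac(const u8 *in, u8 *out)
	{
		int j;

		for (j = 0; j < 6; j++)
			out[j] = in[5 - j];
	}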
2565 | static void ucc_geth_stop(ucc_geth_private_t *ugeth) | ||
2566 | { | ||
2567 | ucc_geth_t *ug_regs = ugeth->ug_regs; | ||
2568 | u32 tempval; | ||
2569 | |||
2570 | ugeth_vdbg("%s: IN", __FUNCTION__); | ||
2571 | |||
2572 | /* Disable the controller */ | ||
2573 | ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); | ||
2574 | |||
2575 | /* Tell the kernel the link is down */ | ||
2576 | ugeth->mii_info->link = 0; | ||
2577 | adjust_link(ugeth->dev); | ||
2578 | |||
2579 | /* Mask all interrupts */ | ||
2580 | out_be32(ugeth->uccf->p_ucce, 0x00000000); | ||
2581 | |||
2582 | /* Clear all interrupts */ | ||
2583 | out_be32(ugeth->uccf->p_ucce, 0xffffffff); | ||
2584 | |||
2585 | /* Disable Rx and Tx */ | ||
2586 | tempval = in_be32(&ug_regs->maccfg1); | ||
2587 | tempval &= ~(MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX); | ||
2588 | out_be32(&ug_regs->maccfg1, tempval); | ||
2589 | |||
2590 | if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) { | ||
2591 | /* Clear any pending interrupts */ | ||
2592 | mii_clear_phy_interrupt(ugeth->mii_info); | ||
2593 | |||
2594 | /* Disable PHY Interrupts */ | ||
2595 | mii_configure_phy_interrupt(ugeth->mii_info, | ||
2596 | MII_INTERRUPT_DISABLED); | ||
2597 | } | ||
2598 | |||
2599 | free_irq(ugeth->ug_info->uf_info.irq, ugeth->dev); | ||
2600 | |||
2601 | if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) { | ||
2602 | free_irq(ugeth->ug_info->phy_interrupt, ugeth->dev); | ||
2603 | } else { | ||
2604 | del_timer_sync(&ugeth->phy_info_timer); | ||
2605 | } | ||
2606 | |||
2607 | ucc_geth_memclean(ugeth); | ||
2608 | } | ||
2609 | |||
2610 | static int ucc_geth_startup(ucc_geth_private_t *ugeth) | ||
2611 | { | ||
2612 | ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt; | ||
2613 | ucc_geth_init_pram_t *p_init_enet_pram; | ||
2614 | ucc_fast_private_t *uccf; | ||
2615 | ucc_geth_info_t *ug_info; | ||
2616 | ucc_fast_info_t *uf_info; | ||
2617 | ucc_fast_t *uf_regs; | ||
2618 | ucc_geth_t *ug_regs; | ||
2619 | int ret_val = -EINVAL; | ||
2620 | u32 remoder = UCC_GETH_REMODER_INIT; | ||
2621 | u32 init_enet_pram_offset, cecr_subblock, command, maccfg1; | ||
2622 | u32 ifstat, i, j, size, l2qt, l3qt, length; | ||
2623 | u16 temoder = UCC_GETH_TEMODER_INIT; | ||
2624 | u16 test; | ||
2625 | u8 function_code = 0; | ||
2626 | u8 *bd, *endOfRing; | ||
2627 | u8 numThreadsRxNumerical, numThreadsTxNumerical; | ||
2628 | |||
2629 | ugeth_vdbg("%s: IN", __FUNCTION__); | ||
2630 | |||
2631 | ug_info = ugeth->ug_info; | ||
2632 | uf_info = &ug_info->uf_info; | ||
2633 | |||
2634 | if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) || | ||
2635 | (uf_info->bd_mem_part == MEM_PART_MURAM))) { | ||
2636 | ugeth_err("%s: Bad memory partition value.", __FUNCTION__); | ||
2637 | return -EINVAL; | ||
2638 | } | ||
2639 | |||
2640 | /* Rx BD lengths */ | ||
2641 | for (i = 0; i < ug_info->numQueuesRx; i++) { | ||
2642 | if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) || | ||
2643 | (ug_info->bdRingLenRx[i] % | ||
2644 | UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) { | ||
2645 | ugeth_err | ||
2646 | ("%s: Rx BD ring length must be multiple of 4," | ||
2647 | " no smaller than 8.", __FUNCTION__); | ||
2648 | return -EINVAL; | ||
2649 | } | ||
2650 | } | ||
2651 | |||
2652 | /* Tx BD lengths */ | ||
2653 | for (i = 0; i < ug_info->numQueuesTx; i++) { | ||
2654 | if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) { | ||
2655 | ugeth_err | ||
2656 | ("%s: Tx BD ring length must be no smaller than 2.", | ||
2657 | __FUNCTION__); | ||
2658 | return -EINVAL; | ||
2659 | } | ||
2660 | } | ||
2661 | |||
2662 | /* mrblr */ | ||
2663 | if ((uf_info->max_rx_buf_length == 0) || | ||
2664 | (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) { | ||
2665 | ugeth_err | ||
2666 | ("%s: max_rx_buf_length must be non-zero multiple of 128.", | ||
2667 | __FUNCTION__); | ||
2668 | return -EINVAL; | ||
2669 | } | ||
2670 | |||
2671 | /* num Tx queues */ | ||
2672 | if (ug_info->numQueuesTx > NUM_TX_QUEUES) { | ||
2673 | ugeth_err("%s: number of tx queues too large.", __FUNCTION__); | ||
2674 | return -EINVAL; | ||
2675 | } | ||
2676 | |||
2677 | /* num Rx queues */ | ||
2678 | if (ug_info->numQueuesRx > NUM_RX_QUEUES) { | ||
2679 | ugeth_err("%s: number of rx queues too large.", __FUNCTION__); | ||
2680 | return -EINVAL; | ||
2681 | } | ||
2682 | |||
2683 | /* l2qt */ | ||
2684 | for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) { | ||
2685 | if (ug_info->l2qt[i] >= ug_info->numQueuesRx) { | ||
2686 | ugeth_err | ||
2687 | ("%s: VLAN priority table entry must not be" | ||
2688 | " larger than number of Rx queues.", | ||
2689 | __FUNCTION__); | ||
2690 | return -EINVAL; | ||
2691 | } | ||
2692 | } | ||
2693 | |||
2694 | /* l3qt */ | ||
2695 | for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) { | ||
2696 | if (ug_info->l3qt[i] >= ug_info->numQueuesRx) { | ||
2697 | ugeth_err | ||
2698 | ("%s: IP priority table entry must not be" | ||
2699 | " larger than number of Rx queues.", | ||
2700 | __FUNCTION__); | ||
2701 | return -EINVAL; | ||
2702 | } | ||
2703 | } | ||
2704 | |||
2705 | if (ug_info->cam && !ug_info->ecamptr) { | ||
2706 | ugeth_err("%s: If cam mode is chosen, must supply cam ptr.", | ||
2707 | __FUNCTION__); | ||
2708 | return -EINVAL; | ||
2709 | } | ||
2710 | |||
2711 | if ((ug_info->numStationAddresses != | ||
2712 | UCC_GETH_NUM_OF_STATION_ADDRESSES_1) | ||
2713 | && ug_info->rxExtendedFiltering) { | ||
2714 | ugeth_err("%s: Number of station addresses greater than 1 " | ||
2715 | "not allowed in extended parsing mode.", | ||
2716 | __FUNCTION__); | ||
2717 | return -EINVAL; | ||
2718 | } | ||
2719 | |||
2720 | /* Generate uccm_mask for receive */ | ||
2721 | uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */ | ||
2722 | for (i = 0; i < ug_info->numQueuesRx; i++) | ||
2723 | uf_info->uccm_mask |= (UCCE_RXBF_SINGLE_MASK << i); | ||
2724 | |||
2725 | for (i = 0; i < ug_info->numQueuesTx; i++) | ||
2726 | uf_info->uccm_mask |= (UCCE_TXBF_SINGLE_MASK << i); | ||
2727 | /* Initialize the general fast UCC block. */ | ||
2728 | if (ucc_fast_init(uf_info, &uccf)) { | ||
2729 | ugeth_err("%s: Failed to init uccf.", __FUNCTION__); | ||
2730 | ucc_geth_memclean(ugeth); | ||
2731 | return -ENOMEM; | ||
2732 | } | ||
2733 | ugeth->uccf = uccf; | ||
2734 | |||
2735 | switch (ug_info->numThreadsRx) { | ||
2736 | case UCC_GETH_NUM_OF_THREADS_1: | ||
2737 | numThreadsRxNumerical = 1; | ||
2738 | break; | ||
2739 | case UCC_GETH_NUM_OF_THREADS_2: | ||
2740 | numThreadsRxNumerical = 2; | ||
2741 | break; | ||
2742 | case UCC_GETH_NUM_OF_THREADS_4: | ||
2743 | numThreadsRxNumerical = 4; | ||
2744 | break; | ||
2745 | case UCC_GETH_NUM_OF_THREADS_6: | ||
2746 | numThreadsRxNumerical = 6; | ||
2747 | break; | ||
2748 | case UCC_GETH_NUM_OF_THREADS_8: | ||
2749 | numThreadsRxNumerical = 8; | ||
2750 | break; | ||
2751 | default: | ||
2752 | ugeth_err("%s: Bad number of Rx threads value.", __FUNCTION__); | ||
2753 | ucc_geth_memclean(ugeth); | ||
2754 | return -EINVAL; | ||
2755 | break; | ||
2756 | } | ||
2757 | |||
2758 | switch (ug_info->numThreadsTx) { | ||
2759 | case UCC_GETH_NUM_OF_THREADS_1: | ||
2760 | numThreadsTxNumerical = 1; | ||
2761 | break; | ||
2762 | case UCC_GETH_NUM_OF_THREADS_2: | ||
2763 | numThreadsTxNumerical = 2; | ||
2764 | break; | ||
2765 | case UCC_GETH_NUM_OF_THREADS_4: | ||
2766 | numThreadsTxNumerical = 4; | ||
2767 | break; | ||
2768 | case UCC_GETH_NUM_OF_THREADS_6: | ||
2769 | numThreadsTxNumerical = 6; | ||
2770 | break; | ||
2771 | case UCC_GETH_NUM_OF_THREADS_8: | ||
2772 | numThreadsTxNumerical = 8; | ||
2773 | break; | ||
2774 | default: | ||
2775 | ugeth_err("%s: Bad number of Tx threads value.", __FUNCTION__); | ||
2776 | ucc_geth_memclean(ugeth); | ||
2777 | return -EINVAL; | ||
2778 | break; | ||
2779 | } | ||
2780 | |||
2781 | /* Calculate rx_extended_features */ | ||
2782 | ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck || | ||
2783 | ug_info->ipAddressAlignment || | ||
2784 | (ug_info->numStationAddresses != | ||
2785 | UCC_GETH_NUM_OF_STATION_ADDRESSES_1); | ||
2786 | |||
2787 | ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features || | ||
2788 | (ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP) | ||
2789 | || (ug_info->vlanOperationNonTagged != | ||
2790 | UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP); | ||
2791 | |||
2792 | uf_regs = uccf->uf_regs; | ||
2793 | ug_regs = (ucc_geth_t *) (uccf->uf_regs); | ||
2794 | ugeth->ug_regs = ug_regs; | ||
2795 | |||
2796 | init_default_reg_vals(&uf_regs->upsmr, | ||
2797 | &ug_regs->maccfg1, &ug_regs->maccfg2); | ||
2798 | |||
2799 | /* Set UPSMR */ | ||
2800 | /* For more details see the hardware spec. */ | ||
2801 | init_rx_parameters(ug_info->bro, | ||
2802 | ug_info->rsh, ug_info->pro, &uf_regs->upsmr); | ||
2803 | |||
2804 | /* We're going to ignore other registers for now, */ | ||
2805 | /* except as needed to get up and running */ | ||
2806 | |||
2807 | /* Set MACCFG1 */ | ||
2808 | /* For more details see the hardware spec. */ | ||
2809 | init_flow_control_params(ug_info->aufc, | ||
2810 | ug_info->receiveFlowControl, | ||
2811 | 1, | ||
2812 | ug_info->pausePeriod, | ||
2813 | ug_info->extensionField, | ||
2814 | &uf_regs->upsmr, | ||
2815 | &ug_regs->uempr, &ug_regs->maccfg1); | ||
2816 | |||
2817 | maccfg1 = in_be32(&ug_regs->maccfg1); | ||
2818 | maccfg1 |= MACCFG1_ENABLE_RX; | ||
2819 | maccfg1 |= MACCFG1_ENABLE_TX; | ||
2820 | out_be32(&ug_regs->maccfg1, maccfg1); | ||
2821 | |||
2822 | /* Set IPGIFG */ | ||
2823 | /* For more details see the hardware spec. */ | ||
2824 | ret_val = init_inter_frame_gap_params(ug_info->nonBackToBackIfgPart1, | ||
2825 | ug_info->nonBackToBackIfgPart2, | ||
2826 | ug_info-> | ||
2827 | miminumInterFrameGapEnforcement, | ||
2828 | ug_info->backToBackInterFrameGap, | ||
2829 | &ug_regs->ipgifg); | ||
2830 | if (ret_val != 0) { | ||
2831 | ugeth_err("%s: IPGIFG initialization parameter too large.", | ||
2832 | __FUNCTION__); | ||
2833 | ucc_geth_memclean(ugeth); | ||
2834 | return ret_val; | ||
2835 | } | ||
2836 | |||
2837 | /* Set HAFDUP */ | ||
2838 | /* For more details see the hardware spec. */ | ||
2839 | ret_val = init_half_duplex_params(ug_info->altBeb, | ||
2840 | ug_info->backPressureNoBackoff, | ||
2841 | ug_info->noBackoff, | ||
2842 | ug_info->excessDefer, | ||
2843 | ug_info->altBebTruncation, | ||
2844 | ug_info->maxRetransmission, | ||
2845 | ug_info->collisionWindow, | ||
2846 | &ug_regs->hafdup); | ||
2847 | if (ret_val != 0) { | ||
2848 | ugeth_err("%s: Half Duplex initialization parameter too large.", | ||
2849 | __FUNCTION__); | ||
2850 | ucc_geth_memclean(ugeth); | ||
2851 | return ret_val; | ||
2852 | } | ||
2853 | |||
2854 | /* Set IFSTAT */ | ||
2855 | /* For more details see the hardware spec. */ | ||
2856 | /* Read only - resets upon read */ | ||
2857 | ifstat = in_be32(&ug_regs->ifstat); | ||
2858 | |||
2859 | /* Clear UEMPR */ | ||
2860 | /* For more details see the hardware spec. */ | ||
2861 | out_be32(&ug_regs->uempr, 0); | ||
2862 | |||
2863 | /* Set UESCR */ | ||
2864 | /* For more details see the hardware spec. */ | ||
2865 | init_hw_statistics_gathering_mode((ug_info->statisticsMode & | ||
2866 | UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE), | ||
2867 | 0, &uf_regs->upsmr, &ug_regs->uescr); | ||
2868 | |||
2869 | /* Allocate Tx bds */ | ||
2870 | for (j = 0; j < ug_info->numQueuesTx; j++) { | ||
2871 | /* Allocate in multiples of | ||
2872 | UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT, | ||
2873 | according to spec */ | ||
2874 | length = ((ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD) | ||
2875 | / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) | ||
2876 | * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; | ||
2877 | if ((ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD) % | ||
2878 | UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) | ||
2879 | length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; | ||
2880 | if (uf_info->bd_mem_part == MEM_PART_SYSTEM) { | ||
2881 | u32 align = 4; | ||
2882 | if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4) | ||
2883 | align = UCC_GETH_TX_BD_RING_ALIGNMENT; | ||
2884 | ugeth->tx_bd_ring_offset[j] = | ||
2885 | (u32) (kmalloc((u32) (length + align), | ||
2886 | GFP_KERNEL)); | ||
2887 | if (ugeth->tx_bd_ring_offset[j] != 0) | ||
2888 | ugeth->p_tx_bd_ring[j] = | ||
2889 | (void*)((ugeth->tx_bd_ring_offset[j] + | ||
2890 | align) & ~(align - 1)); | ||
2891 | } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { | ||
2892 | ugeth->tx_bd_ring_offset[j] = | ||
2893 | qe_muram_alloc(length, | ||
2894 | UCC_GETH_TX_BD_RING_ALIGNMENT); | ||
2895 | if (!IS_MURAM_ERR(ugeth->tx_bd_ring_offset[j])) | ||
2896 | ugeth->p_tx_bd_ring[j] = | ||
2897 | (u8 *) qe_muram_addr(ugeth-> | ||
2898 | tx_bd_ring_offset[j]); | ||
2899 | } | ||
2900 | if (!ugeth->p_tx_bd_ring[j]) { | ||
2901 | ugeth_err | ||
2902 | ("%s: Can not allocate memory for Tx bd rings.", | ||
2903 | __FUNCTION__); | ||
2904 | ucc_geth_memclean(ugeth); | ||
2905 | return -ENOMEM; | ||
2906 | } | ||
2907 | /* Zero unused end of bd ring, according to spec */ | ||
2908 | memset(ugeth->p_tx_bd_ring[j] + | ||
2909 | ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD, 0, | ||
2910 | length - ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD); | ||
2911 | } | ||
2912 | |||
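In the MEM_PART_SYSTEM branch above the ring is over-allocated by 'align' bytes and the start pointer is then rounded up with (offset + align) & ~(align - 1), which assumes the alignment is a power of two. A worked example of that rounding (sketch, illustrative values):

	/* Sketch only: the align-up expression used for the BD rings above.
	 * Example: addr = 0x1234, align = 32 -> (0x1234 + 0x20) & ~0x1f = 0x1240. */
	static inline unsigned long example_align_up(unsigned long addr,
						     unsigned long align)
	{
		return (addr + align) & ~(align - 1);
	}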
2913 | /* Allocate Rx bds */ | ||
2914 | for (j = 0; j < ug_info->numQueuesRx; j++) { | ||
2915 | length = ug_info->bdRingLenRx[j] * UCC_GETH_SIZE_OF_BD; | ||
2916 | if (uf_info->bd_mem_part == MEM_PART_SYSTEM) { | ||
2917 | u32 align = 4; | ||
2918 | if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4) | ||
2919 | align = UCC_GETH_RX_BD_RING_ALIGNMENT; | ||
2920 | ugeth->rx_bd_ring_offset[j] = | ||
2921 | (u32) (kmalloc((u32) (length + align), GFP_KERNEL)); | ||
2922 | if (ugeth->rx_bd_ring_offset[j] != 0) | ||
2923 | ugeth->p_rx_bd_ring[j] = | ||
2924 | (void*)((ugeth->rx_bd_ring_offset[j] + | ||
2925 | align) & ~(align - 1)); | ||
2926 | } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { | ||
2927 | ugeth->rx_bd_ring_offset[j] = | ||
2928 | qe_muram_alloc(length, | ||
2929 | UCC_GETH_RX_BD_RING_ALIGNMENT); | ||
2930 | if (!IS_MURAM_ERR(ugeth->rx_bd_ring_offset[j])) | ||
2931 | ugeth->p_rx_bd_ring[j] = | ||
2932 | (u8 *) qe_muram_addr(ugeth-> | ||
2933 | rx_bd_ring_offset[j]); | ||
2934 | } | ||
2935 | if (!ugeth->p_rx_bd_ring[j]) { | ||
2936 | ugeth_err | ||
2937 | ("%s: Can not allocate memory for Rx bd rings.", | ||
2938 | __FUNCTION__); | ||
2939 | ucc_geth_memclean(ugeth); | ||
2940 | return -ENOMEM; | ||
2941 | } | ||
2942 | } | ||
2943 | |||
2944 | /* Init Tx bds */ | ||
2945 | for (j = 0; j < ug_info->numQueuesTx; j++) { | ||
2946 | /* Setup the skbuff rings */ | ||
2947 | ugeth->tx_skbuff[j] = | ||
2948 | (struct sk_buff **)kmalloc(sizeof(struct sk_buff *) * | ||
2949 | ugeth->ug_info->bdRingLenTx[j], | ||
2950 | GFP_KERNEL); | ||
2951 | |||
2952 | if (ugeth->tx_skbuff[j] == NULL) { | ||
2953 | ugeth_err("%s: Could not allocate tx_skbuff", | ||
2954 | __FUNCTION__); | ||
2955 | ucc_geth_memclean(ugeth); | ||
2956 | return -ENOMEM; | ||
2957 | } | ||
2958 | |||
2959 | for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++) | ||
2960 | ugeth->tx_skbuff[j][i] = NULL; | ||
2961 | |||
2962 | ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0; | ||
2963 | bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j]; | ||
2964 | for (i = 0; i < ug_info->bdRingLenTx[j]; i++) { | ||
2965 | BD_BUFFER_CLEAR(bd); | ||
2966 | BD_STATUS_AND_LENGTH_SET(bd, 0); | ||
2967 | bd += UCC_GETH_SIZE_OF_BD; | ||
2968 | } | ||
2969 | bd -= UCC_GETH_SIZE_OF_BD; | ||
2970 | BD_STATUS_AND_LENGTH_SET(bd, T_W);/* for last BD set Wrap bit */ | ||
2971 | } | ||
2972 | |||
2973 | /* Init Rx bds */ | ||
2974 | for (j = 0; j < ug_info->numQueuesRx; j++) { | ||
2975 | /* Setup the skbuff rings */ | ||
2976 | ugeth->rx_skbuff[j] = | ||
2977 | (struct sk_buff **)kmalloc(sizeof(struct sk_buff *) * | ||
2978 | ugeth->ug_info->bdRingLenRx[j], | ||
2979 | GFP_KERNEL); | ||
2980 | |||
2981 | if (ugeth->rx_skbuff[j] == NULL) { | ||
2982 | ugeth_err("%s: Could not allocate rx_skbuff", | ||
2983 | __FUNCTION__); | ||
2984 | ucc_geth_memclean(ugeth); | ||
2985 | return -ENOMEM; | ||
2986 | } | ||
2987 | |||
2988 | for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++) | ||
2989 | ugeth->rx_skbuff[j][i] = NULL; | ||
2990 | |||
2991 | ugeth->skb_currx[j] = 0; | ||
2992 | bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j]; | ||
2993 | for (i = 0; i < ug_info->bdRingLenRx[j]; i++) { | ||
2994 | BD_STATUS_AND_LENGTH_SET(bd, R_I); | ||
2995 | BD_BUFFER_CLEAR(bd); | ||
2996 | bd += UCC_GETH_SIZE_OF_BD; | ||
2997 | } | ||
2998 | bd -= UCC_GETH_SIZE_OF_BD; | ||
2999 | BD_STATUS_AND_LENGTH_SET(bd, R_W);/* for last BD set Wrap bit */ | ||
3000 | } | ||
3001 | |||
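Both ring setups above end by setting the wrap bit (T_W or R_W) on the last descriptor, which is how the hardware and the software consumers know where the flat array wraps back to its base. A hypothetical helper showing how a walker would advance through such a ring, using the same BD macros:

	/* Sketch only: step to the next buffer descriptor, wrapping to the ring
	 * base when the current BD carries the wrap bit (T_W or R_W). */
	static u8 *example_next_bd(u8 *bd, u8 *ring_base, u32 wrap_bit)
	{
		if (BD_STATUS_AND_LENGTH(bd) & wrap_bit)
			return ring_base;
		return bd + UCC_GETH_SIZE_OF_BD;
	}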
3002 | /* | ||
3003 | * Global PRAM | ||
3004 | */ | ||
3005 | /* Tx global PRAM */ | ||
3006 | /* Allocate global tx parameter RAM page */ | ||
3007 | ugeth->tx_glbl_pram_offset = | ||
3008 | qe_muram_alloc(sizeof(ucc_geth_tx_global_pram_t), | ||
3009 | UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT); | ||
3010 | if (IS_MURAM_ERR(ugeth->tx_glbl_pram_offset)) { | ||
3011 | ugeth_err | ||
3012 | ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.", | ||
3013 | __FUNCTION__); | ||
3014 | ucc_geth_memclean(ugeth); | ||
3015 | return -ENOMEM; | ||
3016 | } | ||
3017 | ugeth->p_tx_glbl_pram = | ||
3018 | (ucc_geth_tx_global_pram_t *) qe_muram_addr(ugeth-> | ||
3019 | tx_glbl_pram_offset); | ||
3020 | /* Zero out p_tx_glbl_pram */ | ||
3021 | memset(ugeth->p_tx_glbl_pram, 0, sizeof(ucc_geth_tx_global_pram_t)); | ||
3022 | |||
3023 | /* Fill global PRAM */ | ||
3024 | |||
3025 | /* TQPTR */ | ||
3026 | /* Size varies with number of Tx threads */ | ||
3027 | ugeth->thread_dat_tx_offset = | ||
3028 | qe_muram_alloc(numThreadsTxNumerical * | ||
3029 | sizeof(ucc_geth_thread_data_tx_t) + | ||
3030 | 32 * (numThreadsTxNumerical == 1), | ||
3031 | UCC_GETH_THREAD_DATA_ALIGNMENT); | ||
3032 | if (IS_MURAM_ERR(ugeth->thread_dat_tx_offset)) { | ||
3033 | ugeth_err | ||
3034 | ("%s: Can not allocate DPRAM memory for p_thread_data_tx.", | ||
3035 | __FUNCTION__); | ||
3036 | ucc_geth_memclean(ugeth); | ||
3037 | return -ENOMEM; | ||
3038 | } | ||
3039 | |||
3040 | ugeth->p_thread_data_tx = | ||
3041 | (ucc_geth_thread_data_tx_t *) qe_muram_addr(ugeth-> | ||
3042 | thread_dat_tx_offset); | ||
3043 | out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset); | ||
3044 | |||
3045 | /* vtagtable */ | ||
3046 | for (i = 0; i < UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX; i++) | ||
3047 | out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i], | ||
3048 | ug_info->vtagtable[i]); | ||
3049 | |||
3050 | /* iphoffset */ | ||
3051 | for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++) | ||
3052 | ugeth->p_tx_glbl_pram->iphoffset[i] = ug_info->iphoffset[i]; | ||
3053 | |||
3054 | /* SQPTR */ | ||
3055 | /* Size varies with number of Tx queues */ | ||
3056 | ugeth->send_q_mem_reg_offset = | ||
3057 | qe_muram_alloc(ug_info->numQueuesTx * | ||
3058 | sizeof(ucc_geth_send_queue_qd_t), | ||
3059 | UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT); | ||
3060 | if (IS_MURAM_ERR(ugeth->send_q_mem_reg_offset)) { | ||
3061 | ugeth_err | ||
3062 | ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.", | ||
3063 | __FUNCTION__); | ||
3064 | ucc_geth_memclean(ugeth); | ||
3065 | return -ENOMEM; | ||
3066 | } | ||
3067 | |||
3068 | ugeth->p_send_q_mem_reg = | ||
3069 | (ucc_geth_send_queue_mem_region_t *) qe_muram_addr(ugeth-> | ||
3070 | send_q_mem_reg_offset); | ||
3071 | out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset); | ||
3072 | |||
3073 | /* Setup the table */ | ||
3074 | /* Assume BD rings are already established */ | ||
3075 | for (i = 0; i < ug_info->numQueuesTx; i++) { | ||
3076 | endOfRing = | ||
3077 | ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] - | ||
3078 | 1) * UCC_GETH_SIZE_OF_BD; | ||
3079 | if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) { | ||
3080 | out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base, | ||
3081 | (u32) virt_to_phys(ugeth->p_tx_bd_ring[i])); | ||
3082 | out_be32(&ugeth->p_send_q_mem_reg->sqqd[i]. | ||
3083 | last_bd_completed_address, | ||
3084 | (u32) virt_to_phys(endOfRing)); | ||
3085 | } else if (ugeth->ug_info->uf_info.bd_mem_part == | ||
3086 | MEM_PART_MURAM) { | ||
3087 | out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base, | ||
3088 | (u32) immrbar_virt_to_phys(ugeth-> | ||
3089 | p_tx_bd_ring[i])); | ||
3090 | out_be32(&ugeth->p_send_q_mem_reg->sqqd[i]. | ||
3091 | last_bd_completed_address, | ||
3092 | (u32) immrbar_virt_to_phys(endOfRing)); | ||
3093 | } | ||
3094 | } | ||
3095 | |||
3096 | /* schedulerbasepointer */ | ||
3097 | |||
3098 | if (ug_info->numQueuesTx > 1) { | ||
3099 | /* scheduler exists only if more than 1 tx queue */ | ||
3100 | ugeth->scheduler_offset = | ||
3101 | qe_muram_alloc(sizeof(ucc_geth_scheduler_t), | ||
3102 | UCC_GETH_SCHEDULER_ALIGNMENT); | ||
3103 | if (IS_MURAM_ERR(ugeth->scheduler_offset)) { | ||
3104 | ugeth_err | ||
3105 | ("%s: Can not allocate DPRAM memory for p_scheduler.", | ||
3106 | __FUNCTION__); | ||
3107 | ucc_geth_memclean(ugeth); | ||
3108 | return -ENOMEM; | ||
3109 | } | ||
3110 | |||
3111 | ugeth->p_scheduler = | ||
3112 | (ucc_geth_scheduler_t *) qe_muram_addr(ugeth-> | ||
3113 | scheduler_offset); | ||
3114 | out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer, | ||
3115 | ugeth->scheduler_offset); | ||
3116 | /* Zero out p_scheduler */ | ||
3117 | memset(ugeth->p_scheduler, 0, sizeof(ucc_geth_scheduler_t)); | ||
3118 | |||
3119 | /* Set values in scheduler */ | ||
3120 | out_be32(&ugeth->p_scheduler->mblinterval, | ||
3121 | ug_info->mblinterval); | ||
3122 | out_be16(&ugeth->p_scheduler->nortsrbytetime, | ||
3123 | ug_info->nortsrbytetime); | ||
3124 | ugeth->p_scheduler->fracsiz = ug_info->fracsiz; | ||
3125 | ugeth->p_scheduler->strictpriorityq = ug_info->strictpriorityq; | ||
3126 | ugeth->p_scheduler->txasap = ug_info->txasap; | ||
3127 | ugeth->p_scheduler->extrabw = ug_info->extrabw; | ||
3128 | for (i = 0; i < NUM_TX_QUEUES; i++) | ||
3129 | ugeth->p_scheduler->weightfactor[i] = | ||
3130 | ug_info->weightfactor[i]; | ||
3131 | |||
3132 | /* Set pointers to cpucount registers in scheduler */ | ||
3133 | ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0); | ||
3134 | ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1); | ||
3135 | ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2); | ||
3136 | ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3); | ||
3137 | ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4); | ||
3138 | ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5); | ||
3139 | ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6); | ||
3140 | ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7); | ||
3141 | } | ||
3142 | |||
3143 | /* schedulerbasepointer */ | ||
3144 | /* TxRMON_PTR (statistics) */ | ||
3145 | if (ug_info-> | ||
3146 | statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) { | ||
3147 | ugeth->tx_fw_statistics_pram_offset = | ||
3148 | qe_muram_alloc(sizeof | ||
3149 | (ucc_geth_tx_firmware_statistics_pram_t), | ||
3150 | UCC_GETH_TX_STATISTICS_ALIGNMENT); | ||
3151 | if (IS_MURAM_ERR(ugeth->tx_fw_statistics_pram_offset)) { | ||
3152 | ugeth_err | ||
3153 | ("%s: Can not allocate DPRAM memory for" | ||
3154 | " p_tx_fw_statistics_pram.", __FUNCTION__); | ||
3155 | ucc_geth_memclean(ugeth); | ||
3156 | return -ENOMEM; | ||
3157 | } | ||
3158 | ugeth->p_tx_fw_statistics_pram = | ||
3159 | (ucc_geth_tx_firmware_statistics_pram_t *) | ||
3160 | qe_muram_addr(ugeth->tx_fw_statistics_pram_offset); | ||
3161 | /* Zero out p_tx_fw_statistics_pram */ | ||
3162 | memset(ugeth->p_tx_fw_statistics_pram, | ||
3163 | 0, sizeof(ucc_geth_tx_firmware_statistics_pram_t)); | ||
3164 | } | ||
3165 | |||
3166 | /* temoder */ | ||
3167 | /* Already has speed set */ | ||
3168 | |||
3169 | if (ug_info->numQueuesTx > 1) | ||
3170 | temoder |= TEMODER_SCHEDULER_ENABLE; | ||
3171 | if (ug_info->ipCheckSumGenerate) | ||
3172 | temoder |= TEMODER_IP_CHECKSUM_GENERATE; | ||
3173 | temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT); | ||
3174 | out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder); | ||
3175 | |||
3176 | test = in_be16(&ugeth->p_tx_glbl_pram->temoder); | ||
3177 | |||
3178 | /* Function code register value to be used later */ | ||
3179 | function_code = QE_BMR_BYTE_ORDER_BO_MOT | UCC_FAST_FUNCTION_CODE_GBL; | ||
3180 | /* Required for QE */ | ||
3181 | |||
3182 | /* function code register */ | ||
3183 | out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24); | ||
3184 | |||
3185 | /* Rx global PRAM */ | ||
3186 | /* Allocate global rx parameter RAM page */ | ||
3187 | ugeth->rx_glbl_pram_offset = | ||
3188 | qe_muram_alloc(sizeof(ucc_geth_rx_global_pram_t), | ||
3189 | UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT); | ||
3190 | if (IS_MURAM_ERR(ugeth->rx_glbl_pram_offset)) { | ||
3191 | ugeth_err | ||
3192 | ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.", | ||
3193 | __FUNCTION__); | ||
3194 | ucc_geth_memclean(ugeth); | ||
3195 | return -ENOMEM; | ||
3196 | } | ||
3197 | ugeth->p_rx_glbl_pram = | ||
3198 | (ucc_geth_rx_global_pram_t *) qe_muram_addr(ugeth-> | ||
3199 | rx_glbl_pram_offset); | ||
3200 | /* Zero out p_rx_glbl_pram */ | ||
3201 | memset(ugeth->p_rx_glbl_pram, 0, sizeof(ucc_geth_rx_global_pram_t)); | ||
3202 | |||
3203 | /* Fill global PRAM */ | ||
3204 | |||
3205 | /* RQPTR */ | ||
3206 | /* Size varies with number of Rx threads */ | ||
3207 | ugeth->thread_dat_rx_offset = | ||
3208 | qe_muram_alloc(numThreadsRxNumerical * | ||
3209 | sizeof(ucc_geth_thread_data_rx_t), | ||
3210 | UCC_GETH_THREAD_DATA_ALIGNMENT); | ||
3211 | if (IS_MURAM_ERR(ugeth->thread_dat_rx_offset)) { | ||
3212 | ugeth_err | ||
3213 | ("%s: Can not allocate DPRAM memory for p_thread_data_rx.", | ||
3214 | __FUNCTION__); | ||
3215 | ucc_geth_memclean(ugeth); | ||
3216 | return -ENOMEM; | ||
3217 | } | ||
3218 | |||
3219 | ugeth->p_thread_data_rx = | ||
3220 | (ucc_geth_thread_data_rx_t *) qe_muram_addr(ugeth-> | ||
3221 | thread_dat_rx_offset); | ||
3222 | out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset); | ||
3223 | |||
3224 | /* typeorlen */ | ||
3225 | out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen); | ||
3226 | |||
3227 | /* rxrmonbaseptr (statistics) */ | ||
3228 | if (ug_info-> | ||
3229 | statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) { | ||
3230 | ugeth->rx_fw_statistics_pram_offset = | ||
3231 | qe_muram_alloc(sizeof | ||
3232 | (ucc_geth_rx_firmware_statistics_pram_t), | ||
3233 | UCC_GETH_RX_STATISTICS_ALIGNMENT); | ||
3234 | if (IS_MURAM_ERR(ugeth->rx_fw_statistics_pram_offset)) { | ||
3235 | ugeth_err | ||
3236 | ("%s: Can not allocate DPRAM memory for" | ||
3237 | " p_rx_fw_statistics_pram.", __FUNCTION__); | ||
3238 | ucc_geth_memclean(ugeth); | ||
3239 | return -ENOMEM; | ||
3240 | } | ||
3241 | ugeth->p_rx_fw_statistics_pram = | ||
3242 | (ucc_geth_rx_firmware_statistics_pram_t *) | ||
3243 | qe_muram_addr(ugeth->rx_fw_statistics_pram_offset); | ||
3244 | /* Zero out p_rx_fw_statistics_pram */ | ||
3245 | memset(ugeth->p_rx_fw_statistics_pram, 0, | ||
3246 | sizeof(ucc_geth_rx_firmware_statistics_pram_t)); | ||
3247 | } | ||
3248 | |||
3249 | /* intCoalescingPtr */ | ||
3250 | |||
3251 | /* Size varies with number of Rx queues */ | ||
3252 | ugeth->rx_irq_coalescing_tbl_offset = | ||
3253 | qe_muram_alloc(ug_info->numQueuesRx * | ||
3254 | sizeof(ucc_geth_rx_interrupt_coalescing_entry_t), | ||
3255 | UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT); | ||
3256 | if (IS_MURAM_ERR(ugeth->rx_irq_coalescing_tbl_offset)) { | ||
3257 | ugeth_err | ||
3258 | ("%s: Can not allocate DPRAM memory for" | ||
3259 | " p_rx_irq_coalescing_tbl.", __FUNCTION__); | ||
3260 | ucc_geth_memclean(ugeth); | ||
3261 | return -ENOMEM; | ||
3262 | } | ||
3263 | |||
3264 | ugeth->p_rx_irq_coalescing_tbl = | ||
3265 | (ucc_geth_rx_interrupt_coalescing_table_t *) | ||
3266 | qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset); | ||
3267 | out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr, | ||
3268 | ugeth->rx_irq_coalescing_tbl_offset); | ||
3269 | |||
3270 | /* Fill interrupt coalescing table */ | ||
3271 | for (i = 0; i < ug_info->numQueuesRx; i++) { | ||
3272 | out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i]. | ||
3273 | interruptcoalescingmaxvalue, | ||
3274 | ug_info->interruptcoalescingmaxvalue[i]); | ||
3275 | out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i]. | ||
3276 | interruptcoalescingcounter, | ||
3277 | ug_info->interruptcoalescingmaxvalue[i]); | ||
3278 | } | ||
3279 | |||
3280 | /* MRBLR */ | ||
3281 | init_max_rx_buff_len(uf_info->max_rx_buf_length, | ||
3282 | &ugeth->p_rx_glbl_pram->mrblr); | ||
3283 | /* MFLR */ | ||
3284 | out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength); | ||
3285 | /* MINFLR */ | ||
3286 | init_min_frame_len(ug_info->minFrameLength, | ||
3287 | &ugeth->p_rx_glbl_pram->minflr, | ||
3288 | &ugeth->p_rx_glbl_pram->mrblr); | ||
3289 | /* MAXD1 */ | ||
3290 | out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length); | ||
3291 | /* MAXD2 */ | ||
3292 | out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length); | ||
3293 | |||
3294 | /* l2qt */ | ||
3295 | l2qt = 0; | ||
3296 | for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) | ||
3297 | l2qt |= (ug_info->l2qt[i] << (28 - 4 * i)); | ||
3298 | out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt); | ||
3299 | |||
3300 | /* l3qt */ | ||
3301 | for (j = 0; j < UCC_GETH_IP_PRIORITY_MAX; j += 8) { | ||
3302 | l3qt = 0; | ||
3303 | for (i = 0; i < 8; i++) | ||
3304 | l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i)); | ||
3305 | out_be32(&ugeth->p_rx_glbl_pram->l3qt[j], l3qt); | ||
3306 | } | ||
3307 | |||
3308 | /* vlantype */ | ||
3309 | out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype); | ||
3310 | |||
3311 | /* vlantci */ | ||
3312 | out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci); | ||
3313 | |||
3314 | /* ecamptr */ | ||
3315 | out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr); | ||
3316 | |||
3317 | /* RBDQPTR */ | ||
3318 | /* Size varies with number of Rx queues */ | ||
3319 | ugeth->rx_bd_qs_tbl_offset = | ||
3320 | qe_muram_alloc(ug_info->numQueuesRx * | ||
3321 | (sizeof(ucc_geth_rx_bd_queues_entry_t) + | ||
3322 | sizeof(ucc_geth_rx_prefetched_bds_t)), | ||
3323 | UCC_GETH_RX_BD_QUEUES_ALIGNMENT); | ||
3324 | if (IS_MURAM_ERR(ugeth->rx_bd_qs_tbl_offset)) { | ||
3325 | ugeth_err | ||
3326 | ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.", | ||
3327 | __FUNCTION__); | ||
3328 | ucc_geth_memclean(ugeth); | ||
3329 | return -ENOMEM; | ||
3330 | } | ||
3331 | |||
3332 | ugeth->p_rx_bd_qs_tbl = | ||
3333 | (ucc_geth_rx_bd_queues_entry_t *) qe_muram_addr(ugeth-> | ||
3334 | rx_bd_qs_tbl_offset); | ||
3335 | out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset); | ||
3336 | /* Zero out p_rx_bd_qs_tbl */ | ||
3337 | memset(ugeth->p_rx_bd_qs_tbl, | ||
3338 | 0, | ||
3339 | ug_info->numQueuesRx * (sizeof(ucc_geth_rx_bd_queues_entry_t) + | ||
3340 | sizeof(ucc_geth_rx_prefetched_bds_t))); | ||
3341 | |||
3342 | /* Setup the table */ | ||
3343 | /* Assume BD rings are already established */ | ||
3344 | for (i = 0; i < ug_info->numQueuesRx; i++) { | ||
3345 | if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) { | ||
3346 | out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, | ||
3347 | (u32) virt_to_phys(ugeth->p_rx_bd_ring[i])); | ||
3348 | } else if (ugeth->ug_info->uf_info.bd_mem_part == | ||
3349 | MEM_PART_MURAM) { | ||
3350 | out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, | ||
3351 | (u32) immrbar_virt_to_phys(ugeth-> | ||
3352 | p_rx_bd_ring[i])); | ||
3353 | } | ||
3354 | /* rest of fields handled by QE */ | ||
3355 | } | ||
3356 | |||
3357 | /* remoder */ | ||
3358 | /* Already has speed set */ | ||
3359 | |||
3360 | if (ugeth->rx_extended_features) | ||
3361 | remoder |= REMODER_RX_EXTENDED_FEATURES; | ||
3362 | if (ug_info->rxExtendedFiltering) | ||
3363 | remoder |= REMODER_RX_EXTENDED_FILTERING; | ||
3364 | if (ug_info->dynamicMaxFrameLength) | ||
3365 | remoder |= REMODER_DYNAMIC_MAX_FRAME_LENGTH; | ||
3366 | if (ug_info->dynamicMinFrameLength) | ||
3367 | remoder |= REMODER_DYNAMIC_MIN_FRAME_LENGTH; | ||
3368 | remoder |= | ||
3369 | ug_info->vlanOperationTagged << REMODER_VLAN_OPERATION_TAGGED_SHIFT; | ||
3370 | remoder |= | ||
3371 | ug_info-> | ||
3372 | vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT; | ||
3373 | remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT; | ||
3374 | remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT); | ||
3375 | if (ug_info->ipCheckSumCheck) | ||
3376 | remoder |= REMODER_IP_CHECKSUM_CHECK; | ||
3377 | if (ug_info->ipAddressAlignment) | ||
3378 | remoder |= REMODER_IP_ADDRESS_ALIGNMENT; | ||
3379 | out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder); | ||
3380 | |||
3381 | /* Note that this function must be called */ | ||
3382 | /* ONLY AFTER p_tx_fw_statistics_pram */ | ||
3383 | /* and p_UccGethRxFirmwareStatisticsPram are allocated! */ | ||
3384 | init_firmware_statistics_gathering_mode((ug_info-> | ||
3385 | statisticsMode & | ||
3386 | UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX), | ||
3387 | (ug_info->statisticsMode & | ||
3388 | UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX), | ||
3389 | &ugeth->p_tx_glbl_pram->txrmonbaseptr, | ||
3390 | ugeth->tx_fw_statistics_pram_offset, | ||
3391 | &ugeth->p_rx_glbl_pram->rxrmonbaseptr, | ||
3392 | ugeth->rx_fw_statistics_pram_offset, | ||
3393 | &ugeth->p_tx_glbl_pram->temoder, | ||
3394 | &ugeth->p_rx_glbl_pram->remoder); | ||
3395 | |||
3396 | /* function code register */ | ||
3397 | ugeth->p_rx_glbl_pram->rstate = function_code; | ||
3398 | |||
3399 | /* initialize extended filtering */ | ||
3400 | if (ug_info->rxExtendedFiltering) { | ||
3401 | if (!ug_info->extendedFilteringChainPointer) { | ||
3402 | ugeth_err("%s: Null Extended Filtering Chain Pointer.", | ||
3403 | __FUNCTION__); | ||
3404 | ucc_geth_memclean(ugeth); | ||
3405 | return -EINVAL; | ||
3406 | } | ||
3407 | |||
3408 | /* Allocate memory for extended filtering Mode Global | ||
3409 | Parameters */ | ||
3410 | ugeth->exf_glbl_param_offset = | ||
3411 | qe_muram_alloc(sizeof(ucc_geth_exf_global_pram_t), | ||
3412 | UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT); | ||
3413 | if (IS_MURAM_ERR(ugeth->exf_glbl_param_offset)) { | ||
3414 | ugeth_err | ||
3415 | ("%s: Can not allocate DPRAM memory for" | ||
3416 | " p_exf_glbl_param.", __FUNCTION__); | ||
3417 | ucc_geth_memclean(ugeth); | ||
3418 | return -ENOMEM; | ||
3419 | } | ||
3420 | |||
3421 | ugeth->p_exf_glbl_param = | ||
3422 | (ucc_geth_exf_global_pram_t *) qe_muram_addr(ugeth-> | ||
3423 | exf_glbl_param_offset); | ||
3424 | out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam, | ||
3425 | ugeth->exf_glbl_param_offset); | ||
3426 | out_be32(&ugeth->p_exf_glbl_param->l2pcdptr, | ||
3427 | (u32) ug_info->extendedFilteringChainPointer); | ||
3428 | |||
3429 | } else { /* initialize 82xx style address filtering */ | ||
3430 | |||
3431 | /* Init individual address recognition registers to disabled */ | ||
3432 | |||
3433 | for (j = 0; j < NUM_OF_PADDRS; j++) | ||
3434 | ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j); | ||
3435 | |||
3436 | /* Create CQs for hash tables */ | ||
3437 | if (ug_info->maxGroupAddrInHash > 0) { | ||
3438 | INIT_LIST_HEAD(&ugeth->group_hash_q); | ||
3439 | } | ||
3440 | if (ug_info->maxIndAddrInHash > 0) { | ||
3441 | INIT_LIST_HEAD(&ugeth->ind_hash_q); | ||
3442 | } | ||
3443 | p_82xx_addr_filt = | ||
3444 | (ucc_geth_82xx_address_filtering_pram_t *) ugeth-> | ||
3445 | p_rx_glbl_pram->addressfiltering; | ||
3446 | |||
3447 | ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth, | ||
3448 | ENET_ADDR_TYPE_GROUP); | ||
3449 | ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth, | ||
3450 | ENET_ADDR_TYPE_INDIVIDUAL); | ||
3451 | } | ||
3452 | |||
3453 | /* | ||
3454 | * Initialize UCC at QE level | ||
3455 | */ | ||
3456 | |||
3457 | command = QE_INIT_TX_RX; | ||
3458 | |||
3459 | /* Allocate shadow InitEnet command parameter structure. | ||
3460 | * This is needed because after the InitEnet command is executed, | ||
3461 | * the structure in DPRAM is released, because DPRAM is a premium | ||
3462 | * resource. | ||
3463 | * This shadow structure keeps a copy of what was done so that the | ||
3464 | * allocated resources can be released when the channel is freed. | ||
3465 | */ | ||
3466 | if (!(ugeth->p_init_enet_param_shadow = | ||
3467 | (ucc_geth_init_pram_t *) kmalloc(sizeof(ucc_geth_init_pram_t), | ||
3468 | GFP_KERNEL))) { | ||
3469 | ugeth_err | ||
3470 | ("%s: Can not allocate memory for" | ||
3471 | " p_UccInitEnetParamShadows.", __FUNCTION__); | ||
3472 | ucc_geth_memclean(ugeth); | ||
3473 | return -ENOMEM; | ||
3474 | } | ||
3475 | /* Zero out *p_init_enet_param_shadow */ | ||
3476 | memset((char *)ugeth->p_init_enet_param_shadow, | ||
3477 | 0, sizeof(ucc_geth_init_pram_t)); | ||
3478 | |||
3479 | /* Fill shadow InitEnet command parameter structure */ | ||
3480 | |||
3481 | ugeth->p_init_enet_param_shadow->resinit1 = | ||
3482 | ENET_INIT_PARAM_MAGIC_RES_INIT1; | ||
3483 | ugeth->p_init_enet_param_shadow->resinit2 = | ||
3484 | ENET_INIT_PARAM_MAGIC_RES_INIT2; | ||
3485 | ugeth->p_init_enet_param_shadow->resinit3 = | ||
3486 | ENET_INIT_PARAM_MAGIC_RES_INIT3; | ||
3487 | ugeth->p_init_enet_param_shadow->resinit4 = | ||
3488 | ENET_INIT_PARAM_MAGIC_RES_INIT4; | ||
3489 | ugeth->p_init_enet_param_shadow->resinit5 = | ||
3490 | ENET_INIT_PARAM_MAGIC_RES_INIT5; | ||
3491 | ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= | ||
3492 | ((u32) ug_info->numThreadsRx) << ENET_INIT_PARAM_RGF_SHIFT; | ||
3493 | ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= | ||
3494 | ((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT; | ||
3495 | |||
3496 | ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= | ||
3497 | ugeth->rx_glbl_pram_offset | ug_info->riscRx; | ||
3498 | if ((ug_info->largestexternallookupkeysize != | ||
3499 | QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE) | ||
3500 | && (ug_info->largestexternallookupkeysize != | ||
3501 | QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES) | ||
3502 | && (ug_info->largestexternallookupkeysize != | ||
3503 | QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) { | ||
3504 | ugeth_err("%s: Invalid largest External Lookup Key Size.", | ||
3505 | __FUNCTION__); | ||
3506 | ucc_geth_memclean(ugeth); | ||
3507 | return -EINVAL; | ||
3508 | } | ||
3509 | ugeth->p_init_enet_param_shadow->largestexternallookupkeysize = | ||
3510 | ug_info->largestexternallookupkeysize; | ||
3511 | size = sizeof(ucc_geth_thread_rx_pram_t); | ||
3512 | if (ug_info->rxExtendedFiltering) { | ||
3513 | size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING; | ||
3514 | if (ug_info->largestexternallookupkeysize == | ||
3515 | QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES) | ||
3516 | size += | ||
3517 | THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8; | ||
3518 | if (ug_info->largestexternallookupkeysize == | ||
3519 | QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES) | ||
3520 | size += | ||
3521 | THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16; | ||
3522 | } | ||
3523 | |||
3524 | if ((ret_val = fill_init_enet_entries(ugeth, &(ugeth-> | ||
3525 | p_init_enet_param_shadow->rxthread[0]), | ||
3526 | (u8) (numThreadsRxNumerical + 1) | ||
3527 | /* Rx needs one extra for terminator */ | ||
3528 | , size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT, | ||
3529 | ug_info->riscRx, 1)) != 0) { | ||
3530 | ugeth_err("%s: Can not fill p_init_enet_param_shadow.", | ||
3531 | __FUNCTION__); | ||
3532 | ucc_geth_memclean(ugeth); | ||
3533 | return ret_val; | ||
3534 | } | ||
3535 | |||
3536 | ugeth->p_init_enet_param_shadow->txglobal = | ||
3537 | ugeth->tx_glbl_pram_offset | ug_info->riscTx; | ||
3538 | if ((ret_val = | ||
3539 | fill_init_enet_entries(ugeth, | ||
3540 | &(ugeth->p_init_enet_param_shadow-> | ||
3541 | txthread[0]), numThreadsTxNumerical, | ||
3542 | sizeof(ucc_geth_thread_tx_pram_t), | ||
3543 | UCC_GETH_THREAD_TX_PRAM_ALIGNMENT, | ||
3544 | ug_info->riscTx, 0)) != 0) { | ||
3545 | ugeth_err("%s: Can not fill p_init_enet_param_shadow.", | ||
3546 | __FUNCTION__); | ||
3547 | ucc_geth_memclean(ugeth); | ||
3548 | return ret_val; | ||
3549 | } | ||
3550 | |||
3551 | /* Load Rx bds with buffers */ | ||
3552 | for (i = 0; i < ug_info->numQueuesRx; i++) { | ||
3553 | if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) { | ||
3554 | ugeth_err("%s: Can not fill Rx bds with buffers.", | ||
3555 | __FUNCTION__); | ||
3556 | ucc_geth_memclean(ugeth); | ||
3557 | return ret_val; | ||
3558 | } | ||
3559 | } | ||
3560 | |||
3561 | /* Allocate InitEnet command parameter structure */ | ||
3562 | init_enet_pram_offset = qe_muram_alloc(sizeof(ucc_geth_init_pram_t), 4); | ||
3563 | if (IS_MURAM_ERR(init_enet_pram_offset)) { | ||
3564 | ugeth_err | ||
3565 | ("%s: Can not allocate DPRAM memory for p_init_enet_pram.", | ||
3566 | __FUNCTION__); | ||
3567 | ucc_geth_memclean(ugeth); | ||
3568 | return -ENOMEM; | ||
3569 | } | ||
3570 | p_init_enet_pram = | ||
3571 | (ucc_geth_init_pram_t *) qe_muram_addr(init_enet_pram_offset); | ||
3572 | |||
3573 | /* Copy shadow InitEnet command parameter structure into PRAM */ | ||
3574 | p_init_enet_pram->resinit1 = ugeth->p_init_enet_param_shadow->resinit1; | ||
3575 | p_init_enet_pram->resinit2 = ugeth->p_init_enet_param_shadow->resinit2; | ||
3576 | p_init_enet_pram->resinit3 = ugeth->p_init_enet_param_shadow->resinit3; | ||
3577 | p_init_enet_pram->resinit4 = ugeth->p_init_enet_param_shadow->resinit4; | ||
3578 | out_be16(&p_init_enet_pram->resinit5, | ||
3579 | ugeth->p_init_enet_param_shadow->resinit5); | ||
3580 | p_init_enet_pram->largestexternallookupkeysize = | ||
3581 | ugeth->p_init_enet_param_shadow->largestexternallookupkeysize; | ||
3582 | out_be32(&p_init_enet_pram->rgftgfrxglobal, | ||
3583 | ugeth->p_init_enet_param_shadow->rgftgfrxglobal); | ||
3584 | for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++) | ||
3585 | out_be32(&p_init_enet_pram->rxthread[i], | ||
3586 | ugeth->p_init_enet_param_shadow->rxthread[i]); | ||
3587 | out_be32(&p_init_enet_pram->txglobal, | ||
3588 | ugeth->p_init_enet_param_shadow->txglobal); | ||
3589 | for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_TX; i++) | ||
3590 | out_be32(&p_init_enet_pram->txthread[i], | ||
3591 | ugeth->p_init_enet_param_shadow->txthread[i]); | ||
3592 | |||
3593 | /* Issue QE command */ | ||
3594 | cecr_subblock = | ||
3595 | ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); | ||
3596 | qe_issue_cmd(command, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET, | ||
3597 | init_enet_pram_offset); | ||
3598 | |||
3599 | /* Free InitEnet command parameter */ | ||
3600 | qe_muram_free(init_enet_pram_offset); | ||
3601 | |||
3602 | return 0; | ||
3603 | } | ||
3604 | |||
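The comment at the top of the InitEnet block in the function above explains why a shadow copy of the command parameters is kept in ordinary kernel memory while the DPRAM copy is freed right after the command is issued. Below is a minimal, self-contained sketch of that build/copy/issue/free pattern; every name in it is a stand-in, not the driver's structures or the QE library's API.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical command parameter block; not the real ucc_geth_init_pram_t. */
    struct init_params {
        int rx_threads;
        int tx_threads;
    };

    static struct init_params scarce_ram;   /* stands in for MURAM/DPRAM */

    static void issue_init_command(struct init_params *p)
    {
        printf("init: %d rx threads, %d tx threads\n", p->rx_threads, p->tx_threads);
    }

    int main(void)
    {
        /* Build the parameters in ordinary memory and keep this copy around
         * so the resources it describes can be torn down later. */
        struct init_params *shadow = calloc(1, sizeof(*shadow));
        if (!shadow)
            return 1;
        shadow->rx_threads = 4;
        shadow->tx_threads = 4;

        /* Borrow the scarce RAM only long enough to issue the command. */
        memcpy(&scarce_ram, shadow, sizeof(scarce_ram));
        issue_init_command(&scarce_ram);
        memset(&scarce_ram, 0, sizeof(scarce_ram)); /* "free" it again */

        free(shadow); /* in the driver this lives until the channel is freed */
        return 0;
    }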
3605 | /* returns a net_device_stats structure pointer */ | ||
3606 | static struct net_device_stats *ucc_geth_get_stats(struct net_device *dev) | ||
3607 | { | ||
3608 | ucc_geth_private_t *ugeth = netdev_priv(dev); | ||
3609 | |||
3610 | return &(ugeth->stats); | ||
3611 | } | ||
3612 | |||
3613 | /* ucc_geth_timeout gets called when a packet has not been | ||
3614 | * transmitted after a set amount of time. | ||
3615 | * For now, assume that clearing out all the structures, and | ||
3616 | * starting over will fix the problem. */ | ||
3617 | static void ucc_geth_timeout(struct net_device *dev) | ||
3618 | { | ||
3619 | ucc_geth_private_t *ugeth = netdev_priv(dev); | ||
3620 | |||
3621 | ugeth_vdbg("%s: IN", __FUNCTION__); | ||
3622 | |||
3623 | ugeth->stats.tx_errors++; | ||
3624 | |||
3625 | ugeth_dump_regs(ugeth); | ||
3626 | |||
3627 | if (dev->flags & IFF_UP) { | ||
3628 | ucc_geth_stop(ugeth); | ||
3629 | ucc_geth_startup(ugeth); | ||
3630 | } | ||
3631 | |||
3632 | netif_schedule(dev); | ||
3633 | } | ||
3634 | |||
3635 | /* This is called by the kernel when a frame is ready for transmission. */ | ||
3636 | /* It is pointed to by the dev->hard_start_xmit function pointer */ | ||
3637 | static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
3638 | { | ||
3639 | ucc_geth_private_t *ugeth = netdev_priv(dev); | ||
3640 | u8 *bd; /* BD pointer */ | ||
3641 | u32 bd_status; | ||
3642 | u8 txQ = 0; | ||
3643 | |||
3644 | ugeth_vdbg("%s: IN", __FUNCTION__); | ||
3645 | |||
3646 | spin_lock_irq(&ugeth->lock); | ||
3647 | |||
3648 | ugeth->stats.tx_bytes += skb->len; | ||
3649 | |||
3650 | /* Start from the next BD that should be filled */ | ||
3651 | bd = ugeth->txBd[txQ]; | ||
3652 | bd_status = BD_STATUS_AND_LENGTH(bd); | ||
3653 | /* Save the skb pointer so we can free it later */ | ||
3654 | ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb; | ||
3655 | |||
3656 | /* Update the current skb pointer (wrapping if this was the last) */ | ||
3657 | ugeth->skb_curtx[txQ] = | ||
3658 | (ugeth->skb_curtx[txQ] + | ||
3659 | 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]); | ||
3660 | |||
3661 | /* set up the buffer descriptor */ | ||
3662 | BD_BUFFER_SET(bd, | ||
3663 | dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE)); | ||
3664 | |||
3665 | //printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); | ||
3666 | |||
3667 | bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len; | ||
3668 | |||
3669 | BD_STATUS_AND_LENGTH_SET(bd, bd_status); | ||
3670 | |||
3671 | dev->trans_start = jiffies; | ||
3672 | |||
3673 | /* Move to next BD in the ring */ | ||
3674 | if (!(bd_status & T_W)) | ||
3675 | ugeth->txBd[txQ] = bd + UCC_GETH_SIZE_OF_BD; | ||
3676 | else | ||
3677 | ugeth->txBd[txQ] = ugeth->p_tx_bd_ring[txQ]; | ||
3678 | |||
3679 | /* If the next BD still needs to be cleaned up, then the bds | ||
3680 | are full. We need to tell the kernel to stop sending us stuff. */ | ||
3681 | if (bd == ugeth->confBd[txQ]) { | ||
3682 | if (!netif_queue_stopped(dev)) | ||
3683 | netif_stop_queue(dev); | ||
3684 | } | ||
3685 | |||
3686 | if (ugeth->p_scheduler) { | ||
3687 | ugeth->cpucount[txQ]++; | ||
3688 | /* Indicate to QE that there are more Tx bds ready for | ||
3689 | transmission */ | ||
3690 | /* This is done by writing a running counter of the bd | ||
3691 | count to the scheduler PRAM. */ | ||
3692 | out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]); | ||
3693 | } | ||
3694 | |||
3695 | spin_unlock_irq(&ugeth->lock); | ||
3696 | |||
3697 | return 0; | ||
3698 | } | ||
3699 | |||
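ucc_geth_start_xmit() above advances skb_curtx with TX_RING_MOD_MASK(), a mask-based wrap that assumes the ring length is a power of two, and steps the BD pointer back to the ring base when the wrap bit (T_W) is set. A tiny standalone sketch of the mask-based wrap, with made-up names:

    #include <stdio.h>

    #define RING_LEN            8               /* hypothetical ring length (power of two) */
    #define RING_MOD_MASK(len)  ((len) - 1)     /* same idea as TX_RING_MOD_MASK() */

    int main(void)
    {
        unsigned int cur = 0;

        for (int i = 0; i < 10; i++) {
            printf("use slot %u\n", cur);
            /* Equivalent to (cur + 1) % RING_LEN because RING_LEN is a power of two. */
            cur = (cur + 1) & RING_MOD_MASK(RING_LEN);
        }
        return 0;
    }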
3700 | static int ucc_geth_rx(ucc_geth_private_t *ugeth, u8 rxQ, int rx_work_limit) | ||
3701 | { | ||
3702 | struct sk_buff *skb; | ||
3703 | u8 *bd; | ||
3704 | u16 length, howmany = 0; | ||
3705 | u32 bd_status; | ||
3706 | u8 *bdBuffer; | ||
3707 | |||
3708 | ugeth_vdbg("%s: IN", __FUNCTION__); | ||
3709 | |||
3710 | spin_lock(&ugeth->lock); | ||
3711 | /* collect received buffers */ | ||
3712 | bd = ugeth->rxBd[rxQ]; | ||
3713 | |||
3714 | bd_status = BD_STATUS_AND_LENGTH(bd); | ||
3715 | |||
3716 | /* while there are received buffers and BD is full (~R_E) */ | ||
3717 | while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) { | ||
3718 | bdBuffer = (u8 *) BD_BUFFER(bd); | ||
3719 | length = (u16) ((bd_status & BD_LENGTH_MASK) - 4); | ||
3720 | skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]]; | ||
3721 | |||
3722 | /* determine whether buffer is first, last, first and last | ||
3723 | (single buffer frame) or middle (not first and not last) */ | ||
3724 | if (!skb || | ||
3725 | (!(bd_status & (R_F | R_L))) || | ||
3726 | (bd_status & R_ERRORS_FATAL)) { | ||
3727 | ugeth_vdbg("%s, %d: ERROR!!! skb - 0x%08x", | ||
3728 | __FUNCTION__, __LINE__, (u32) skb); | ||
3729 | if (skb) | ||
3730 | dev_kfree_skb_any(skb); | ||
3731 | |||
3732 | ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL; | ||
3733 | ugeth->stats.rx_dropped++; | ||
3734 | } else { | ||
3735 | ugeth->stats.rx_packets++; | ||
3736 | howmany++; | ||
3737 | |||
3738 | /* Prep the skb for the packet */ | ||
3739 | skb_put(skb, length); | ||
3740 | |||
3741 | /* Tell the skb what kind of packet this is */ | ||
3742 | skb->protocol = eth_type_trans(skb, ugeth->dev); | ||
3743 | |||
3744 | ugeth->stats.rx_bytes += length; | ||
3745 | /* Send the packet up the stack */ | ||
3746 | #ifdef CONFIG_UGETH_NAPI | ||
3747 | netif_receive_skb(skb); | ||
3748 | #else | ||
3749 | netif_rx(skb); | ||
3750 | #endif /* CONFIG_UGETH_NAPI */ | ||
3751 | } | ||
3752 | |||
3753 | ugeth->dev->last_rx = jiffies; | ||
3754 | |||
3755 | skb = get_new_skb(ugeth, bd); | ||
3756 | if (!skb) { | ||
3757 | ugeth_warn("%s: No Rx Data Buffer", __FUNCTION__); | ||
3758 | spin_unlock(&ugeth->lock); | ||
3759 | ugeth->stats.rx_dropped++; | ||
3760 | break; | ||
3761 | } | ||
3762 | |||
3763 | ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb; | ||
3764 | |||
3765 | /* update to point at the next skb */ | ||
3766 | ugeth->skb_currx[rxQ] = | ||
3767 | (ugeth->skb_currx[rxQ] + | ||
3768 | 1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]); | ||
3769 | |||
3770 | if (bd_status & R_W) | ||
3771 | bd = ugeth->p_rx_bd_ring[rxQ]; | ||
3772 | else | ||
3773 | bd += UCC_GETH_SIZE_OF_BD; | ||
3774 | |||
3775 | bd_status = BD_STATUS_AND_LENGTH(bd); | ||
3776 | } | ||
3777 | |||
3778 | ugeth->rxBd[rxQ] = bd; | ||
3779 | spin_unlock(&ugeth->lock); | ||
3780 | return howmany; | ||
3781 | } | ||
3782 | |||
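ucc_geth_rx() above walks the BD ring by checking the wrap bit (R_W) in each descriptor's status word rather than by index arithmetic. The self-contained sketch below shows the same traversal; the descriptor layout and the BD_WRAP value are invented for illustration.

    #include <stdio.h>

    #define BD_WRAP 0x2000u     /* invented wrap flag, plays the role of R_W */

    struct fake_bd {
        unsigned int status;
    };

    int main(void)
    {
        struct fake_bd ring[4] = { {0}, {0}, {0}, {BD_WRAP} }; /* last BD wraps */
        struct fake_bd *bd = &ring[0];

        for (int i = 0; i < 6; i++) {
            printf("process bd %ld\n", (long)(bd - ring));
            if (bd->status & BD_WRAP)
                bd = &ring[0];  /* wrap back to the ring base */
            else
                bd++;           /* otherwise advance to the next descriptor */
        }
        return 0;
    }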
3783 | static int ucc_geth_tx(struct net_device *dev, u8 txQ) | ||
3784 | { | ||
3785 | /* Start from the next BD that should be filled */ | ||
3786 | ucc_geth_private_t *ugeth = netdev_priv(dev); | ||
3787 | u8 *bd; /* BD pointer */ | ||
3788 | u32 bd_status; | ||
3789 | |||
3790 | bd = ugeth->confBd[txQ]; | ||
3791 | bd_status = BD_STATUS_AND_LENGTH(bd); | ||
3792 | |||
3793 | /* Normal processing. */ | ||
3794 | while ((bd_status & T_R) == 0) { | ||
3795 | /* BD contains already transmitted buffer. */ | ||
3796 | /* Handle the transmitted buffer and release */ | ||
3797 | /* the BD to be used with the current frame */ | ||
3798 | |||
3799 | if ((bd = ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0)) | ||
3800 | break; | ||
3801 | |||
3802 | ugeth->stats.tx_packets++; | ||
3803 | |||
3804 | /* Free the sk buffer associated with this TxBD */ | ||
3805 | dev_kfree_skb_irq(ugeth-> | ||
3806 | tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]); | ||
3807 | ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL; | ||
3808 | ugeth->skb_dirtytx[txQ] = | ||
3809 | (ugeth->skb_dirtytx[txQ] + | ||
3810 | 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]); | ||
3811 | |||
3812 | /* We freed a buffer, so now we can restart transmission */ | ||
3813 | if (netif_queue_stopped(dev)) | ||
3814 | netif_wake_queue(dev); | ||
3815 | |||
3816 | /* Advance the confirmation BD pointer */ | ||
3817 | if (!(bd_status & T_W)) | ||
3818 | ugeth->confBd[txQ] += UCC_GETH_SIZE_OF_BD; | ||
3819 | else | ||
3820 | ugeth->confBd[txQ] = ugeth->p_tx_bd_ring[txQ]; | ||
3821 | } | ||
3822 | return 0; | ||
3823 | } | ||
3824 | |||
3825 | #ifdef CONFIG_UGETH_NAPI | ||
3826 | static int ucc_geth_poll(struct net_device *dev, int *budget) | ||
3827 | { | ||
3828 | ucc_geth_private_t *ugeth = netdev_priv(dev); | ||
3829 | int howmany; | ||
3830 | int rx_work_limit = *budget; | ||
3831 | u8 rxQ = 0; | ||
3832 | |||
3833 | if (rx_work_limit > dev->quota) | ||
3834 | rx_work_limit = dev->quota; | ||
3835 | |||
3836 | howmany = ucc_geth_rx(ugeth, rxQ, rx_work_limit); | ||
3837 | |||
3838 | dev->quota -= howmany; | ||
3839 | rx_work_limit -= howmany; | ||
3840 | *budget -= howmany; | ||
3841 | |||
3842 | if (rx_work_limit >= 0) | ||
3843 | netif_rx_complete(dev); | ||
3844 | |||
3845 | return (rx_work_limit < 0) ? 1 : 0; | ||
3846 | } | ||
3847 | #endif /* CONFIG_UGETH_NAPI */ | ||
3848 | |||
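ucc_geth_poll() above follows the old dev->poll contract: the work limit is the smaller of *budget and dev->quota, both are charged for the packets processed, and a non-zero return value asks the core to poll again. A minimal sketch of that accounting, with all values made up:

    #include <stdio.h>

    static int pretend_rx(int limit)
    {
        int pending = 5;                    /* packets waiting in the ring */
        return pending < limit ? pending : limit;
    }

    int main(void)
    {
        int budget = 16, quota = 8;
        int limit = budget < quota ? budget : quota;
        int done = pretend_rx(limit);

        budget -= done;
        quota -= done;

        int again = (done == limit);        /* limit exhausted: ask to be polled again */
        printf("done=%d budget=%d quota=%d again=%d\n", done, budget, quota, again);
        return 0;
    }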
3849 | static irqreturn_t ucc_geth_irq_handler(int irq, void *info, | ||
3850 | struct pt_regs *regs) | ||
3851 | { | ||
3852 | struct net_device *dev = (struct net_device *)info; | ||
3853 | ucc_geth_private_t *ugeth = netdev_priv(dev); | ||
3854 | ucc_fast_private_t *uccf; | ||
3855 | ucc_geth_info_t *ug_info; | ||
3856 | register u32 ucce = 0; | ||
3857 | register u32 bit_mask = UCCE_RXBF_SINGLE_MASK; | ||
3858 | register u32 tx_mask = UCCE_TXBF_SINGLE_MASK; | ||
3859 | register u8 i; | ||
3860 | |||
3861 | ugeth_vdbg("%s: IN", __FUNCTION__); | ||
3862 | |||
3863 | if (!ugeth) | ||
3864 | return IRQ_NONE; | ||
3865 | |||
3866 | uccf = ugeth->uccf; | ||
3867 | ug_info = ugeth->ug_info; | ||
3868 | |||
3869 | do { | ||
3870 | ucce |= (u32) (in_be32(uccf->p_ucce) & in_be32(uccf->p_uccm)); | ||
3871 | |||
3872 | /* clear event bits for next time */ | ||
3873 | /* Side effect here is to mask ucce variable | ||
3874 | for future processing below. */ | ||
3875 | out_be32(uccf->p_ucce, ucce); /* Clear with ones, | ||
3876 | but only bits in UCCM */ | ||
3877 | |||
3878 | /* We ignore Tx interrupts because Tx confirmation is | ||
3879 | done inside Tx routine */ | ||
3880 | |||
3881 | for (i = 0; i < ug_info->numQueuesRx; i++) { | ||
3882 | if (ucce & bit_mask) | ||
3883 | ucc_geth_rx(ugeth, i, | ||
3884 | (int)ugeth->ug_info-> | ||
3885 | bdRingLenRx[i]); | ||
3886 | ucce &= ~bit_mask; | ||
3887 | bit_mask <<= 1; | ||
3888 | } | ||
3889 | |||
3890 | for (i = 0; i < ug_info->numQueuesTx; i++) { | ||
3891 | if (ucce & tx_mask) | ||
3892 | ucc_geth_tx(dev, i); | ||
3893 | ucce &= ~tx_mask; | ||
3894 | tx_mask <<= 1; | ||
3895 | } | ||
3896 | |||
3897 | /* Exceptions */ | ||
3898 | if (ucce & UCCE_BSY) { | ||
3899 | ugeth_vdbg("Got BUSY irq!!!!"); | ||
3900 | ugeth->stats.rx_errors++; | ||
3901 | ucce &= ~UCCE_BSY; | ||
3902 | } | ||
3903 | if (ucce & UCCE_OTHER) { | ||
3904 | ugeth_vdbg("Got frame with error (ucce - 0x%08x)!!!!", | ||
3905 | ucce); | ||
3906 | ugeth->stats.rx_errors++; | ||
3907 | ucce &= ~ucce; | ||
3908 | } | ||
3909 | } | ||
3910 | while (ucce); | ||
3911 | |||
3912 | return IRQ_HANDLED; | ||
3913 | } | ||
3914 | |||
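The interrupt handler above maps a packed event word onto per-queue work by shifting a single-bit mask (starting from UCCE_RXBF_SINGLE_MASK / UCCE_TXBF_SINGLE_MASK) once per queue. A standalone sketch of that loop, with an invented event value:

    #include <stdio.h>

    #define NUM_QUEUES 8

    int main(void)
    {
        unsigned int events = 0x15;     /* pretend queues 0, 2 and 4 fired */
        unsigned int bit_mask = 0x1;    /* like UCCE_RXBF_SINGLE_MASK */

        for (int q = 0; q < NUM_QUEUES; q++) {
            if (events & bit_mask)
                printf("service queue %d\n", q);
            events &= ~bit_mask;        /* consume this queue's bit */
            bit_mask <<= 1;             /* move on to the next queue's bit */
        }
        return 0;
    }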
3915 | static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs) | ||
3916 | { | ||
3917 | struct net_device *dev = (struct net_device *)dev_id; | ||
3918 | ucc_geth_private_t *ugeth = netdev_priv(dev); | ||
3919 | |||
3920 | ugeth_vdbg("%s: IN", __FUNCTION__); | ||
3921 | |||
3922 | /* Clear the interrupt */ | ||
3923 | mii_clear_phy_interrupt(ugeth->mii_info); | ||
3924 | |||
3925 | /* Disable PHY interrupts */ | ||
3926 | mii_configure_phy_interrupt(ugeth->mii_info, MII_INTERRUPT_DISABLED); | ||
3927 | |||
3928 | /* Schedule the phy change */ | ||
3929 | schedule_work(&ugeth->tq); | ||
3930 | |||
3931 | return IRQ_HANDLED; | ||
3932 | } | ||
3933 | |||
3934 | /* Scheduled by the phy_interrupt/timer to handle PHY changes */ | ||
3935 | static void ugeth_phy_change(void *data) | ||
3936 | { | ||
3937 | struct net_device *dev = (struct net_device *)data; | ||
3938 | ucc_geth_private_t *ugeth = netdev_priv(dev); | ||
3939 | ucc_geth_t *ug_regs; | ||
3940 | int result = 0; | ||
3941 | |||
3942 | ugeth_vdbg("%s: IN", __FUNCTION__); | ||
3943 | |||
3944 | ug_regs = ugeth->ug_regs; | ||
3945 | |||
3946 | /* Delay to give the PHY a chance to change the | ||
3947 | * register state */ | ||
3948 | msleep(1); | ||
3949 | |||
3950 | /* Update the link, speed, duplex */ | ||
3951 | result = ugeth->mii_info->phyinfo->read_status(ugeth->mii_info); | ||
3952 | |||
3953 | /* Adjust the known status as long as the link | ||
3954 | * isn't still coming up */ | ||
3955 | if ((0 == result) || (ugeth->mii_info->link == 0)) | ||
3956 | adjust_link(dev); | ||
3957 | |||
3958 | /* Reenable interrupts, if needed */ | ||
3959 | if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) | ||
3960 | mii_configure_phy_interrupt(ugeth->mii_info, | ||
3961 | MII_INTERRUPT_ENABLED); | ||
3962 | } | ||
3963 | |||
3964 | /* Called every so often on systems that don't interrupt | ||
3965 | * the core for PHY changes */ | ||
3966 | static void ugeth_phy_timer(unsigned long data) | ||
3967 | { | ||
3968 | struct net_device *dev = (struct net_device *)data; | ||
3969 | ucc_geth_private_t *ugeth = netdev_priv(dev); | ||
3970 | |||
3971 | schedule_work(&ugeth->tq); | ||
3972 | |||
3973 | mod_timer(&ugeth->phy_info_timer, jiffies + PHY_CHANGE_TIME * HZ); | ||
3974 | } | ||
3975 | |||
3976 | /* Keep trying aneg for some time | ||
3977 | * If, after GFAR_AN_TIMEOUT seconds, it has not | ||
3978 | * finished, we switch to forced. | ||
3979 | * Either way, once the process has completed, we either | ||
3980 | * request the interrupt, or switch the timer over to | ||
3981 | * using ugeth_phy_timer to check status */ | ||
3982 | static void ugeth_phy_startup_timer(unsigned long data) | ||
3983 | { | ||
3984 | struct ugeth_mii_info *mii_info = (struct ugeth_mii_info *)data; | ||
3985 | ucc_geth_private_t *ugeth = netdev_priv(mii_info->dev); | ||
3986 | static int secondary = UGETH_AN_TIMEOUT; | ||
3987 | int result; | ||
3988 | |||
3989 | /* Configure the Auto-negotiation */ | ||
3990 | result = mii_info->phyinfo->config_aneg(mii_info); | ||
3991 | |||
3992 | /* If autonegotiation failed to start, and | ||
3993 | * we haven't timed out, reset the timer, and return */ | ||
3994 | if (result && secondary--) { | ||
3995 | mod_timer(&ugeth->phy_info_timer, jiffies + HZ); | ||
3996 | return; | ||
3997 | } else if (result) { | ||
3998 | /* Couldn't start autonegotiation. | ||
3999 | * Try switching to forced */ | ||
4000 | mii_info->autoneg = 0; | ||
4001 | result = mii_info->phyinfo->config_aneg(mii_info); | ||
4002 | |||
4003 | /* Forcing failed! Give up */ | ||
4004 | if (result) { | ||
4005 | ugeth_err("%s: Forcing failed!", mii_info->dev->name); | ||
4006 | return; | ||
4007 | } | ||
4008 | } | ||
4009 | |||
4010 | /* Kill the timer so it can be restarted */ | ||
4011 | del_timer_sync(&ugeth->phy_info_timer); | ||
4012 | |||
4013 | /* Grab the PHY interrupt, if necessary/possible */ | ||
4014 | if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) { | ||
4015 | if (request_irq(ugeth->ug_info->phy_interrupt, | ||
4016 | phy_interrupt, | ||
4017 | SA_SHIRQ, "phy_interrupt", mii_info->dev) < 0) { | ||
4018 | ugeth_err("%s: Can't get IRQ %d (PHY)", | ||
4019 | mii_info->dev->name, | ||
4020 | ugeth->ug_info->phy_interrupt); | ||
4021 | } else { | ||
4022 | mii_configure_phy_interrupt(ugeth->mii_info, | ||
4023 | MII_INTERRUPT_ENABLED); | ||
4024 | return; | ||
4025 | } | ||
4026 | } | ||
4027 | |||
4028 | /* Start the timer again, this time in order to | ||
4029 | * handle a change in status */ | ||
4030 | init_timer(&ugeth->phy_info_timer); | ||
4031 | ugeth->phy_info_timer.function = &ugeth_phy_timer; | ||
4032 | ugeth->phy_info_timer.data = (unsigned long)mii_info->dev; | ||
4033 | mod_timer(&ugeth->phy_info_timer, jiffies + PHY_CHANGE_TIME * HZ); | ||
4034 | } | ||
4035 | |||
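ugeth_phy_startup_timer() above retries autonegotiation once a second until UGETH_AN_TIMEOUT expires and then falls back to forcing the link. The standalone sketch below shows only that retry-then-force policy; the helper and the timeout value are stand-ins.

    #include <stdio.h>

    #define AN_TIMEOUT 5    /* retries, like UGETH_AN_TIMEOUT seconds */

    static int try_autoneg(int attempt)
    {
        return attempt < 3; /* pretend it fails on the first three attempts */
    }

    int main(void)
    {
        for (int t = 0; t < AN_TIMEOUT; t++) {
            if (try_autoneg(t) == 0) {
                printf("autoneg started on attempt %d\n", t);
                return 0;
            }
            printf("autoneg failed, retrying\n");
        }
        printf("giving up, forcing link parameters\n");
        return 0;
    }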
4036 | /* Called when something needs to use the ethernet device */ | ||
4037 | /* Returns 0 for success. */ | ||
4038 | static int ucc_geth_open(struct net_device *dev) | ||
4039 | { | ||
4040 | ucc_geth_private_t *ugeth = netdev_priv(dev); | ||
4041 | int err; | ||
4042 | |||
4043 | ugeth_vdbg("%s: IN", __FUNCTION__); | ||
4044 | |||
4045 | /* Test station address */ | ||
4046 | if (dev->dev_addr[0] & ENET_GROUP_ADDR) { | ||
4047 | ugeth_err("%s: Multicast address used for station address" | ||
4048 | " - is this what you wanted?", __FUNCTION__); | ||
4049 | return -EINVAL; | ||
4050 | } | ||
4051 | |||
4052 | err = ucc_geth_startup(ugeth); | ||
4053 | if (err) { | ||
4054 | ugeth_err("%s: Cannot configure net device, aborting.", | ||
4055 | dev->name); | ||
4056 | return err; | ||
4057 | } | ||
4058 | |||
4059 | err = adjust_enet_interface(ugeth); | ||
4060 | if (err) { | ||
4061 | ugeth_err("%s: Cannot configure net device, aborting.", | ||
4062 | dev->name); | ||
4063 | return err; | ||
4064 | } | ||
4065 | |||
4066 | /* Set MACSTNADDR1, MACSTNADDR2 */ | ||
4067 | /* For more details see the hardware spec. */ | ||
4068 | init_mac_station_addr_regs(dev->dev_addr[0], | ||
4069 | dev->dev_addr[1], | ||
4070 | dev->dev_addr[2], | ||
4071 | dev->dev_addr[3], | ||
4072 | dev->dev_addr[4], | ||
4073 | dev->dev_addr[5], | ||
4074 | &ugeth->ug_regs->macstnaddr1, | ||
4075 | &ugeth->ug_regs->macstnaddr2); | ||
4076 | |||
4077 | err = init_phy(dev); | ||
4078 | if (err) { | ||
4079 | ugeth_err("%s: Cannot initialize PHY, aborting.", dev->name); | ||
4080 | return err; | ||
4081 | } | ||
4082 | #ifndef CONFIG_UGETH_NAPI | ||
4083 | err = | ||
4084 | request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler, 0, | ||
4085 | "UCC Geth", dev); | ||
4086 | if (err) { | ||
4087 | ugeth_err("%s: Cannot get IRQ for net device, aborting.", | ||
4088 | dev->name); | ||
4089 | ucc_geth_stop(ugeth); | ||
4090 | return err; | ||
4091 | } | ||
4092 | #endif /* CONFIG_UGETH_NAPI */ | ||
4093 | |||
4094 | /* Set up the PHY change work queue */ | ||
4095 | INIT_WORK(&ugeth->tq, ugeth_phy_change, dev); | ||
4096 | |||
4097 | init_timer(&ugeth->phy_info_timer); | ||
4098 | ugeth->phy_info_timer.function = &ugeth_phy_startup_timer; | ||
4099 | ugeth->phy_info_timer.data = (unsigned long)ugeth->mii_info; | ||
4100 | mod_timer(&ugeth->phy_info_timer, jiffies + HZ); | ||
4101 | |||
4102 | err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); | ||
4103 | if (err) { | ||
4104 | ugeth_err("%s: Cannot enable net device, aborting.", dev->name); | ||
4105 | ucc_geth_stop(ugeth); | ||
4106 | return err; | ||
4107 | } | ||
4108 | |||
4109 | netif_start_queue(dev); | ||
4110 | |||
4111 | return err; | ||
4112 | } | ||
4113 | |||
4114 | /* Stops the kernel queue, and halts the controller */ | ||
4115 | static int ucc_geth_close(struct net_device *dev) | ||
4116 | { | ||
4117 | ucc_geth_private_t *ugeth = netdev_priv(dev); | ||
4118 | |||
4119 | ugeth_vdbg("%s: IN", __FUNCTION__); | ||
4120 | |||
4121 | ucc_geth_stop(ugeth); | ||
4122 | |||
4123 | /* Shutdown the PHY */ | ||
4124 | if (ugeth->mii_info->phyinfo->close) | ||
4125 | ugeth->mii_info->phyinfo->close(ugeth->mii_info); | ||
4126 | |||
4127 | kfree(ugeth->mii_info); | ||
4128 | |||
4129 | netif_stop_queue(dev); | ||
4130 | |||
4131 | return 0; | ||
4132 | } | ||
4133 | |||
4134 | struct ethtool_ops ucc_geth_ethtool_ops = { | ||
4135 | .get_settings = NULL, | ||
4136 | .get_drvinfo = NULL, | ||
4137 | .get_regs_len = NULL, | ||
4138 | .get_regs = NULL, | ||
4139 | .get_link = NULL, | ||
4140 | .get_coalesce = NULL, | ||
4141 | .set_coalesce = NULL, | ||
4142 | .get_ringparam = NULL, | ||
4143 | .set_ringparam = NULL, | ||
4144 | .get_strings = NULL, | ||
4145 | .get_stats_count = NULL, | ||
4146 | .get_ethtool_stats = NULL, | ||
4147 | }; | ||
4148 | |||
4149 | static int ucc_geth_probe(struct device *device) | ||
4150 | { | ||
4151 | struct platform_device *pdev = to_platform_device(device); | ||
4152 | struct ucc_geth_platform_data *ugeth_pdata; | ||
4153 | struct net_device *dev = NULL; | ||
4154 | struct ucc_geth_private *ugeth = NULL; | ||
4155 | struct ucc_geth_info *ug_info; | ||
4156 | int err; | ||
4157 | static int mii_mng_configured = 0; | ||
4158 | |||
4159 | ugeth_vdbg("%s: IN", __FUNCTION__); | ||
4160 | |||
4161 | ugeth_pdata = (struct ucc_geth_platform_data *)pdev->dev.platform_data; | ||
4162 | |||
4163 | ug_info = &ugeth_info[pdev->id]; | ||
4164 | ug_info->uf_info.ucc_num = pdev->id; | ||
4165 | ug_info->uf_info.rx_clock = ugeth_pdata->rx_clock; | ||
4166 | ug_info->uf_info.tx_clock = ugeth_pdata->tx_clock; | ||
4167 | ug_info->uf_info.regs = ugeth_pdata->phy_reg_addr; | ||
4168 | ug_info->uf_info.irq = platform_get_irq(pdev, 0); | ||
4169 | ug_info->phy_address = ugeth_pdata->phy_id; | ||
4170 | ug_info->enet_interface = ugeth_pdata->phy_interface; | ||
4171 | ug_info->board_flags = ugeth_pdata->board_flags; | ||
4172 | ug_info->phy_interrupt = ugeth_pdata->phy_interrupt; | ||
4173 | |||
4174 | printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d) \n", | ||
4175 | ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs, | ||
4176 | ug_info->uf_info.irq); | ||
4177 | |||
4178 | if (ug_info == NULL) { | ||
4179 | ugeth_err("%s: [%d] Missing additional data!", __FUNCTION__, | ||
4180 | pdev->id); | ||
4181 | return -ENODEV; | ||
4182 | } | ||
4183 | |||
4184 | if (!mii_mng_configured) { | ||
4185 | ucc_set_qe_mux_mii_mng(ug_info->uf_info.ucc_num); | ||
4186 | mii_mng_configured = 1; | ||
4187 | } | ||
4188 | |||
4189 | /* Create an ethernet device instance */ | ||
4190 | dev = alloc_etherdev(sizeof(*ugeth)); | ||
4191 | |||
4192 | if (dev == NULL) | ||
4193 | return -ENOMEM; | ||
4194 | |||
4195 | ugeth = netdev_priv(dev); | ||
4196 | spin_lock_init(&ugeth->lock); | ||
4197 | |||
4198 | dev_set_drvdata(device, dev); | ||
4199 | |||
4200 | /* Set the dev->base_addr to the gfar reg region */ | ||
4201 | dev->base_addr = (unsigned long)(ug_info->uf_info.regs); | ||
4202 | |||
4203 | SET_MODULE_OWNER(dev); | ||
4204 | SET_NETDEV_DEV(dev, device); | ||
4205 | |||
4206 | /* Fill in the dev structure */ | ||
4207 | dev->open = ucc_geth_open; | ||
4208 | dev->hard_start_xmit = ucc_geth_start_xmit; | ||
4209 | dev->tx_timeout = ucc_geth_timeout; | ||
4210 | dev->watchdog_timeo = TX_TIMEOUT; | ||
4211 | #ifdef CONFIG_UGETH_NAPI | ||
4212 | dev->poll = ucc_geth_poll; | ||
4213 | dev->weight = UCC_GETH_DEV_WEIGHT; | ||
4214 | #endif /* CONFIG_UGETH_NAPI */ | ||
4215 | dev->stop = ucc_geth_close; | ||
4216 | dev->get_stats = ucc_geth_get_stats; | ||
4217 | // dev->change_mtu = ucc_geth_change_mtu; | ||
4218 | dev->mtu = 1500; | ||
4219 | dev->set_multicast_list = ucc_geth_set_multi; | ||
4220 | dev->ethtool_ops = &ucc_geth_ethtool_ops; | ||
4221 | |||
4222 | err = register_netdev(dev); | ||
4223 | if (err) { | ||
4224 | ugeth_err("%s: Cannot register net device, aborting.", | ||
4225 | dev->name); | ||
4226 | free_netdev(dev); | ||
4227 | return err; | ||
4228 | } | ||
4229 | |||
4230 | ugeth->ug_info = ug_info; | ||
4231 | ugeth->dev = dev; | ||
4232 | memcpy(dev->dev_addr, ugeth_pdata->mac_addr, 6); | ||
4233 | |||
4234 | return 0; | ||
4235 | } | ||
4236 | |||
4237 | static int ucc_geth_remove(struct device *device) | ||
4238 | { | ||
4239 | struct net_device *dev = dev_get_drvdata(device); | ||
4240 | struct ucc_geth_private *ugeth = netdev_priv(dev); | ||
4241 | |||
4242 | dev_set_drvdata(device, NULL); | ||
4243 | ucc_geth_memclean(ugeth); | ||
4244 | free_netdev(dev); | ||
4245 | |||
4246 | return 0; | ||
4247 | } | ||
4248 | |||
4249 | /* Structure for a device driver */ | ||
4250 | static struct device_driver ucc_geth_driver = { | ||
4251 | .name = DRV_NAME, | ||
4252 | .bus = &platform_bus_type, | ||
4253 | .probe = ucc_geth_probe, | ||
4254 | .remove = ucc_geth_remove, | ||
4255 | }; | ||
4256 | |||
4257 | static int __init ucc_geth_init(void) | ||
4258 | { | ||
4259 | int i; | ||
4260 | printk(KERN_INFO "ucc_geth: " DRV_DESC "\n"); | ||
4261 | for (i = 0; i < 8; i++) | ||
4262 | memcpy(&(ugeth_info[i]), &ugeth_primary_info, | ||
4263 | sizeof(ugeth_primary_info)); | ||
4264 | |||
4265 | return driver_register(&ucc_geth_driver); | ||
4266 | } | ||
4267 | |||
4268 | static void __exit ucc_geth_exit(void) | ||
4269 | { | ||
4270 | driver_unregister(&ucc_geth_driver); | ||
4271 | } | ||
4272 | |||
4273 | module_init(ucc_geth_init); | ||
4274 | module_exit(ucc_geth_exit); | ||
4275 | |||
4276 | MODULE_AUTHOR("Freescale Semiconductor, Inc"); | ||
4277 | MODULE_DESCRIPTION(DRV_DESC); | ||
4278 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h new file mode 100644 index 000000000000..005965f5dd9b --- /dev/null +++ b/drivers/net/ucc_geth.h | |||
@@ -0,0 +1,1339 @@ | |||
1 | /* | ||
2 | * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved. | ||
3 | * | ||
4 | * Author: Shlomi Gridish <gridish@freescale.com> | ||
5 | * | ||
6 | * Description: | ||
7 | * Internal header file for UCC Gigabit Ethernet unit routines. | ||
8 | * | ||
9 | * Changelog: | ||
10 | * Jun 28, 2006 Li Yang <LeoLi@freescale.com> | ||
11 | * - Rearrange code and style fixes | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or modify it | ||
14 | * under the terms of the GNU General Public License as published by the | ||
15 | * Free Software Foundation; either version 2 of the License, or (at your | ||
16 | * option) any later version. | ||
17 | */ | ||
18 | #ifndef __UCC_GETH_H__ | ||
19 | #define __UCC_GETH_H__ | ||
20 | |||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/list.h> | ||
23 | #include <linux/fsl_devices.h> | ||
24 | |||
25 | #include <asm/immap_qe.h> | ||
26 | #include <asm/qe.h> | ||
27 | |||
28 | #include <asm/ucc.h> | ||
29 | #include <asm/ucc_fast.h> | ||
30 | |||
31 | #define NUM_TX_QUEUES 8 | ||
32 | #define NUM_RX_QUEUES 8 | ||
33 | #define NUM_BDS_IN_PREFETCHED_BDS 4 | ||
34 | #define TX_IP_OFFSET_ENTRY_MAX 8 | ||
35 | #define NUM_OF_PADDRS 4 | ||
36 | #define ENET_INIT_PARAM_MAX_ENTRIES_RX 9 | ||
37 | #define ENET_INIT_PARAM_MAX_ENTRIES_TX 8 | ||
38 | |||
39 | typedef struct ucc_mii_mng { | ||
40 | u32 miimcfg; /* MII management configuration reg */ | ||
41 | u32 miimcom; /* MII management command reg */ | ||
42 | u32 miimadd; /* MII management address reg */ | ||
43 | u32 miimcon; /* MII management control reg */ | ||
44 | u32 miimstat; /* MII management status reg */ | ||
45 | u32 miimind; /* MII management indication reg */ | ||
46 | } __attribute__ ((packed)) ucc_mii_mng_t; | ||
47 | |||
48 | typedef struct ucc_geth { | ||
49 | ucc_fast_t uccf; | ||
50 | |||
51 | u32 maccfg1; /* mac configuration reg. 1 */ | ||
52 | u32 maccfg2; /* mac configuration reg. 2 */ | ||
53 | u32 ipgifg; /* interframe gap reg. */ | ||
54 | u32 hafdup; /* half-duplex reg. */ | ||
55 | u8 res1[0x10]; | ||
56 | ucc_mii_mng_t miimng; /* MII management structure */ | ||
57 | u32 ifctl; /* interface control reg */ | ||
58 | u32 ifstat; /* interface status reg */ | ||
59 | u32 macstnaddr1; /* mac station address part 1 reg */ | ||
60 | u32 macstnaddr2; /* mac station address part 2 reg */ | ||
61 | u8 res2[0x8]; | ||
62 | u32 uempr; /* UCC Ethernet Mac parameter reg */ | ||
63 | u32 utbipar; /* UCC tbi address reg */ | ||
64 | u16 uescr; /* UCC Ethernet statistics control reg */ | ||
65 | u8 res3[0x180 - 0x15A]; | ||
66 | u32 tx64; /* Total number of frames (including bad | ||
67 | frames) transmitted that were exactly of the | ||
68 | minimal length (64 for untagged, 68 for | ||
69 | tagged, or with length exactly equal to the | ||
70 | parameter MINLength) */ | ||
71 | u32 tx127; /* Total number of frames (including bad | ||
72 | frames) transmitted that were between | ||
73 | MINLength (Including FCS length==4) and 127 | ||
74 | octets */ | ||
75 | u32 tx255; /* Total number of frames (including bad | ||
76 | frames) transmitted that were between 128 | ||
77 | (Including FCS length==4) and 255 octets */ | ||
78 | u32 rx64; /* Total number of frames received including | ||
79 | bad frames that were exactly of the minimal | ||
80 | length (64 bytes) */ | ||
81 | u32 rx127; /* Total number of frames (including bad | ||
82 | frames) received that were between MINLength | ||
83 | (Including FCS length==4) and 127 octets */ | ||
84 | u32 rx255; /* Total number of frames (including bad | ||
85 | frames) received that were between 128 | ||
86 | (Including FCS length==4) and 255 octets */ | ||
87 | u32 txok; /* Total number of octets residing in frames | ||
88 | that were involved in successful | ||
89 | transmission */ | ||
90 | u16 txcf; /* Total number of PAUSE control frames | ||
91 | transmitted by this MAC */ | ||
92 | u8 res4[0x2]; | ||
93 | u32 tmca; /* Total number of frames that were transmitted | ||
94 | successfully with the group address bit set | ||
95 | that are not broadcast frames */ | ||
96 | u32 tbca; /* Total number of frames transmitted | ||
97 | successfully that had destination address | ||
98 | field equal to the broadcast address */ | ||
99 | u32 rxfok; /* Total number of frames received OK */ | ||
100 | u32 rxbok; /* Total number of octets received OK */ | ||
101 | u32 rbyt; /* Total number of octets received including | ||
102 | octets in bad frames. Must be implemented in | ||
103 | HW because it includes octets in frames that | ||
104 | never even reach the UCC */ | ||
105 | u32 rmca; /* Total number of frames that were received | ||
106 | successfully with the group address bit set | ||
107 | that are not broadcast frames */ | ||
108 | u32 rbca; /* Total number of frames received successfully | ||
109 | that had destination address equal to the | ||
110 | broadcast address */ | ||
111 | u32 scar; /* Statistics carry register */ | ||
112 | u32 scam; /* Statistics carry mask register */ | ||
113 | u8 res5[0x200 - 0x1c4]; | ||
114 | } __attribute__ ((packed)) ucc_geth_t; | ||
115 | |||
116 | /* UCC GETH TEMODR Register */ | ||
117 | #define TEMODER_TX_RMON_STATISTICS_ENABLE 0x0100 /* enable Tx statistics | ||
118 | */ | ||
119 | #define TEMODER_SCHEDULER_ENABLE 0x2000 /* enable scheduler */ | ||
120 | #define TEMODER_IP_CHECKSUM_GENERATE 0x0400 /* generate IPv4 | ||
121 | checksums */ | ||
122 | #define TEMODER_PERFORMANCE_OPTIMIZATION_MODE1 0x0200 /* enable performance | ||
123 | optimization | ||
124 | enhancement (mode1) */ | ||
125 | #define TEMODER_RMON_STATISTICS 0x0100 /* enable tx statistics | ||
126 | */ | ||
127 | #define TEMODER_NUM_OF_QUEUES_SHIFT (15-15) /* Number of queues << | ||
128 | shift */ | ||
129 | |||
130 | /* UCC GETH TEMODR Register */ | ||
131 | #define REMODER_RX_RMON_STATISTICS_ENABLE 0x00001000 /* enable Rx | ||
132 | statistics */ | ||
133 | #define REMODER_RX_EXTENDED_FEATURES 0x80000000 /* enable | ||
134 | extended | ||
135 | features */ | ||
136 | #define REMODER_VLAN_OPERATION_TAGGED_SHIFT (31-9 ) /* vlan operation | ||
137 | tagged << shift */ | ||
138 | #define REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT (31-10) /* vlan operation non | ||
139 | tagged << shift */ | ||
140 | #define REMODER_RX_QOS_MODE_SHIFT (31-15) /* rx QoS mode << shift | ||
141 | */ | ||
142 | #define REMODER_RMON_STATISTICS 0x00001000 /* enable rx | ||
143 | statistics */ | ||
144 | #define REMODER_RX_EXTENDED_FILTERING 0x00000800 /* extended | ||
145 | filtering | ||
146 | vs. | ||
147 | mpc82xx-like | ||
148 | filtering */ | ||
149 | #define REMODER_NUM_OF_QUEUES_SHIFT (31-23) /* Number of queues << | ||
150 | shift */ | ||
151 | #define REMODER_DYNAMIC_MAX_FRAME_LENGTH 0x00000008 /* enable | ||
152 | dynamic max | ||
153 | frame length | ||
154 | */ | ||
155 | #define REMODER_DYNAMIC_MIN_FRAME_LENGTH 0x00000004 /* enable | ||
156 | dynamic min | ||
157 | frame length | ||
158 | */ | ||
159 | #define REMODER_IP_CHECKSUM_CHECK 0x00000002 /* check IPv4 | ||
160 | checksums */ | ||
161 | #define REMODER_IP_ADDRESS_ALIGNMENT 0x00000001 /* align ip | ||
162 | address to | ||
163 | 4-byte | ||
164 | boundary */ | ||
165 | |||
166 | /* UCC GETH Event Register */ | ||
167 | #define UCCE_MPD 0x80000000 /* Magic packet | ||
168 | detection */ | ||
169 | #define UCCE_SCAR 0x40000000 | ||
170 | #define UCCE_GRA 0x20000000 /* Tx graceful | ||
171 | stop | ||
172 | complete */ | ||
173 | #define UCCE_CBPR 0x10000000 | ||
174 | #define UCCE_BSY 0x08000000 | ||
175 | #define UCCE_RXC 0x04000000 | ||
176 | #define UCCE_TXC 0x02000000 | ||
177 | #define UCCE_TXE 0x01000000 | ||
178 | #define UCCE_TXB7 0x00800000 | ||
179 | #define UCCE_TXB6 0x00400000 | ||
180 | #define UCCE_TXB5 0x00200000 | ||
181 | #define UCCE_TXB4 0x00100000 | ||
182 | #define UCCE_TXB3 0x00080000 | ||
183 | #define UCCE_TXB2 0x00040000 | ||
184 | #define UCCE_TXB1 0x00020000 | ||
185 | #define UCCE_TXB0 0x00010000 | ||
186 | #define UCCE_RXB7 0x00008000 | ||
187 | #define UCCE_RXB6 0x00004000 | ||
188 | #define UCCE_RXB5 0x00002000 | ||
189 | #define UCCE_RXB4 0x00001000 | ||
190 | #define UCCE_RXB3 0x00000800 | ||
191 | #define UCCE_RXB2 0x00000400 | ||
192 | #define UCCE_RXB1 0x00000200 | ||
193 | #define UCCE_RXB0 0x00000100 | ||
194 | #define UCCE_RXF7 0x00000080 | ||
195 | #define UCCE_RXF6 0x00000040 | ||
196 | #define UCCE_RXF5 0x00000020 | ||
197 | #define UCCE_RXF4 0x00000010 | ||
198 | #define UCCE_RXF3 0x00000008 | ||
199 | #define UCCE_RXF2 0x00000004 | ||
200 | #define UCCE_RXF1 0x00000002 | ||
201 | #define UCCE_RXF0 0x00000001 | ||
202 | |||
203 | #define UCCE_RXBF_SINGLE_MASK (UCCE_RXF0) | ||
204 | #define UCCE_TXBF_SINGLE_MASK (UCCE_TXB0) | ||
205 | |||
206 | #define UCCE_TXB (UCCE_TXB7 | UCCE_TXB6 | UCCE_TXB5 | UCCE_TXB4 |\ | ||
207 | UCCE_TXB3 | UCCE_TXB2 | UCCE_TXB1 | UCCE_TXB0) | ||
208 | #define UCCE_RXB (UCCE_RXB7 | UCCE_RXB6 | UCCE_RXB5 | UCCE_RXB4 |\ | ||
209 | UCCE_RXB3 | UCCE_RXB2 | UCCE_RXB1 | UCCE_RXB0) | ||
210 | #define UCCE_RXF (UCCE_RXF7 | UCCE_RXF6 | UCCE_RXF5 | UCCE_RXF4 |\ | ||
211 | UCCE_RXF3 | UCCE_RXF2 | UCCE_RXF1 | UCCE_RXF0) | ||
212 | #define UCCE_OTHER (UCCE_SCAR | UCCE_GRA | UCCE_CBPR | UCCE_BSY |\ | ||
213 | UCCE_RXC | UCCE_TXC | UCCE_TXE) | ||
214 | |||
215 | /* UCC GETH UPSMR (Protocol Specific Mode Register) */ | ||
216 | #define UPSMR_ECM 0x04000000 /* Enable CAM | ||
217 | Miss or | ||
218 | Enable | ||
219 | Filtering | ||
220 | Miss */ | ||
221 | #define UPSMR_HSE 0x02000000 /* Hardware | ||
222 | Statistics | ||
223 | Enable */ | ||
224 | #define UPSMR_PRO 0x00400000 /* Promiscuous*/ | ||
225 | #define UPSMR_CAP 0x00200000 /* CAM polarity | ||
226 | */ | ||
227 | #define UPSMR_RSH 0x00100000 /* Receive | ||
228 | Short Frames | ||
229 | */ | ||
230 | #define UPSMR_RPM 0x00080000 /* Reduced Pin | ||
231 | Mode | ||
232 | interfaces */ | ||
233 | #define UPSMR_R10M 0x00040000 /* RGMII/RMII | ||
234 | 10 Mode */ | ||
235 | #define UPSMR_RLPB 0x00020000 /* RMII | ||
236 | Loopback | ||
237 | Mode */ | ||
238 | #define UPSMR_TBIM 0x00010000 /* Ten-bit | ||
239 | Interface | ||
240 | Mode */ | ||
241 | #define UPSMR_RMM 0x00001000 /* RMII/RGMII | ||
242 | Mode */ | ||
243 | #define UPSMR_CAM 0x00000400 /* CAM Address | ||
244 | Matching */ | ||
245 | #define UPSMR_BRO 0x00000200 /* Broadcast | ||
246 | Address */ | ||
247 | #define UPSMR_RES1 0x00002000 /* Reserved | ||
248 | field - must | ||
249 | be 1 */ | ||
250 | |||
251 | /* UCC GETH MACCFG1 (MAC Configuration 1 Register) */ | ||
252 | #define MACCFG1_FLOW_RX 0x00000020 /* Flow Control | ||
253 | Rx */ | ||
254 | #define MACCFG1_FLOW_TX 0x00000010 /* Flow Control | ||
255 | Tx */ | ||
256 | #define MACCFG1_ENABLE_SYNCHED_RX 0x00000008 /* Rx Enable | ||
257 | synchronized | ||
258 | to Rx stream | ||
259 | */ | ||
260 | #define MACCFG1_ENABLE_RX 0x00000004 /* Enable Rx */ | ||
261 | #define MACCFG1_ENABLE_SYNCHED_TX 0x00000002 /* Tx Enable | ||
262 | synchronized | ||
263 | to Tx stream | ||
264 | */ | ||
265 | #define MACCFG1_ENABLE_TX 0x00000001 /* Enable Tx */ | ||
266 | |||
267 | /* UCC GETH MACCFG2 (MAC Configuration 2 Register) */ | ||
268 | #define MACCFG2_PREL_SHIFT (31 - 19) /* Preamble | ||
269 | Length << | ||
270 | shift */ | ||
271 | #define MACCFG2_PREL_MASK 0x0000f000 /* Preamble | ||
272 | Length mask */ | ||
273 | #define MACCFG2_SRP 0x00000080 /* Soft Receive | ||
274 | Preamble */ | ||
275 | #define MACCFG2_STP 0x00000040 /* Soft | ||
276 | Transmit | ||
277 | Preamble */ | ||
278 | #define MACCFG2_RESERVED_1 0x00000020 /* Reserved - | ||
279 | must be set | ||
280 | to 1 */ | ||
281 | #define MACCFG2_LC 0x00000010 /* Length Check | ||
282 | */ | ||
283 | #define MACCFG2_MPE 0x00000008 /* Magic packet | ||
284 | detect */ | ||
285 | #define MACCFG2_FDX 0x00000001 /* Full Duplex */ | ||
286 | #define MACCFG2_FDX_MASK 0x00000001 /* Full Duplex | ||
287 | mask */ | ||
288 | #define MACCFG2_PAD_CRC 0x00000004 | ||
289 | #define MACCFG2_CRC_EN 0x00000002 | ||
290 | #define MACCFG2_PAD_AND_CRC_MODE_NONE 0x00000000 /* Neither | ||
291 | Padding | ||
292 | short frames | ||
293 | nor CRC */ | ||
294 | #define MACCFG2_PAD_AND_CRC_MODE_CRC_ONLY 0x00000002 /* Append CRC | ||
295 | only */ | ||
296 | #define MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC 0x00000004 | ||
297 | #define MACCFG2_INTERFACE_MODE_NIBBLE 0x00000100 /* nibble mode | ||
298 | (MII/RMII/RGMII | ||
299 | 10/100bps) */ | ||
300 | #define MACCFG2_INTERFACE_MODE_BYTE 0x00000200 /* byte mode | ||
301 | (GMII/TBI/RTB/RGMII | ||
302 | 1000bps ) */ | ||
303 | #define MACCFG2_INTERFACE_MODE_MASK 0x00000300 /* mask | ||
304 | covering all | ||
305 | relevant | ||
306 | bits */ | ||
307 | |||
308 | /* UCC GETH IPGIFG (Inter-frame Gap / Inter-Frame Gap Register) */ | ||
309 | #define IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT (31 - 7) /* Non | ||
310 | back-to-back | ||
311 | inter frame | ||
312 | gap part 1. | ||
313 | << shift */ | ||
314 | #define IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT (31 - 15) /* Non | ||
315 | back-to-back | ||
316 | inter frame | ||
317 | gap part 2. | ||
318 | << shift */ | ||
319 | #define IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT (31 - 23) /* Minimum IFG | ||
320 | Enforcement | ||
321 | << shift */ | ||
322 | #define IPGIFG_BACK_TO_BACK_IFG_SHIFT (31 - 31) /* back-to-back | ||
323 | inter frame | ||
324 | gap << shift | ||
325 | */ | ||
326 | #define IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX 127 /* Non back-to-back | ||
327 | inter frame gap part | ||
328 | 1. max val */ | ||
329 | #define IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX 127 /* Non back-to-back | ||
330 | inter frame gap part | ||
331 | 2. max val */ | ||
332 | #define IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX 255 /* Minimum IFG | ||
333 | Enforcement max val */ | ||
334 | #define IPGIFG_BACK_TO_BACK_IFG_MAX 127 /* back-to-back inter | ||
335 | frame gap max val */ | ||
336 | #define IPGIFG_NBTB_CS_IPG_MASK 0x7F000000 | ||
337 | #define IPGIFG_NBTB_IPG_MASK 0x007F0000 | ||
338 | #define IPGIFG_MIN_IFG_MASK 0x0000FF00 | ||
339 | #define IPGIFG_BTB_IPG_MASK 0x0000007F | ||
340 | |||
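The (31 - n) shift expressions used throughout these definitions presumably translate the manual's most-significant-bit-first numbering (bit 0 = MSB) into conventional left-shift amounts. A two-line illustration with a made-up field:

    #include <stdio.h>

    #define FIELD_MSB0_LSB  7                       /* documented LSB, MSB-0 numbering */
    #define FIELD_SHIFT     (31 - FIELD_MSB0_LSB)   /* conventional left-shift amount */

    int main(void)
    {
        unsigned int reg = 0;
        reg |= 0x5fu << FIELD_SHIFT;    /* place the value in the register's top byte */
        printf("reg = 0x%08x\n", reg);  /* prints 0x5f000000 */
        return 0;
    }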
341 | /* UCC GETH HAFDUP (Half Duplex Register) */ | ||
342 | #define HALFDUP_ALT_BEB_TRUNCATION_SHIFT (31 - 11) /* Alternate | ||
343 | Binary | ||
344 | Exponential | ||
345 | Backoff | ||
346 | Truncation | ||
347 | << shift */ | ||
348 | #define HALFDUP_ALT_BEB_TRUNCATION_MAX 0xf /* Alternate Binary | ||
349 | Exponential Backoff | ||
350 | Truncation max val */ | ||
351 | #define HALFDUP_ALT_BEB 0x00080000 /* Alternate | ||
352 | Binary | ||
353 | Exponential | ||
354 | Backoff */ | ||
355 | #define HALFDUP_BACK_PRESSURE_NO_BACKOFF 0x00040000 /* Back | ||
356 | pressure no | ||
357 | backoff */ | ||
358 | #define HALFDUP_NO_BACKOFF 0x00020000 /* No Backoff */ | ||
359 | #define HALFDUP_EXCESSIVE_DEFER 0x00010000 /* Excessive | ||
360 | Defer */ | ||
361 | #define HALFDUP_MAX_RETRANSMISSION_SHIFT (31 - 19) /* Maximum | ||
362 | Retransmission | ||
363 | << shift */ | ||
364 | #define HALFDUP_MAX_RETRANSMISSION_MAX 0xf /* Maximum | ||
365 | Retransmission max | ||
366 | val */ | ||
367 | #define HALFDUP_COLLISION_WINDOW_SHIFT (31 - 31) /* Collision | ||
368 | Window << | ||
369 | shift */ | ||
370 | #define HALFDUP_COLLISION_WINDOW_MAX 0x3f /* Collision Window max | ||
371 | val */ | ||
372 | #define HALFDUP_ALT_BEB_TR_MASK 0x00F00000 | ||
373 | #define HALFDUP_RETRANS_MASK 0x0000F000 | ||
374 | #define HALFDUP_COL_WINDOW_MASK 0x0000003F | ||
375 | |||
376 | /* UCC GETH UCCS (Ethernet Status Register) */ | ||
377 | #define UCCS_BPR 0x02 /* Back pressure (in | ||
378 | half duplex mode) */ | ||
379 | #define UCCS_PAU 0x02 /* Pause state (in full | ||
380 | duplex mode) */ | ||
381 | #define UCCS_MPD 0x01 /* Magic Packet | ||
382 | Detected */ | ||
383 | |||
384 | /* UCC GETH MIIMCFG (MII Management Configuration Register) */ | ||
385 | #define MIIMCFG_RESET_MANAGEMENT 0x80000000 /* Reset | ||
386 | management */ | ||
387 | #define MIIMCFG_NO_PREAMBLE 0x00000010 /* Preamble | ||
388 | suppress */ | ||
389 | #define MIIMCFG_CLOCK_DIVIDE_SHIFT (31 - 31) /* clock divide | ||
390 | << shift */ | ||
391 | #define MIIMCFG_CLOCK_DIVIDE_MAX 0xf /* clock divide max val | ||
392 | */ | ||
393 | #define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_2 0x00000000 /* divide by 2 */ | ||
394 | #define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_4 0x00000001 /* divide by 4 */ | ||
395 | #define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_6 0x00000002 /* divide by 6 */ | ||
396 | #define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_8 0x00000003 /* divide by 8 */ | ||
397 | #define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_10 0x00000004 /* divide by 10 | ||
398 | */ | ||
399 | #define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_14 0x00000005 /* divide by 14 | ||
400 | */ | ||
401 | #define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_16 0x00000008 /* divide by 16 | ||
402 | */ | ||
403 | #define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_20 0x00000006 /* divide by 20 | ||
404 | */ | ||
405 | #define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_28 0x00000007 /* divide by 28 | ||
406 | */ | ||
407 | #define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_32 0x00000009 /* divide by 32 | ||
408 | */ | ||
409 | #define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_48 0x0000000a /* divide by 48 | ||
410 | */ | ||
411 | #define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_64 0x0000000b /* divide by 64 | ||
412 | */ | ||
413 | #define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_80 0x0000000c /* divide by 80 | ||
414 | */ | ||
415 | #define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_112 0x0000000d /* divide by | ||
416 | 112 */ | ||
417 | #define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_160 0x0000000e /* divide by | ||
418 | 160 */ | ||
419 | #define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_224 0x0000000f /* divide by | ||
420 | 224 */ | ||
421 | |||
422 | /* UCC GETH MIIMCOM (MII Management Command Register) */ | ||
423 | #define MIIMCOM_SCAN_CYCLE 0x00000002 /* Scan cycle */ | ||
424 | #define MIIMCOM_READ_CYCLE 0x00000001 /* Read cycle */ | ||
425 | |||
426 | /* UCC GETH MIIMADD (MII Management Address Register) */ | ||
427 | #define MIIMADD_PHY_ADDRESS_SHIFT (31 - 23) /* PHY Address | ||
428 | << shift */ | ||
429 | #define MIIMADD_PHY_REGISTER_SHIFT (31 - 31) /* PHY Register | ||
430 | << shift */ | ||
431 | |||
432 | /* UCC GETH MIIMCON (MII Management Control Register) */ | ||
433 | #define MIIMCON_PHY_CONTROL_SHIFT (31 - 31) /* PHY Control | ||
434 | << shift */ | ||
435 | #define MIIMCON_PHY_STATUS_SHIFT (31 - 31) /* PHY Status | ||
436 | << shift */ | ||
437 | |||
438 | /* UCC GETH MIIMIND (MII Management Indicator Register) */ | ||
439 | #define MIIMIND_NOT_VALID 0x00000004 /* Not valid */ | ||
440 | #define MIIMIND_SCAN 0x00000002 /* Scan in | ||
441 | progress */ | ||
442 | #define MIIMIND_BUSY 0x00000001 | ||
443 | |||
444 | /* UCC GETH IFSTAT (Interface Status Register) */ | ||
445 | #define IFSTAT_EXCESS_DEFER 0x00000200 /* Excessive | ||
446 | transmission | ||
447 | defer */ | ||
448 | |||
449 | /* UCC GETH MACSTNADDR1 (Station Address Part 1 Register) */ | ||
450 | #define MACSTNADDR1_OCTET_6_SHIFT (31 - 7) /* Station | ||
451 | address 6th | ||
452 | octet << | ||
453 | shift */ | ||
454 | #define MACSTNADDR1_OCTET_5_SHIFT (31 - 15) /* Station | ||
455 | address 5th | ||
456 | octet << | ||
457 | shift */ | ||
458 | #define MACSTNADDR1_OCTET_4_SHIFT (31 - 23) /* Station | ||
459 | address 4th | ||
460 | octet << | ||
461 | shift */ | ||
462 | #define MACSTNADDR1_OCTET_3_SHIFT (31 - 31) /* Station | ||
463 | address 3rd | ||
464 | octet << | ||
465 | shift */ | ||
466 | |||
467 | /* UCC GETH MACSTNADDR2 (Station Address Part 2 Register) */ | ||
468 | #define MACSTNADDR2_OCTET_2_SHIFT (31 - 7) /* Station | ||
469 | address 2nd | ||
470 | octet << | ||
471 | shift */ | ||
472 | #define MACSTNADDR2_OCTET_1_SHIFT (31 - 15) /* Station | ||
473 | address 1st | ||
474 | octet << | ||
475 | shift */ | ||
476 | |||
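The four MACSTNADDR1 shifts and two MACSTNADDR2 shifts above describe how the six station-address octets are packed, most-significant octet first, into the two 32-bit registers. A minimal sketch of that packing follows; it is an editorial illustration, not part of this commit, and the helper name and register field names (macstnaddr1/macstnaddr2) are assumptions.

	/* Illustration only: pack a 6-octet MAC address into the two
	 * station-address registers using the shift constants above.
	 * Helper and register field names are hypothetical. */
	static void set_station_addr(ucc_geth_t *ug_regs, const u8 *addr)
	{
		u32 val;

		/* Octets 6..3 land in MACSTNADDR1, octet 6 in the top byte. */
		val = ((u32)addr[5] << MACSTNADDR1_OCTET_6_SHIFT) |
		      ((u32)addr[4] << MACSTNADDR1_OCTET_5_SHIFT) |
		      ((u32)addr[3] << MACSTNADDR1_OCTET_4_SHIFT) |
		      ((u32)addr[2] << MACSTNADDR1_OCTET_3_SHIFT);
		out_be32(&ug_regs->macstnaddr1, val);

		/* Octets 2..1 land in MACSTNADDR2. */
		val = ((u32)addr[1] << MACSTNADDR2_OCTET_2_SHIFT) |
		      ((u32)addr[0] << MACSTNADDR2_OCTET_1_SHIFT);
		out_be32(&ug_regs->macstnaddr2, val);
	}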
477 | /* UCC GETH UEMPR (Ethernet Mac Parameter Register) */ | ||
478 | #define UEMPR_PAUSE_TIME_VALUE_SHIFT (31 - 15) /* Pause time | ||
479 | value << | ||
480 | shift */ | ||
481 | #define UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT (31 - 31) /* Extended | ||
482 | pause time | ||
483 | value << | ||
484 | shift */ | ||
485 | |||
486 | /* UCC GETH UTBIPAR (Ten Bit Interface Physical Address Register) */ | ||
487 | #define UTBIPAR_PHY_ADDRESS_SHIFT (31 - 31) /* Phy address | ||
488 | << shift */ | ||
489 | #define UTBIPAR_PHY_ADDRESS_MASK 0x0000001f /* Phy address | ||
490 | mask */ | ||
491 | |||
492 | /* UCC GETH UESCR (Ethernet Statistics Control Register) */ | ||
493 | #define UESCR_AUTOZ 0x8000 /* Automatically zero | ||
494 | addressed | ||
495 | statistical counter | ||
496 | values */ | ||
497 | #define UESCR_CLRCNT 0x4000 /* Clear all statistics | ||
498 | counters */ | ||
499 | #define UESCR_MAXCOV_SHIFT (15 - 7) /* Max | ||
500 | Coalescing | ||
501 | Value << | ||
502 | shift */ | ||
503 | #define UESCR_SCOV_SHIFT (15 - 15) /* Status | ||
504 | Coalescing | ||
505 | Value << | ||
506 | shift */ | ||
507 | |||
508 | /* UCC GETH UDSR (Data Synchronization Register) */ | ||
509 | #define UDSR_MAGIC 0x067E | ||
510 | |||
511 | typedef struct ucc_geth_thread_data_tx { | ||
512 | u8 res0[104]; | ||
513 | } __attribute__ ((packed)) ucc_geth_thread_data_tx_t; | ||
514 | |||
515 | typedef struct ucc_geth_thread_data_rx { | ||
516 | u8 res0[40]; | ||
517 | } __attribute__ ((packed)) ucc_geth_thread_data_rx_t; | ||
518 | |||
519 | /* Send Queue Queue-Descriptor */ | ||
520 | typedef struct ucc_geth_send_queue_qd { | ||
521 | u32 bd_ring_base; /* pointer to BD ring base address */ | ||
522 | u8 res0[0x8]; | ||
523 | u32 last_bd_completed_address;/* initialize to last entry in BD ring */ | ||
524 | u8 res1[0x30]; | ||
525 | } __attribute__ ((packed)) ucc_geth_send_queue_qd_t; | ||
526 | |||
527 | typedef struct ucc_geth_send_queue_mem_region { | ||
528 | ucc_geth_send_queue_qd_t sqqd[NUM_TX_QUEUES]; | ||
529 | } __attribute__ ((packed)) ucc_geth_send_queue_mem_region_t; | ||
530 | |||
531 | typedef struct ucc_geth_thread_tx_pram { | ||
532 | u8 res0[64]; | ||
533 | } __attribute__ ((packed)) ucc_geth_thread_tx_pram_t; | ||
534 | |||
535 | typedef struct ucc_geth_thread_rx_pram { | ||
536 | u8 res0[128]; | ||
537 | } __attribute__ ((packed)) ucc_geth_thread_rx_pram_t; | ||
538 | |||
539 | #define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING 64 | ||
540 | #define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8 64 | ||
541 | #define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16 96 | ||
542 | |||
543 | typedef struct ucc_geth_scheduler { | ||
544 | u16 cpucount0; /* CPU packet counter */ | ||
545 | u16 cpucount1; /* CPU packet counter */ | ||
546 | u16 cecount0; /* QE packet counter */ | ||
547 | u16 cecount1; /* QE packet counter */ | ||
548 | u16 cpucount2; /* CPU packet counter */ | ||
549 | u16 cpucount3; /* CPU packet counter */ | ||
550 | u16 cecount2; /* QE packet counter */ | ||
551 | u16 cecount3; /* QE packet counter */ | ||
552 | u16 cpucount4; /* CPU packet counter */ | ||
553 | u16 cpucount5; /* CPU packet counter */ | ||
554 | u16 cecount4; /* QE packet counter */ | ||
555 | u16 cecount5; /* QE packet counter */ | ||
556 | u16 cpucount6; /* CPU packet counter */ | ||
557 | u16 cpucount7; /* CPU packet counter */ | ||
558 | u16 cecount6; /* QE packet counter */ | ||
559 | u16 cecount7; /* QE packet counter */ | ||
560 | u32 weightstatus[NUM_TX_QUEUES]; /* accumulated weight factor */ | ||
561 | u32 rtsrshadow; /* temporary variable handled by QE */ | ||
562 | u32 time; /* temporary variable handled by QE */ | ||
563 | u32 ttl; /* temporary variable handled by QE */ | ||
564 | u32 mblinterval; /* max burst length interval */ | ||
565 | u16 nortsrbytetime; /* normalized value of byte time in tsr units */ | ||
566 | u8 fracsiz; /* radix 2 log value of denom. of | ||
567 | NorTSRByteTime */ | ||
568 | u8 res0[1]; | ||
569 | u8 strictpriorityq; /* Strict Priority Mask register */ | ||
570 | u8 txasap; /* Transmit ASAP register */ | ||
571 | u8 extrabw; /* Extra BandWidth register */ | ||
572 | u8 oldwfqmask; /* temporary variable handled by QE */ | ||
573 | u8 weightfactor[NUM_TX_QUEUES]; | ||
574 | /**< weight factor for queues */ | ||
575 | u32 minw; /* temporary variable handled by QE */ | ||
576 | u8 res1[0x70 - 0x64]; | ||
577 | } __attribute__ ((packed)) ucc_geth_scheduler_t; | ||
578 | |||
579 | typedef struct ucc_geth_tx_firmware_statistics_pram { | ||
580 | u32 sicoltx; /* single collision */ | ||
581 | u32 mulcoltx; /* multiple collision */ | ||
582 | u32 latecoltxfr; /* late collision */ | ||
583 | u32 frabortduecol; /* frames aborted due to transmit collision */ | ||
584 | u32 frlostinmactxer; /* frames lost due to internal MAC error | ||
585 | transmission that are not counted on any | ||
586 | other counter */ | ||
587 | u32 carriersenseertx; /* carrier sense error */ | ||
588 | u32 frtxok; /* frames transmitted OK */ | ||
589 | u32 txfrexcessivedefer; /* frames with deferral time greater than | ||
590 | specified threshold */ | ||
591 | u32 txpkts256; /* total packets (including bad) between 256 | ||
592 | and 511 octets */ | ||
593 | u32 txpkts512; /* total packets (including bad) between 512 | ||
594 | and 1023 octets */ | ||
595 | u32 txpkts1024; /* total packets (including bad) between 1024 | ||
596 | and 1518 octets */ | ||
597 | u32 txpktsjumbo; /* total packets (including bad) between 1024 | ||
598 | and MAXLength octets */ | ||
599 | } __attribute__ ((packed)) ucc_geth_tx_firmware_statistics_pram_t; | ||
600 | |||
601 | typedef struct ucc_geth_rx_firmware_statistics_pram { | ||
602 | u32 frrxfcser; /* frames with crc error */ | ||
603 | u32 fraligner; /* frames with alignment error */ | ||
604 | u32 inrangelenrxer; /* in range length error */ | ||
605 | u32 outrangelenrxer; /* out of range length error */ | ||
606 | u32 frtoolong; /* frame too long */ | ||
607 | u32 runt; /* runt */ | ||
608 | u32 verylongevent; /* very long event */ | ||
609 | u32 symbolerror; /* symbol error */ | ||
610 | u32 dropbsy; /* drop because of BD not ready */ | ||
611 | u8 res0[0x8]; | ||
612 | u32 mismatchdrop; /* drop because of MAC filtering (e.g. address | ||
613 | or type mismatch) */ | ||
614 | u32 underpkts; /* total frames less than 64 octets */ | ||
615 | u32 pkts256; /* total frames (including bad) between 256 and | ||
616 | 511 octets */ | ||
617 | u32 pkts512; /* total frames (including bad) between 512 and | ||
618 | 1023 octets */ | ||
619 | u32 pkts1024; /* total frames (including bad) between 1024 | ||
620 | and 1518 octets */ | ||
621 | u32 pktsjumbo; /* total frames (including bad) between 1024 | ||
622 | and MAXLength octets */ | ||
623 | u32 frlossinmacer; /* frames lost because of internal MAC error | ||
624 | that is not counted in any other counter */ | ||
625 | u32 pausefr; /* pause frames */ | ||
626 | u8 res1[0x4]; | ||
627 | u32 removevlan; /* total frames that had their VLAN tag removed | ||
628 | */ | ||
629 | u32 replacevlan; /* total frames that had their VLAN tag | ||
630 | replaced */ | ||
631 | u32 insertvlan; /* total frames that had their VLAN tag | ||
632 | inserted */ | ||
633 | } __attribute__ ((packed)) ucc_geth_rx_firmware_statistics_pram_t; | ||
634 | |||
635 | typedef struct ucc_geth_rx_interrupt_coalescing_entry { | ||
636 | u32 interruptcoalescingmaxvalue; /* interrupt coalescing max | ||
637 | value */ | ||
638 | u32 interruptcoalescingcounter; /* interrupt coalescing counter, | ||
639 | initialize to | ||
640 | interruptcoalescingmaxvalue */ | ||
641 | } __attribute__ ((packed)) ucc_geth_rx_interrupt_coalescing_entry_t; | ||
642 | |||
643 | typedef struct ucc_geth_rx_interrupt_coalescing_table { | ||
644 | ucc_geth_rx_interrupt_coalescing_entry_t coalescingentry[NUM_RX_QUEUES]; | ||
645 | /**< interrupt coalescing entry */ | ||
646 | } __attribute__ ((packed)) ucc_geth_rx_interrupt_coalescing_table_t; | ||
647 | |||
648 | typedef struct ucc_geth_rx_prefetched_bds { | ||
649 | qe_bd_t bd[NUM_BDS_IN_PREFETCHED_BDS]; /* prefetched bd */ | ||
650 | } __attribute__ ((packed)) ucc_geth_rx_prefetched_bds_t; | ||
651 | |||
652 | typedef struct ucc_geth_rx_bd_queues_entry { | ||
653 | u32 bdbaseptr; /* BD base pointer */ | ||
654 | u32 bdptr; /* BD pointer */ | ||
655 | u32 externalbdbaseptr; /* external BD base pointer */ | ||
656 | u32 externalbdptr; /* external BD pointer */ | ||
657 | } __attribute__ ((packed)) ucc_geth_rx_bd_queues_entry_t; | ||
658 | |||
659 | typedef struct ucc_geth_tx_global_pram { | ||
660 | u16 temoder; | ||
661 | u8 res0[0x38 - 0x02]; | ||
662 | u32 sqptr; /* a base pointer to send queue memory region */ | ||
663 | u32 schedulerbasepointer; /* a base pointer to scheduler memory | ||
664 | region */ | ||
665 | u32 txrmonbaseptr; /* base pointer to Tx RMON statistics counter */ | ||
666 | u32 tstate; /* tx internal state. High byte contains | ||
667 | function code */ | ||
668 | u8 iphoffset[TX_IP_OFFSET_ENTRY_MAX]; | ||
669 | u32 vtagtable[0x8]; /* 8 4-byte VLAN tags */ | ||
670 | u32 tqptr; /* a base pointer to the Tx Queues Memory | ||
671 | Region */ | ||
672 | u8 res2[0x80 - 0x74]; | ||
673 | } __attribute__ ((packed)) ucc_geth_tx_global_pram_t; | ||
674 | |||
675 | /* structure representing Extended Filtering Global Parameters in PRAM */ | ||
676 | typedef struct ucc_geth_exf_global_pram { | ||
677 | u32 l2pcdptr; /* individual address filter, high */ | ||
678 | u8 res0[0x10 - 0x04]; | ||
679 | } __attribute__ ((packed)) ucc_geth_exf_global_pram_t; | ||
680 | |||
681 | typedef struct ucc_geth_rx_global_pram { | ||
682 | u32 remoder; /* ethernet mode reg. */ | ||
683 | u32 rqptr; /* base pointer to the Rx Queues Memory Region*/ | ||
684 | u32 res0[0x1]; | ||
685 | u8 res1[0x20 - 0xC]; | ||
686 | u16 typeorlen; /* cutoff point below which the type/len field | ||
687 |                                           is considered a length */ | ||
688 | u8 res2[0x1]; | ||
689 | u8 rxgstpack; /* acknowledgement on GRACEFUL STOP RX command*/ | ||
690 | u32 rxrmonbaseptr; /* base pointer to Rx RMON statistics counter */ | ||
691 | u8 res3[0x30 - 0x28]; | ||
692 | u32 intcoalescingptr; /* Interrupt coalescing table pointer */ | ||
693 | u8 res4[0x36 - 0x34]; | ||
694 | u8 rstate; /* rx internal state. High byte contains | ||
695 | function code */ | ||
696 | u8 res5[0x46 - 0x37]; | ||
697 | u16 mrblr; /* max receive buffer length reg. */ | ||
698 | u32 rbdqptr; /* base pointer to RxBD parameter table | ||
699 | description */ | ||
700 | u16 mflr; /* max frame length reg. */ | ||
701 | u16 minflr; /* min frame length reg. */ | ||
702 | u16 maxd1; /* max dma1 length reg. */ | ||
703 | u16 maxd2; /* max dma2 length reg. */ | ||
704 | u32 ecamptr; /* external CAM address */ | ||
705 | u32 l2qt; /* VLAN priority mapping table. */ | ||
706 | u32 l3qt[0x8]; /* IP priority mapping table. */ | ||
707 | u16 vlantype; /* vlan type */ | ||
708 | u16 vlantci; /* default vlan tci */ | ||
709 | u8 addressfiltering[64]; /* address filtering data structure */ | ||
710 | u32 exfGlobalParam; /* base address for extended filtering global | ||
711 | parameters */ | ||
712 | u8 res6[0x100 - 0xC4]; /* Initialize to zero */ | ||
713 | } __attribute__ ((packed)) ucc_geth_rx_global_pram_t; | ||
714 | |||
715 | #define GRACEFUL_STOP_ACKNOWLEDGE_RX 0x01 | ||
716 | |||
717 | /* structure representing InitEnet command */ | ||
718 | typedef struct ucc_geth_init_pram { | ||
719 | u8 resinit1; | ||
720 | u8 resinit2; | ||
721 | u8 resinit3; | ||
722 | u8 resinit4; | ||
723 | u16 resinit5; | ||
724 | u8 res1[0x1]; | ||
725 | u8 largestexternallookupkeysize; | ||
726 | u32 rgftgfrxglobal; | ||
727 | u32 rxthread[ENET_INIT_PARAM_MAX_ENTRIES_RX]; /* rx threads */ | ||
728 | u8 res2[0x38 - 0x30]; | ||
729 | u32 txglobal; /* tx global */ | ||
730 | u32 txthread[ENET_INIT_PARAM_MAX_ENTRIES_TX]; /* tx threads */ | ||
731 | u8 res3[0x1]; | ||
732 | } __attribute__ ((packed)) ucc_geth_init_pram_t; | ||
733 | |||
734 | #define ENET_INIT_PARAM_RGF_SHIFT (32 - 4) | ||
735 | #define ENET_INIT_PARAM_TGF_SHIFT (32 - 8) | ||
736 | |||
737 | #define ENET_INIT_PARAM_RISC_MASK 0x0000003f | ||
738 | #define ENET_INIT_PARAM_PTR_MASK 0x00ffffc0 | ||
739 | #define ENET_INIT_PARAM_SNUM_MASK 0xff000000 | ||
740 | #define ENET_INIT_PARAM_SNUM_SHIFT 24 | ||
741 | |||
742 | #define ENET_INIT_PARAM_MAGIC_RES_INIT1 0x06 | ||
743 | #define ENET_INIT_PARAM_MAGIC_RES_INIT2 0x30 | ||
744 | #define ENET_INIT_PARAM_MAGIC_RES_INIT3 0xff | ||
745 | #define ENET_INIT_PARAM_MAGIC_RES_INIT4 0x00 | ||
746 | #define ENET_INIT_PARAM_MAGIC_RES_INIT5 0x0400 | ||
747 | |||
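Given the RISC, PTR and SNUM masks and shifts just above, each 32-bit InitEnet entry appears to carry a serial number in the top byte, a parameter-RAM pointer in the middle bits and the RISC allocation in the low six bits. A hedged sketch of composing such an entry follows; the function name is the editor's invention, not code from this commit.

	/* Illustration only: compose an InitEnet entry word from an SNUM,
	 * a parameter-RAM offset and a RISC allocation value, using the
	 * masks defined above.  Function name is hypothetical. */
	static u32 make_init_enet_entry(u8 snum, u32 pram_offset, u32 risc)
	{
		return ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
		       (pram_offset & ENET_INIT_PARAM_PTR_MASK) |
		       (risc & ENET_INIT_PARAM_RISC_MASK);
	}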
748 | /* structure representing 82xx Address Filtering Enet Address in PRAM */ | ||
749 | typedef struct ucc_geth_82xx_enet_address { | ||
750 | u8 res1[0x2]; | ||
751 | u16 h; /* address (MSB) */ | ||
752 | u16 m; /* address */ | ||
753 | u16 l; /* address (LSB) */ | ||
754 | } __attribute__ ((packed)) ucc_geth_82xx_enet_address_t; | ||
755 | |||
756 | /* structure representing 82xx Address Filtering PRAM */ | ||
757 | typedef struct ucc_geth_82xx_address_filtering_pram { | ||
758 | u32 iaddr_h; /* individual address filter, high */ | ||
759 | u32 iaddr_l; /* individual address filter, low */ | ||
760 | u32 gaddr_h; /* group address filter, high */ | ||
761 | u32 gaddr_l; /* group address filter, low */ | ||
762 | ucc_geth_82xx_enet_address_t taddr; | ||
763 | ucc_geth_82xx_enet_address_t paddr[NUM_OF_PADDRS]; | ||
764 | u8 res0[0x40 - 0x38]; | ||
765 | } __attribute__ ((packed)) ucc_geth_82xx_address_filtering_pram_t; | ||
766 | |||
767 | /* GETH Tx firmware statistics structure, used when calling | ||
768 | UCC_GETH_GetStatistics. */ | ||
769 | typedef struct ucc_geth_tx_firmware_statistics { | ||
770 | u32 sicoltx; /* single collision */ | ||
771 | u32 mulcoltx; /* multiple collision */ | ||
772 | u32 latecoltxfr; /* late collision */ | ||
773 | u32 frabortduecol; /* frames aborted due to transmit collision */ | ||
774 | u32 frlostinmactxer; /* frames lost due to internal MAC error | ||
775 | transmission that are not counted on any | ||
776 | other counter */ | ||
777 | u32 carriersenseertx; /* carrier sense error */ | ||
778 | u32 frtxok; /* frames transmitted OK */ | ||
779 | u32 txfrexcessivedefer; /* frames with deferral time greater than | ||
780 | specified threshold */ | ||
781 | u32 txpkts256; /* total packets (including bad) between 256 | ||
782 | and 511 octets */ | ||
783 | u32 txpkts512; /* total packets (including bad) between 512 | ||
784 | and 1023 octets */ | ||
785 | u32 txpkts1024; /* total packets (including bad) between 1024 | ||
786 | and 1518 octets */ | ||
787 | u32 txpktsjumbo; /* total packets (including bad) between 1024 | ||
788 | and MAXLength octets */ | ||
789 | } __attribute__ ((packed)) ucc_geth_tx_firmware_statistics_t; | ||
790 | |||
791 | /* GETH Rx firmware statistics structure, used when calling | ||
792 | UCC_GETH_GetStatistics. */ | ||
793 | typedef struct ucc_geth_rx_firmware_statistics { | ||
794 | u32 frrxfcser; /* frames with crc error */ | ||
795 | u32 fraligner; /* frames with alignment error */ | ||
796 | u32 inrangelenrxer; /* in range length error */ | ||
797 | u32 outrangelenrxer; /* out of range length error */ | ||
798 | u32 frtoolong; /* frame too long */ | ||
799 | u32 runt; /* runt */ | ||
800 | u32 verylongevent; /* very long event */ | ||
801 | u32 symbolerror; /* symbol error */ | ||
802 | u32 dropbsy; /* drop because of BD not ready */ | ||
803 | u8 res0[0x8]; | ||
804 | u32 mismatchdrop; /* drop because of MAC filtering (e.g. address | ||
805 | or type mismatch) */ | ||
806 | u32 underpkts; /* total frames less than 64 octets */ | ||
807 | u32 pkts256; /* total frames (including bad) between 256 and | ||
808 | 511 octets */ | ||
809 | u32 pkts512; /* total frames (including bad) between 512 and | ||
810 | 1023 octets */ | ||
811 | u32 pkts1024; /* total frames (including bad) between 1024 | ||
812 | and 1518 octets */ | ||
813 | u32 pktsjumbo; /* total frames (including bad) between 1024 | ||
814 | and MAXLength octets */ | ||
815 | u32 frlossinmacer; /* frames lost because of internal MAC error | ||
816 | that is not counted in any other counter */ | ||
817 | u32 pausefr; /* pause frames */ | ||
818 | u8 res1[0x4]; | ||
819 | u32 removevlan; /* total frames that had their VLAN tag removed | ||
820 | */ | ||
821 | u32 replacevlan; /* total frames that had their VLAN tag | ||
822 | replaced */ | ||
823 | u32 insertvlan; /* total frames that had their VLAN tag | ||
824 | inserted */ | ||
825 | } __attribute__ ((packed)) ucc_geth_rx_firmware_statistics_t; | ||
826 | |||
827 | /* GETH hardware statistics structure, used when calling | ||
828 | UCC_GETH_GetStatistics. */ | ||
829 | typedef struct ucc_geth_hardware_statistics { | ||
830 | u32 tx64; /* Total number of frames (including bad | ||
831 | frames) transmitted that were exactly of the | ||
832 |                                           minimal length (64 for untagged, 68 for | ||
833 | tagged, or with length exactly equal to the | ||
834 |                                           parameter MINLength) */ | ||
835 | u32 tx127; /* Total number of frames (including bad | ||
836 | frames) transmitted that were between | ||
837 | MINLength (Including FCS length==4) and 127 | ||
838 | octets */ | ||
839 | u32 tx255; /* Total number of frames (including bad | ||
840 | frames) transmitted that were between 128 | ||
841 | (Including FCS length==4) and 255 octets */ | ||
842 | u32 rx64; /* Total number of frames received including | ||
843 |                                           bad frames that were exactly of the minimal | ||
844 | length (64 bytes) */ | ||
845 | u32 rx127; /* Total number of frames (including bad | ||
846 | frames) received that were between MINLength | ||
847 | (Including FCS length==4) and 127 octets */ | ||
848 | u32 rx255; /* Total number of frames (including bad | ||
849 | frames) received that were between 128 | ||
850 | (Including FCS length==4) and 255 octets */ | ||
851 | u32 txok; /* Total number of octets residing in frames | ||
852 |                                           that were involved in successful | ||
853 | transmission */ | ||
854 | u16 txcf; /* Total number of PAUSE control frames | ||
855 | transmitted by this MAC */ | ||
856 | u32 tmca; /* Total number of frames that were transmitted | ||
857 |                                           successfully with the group address bit set | ||
858 | that are not broadcast frames */ | ||
859 | u32 tbca; /* Total number of frames transmitted | ||
860 |                                           successfully that had destination address | ||
861 | field equal to the broadcast address */ | ||
862 | u32 rxfok; /* Total number of frames received OK */ | ||
863 | u32 rxbok; /* Total number of octets received OK */ | ||
864 | u32 rbyt; /* Total number of octets received including | ||
865 | octets in bad frames. Must be implemented in | ||
866 | HW because it includes octets in frames that | ||
867 | never even reach the UCC */ | ||
868 | u32 rmca; /* Total number of frames that were received | ||
869 |                                           successfully with the group address bit set | ||
870 | that are not broadcast frames */ | ||
871 | u32 rbca; /* Total number of frames received successfully | ||
872 | that had destination address equal to the | ||
873 | broadcast address */ | ||
874 | } __attribute__ ((packed)) ucc_geth_hardware_statistics_t; | ||
875 | |||
876 | /* UCC GETH Tx errors returned via TxConf callback */ | ||
877 | #define TX_ERRORS_DEF 0x0200 | ||
878 | #define TX_ERRORS_EXDEF 0x0100 | ||
879 | #define TX_ERRORS_LC 0x0080 | ||
880 | #define TX_ERRORS_RL 0x0040 | ||
881 | #define TX_ERRORS_RC_MASK 0x003C | ||
882 | #define TX_ERRORS_RC_SHIFT 2 | ||
883 | #define TX_ERRORS_UN 0x0002 | ||
884 | #define TX_ERRORS_CSL 0x0001 | ||
885 | |||
886 | /* UCC GETH Rx errors returned via RxStore callback */ | ||
887 | #define RX_ERRORS_CMR 0x0200 | ||
888 | #define RX_ERRORS_M 0x0100 | ||
889 | #define RX_ERRORS_BC 0x0080 | ||
890 | #define RX_ERRORS_MC 0x0040 | ||
891 | |||
892 | /* Transmit BD. These are in addition to values defined in uccf. */ | ||
893 | #define T_VID 0x003c0000 /* insert VLAN id index mask. */ | ||
894 | #define T_DEF (((u32) TX_ERRORS_DEF ) << 16) | ||
895 | #define T_EXDEF (((u32) TX_ERRORS_EXDEF ) << 16) | ||
896 | #define T_LC (((u32) TX_ERRORS_LC ) << 16) | ||
897 | #define T_RL (((u32) TX_ERRORS_RL ) << 16) | ||
898 | #define T_RC_MASK (((u32) TX_ERRORS_RC_MASK ) << 16) | ||
899 | #define T_UN (((u32) TX_ERRORS_UN ) << 16) | ||
900 | #define T_CSL (((u32) TX_ERRORS_CSL ) << 16) | ||
901 | #define T_ERRORS_REPORT (T_DEF | T_EXDEF | T_LC | T_RL | T_RC_MASK \ | ||
902 | | T_UN | T_CSL) /* transmit errors to report */ | ||
903 | |||
904 | /* Receive BD. These are in addition to values defined in uccf. */ | ||
905 | #define R_LG 0x00200000 /* Frame length violation. */ | ||
906 | #define R_NO 0x00100000 /* Non-octet aligned frame. */ | ||
907 | #define R_SH 0x00080000 /* Short frame. */ | ||
908 | #define R_CR 0x00040000 /* CRC error. */ | ||
909 | #define R_OV 0x00020000 /* Overrun. */ | ||
910 | #define R_IPCH 0x00010000 /* IP checksum check failed. */ | ||
911 | #define R_CMR (((u32) RX_ERRORS_CMR ) << 16) | ||
912 | #define R_M (((u32) RX_ERRORS_M ) << 16) | ||
913 | #define R_BC (((u32) RX_ERRORS_BC ) << 16) | ||
914 | #define R_MC (((u32) RX_ERRORS_MC ) << 16) | ||
915 | #define R_ERRORS_REPORT (R_CMR | R_M | R_BC | R_MC) /* receive errors to | ||
916 | report */ | ||
917 | #define R_ERRORS_FATAL (R_LG | R_NO | R_SH | R_CR | \ | ||
918 | R_OV | R_IPCH) /* receive errors to discard */ | ||
919 | |||
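The Rx BD bits above split into two groups: R_ERRORS_FATAL for frames that should be discarded, and R_ERRORS_REPORT for errors that are only reported back via the RxStore callback. The short sketch below shows that classification; it is an editorial illustration with a hypothetical helper name, not code from this commit.

	/* Illustration only: classify an Rx BD status word with the masks
	 * defined above.  Hypothetical helper, not part of the driver. */
	static int rx_frame_usable(u32 bd_status)
	{
		if (bd_status & R_ERRORS_FATAL)
			return 0;	/* frame is discarded */
		/* R_ERRORS_REPORT bits are non-fatal: the frame is kept and
		 * the error is merely reported. */
		return 1;
	}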
920 | /* Alignments */ | ||
921 | #define UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT 256 | ||
922 | #define UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT 128 | ||
923 | #define UCC_GETH_THREAD_RX_PRAM_ALIGNMENT 128 | ||
924 | #define UCC_GETH_THREAD_TX_PRAM_ALIGNMENT 64 | ||
925 | #define UCC_GETH_THREAD_DATA_ALIGNMENT 256 /* spec gives values | ||
926 | based on num of | ||
927 | threads, but always | ||
928 | using the maximum is | ||
929 | easier */ | ||
930 | #define UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT 32 | ||
931 | #define UCC_GETH_SCHEDULER_ALIGNMENT 4 /* This is a guess */ | ||
932 | #define UCC_GETH_TX_STATISTICS_ALIGNMENT 4 /* This is a guess */ | ||
933 | #define UCC_GETH_RX_STATISTICS_ALIGNMENT 4 /* This is a guess */ | ||
934 | #define UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT 4 /* This is a | ||
935 | guess */ | ||
936 | #define UCC_GETH_RX_BD_QUEUES_ALIGNMENT 8 /* This is a guess */ | ||
937 | #define UCC_GETH_RX_PREFETCHED_BDS_ALIGNMENT 128 /* This is a guess */ | ||
938 | #define UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT 4 /* This | ||
939 | is a | ||
940 | guess | ||
941 | */ | ||
942 | #define UCC_GETH_RX_BD_RING_ALIGNMENT 32 | ||
943 | #define UCC_GETH_TX_BD_RING_ALIGNMENT 32 | ||
944 | #define UCC_GETH_MRBLR_ALIGNMENT 128 | ||
945 | #define UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT 4 | ||
946 | #define UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT 32 | ||
947 | #define UCC_GETH_RX_DATA_BUF_ALIGNMENT 64 | ||
948 | |||
949 | #define UCC_GETH_TAD_EF 0x80 | ||
950 | #define UCC_GETH_TAD_V 0x40 | ||
951 | #define UCC_GETH_TAD_REJ 0x20 | ||
952 | #define UCC_GETH_TAD_VTAG_OP_RIGHT_SHIFT 2 | ||
953 | #define UCC_GETH_TAD_VTAG_OP_SHIFT 6 | ||
954 | #define UCC_GETH_TAD_V_NON_VTAG_OP 0x20 | ||
955 | #define UCC_GETH_TAD_RQOS_SHIFT 0 | ||
956 | #define UCC_GETH_TAD_V_PRIORITY_SHIFT 5 | ||
957 | #define UCC_GETH_TAD_CFI 0x10 | ||
958 | |||
959 | #define UCC_GETH_VLAN_PRIORITY_MAX 8 | ||
960 | #define UCC_GETH_IP_PRIORITY_MAX 64 | ||
961 | #define UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX 8 | ||
962 | #define UCC_GETH_RX_BD_RING_SIZE_MIN 8 | ||
963 | #define UCC_GETH_TX_BD_RING_SIZE_MIN 2 | ||
964 | |||
965 | #define UCC_GETH_SIZE_OF_BD QE_SIZEOF_BD | ||
966 | |||
967 | /* Driver definitions */ | ||
968 | #define TX_BD_RING_LEN 0x10 | ||
969 | #define RX_BD_RING_LEN 0x10 | ||
970 | #define UCC_GETH_DEV_WEIGHT TX_BD_RING_LEN | ||
971 | |||
972 | #define TX_RING_MOD_MASK(size) (size-1) | ||
973 | #define RX_RING_MOD_MASK(size) (size-1) | ||
974 | |||
975 | #define ENET_NUM_OCTETS_PER_ADDRESS 6 | ||
976 | #define ENET_GROUP_ADDR 0x01 /* Group address mask | ||
977 | for ethernet | ||
978 | addresses */ | ||
979 | |||
980 | #define TX_TIMEOUT (1*HZ) | ||
981 | #define SKB_ALLOC_TIMEOUT 100000 | ||
982 | #define PHY_INIT_TIMEOUT 100000 | ||
983 | #define PHY_CHANGE_TIME 2 | ||
984 | |||
985 | /* Fast Ethernet (10/100 Mbps) */ | ||
986 | #define UCC_GETH_URFS_INIT 512 /* Rx virtual FIFO size | ||
987 | */ | ||
988 | #define UCC_GETH_URFET_INIT 256 /* 1/2 urfs */ | ||
989 | #define UCC_GETH_URFSET_INIT 384 /* 3/4 urfs */ | ||
990 | #define UCC_GETH_UTFS_INIT 512 /* Tx virtual FIFO size | ||
991 | */ | ||
992 | #define UCC_GETH_UTFET_INIT 256 /* 1/2 utfs */ | ||
993 | #define UCC_GETH_UTFTT_INIT 128 | ||
994 | /* Gigabit Ethernet (1000 Mbps) */ | ||
995 | #define UCC_GETH_URFS_GIGA_INIT 4096/*2048*/ /* Rx virtual | ||
996 | FIFO size */ | ||
997 | #define UCC_GETH_URFET_GIGA_INIT 2048/*1024*/ /* 1/2 urfs */ | ||
998 | #define UCC_GETH_URFSET_GIGA_INIT 3072/*1536*/ /* 3/4 urfs */ | ||
999 | #define UCC_GETH_UTFS_GIGA_INIT 8192/*2048*/ /* Tx virtual | ||
1000 | FIFO size */ | ||
1001 | #define UCC_GETH_UTFET_GIGA_INIT 4096/*1024*/ /* 1/2 utfs */ | ||
1002 | #define UCC_GETH_UTFTT_GIGA_INIT 0x400/*0x40*/ /* */ | ||
1003 | |||
1004 | #define UCC_GETH_REMODER_INIT 0 /* bits that must be | ||
1005 | set */ | ||
1006 | #define UCC_GETH_TEMODER_INIT 0xC000 /* bits that must be set */ | ||
1007 | #define UCC_GETH_UPSMR_INIT (UPSMR_RES1) /* Start value | ||
1008 | for this | ||
1009 | register */ | ||
1010 | #define UCC_GETH_MACCFG1_INIT 0 | ||
1011 | #define UCC_GETH_MACCFG2_INIT (MACCFG2_RESERVED_1) | ||
1012 | #define UCC_GETH_MIIMCFG_MNGMNT_CLC_DIV_INIT \ | ||
1013 | (MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_112) | ||
1014 | |||
1015 | /* Ethernet speed */ | ||
1016 | typedef enum enet_speed { | ||
1017 | ENET_SPEED_10BT, /* 10 Base T */ | ||
1018 | ENET_SPEED_100BT, /* 100 Base T */ | ||
1019 | ENET_SPEED_1000BT /* 1000 Base T */ | ||
1020 | } enet_speed_e; | ||
1021 | |||
1022 | /* Ethernet Address Type. */ | ||
1023 | typedef enum enet_addr_type { | ||
1024 | ENET_ADDR_TYPE_INDIVIDUAL, | ||
1025 | ENET_ADDR_TYPE_GROUP, | ||
1026 | ENET_ADDR_TYPE_BROADCAST | ||
1027 | } enet_addr_type_e; | ||
1028 | |||
1029 | /* TBI / MII Set Register */ | ||
1030 | typedef enum enet_tbi_mii_reg { | ||
1031 | ENET_TBI_MII_CR = 0x00, /* Control (CR ) */ | ||
1032 | ENET_TBI_MII_SR = 0x01, /* Status (SR ) */ | ||
1033 | ENET_TBI_MII_ANA = 0x04, /* AN advertisement (ANA ) */ | ||
1034 | ENET_TBI_MII_ANLPBPA = 0x05, /* AN link partner base page ability | ||
1035 | (ANLPBPA) */ | ||
1036 | ENET_TBI_MII_ANEX = 0x06, /* AN expansion (ANEX ) */ | ||
1037 | ENET_TBI_MII_ANNPT = 0x07, /* AN next page transmit (ANNPT ) */ | ||
1038 | ENET_TBI_MII_ANLPANP = 0x08, /* AN link partner ability next page | ||
1039 | (ANLPANP) */ | ||
1040 | ENET_TBI_MII_EXST = 0x0F, /* Extended status (EXST ) */ | ||
1041 | ENET_TBI_MII_JD = 0x10, /* Jitter diagnostics (JD ) */ | ||
1042 | ENET_TBI_MII_TBICON = 0x11 /* TBI control (TBICON ) */ | ||
1043 | } enet_tbi_mii_reg_e; | ||
1044 | |||
1045 | /* UCC GETH 82xx Ethernet Address Recognition Location */ | ||
1046 | typedef enum ucc_geth_enet_address_recognition_location { | ||
1047 | UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_STATION_ADDRESS,/* station | ||
1048 | address */ | ||
1049 | UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR_FIRST, /* additional | ||
1050 | station | ||
1051 | address | ||
1052 | paddr1 */ | ||
1053 | UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR2, /* additional | ||
1054 | station | ||
1055 | address | ||
1056 | paddr2 */ | ||
1057 | UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR3, /* additional | ||
1058 | station | ||
1059 | address | ||
1060 | paddr3 */ | ||
1061 | UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR_LAST, /* additional | ||
1062 | station | ||
1063 | address | ||
1064 | paddr4 */ | ||
1065 | UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_GROUP_HASH, /* group hash */ | ||
1066 | UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_INDIVIDUAL_HASH /* individual | ||
1067 | hash */ | ||
1068 | } ucc_geth_enet_address_recognition_location_e; | ||
1069 | |||
1070 | /* UCC GETH vlan operation tagged */ | ||
1071 | typedef enum ucc_geth_vlan_operation_tagged { | ||
1072 | UCC_GETH_VLAN_OPERATION_TAGGED_NOP = 0x0, /* Tagged - nop */ | ||
1073 | UCC_GETH_VLAN_OPERATION_TAGGED_REPLACE_VID_PORTION_OF_Q_TAG | ||
1074 | = 0x1, /* Tagged - replace vid portion of q tag */ | ||
1075 | UCC_GETH_VLAN_OPERATION_TAGGED_IF_VID0_REPLACE_VID_WITH_DEFAULT_VALUE | ||
1076 | = 0x2, /* Tagged - if vid0 replace vid with default value */ | ||
1077 | UCC_GETH_VLAN_OPERATION_TAGGED_EXTRACT_Q_TAG_FROM_FRAME | ||
1078 | = 0x3 /* Tagged - extract q tag from frame */ | ||
1079 | } ucc_geth_vlan_operation_tagged_e; | ||
1080 | |||
1081 | /* UCC GETH vlan operation non-tagged */ | ||
1082 | typedef enum ucc_geth_vlan_operation_non_tagged { | ||
1083 | UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP = 0x0, /* Non tagged - nop */ | ||
1084 | UCC_GETH_VLAN_OPERATION_NON_TAGGED_Q_TAG_INSERT = 0x1 /* Non tagged - | ||
1085 | q tag insert | ||
1086 | */ | ||
1087 | } ucc_geth_vlan_operation_non_tagged_e; | ||
1088 | |||
1089 | /* UCC GETH Rx Quality of Service Mode */ | ||
1090 | typedef enum ucc_geth_qos_mode { | ||
1091 | UCC_GETH_QOS_MODE_DEFAULT = 0x0, /* default queue */ | ||
1092 | UCC_GETH_QOS_MODE_QUEUE_NUM_FROM_L2_CRITERIA = 0x1, /* queue | ||
1093 | determined | ||
1094 | by L2 | ||
1095 | criteria */ | ||
1096 | UCC_GETH_QOS_MODE_QUEUE_NUM_FROM_L3_CRITERIA = 0x2 /* queue | ||
1097 | determined | ||
1098 | by L3 | ||
1099 | criteria */ | ||
1100 | } ucc_geth_qos_mode_e; | ||
1101 | |||
1102 | /* UCC GETH Statistics Gathering Mode - These are bit flags, 'or' them together | ||
1103 | for combined functionality */ | ||
1104 | typedef enum ucc_geth_statistics_gathering_mode { | ||
1105 | UCC_GETH_STATISTICS_GATHERING_MODE_NONE = 0x00000000, /* No | ||
1106 | statistics | ||
1107 | gathering */ | ||
1108 | UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE = 0x00000001,/* Enable | ||
1109 | hardware | ||
1110 | statistics | ||
1111 | gathering | ||
1112 | */ | ||
1113 | UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX = 0x00000004,/*Enable | ||
1114 | firmware | ||
1115 | tx | ||
1116 | statistics | ||
1117 | gathering | ||
1118 | */ | ||
1119 | UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX = 0x00000008/* Enable | ||
1120 | firmware | ||
1121 | rx | ||
1122 | statistics | ||
1123 | gathering | ||
1124 | */ | ||
1125 | } ucc_geth_statistics_gathering_mode_e; | ||
1126 | |||
1127 | /* UCC GETH Pad and CRC Mode - Note, Padding without CRC is not possible */ | ||
1128 | typedef enum ucc_geth_maccfg2_pad_and_crc_mode { | ||
1129 | UCC_GETH_PAD_AND_CRC_MODE_NONE | ||
1130 | = MACCFG2_PAD_AND_CRC_MODE_NONE, /* Neither Padding | ||
1131 | short frames | ||
1132 | nor CRC */ | ||
1133 | UCC_GETH_PAD_AND_CRC_MODE_CRC_ONLY | ||
1134 | = MACCFG2_PAD_AND_CRC_MODE_CRC_ONLY, /* Append | ||
1135 | CRC only */ | ||
1136 | UCC_GETH_PAD_AND_CRC_MODE_PAD_AND_CRC = | ||
1137 | MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC | ||
1138 | } ucc_geth_maccfg2_pad_and_crc_mode_e; | ||
1139 | |||
1140 | /* UCC GETH upsmr Flow Control Mode */ | ||
1141 | typedef enum ucc_geth_flow_control_mode { | ||
1142 | UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE = 0x00000000, /* No automatic | ||
1143 | flow control | ||
1144 | */ | ||
1145 | UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_PAUSE_WHEN_EMERGENCY | ||
1146 | = 0x00004000 /* Send pause frame when RxFIFO reaches its | ||
1147 | emergency threshold */ | ||
1148 | } ucc_geth_flow_control_mode_e; | ||
1149 | |||
1150 | /* UCC GETH number of threads */ | ||
1151 | typedef enum ucc_geth_num_of_threads { | ||
1152 | UCC_GETH_NUM_OF_THREADS_1 = 0x1, /* 1 */ | ||
1153 | UCC_GETH_NUM_OF_THREADS_2 = 0x2, /* 2 */ | ||
1154 | UCC_GETH_NUM_OF_THREADS_4 = 0x0, /* 4 */ | ||
1155 | UCC_GETH_NUM_OF_THREADS_6 = 0x3, /* 6 */ | ||
1156 | UCC_GETH_NUM_OF_THREADS_8 = 0x4 /* 8 */ | ||
1157 | } ucc_geth_num_of_threads_e; | ||
1158 | |||
1159 | /* UCC GETH number of station addresses */ | ||
1160 | typedef enum ucc_geth_num_of_station_addresses { | ||
1161 | UCC_GETH_NUM_OF_STATION_ADDRESSES_1, /* 1 */ | ||
1162 | UCC_GETH_NUM_OF_STATION_ADDRESSES_5 /* 5 */ | ||
1163 | } ucc_geth_num_of_station_addresses_e; | ||
1164 | |||
1165 | typedef u8 enet_addr_t[ENET_NUM_OCTETS_PER_ADDRESS]; | ||
1166 | |||
1167 | /* UCC GETH 82xx Ethernet Address Container */ | ||
1168 | typedef struct enet_addr_container { | ||
1169 | enet_addr_t address; /* ethernet address */ | ||
1170 | ucc_geth_enet_address_recognition_location_e location; /* location in | ||
1171 | 82xx address | ||
1172 | recognition | ||
1173 | hardware */ | ||
1174 | struct list_head node; | ||
1175 | } enet_addr_container_t; | ||
1176 | |||
1177 | #define ENET_ADDR_CONT_ENTRY(ptr) list_entry(ptr, enet_addr_container_t, node) | ||
1178 | |||
1179 | /* UCC GETH Termination Action Descriptor (TAD) structure. */ | ||
1180 | typedef struct ucc_geth_tad_params { | ||
1181 | int rx_non_dynamic_extended_features_mode; | ||
1182 | int reject_frame; | ||
1183 | ucc_geth_vlan_operation_tagged_e vtag_op; | ||
1184 | ucc_geth_vlan_operation_non_tagged_e vnontag_op; | ||
1185 | ucc_geth_qos_mode_e rqos; | ||
1186 | u8 vpri; | ||
1187 | u16 vid; | ||
1188 | } ucc_geth_tad_params_t; | ||
1189 | |||
1190 | /* GETH protocol initialization structure */ | ||
1191 | typedef struct ucc_geth_info { | ||
1192 | ucc_fast_info_t uf_info; | ||
1193 | u8 numQueuesTx; | ||
1194 | u8 numQueuesRx; | ||
1195 | int ipCheckSumCheck; | ||
1196 | int ipCheckSumGenerate; | ||
1197 | int rxExtendedFiltering; | ||
1198 | u32 extendedFilteringChainPointer; | ||
1199 | u16 typeorlen; | ||
1200 | int dynamicMaxFrameLength; | ||
1201 | int dynamicMinFrameLength; | ||
1202 | u8 nonBackToBackIfgPart1; | ||
1203 | u8 nonBackToBackIfgPart2; | ||
1204 | u8 miminumInterFrameGapEnforcement; | ||
1205 | u8 backToBackInterFrameGap; | ||
1206 | int ipAddressAlignment; | ||
1207 | int lengthCheckRx; | ||
1208 | u32 mblinterval; | ||
1209 | u16 nortsrbytetime; | ||
1210 | u8 fracsiz; | ||
1211 | u8 strictpriorityq; | ||
1212 | u8 txasap; | ||
1213 | u8 extrabw; | ||
1214 | int miiPreambleSupress; | ||
1215 | u8 altBebTruncation; | ||
1216 | int altBeb; | ||
1217 | int backPressureNoBackoff; | ||
1218 | int noBackoff; | ||
1219 | int excessDefer; | ||
1220 | u8 maxRetransmission; | ||
1221 | u8 collisionWindow; | ||
1222 | int pro; | ||
1223 | int cap; | ||
1224 | int rsh; | ||
1225 | int rlpb; | ||
1226 | int cam; | ||
1227 | int bro; | ||
1228 | int ecm; | ||
1229 | int receiveFlowControl; | ||
1230 | u8 maxGroupAddrInHash; | ||
1231 | u8 maxIndAddrInHash; | ||
1232 | u8 prel; | ||
1233 | u16 maxFrameLength; | ||
1234 | u16 minFrameLength; | ||
1235 | u16 maxD1Length; | ||
1236 | u16 maxD2Length; | ||
1237 | u16 vlantype; | ||
1238 | u16 vlantci; | ||
1239 | u32 ecamptr; | ||
1240 | u32 eventRegMask; | ||
1241 | u16 pausePeriod; | ||
1242 | u16 extensionField; | ||
1243 | u8 phy_address; | ||
1244 | u32 board_flags; | ||
1245 | u32 phy_interrupt; | ||
1246 | u8 weightfactor[NUM_TX_QUEUES]; | ||
1247 | u8 interruptcoalescingmaxvalue[NUM_RX_QUEUES]; | ||
1248 | u8 l2qt[UCC_GETH_VLAN_PRIORITY_MAX]; | ||
1249 | u8 l3qt[UCC_GETH_IP_PRIORITY_MAX]; | ||
1250 | u32 vtagtable[UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX]; | ||
1251 | u8 iphoffset[TX_IP_OFFSET_ENTRY_MAX]; | ||
1252 | u16 bdRingLenTx[NUM_TX_QUEUES]; | ||
1253 | u16 bdRingLenRx[NUM_RX_QUEUES]; | ||
1254 | enet_interface_e enet_interface; | ||
1255 | ucc_geth_num_of_station_addresses_e numStationAddresses; | ||
1256 | qe_fltr_largest_external_tbl_lookup_key_size_e | ||
1257 | largestexternallookupkeysize; | ||
1258 | ucc_geth_statistics_gathering_mode_e statisticsMode; | ||
1259 | ucc_geth_vlan_operation_tagged_e vlanOperationTagged; | ||
1260 | ucc_geth_vlan_operation_non_tagged_e vlanOperationNonTagged; | ||
1261 | ucc_geth_qos_mode_e rxQoSMode; | ||
1262 | ucc_geth_flow_control_mode_e aufc; | ||
1263 | ucc_geth_maccfg2_pad_and_crc_mode_e padAndCrc; | ||
1264 | ucc_geth_num_of_threads_e numThreadsTx; | ||
1265 | ucc_geth_num_of_threads_e numThreadsRx; | ||
1266 | qe_risc_allocation_e riscTx; | ||
1267 | qe_risc_allocation_e riscRx; | ||
1268 | } ucc_geth_info_t; | ||
1269 | |||
1270 | /* structure representing UCC GETH */ | ||
1271 | typedef struct ucc_geth_private { | ||
1272 | ucc_geth_info_t *ug_info; | ||
1273 | ucc_fast_private_t *uccf; | ||
1274 | struct net_device *dev; | ||
1275 | struct net_device_stats stats; /* linux network statistics */ | ||
1276 | ucc_geth_t *ug_regs; | ||
1277 | ucc_geth_init_pram_t *p_init_enet_param_shadow; | ||
1278 | ucc_geth_exf_global_pram_t *p_exf_glbl_param; | ||
1279 | u32 exf_glbl_param_offset; | ||
1280 | ucc_geth_rx_global_pram_t *p_rx_glbl_pram; | ||
1281 | u32 rx_glbl_pram_offset; | ||
1282 | ucc_geth_tx_global_pram_t *p_tx_glbl_pram; | ||
1283 | u32 tx_glbl_pram_offset; | ||
1284 | ucc_geth_send_queue_mem_region_t *p_send_q_mem_reg; | ||
1285 | u32 send_q_mem_reg_offset; | ||
1286 | ucc_geth_thread_data_tx_t *p_thread_data_tx; | ||
1287 | u32 thread_dat_tx_offset; | ||
1288 | ucc_geth_thread_data_rx_t *p_thread_data_rx; | ||
1289 | u32 thread_dat_rx_offset; | ||
1290 | ucc_geth_scheduler_t *p_scheduler; | ||
1291 | u32 scheduler_offset; | ||
1292 | ucc_geth_tx_firmware_statistics_pram_t *p_tx_fw_statistics_pram; | ||
1293 | u32 tx_fw_statistics_pram_offset; | ||
1294 | ucc_geth_rx_firmware_statistics_pram_t *p_rx_fw_statistics_pram; | ||
1295 | u32 rx_fw_statistics_pram_offset; | ||
1296 | ucc_geth_rx_interrupt_coalescing_table_t *p_rx_irq_coalescing_tbl; | ||
1297 | u32 rx_irq_coalescing_tbl_offset; | ||
1298 | ucc_geth_rx_bd_queues_entry_t *p_rx_bd_qs_tbl; | ||
1299 | u32 rx_bd_qs_tbl_offset; | ||
1300 | u8 *p_tx_bd_ring[NUM_TX_QUEUES]; | ||
1301 | u32 tx_bd_ring_offset[NUM_TX_QUEUES]; | ||
1302 | u8 *p_rx_bd_ring[NUM_RX_QUEUES]; | ||
1303 | u32 rx_bd_ring_offset[NUM_RX_QUEUES]; | ||
1304 | u8 *confBd[NUM_TX_QUEUES]; | ||
1305 | u8 *txBd[NUM_TX_QUEUES]; | ||
1306 | u8 *rxBd[NUM_RX_QUEUES]; | ||
1307 | int badFrame[NUM_RX_QUEUES]; | ||
1308 | u16 cpucount[NUM_TX_QUEUES]; | ||
1309 | volatile u16 *p_cpucount[NUM_TX_QUEUES]; | ||
1310 | int indAddrRegUsed[NUM_OF_PADDRS]; | ||
1311 | enet_addr_t paddr[NUM_OF_PADDRS]; | ||
1312 | u8 numGroupAddrInHash; | ||
1313 | u8 numIndAddrInHash; | ||
1314 | u8 numIndAddrInReg; | ||
1315 | int rx_extended_features; | ||
1316 | int rx_non_dynamic_extended_features; | ||
1317 | struct list_head conf_skbs; | ||
1318 | struct list_head group_hash_q; | ||
1319 | struct list_head ind_hash_q; | ||
1320 | u32 saved_uccm; | ||
1321 | spinlock_t lock; | ||
1322 | /* pointers to arrays of skbuffs for tx and rx */ | ||
1323 | struct sk_buff **tx_skbuff[NUM_TX_QUEUES]; | ||
1324 | struct sk_buff **rx_skbuff[NUM_RX_QUEUES]; | ||
1325 | /* indices pointing to the next free sbk in skb arrays */ | ||
1326 | u16 skb_curtx[NUM_TX_QUEUES]; | ||
1327 | u16 skb_currx[NUM_RX_QUEUES]; | ||
1328 | /* index of the first skb which hasn't been transmitted yet. */ | ||
1329 | u16 skb_dirtytx[NUM_TX_QUEUES]; | ||
1330 | |||
1331 | struct work_struct tq; | ||
1332 | struct timer_list phy_info_timer; | ||
1333 | struct ugeth_mii_info *mii_info; | ||
1334 | int oldspeed; | ||
1335 | int oldduplex; | ||
1336 | int oldlink; | ||
1337 | } ucc_geth_private_t; | ||
1338 | |||
1339 | #endif /* __UCC_GETH_H__ */ | ||
diff --git a/drivers/net/ucc_geth_phy.c b/drivers/net/ucc_geth_phy.c new file mode 100644 index 000000000000..f91028c5386d --- /dev/null +++ b/drivers/net/ucc_geth_phy.c | |||
@@ -0,0 +1,801 @@ | |||
1 | /* | ||
2 | * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved. | ||
3 | * | ||
4 | * Author: Shlomi Gridish <gridish@freescale.com> | ||
5 | * | ||
6 | * Description: | ||
7 | * UCC GETH Driver -- PHY handling | ||
8 | * | ||
9 | * Changelog: | ||
10 | * Jun 28, 2006 Li Yang <LeoLi@freescale.com> | ||
11 | * - Rearrange code and style fixes | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or modify it | ||
14 | * under the terms of the GNU General Public License as published by the | ||
15 | * Free Software Foundation; either version 2 of the License, or (at your | ||
16 | * option) any later version. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #include <linux/config.h> | ||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/sched.h> | ||
23 | #include <linux/string.h> | ||
24 | #include <linux/errno.h> | ||
25 | #include <linux/slab.h> | ||
26 | #include <linux/interrupt.h> | ||
27 | #include <linux/init.h> | ||
28 | #include <linux/delay.h> | ||
29 | #include <linux/netdevice.h> | ||
30 | #include <linux/etherdevice.h> | ||
31 | #include <linux/skbuff.h> | ||
32 | #include <linux/spinlock.h> | ||
33 | #include <linux/mm.h> | ||
34 | #include <linux/module.h> | ||
35 | #include <linux/version.h> | ||
36 | #include <linux/crc32.h> | ||
37 | #include <linux/mii.h> | ||
38 | #include <linux/ethtool.h> | ||
39 | |||
40 | #include <asm/io.h> | ||
41 | #include <asm/irq.h> | ||
42 | #include <asm/uaccess.h> | ||
43 | |||
44 | #include "ucc_geth.h" | ||
45 | #include "ucc_geth_phy.h" | ||
46 | #include <platforms/83xx/mpc8360e_pb.h> | ||
47 | |||
48 | #define ugphy_printk(level, format, arg...) \ | ||
49 | printk(level format "\n", ## arg) | ||
50 | |||
51 | #define ugphy_dbg(format, arg...) \ | ||
52 | ugphy_printk(KERN_DEBUG, format , ## arg) | ||
53 | #define ugphy_err(format, arg...) \ | ||
54 | ugphy_printk(KERN_ERR, format , ## arg) | ||
55 | #define ugphy_info(format, arg...) \ | ||
56 | ugphy_printk(KERN_INFO, format , ## arg) | ||
57 | #define ugphy_warn(format, arg...) \ | ||
58 | ugphy_printk(KERN_WARNING, format , ## arg) | ||
59 | |||
60 | #ifdef UGETH_VERBOSE_DEBUG | ||
61 | #define ugphy_vdbg ugphy_dbg | ||
62 | #else | ||
63 | #define ugphy_vdbg(fmt, args...) do { } while (0) | ||
64 | #endif /* UGETH_VERBOSE_DEBUG */ | ||
65 | |||
66 | static void config_genmii_advert(struct ugeth_mii_info *mii_info); | ||
67 | static void genmii_setup_forced(struct ugeth_mii_info *mii_info); | ||
68 | static void genmii_restart_aneg(struct ugeth_mii_info *mii_info); | ||
69 | static int gbit_config_aneg(struct ugeth_mii_info *mii_info); | ||
70 | static int genmii_config_aneg(struct ugeth_mii_info *mii_info); | ||
71 | static int genmii_update_link(struct ugeth_mii_info *mii_info); | ||
72 | static int genmii_read_status(struct ugeth_mii_info *mii_info); | ||
73 | u16 phy_read(struct ugeth_mii_info *mii_info, u16 regnum); | ||
74 | void phy_write(struct ugeth_mii_info *mii_info, u16 regnum, u16 val); | ||
75 | |||
76 | static u8 *bcsr_regs = NULL; | ||
77 | |||
78 | /* Write value to the PHY for this device to the register at regnum, */ | ||
79 | /* waiting until the write is done before it returns. All PHY */ | ||
80 | /* configuration has to be done through the TSEC1 MIIM regs */ | ||
81 | void write_phy_reg(struct net_device *dev, int mii_id, int regnum, int value) | ||
82 | { | ||
83 | ucc_geth_private_t *ugeth = netdev_priv(dev); | ||
84 | ucc_mii_mng_t *mii_regs; | ||
85 | enet_tbi_mii_reg_e mii_reg = (enet_tbi_mii_reg_e) regnum; | ||
86 | u32 tmp_reg; | ||
87 | |||
88 | ugphy_vdbg("%s: IN", __FUNCTION__); | ||
89 | |||
90 | spin_lock_irq(&ugeth->lock); | ||
91 | |||
92 | mii_regs = ugeth->mii_info->mii_regs; | ||
93 | |||
94 | /* Set this UCC to be the master of the MII management */ | ||
95 | ucc_set_qe_mux_mii_mng(ugeth->ug_info->uf_info.ucc_num); | ||
96 | |||
97 | /* Stop the MII management read cycle */ | ||
98 | out_be32(&mii_regs->miimcom, 0); | ||
99 | /* Setting up the MII Management Address Register */ | ||
100 | tmp_reg = ((u32) mii_id << MIIMADD_PHY_ADDRESS_SHIFT) | mii_reg; | ||
101 | out_be32(&mii_regs->miimadd, tmp_reg); | ||
102 | |||
103 | /* Setting up the MII Management Control Register with the value */ | ||
104 | out_be32(&mii_regs->miimcon, (u32) value); | ||
105 | |||
106 | /* Wait till MII management write is complete */ | ||
107 | while ((in_be32(&mii_regs->miimind)) & MIIMIND_BUSY) | ||
108 | cpu_relax(); | ||
109 | |||
110 | spin_unlock_irq(&ugeth->lock); | ||
111 | |||
112 | udelay(10000); | ||
113 | } | ||
114 | |||
115 | /* Reads from register regnum in the PHY for device dev, */ | ||
116 | /* returning the value. Clears miimcom first. All PHY */ | ||
117 | /* configuration has to be done through the TSEC1 MIIM regs */ | ||
118 | int read_phy_reg(struct net_device *dev, int mii_id, int regnum) | ||
119 | { | ||
120 | ucc_geth_private_t *ugeth = netdev_priv(dev); | ||
121 | ucc_mii_mng_t *mii_regs; | ||
122 | enet_tbi_mii_reg_e mii_reg = (enet_tbi_mii_reg_e) regnum; | ||
123 | u32 tmp_reg; | ||
124 | u16 value; | ||
125 | |||
126 | ugphy_vdbg("%s: IN", __FUNCTION__); | ||
127 | |||
128 | spin_lock_irq(&ugeth->lock); | ||
129 | |||
130 | mii_regs = ugeth->mii_info->mii_regs; | ||
131 | |||
132 | /* Setting up the MII Management Address Register */ | ||
133 | tmp_reg = ((u32) mii_id << MIIMADD_PHY_ADDRESS_SHIFT) | mii_reg; | ||
134 | out_be32(&mii_regs->miimadd, tmp_reg); | ||
135 | |||
136 | /* Perform an MII management read cycle */ | ||
137 | out_be32(&mii_regs->miimcom, MIIMCOM_READ_CYCLE); | ||
138 | |||
139 | /* Wait till MII management read is complete */ | ||
140 | while ((in_be32(&mii_regs->miimind)) & MIIMIND_BUSY) | ||
141 | cpu_relax(); | ||
142 | |||
143 | udelay(10000); | ||
144 | |||
145 | /* Read MII management status */ | ||
146 | value = (u16) in_be32(&mii_regs->miimstat); | ||
147 | out_be32(&mii_regs->miimcom, 0); | ||
148 | if (value == 0xffff) | ||
149 | ugphy_warn("read wrong value : mii_id %d,mii_reg %d, base %08x", | ||
150 | mii_id, mii_reg, (u32) & (mii_regs->miimcfg)); | ||
151 | |||
152 | spin_unlock_irq(&ugeth->lock); | ||
153 | |||
154 | return (value); | ||
155 | } | ||
156 | |||
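With write_phy_reg() and read_phy_reg() in place, any MII register can be reached through the UCC's MII management block. As a hedged usage sketch (not part of this commit), the 32-bit PHY identifier could be read back through these accessors using the standard MII_PHYSID1/MII_PHYSID2 registers from <linux/mii.h>; the helper name below is hypothetical.

	/* Illustration only: read the PHY identifier via the accessors
	 * above.  Hypothetical helper, not part of this commit. */
	static u32 read_phy_id(struct net_device *dev, int mii_id)
	{
		u32 id;

		id  = (u32)read_phy_reg(dev, mii_id, MII_PHYSID1) << 16;
		id |= (u32)read_phy_reg(dev, mii_id, MII_PHYSID2) & 0xffff;
		return id;
	}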
157 | void mii_clear_phy_interrupt(struct ugeth_mii_info *mii_info) | ||
158 | { | ||
159 | ugphy_vdbg("%s: IN", __FUNCTION__); | ||
160 | |||
161 | if (mii_info->phyinfo->ack_interrupt) | ||
162 | mii_info->phyinfo->ack_interrupt(mii_info); | ||
163 | } | ||
164 | |||
165 | void mii_configure_phy_interrupt(struct ugeth_mii_info *mii_info, | ||
166 | u32 interrupts) | ||
167 | { | ||
168 | ugphy_vdbg("%s: IN", __FUNCTION__); | ||
169 | |||
170 | mii_info->interrupts = interrupts; | ||
171 | if (mii_info->phyinfo->config_intr) | ||
172 | mii_info->phyinfo->config_intr(mii_info); | ||
173 | } | ||
174 | |||
175 | /* Writes MII_ADVERTISE with the appropriate values, after | ||
176 | * sanitizing advertise to make sure only supported features | ||
177 | * are advertised | ||
178 | */ | ||
179 | static void config_genmii_advert(struct ugeth_mii_info *mii_info) | ||
180 | { | ||
181 | u32 advertise; | ||
182 | u16 adv; | ||
183 | |||
184 | ugphy_vdbg("%s: IN", __FUNCTION__); | ||
185 | |||
186 | /* Only allow advertising what this PHY supports */ | ||
187 | mii_info->advertising &= mii_info->phyinfo->features; | ||
188 | advertise = mii_info->advertising; | ||
189 | |||
190 | /* Setup standard advertisement */ | ||
191 | adv = phy_read(mii_info, MII_ADVERTISE); | ||
192 | adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); | ||
193 | if (advertise & ADVERTISED_10baseT_Half) | ||
194 | adv |= ADVERTISE_10HALF; | ||
195 | if (advertise & ADVERTISED_10baseT_Full) | ||
196 | adv |= ADVERTISE_10FULL; | ||
197 | if (advertise & ADVERTISED_100baseT_Half) | ||
198 | adv |= ADVERTISE_100HALF; | ||
199 | if (advertise & ADVERTISED_100baseT_Full) | ||
200 | adv |= ADVERTISE_100FULL; | ||
201 | phy_write(mii_info, MII_ADVERTISE, adv); | ||
202 | } | ||
203 | |||
204 | static void genmii_setup_forced(struct ugeth_mii_info *mii_info) | ||
205 | { | ||
206 | u16 ctrl; | ||
207 | u32 features = mii_info->phyinfo->features; | ||
208 | |||
209 | ugphy_vdbg("%s: IN", __FUNCTION__); | ||
210 | |||
211 | ctrl = phy_read(mii_info, MII_BMCR); | ||
212 | |||
213 | ctrl &= | ||
214 | ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE); | ||
215 | ctrl |= BMCR_RESET; | ||
216 | |||
217 | switch (mii_info->speed) { | ||
218 | case SPEED_1000: | ||
219 | if (features & (SUPPORTED_1000baseT_Half | ||
220 | | SUPPORTED_1000baseT_Full)) { | ||
221 | ctrl |= BMCR_SPEED1000; | ||
222 | break; | ||
223 | } | ||
224 | mii_info->speed = SPEED_100; | ||
225 | case SPEED_100: | ||
226 | if (features & (SUPPORTED_100baseT_Half | ||
227 | | SUPPORTED_100baseT_Full)) { | ||
228 | ctrl |= BMCR_SPEED100; | ||
229 | break; | ||
230 | } | ||
231 | mii_info->speed = SPEED_10; | ||
232 | case SPEED_10: | ||
233 | if (features & (SUPPORTED_10baseT_Half | ||
234 | | SUPPORTED_10baseT_Full)) | ||
235 | break; | ||
236 | default: /* Unsupported speed! */ | ||
237 | ugphy_err("%s: Bad speed!", mii_info->dev->name); | ||
238 | break; | ||
239 | } | ||
240 | |||
241 | phy_write(mii_info, MII_BMCR, ctrl); | ||
242 | } | ||
243 | |||
244 | /* Enable and Restart Autonegotiation */ | ||
245 | static void genmii_restart_aneg(struct ugeth_mii_info *mii_info) | ||
246 | { | ||
247 | u16 ctl; | ||
248 | |||
249 | ugphy_vdbg("%s: IN", __FUNCTION__); | ||
250 | |||
251 | ctl = phy_read(mii_info, MII_BMCR); | ||
252 | ctl |= (BMCR_ANENABLE | BMCR_ANRESTART); | ||
253 | phy_write(mii_info, MII_BMCR, ctl); | ||
254 | } | ||
255 | |||
256 | static int gbit_config_aneg(struct ugeth_mii_info *mii_info) | ||
257 | { | ||
258 | u16 adv; | ||
259 | u32 advertise; | ||
260 | |||
261 | ugphy_vdbg("%s: IN", __FUNCTION__); | ||
262 | |||
263 | if (mii_info->autoneg) { | ||
264 | /* Configure the ADVERTISE register */ | ||
265 | config_genmii_advert(mii_info); | ||
266 | advertise = mii_info->advertising; | ||
267 | |||
268 | adv = phy_read(mii_info, MII_1000BASETCONTROL); | ||
269 | adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP | | ||
270 | MII_1000BASETCONTROL_HALFDUPLEXCAP); | ||
271 | if (advertise & SUPPORTED_1000baseT_Half) | ||
272 | adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP; | ||
273 | if (advertise & SUPPORTED_1000baseT_Full) | ||
274 | adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP; | ||
275 | phy_write(mii_info, MII_1000BASETCONTROL, adv); | ||
276 | |||
277 | /* Start/Restart aneg */ | ||
278 | genmii_restart_aneg(mii_info); | ||
279 | } else | ||
280 | genmii_setup_forced(mii_info); | ||
281 | |||
282 | return 0; | ||
283 | } | ||
284 | |||
285 | static int genmii_config_aneg(struct ugeth_mii_info *mii_info) | ||
286 | { | ||
287 | ugphy_vdbg("%s: IN", __FUNCTION__); | ||
288 | |||
289 | if (mii_info->autoneg) { | ||
290 | config_genmii_advert(mii_info); | ||
291 | genmii_restart_aneg(mii_info); | ||
292 | } else | ||
293 | genmii_setup_forced(mii_info); | ||
294 | |||
295 | return 0; | ||
296 | } | ||
297 | |||
298 | static int genmii_update_link(struct ugeth_mii_info *mii_info) | ||
299 | { | ||
300 | u16 status; | ||
301 | |||
302 | ugphy_vdbg("%s: IN", __FUNCTION__); | ||
303 | |||
304 | /* Do a fake read */ | ||
305 | phy_read(mii_info, MII_BMSR); | ||
306 | |||
307 | /* Read link and autonegotiation status */ | ||
308 | status = phy_read(mii_info, MII_BMSR); | ||
309 | if ((status & BMSR_LSTATUS) == 0) | ||
310 | mii_info->link = 0; | ||
311 | else | ||
312 | mii_info->link = 1; | ||
313 | |||
314 | /* If we are autonegotiating, and not done, | ||
315 | * return an error */ | ||
316 | if (mii_info->autoneg && !(status & BMSR_ANEGCOMPLETE)) | ||
317 | return -EAGAIN; | ||
318 | |||
319 | return 0; | ||
320 | } | ||
321 | |||
322 | static int genmii_read_status(struct ugeth_mii_info *mii_info) | ||
323 | { | ||
324 | u16 status; | ||
325 | int err; | ||
326 | |||
327 | ugphy_vdbg("%s: IN", __FUNCTION__); | ||
328 | |||
329 | /* Update the link, but return if there | ||
330 | * was an error */ | ||
331 | err = genmii_update_link(mii_info); | ||
332 | if (err) | ||
333 | return err; | ||
334 | |||
335 | if (mii_info->autoneg) { | ||
336 | status = phy_read(mii_info, MII_LPA); | ||
337 | |||
338 | if (status & (LPA_10FULL | LPA_100FULL)) | ||
339 | mii_info->duplex = DUPLEX_FULL; | ||
340 | else | ||
341 | mii_info->duplex = DUPLEX_HALF; | ||
342 | if (status & (LPA_100FULL | LPA_100HALF)) | ||
343 | mii_info->speed = SPEED_100; | ||
344 | else | ||
345 | mii_info->speed = SPEED_10; | ||
346 | mii_info->pause = 0; | ||
347 | } | ||
348 | /* On non-aneg, we assume what we put in BMCR is the speed, | ||
349 | * though magic-aneg shouldn't prevent this case from occurring | ||
350 | */ | ||
351 | |||
352 | return 0; | ||
353 | } | ||
354 | |||
355 | static int marvell_init(struct ugeth_mii_info *mii_info) | ||
356 | { | ||
357 | ugphy_vdbg("%s: IN", __FUNCTION__); | ||
358 | |||
359 | phy_write(mii_info, 0x14, 0x0cd2); | ||
360 | phy_write(mii_info, MII_BMCR, | ||
361 | phy_read(mii_info, MII_BMCR) | BMCR_RESET); | ||
362 | msleep(4000); | ||
363 | |||
364 | return 0; | ||
365 | } | ||
366 | |||
367 | static int marvell_config_aneg(struct ugeth_mii_info *mii_info) | ||
368 | { | ||
369 | ugphy_vdbg("%s: IN", __FUNCTION__); | ||
370 | |||
371 | /* The Marvell PHY has an erratum which requires | ||
372 | * that certain registers get written in order | ||
373 | * to restart autonegotiation */ | ||
374 | phy_write(mii_info, MII_BMCR, BMCR_RESET); | ||
375 | |||
376 | phy_write(mii_info, 0x1d, 0x1f); | ||
377 | phy_write(mii_info, 0x1e, 0x200c); | ||
378 | phy_write(mii_info, 0x1d, 0x5); | ||
379 | phy_write(mii_info, 0x1e, 0); | ||
380 | phy_write(mii_info, 0x1e, 0x100); | ||
381 | |||
382 | gbit_config_aneg(mii_info); | ||
383 | |||
384 | return 0; | ||
385 | } | ||
386 | |||
387 | static int marvell_read_status(struct ugeth_mii_info *mii_info) | ||
388 | { | ||
389 | u16 status; | ||
390 | int err; | ||
391 | |||
392 | ugphy_vdbg("%s: IN", __FUNCTION__); | ||
393 | |||
394 | /* Update the link, but return if there | ||
395 | * was an error */ | ||
396 | err = genmii_update_link(mii_info); | ||
397 | if (err) | ||
398 | return err; | ||
399 | |||
400 | /* If the link is up, read the speed and duplex */ | ||
401 | /* If we aren't autonegotiating, assume speeds | ||
402 | * are as set */ | ||
403 | if (mii_info->autoneg && mii_info->link) { | ||
404 | int speed; | ||
405 | status = phy_read(mii_info, MII_M1011_PHY_SPEC_STATUS); | ||
406 | |||
407 | /* Get the duplexity */ | ||
408 | if (status & MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX) | ||
409 | mii_info->duplex = DUPLEX_FULL; | ||
410 | else | ||
411 | mii_info->duplex = DUPLEX_HALF; | ||
412 | |||
413 | /* Get the speed */ | ||
414 | speed = status & MII_M1011_PHY_SPEC_STATUS_SPD_MASK; | ||
415 | switch (speed) { | ||
416 | case MII_M1011_PHY_SPEC_STATUS_1000: | ||
417 | mii_info->speed = SPEED_1000; | ||
418 | break; | ||
419 | case MII_M1011_PHY_SPEC_STATUS_100: | ||
420 | mii_info->speed = SPEED_100; | ||
421 | break; | ||
422 | default: | ||
423 | mii_info->speed = SPEED_10; | ||
424 | break; | ||
425 | } | ||
426 | mii_info->pause = 0; | ||
427 | } | ||
428 | |||
429 | return 0; | ||
430 | } | ||
431 | |||
432 | static int marvell_ack_interrupt(struct ugeth_mii_info *mii_info) | ||
433 | { | ||
434 | ugphy_vdbg("%s: IN", __FUNCTION__); | ||
435 | |||
436 | /* Clear the interrupts by reading the reg */ | ||
437 | phy_read(mii_info, MII_M1011_IEVENT); | ||
438 | |||
439 | return 0; | ||
440 | } | ||
441 | |||
442 | static int marvell_config_intr(struct ugeth_mii_info *mii_info) | ||
443 | { | ||
444 | ugphy_vdbg("%s: IN", __FUNCTION__); | ||
445 | |||
446 | if (mii_info->interrupts == MII_INTERRUPT_ENABLED) | ||
447 | phy_write(mii_info, MII_M1011_IMASK, MII_M1011_IMASK_INIT); | ||
448 | else | ||
449 | phy_write(mii_info, MII_M1011_IMASK, MII_M1011_IMASK_CLEAR); | ||
450 | |||
451 | return 0; | ||
452 | } | ||
453 | |||
454 | static int cis820x_init(struct ugeth_mii_info *mii_info) | ||
455 | { | ||
456 | ugphy_vdbg("%s: IN", __FUNCTION__); | ||
457 | |||
458 | phy_write(mii_info, MII_CIS8201_AUX_CONSTAT, | ||
459 | MII_CIS8201_AUXCONSTAT_INIT); | ||
460 | phy_write(mii_info, MII_CIS8201_EXT_CON1, MII_CIS8201_EXTCON1_INIT); | ||
461 | |||
462 | return 0; | ||
463 | } | ||
464 | |||
465 | static int cis820x_read_status(struct ugeth_mii_info *mii_info) | ||
466 | { | ||
467 | u16 status; | ||
468 | int err; | ||
469 | |||
470 | ugphy_vdbg("%s: IN", __FUNCTION__); | ||
471 | |||
472 | /* Update the link, but return if there | ||
473 | * was an error */ | ||
474 | err = genmii_update_link(mii_info); | ||
475 | if (err) | ||
476 | return err; | ||
477 | |||
478 | /* If the link is up, read the speed and duplex */ | ||
479 | /* If we aren't autonegotiating, assume speeds | ||
480 | * are as set */ | ||
481 | if (mii_info->autoneg && mii_info->link) { | ||
482 | int speed; | ||
483 | |||
484 | status = phy_read(mii_info, MII_CIS8201_AUX_CONSTAT); | ||
485 | if (status & MII_CIS8201_AUXCONSTAT_DUPLEX) | ||
486 | mii_info->duplex = DUPLEX_FULL; | ||
487 | else | ||
488 | mii_info->duplex = DUPLEX_HALF; | ||
489 | |||
490 | speed = status & MII_CIS8201_AUXCONSTAT_SPEED; | ||
491 | |||
492 | switch (speed) { | ||
493 | case MII_CIS8201_AUXCONSTAT_GBIT: | ||
494 | mii_info->speed = SPEED_1000; | ||
495 | break; | ||
496 | case MII_CIS8201_AUXCONSTAT_100: | ||
497 | mii_info->speed = SPEED_100; | ||
498 | break; | ||
499 | default: | ||
500 | mii_info->speed = SPEED_10; | ||
501 | break; | ||
502 | } | ||
503 | } | ||
504 | |||
505 | return 0; | ||
506 | } | ||
507 | |||
508 | static int cis820x_ack_interrupt(struct ugeth_mii_info *mii_info) | ||
509 | { | ||
510 | ugphy_vdbg("%s: IN", __FUNCTION__); | ||
511 | |||
512 | phy_read(mii_info, MII_CIS8201_ISTAT); | ||
513 | |||
514 | return 0; | ||
515 | } | ||
516 | |||
517 | static int cis820x_config_intr(struct ugeth_mii_info *mii_info) | ||
518 | { | ||
519 | ugphy_vdbg("%s: IN", __FUNCTION__); | ||
520 | |||
521 | if (mii_info->interrupts == MII_INTERRUPT_ENABLED) | ||
522 | phy_write(mii_info, MII_CIS8201_IMASK, MII_CIS8201_IMASK_MASK); | ||
523 | else | ||
524 | phy_write(mii_info, MII_CIS8201_IMASK, 0); | ||
525 | |||
526 | return 0; | ||
527 | } | ||
528 | |||
529 | #define DM9161_DELAY 10 | ||
530 | |||
531 | static int dm9161_read_status(struct ugeth_mii_info *mii_info) | ||
532 | { | ||
533 | u16 status; | ||
534 | int err; | ||
535 | |||
536 | ugphy_vdbg("%s: IN", __FUNCTION__); | ||
537 | |||
538 | /* Update the link, but return if there | ||
539 | * was an error */ | ||
540 | err = genmii_update_link(mii_info); | ||
541 | if (err) | ||
542 | return err; | ||
543 | |||
544 | /* If the link is up, read the speed and duplex */ | ||
545 | /* If we aren't autonegotiating, assume speeds | ||
546 | * are as set */ | ||
547 | if (mii_info->autoneg && mii_info->link) { | ||
548 | status = phy_read(mii_info, MII_DM9161_SCSR); | ||
549 | if (status & (MII_DM9161_SCSR_100F | MII_DM9161_SCSR_100H)) | ||
550 | mii_info->speed = SPEED_100; | ||
551 | else | ||
552 | mii_info->speed = SPEED_10; | ||
553 | |||
554 | if (status & (MII_DM9161_SCSR_100F | MII_DM9161_SCSR_10F)) | ||
555 | mii_info->duplex = DUPLEX_FULL; | ||
556 | else | ||
557 | mii_info->duplex = DUPLEX_HALF; | ||
558 | } | ||
559 | |||
560 | return 0; | ||
561 | } | ||
562 | |||
563 | static int dm9161_config_aneg(struct ugeth_mii_info *mii_info) | ||
564 | { | ||
565 | struct dm9161_private *priv = mii_info->priv; | ||
566 | |||
567 | ugphy_vdbg("%s: IN", __FUNCTION__); | ||
568 | |||
569 | if (0 == priv->resetdone) | ||
570 | return -EAGAIN; | ||
571 | |||
572 | return 0; | ||
573 | } | ||
574 | |||
575 | static void dm9161_timer(unsigned long data) | ||
576 | { | ||
577 | struct ugeth_mii_info *mii_info = (struct ugeth_mii_info *)data; | ||
578 | struct dm9161_private *priv = mii_info->priv; | ||
579 | u16 status = phy_read(mii_info, MII_BMSR); | ||
580 | |||
581 | ugphy_vdbg("%s: IN", __FUNCTION__); | ||
582 | |||
583 | if (status & BMSR_ANEGCOMPLETE) { | ||
584 | priv->resetdone = 1; | ||
585 | } else | ||
586 | mod_timer(&priv->timer, jiffies + DM9161_DELAY * HZ); | ||
587 | } | ||
588 | |||
589 | static int dm9161_init(struct ugeth_mii_info *mii_info) | ||
590 | { | ||
591 | struct dm9161_private *priv; | ||
592 | |||
593 | ugphy_vdbg("%s: IN", __FUNCTION__); | ||
594 | |||
595 | /* Allocate the private data structure */ | ||
596 | priv = kmalloc(sizeof(struct dm9161_private), GFP_KERNEL); | ||
597 | |||
598 | if (NULL == priv) | ||
599 | return -ENOMEM; | ||
600 | |||
601 | mii_info->priv = priv; | ||
602 | |||
603 | /* Reset is not done yet */ | ||
604 | priv->resetdone = 0; | ||
605 | |||
606 | phy_write(mii_info, MII_BMCR, | ||
607 | phy_read(mii_info, MII_BMCR) | BMCR_RESET); | ||
608 | |||
609 | phy_write(mii_info, MII_BMCR, | ||
610 | phy_read(mii_info, MII_BMCR) & ~BMCR_ISOLATE); | ||
611 | |||
612 | config_genmii_advert(mii_info); | ||
613 | /* Start/Restart aneg */ | ||
614 | genmii_config_aneg(mii_info); | ||
615 | |||
616 | /* Start a timer for DM9161_DELAY seconds to wait | ||
617 | * for the PHY to be ready */ | ||
618 | init_timer(&priv->timer); | ||
619 | priv->timer.function = &dm9161_timer; | ||
620 | priv->timer.data = (unsigned long)mii_info; | ||
621 | mod_timer(&priv->timer, jiffies + DM9161_DELAY * HZ); | ||
622 | |||
623 | return 0; | ||
624 | } | ||
625 | |||
626 | static void dm9161_close(struct ugeth_mii_info *mii_info) | ||
627 | { | ||
628 | struct dm9161_private *priv = mii_info->priv; | ||
629 | |||
630 | ugphy_vdbg("%s: IN", __FUNCTION__); | ||
631 | |||
632 | del_timer_sync(&priv->timer); | ||
633 | kfree(priv); | ||
634 | } | ||
635 | |||
636 | static int dm9161_ack_interrupt(struct ugeth_mii_info *mii_info) | ||
637 | { | ||
638 | /* FIXME: These lines are for bug fixing in the MPC8325. | ||
639 | Remove them from here when it's fixed */ | ||
640 | if (bcsr_regs == NULL) | ||
641 | bcsr_regs = (u8 *) ioremap(BCSR_PHYS_ADDR, BCSR_SIZE); | ||
642 | bcsr_regs[14] |= 0x40; | ||
643 | ugphy_vdbg("%s: IN", __FUNCTION__); | ||
644 | |||
645 | /* Clear the interrupts by reading the reg */ | ||
646 | phy_read(mii_info, MII_DM9161_INTR); | ||
647 | |||
648 | |||
649 | return 0; | ||
650 | } | ||
651 | |||
652 | static int dm9161_config_intr(struct ugeth_mii_info *mii_info) | ||
653 | { | ||
654 | /* FIXME: These lines are for bug fixing in the MPC8325. | ||
655 | Remove them from here when it's fixed */ | ||
656 | if (bcsr_regs == NULL) { | ||
657 | bcsr_regs = (u8 *) ioremap(BCSR_PHYS_ADDR, BCSR_SIZE); | ||
658 | bcsr_regs[14] &= ~0x40; | ||
659 | } | ||
660 | ugphy_vdbg("%s: IN", __FUNCTION__); | ||
661 | |||
662 | if (mii_info->interrupts == MII_INTERRUPT_ENABLED) | ||
663 | phy_write(mii_info, MII_DM9161_INTR, MII_DM9161_INTR_INIT); | ||
664 | else | ||
665 | phy_write(mii_info, MII_DM9161_INTR, MII_DM9161_INTR_STOP); | ||
666 | |||
667 | return 0; | ||
668 | } | ||
669 | |||
670 | /* Cicada 820x */ | ||
671 | static struct phy_info phy_info_cis820x = { | ||
672 | .phy_id = 0x000fc440, | ||
673 | .name = "Cicada Cis8204", | ||
674 | .phy_id_mask = 0x000fffc0, | ||
675 | .features = MII_GBIT_FEATURES, | ||
676 | .init = &cis820x_init, | ||
677 | .config_aneg = &gbit_config_aneg, | ||
678 | .read_status = &cis820x_read_status, | ||
679 | .ack_interrupt = &cis820x_ack_interrupt, | ||
680 | .config_intr = &cis820x_config_intr, | ||
681 | }; | ||
682 | |||
683 | static struct phy_info phy_info_dm9161 = { | ||
684 | .phy_id = 0x0181b880, | ||
685 | .phy_id_mask = 0x0ffffff0, | ||
686 | .name = "Davicom DM9161E", | ||
687 | .init = dm9161_init, | ||
688 | .config_aneg = dm9161_config_aneg, | ||
689 | .read_status = dm9161_read_status, | ||
690 | .close = dm9161_close, | ||
691 | }; | ||
692 | |||
693 | static struct phy_info phy_info_dm9161a = { | ||
694 | .phy_id = 0x0181b8a0, | ||
695 | .phy_id_mask = 0x0ffffff0, | ||
696 | .name = "Davicom DM9161A", | ||
697 | .features = MII_BASIC_FEATURES, | ||
698 | .init = dm9161_init, | ||
699 | .config_aneg = dm9161_config_aneg, | ||
700 | .read_status = dm9161_read_status, | ||
701 | .ack_interrupt = dm9161_ack_interrupt, | ||
702 | .config_intr = dm9161_config_intr, | ||
703 | .close = dm9161_close, | ||
704 | }; | ||
705 | |||
706 | static struct phy_info phy_info_marvell = { | ||
707 | .phy_id = 0x01410c00, | ||
708 | .phy_id_mask = 0xffffff00, | ||
709 | .name = "Marvell 88E11x1", | ||
710 | .features = MII_GBIT_FEATURES, | ||
711 | .init = &marvell_init, | ||
712 | .config_aneg = &marvell_config_aneg, | ||
713 | .read_status = &marvell_read_status, | ||
714 | .ack_interrupt = &marvell_ack_interrupt, | ||
715 | .config_intr = &marvell_config_intr, | ||
716 | }; | ||
717 | |||
718 | static struct phy_info phy_info_genmii = { | ||
719 | .phy_id = 0x00000000, | ||
720 | .phy_id_mask = 0x00000000, | ||
721 | .name = "Generic MII", | ||
722 | .features = MII_BASIC_FEATURES, | ||
723 | .config_aneg = genmii_config_aneg, | ||
724 | .read_status = genmii_read_status, | ||
725 | }; | ||
726 | |||
727 | static struct phy_info *phy_info[] = { | ||
728 | &phy_info_cis820x, | ||
729 | &phy_info_marvell, | ||
730 | &phy_info_dm9161, | ||
731 | &phy_info_dm9161a, | ||
732 | &phy_info_genmii, | ||
733 | NULL | ||
734 | }; | ||
735 | |||
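The table above is searched in order and is NULL-terminated, and the Generic MII entry matches any ID because both its phy_id and its mask are zero, so family-specific entries must stay ahead of it. A hypothetical example of how another PHY would be hooked in (name and ID values are invented for illustration; this is not part of the patch):

	/* Hypothetical entry -- illustrative only, not part of this patch. */
	static struct phy_info phy_info_example = {
		.phy_id = 0x12345670,		/* masked ID of the new PHY family */
		.phy_id_mask = 0xfffffff0,	/* ignore the revision nibble */
		.name = "Example PHY",
		.features = MII_BASIC_FEATURES,
		.config_aneg = genmii_config_aneg,	/* reuse the generic helpers */
		.read_status = genmii_read_status,
	};

	/* ...and list &phy_info_example in phy_info[] before &phy_info_genmii. */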
736 | u16 phy_read(struct ugeth_mii_info *mii_info, u16 regnum) | ||
737 | { | ||
738 | u16 retval; | ||
739 | unsigned long flags; | ||
740 | |||
741 | ugphy_vdbg("%s: IN", __FUNCTION__); | ||
742 | |||
743 | spin_lock_irqsave(&mii_info->mdio_lock, flags); | ||
744 | retval = mii_info->mdio_read(mii_info->dev, mii_info->mii_id, regnum); | ||
745 | spin_unlock_irqrestore(&mii_info->mdio_lock, flags); | ||
746 | |||
747 | return retval; | ||
748 | } | ||
749 | |||
750 | void phy_write(struct ugeth_mii_info *mii_info, u16 regnum, u16 val) | ||
751 | { | ||
752 | unsigned long flags; | ||
753 | |||
754 | ugphy_vdbg("%s: IN", __FUNCTION__); | ||
755 | |||
756 | spin_lock_irqsave(&mii_info->mdio_lock, flags); | ||
757 | mii_info->mdio_write(mii_info->dev, mii_info->mii_id, regnum, val); | ||
758 | spin_unlock_irqrestore(&mii_info->mdio_lock, flags); | ||
759 | } | ||
760 | |||
761 | /* Use the PHY ID registers to determine what type of PHY is attached | ||
762 | * to device dev. Return a struct phy_info structure describing that PHY. | ||
763 | */ | ||
764 | struct phy_info *get_phy_info(struct ugeth_mii_info *mii_info) | ||
765 | { | ||
766 | u16 phy_reg; | ||
767 | u32 phy_ID; | ||
768 | int i; | ||
769 | struct phy_info *theInfo = NULL; | ||
770 | struct net_device *dev = mii_info->dev; | ||
771 | |||
772 | ugphy_vdbg("%s: IN", __FUNCTION__); | ||
773 | |||
774 | /* Grab the bits from PHYIR1, and put them in the upper half */ | ||
775 | phy_reg = phy_read(mii_info, MII_PHYSID1); | ||
776 | phy_ID = (phy_reg & 0xffff) << 16; | ||
777 | |||
778 | /* Grab the bits from PHYIR2, and put them in the lower half */ | ||
779 | phy_reg = phy_read(mii_info, MII_PHYSID2); | ||
780 | phy_ID |= (phy_reg & 0xffff); | ||
781 | |||
782 | /* loop through all the known PHY types, and find one that */ | ||
783 | /* matches the ID we read from the PHY. */ | ||
784 | for (i = 0; phy_info[i]; i++) | ||
785 | if (phy_info[i]->phy_id == (phy_ID & phy_info[i]->phy_id_mask)){ | ||
786 | theInfo = phy_info[i]; | ||
787 | break; | ||
788 | } | ||
789 | |||
790 | /* This shouldn't happen, as we have generic PHY support */ | ||
791 | if (theInfo == NULL) { | ||
792 | ugphy_info("%s: PHY id %x is not supported!", dev->name, | ||
793 | phy_ID); | ||
794 | return NULL; | ||
795 | } else { | ||
796 | ugphy_info("%s: PHY is %s (%x)", dev->name, theInfo->name, | ||
797 | phy_ID); | ||
798 | } | ||
799 | |||
800 | return theInfo; | ||
801 | } | ||
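To make the masking in get_phy_info() concrete, here is a small stand-alone sketch of the same lookup using the ID/mask pairs from the tables in this file. The raw ID value is purely illustrative (it is not claimed to be any real chip's ID); the point is that the mask discards revision bits before the comparison.

	#include <stdio.h>
	#include <stdint.h>

	struct entry {
		uint32_t phy_id;	/* expected masked ID */
		uint32_t phy_id_mask;	/* bits that identify the family */
		const char *name;
	};

	/* Same order and values as phy_info[] above. */
	static const struct entry table[] = {
		{ 0x000fc440, 0x000fffc0, "Cicada Cis8204" },
		{ 0x01410c00, 0xffffff00, "Marvell 88E11x1" },
		{ 0x0181b880, 0x0ffffff0, "Davicom DM9161E" },
		{ 0x0181b8a0, 0x0ffffff0, "Davicom DM9161A" },
		{ 0x00000000, 0x00000000, "Generic MII" },
	};

	int main(void)
	{
		/* Illustrative raw ID: PHYSID1 in the upper 16 bits, PHYSID2 in the lower. */
		uint32_t phy_ID = 0x01410c62;
		unsigned int i;

		for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
			if (table[i].phy_id == (phy_ID & table[i].phy_id_mask)) {
				printf("PHY is %s (%x)\n", table[i].name, (unsigned)phy_ID);
				break;	/* masking makes the revision bits irrelevant */
			}
		}
		return 0;
	}

Here 0x01410c62 & 0xffffff00 yields 0x01410c00, so the Marvell entry matches even though the low revision bits differ.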
diff --git a/drivers/net/ucc_geth_phy.h b/drivers/net/ucc_geth_phy.h new file mode 100644 index 000000000000..2f98b8f1bb0a --- /dev/null +++ b/drivers/net/ucc_geth_phy.h | |||
@@ -0,0 +1,217 @@ | |||
1 | /* | ||
2 | * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved. | ||
3 | * | ||
4 | * Author: Shlomi Gridish <gridish@freescale.com> | ||
5 | * | ||
6 | * Description: | ||
7 | * UCC GETH Driver -- PHY handling | ||
8 | * | ||
9 | * Changelog: | ||
10 | * Jun 28, 2006 Li Yang <LeoLi@freescale.com> | ||
11 | * - Rearrange code and style fixes | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or modify it | ||
14 | * under the terms of the GNU General Public License as published by the | ||
15 | * Free Software Foundation; either version 2 of the License, or (at your | ||
16 | * option) any later version. | ||
17 | * | ||
18 | */ | ||
19 | #ifndef __UCC_GETH_PHY_H__ | ||
20 | #define __UCC_GETH_PHY_H__ | ||
21 | |||
22 | #define MII_end ((u32)-2) | ||
23 | #define MII_read ((u32)-1) | ||
24 | |||
25 | #define MIIMIND_BUSY 0x00000001 | ||
26 | #define MIIMIND_NOTVALID 0x00000004 | ||
27 | |||
28 | #define UGETH_AN_TIMEOUT 2000 | ||
29 | |||
30 | /* 1000BT control (Marvell & BCM54xx at least) */ | ||
31 | #define MII_1000BASETCONTROL 0x09 | ||
32 | #define MII_1000BASETCONTROL_FULLDUPLEXCAP 0x0200 | ||
33 | #define MII_1000BASETCONTROL_HALFDUPLEXCAP 0x0100 | ||
34 | |||
35 | /* Cicada Extended Control Register 1 */ | ||
36 | #define MII_CIS8201_EXT_CON1 0x17 | ||
37 | #define MII_CIS8201_EXTCON1_INIT 0x0000 | ||
38 | |||
39 | /* Cicada Interrupt Mask Register */ | ||
40 | #define MII_CIS8201_IMASK 0x19 | ||
41 | #define MII_CIS8201_IMASK_IEN 0x8000 | ||
42 | #define MII_CIS8201_IMASK_SPEED 0x4000 | ||
43 | #define MII_CIS8201_IMASK_LINK 0x2000 | ||
44 | #define MII_CIS8201_IMASK_DUPLEX 0x1000 | ||
45 | #define MII_CIS8201_IMASK_MASK 0xf000 | ||
46 | |||
47 | /* Cicada Interrupt Status Register */ | ||
48 | #define MII_CIS8201_ISTAT 0x1a | ||
49 | #define MII_CIS8201_ISTAT_STATUS 0x8000 | ||
50 | #define MII_CIS8201_ISTAT_SPEED 0x4000 | ||
51 | #define MII_CIS8201_ISTAT_LINK 0x2000 | ||
52 | #define MII_CIS8201_ISTAT_DUPLEX 0x1000 | ||
53 | |||
54 | /* Cicada Auxiliary Control/Status Register */ | ||
55 | #define MII_CIS8201_AUX_CONSTAT 0x1c | ||
56 | #define MII_CIS8201_AUXCONSTAT_INIT 0x0004 | ||
57 | #define MII_CIS8201_AUXCONSTAT_DUPLEX 0x0020 | ||
58 | #define MII_CIS8201_AUXCONSTAT_SPEED 0x0018 | ||
59 | #define MII_CIS8201_AUXCONSTAT_GBIT 0x0010 | ||
60 | #define MII_CIS8201_AUXCONSTAT_100 0x0008 | ||
61 | |||
62 | /* 88E1011 PHY Status Register */ | ||
63 | #define MII_M1011_PHY_SPEC_STATUS 0x11 | ||
64 | #define MII_M1011_PHY_SPEC_STATUS_1000 0x8000 | ||
65 | #define MII_M1011_PHY_SPEC_STATUS_100 0x4000 | ||
66 | #define MII_M1011_PHY_SPEC_STATUS_SPD_MASK 0xc000 | ||
67 | #define MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX 0x2000 | ||
68 | #define MII_M1011_PHY_SPEC_STATUS_RESOLVED 0x0800 | ||
69 | #define MII_M1011_PHY_SPEC_STATUS_LINK 0x0400 | ||
70 | |||
71 | #define MII_M1011_IEVENT 0x13 | ||
72 | #define MII_M1011_IEVENT_CLEAR 0x0000 | ||
73 | |||
74 | #define MII_M1011_IMASK 0x12 | ||
75 | #define MII_M1011_IMASK_INIT 0x6400 | ||
76 | #define MII_M1011_IMASK_CLEAR 0x0000 | ||
77 | |||
78 | #define MII_DM9161_SCR 0x10 | ||
79 | #define MII_DM9161_SCR_INIT 0x0610 | ||
80 | |||
81 | /* DM9161 Specified Configuration and Status Register */ | ||
82 | #define MII_DM9161_SCSR 0x11 | ||
83 | #define MII_DM9161_SCSR_100F 0x8000 | ||
84 | #define MII_DM9161_SCSR_100H 0x4000 | ||
85 | #define MII_DM9161_SCSR_10F 0x2000 | ||
86 | #define MII_DM9161_SCSR_10H 0x1000 | ||
87 | |||
88 | /* DM9161 Interrupt Register */ | ||
89 | #define MII_DM9161_INTR 0x15 | ||
90 | #define MII_DM9161_INTR_PEND 0x8000 | ||
91 | #define MII_DM9161_INTR_DPLX_MASK 0x0800 | ||
92 | #define MII_DM9161_INTR_SPD_MASK 0x0400 | ||
93 | #define MII_DM9161_INTR_LINK_MASK 0x0200 | ||
94 | #define MII_DM9161_INTR_MASK 0x0100 | ||
95 | #define MII_DM9161_INTR_DPLX_CHANGE 0x0010 | ||
96 | #define MII_DM9161_INTR_SPD_CHANGE 0x0008 | ||
97 | #define MII_DM9161_INTR_LINK_CHANGE 0x0004 | ||
98 | #define MII_DM9161_INTR_INIT 0x0000 | ||
99 | #define MII_DM9161_INTR_STOP \ | ||
100 | (MII_DM9161_INTR_DPLX_MASK | MII_DM9161_INTR_SPD_MASK \ | ||
101 | | MII_DM9161_INTR_LINK_MASK | MII_DM9161_INTR_MASK) | ||
102 | |||
103 | /* DM9161 10BT Configuration/Status */ | ||
104 | #define MII_DM9161_10BTCSR 0x12 | ||
105 | #define MII_DM9161_10BTCSR_INIT 0x7800 | ||
106 | |||
107 | #define MII_BASIC_FEATURES (SUPPORTED_10baseT_Half | \ | ||
108 | SUPPORTED_10baseT_Full | \ | ||
109 | SUPPORTED_100baseT_Half | \ | ||
110 | SUPPORTED_100baseT_Full | \ | ||
111 | SUPPORTED_Autoneg | \ | ||
112 | SUPPORTED_TP | \ | ||
113 | SUPPORTED_MII) | ||
114 | |||
115 | #define MII_GBIT_FEATURES (MII_BASIC_FEATURES | \ | ||
116 | SUPPORTED_1000baseT_Half | \ | ||
117 | SUPPORTED_1000baseT_Full) | ||
118 | |||
119 | #define MII_READ_COMMAND 0x00000001 | ||
120 | |||
121 | #define MII_INTERRUPT_DISABLED 0x0 | ||
122 | #define MII_INTERRUPT_ENABLED 0x1 | ||
123 | /* Taken from mii_if_info and sungem_phy.h */ | ||
124 | struct ugeth_mii_info { | ||
125 | /* Information about the PHY type */ | ||
126 | /* And management functions */ | ||
127 | struct phy_info *phyinfo; | ||
128 | |||
129 | ucc_mii_mng_t *mii_regs; | ||
130 | |||
131 | /* forced speed & duplex (no autoneg) | ||
132 | * partner speed & duplex & pause (autoneg) | ||
133 | */ | ||
134 | int speed; | ||
135 | int duplex; | ||
136 | int pause; | ||
137 | |||
138 | /* The most recently read link state */ | ||
139 | int link; | ||
140 | |||
141 | /* Enabled Interrupts */ | ||
142 | u32 interrupts; | ||
143 | |||
144 | u32 advertising; | ||
145 | int autoneg; | ||
146 | int mii_id; | ||
147 | |||
148 | /* private data pointer */ | ||
149 | /* For use by PHYs to maintain extra state */ | ||
150 | void *priv; | ||
151 | |||
152 | /* Provided by host chip */ | ||
153 | struct net_device *dev; | ||
154 | |||
155 | /* A lock to ensure that only one thing can read/write | ||
156 | * the MDIO bus at a time */ | ||
157 | spinlock_t mdio_lock; | ||
158 | |||
159 | /* Provided by ethernet driver */ | ||
160 | int (*mdio_read) (struct net_device * dev, int mii_id, int reg); | ||
161 | void (*mdio_write) (struct net_device * dev, int mii_id, int reg, | ||
162 | int val); | ||
163 | }; | ||
164 | |||
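The "provided by" comments in the structure above spell out the split of responsibilities: the MAC driver allocates this structure, wires in its own MDIO accessors and the host net_device, and only then asks get_phy_info() (declared at the bottom of this header, defined in ucc_geth_phy.c above) to pick a handler table. A hypothetical wiring fragment -- the helper names and values are invented for illustration, the real assignments live in the MAC driver, not in this header:

	/* Sketch of the MAC driver's setup path (illustrative only). */
	struct ugeth_mii_info *mii_info = kmalloc(sizeof(*mii_info), GFP_KERNEL);
	if (!mii_info)
		return -ENOMEM;

	mii_info->dev = dev;			/* host chip's net_device */
	mii_info->mii_id = 0;			/* PHY bus address, illustrative */
	mii_info->autoneg = 1;
	mii_info->mdio_read = my_mdio_read;	/* hypothetical MAC-side accessors */
	mii_info->mdio_write = my_mdio_write;
	spin_lock_init(&mii_info->mdio_lock);

	mii_info->phyinfo = get_phy_info(mii_info); /* may return NULL: unsupported ID */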
165 | /* struct phy_info: a structure which defines attributes for a PHY | ||
166 | * | ||
167 | * id will contain a number which represents the PHY. During | ||
168 | * startup, the driver will poll the PHY to find out what its | ||
169 | * UID--as defined by registers 2 and 3--is. The 32-bit result | ||
170 | * gotten from the PHY will be ANDed with phy_id_mask to | ||
171 | * discard any bits which may change based on revision numbers | ||
172 | * unimportant to functionality | ||
173 | * | ||
174 | * There are 7 callbacks which take a ugeth_mii_info structure. | ||
175 | * Each PHY must declare config_aneg and read_status. | ||
176 | */ | ||
177 | struct phy_info { | ||
178 | u32 phy_id; | ||
179 | char *name; | ||
180 | unsigned int phy_id_mask; | ||
181 | u32 features; | ||
182 | |||
183 | /* Called to initialize the PHY */ | ||
184 | int (*init) (struct ugeth_mii_info * mii_info); | ||
185 | |||
186 | /* Called to suspend the PHY to save power */ | ||
187 | int (*suspend) (struct ugeth_mii_info * mii_info); | ||
188 | |||
189 | /* Reconfigures autonegotiation (or disables it) */ | ||
190 | int (*config_aneg) (struct ugeth_mii_info * mii_info); | ||
191 | |||
192 | /* Determines the negotiated speed and duplex */ | ||
193 | int (*read_status) (struct ugeth_mii_info * mii_info); | ||
194 | |||
195 | /* Clears any pending interrupts */ | ||
196 | int (*ack_interrupt) (struct ugeth_mii_info * mii_info); | ||
197 | |||
198 | /* Enables or disables interrupts */ | ||
199 | int (*config_intr) (struct ugeth_mii_info * mii_info); | ||
200 | |||
201 | /* Clears up any memory if needed */ | ||
202 | void (*close) (struct ugeth_mii_info * mii_info); | ||
203 | }; | ||
204 | |||
205 | struct phy_info *get_phy_info(struct ugeth_mii_info *mii_info); | ||
206 | void write_phy_reg(struct net_device *dev, int mii_id, int regnum, int value); | ||
207 | int read_phy_reg(struct net_device *dev, int mii_id, int regnum); | ||
208 | void mii_clear_phy_interrupt(struct ugeth_mii_info *mii_info); | ||
209 | void mii_configure_phy_interrupt(struct ugeth_mii_info *mii_info, | ||
210 | u32 interrupts); | ||
211 | |||
212 | struct dm9161_private { | ||
213 | struct timer_list timer; | ||
214 | int resetdone; | ||
215 | }; | ||
216 | |||
217 | #endif /* __UCC_GETH_PHY_H__ */ | ||
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c index 98b6f3207d3d..ae971080e2e4 100644 --- a/drivers/net/via-rhine.c +++ b/drivers/net/via-rhine.c | |||
@@ -25,117 +25,13 @@ | |||
25 | version. He may or may not be interested in bug reports on this | 25 | version. He may or may not be interested in bug reports on this |
26 | code. You can find his versions at: | 26 | code. You can find his versions at: |
27 | http://www.scyld.com/network/via-rhine.html | 27 | http://www.scyld.com/network/via-rhine.html |
28 | 28 | [link no longer provides useful info -jgarzik] | |
29 | |||
30 | Linux kernel version history: | ||
31 | |||
32 | LK1.1.0: | ||
33 | - Jeff Garzik: softnet 'n stuff | ||
34 | |||
35 | LK1.1.1: | ||
36 | - Justin Guyett: softnet and locking fixes | ||
37 | - Jeff Garzik: use PCI interface | ||
38 | |||
39 | LK1.1.2: | ||
40 | - Urban Widmark: minor cleanups, merges from Becker 1.03a/1.04 versions | ||
41 | |||
42 | LK1.1.3: | ||
43 | - Urban Widmark: use PCI DMA interface (with thanks to the eepro100.c | ||
44 | code) update "Theory of Operation" with | ||
45 | softnet/locking changes | ||
46 | - Dave Miller: PCI DMA and endian fixups | ||
47 | - Jeff Garzik: MOD_xxx race fixes, updated PCI resource allocation | ||
48 | |||
49 | LK1.1.4: | ||
50 | - Urban Widmark: fix gcc 2.95.2 problem and | ||
51 | remove writel's to fixed address 0x7c | ||
52 | |||
53 | LK1.1.5: | ||
54 | - Urban Widmark: mdio locking, bounce buffer changes | ||
55 | merges from Beckers 1.05 version | ||
56 | added netif_running_on/off support | ||
57 | |||
58 | LK1.1.6: | ||
59 | - Urban Widmark: merges from Beckers 1.08b version (VT6102 + mdio) | ||
60 | set netif_running_on/off on startup, del_timer_sync | ||
61 | |||
62 | LK1.1.7: | ||
63 | - Manfred Spraul: added reset into tx_timeout | ||
64 | |||
65 | LK1.1.9: | ||
66 | - Urban Widmark: merges from Beckers 1.10 version | ||
67 | (media selection + eeprom reload) | ||
68 | - David Vrabel: merges from D-Link "1.11" version | ||
69 | (disable WOL and PME on startup) | ||
70 | |||
71 | LK1.1.10: | ||
72 | - Manfred Spraul: use "singlecopy" for unaligned buffers | ||
73 | don't allocate bounce buffers for !ReqTxAlign cards | ||
74 | |||
75 | LK1.1.11: | ||
76 | - David Woodhouse: Set dev->base_addr before the first time we call | ||
77 | wait_for_reset(). It's a lot happier that way. | ||
78 | Free np->tx_bufs only if we actually allocated it. | ||
79 | |||
80 | LK1.1.12: | ||
81 | - Martin Eriksson: Allow Memory-Mapped IO to be enabled. | ||
82 | |||
83 | LK1.1.13 (jgarzik): | ||
84 | - Add ethtool support | ||
85 | - Replace some MII-related magic numbers with constants | ||
86 | |||
87 | LK1.1.14 (Ivan G.): | ||
88 | - fixes comments for Rhine-III | ||
89 | - removes W_MAX_TIMEOUT (unused) | ||
90 | - adds HasDavicomPhy for Rhine-I (basis: linuxfet driver; my card | ||
91 | is R-I and has Davicom chip, flag is referenced in kernel driver) | ||
92 | - sends chip_id as a parameter to wait_for_reset since np is not | ||
93 | initialized on first call | ||
94 | - changes mmio "else if (chip_id==VT6102)" to "else" so it will work | ||
95 | for Rhine-III's (documentation says same bit is correct) | ||
96 | - transmit frame queue message is off by one - fixed | ||
97 | - adds IntrNormalSummary to "Something Wicked" exclusion list | ||
98 | so normal interrupts will not trigger the message (src: Donald Becker) | ||
99 | (Roger Luethi) | ||
100 | - show confused chip where to continue after Tx error | ||
101 | - location of collision counter is chip specific | ||
102 | - allow selecting backoff algorithm (module parameter) | ||
103 | |||
104 | LK1.1.15 (jgarzik): | ||
105 | - Use new MII lib helper generic_mii_ioctl | ||
106 | |||
107 | LK1.1.16 (Roger Luethi) | ||
108 | - Etherleak fix | ||
109 | - Handle Tx buffer underrun | ||
110 | - Fix bugs in full duplex handling | ||
111 | - New reset code uses "force reset" cmd on Rhine-II | ||
112 | - Various clean ups | ||
113 | |||
114 | LK1.1.17 (Roger Luethi) | ||
115 | - Fix race in via_rhine_start_tx() | ||
116 | - On errors, wait for Tx engine to turn off before scavenging | ||
117 | - Handle Tx descriptor write-back race on Rhine-II | ||
118 | - Force flushing for PCI posted writes | ||
119 | - More reset code changes | ||
120 | |||
121 | LK1.1.18 (Roger Luethi) | ||
122 | - No filtering multicast in promisc mode (Edward Peng) | ||
123 | - Fix for Rhine-I Tx timeouts | ||
124 | |||
125 | LK1.1.19 (Roger Luethi) | ||
126 | - Increase Tx threshold for unspecified errors | ||
127 | |||
128 | LK1.2.0-2.6 (Roger Luethi) | ||
129 | - Massive clean-up | ||
130 | - Rewrite PHY, media handling (remove options, full_duplex, backoff) | ||
131 | - Fix Tx engine race for good | ||
132 | - Craig Brind: Zero padded aligned buffers for short packets. | ||
133 | 29 | ||
134 | */ | 30 | */ |
135 | 31 | ||
136 | #define DRV_NAME "via-rhine" | 32 | #define DRV_NAME "via-rhine" |
137 | #define DRV_VERSION "1.2.0-2.6" | 33 | #define DRV_VERSION "1.4.1" |
138 | #define DRV_RELDATE "June-10-2004" | 34 | #define DRV_RELDATE "July-24-2006" |
139 | 35 | ||
140 | 36 | ||
141 | /* A few user-configurable values. | 37 | /* A few user-configurable values. |
@@ -148,6 +44,10 @@ static int max_interrupt_work = 20; | |||
148 | Setting to > 1518 effectively disables this feature. */ | 44 | Setting to > 1518 effectively disables this feature. */ |
149 | static int rx_copybreak; | 45 | static int rx_copybreak; |
150 | 46 | ||
47 | /* Work-around for broken BIOSes: they are unable to get the chip back out of | ||
48 | power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */ | ||
49 | static int avoid_D3; | ||
50 | |||
151 | /* | 51 | /* |
152 | * In case you are looking for 'options[]' or 'full_duplex[]', they | 52 | * In case you are looking for 'options[]' or 'full_duplex[]', they |
153 | * are gone. Use ethtool(8) instead. | 53 | * are gone. Use ethtool(8) instead. |
@@ -167,7 +67,11 @@ static const int multicast_filter_limit = 32; | |||
167 | There are no ill effects from too-large receive rings. */ | 67 | There are no ill effects from too-large receive rings. */ |
168 | #define TX_RING_SIZE 16 | 68 | #define TX_RING_SIZE 16 |
169 | #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */ | 69 | #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */ |
70 | #ifdef CONFIG_VIA_RHINE_NAPI | ||
71 | #define RX_RING_SIZE 64 | ||
72 | #else | ||
170 | #define RX_RING_SIZE 16 | 73 | #define RX_RING_SIZE 16 |
74 | #endif | ||
171 | 75 | ||
172 | 76 | ||
173 | /* Operational parameters that usually are not changed. */ | 77 | /* Operational parameters that usually are not changed. */ |
@@ -220,9 +124,11 @@ MODULE_LICENSE("GPL"); | |||
220 | module_param(max_interrupt_work, int, 0); | 124 | module_param(max_interrupt_work, int, 0); |
221 | module_param(debug, int, 0); | 125 | module_param(debug, int, 0); |
222 | module_param(rx_copybreak, int, 0); | 126 | module_param(rx_copybreak, int, 0); |
127 | module_param(avoid_D3, bool, 0); | ||
223 | MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt"); | 128 | MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt"); |
224 | MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)"); | 129 | MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)"); |
225 | MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames"); | 130 | MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames"); |
131 | MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)"); | ||
226 | 132 | ||
227 | /* | 133 | /* |
228 | Theory of Operation | 134 | Theory of Operation |
@@ -356,12 +262,11 @@ enum rhine_quirks { | |||
356 | /* Beware of PCI posted writes */ | 262 | /* Beware of PCI posted writes */ |
357 | #define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0) | 263 | #define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0) |
358 | 264 | ||
359 | static struct pci_device_id rhine_pci_tbl[] = | 265 | static const struct pci_device_id rhine_pci_tbl[] = { |
360 | { | 266 | { 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, }, /* VT86C100A */ |
361 | {0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* VT86C100A */ | 267 | { 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6102 */ |
362 | {0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* VT6102 */ | 268 | { 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, }, /* 6105{,L,LOM} */ |
363 | {0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* 6105{,L,LOM} */ | 269 | { 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6105M */ |
364 | {0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* VT6105M */ | ||
365 | { } /* terminate list */ | 270 | { } /* terminate list */ |
366 | }; | 271 | }; |
367 | MODULE_DEVICE_TABLE(pci, rhine_pci_tbl); | 272 | MODULE_DEVICE_TABLE(pci, rhine_pci_tbl); |
@@ -501,7 +406,7 @@ static void rhine_tx_timeout(struct net_device *dev); | |||
501 | static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev); | 406 | static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev); |
502 | static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs); | 407 | static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs); |
503 | static void rhine_tx(struct net_device *dev); | 408 | static void rhine_tx(struct net_device *dev); |
504 | static void rhine_rx(struct net_device *dev); | 409 | static int rhine_rx(struct net_device *dev, int limit); |
505 | static void rhine_error(struct net_device *dev, int intr_status); | 410 | static void rhine_error(struct net_device *dev, int intr_status); |
506 | static void rhine_set_rx_mode(struct net_device *dev); | 411 | static void rhine_set_rx_mode(struct net_device *dev); |
507 | static struct net_device_stats *rhine_get_stats(struct net_device *dev); | 412 | static struct net_device_stats *rhine_get_stats(struct net_device *dev); |
@@ -669,6 +574,32 @@ static void rhine_poll(struct net_device *dev) | |||
669 | } | 574 | } |
670 | #endif | 575 | #endif |
671 | 576 | ||
577 | #ifdef CONFIG_VIA_RHINE_NAPI | ||
578 | static int rhine_napipoll(struct net_device *dev, int *budget) | ||
579 | { | ||
580 | struct rhine_private *rp = netdev_priv(dev); | ||
581 | void __iomem *ioaddr = rp->base; | ||
582 | int done, limit = min(dev->quota, *budget); | ||
583 | |||
584 | done = rhine_rx(dev, limit); | ||
585 | *budget -= done; | ||
586 | dev->quota -= done; | ||
587 | |||
588 | if (done < limit) { | ||
589 | netif_rx_complete(dev); | ||
590 | |||
591 | iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow | | ||
592 | IntrRxDropped | IntrRxNoBuf | IntrTxAborted | | ||
593 | IntrTxDone | IntrTxError | IntrTxUnderrun | | ||
594 | IntrPCIErr | IntrStatsMax | IntrLinkChange, | ||
595 | ioaddr + IntrEnable); | ||
596 | return 0; | ||
597 | } | ||
598 | else | ||
599 | return 1; | ||
600 | } | ||
601 | #endif | ||
602 | |||
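For readers not used to the original ->poll/->weight NAPI interface this patch targets, the two halves added here cooperate roughly as follows: the CONFIG_VIA_RHINE_NAPI branch of the interrupt handler (further down in this patch) masks the RX interrupt sources and calls netif_rx_schedule() to put the device on the softirq poll list; the softirq then calls dev->poll(), which returns 0 once the ring is drained (after netif_rx_complete() and re-enabling interrupts) or nonzero to be polled again. A schematic sketch of that contract, mirroring rhine_napipoll(); the ring-processing helper is hypothetical and error paths are omitted:

	/* Schematic old-style NAPI poll handler (sketch, not code from this patch). */
	static int sketch_poll(struct net_device *dev, int *budget)
	{
		int limit = min(dev->quota, *budget);	/* per-device and global caps  */
		int done = sketch_rx(dev, limit);	/* hypothetical ring processor */

		*budget -= done;			/* charge the softirq budget   */
		dev->quota -= done;

		if (done < limit) {			/* ring drained before the cap */
			netif_rx_complete(dev);		/* leave the poll list...      */
			/* ...and unmask the device's RX interrupts here */
			return 0;			/* no more work                */
		}
		return 1;				/* poll us again               */
	}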
672 | static void rhine_hw_init(struct net_device *dev, long pioaddr) | 603 | static void rhine_hw_init(struct net_device *dev, long pioaddr) |
673 | { | 604 | { |
674 | struct rhine_private *rp = netdev_priv(dev); | 605 | struct rhine_private *rp = netdev_priv(dev); |
@@ -849,6 +780,10 @@ static int __devinit rhine_init_one(struct pci_dev *pdev, | |||
849 | #ifdef CONFIG_NET_POLL_CONTROLLER | 780 | #ifdef CONFIG_NET_POLL_CONTROLLER |
850 | dev->poll_controller = rhine_poll; | 781 | dev->poll_controller = rhine_poll; |
851 | #endif | 782 | #endif |
783 | #ifdef CONFIG_VIA_RHINE_NAPI | ||
784 | dev->poll = rhine_napipoll; | ||
785 | dev->weight = 64; | ||
786 | #endif | ||
852 | if (rp->quirks & rqRhineI) | 787 | if (rp->quirks & rqRhineI) |
853 | dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM; | 788 | dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM; |
854 | 789 | ||
@@ -894,6 +829,9 @@ static int __devinit rhine_init_one(struct pci_dev *pdev, | |||
894 | } | 829 | } |
895 | } | 830 | } |
896 | rp->mii_if.phy_id = phy_id; | 831 | rp->mii_if.phy_id = phy_id; |
832 | if (debug > 1 && avoid_D3) | ||
833 | printk(KERN_INFO "%s: No D3 power state at shutdown.\n", | ||
834 | dev->name); | ||
897 | 835 | ||
898 | return 0; | 836 | return 0; |
899 | 837 | ||
@@ -1119,6 +1057,8 @@ static void init_registers(struct net_device *dev) | |||
1119 | 1057 | ||
1120 | rhine_set_rx_mode(dev); | 1058 | rhine_set_rx_mode(dev); |
1121 | 1059 | ||
1060 | netif_poll_enable(dev); | ||
1061 | |||
1122 | /* Enable interrupts by setting the interrupt mask. */ | 1062 | /* Enable interrupts by setting the interrupt mask. */ |
1123 | iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow | | 1063 | iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow | |
1124 | IntrRxDropped | IntrRxNoBuf | IntrTxAborted | | 1064 | IntrRxDropped | IntrRxNoBuf | IntrTxAborted | |
@@ -1373,8 +1313,18 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs * | |||
1373 | dev->name, intr_status); | 1313 | dev->name, intr_status); |
1374 | 1314 | ||
1375 | if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped | | 1315 | if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped | |
1376 | IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) | 1316 | IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) { |
1377 | rhine_rx(dev); | 1317 | #ifdef CONFIG_VIA_RHINE_NAPI |
1318 | iowrite16(IntrTxAborted | | ||
1319 | IntrTxDone | IntrTxError | IntrTxUnderrun | | ||
1320 | IntrPCIErr | IntrStatsMax | IntrLinkChange, | ||
1321 | ioaddr + IntrEnable); | ||
1322 | |||
1323 | netif_rx_schedule(dev); | ||
1324 | #else | ||
1325 | rhine_rx(dev, RX_RING_SIZE); | ||
1326 | #endif | ||
1327 | } | ||
1378 | 1328 | ||
1379 | if (intr_status & (IntrTxErrSummary | IntrTxDone)) { | 1329 | if (intr_status & (IntrTxErrSummary | IntrTxDone)) { |
1380 | if (intr_status & IntrTxErrSummary) { | 1330 | if (intr_status & IntrTxErrSummary) { |
@@ -1472,13 +1422,12 @@ static void rhine_tx(struct net_device *dev) | |||
1472 | spin_unlock(&rp->lock); | 1422 | spin_unlock(&rp->lock); |
1473 | } | 1423 | } |
1474 | 1424 | ||
1475 | /* This routine is logically part of the interrupt handler, but isolated | 1425 | /* Process up to limit frames from receive ring */ |
1476 | for clarity and better register allocation. */ | 1426 | static int rhine_rx(struct net_device *dev, int limit) |
1477 | static void rhine_rx(struct net_device *dev) | ||
1478 | { | 1427 | { |
1479 | struct rhine_private *rp = netdev_priv(dev); | 1428 | struct rhine_private *rp = netdev_priv(dev); |
1429 | int count; | ||
1480 | int entry = rp->cur_rx % RX_RING_SIZE; | 1430 | int entry = rp->cur_rx % RX_RING_SIZE; |
1481 | int boguscnt = rp->dirty_rx + RX_RING_SIZE - rp->cur_rx; | ||
1482 | 1431 | ||
1483 | if (debug > 4) { | 1432 | if (debug > 4) { |
1484 | printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n", | 1433 | printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n", |
@@ -1487,16 +1436,18 @@ static void rhine_rx(struct net_device *dev) | |||
1487 | } | 1436 | } |
1488 | 1437 | ||
1489 | /* If EOP is set on the next entry, it's a new packet. Send it up. */ | 1438 | /* If EOP is set on the next entry, it's a new packet. Send it up. */ |
1490 | while (!(rp->rx_head_desc->rx_status & cpu_to_le32(DescOwn))) { | 1439 | for (count = 0; count < limit; ++count) { |
1491 | struct rx_desc *desc = rp->rx_head_desc; | 1440 | struct rx_desc *desc = rp->rx_head_desc; |
1492 | u32 desc_status = le32_to_cpu(desc->rx_status); | 1441 | u32 desc_status = le32_to_cpu(desc->rx_status); |
1493 | int data_size = desc_status >> 16; | 1442 | int data_size = desc_status >> 16; |
1494 | 1443 | ||
1444 | if (desc_status & DescOwn) | ||
1445 | break; | ||
1446 | |||
1495 | if (debug > 4) | 1447 | if (debug > 4) |
1496 | printk(KERN_DEBUG "rhine_rx() status is %8.8x.\n", | 1448 | printk(KERN_DEBUG "rhine_rx() status is %8.8x.\n", |
1497 | desc_status); | 1449 | desc_status); |
1498 | if (--boguscnt < 0) | 1450 | |
1499 | break; | ||
1500 | if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) { | 1451 | if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) { |
1501 | if ((desc_status & RxWholePkt) != RxWholePkt) { | 1452 | if ((desc_status & RxWholePkt) != RxWholePkt) { |
1502 | printk(KERN_WARNING "%s: Oversized Ethernet " | 1453 | printk(KERN_WARNING "%s: Oversized Ethernet " |
@@ -1565,7 +1516,11 @@ static void rhine_rx(struct net_device *dev) | |||
1565 | PCI_DMA_FROMDEVICE); | 1516 | PCI_DMA_FROMDEVICE); |
1566 | } | 1517 | } |
1567 | skb->protocol = eth_type_trans(skb, dev); | 1518 | skb->protocol = eth_type_trans(skb, dev); |
1519 | #ifdef CONFIG_VIA_RHINE_NAPI | ||
1520 | netif_receive_skb(skb); | ||
1521 | #else | ||
1568 | netif_rx(skb); | 1522 | netif_rx(skb); |
1523 | #endif | ||
1569 | dev->last_rx = jiffies; | 1524 | dev->last_rx = jiffies; |
1570 | rp->stats.rx_bytes += pkt_len; | 1525 | rp->stats.rx_bytes += pkt_len; |
1571 | rp->stats.rx_packets++; | 1526 | rp->stats.rx_packets++; |
@@ -1592,6 +1547,8 @@ static void rhine_rx(struct net_device *dev) | |||
1592 | } | 1547 | } |
1593 | rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn); | 1548 | rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn); |
1594 | } | 1549 | } |
1550 | |||
1551 | return count; | ||
1595 | } | 1552 | } |
1596 | 1553 | ||
1597 | /* | 1554 | /* |
@@ -1881,6 +1838,7 @@ static int rhine_close(struct net_device *dev) | |||
1881 | spin_lock_irq(&rp->lock); | 1838 | spin_lock_irq(&rp->lock); |
1882 | 1839 | ||
1883 | netif_stop_queue(dev); | 1840 | netif_stop_queue(dev); |
1841 | netif_poll_disable(dev); | ||
1884 | 1842 | ||
1885 | if (debug > 1) | 1843 | if (debug > 1) |
1886 | printk(KERN_DEBUG "%s: Shutting down ethercard, " | 1844 | printk(KERN_DEBUG "%s: Shutting down ethercard, " |
@@ -1962,7 +1920,8 @@ static void rhine_shutdown (struct pci_dev *pdev) | |||
1962 | } | 1920 | } |
1963 | 1921 | ||
1964 | /* Hit power state D3 (sleep) */ | 1922 | /* Hit power state D3 (sleep) */ |
1965 | iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW); | 1923 | if (!avoid_D3) |
1924 | iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW); | ||
1966 | 1925 | ||
1967 | /* TODO: Check use of pci_enable_wake() */ | 1926 | /* TODO: Check use of pci_enable_wake() */ |
1968 | 1927 | ||
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index ba2972ba3757..aa9cd92f46b2 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c | |||
@@ -229,7 +229,8 @@ static int rx_copybreak = 200; | |||
229 | module_param(rx_copybreak, int, 0644); | 229 | module_param(rx_copybreak, int, 0644); |
230 | MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames"); | 230 | MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames"); |
231 | 231 | ||
232 | static void velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr, struct velocity_info_tbl *info); | 232 | static void velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr, |
233 | const struct velocity_info_tbl *info); | ||
233 | static int velocity_get_pci_info(struct velocity_info *, struct pci_dev *pdev); | 234 | static int velocity_get_pci_info(struct velocity_info *, struct pci_dev *pdev); |
234 | static void velocity_print_info(struct velocity_info *vptr); | 235 | static void velocity_print_info(struct velocity_info *vptr); |
235 | static int velocity_open(struct net_device *dev); | 236 | static int velocity_open(struct net_device *dev); |
@@ -294,9 +295,9 @@ static void velocity_unregister_notifier(void) | |||
294 | * Internal board variants. At the moment we have only one | 295 | * Internal board variants. At the moment we have only one |
295 | */ | 296 | */ |
296 | 297 | ||
297 | static struct velocity_info_tbl chip_info_table[] = { | 298 | static const struct velocity_info_tbl chip_info_table[] __devinitdata = { |
298 | {CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 256, 1, 0x00FFFFFFUL}, | 299 | {CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 1, 0x00FFFFFFUL}, |
299 | {0, NULL} | 300 | { } |
300 | }; | 301 | }; |
301 | 302 | ||
302 | /* | 303 | /* |
@@ -304,10 +305,9 @@ static struct velocity_info_tbl chip_info_table[] = { | |||
304 | * device driver. Used for hotplug autoloading. | 305 | * device driver. Used for hotplug autoloading. |
305 | */ | 306 | */ |
306 | 307 | ||
307 | static struct pci_device_id velocity_id_table[] __devinitdata = { | 308 | static const struct pci_device_id velocity_id_table[] __devinitdata = { |
308 | {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X, | 309 | { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) }, |
309 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) chip_info_table}, | 310 | { } |
310 | {0, } | ||
311 | }; | 311 | }; |
312 | 312 | ||
313 | MODULE_DEVICE_TABLE(pci, velocity_id_table); | 313 | MODULE_DEVICE_TABLE(pci, velocity_id_table); |
@@ -341,7 +341,7 @@ static char __devinit *get_chip_name(enum chip_type chip_id) | |||
341 | static void __devexit velocity_remove1(struct pci_dev *pdev) | 341 | static void __devexit velocity_remove1(struct pci_dev *pdev) |
342 | { | 342 | { |
343 | struct net_device *dev = pci_get_drvdata(pdev); | 343 | struct net_device *dev = pci_get_drvdata(pdev); |
344 | struct velocity_info *vptr = dev->priv; | 344 | struct velocity_info *vptr = netdev_priv(dev); |
345 | 345 | ||
346 | #ifdef CONFIG_PM | 346 | #ifdef CONFIG_PM |
347 | unsigned long flags; | 347 | unsigned long flags; |
@@ -686,21 +686,23 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi | |||
686 | static int first = 1; | 686 | static int first = 1; |
687 | struct net_device *dev; | 687 | struct net_device *dev; |
688 | int i; | 688 | int i; |
689 | struct velocity_info_tbl *info = (struct velocity_info_tbl *) ent->driver_data; | 689 | const struct velocity_info_tbl *info = &chip_info_table[ent->driver_data]; |
690 | struct velocity_info *vptr; | 690 | struct velocity_info *vptr; |
691 | struct mac_regs __iomem * regs; | 691 | struct mac_regs __iomem * regs; |
692 | int ret = -ENOMEM; | 692 | int ret = -ENOMEM; |
693 | 693 | ||
694 | /* FIXME: this driver, like almost all other ethernet drivers, | ||
695 | * can support more than MAX_UNITS. | ||
696 | */ | ||
694 | if (velocity_nics >= MAX_UNITS) { | 697 | if (velocity_nics >= MAX_UNITS) { |
695 | printk(KERN_NOTICE VELOCITY_NAME ": already found %d NICs.\n", | 698 | dev_notice(&pdev->dev, "already found %d NICs.\n", |
696 | velocity_nics); | 699 | velocity_nics); |
697 | return -ENODEV; | 700 | return -ENODEV; |
698 | } | 701 | } |
699 | 702 | ||
700 | dev = alloc_etherdev(sizeof(struct velocity_info)); | 703 | dev = alloc_etherdev(sizeof(struct velocity_info)); |
701 | 704 | if (!dev) { | |
702 | if (dev == NULL) { | 705 | dev_err(&pdev->dev, "allocate net device failed.\n"); |
703 | printk(KERN_ERR VELOCITY_NAME ": allocate net device failed.\n"); | ||
704 | goto out; | 706 | goto out; |
705 | } | 707 | } |
706 | 708 | ||
@@ -708,7 +710,7 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi | |||
708 | 710 | ||
709 | SET_MODULE_OWNER(dev); | 711 | SET_MODULE_OWNER(dev); |
710 | SET_NETDEV_DEV(dev, &pdev->dev); | 712 | SET_NETDEV_DEV(dev, &pdev->dev); |
711 | vptr = dev->priv; | 713 | vptr = netdev_priv(dev); |
712 | 714 | ||
713 | 715 | ||
714 | if (first) { | 716 | if (first) { |
@@ -731,17 +733,17 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi | |||
731 | 733 | ||
732 | ret = velocity_get_pci_info(vptr, pdev); | 734 | ret = velocity_get_pci_info(vptr, pdev); |
733 | if (ret < 0) { | 735 | if (ret < 0) { |
734 | printk(KERN_ERR VELOCITY_NAME ": Failed to find PCI device.\n"); | 736 | /* error message already printed */ |
735 | goto err_disable; | 737 | goto err_disable; |
736 | } | 738 | } |
737 | 739 | ||
738 | ret = pci_request_regions(pdev, VELOCITY_NAME); | 740 | ret = pci_request_regions(pdev, VELOCITY_NAME); |
739 | if (ret < 0) { | 741 | if (ret < 0) { |
740 | printk(KERN_ERR VELOCITY_NAME ": Failed to find PCI device.\n"); | 742 | dev_err(&pdev->dev, "No PCI resources.\n"); |
741 | goto err_disable; | 743 | goto err_disable; |
742 | } | 744 | } |
743 | 745 | ||
744 | regs = ioremap(vptr->memaddr, vptr->io_size); | 746 | regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE); |
745 | if (regs == NULL) { | 747 | if (regs == NULL) { |
746 | ret = -EIO; | 748 | ret = -EIO; |
747 | goto err_release_res; | 749 | goto err_release_res; |
@@ -859,13 +861,14 @@ static void __devinit velocity_print_info(struct velocity_info *vptr) | |||
859 | * discovered. | 861 | * discovered. |
860 | */ | 862 | */ |
861 | 863 | ||
862 | static void __devinit velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr, struct velocity_info_tbl *info) | 864 | static void __devinit velocity_init_info(struct pci_dev *pdev, |
865 | struct velocity_info *vptr, | ||
866 | const struct velocity_info_tbl *info) | ||
863 | { | 867 | { |
864 | memset(vptr, 0, sizeof(struct velocity_info)); | 868 | memset(vptr, 0, sizeof(struct velocity_info)); |
865 | 869 | ||
866 | vptr->pdev = pdev; | 870 | vptr->pdev = pdev; |
867 | vptr->chip_id = info->chip_id; | 871 | vptr->chip_id = info->chip_id; |
868 | vptr->io_size = info->io_size; | ||
869 | vptr->num_txq = info->txqueue; | 872 | vptr->num_txq = info->txqueue; |
870 | vptr->multicast_limit = MCAM_SIZE; | 873 | vptr->multicast_limit = MCAM_SIZE; |
871 | spin_lock_init(&vptr->lock); | 874 | spin_lock_init(&vptr->lock); |
@@ -883,8 +886,7 @@ static void __devinit velocity_init_info(struct pci_dev *pdev, struct velocity_i | |||
883 | 886 | ||
884 | static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pci_dev *pdev) | 887 | static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pci_dev *pdev) |
885 | { | 888 | { |
886 | 889 | if (pci_read_config_byte(pdev, PCI_REVISION_ID, &vptr->rev_id) < 0) | |
887 | if(pci_read_config_byte(pdev, PCI_REVISION_ID, &vptr->rev_id) < 0) | ||
888 | return -EIO; | 890 | return -EIO; |
889 | 891 | ||
890 | pci_set_master(pdev); | 892 | pci_set_master(pdev); |
@@ -892,24 +894,20 @@ static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pc | |||
892 | vptr->ioaddr = pci_resource_start(pdev, 0); | 894 | vptr->ioaddr = pci_resource_start(pdev, 0); |
893 | vptr->memaddr = pci_resource_start(pdev, 1); | 895 | vptr->memaddr = pci_resource_start(pdev, 1); |
894 | 896 | ||
895 | if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) | 897 | if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) { |
896 | { | 898 | dev_err(&pdev->dev, |
897 | printk(KERN_ERR "%s: region #0 is not an I/O resource, aborting.\n", | 899 | "region #0 is not an I/O resource, aborting.\n"); |
898 | pci_name(pdev)); | ||
899 | return -EINVAL; | 900 | return -EINVAL; |
900 | } | 901 | } |
901 | 902 | ||
902 | if((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) | 903 | if ((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) { |
903 | { | 904 | dev_err(&pdev->dev, |
904 | printk(KERN_ERR "%s: region #1 is an I/O resource, aborting.\n", | 905 | "region #1 is an I/O resource, aborting.\n"); |
905 | pci_name(pdev)); | ||
906 | return -EINVAL; | 906 | return -EINVAL; |
907 | } | 907 | } |
908 | 908 | ||
909 | if(pci_resource_len(pdev, 1) < 256) | 909 | if (pci_resource_len(pdev, 1) < VELOCITY_IO_SIZE) { |
910 | { | 910 | dev_err(&pdev->dev, "region #1 is too small.\n"); |
911 | printk(KERN_ERR "%s: region #1 is too small.\n", | ||
912 | pci_name(pdev)); | ||
913 | return -EINVAL; | 911 | return -EINVAL; |
914 | } | 912 | } |
915 | vptr->pdev = pdev; | 913 | vptr->pdev = pdev; |
@@ -1728,7 +1726,7 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_ | |||
1728 | 1726 | ||
1729 | static int velocity_open(struct net_device *dev) | 1727 | static int velocity_open(struct net_device *dev) |
1730 | { | 1728 | { |
1731 | struct velocity_info *vptr = dev->priv; | 1729 | struct velocity_info *vptr = netdev_priv(dev); |
1732 | int ret; | 1730 | int ret; |
1733 | 1731 | ||
1734 | vptr->rx_buf_sz = (dev->mtu <= 1504 ? PKT_BUF_SZ : dev->mtu + 32); | 1732 | vptr->rx_buf_sz = (dev->mtu <= 1504 ? PKT_BUF_SZ : dev->mtu + 32); |
@@ -1785,7 +1783,7 @@ err_free_desc_rings: | |||
1785 | 1783 | ||
1786 | static int velocity_change_mtu(struct net_device *dev, int new_mtu) | 1784 | static int velocity_change_mtu(struct net_device *dev, int new_mtu) |
1787 | { | 1785 | { |
1788 | struct velocity_info *vptr = dev->priv; | 1786 | struct velocity_info *vptr = netdev_priv(dev); |
1789 | unsigned long flags; | 1787 | unsigned long flags; |
1790 | int oldmtu = dev->mtu; | 1788 | int oldmtu = dev->mtu; |
1791 | int ret = 0; | 1789 | int ret = 0; |
@@ -1861,7 +1859,7 @@ static void velocity_shutdown(struct velocity_info *vptr) | |||
1861 | 1859 | ||
1862 | static int velocity_close(struct net_device *dev) | 1860 | static int velocity_close(struct net_device *dev) |
1863 | { | 1861 | { |
1864 | struct velocity_info *vptr = dev->priv; | 1862 | struct velocity_info *vptr = netdev_priv(dev); |
1865 | 1863 | ||
1866 | netif_stop_queue(dev); | 1864 | netif_stop_queue(dev); |
1867 | velocity_shutdown(vptr); | 1865 | velocity_shutdown(vptr); |
@@ -1894,7 +1892,7 @@ static int velocity_close(struct net_device *dev) | |||
1894 | 1892 | ||
1895 | static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) | 1893 | static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) |
1896 | { | 1894 | { |
1897 | struct velocity_info *vptr = dev->priv; | 1895 | struct velocity_info *vptr = netdev_priv(dev); |
1898 | int qnum = 0; | 1896 | int qnum = 0; |
1899 | struct tx_desc *td_ptr; | 1897 | struct tx_desc *td_ptr; |
1900 | struct velocity_td_info *tdinfo; | 1898 | struct velocity_td_info *tdinfo; |
@@ -2049,7 +2047,7 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2049 | static int velocity_intr(int irq, void *dev_instance, struct pt_regs *regs) | 2047 | static int velocity_intr(int irq, void *dev_instance, struct pt_regs *regs) |
2050 | { | 2048 | { |
2051 | struct net_device *dev = dev_instance; | 2049 | struct net_device *dev = dev_instance; |
2052 | struct velocity_info *vptr = dev->priv; | 2050 | struct velocity_info *vptr = netdev_priv(dev); |
2053 | u32 isr_status; | 2051 | u32 isr_status; |
2054 | int max_count = 0; | 2052 | int max_count = 0; |
2055 | 2053 | ||
@@ -2104,7 +2102,7 @@ static int velocity_intr(int irq, void *dev_instance, struct pt_regs *regs) | |||
2104 | 2102 | ||
2105 | static void velocity_set_multi(struct net_device *dev) | 2103 | static void velocity_set_multi(struct net_device *dev) |
2106 | { | 2104 | { |
2107 | struct velocity_info *vptr = dev->priv; | 2105 | struct velocity_info *vptr = netdev_priv(dev); |
2108 | struct mac_regs __iomem * regs = vptr->mac_regs; | 2106 | struct mac_regs __iomem * regs = vptr->mac_regs; |
2109 | u8 rx_mode; | 2107 | u8 rx_mode; |
2110 | int i; | 2108 | int i; |
@@ -2153,7 +2151,7 @@ static void velocity_set_multi(struct net_device *dev) | |||
2153 | 2151 | ||
2154 | static struct net_device_stats *velocity_get_stats(struct net_device *dev) | 2152 | static struct net_device_stats *velocity_get_stats(struct net_device *dev) |
2155 | { | 2153 | { |
2156 | struct velocity_info *vptr = dev->priv; | 2154 | struct velocity_info *vptr = netdev_priv(dev); |
2157 | 2155 | ||
2158 | /* If the hardware is down, don't touch MII */ | 2156 | /* If the hardware is down, don't touch MII */ |
2159 | if(!netif_running(dev)) | 2157 | if(!netif_running(dev)) |
@@ -2196,7 +2194,7 @@ static struct net_device_stats *velocity_get_stats(struct net_device *dev) | |||
2196 | 2194 | ||
2197 | static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | 2195 | static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
2198 | { | 2196 | { |
2199 | struct velocity_info *vptr = dev->priv; | 2197 | struct velocity_info *vptr = netdev_priv(dev); |
2200 | int ret; | 2198 | int ret; |
2201 | 2199 | ||
2202 | /* If we are asked for information and the device is power | 2200 | /* If we are asked for information and the device is power |
@@ -2744,7 +2742,7 @@ static u32 check_connection_type(struct mac_regs __iomem * regs) | |||
2744 | 2742 | ||
2745 | if (PHYSR0 & PHYSR0_SPDG) | 2743 | if (PHYSR0 & PHYSR0_SPDG) |
2746 | status |= VELOCITY_SPEED_1000; | 2744 | status |= VELOCITY_SPEED_1000; |
2747 | if (PHYSR0 & PHYSR0_SPD10) | 2745 | else if (PHYSR0 & PHYSR0_SPD10) |
2748 | status |= VELOCITY_SPEED_10; | 2746 | status |= VELOCITY_SPEED_10; |
2749 | else | 2747 | else |
2750 | status |= VELOCITY_SPEED_100; | 2748 | status |= VELOCITY_SPEED_100; |
@@ -2825,7 +2823,7 @@ static void enable_flow_control_ability(struct velocity_info *vptr) | |||
2825 | 2823 | ||
2826 | static int velocity_ethtool_up(struct net_device *dev) | 2824 | static int velocity_ethtool_up(struct net_device *dev) |
2827 | { | 2825 | { |
2828 | struct velocity_info *vptr = dev->priv; | 2826 | struct velocity_info *vptr = netdev_priv(dev); |
2829 | if (!netif_running(dev)) | 2827 | if (!netif_running(dev)) |
2830 | pci_set_power_state(vptr->pdev, PCI_D0); | 2828 | pci_set_power_state(vptr->pdev, PCI_D0); |
2831 | return 0; | 2829 | return 0; |
@@ -2841,20 +2839,29 @@ static int velocity_ethtool_up(struct net_device *dev) | |||
2841 | 2839 | ||
2842 | static void velocity_ethtool_down(struct net_device *dev) | 2840 | static void velocity_ethtool_down(struct net_device *dev) |
2843 | { | 2841 | { |
2844 | struct velocity_info *vptr = dev->priv; | 2842 | struct velocity_info *vptr = netdev_priv(dev); |
2845 | if (!netif_running(dev)) | 2843 | if (!netif_running(dev)) |
2846 | pci_set_power_state(vptr->pdev, PCI_D3hot); | 2844 | pci_set_power_state(vptr->pdev, PCI_D3hot); |
2847 | } | 2845 | } |
2848 | 2846 | ||
2849 | static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 2847 | static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
2850 | { | 2848 | { |
2851 | struct velocity_info *vptr = dev->priv; | 2849 | struct velocity_info *vptr = netdev_priv(dev); |
2852 | struct mac_regs __iomem * regs = vptr->mac_regs; | 2850 | struct mac_regs __iomem * regs = vptr->mac_regs; |
2853 | u32 status; | 2851 | u32 status; |
2854 | status = check_connection_type(vptr->mac_regs); | 2852 | status = check_connection_type(vptr->mac_regs); |
2855 | 2853 | ||
2856 | cmd->supported = SUPPORTED_TP | SUPPORTED_Autoneg | SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full; | 2854 | cmd->supported = SUPPORTED_TP | |
2857 | if (status & VELOCITY_SPEED_100) | 2855 | SUPPORTED_Autoneg | |
2856 | SUPPORTED_10baseT_Half | | ||
2857 | SUPPORTED_10baseT_Full | | ||
2858 | SUPPORTED_100baseT_Half | | ||
2859 | SUPPORTED_100baseT_Full | | ||
2860 | SUPPORTED_1000baseT_Half | | ||
2861 | SUPPORTED_1000baseT_Full; | ||
2862 | if (status & VELOCITY_SPEED_1000) | ||
2863 | cmd->speed = SPEED_1000; | ||
2864 | else if (status & VELOCITY_SPEED_100) | ||
2858 | cmd->speed = SPEED_100; | 2865 | cmd->speed = SPEED_100; |
2859 | else | 2866 | else |
2860 | cmd->speed = SPEED_10; | 2867 | cmd->speed = SPEED_10; |
@@ -2873,7 +2880,7 @@ static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd | |||
2873 | 2880 | ||
2874 | static int velocity_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 2881 | static int velocity_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
2875 | { | 2882 | { |
2876 | struct velocity_info *vptr = dev->priv; | 2883 | struct velocity_info *vptr = netdev_priv(dev); |
2877 | u32 curr_status; | 2884 | u32 curr_status; |
2878 | u32 new_status = 0; | 2885 | u32 new_status = 0; |
2879 | int ret = 0; | 2886 | int ret = 0; |
@@ -2896,14 +2903,14 @@ static int velocity_set_settings(struct net_device *dev, struct ethtool_cmd *cmd | |||
2896 | 2903 | ||
2897 | static u32 velocity_get_link(struct net_device *dev) | 2904 | static u32 velocity_get_link(struct net_device *dev) |
2898 | { | 2905 | { |
2899 | struct velocity_info *vptr = dev->priv; | 2906 | struct velocity_info *vptr = netdev_priv(dev); |
2900 | struct mac_regs __iomem * regs = vptr->mac_regs; | 2907 | struct mac_regs __iomem * regs = vptr->mac_regs; |
2901 | return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 0 : 1; | 2908 | return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 1 : 0; |
2902 | } | 2909 | } |
2903 | 2910 | ||
2904 | static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | 2911 | static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) |
2905 | { | 2912 | { |
2906 | struct velocity_info *vptr = dev->priv; | 2913 | struct velocity_info *vptr = netdev_priv(dev); |
2907 | strcpy(info->driver, VELOCITY_NAME); | 2914 | strcpy(info->driver, VELOCITY_NAME); |
2908 | strcpy(info->version, VELOCITY_VERSION); | 2915 | strcpy(info->version, VELOCITY_VERSION); |
2909 | strcpy(info->bus_info, pci_name(vptr->pdev)); | 2916 | strcpy(info->bus_info, pci_name(vptr->pdev)); |
@@ -2911,7 +2918,7 @@ static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo | |||
2911 | 2918 | ||
2912 | static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | 2919 | static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) |
2913 | { | 2920 | { |
2914 | struct velocity_info *vptr = dev->priv; | 2921 | struct velocity_info *vptr = netdev_priv(dev); |
2915 | wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP; | 2922 | wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP; |
2916 | wol->wolopts |= WAKE_MAGIC; | 2923 | wol->wolopts |= WAKE_MAGIC; |
2917 | /* | 2924 | /* |
@@ -2927,7 +2934,7 @@ static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_woli | |||
2927 | 2934 | ||
2928 | static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | 2935 | static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) |
2929 | { | 2936 | { |
2930 | struct velocity_info *vptr = dev->priv; | 2937 | struct velocity_info *vptr = netdev_priv(dev); |
2931 | 2938 | ||
2932 | if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP))) | 2939 | if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP))) |
2933 | return -EFAULT; | 2940 | return -EFAULT; |
@@ -2992,7 +2999,7 @@ static struct ethtool_ops velocity_ethtool_ops = { | |||
2992 | 2999 | ||
2993 | static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | 3000 | static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) |
2994 | { | 3001 | { |
2995 | struct velocity_info *vptr = dev->priv; | 3002 | struct velocity_info *vptr = netdev_priv(dev); |
2996 | struct mac_regs __iomem * regs = vptr->mac_regs; | 3003 | struct mac_regs __iomem * regs = vptr->mac_regs; |
2997 | unsigned long flags; | 3004 | unsigned long flags; |
2998 | struct mii_ioctl_data *miidata = if_mii(ifr); | 3005 | struct mii_ioctl_data *miidata = if_mii(ifr); |
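Note on the dev->priv conversions above: they all follow the same kernel convention, in which the private data lives in the memory allocated together with the net_device and is reached with netdev_priv() rather than through a separately assigned pointer. A minimal sketch of that pattern, with illustrative names only:

    /* Sketch only: structure and function names are illustrative, not the
     * via-velocity ones. */
    #include <linux/netdevice.h>
    #include <linux/etherdevice.h>

    struct example_priv {
        int link_up;
    };

    static u32 example_get_link(struct net_device *dev)
    {
        struct example_priv *priv = netdev_priv(dev);   /* instead of dev->priv */

        return priv->link_up;
    }

    static struct net_device *example_alloc(void)
    {
        /* netdev_priv() only works because the private area is reserved here */
        return alloc_etherdev(sizeof(struct example_priv));
    }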
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h index f1b2640ebdc6..496c3d597444 100644 --- a/drivers/net/via-velocity.h +++ b/drivers/net/via-velocity.h | |||
@@ -31,6 +31,8 @@ | |||
31 | #define VELOCITY_FULL_DRV_NAM "VIA Networking Velocity Family Gigabit Ethernet Adapter Driver" | 31 | #define VELOCITY_FULL_DRV_NAM "VIA Networking Velocity Family Gigabit Ethernet Adapter Driver" |
32 | #define VELOCITY_VERSION "1.13" | 32 | #define VELOCITY_VERSION "1.13" |
33 | 33 | ||
34 | #define VELOCITY_IO_SIZE 256 | ||
35 | |||
34 | #define PKT_BUF_SZ 1540 | 36 | #define PKT_BUF_SZ 1540 |
35 | 37 | ||
36 | #define MAX_UNITS 8 | 38 | #define MAX_UNITS 8 |
@@ -1191,7 +1193,6 @@ enum chip_type { | |||
1191 | struct velocity_info_tbl { | 1193 | struct velocity_info_tbl { |
1192 | enum chip_type chip_id; | 1194 | enum chip_type chip_id; |
1193 | char *name; | 1195 | char *name; |
1194 | int io_size; | ||
1195 | int txqueue; | 1196 | int txqueue; |
1196 | u32 flags; | 1197 | u32 flags; |
1197 | }; | 1198 | }; |
@@ -1751,7 +1752,6 @@ struct velocity_info { | |||
1751 | struct mac_regs __iomem * mac_regs; | 1752 | struct mac_regs __iomem * mac_regs; |
1752 | unsigned long memaddr; | 1753 | unsigned long memaddr; |
1753 | unsigned long ioaddr; | 1754 | unsigned long ioaddr; |
1754 | u32 io_size; | ||
1755 | 1755 | ||
1756 | u8 rev_id; | 1756 | u8 rev_id; |
1757 | 1757 | ||
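With io_size removed from both velocity_info_tbl and velocity_info, every supported chip uses the single VELOCITY_IO_SIZE constant. A hedged sketch of how a probe path might request the fixed window (BAR index and names are assumptions, not taken from the driver):

    /* Sketch, assuming a PCI probe path; BAR index and names are illustrative. */
    #include <linux/pci.h>
    #include <linux/ioport.h>
    #include <linux/io.h>

    #define VELOCITY_IO_SIZE 256

    static void __iomem *example_map_regs(struct pci_dev *pdev)
    {
        /* one fixed window size, whatever the chip_id */
        if (!request_mem_region(pci_resource_start(pdev, 1),
                                VELOCITY_IO_SIZE, "velocity-example"))
            return NULL;
        return ioremap(pci_resource_start(pdev, 1), VELOCITY_IO_SIZE);
    }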
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig index b5328b0ff927..54b8e492ef97 100644 --- a/drivers/net/wan/Kconfig +++ b/drivers/net/wan/Kconfig | |||
@@ -134,18 +134,6 @@ config SEALEVEL_4021 | |||
134 | The driver will be compiled as a module: the | 134 | The driver will be compiled as a module: the |
135 | module will be called sealevel. | 135 | module will be called sealevel. |
136 | 136 | ||
137 | config SYNCLINK_SYNCPPP | ||
138 | tristate "SyncLink HDLC/SYNCPPP support" | ||
139 | depends on WAN | ||
140 | help | ||
141 | Enables HDLC/SYNCPPP support for the SyncLink WAN driver. | ||
142 | |||
143 | Normally the SyncLink WAN driver works with the main PPP driver | ||
144 | <file:drivers/net/ppp_generic.c> and pppd program. | ||
145 | HDLC/SYNCPPP support allows use of the Cisco HDLC/PPP driver | ||
146 | <file:drivers/net/wan/syncppp.c>. The SyncLink WAN driver (in | ||
147 | character devices) must also be enabled. | ||
148 | |||
149 | # Generic HDLC | 137 | # Generic HDLC |
150 | config HDLC | 138 | config HDLC |
151 | tristate "Generic HDLC layer" | 139 | tristate "Generic HDLC layer" |
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile index 823c6d5ab90d..316ca6869d5e 100644 --- a/drivers/net/wan/Makefile +++ b/drivers/net/wan/Makefile | |||
@@ -28,7 +28,6 @@ obj-$(CONFIG_COSA) += syncppp.o cosa.o | |||
28 | obj-$(CONFIG_FARSYNC) += syncppp.o farsync.o | 28 | obj-$(CONFIG_FARSYNC) += syncppp.o farsync.o |
29 | obj-$(CONFIG_DSCC4) += dscc4.o | 29 | obj-$(CONFIG_DSCC4) += dscc4.o |
30 | obj-$(CONFIG_LANMEDIA) += syncppp.o | 30 | obj-$(CONFIG_LANMEDIA) += syncppp.o |
31 | obj-$(CONFIG_SYNCLINK_SYNCPPP) += syncppp.o | ||
32 | obj-$(CONFIG_X25_ASY) += x25_asy.o | 31 | obj-$(CONFIG_X25_ASY) += x25_asy.o |
33 | 32 | ||
34 | obj-$(CONFIG_LANMEDIA) += lmc/ | 33 | obj-$(CONFIG_LANMEDIA) += lmc/ |
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c index c92ac9fde083..6b63b350cd52 100644 --- a/drivers/net/wan/c101.c +++ b/drivers/net/wan/c101.c | |||
@@ -116,27 +116,34 @@ static inline void openwin(card_t *card, u8 page) | |||
116 | #include "hd6457x.c" | 116 | #include "hd6457x.c" |
117 | 117 | ||
118 | 118 | ||
119 | static inline void set_carrier(port_t *port) | ||
120 | { | ||
121 | if (!(sca_in(MSCI1_OFFSET + ST3, port) & ST3_DCD)) | ||
122 | netif_carrier_on(port_to_dev(port)); | ||
123 | else | ||
124 | netif_carrier_off(port_to_dev(port)); | ||
125 | } | ||
126 | |||
127 | |||
119 | static void sca_msci_intr(port_t *port) | 128 | static void sca_msci_intr(port_t *port) |
120 | { | 129 | { |
121 | struct net_device *dev = port_to_dev(port); | 130 | u8 stat = sca_in(MSCI0_OFFSET + ST1, port); /* read MSCI ST1 status */ |
122 | card_t* card = port_to_card(port); | ||
123 | u8 stat = sca_in(MSCI1_OFFSET + ST1, card); /* read MSCI ST1 status */ | ||
124 | 131 | ||
125 | /* Reset MSCI TX underrun status bit */ | 132 | /* Reset MSCI TX underrun and CDCD (ignored) status bit */ |
126 | sca_out(stat & ST1_UDRN, MSCI0_OFFSET + ST1, card); | 133 | sca_out(stat & (ST1_UDRN | ST1_CDCD), MSCI0_OFFSET + ST1, port); |
127 | 134 | ||
128 | if (stat & ST1_UDRN) { | 135 | if (stat & ST1_UDRN) { |
129 | struct net_device_stats *stats = hdlc_stats(dev); | 136 | struct net_device_stats *stats = hdlc_stats(port_to_dev(port)); |
130 | stats->tx_errors++; /* TX Underrun error detected */ | 137 | stats->tx_errors++; /* TX Underrun error detected */ |
131 | stats->tx_fifo_errors++; | 138 | stats->tx_fifo_errors++; |
132 | } | 139 | } |
133 | 140 | ||
141 | stat = sca_in(MSCI1_OFFSET + ST1, port); /* read MSCI1 ST1 status */ | ||
134 | /* Reset MSCI CDCD status bit - uses ch#2 DCD input */ | 142 | /* Reset MSCI CDCD status bit - uses ch#2 DCD input */ |
135 | sca_out(stat & ST1_CDCD, MSCI1_OFFSET + ST1, card); | 143 | sca_out(stat & ST1_CDCD, MSCI1_OFFSET + ST1, port); |
136 | 144 | ||
137 | if (stat & ST1_CDCD) | 145 | if (stat & ST1_CDCD) |
138 | hdlc_set_carrier(!(sca_in(MSCI1_OFFSET + ST3, card) & ST3_DCD), | 146 | set_carrier(port); |
139 | dev); | ||
140 | } | 147 | } |
141 | 148 | ||
142 | 149 | ||
@@ -190,8 +197,7 @@ static int c101_open(struct net_device *dev) | |||
190 | sca_out(IE1_UDRN, MSCI0_OFFSET + IE1, port); | 197 | sca_out(IE1_UDRN, MSCI0_OFFSET + IE1, port); |
191 | sca_out(IE0_TXINT, MSCI0_OFFSET + IE0, port); | 198 | sca_out(IE0_TXINT, MSCI0_OFFSET + IE0, port); |
192 | 199 | ||
193 | hdlc_set_carrier(!(sca_in(MSCI1_OFFSET + ST3, port) & ST3_DCD), dev); | 200 | set_carrier(port); |
194 | printk(KERN_DEBUG "0x%X\n", sca_in(MSCI1_OFFSET + ST3, port)); | ||
195 | 201 | ||
196 | /* enable MSCI1 CDCD interrupt */ | 202 | /* enable MSCI1 CDCD interrupt */ |
197 | sca_out(IE1_CDCD, MSCI1_OFFSET + IE1, port); | 203 | sca_out(IE1_CDCD, MSCI1_OFFSET + IE1, port); |
@@ -378,7 +384,7 @@ static int __init c101_run(unsigned long irq, unsigned long winbase) | |||
378 | } | 384 | } |
379 | 385 | ||
380 | sca_init_sync_port(card); /* Set up C101 memory */ | 386 | sca_init_sync_port(card); /* Set up C101 memory */ |
381 | hdlc_set_carrier(!(sca_in(MSCI1_OFFSET + ST3, card) & ST3_DCD), dev); | 387 | set_carrier(card); |
382 | 388 | ||
383 | printk(KERN_INFO "%s: Moxa C101 on IRQ%u," | 389 | printk(KERN_INFO "%s: Moxa C101 on IRQ%u," |
384 | " using %u TX + %u RX packets rings\n", | 390 | " using %u TX + %u RX packets rings\n", |
@@ -443,4 +449,5 @@ module_exit(c101_cleanup); | |||
443 | MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); | 449 | MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); |
444 | MODULE_DESCRIPTION("Moxa C101 serial port driver"); | 450 | MODULE_DESCRIPTION("Moxa C101 serial port driver"); |
445 | MODULE_LICENSE("GPL v2"); | 451 | MODULE_LICENSE("GPL v2"); |
446 | module_param(hw, charp, 0444); /* hw=irq,ram:irq,... */ | 452 | module_param(hw, charp, 0444); |
453 | MODULE_PARM_DESC(hw, "irq,ram:irq,..."); | ||
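The tail of the c101 diff moves the hw= syntax out of a source comment and into MODULE_PARM_DESC(), so it shows up in modinfo. A short sketch of the pairing (only the parameter name mirrors the driver):

    /* Sketch of the module_param()/MODULE_PARM_DESC() pairing; only the
     * parameter name mirrors the driver. */
    #include <linux/module.h>
    #include <linux/moduleparam.h>

    static char *hw;                         /* e.g. "irq,ram:irq,..." */
    module_param(hw, charp, 0444);           /* world-readable, not writable */
    MODULE_PARM_DESC(hw, "irq,ram:irq,...");

The parameter is still passed on the modprobe command line in the documented irq,ram:irq,... format; the description is now visible to modinfo as well.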
diff --git a/drivers/net/wan/hd6457x.c b/drivers/net/wan/hd6457x.c index d3743321a977..dce2bb317b82 100644 --- a/drivers/net/wan/hd6457x.c +++ b/drivers/net/wan/hd6457x.c | |||
@@ -168,6 +168,23 @@ static inline u32 buffer_offset(port_t *port, u16 desc, int transmit) | |||
168 | } | 168 | } |
169 | 169 | ||
170 | 170 | ||
171 | static inline void sca_set_carrier(port_t *port) | ||
172 | { | ||
173 | if (!(sca_in(get_msci(port) + ST3, port_to_card(port)) & ST3_DCD)) { | ||
174 | #ifdef DEBUG_LINK | ||
175 | printk(KERN_DEBUG "%s: sca_set_carrier on\n", | ||
176 | port_to_dev(port)->name); | ||
177 | #endif | ||
178 | netif_carrier_on(port_to_dev(port)); | ||
179 | } else { | ||
180 | #ifdef DEBUG_LINK | ||
181 | printk(KERN_DEBUG "%s: sca_set_carrier off\n", | ||
182 | port_to_dev(port)->name); | ||
183 | #endif | ||
184 | netif_carrier_off(port_to_dev(port)); | ||
185 | } | ||
186 | } | ||
187 | |||
171 | 188 | ||
172 | static void sca_init_sync_port(port_t *port) | 189 | static void sca_init_sync_port(port_t *port) |
173 | { | 190 | { |
@@ -237,9 +254,7 @@ static void sca_init_sync_port(port_t *port) | |||
237 | sca_out(DIR_BOFE, DIR_TX(phy_node(port)), card); | 254 | sca_out(DIR_BOFE, DIR_TX(phy_node(port)), card); |
238 | } | 255 | } |
239 | } | 256 | } |
240 | 257 | sca_set_carrier(port); | |
241 | hdlc_set_carrier(!(sca_in(get_msci(port) + ST3, card) & ST3_DCD), | ||
242 | port_to_dev(port)); | ||
243 | } | 258 | } |
244 | 259 | ||
245 | 260 | ||
@@ -262,8 +277,7 @@ static inline void sca_msci_intr(port_t *port) | |||
262 | } | 277 | } |
263 | 278 | ||
264 | if (stat & ST1_CDCD) | 279 | if (stat & ST1_CDCD) |
265 | hdlc_set_carrier(!(sca_in(msci + ST3, card) & ST3_DCD), | 280 | sca_set_carrier(port); |
266 | port_to_dev(port)); | ||
267 | } | 281 | } |
268 | #endif | 282 | #endif |
269 | 283 | ||
@@ -566,7 +580,7 @@ static void sca_open(struct net_device *dev) | |||
566 | - all DMA interrupts | 580 | - all DMA interrupts |
567 | */ | 581 | */ |
568 | 582 | ||
569 | hdlc_set_carrier(!(sca_in(msci + ST3, card) & ST3_DCD), dev); | 583 | sca_set_carrier(port); |
570 | 584 | ||
571 | #ifdef __HD64570_H | 585 | #ifdef __HD64570_H |
572 | /* MSCI TX INT and RX INT A IRQ enable */ | 586 | /* MSCI TX INT and RX INT A IRQ enable */ |
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c index 1fd04662c4fc..f289daba0c7b 100644 --- a/drivers/net/wan/hdlc_cisco.c +++ b/drivers/net/wan/hdlc_cisco.c | |||
@@ -192,9 +192,7 @@ static int cisco_rx(struct sk_buff *skb) | |||
192 | "uptime %ud%uh%um%us)\n", | 192 | "uptime %ud%uh%um%us)\n", |
193 | dev->name, days, hrs, | 193 | dev->name, days, hrs, |
194 | min, sec); | 194 | min, sec); |
195 | #if 0 | 195 | netif_dormant_off(dev); |
196 | netif_carrier_on(dev); | ||
197 | #endif | ||
198 | hdlc->state.cisco.up = 1; | 196 | hdlc->state.cisco.up = 1; |
199 | } | 197 | } |
200 | } | 198 | } |
@@ -227,9 +225,7 @@ static void cisco_timer(unsigned long arg) | |||
227 | hdlc->state.cisco.settings.timeout * HZ)) { | 225 | hdlc->state.cisco.settings.timeout * HZ)) { |
228 | hdlc->state.cisco.up = 0; | 226 | hdlc->state.cisco.up = 0; |
229 | printk(KERN_INFO "%s: Link down\n", dev->name); | 227 | printk(KERN_INFO "%s: Link down\n", dev->name); |
230 | #if 0 | 228 | netif_dormant_on(dev); |
231 | netif_carrier_off(dev); | ||
232 | #endif | ||
233 | } | 229 | } |
234 | 230 | ||
235 | cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, | 231 | cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, |
@@ -265,10 +261,7 @@ static void cisco_stop(struct net_device *dev) | |||
265 | { | 261 | { |
266 | hdlc_device *hdlc = dev_to_hdlc(dev); | 262 | hdlc_device *hdlc = dev_to_hdlc(dev); |
267 | del_timer_sync(&hdlc->state.cisco.timer); | 263 | del_timer_sync(&hdlc->state.cisco.timer); |
268 | #if 0 | 264 | netif_dormant_on(dev); |
269 | if (netif_carrier_ok(dev)) | ||
270 | netif_carrier_off(dev); | ||
271 | #endif | ||
272 | hdlc->state.cisco.up = 0; | 265 | hdlc->state.cisco.up = 0; |
273 | hdlc->state.cisco.request_sent = 0; | 266 | hdlc->state.cisco.request_sent = 0; |
274 | } | 267 | } |
@@ -328,6 +321,7 @@ int hdlc_cisco_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
328 | dev->type = ARPHRD_CISCO; | 321 | dev->type = ARPHRD_CISCO; |
329 | dev->flags = IFF_POINTOPOINT | IFF_NOARP; | 322 | dev->flags = IFF_POINTOPOINT | IFF_NOARP; |
330 | dev->addr_len = 0; | 323 | dev->addr_len = 0; |
324 | netif_dormant_on(dev); | ||
331 | return 0; | 325 | return 0; |
332 | } | 326 | } |
333 | 327 | ||
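The hdlc_cisco changes (and the hdlc_fr ones that follow) swap the long-commented-out netif_carrier_* calls for netif_dormant_on/off: the hardware driver keeps ownership of the carrier flag, while the protocol's state is expressed through the dormant flag. A sketch of that split, with illustrative names:

    /* Sketch: the protocol layer toggles the dormant flag, the hardware
     * driver keeps ownership of the carrier flag. Names are illustrative. */
    #include <linux/netdevice.h>

    static void example_keepalive_ok(struct net_device *dev)
    {
        netif_dormant_off(dev);          /* protocol negotiated, link usable */
    }

    static void example_keepalive_timeout(struct net_device *dev)
    {
        netif_dormant_on(dev);           /* carrier may be fine, protocol is not */
    }

    static int example_link_usable(struct net_device *dev)
    {
        /* traffic should flow only when both layers agree */
        return netif_carrier_ok(dev) && !netif_dormant(dev);
    }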
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index 523afe17564e..7bb737bbdeb9 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c | |||
@@ -301,7 +301,7 @@ static int pvc_open(struct net_device *dev) | |||
301 | if (pvc->open_count++ == 0) { | 301 | if (pvc->open_count++ == 0) { |
302 | hdlc_device *hdlc = dev_to_hdlc(pvc->master); | 302 | hdlc_device *hdlc = dev_to_hdlc(pvc->master); |
303 | if (hdlc->state.fr.settings.lmi == LMI_NONE) | 303 | if (hdlc->state.fr.settings.lmi == LMI_NONE) |
304 | pvc->state.active = hdlc->carrier; | 304 | pvc->state.active = netif_carrier_ok(pvc->master); |
305 | 305 | ||
306 | pvc_carrier(pvc->state.active, pvc); | 306 | pvc_carrier(pvc->state.active, pvc); |
307 | hdlc->state.fr.dce_changed = 1; | 307 | hdlc->state.fr.dce_changed = 1; |
@@ -545,11 +545,7 @@ static void fr_set_link_state(int reliable, struct net_device *dev) | |||
545 | 545 | ||
546 | hdlc->state.fr.reliable = reliable; | 546 | hdlc->state.fr.reliable = reliable; |
547 | if (reliable) { | 547 | if (reliable) { |
548 | #if 0 | 548 | netif_dormant_off(dev); |
549 | if (!netif_carrier_ok(dev)) | ||
550 | netif_carrier_on(dev); | ||
551 | #endif | ||
552 | |||
553 | hdlc->state.fr.n391cnt = 0; /* Request full status */ | 549 | hdlc->state.fr.n391cnt = 0; /* Request full status */ |
554 | hdlc->state.fr.dce_changed = 1; | 550 | hdlc->state.fr.dce_changed = 1; |
555 | 551 | ||
@@ -562,11 +558,7 @@ static void fr_set_link_state(int reliable, struct net_device *dev) | |||
562 | } | 558 | } |
563 | } | 559 | } |
564 | } else { | 560 | } else { |
565 | #if 0 | 561 | netif_dormant_on(dev); |
566 | if (netif_carrier_ok(dev)) | ||
567 | netif_carrier_off(dev); | ||
568 | #endif | ||
569 | |||
570 | while (pvc) { /* Deactivate all PVCs */ | 562 | while (pvc) { /* Deactivate all PVCs */ |
571 | pvc_carrier(0, pvc); | 563 | pvc_carrier(0, pvc); |
572 | pvc->state.exist = pvc->state.active = 0; | 564 | pvc->state.exist = pvc->state.active = 0; |
diff --git a/drivers/net/wan/hdlc_generic.c b/drivers/net/wan/hdlc_generic.c index b7da55140fbd..04ca1f7b6424 100644 --- a/drivers/net/wan/hdlc_generic.c +++ b/drivers/net/wan/hdlc_generic.c | |||
@@ -34,10 +34,11 @@ | |||
34 | #include <linux/inetdevice.h> | 34 | #include <linux/inetdevice.h> |
35 | #include <linux/lapb.h> | 35 | #include <linux/lapb.h> |
36 | #include <linux/rtnetlink.h> | 36 | #include <linux/rtnetlink.h> |
37 | #include <linux/notifier.h> | ||
37 | #include <linux/hdlc.h> | 38 | #include <linux/hdlc.h> |
38 | 39 | ||
39 | 40 | ||
40 | static const char* version = "HDLC support module revision 1.18"; | 41 | static const char* version = "HDLC support module revision 1.19"; |
41 | 42 | ||
42 | #undef DEBUG_LINK | 43 | #undef DEBUG_LINK |
43 | 44 | ||
@@ -73,57 +74,51 @@ static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev, | |||
73 | 74 | ||
74 | 75 | ||
75 | 76 | ||
76 | static void __hdlc_set_carrier_on(struct net_device *dev) | 77 | static inline void hdlc_proto_start(struct net_device *dev) |
77 | { | 78 | { |
78 | hdlc_device *hdlc = dev_to_hdlc(dev); | 79 | hdlc_device *hdlc = dev_to_hdlc(dev); |
79 | if (hdlc->proto.start) | 80 | if (hdlc->proto.start) |
80 | return hdlc->proto.start(dev); | 81 | return hdlc->proto.start(dev); |
81 | #if 0 | ||
82 | #ifdef DEBUG_LINK | ||
83 | if (netif_carrier_ok(dev)) | ||
84 | printk(KERN_ERR "hdlc_set_carrier_on(): already on\n"); | ||
85 | #endif | ||
86 | netif_carrier_on(dev); | ||
87 | #endif | ||
88 | } | 82 | } |
89 | 83 | ||
90 | 84 | ||
91 | 85 | ||
92 | static void __hdlc_set_carrier_off(struct net_device *dev) | 86 | static inline void hdlc_proto_stop(struct net_device *dev) |
93 | { | 87 | { |
94 | hdlc_device *hdlc = dev_to_hdlc(dev); | 88 | hdlc_device *hdlc = dev_to_hdlc(dev); |
95 | if (hdlc->proto.stop) | 89 | if (hdlc->proto.stop) |
96 | return hdlc->proto.stop(dev); | 90 | return hdlc->proto.stop(dev); |
97 | |||
98 | #if 0 | ||
99 | #ifdef DEBUG_LINK | ||
100 | if (!netif_carrier_ok(dev)) | ||
101 | printk(KERN_ERR "hdlc_set_carrier_off(): already off\n"); | ||
102 | #endif | ||
103 | netif_carrier_off(dev); | ||
104 | #endif | ||
105 | } | 91 | } |
106 | 92 | ||
107 | 93 | ||
108 | 94 | ||
109 | void hdlc_set_carrier(int on, struct net_device *dev) | 95 | static int hdlc_device_event(struct notifier_block *this, unsigned long event, |
96 | void *ptr) | ||
110 | { | 97 | { |
111 | hdlc_device *hdlc = dev_to_hdlc(dev); | 98 | struct net_device *dev = ptr; |
99 | hdlc_device *hdlc; | ||
112 | unsigned long flags; | 100 | unsigned long flags; |
113 | on = on ? 1 : 0; | 101 | int on; |
102 | |||
103 | if (dev->get_stats != hdlc_get_stats) | ||
104 | return NOTIFY_DONE; /* not an HDLC device */ | ||
105 | |||
106 | if (event != NETDEV_CHANGE) | ||
107 | return NOTIFY_DONE; /* Only interested in carrier changes */ | ||
108 | |||
109 | on = netif_carrier_ok(dev); | ||
114 | 110 | ||
115 | #ifdef DEBUG_LINK | 111 | #ifdef DEBUG_LINK |
116 | printk(KERN_DEBUG "hdlc_set_carrier %i\n", on); | 112 | printk(KERN_DEBUG "%s: hdlc_device_event NETDEV_CHANGE, carrier %i\n", |
113 | dev->name, on); | ||
117 | #endif | 114 | #endif |
118 | 115 | ||
116 | hdlc = dev_to_hdlc(dev); | ||
119 | spin_lock_irqsave(&hdlc->state_lock, flags); | 117 | spin_lock_irqsave(&hdlc->state_lock, flags); |
120 | 118 | ||
121 | if (hdlc->carrier == on) | 119 | if (hdlc->carrier == on) |
122 | goto carrier_exit; /* no change in DCD line level */ | 120 | goto carrier_exit; /* no change in DCD line level */ |
123 | 121 | ||
124 | #ifdef DEBUG_LINK | ||
125 | printk(KERN_INFO "%s: carrier %s\n", dev->name, on ? "ON" : "off"); | ||
126 | #endif | ||
127 | hdlc->carrier = on; | 122 | hdlc->carrier = on; |
128 | 123 | ||
129 | if (!hdlc->open) | 124 | if (!hdlc->open) |
@@ -131,14 +126,15 @@ void hdlc_set_carrier(int on, struct net_device *dev) | |||
131 | 126 | ||
132 | if (hdlc->carrier) { | 127 | if (hdlc->carrier) { |
133 | printk(KERN_INFO "%s: Carrier detected\n", dev->name); | 128 | printk(KERN_INFO "%s: Carrier detected\n", dev->name); |
134 | __hdlc_set_carrier_on(dev); | 129 | hdlc_proto_start(dev); |
135 | } else { | 130 | } else { |
136 | printk(KERN_INFO "%s: Carrier lost\n", dev->name); | 131 | printk(KERN_INFO "%s: Carrier lost\n", dev->name); |
137 | __hdlc_set_carrier_off(dev); | 132 | hdlc_proto_stop(dev); |
138 | } | 133 | } |
139 | 134 | ||
140 | carrier_exit: | 135 | carrier_exit: |
141 | spin_unlock_irqrestore(&hdlc->state_lock, flags); | 136 | spin_unlock_irqrestore(&hdlc->state_lock, flags); |
137 | return NOTIFY_DONE; | ||
142 | } | 138 | } |
143 | 139 | ||
144 | 140 | ||
@@ -165,7 +161,7 @@ int hdlc_open(struct net_device *dev) | |||
165 | 161 | ||
166 | if (hdlc->carrier) { | 162 | if (hdlc->carrier) { |
167 | printk(KERN_INFO "%s: Carrier detected\n", dev->name); | 163 | printk(KERN_INFO "%s: Carrier detected\n", dev->name); |
168 | __hdlc_set_carrier_on(dev); | 164 | hdlc_proto_start(dev); |
169 | } else | 165 | } else |
170 | printk(KERN_INFO "%s: No carrier\n", dev->name); | 166 | printk(KERN_INFO "%s: No carrier\n", dev->name); |
171 | 167 | ||
@@ -190,7 +186,7 @@ void hdlc_close(struct net_device *dev) | |||
190 | 186 | ||
191 | hdlc->open = 0; | 187 | hdlc->open = 0; |
192 | if (hdlc->carrier) | 188 | if (hdlc->carrier) |
193 | __hdlc_set_carrier_off(dev); | 189 | hdlc_proto_stop(dev); |
194 | 190 | ||
195 | spin_unlock_irq(&hdlc->state_lock); | 191 | spin_unlock_irq(&hdlc->state_lock); |
196 | 192 | ||
@@ -303,7 +299,6 @@ MODULE_LICENSE("GPL v2"); | |||
303 | 299 | ||
304 | EXPORT_SYMBOL(hdlc_open); | 300 | EXPORT_SYMBOL(hdlc_open); |
305 | EXPORT_SYMBOL(hdlc_close); | 301 | EXPORT_SYMBOL(hdlc_close); |
306 | EXPORT_SYMBOL(hdlc_set_carrier); | ||
307 | EXPORT_SYMBOL(hdlc_ioctl); | 302 | EXPORT_SYMBOL(hdlc_ioctl); |
308 | EXPORT_SYMBOL(hdlc_setup); | 303 | EXPORT_SYMBOL(hdlc_setup); |
309 | EXPORT_SYMBOL(alloc_hdlcdev); | 304 | EXPORT_SYMBOL(alloc_hdlcdev); |
@@ -315,9 +310,18 @@ static struct packet_type hdlc_packet_type = { | |||
315 | }; | 310 | }; |
316 | 311 | ||
317 | 312 | ||
313 | static struct notifier_block hdlc_notifier = { | ||
314 | .notifier_call = hdlc_device_event, | ||
315 | }; | ||
316 | |||
317 | |||
318 | static int __init hdlc_module_init(void) | 318 | static int __init hdlc_module_init(void) |
319 | { | 319 | { |
320 | int result; | ||
321 | |||
320 | printk(KERN_INFO "%s\n", version); | 322 | printk(KERN_INFO "%s\n", version); |
323 | if ((result = register_netdevice_notifier(&hdlc_notifier)) != 0) | ||
324 | return result; | ||
321 | dev_add_pack(&hdlc_packet_type); | 325 | dev_add_pack(&hdlc_packet_type); |
322 | return 0; | 326 | return 0; |
323 | } | 327 | } |
@@ -327,6 +331,7 @@ static int __init hdlc_module_init(void) | |||
327 | static void __exit hdlc_module_exit(void) | 331 | static void __exit hdlc_module_exit(void) |
328 | { | 332 | { |
329 | dev_remove_pack(&hdlc_packet_type); | 333 | dev_remove_pack(&hdlc_packet_type); |
334 | unregister_netdevice_notifier(&hdlc_notifier); | ||
330 | } | 335 | } |
331 | 336 | ||
332 | 337 | ||
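hdlc_generic now learns about carrier changes through a netdevice notifier instead of exporting hdlc_set_carrier(): hardware drivers simply call netif_carrier_on/off and the resulting NETDEV_CHANGE event reaches the HDLC layer. A minimal sketch of the notifier pattern (handler body illustrative, using the 2.6.18-era convention, visible in the diff above, where the callback receives the net_device pointer directly):

    /* Minimal netdevice-notifier sketch; handler body is illustrative. */
    #include <linux/module.h>
    #include <linux/netdevice.h>
    #include <linux/notifier.h>

    static int example_device_event(struct notifier_block *nb,
                                    unsigned long event, void *ptr)
    {
        struct net_device *dev = ptr;

        if (event != NETDEV_CHANGE)
            return NOTIFY_DONE;
        printk(KERN_DEBUG "%s: carrier now %d\n",
               dev->name, netif_carrier_ok(dev) ? 1 : 0);
        return NOTIFY_DONE;
    }

    static struct notifier_block example_notifier = {
        .notifier_call = example_device_event,
    };

    static int __init example_init(void)
    {
        return register_netdevice_notifier(&example_notifier);
    }

    static void __exit example_exit(void)
    {
        unregister_netdevice_notifier(&example_notifier);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");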
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index b81263eaede0..fbaab5bf71eb 100644 --- a/drivers/net/wan/hdlc_ppp.c +++ b/drivers/net/wan/hdlc_ppp.c | |||
@@ -107,6 +107,7 @@ int hdlc_ppp_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
107 | dev->hard_header = NULL; | 107 | dev->hard_header = NULL; |
108 | dev->type = ARPHRD_PPP; | 108 | dev->type = ARPHRD_PPP; |
109 | dev->addr_len = 0; | 109 | dev->addr_len = 0; |
110 | netif_dormant_off(dev); | ||
110 | return 0; | 111 | return 0; |
111 | } | 112 | } |
112 | 113 | ||
diff --git a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c index 9456d31cb1c1..f15aa6ba77f1 100644 --- a/drivers/net/wan/hdlc_raw.c +++ b/drivers/net/wan/hdlc_raw.c | |||
@@ -82,6 +82,7 @@ int hdlc_raw_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
82 | dev->type = ARPHRD_RAWHDLC; | 82 | dev->type = ARPHRD_RAWHDLC; |
83 | dev->flags = IFF_POINTOPOINT | IFF_NOARP; | 83 | dev->flags = IFF_POINTOPOINT | IFF_NOARP; |
84 | dev->addr_len = 0; | 84 | dev->addr_len = 0; |
85 | netif_dormant_off(dev); | ||
85 | return 0; | 86 | return 0; |
86 | } | 87 | } |
87 | 88 | ||
diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c index b1285cc8fee6..d1884987f94e 100644 --- a/drivers/net/wan/hdlc_raw_eth.c +++ b/drivers/net/wan/hdlc_raw_eth.c | |||
@@ -100,6 +100,7 @@ int hdlc_raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
100 | dev->tx_queue_len = old_qlen; | 100 | dev->tx_queue_len = old_qlen; |
101 | memcpy(dev->dev_addr, "\x00\x01", 2); | 101 | memcpy(dev->dev_addr, "\x00\x01", 2); |
102 | get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2); | 102 | get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2); |
103 | netif_dormant_off(dev); | ||
103 | return 0; | 104 | return 0; |
104 | } | 105 | } |
105 | 106 | ||
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c index 07e5eef1fe0f..a867fb411f89 100644 --- a/drivers/net/wan/hdlc_x25.c +++ b/drivers/net/wan/hdlc_x25.c | |||
@@ -212,6 +212,7 @@ int hdlc_x25_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
212 | dev->hard_header = NULL; | 212 | dev->hard_header = NULL; |
213 | dev->type = ARPHRD_X25; | 213 | dev->type = ARPHRD_X25; |
214 | dev->addr_len = 0; | 214 | dev->addr_len = 0; |
215 | netif_dormant_off(dev); | ||
215 | return 0; | 216 | return 0; |
216 | } | 217 | } |
217 | 218 | ||
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c index e013b817cab8..dcf46add3adf 100644 --- a/drivers/net/wan/n2.c +++ b/drivers/net/wan/n2.c | |||
@@ -564,4 +564,5 @@ module_exit(n2_cleanup); | |||
564 | MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); | 564 | MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); |
565 | MODULE_DESCRIPTION("RISCom/N2 serial port driver"); | 565 | MODULE_DESCRIPTION("RISCom/N2 serial port driver"); |
566 | MODULE_LICENSE("GPL v2"); | 566 | MODULE_LICENSE("GPL v2"); |
567 | module_param(hw, charp, 0444); /* hw=io,irq,ram,ports:io,irq,... */ | 567 | module_param(hw, charp, 0444); |
568 | MODULE_PARM_DESC(hw, "io,irq,ram,ports:io,irq,..."); | ||
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c index d564224cdca9..b2031dfc4bb1 100644 --- a/drivers/net/wan/wanxl.c +++ b/drivers/net/wan/wanxl.c | |||
@@ -149,7 +149,10 @@ static inline void wanxl_cable_intr(port_t *port) | |||
149 | printk(KERN_INFO "%s: %s%s module, %s cable%s%s\n", | 149 | printk(KERN_INFO "%s: %s%s module, %s cable%s%s\n", |
150 | port->dev->name, pm, dte, cable, dsr, dcd); | 150 | port->dev->name, pm, dte, cable, dsr, dcd); |
151 | 151 | ||
152 | hdlc_set_carrier(value & STATUS_CABLE_DCD, port->dev); | 152 | if (value & STATUS_CABLE_DCD) |
153 | netif_carrier_on(port->dev); | ||
154 | else | ||
155 | netif_carrier_off(port->dev); | ||
153 | } | 156 | } |
154 | 157 | ||
155 | 158 | ||
diff --git a/drivers/net/wd.c b/drivers/net/wd.c index 7caa8dc88a58..b1ba1872f315 100644 --- a/drivers/net/wd.c +++ b/drivers/net/wd.c | |||
@@ -500,8 +500,8 @@ MODULE_LICENSE("GPL"); | |||
500 | 500 | ||
501 | /* This is set up so that only a single autoprobe takes place per call. | 501 | /* This is set up so that only a single autoprobe takes place per call. |
502 | ISA device autoprobes on a running machine are not recommended. */ | 502 | ISA device autoprobes on a running machine are not recommended. */ |
503 | int | 503 | |
504 | init_module(void) | 504 | int __init init_module(void) |
505 | { | 505 | { |
506 | struct net_device *dev; | 506 | struct net_device *dev; |
507 | int this_dev, found = 0; | 507 | int this_dev, found = 0; |
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index 30ec235e6935..2e8ac995d56f 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig | |||
@@ -447,6 +447,7 @@ config AIRO_CS | |||
447 | tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards" | 447 | tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards" |
448 | depends on NET_RADIO && PCMCIA && (BROKEN || !M32R) | 448 | depends on NET_RADIO && PCMCIA && (BROKEN || !M32R) |
449 | select CRYPTO | 449 | select CRYPTO |
450 | select CRYPTO_AES | ||
450 | ---help--- | 451 | ---help--- |
451 | This is the standard Linux driver to support Cisco/Aironet PCMCIA | 452 | This is the standard Linux driver to support Cisco/Aironet PCMCIA |
452 | 802.11 wireless cards. This driver is the same as the Aironet | 453 | 802.11 wireless cards. This driver is the same as the Aironet |
@@ -550,6 +551,7 @@ config USB_ZD1201 | |||
550 | 551 | ||
551 | source "drivers/net/wireless/hostap/Kconfig" | 552 | source "drivers/net/wireless/hostap/Kconfig" |
552 | source "drivers/net/wireless/bcm43xx/Kconfig" | 553 | source "drivers/net/wireless/bcm43xx/Kconfig" |
554 | source "drivers/net/wireless/zd1211rw/Kconfig" | ||
553 | 555 | ||
554 | # yes, this works even when no drivers are selected | 556 | # yes, this works even when no drivers are selected |
555 | config NET_WIRELESS | 557 | config NET_WIRELESS |
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile index 512603de309a..c613af17a159 100644 --- a/drivers/net/wireless/Makefile +++ b/drivers/net/wireless/Makefile | |||
@@ -36,6 +36,7 @@ obj-$(CONFIG_PRISM54) += prism54/ | |||
36 | 36 | ||
37 | obj-$(CONFIG_HOSTAP) += hostap/ | 37 | obj-$(CONFIG_HOSTAP) += hostap/ |
38 | obj-$(CONFIG_BCM43XX) += bcm43xx/ | 38 | obj-$(CONFIG_BCM43XX) += bcm43xx/ |
39 | obj-$(CONFIG_ZD1211RW) += zd1211rw/ | ||
39 | 40 | ||
40 | # 16-bit wireless PCMCIA client drivers | 41 | # 16-bit wireless PCMCIA client drivers |
41 | obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o | 42 | obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o |
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c index d8f5600578b4..df317c1e12a8 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c | |||
@@ -1547,7 +1547,7 @@ static void handle_irq_noise(struct bcm43xx_private *bcm) | |||
1547 | goto generate_new; | 1547 | goto generate_new; |
1548 | 1548 | ||
1549 | /* Get the noise samples. */ | 1549 | /* Get the noise samples. */ |
1550 | assert(bcm->noisecalc.nr_samples <= 8); | 1550 | assert(bcm->noisecalc.nr_samples < 8); |
1551 | i = bcm->noisecalc.nr_samples; | 1551 | i = bcm->noisecalc.nr_samples; |
1552 | noise[0] = limit_value(noise[0], 0, ARRAY_SIZE(radio->nrssi_lt) - 1); | 1552 | noise[0] = limit_value(noise[0], 0, ARRAY_SIZE(radio->nrssi_lt) - 1); |
1553 | noise[1] = limit_value(noise[1], 0, ARRAY_SIZE(radio->nrssi_lt) - 1); | 1553 | noise[1] = limit_value(noise[1], 0, ARRAY_SIZE(radio->nrssi_lt) - 1); |
@@ -1885,6 +1885,15 @@ static irqreturn_t bcm43xx_interrupt_handler(int irq, void *dev_id, struct pt_re | |||
1885 | 1885 | ||
1886 | spin_lock(&bcm->irq_lock); | 1886 | spin_lock(&bcm->irq_lock); |
1887 | 1887 | ||
1888 | /* Only accept IRQs, if we are initialized properly. | ||
1889 | * This avoids an RX race while initializing. | ||
1890 | * We should probably not enable IRQs before we are initialized | ||
1891 | * completely, but some careful work is needed to fix this. I think it | ||
1892 | * is best to stay with this cheap workaround for now... . | ||
1893 | */ | ||
1894 | if (unlikely(bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED)) | ||
1895 | goto out; | ||
1896 | |||
1888 | reason = bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON); | 1897 | reason = bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON); |
1889 | if (reason == 0xffffffff) { | 1898 | if (reason == 0xffffffff) { |
1890 | /* irq not for us (shared irq) */ | 1899 | /* irq not for us (shared irq) */ |
@@ -1906,19 +1915,11 @@ static irqreturn_t bcm43xx_interrupt_handler(int irq, void *dev_id, struct pt_re | |||
1906 | 1915 | ||
1907 | bcm43xx_interrupt_ack(bcm, reason); | 1916 | bcm43xx_interrupt_ack(bcm, reason); |
1908 | 1917 | ||
1909 | /* Only accept IRQs, if we are initialized properly. | 1918 | /* disable all IRQs. They are enabled again in the bottom half. */ |
1910 | * This avoids an RX race while initializing. | 1919 | bcm->irq_savedstate = bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL); |
1911 | * We should probably not enable IRQs before we are initialized | 1920 | /* save the reason code and call our bottom half. */ |
1912 | * completely, but some careful work is needed to fix this. I think it | 1921 | bcm->irq_reason = reason; |
1913 | * is best to stay with this cheap workaround for now... . | 1922 | tasklet_schedule(&bcm->isr_tasklet); |
1914 | */ | ||
1915 | if (likely(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED)) { | ||
1916 | /* disable all IRQs. They are enabled again in the bottom half. */ | ||
1917 | bcm->irq_savedstate = bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL); | ||
1918 | /* save the reason code and call our bottom half. */ | ||
1919 | bcm->irq_reason = reason; | ||
1920 | tasklet_schedule(&bcm->isr_tasklet); | ||
1921 | } | ||
1922 | 1923 | ||
1923 | out: | 1924 | out: |
1924 | mmiowb(); | 1925 | mmiowb(); |
@@ -3698,6 +3699,10 @@ static void bcm43xx_ieee80211_set_security(struct net_device *net_dev, | |||
3698 | secinfo->encrypt = sec->encrypt; | 3699 | secinfo->encrypt = sec->encrypt; |
3699 | dprintk(", .encrypt = %d", sec->encrypt); | 3700 | dprintk(", .encrypt = %d", sec->encrypt); |
3700 | } | 3701 | } |
3702 | if (sec->flags & SEC_AUTH_MODE) { | ||
3703 | secinfo->auth_mode = sec->auth_mode; | ||
3704 | dprintk(", .auth_mode = %d", sec->auth_mode); | ||
3705 | } | ||
3701 | dprintk("\n"); | 3706 | dprintk("\n"); |
3702 | if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED && | 3707 | if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED && |
3703 | !bcm->ieee->host_encrypt) { | 3708 | !bcm->ieee->host_encrypt) { |
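The bcm43xx interrupt-handler change above moves the initialization check ahead of the first MMIO read, so an interrupt arriving on a shared line during setup exits before touching BCM43xx_MMIO_GEN_IRQ_REASON. A sketch of that early-exit shape (names, locking and state are placeholders, not the driver's; it keeps the three-argument handler signature of this kernel era):

    /* Sketch of the early-exit shape only; not the bcm43xx code. */
    #include <linux/interrupt.h>
    #include <linux/spinlock.h>

    struct example_card {
        spinlock_t lock;
        int initialized;
    };

    static irqreturn_t example_isr(int irq, void *dev_id, struct pt_regs *regs)
    {
        struct example_card *card = dev_id;

        spin_lock(&card->lock);
        if (unlikely(!card->initialized)) {
            /* shared IRQ during setup: leave the hardware alone */
            spin_unlock(&card->lock);
            return IRQ_HANDLED;
        }
        /* ... read and acknowledge the interrupt reason here ... */
        spin_unlock(&card->lock);
        return IRQ_HANDLED;
    }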
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.h b/drivers/net/wireless/bcm43xx/bcm43xx_main.h index 30a202b258b5..116493671f88 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_main.h +++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.h | |||
@@ -112,30 +112,6 @@ int bcm43xx_channel_to_freq(struct bcm43xx_private *bcm, | |||
112 | return bcm43xx_channel_to_freq_bg(channel); | 112 | return bcm43xx_channel_to_freq_bg(channel); |
113 | } | 113 | } |
114 | 114 | ||
115 | /* Lightweight function to check if a channel number is valid. | ||
116 | * Note that this does _NOT_ check for geographical restrictions! | ||
117 | */ | ||
118 | static inline | ||
119 | int bcm43xx_is_valid_channel_a(u8 channel) | ||
120 | { | ||
121 | return (channel >= IEEE80211_52GHZ_MIN_CHANNEL | ||
122 | && channel <= IEEE80211_52GHZ_MAX_CHANNEL); | ||
123 | } | ||
124 | static inline | ||
125 | int bcm43xx_is_valid_channel_bg(u8 channel) | ||
126 | { | ||
127 | return (channel >= IEEE80211_24GHZ_MIN_CHANNEL | ||
128 | && channel <= IEEE80211_24GHZ_MAX_CHANNEL); | ||
129 | } | ||
130 | static inline | ||
131 | int bcm43xx_is_valid_channel(struct bcm43xx_private *bcm, | ||
132 | u8 channel) | ||
133 | { | ||
134 | if (bcm43xx_current_phy(bcm)->type == BCM43xx_PHYTYPE_A) | ||
135 | return bcm43xx_is_valid_channel_a(channel); | ||
136 | return bcm43xx_is_valid_channel_bg(channel); | ||
137 | } | ||
138 | |||
139 | void bcm43xx_tsf_read(struct bcm43xx_private *bcm, u64 *tsf); | 115 | void bcm43xx_tsf_read(struct bcm43xx_private *bcm, u64 *tsf); |
140 | void bcm43xx_tsf_write(struct bcm43xx_private *bcm, u64 tsf); | 116 | void bcm43xx_tsf_write(struct bcm43xx_private *bcm, u64 tsf); |
141 | 117 | ||
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_radio.c b/drivers/net/wireless/bcm43xx/bcm43xx_radio.c index af5c0bff1696..bb9c484d7e19 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_radio.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_radio.c | |||
@@ -1594,11 +1594,11 @@ int bcm43xx_radio_selectchannel(struct bcm43xx_private *bcm, | |||
1594 | u16 r8, tmp; | 1594 | u16 r8, tmp; |
1595 | u16 freq; | 1595 | u16 freq; |
1596 | 1596 | ||
1597 | if (!ieee80211_is_valid_channel(bcm->ieee, channel)) | ||
1598 | return -EINVAL; | ||
1597 | if ((radio->manufact == 0x17F) && | 1599 | if ((radio->manufact == 0x17F) && |
1598 | (radio->version == 0x2060) && | 1600 | (radio->version == 0x2060) && |
1599 | (radio->revision == 1)) { | 1601 | (radio->revision == 1)) { |
1600 | if (channel > 200) | ||
1601 | return -EINVAL; | ||
1602 | freq = channel2freq_a(channel); | 1602 | freq = channel2freq_a(channel); |
1603 | 1603 | ||
1604 | r8 = bcm43xx_radio_read16(bcm, 0x0008); | 1604 | r8 = bcm43xx_radio_read16(bcm, 0x0008); |
@@ -1651,9 +1651,6 @@ int bcm43xx_radio_selectchannel(struct bcm43xx_private *bcm, | |||
1651 | TODO(); //TODO: TSSI2dbm workaround | 1651 | TODO(); //TODO: TSSI2dbm workaround |
1652 | bcm43xx_phy_xmitpower(bcm);//FIXME correct? | 1652 | bcm43xx_phy_xmitpower(bcm);//FIXME correct? |
1653 | } else { | 1653 | } else { |
1654 | if ((channel < 1) || (channel > 14)) | ||
1655 | return -EINVAL; | ||
1656 | |||
1657 | if (synthetic_pu_workaround) | 1654 | if (synthetic_pu_workaround) |
1658 | bcm43xx_synth_pu_workaround(bcm, channel); | 1655 | bcm43xx_synth_pu_workaround(bcm, channel); |
1659 | 1656 | ||
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c index c35cb3a0777e..5c36e29efff7 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c | |||
@@ -119,7 +119,7 @@ static int bcm43xx_wx_set_channelfreq(struct net_device *net_dev, | |||
119 | channel = bcm43xx_freq_to_channel(bcm, data->freq.m); | 119 | channel = bcm43xx_freq_to_channel(bcm, data->freq.m); |
120 | freq = data->freq.m; | 120 | freq = data->freq.m; |
121 | } | 121 | } |
122 | if (!bcm43xx_is_valid_channel(bcm, channel)) | 122 | if (!ieee80211_is_valid_channel(bcm->ieee, channel)) |
123 | goto out_unlock; | 123 | goto out_unlock; |
124 | if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) { | 124 | if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) { |
125 | //ieee80211softmac_disassoc(softmac, $REASON); | 125 | //ieee80211softmac_disassoc(softmac, $REASON); |
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_xmit.c b/drivers/net/wireless/bcm43xx/bcm43xx_xmit.c index d8ece28c079f..6dbd855b3647 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_xmit.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_xmit.c | |||
@@ -296,11 +296,14 @@ void bcm43xx_generate_txhdr(struct bcm43xx_private *bcm, | |||
296 | u16 control = 0; | 296 | u16 control = 0; |
297 | u16 wsec_rate = 0; | 297 | u16 wsec_rate = 0; |
298 | u16 encrypt_frame; | 298 | u16 encrypt_frame; |
299 | const u16 ftype = WLAN_FC_GET_TYPE(le16_to_cpu(wireless_header->frame_ctl)); | ||
300 | const int is_mgt = (ftype == IEEE80211_FTYPE_MGMT); | ||
299 | 301 | ||
300 | /* Now construct the TX header. */ | 302 | /* Now construct the TX header. */ |
301 | memset(txhdr, 0, sizeof(*txhdr)); | 303 | memset(txhdr, 0, sizeof(*txhdr)); |
302 | 304 | ||
303 | bitrate = bcm->softmac->txrates.default_rate; | 305 | bitrate = ieee80211softmac_suggest_txrate(bcm->softmac, |
306 | is_multicast_ether_addr(wireless_header->addr1), is_mgt); | ||
304 | ofdm_modulation = !(ieee80211_is_cck_rate(bitrate)); | 307 | ofdm_modulation = !(ieee80211_is_cck_rate(bitrate)); |
305 | fallback_bitrate = bcm43xx_calc_fallback_rate(bitrate); | 308 | fallback_bitrate = bcm43xx_calc_fallback_rate(bitrate); |
306 | fallback_ofdm_modulation = !(ieee80211_is_cck_rate(fallback_bitrate)); | 309 | fallback_ofdm_modulation = !(ieee80211_is_cck_rate(fallback_bitrate)); |
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c index dafaa5ff5aa6..d500012fdc7a 100644 --- a/drivers/net/wireless/hostap/hostap_hw.c +++ b/drivers/net/wireless/hostap/hostap_hw.c | |||
@@ -1042,6 +1042,9 @@ static int prism2_reset_port(struct net_device *dev) | |||
1042 | dev->name, local->fragm_threshold); | 1042 | dev->name, local->fragm_threshold); |
1043 | } | 1043 | } |
1044 | 1044 | ||
1045 | /* Some firmwares lose antenna selection settings on reset */ | ||
1046 | (void) hostap_set_antsel(local); | ||
1047 | |||
1045 | return res; | 1048 | return res; |
1046 | } | 1049 | } |
1047 | 1050 | ||
diff --git a/drivers/net/wireless/hostap/hostap_plx.c b/drivers/net/wireless/hostap/hostap_plx.c index 49860fa61c30..6dfa041be66d 100644 --- a/drivers/net/wireless/hostap/hostap_plx.c +++ b/drivers/net/wireless/hostap/hostap_plx.c | |||
@@ -66,10 +66,12 @@ static struct pci_device_id prism2_plx_id_table[] __devinitdata = { | |||
66 | PLXDEV(0x10b7, 0x7770, "3Com AirConnect PCI 777A"), | 66 | PLXDEV(0x10b7, 0x7770, "3Com AirConnect PCI 777A"), |
67 | PLXDEV(0x111a, 0x1023, "Siemens SpeedStream SS1023"), | 67 | PLXDEV(0x111a, 0x1023, "Siemens SpeedStream SS1023"), |
68 | PLXDEV(0x126c, 0x8030, "Nortel emobility"), | 68 | PLXDEV(0x126c, 0x8030, "Nortel emobility"), |
69 | PLXDEV(0x1562, 0x0001, "Symbol LA-4123"), | ||
69 | PLXDEV(0x1385, 0x4100, "Netgear MA301"), | 70 | PLXDEV(0x1385, 0x4100, "Netgear MA301"), |
70 | PLXDEV(0x15e8, 0x0130, "National Datacomm NCP130 (PLX9052)"), | 71 | PLXDEV(0x15e8, 0x0130, "National Datacomm NCP130 (PLX9052)"), |
71 | PLXDEV(0x15e8, 0x0131, "National Datacomm NCP130 (TMD7160)"), | 72 | PLXDEV(0x15e8, 0x0131, "National Datacomm NCP130 (TMD7160)"), |
72 | PLXDEV(0x1638, 0x1100, "Eumitcom WL11000"), | 73 | PLXDEV(0x1638, 0x1100, "Eumitcom WL11000"), |
74 | PLXDEV(0x16ab, 0x1100, "Global Sun Tech GL24110P"), | ||
73 | PLXDEV(0x16ab, 0x1101, "Global Sun Tech GL24110P (?)"), | 75 | PLXDEV(0x16ab, 0x1101, "Global Sun Tech GL24110P (?)"), |
74 | PLXDEV(0x16ab, 0x1102, "Linksys WPC11 with WDT11"), | 76 | PLXDEV(0x16ab, 0x1102, "Linksys WPC11 with WDT11"), |
75 | PLXDEV(0x16ab, 0x1103, "Longshine 8031"), | 77 | PLXDEV(0x16ab, 0x1103, "Longshine 8031"), |
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c index d6ed5781b93a..317ace7f9aae 100644 --- a/drivers/net/wireless/orinoco.c +++ b/drivers/net/wireless/orinoco.c | |||
@@ -2875,7 +2875,7 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev, | |||
2875 | if (orinoco_lock(priv, &flags) != 0) | 2875 | if (orinoco_lock(priv, &flags) != 0) |
2876 | return -EBUSY; | 2876 | return -EBUSY; |
2877 | 2877 | ||
2878 | if (erq->pointer) { | 2878 | if (erq->length > 0) { |
2879 | if ((index < 0) || (index >= ORINOCO_MAX_KEYS)) | 2879 | if ((index < 0) || (index >= ORINOCO_MAX_KEYS)) |
2880 | index = priv->tx_key; | 2880 | index = priv->tx_key; |
2881 | 2881 | ||
@@ -2918,7 +2918,7 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev, | |||
2918 | if (erq->flags & IW_ENCODE_RESTRICTED) | 2918 | if (erq->flags & IW_ENCODE_RESTRICTED) |
2919 | restricted = 1; | 2919 | restricted = 1; |
2920 | 2920 | ||
2921 | if (erq->pointer) { | 2921 | if (erq->pointer && erq->length > 0) { |
2922 | priv->keys[index].len = cpu_to_le16(xlen); | 2922 | priv->keys[index].len = cpu_to_le16(xlen); |
2923 | memset(priv->keys[index].data, 0, | 2923 | memset(priv->keys[index].data, 0, |
2924 | sizeof(priv->keys[index].data)); | 2924 | sizeof(priv->keys[index].data)); |
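The orinoco fix stops treating erq->pointer as proof that key material was supplied and keys off erq->length instead, since a SIOCSIWENCODE request may carry only flag changes (key index, open/restricted) with no key bytes. A small sketch of the distinction (erq mirrors the wireless-extensions iw_point, everything else is illustrative):

    /* Sketch of the supplied-key test; not the orinoco code itself. */
    #include <linux/wireless.h>

    static int example_set_encode(struct iw_point *erq)
    {
        if (erq->length > 0) {
            /* key bytes supplied: (re)program the key */
            return 0;
        }
        /* zero length: flags-only change (key index, open/restricted) */
        return 0;
    }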
diff --git a/drivers/net/wireless/spectrum_cs.c b/drivers/net/wireless/spectrum_cs.c index 15465278c789..bcc7038130f6 100644 --- a/drivers/net/wireless/spectrum_cs.c +++ b/drivers/net/wireless/spectrum_cs.c | |||
@@ -34,8 +34,6 @@ | |||
34 | 34 | ||
35 | #include "orinoco.h" | 35 | #include "orinoco.h" |
36 | 36 | ||
37 | static unsigned char *primsym; | ||
38 | static unsigned char *secsym; | ||
39 | static const char primary_fw_name[] = "symbol_sp24t_prim_fw"; | 37 | static const char primary_fw_name[] = "symbol_sp24t_prim_fw"; |
40 | static const char secondary_fw_name[] = "symbol_sp24t_sec_fw"; | 38 | static const char secondary_fw_name[] = "symbol_sp24t_sec_fw"; |
41 | 39 | ||
@@ -244,7 +242,7 @@ spectrum_reset(struct pcmcia_device *link, int idle) | |||
244 | u_int save_cor; | 242 | u_int save_cor; |
245 | 243 | ||
246 | /* Doing it if hardware is gone is guaranteed crash */ | 244 | /* Doing it if hardware is gone is guaranteed crash */ |
247 | if (pcmcia_dev_present(link)) | 245 | if (!pcmcia_dev_present(link)) |
248 | return -ENODEV; | 246 | return -ENODEV; |
249 | 247 | ||
250 | /* Save original COR value */ | 248 | /* Save original COR value */ |
@@ -440,7 +438,7 @@ spectrum_load_blocks(hermes_t *hw, const struct dblock *first_block) | |||
440 | */ | 438 | */ |
441 | static int | 439 | static int |
442 | spectrum_dl_image(hermes_t *hw, struct pcmcia_device *link, | 440 | spectrum_dl_image(hermes_t *hw, struct pcmcia_device *link, |
443 | const unsigned char *image) | 441 | const unsigned char *image, int secondary) |
444 | { | 442 | { |
445 | int ret; | 443 | int ret; |
446 | const unsigned char *ptr; | 444 | const unsigned char *ptr; |
@@ -455,7 +453,7 @@ spectrum_dl_image(hermes_t *hw, struct pcmcia_device *link, | |||
455 | first_block = (const struct dblock *) ptr; | 453 | first_block = (const struct dblock *) ptr; |
456 | 454 | ||
457 | /* Read the PDA */ | 455 | /* Read the PDA */ |
458 | if (image != primsym) { | 456 | if (secondary) { |
459 | ret = spectrum_read_pda(hw, pda, sizeof(pda)); | 457 | ret = spectrum_read_pda(hw, pda, sizeof(pda)); |
460 | if (ret) | 458 | if (ret) |
461 | return ret; | 459 | return ret; |
@@ -472,7 +470,7 @@ spectrum_dl_image(hermes_t *hw, struct pcmcia_device *link, | |||
472 | return ret; | 470 | return ret; |
473 | 471 | ||
474 | /* Write the PDA to the adapter */ | 472 | /* Write the PDA to the adapter */ |
475 | if (image != primsym) { | 473 | if (secondary) { |
476 | ret = spectrum_apply_pda(hw, first_block, pda); | 474 | ret = spectrum_apply_pda(hw, first_block, pda); |
477 | if (ret) | 475 | if (ret) |
478 | return ret; | 476 | return ret; |
@@ -487,7 +485,7 @@ spectrum_dl_image(hermes_t *hw, struct pcmcia_device *link, | |||
487 | ret = hermes_init(hw); | 485 | ret = hermes_init(hw); |
488 | 486 | ||
489 | /* hermes_reset() should return 0 with the secondary firmware */ | 487 | /* hermes_reset() should return 0 with the secondary firmware */ |
490 | if (image != primsym && ret != 0) | 488 | if (secondary && ret != 0) |
491 | return -ENODEV; | 489 | return -ENODEV; |
492 | 490 | ||
493 | /* And this should work with any firmware */ | 491 | /* And this should work with any firmware */ |
@@ -509,33 +507,30 @@ spectrum_dl_firmware(hermes_t *hw, struct pcmcia_device *link) | |||
509 | const struct firmware *fw_entry; | 507 | const struct firmware *fw_entry; |
510 | 508 | ||
511 | if (request_firmware(&fw_entry, primary_fw_name, | 509 | if (request_firmware(&fw_entry, primary_fw_name, |
512 | &handle_to_dev(link)) == 0) { | 510 | &handle_to_dev(link)) != 0) { |
513 | primsym = fw_entry->data; | ||
514 | } else { | ||
515 | printk(KERN_ERR PFX "Cannot find firmware: %s\n", | 511 | printk(KERN_ERR PFX "Cannot find firmware: %s\n", |
516 | primary_fw_name); | 512 | primary_fw_name); |
517 | return -ENOENT; | 513 | return -ENOENT; |
518 | } | 514 | } |
519 | 515 | ||
520 | if (request_firmware(&fw_entry, secondary_fw_name, | ||
521 | &handle_to_dev(link)) == 0) { | ||
522 | secsym = fw_entry->data; | ||
523 | } else { | ||
524 | printk(KERN_ERR PFX "Cannot find firmware: %s\n", | ||
525 | secondary_fw_name); | ||
526 | return -ENOENT; | ||
527 | } | ||
528 | |||
529 | /* Load primary firmware */ | 516 | /* Load primary firmware */ |
530 | ret = spectrum_dl_image(hw, link, primsym); | 517 | ret = spectrum_dl_image(hw, link, fw_entry->data, 0); |
518 | release_firmware(fw_entry); | ||
531 | if (ret) { | 519 | if (ret) { |
532 | printk(KERN_ERR PFX "Primary firmware download failed\n"); | 520 | printk(KERN_ERR PFX "Primary firmware download failed\n"); |
533 | return ret; | 521 | return ret; |
534 | } | 522 | } |
535 | 523 | ||
536 | /* Load secondary firmware */ | 524 | if (request_firmware(&fw_entry, secondary_fw_name, |
537 | ret = spectrum_dl_image(hw, link, secsym); | 525 | &handle_to_dev(link)) != 0) { |
526 | printk(KERN_ERR PFX "Cannot find firmware: %s\n", | ||
527 | secondary_fw_name); | ||
528 | return -ENOENT; | ||
529 | } | ||
538 | 530 | ||
531 | /* Load secondary firmware */ | ||
532 | ret = spectrum_dl_image(hw, link, fw_entry->data, 1); | ||
533 | release_firmware(fw_entry); | ||
539 | if (ret) { | 534 | if (ret) { |
540 | printk(KERN_ERR PFX "Secondary firmware download failed\n"); | 535 | printk(KERN_ERR PFX "Secondary firmware download failed\n"); |
541 | } | 536 | } |
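The spectrum_cs rework also pairs each request_firmware() with a release_firmware() once the image has been downloaded, and only requests the secondary image after the primary download succeeded, so the firmware buffers are no longer kept around. A sketch of the pairing (example_download() is a hypothetical stand-in for the device-specific loader):

    /* Sketch of the request/release pairing; example_download() is a
     * hypothetical stand-in for the device-specific loader. */
    #include <linux/firmware.h>
    #include <linux/device.h>

    static int example_download(struct device *dev, const u8 *data, size_t size)
    {
        return 0;                        /* stub for the sketch */
    }

    static int example_load(struct device *dev)
    {
        const struct firmware *fw;
        int ret;

        ret = request_firmware(&fw, "example_fw.bin", dev);
        if (ret)
            return ret;                  /* image not available */

        ret = example_download(dev, fw->data, fw->size);
        release_firmware(fw);            /* always drop the buffer */
        return ret;
    }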
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c index 662ecc8a33ff..c52e9bcf8d02 100644 --- a/drivers/net/wireless/zd1201.c +++ b/drivers/net/wireless/zd1201.c | |||
@@ -1820,6 +1820,8 @@ static int zd1201_probe(struct usb_interface *interface, | |||
1820 | zd->dev->name); | 1820 | zd->dev->name); |
1821 | 1821 | ||
1822 | usb_set_intfdata(interface, zd); | 1822 | usb_set_intfdata(interface, zd); |
1823 | zd1201_enable(zd); /* zd1201 likes to startup enabled, */ | ||
1824 | zd1201_disable(zd); /* interfering with all the wifis in range */ | ||
1823 | return 0; | 1825 | return 0; |
1824 | 1826 | ||
1825 | err_net: | 1827 | err_net: |
diff --git a/drivers/net/wireless/zd1211rw/Kconfig b/drivers/net/wireless/zd1211rw/Kconfig new file mode 100644 index 000000000000..66ed55bc5460 --- /dev/null +++ b/drivers/net/wireless/zd1211rw/Kconfig | |||
@@ -0,0 +1,19 @@ | |||
1 | config ZD1211RW | ||
2 | tristate "ZyDAS ZD1211/ZD1211B USB-wireless support" | ||
3 | depends on USB && IEEE80211 && IEEE80211_SOFTMAC && NET_RADIO && EXPERIMENTAL | ||
4 | select FW_LOADER | ||
5 | ---help--- | ||
6 | This is an experimental driver for the ZyDAS ZD1211/ZD1211B wireless | ||
7 | chip, present in many USB-wireless adapters. | ||
8 | |||
9 | Device firmware is required alongside this driver. You can download the | ||
10 | firmware distribution from http://zd1211.ath.cx/get-firmware | ||
11 | |||
12 | config ZD1211RW_DEBUG | ||
13 | bool "ZyDAS ZD1211 debugging" | ||
14 | depends on ZD1211RW | ||
15 | ---help--- | ||
16 | ZD1211 debugging messages. Choosing Y will result in additional debug | ||
17 | messages being saved to your kernel logs, which may help debug any | ||
18 | problems. | ||
19 | |||
diff --git a/drivers/net/wireless/zd1211rw/Makefile b/drivers/net/wireless/zd1211rw/Makefile new file mode 100644 index 000000000000..500314fc74d2 --- /dev/null +++ b/drivers/net/wireless/zd1211rw/Makefile | |||
@@ -0,0 +1,11 @@ | |||
1 | obj-$(CONFIG_ZD1211RW) += zd1211rw.o | ||
2 | |||
3 | zd1211rw-objs := zd_chip.o zd_ieee80211.o \ | ||
4 | zd_mac.o zd_netdev.o \ | ||
5 | zd_rf_al2230.o zd_rf_rf2959.o \ | ||
6 | zd_rf.o zd_usb.o zd_util.o | ||
7 | |||
8 | ifeq ($(CONFIG_ZD1211RW_DEBUG),y) | ||
9 | EXTRA_CFLAGS += -DDEBUG | ||
10 | endif | ||
11 | |||
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c new file mode 100644 index 000000000000..da9d06bdb818 --- /dev/null +++ b/drivers/net/wireless/zd1211rw/zd_chip.c | |||
@@ -0,0 +1,1615 @@ | |||
1 | /* zd_chip.c | ||
2 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License as published by | ||
5 | * the Free Software Foundation; either version 2 of the License, or | ||
6 | * (at your option) any later version. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program; if not, write to the Free Software | ||
15 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
16 | */ | ||
17 | |||
18 | /* This file implements all the hardware specific functions for the ZD1211 | ||
19 | * and ZD1211B chips. Support for the ZD1211B was possible after Timothy | ||
20 | * Legge sent me a ZD1211B device. Thank you Tim. -- Uli | ||
21 | */ | ||
22 | |||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/errno.h> | ||
25 | |||
26 | #include "zd_def.h" | ||
27 | #include "zd_chip.h" | ||
28 | #include "zd_ieee80211.h" | ||
29 | #include "zd_mac.h" | ||
30 | #include "zd_rf.h" | ||
31 | #include "zd_util.h" | ||
32 | |||
33 | void zd_chip_init(struct zd_chip *chip, | ||
34 | struct net_device *netdev, | ||
35 | struct usb_interface *intf) | ||
36 | { | ||
37 | memset(chip, 0, sizeof(*chip)); | ||
38 | mutex_init(&chip->mutex); | ||
39 | zd_usb_init(&chip->usb, netdev, intf); | ||
40 | zd_rf_init(&chip->rf); | ||
41 | } | ||
42 | |||
43 | void zd_chip_clear(struct zd_chip *chip) | ||
44 | { | ||
45 | mutex_lock(&chip->mutex); | ||
46 | zd_usb_clear(&chip->usb); | ||
47 | zd_rf_clear(&chip->rf); | ||
48 | mutex_unlock(&chip->mutex); | ||
49 | mutex_destroy(&chip->mutex); | ||
50 | memset(chip, 0, sizeof(*chip)); | ||
51 | } | ||
52 | |||
53 | static int scnprint_mac_oui(const u8 *addr, char *buffer, size_t size) | ||
54 | { | ||
55 | return scnprintf(buffer, size, "%02x-%02x-%02x", | ||
56 | addr[0], addr[1], addr[2]); | ||
57 | } | ||
58 | |||
59 | /* Prints an identifier line, which will support debugging. */ | ||
60 | static int scnprint_id(struct zd_chip *chip, char *buffer, size_t size) | ||
61 | { | ||
62 | int i = 0; | ||
63 | |||
64 | i = scnprintf(buffer, size, "zd1211%s chip ", | ||
65 | chip->is_zd1211b ? "b" : ""); | ||
66 | i += zd_usb_scnprint_id(&chip->usb, buffer+i, size-i); | ||
67 | i += scnprintf(buffer+i, size-i, " "); | ||
68 | i += scnprint_mac_oui(chip->e2p_mac, buffer+i, size-i); | ||
69 | i += scnprintf(buffer+i, size-i, " "); | ||
70 | i += zd_rf_scnprint_id(&chip->rf, buffer+i, size-i); | ||
71 | i += scnprintf(buffer+i, size-i, " pa%1x %c%c%c", chip->pa_type, | ||
72 | chip->patch_cck_gain ? 'g' : '-', | ||
73 | chip->patch_cr157 ? '7' : '-', | ||
74 | chip->patch_6m_band_edge ? '6' : '-'); | ||
75 | return i; | ||
76 | } | ||
77 | |||
78 | static void print_id(struct zd_chip *chip) | ||
79 | { | ||
80 | char buffer[80]; | ||
81 | |||
82 | scnprint_id(chip, buffer, sizeof(buffer)); | ||
83 | buffer[sizeof(buffer)-1] = 0; | ||
84 | dev_info(zd_chip_dev(chip), "%s\n", buffer); | ||
85 | } | ||
86 | |||
87 | /* Read a variable number of 32-bit values. Parameter count is not allowed to | ||
88 | * exceed USB_MAX_IOREAD32_COUNT. | ||
89 | */ | ||
90 | int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr, | ||
91 | unsigned int count) | ||
92 | { | ||
93 | int r; | ||
94 | int i; | ||
95 | zd_addr_t *a16 = (zd_addr_t *)NULL; | ||
96 | u16 *v16; | ||
97 | unsigned int count16; | ||
98 | |||
99 | if (count > USB_MAX_IOREAD32_COUNT) | ||
100 | return -EINVAL; | ||
101 | |||
102 | /* Allocate a single memory block for values and addresses. */ | ||
103 | count16 = 2*count; | ||
104 | a16 = (zd_addr_t *)kmalloc(count16 * (sizeof(zd_addr_t) + sizeof(u16)), | ||
105 | GFP_NOFS); | ||
106 | if (!a16) { | ||
107 | dev_dbg_f(zd_chip_dev(chip), | ||
108 | "error ENOMEM in allocation of a16\n"); | ||
109 | r = -ENOMEM; | ||
110 | goto out; | ||
111 | } | ||
112 | v16 = (u16 *)(a16 + count16); | ||
113 | |||
114 | for (i = 0; i < count; i++) { | ||
115 | int j = 2*i; | ||
116 | /* We read the high word always first. */ | ||
117 | a16[j] = zd_inc_word(addr[i]); | ||
118 | a16[j+1] = addr[i]; | ||
119 | } | ||
120 | |||
121 | r = zd_ioread16v_locked(chip, v16, a16, count16); | ||
122 | if (r) { | ||
123 | dev_dbg_f(zd_chip_dev(chip), | ||
124 | "error: zd_ioread16v_locked. Error number %d\n", r); | ||
125 | goto out; | ||
126 | } | ||
127 | |||
128 | for (i = 0; i < count; i++) { | ||
129 | int j = 2*i; | ||
130 | values[i] = (v16[j] << 16) | v16[j+1]; | ||
131 | } | ||
132 | |||
133 | out: | ||
134 | kfree((void *)a16); | ||
135 | return r; | ||
136 | } | ||
137 | |||
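To make the pairing above concrete: a 32-bit register is accessed as two 16-bit words, high word first at the incremented word address, and the halves are recombined after the vectored read. Below is a minimal, self-contained sketch in plain C with no kernel dependencies; the types, addresses and the `zd_inc_word` stand-in are illustrative only, not the driver's own definitions.

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins: a 32-bit register is addressed as two
 * consecutive 16-bit words, with the high word at the next word address. */
typedef uint16_t zd_addr_t;
static zd_addr_t zd_inc_word(zd_addr_t addr) { return addr + 1; }

int main(void)
{
	const zd_addr_t addr[2] = { 0x0404, 0x0406 };	/* hypothetical addresses */
	zd_addr_t a16[4];
	/* Values as if returned by the 16-bit vector read, high word first. */
	uint16_t v16[4] = { 0x1234, 0x5678, 0xdead, 0xbeef };
	uint32_t values[2];
	int i;

	for (i = 0; i < 2; i++) {
		int j = 2 * i;
		a16[j]     = zd_inc_word(addr[i]);	/* high word first */
		a16[j + 1] = addr[i];			/* then the low word */
	}
	printf("addr pairs:");
	for (i = 0; i < 4; i++)
		printf(" %#06x", (unsigned)a16[i]);
	printf("\n");
	/* ...the driver would issue one 16-bit vector read here... */
	for (i = 0; i < 2; i++) {
		int j = 2 * i;
		values[i] = ((uint32_t)v16[j] << 16) | v16[j + 1];
	}
	printf("reg0=%#010x reg1=%#010x\n",
	       (unsigned)values[0], (unsigned)values[1]);
	return 0;
}
```

Compiled with any C compiler this prints the paired addresses followed by reg0=0x12345678 reg1=0xdeadbeef.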
138 | int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs, | ||
139 | unsigned int count) | ||
140 | { | ||
141 | int i, j, r; | ||
142 | struct zd_ioreq16 *ioreqs16; | ||
143 | unsigned int count16; | ||
144 | |||
145 | ZD_ASSERT(mutex_is_locked(&chip->mutex)); | ||
146 | |||
147 | if (count == 0) | ||
148 | return 0; | ||
149 | if (count > USB_MAX_IOWRITE32_COUNT) | ||
150 | return -EINVAL; | ||
151 | |||
152 | /* Allocate a single memory block for values and addresses. */ | ||
153 | count16 = 2*count; | ||
154 | ioreqs16 = kmalloc(count16 * sizeof(struct zd_ioreq16), GFP_NOFS); | ||
155 | if (!ioreqs16) { | ||
156 | r = -ENOMEM; | ||
157 | dev_dbg_f(zd_chip_dev(chip), | ||
158 | "error %d in ioreqs16 allocation\n", r); | ||
159 | goto out; | ||
160 | } | ||
161 | |||
162 | for (i = 0; i < count; i++) { | ||
163 | j = 2*i; | ||
164 | /* We always write the high word first. */ | ||
165 | ioreqs16[j].value = ioreqs[i].value >> 16; | ||
166 | ioreqs16[j].addr = zd_inc_word(ioreqs[i].addr); | ||
167 | ioreqs16[j+1].value = ioreqs[i].value; | ||
168 | ioreqs16[j+1].addr = ioreqs[i].addr; | ||
169 | } | ||
170 | |||
171 | r = zd_usb_iowrite16v(&chip->usb, ioreqs16, count16); | ||
172 | #ifdef DEBUG | ||
173 | if (r) { | ||
174 | dev_dbg_f(zd_chip_dev(chip), | ||
175 | "error %d in zd_usb_write16v\n", r); | ||
176 | } | ||
177 | #endif /* DEBUG */ | ||
178 | out: | ||
179 | kfree(ioreqs16); | ||
180 | return r; | ||
181 | } | ||
182 | |||
183 | int zd_iowrite16a_locked(struct zd_chip *chip, | ||
184 | const struct zd_ioreq16 *ioreqs, unsigned int count) | ||
185 | { | ||
186 | int r; | ||
187 | unsigned int i, j, t, max; | ||
188 | |||
189 | ZD_ASSERT(mutex_is_locked(&chip->mutex)); | ||
190 | for (i = 0; i < count; i += j + t) { | ||
191 | t = 0; | ||
192 | max = count-i; | ||
193 | if (max > USB_MAX_IOWRITE16_COUNT) | ||
194 | max = USB_MAX_IOWRITE16_COUNT; | ||
195 | for (j = 0; j < max; j++) { | ||
196 | if (!ioreqs[i+j].addr) { | ||
197 | t = 1; | ||
198 | break; | ||
199 | } | ||
200 | } | ||
201 | |||
202 | r = zd_usb_iowrite16v(&chip->usb, &ioreqs[i], j); | ||
203 | if (r) { | ||
204 | dev_dbg_f(zd_chip_dev(chip), | ||
205 | "error zd_usb_iowrite16v. Error number %d\n", | ||
206 | r); | ||
207 | return r; | ||
208 | } | ||
209 | } | ||
210 | |||
211 | return 0; | ||
212 | } | ||
213 | |||
214 | /* Writes a variable number of 32-bit registers. The function will split | ||
215 | * the writes into several USB requests. A split can be forced by inserting | ||
216 | * an IO request with a zero address field. | ||
217 | */ | ||
218 | int zd_iowrite32a_locked(struct zd_chip *chip, | ||
219 | const struct zd_ioreq32 *ioreqs, unsigned int count) | ||
220 | { | ||
221 | int r; | ||
222 | unsigned int i, j, t, max; | ||
223 | |||
224 | for (i = 0; i < count; i += j + t) { | ||
225 | t = 0; | ||
226 | max = count-i; | ||
227 | if (max > USB_MAX_IOWRITE32_COUNT) | ||
228 | max = USB_MAX_IOWRITE32_COUNT; | ||
229 | for (j = 0; j < max; j++) { | ||
230 | if (!ioreqs[i+j].addr) { | ||
231 | t = 1; | ||
232 | break; | ||
233 | } | ||
234 | } | ||
235 | |||
236 | r = _zd_iowrite32v_locked(chip, &ioreqs[i], j); | ||
237 | if (r) { | ||
238 | dev_dbg_f(zd_chip_dev(chip), | ||
239 | "error _zd_iowrite32v_locked." | ||
240 | " Error number %d\n", r); | ||
241 | return r; | ||
242 | } | ||
243 | } | ||
244 | |||
245 | return 0; | ||
246 | } | ||
247 | |||
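The splitting rule used by zd_iowrite16a_locked() and zd_iowrite32a_locked() is easiest to see in isolation: requests are batched up to a per-transfer limit, and an entry with a zero address flushes the current batch (the zero entry itself is skipped). The sketch below is self-contained; the struct, the limit of 3 and the `flush` helper are made up for illustration, the real limits being USB_MAX_IOWRITE16_COUNT and USB_MAX_IOWRITE32_COUNT.

```c
#include <stdint.h>
#include <stdio.h>

struct ioreq { uint16_t addr; uint32_t value; };	/* illustrative */
#define MAX_PER_REQUEST 3				/* illustrative limit */

static void flush(const struct ioreq *reqs, unsigned int n)
{
	unsigned int k;
	printf("USB request carrying %u write(s):", n);
	for (k = 0; k < n; k++)
		printf(" %#06x", (unsigned)reqs[k].addr);
	printf("\n");
}

int main(void)
{
	/* The zero-address entry forces 0x010c into its own request, mirroring
	 * the empty { } separators in the PHY reset tables further below. */
	const struct ioreq reqs[] = {
		{ 0x0100, 1 }, { 0x0104, 2 }, { 0x0108, 3 }, { 0x010c, 4 },
		{ 0x0000, 0 },
		{ 0x0114, 5 },
	};
	unsigned int i, j, t, max, count = sizeof(reqs) / sizeof(reqs[0]);

	for (i = 0; i < count; i += j + t) {
		t = 0;
		max = count - i;
		if (max > MAX_PER_REQUEST)
			max = MAX_PER_REQUEST;
		for (j = 0; j < max; j++) {
			if (!reqs[i + j].addr) {
				t = 1;	/* skip the separator itself */
				break;
			}
		}
		flush(&reqs[i], j);
	}
	return 0;
}
```

Running it shows three USB requests: one with the first three writes, one with 0x010c alone (cut short by the separator), and one with 0x0114.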
248 | int zd_ioread16(struct zd_chip *chip, zd_addr_t addr, u16 *value) | ||
249 | { | ||
250 | int r; | ||
251 | |||
252 | ZD_ASSERT(!mutex_is_locked(&chip->mutex)); | ||
253 | mutex_lock(&chip->mutex); | ||
254 | r = zd_ioread16_locked(chip, value, addr); | ||
255 | mutex_unlock(&chip->mutex); | ||
256 | return r; | ||
257 | } | ||
258 | |||
259 | int zd_ioread32(struct zd_chip *chip, zd_addr_t addr, u32 *value) | ||
260 | { | ||
261 | int r; | ||
262 | |||
263 | ZD_ASSERT(!mutex_is_locked(&chip->mutex)); | ||
264 | mutex_lock(&chip->mutex); | ||
265 | r = zd_ioread32_locked(chip, value, addr); | ||
266 | mutex_unlock(&chip->mutex); | ||
267 | return r; | ||
268 | } | ||
269 | |||
270 | int zd_iowrite16(struct zd_chip *chip, zd_addr_t addr, u16 value) | ||
271 | { | ||
272 | int r; | ||
273 | |||
274 | ZD_ASSERT(!mutex_is_locked(&chip->mutex)); | ||
275 | mutex_lock(&chip->mutex); | ||
276 | r = zd_iowrite16_locked(chip, value, addr); | ||
277 | mutex_unlock(&chip->mutex); | ||
278 | return r; | ||
279 | } | ||
280 | |||
281 | int zd_iowrite32(struct zd_chip *chip, zd_addr_t addr, u32 value) | ||
282 | { | ||
283 | int r; | ||
284 | |||
285 | ZD_ASSERT(!mutex_is_locked(&chip->mutex)); | ||
286 | mutex_lock(&chip->mutex); | ||
287 | r = zd_iowrite32_locked(chip, value, addr); | ||
288 | mutex_unlock(&chip->mutex); | ||
289 | return r; | ||
290 | } | ||
291 | |||
292 | int zd_ioread32v(struct zd_chip *chip, const zd_addr_t *addresses, | ||
293 | u32 *values, unsigned int count) | ||
294 | { | ||
295 | int r; | ||
296 | |||
297 | ZD_ASSERT(!mutex_is_locked(&chip->mutex)); | ||
298 | mutex_lock(&chip->mutex); | ||
299 | r = zd_ioread32v_locked(chip, values, addresses, count); | ||
300 | mutex_unlock(&chip->mutex); | ||
301 | return r; | ||
302 | } | ||
303 | |||
304 | int zd_iowrite32a(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs, | ||
305 | unsigned int count) | ||
306 | { | ||
307 | int r; | ||
308 | |||
309 | ZD_ASSERT(!mutex_is_locked(&chip->mutex)); | ||
310 | mutex_lock(&chip->mutex); | ||
311 | r = zd_iowrite32a_locked(chip, ioreqs, count); | ||
312 | mutex_unlock(&chip->mutex); | ||
313 | return r; | ||
314 | } | ||
315 | |||
316 | static int read_pod(struct zd_chip *chip, u8 *rf_type) | ||
317 | { | ||
318 | int r; | ||
319 | u32 value; | ||
320 | |||
321 | ZD_ASSERT(mutex_is_locked(&chip->mutex)); | ||
322 | r = zd_ioread32_locked(chip, &value, E2P_POD); | ||
323 | if (r) | ||
324 | goto error; | ||
325 | dev_dbg_f(zd_chip_dev(chip), "E2P_POD %#010x\n", value); | ||
326 | |||
327 | /* FIXME: AL2230 handling (Bit 7 in POD) */ | ||
328 | *rf_type = value & 0x0f; | ||
329 | chip->pa_type = (value >> 16) & 0x0f; | ||
330 | chip->patch_cck_gain = (value >> 8) & 0x1; | ||
331 | chip->patch_cr157 = (value >> 13) & 0x1; | ||
332 | chip->patch_6m_band_edge = (value >> 21) & 0x1; | ||
333 | |||
334 | dev_dbg_f(zd_chip_dev(chip), | ||
335 | "RF %s %#01x PA type %#01x patch CCK %d patch CR157 %d " | ||
336 | "patch 6M %d\n", | ||
337 | zd_rf_name(*rf_type), *rf_type, | ||
338 | chip->pa_type, chip->patch_cck_gain, | ||
339 | chip->patch_cr157, chip->patch_6m_band_edge); | ||
340 | return 0; | ||
341 | error: | ||
342 | *rf_type = 0; | ||
343 | chip->pa_type = 0; | ||
344 | chip->patch_cck_gain = 0; | ||
345 | chip->patch_cr157 = 0; | ||
346 | chip->patch_6m_band_edge = 0; | ||
347 | return r; | ||
348 | } | ||
349 | |||
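A small worked example of the E2P_POD bit layout decoded by read_pod() above; the sample word is invented, only the shifts and masks mirror the code.

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t value = 0x00250944;	/* hypothetical EEPROM POD word */

	unsigned rf_type            = value & 0x0f;         /* bits 0..3   */
	unsigned patch_cck_gain     = (value >> 8) & 0x1;   /* bit 8       */
	unsigned patch_cr157        = (value >> 13) & 0x1;  /* bit 13      */
	unsigned pa_type            = (value >> 16) & 0x0f; /* bits 16..19 */
	unsigned patch_6m_band_edge = (value >> 21) & 0x1;  /* bit 21      */

	printf("rf %#x pa %#x cck-gain %u cr157 %u 6m-edge %u\n",
	       rf_type, pa_type, patch_cck_gain, patch_cr157,
	       patch_6m_band_edge);
	return 0;
}
```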
350 | static int _read_mac_addr(struct zd_chip *chip, u8 *mac_addr, | ||
351 | const zd_addr_t *addr) | ||
352 | { | ||
353 | int r; | ||
354 | u32 parts[2]; | ||
355 | |||
356 | r = zd_ioread32v_locked(chip, parts, (const zd_addr_t *)addr, 2); | ||
357 | if (r) { | ||
358 | dev_dbg_f(zd_chip_dev(chip), | ||
359 | "error: couldn't read e2p macs. Error number %d\n", r); | ||
360 | return r; | ||
361 | } | ||
362 | |||
363 | mac_addr[0] = parts[0]; | ||
364 | mac_addr[1] = parts[0] >> 8; | ||
365 | mac_addr[2] = parts[0] >> 16; | ||
366 | mac_addr[3] = parts[0] >> 24; | ||
367 | mac_addr[4] = parts[1]; | ||
368 | mac_addr[5] = parts[1] >> 8; | ||
369 | |||
370 | return 0; | ||
371 | } | ||
372 | |||
373 | static int read_e2p_mac_addr(struct zd_chip *chip) | ||
374 | { | ||
375 | static const zd_addr_t addr[2] = { E2P_MAC_ADDR_P1, E2P_MAC_ADDR_P2 }; | ||
376 | |||
377 | ZD_ASSERT(mutex_is_locked(&chip->mutex)); | ||
378 | return _read_mac_addr(chip, chip->e2p_mac, (const zd_addr_t *)addr); | ||
379 | } | ||
380 | |||
381 | /* MAC address: if custom MAC addresses are to be used, CR_MAC_ADDR_P1 and | ||
382 | * CR_MAC_ADDR_P2 must be overwritten. | ||
383 | */ | ||
384 | void zd_get_e2p_mac_addr(struct zd_chip *chip, u8 *mac_addr) | ||
385 | { | ||
386 | mutex_lock(&chip->mutex); | ||
387 | memcpy(mac_addr, chip->e2p_mac, ETH_ALEN); | ||
388 | mutex_unlock(&chip->mutex); | ||
389 | } | ||
390 | |||
391 | static int read_mac_addr(struct zd_chip *chip, u8 *mac_addr) | ||
392 | { | ||
393 | static const zd_addr_t addr[2] = { CR_MAC_ADDR_P1, CR_MAC_ADDR_P2 }; | ||
394 | return _read_mac_addr(chip, mac_addr, (const zd_addr_t *)addr); | ||
395 | } | ||
396 | |||
397 | int zd_read_mac_addr(struct zd_chip *chip, u8 *mac_addr) | ||
398 | { | ||
399 | int r; | ||
400 | |||
401 | dev_dbg_f(zd_chip_dev(chip), "\n"); | ||
402 | mutex_lock(&chip->mutex); | ||
403 | r = read_mac_addr(chip, mac_addr); | ||
404 | mutex_unlock(&chip->mutex); | ||
405 | return r; | ||
406 | } | ||
407 | |||
408 | int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr) | ||
409 | { | ||
410 | int r; | ||
411 | struct zd_ioreq32 reqs[2] = { | ||
412 | [0] = { .addr = CR_MAC_ADDR_P1 }, | ||
413 | [1] = { .addr = CR_MAC_ADDR_P2 }, | ||
414 | }; | ||
415 | |||
416 | reqs[0].value = (mac_addr[3] << 24) | ||
417 | | (mac_addr[2] << 16) | ||
418 | | (mac_addr[1] << 8) | ||
419 | | mac_addr[0]; | ||
420 | reqs[1].value = (mac_addr[5] << 8) | ||
421 | | mac_addr[4]; | ||
422 | |||
423 | dev_dbg_f(zd_chip_dev(chip), | ||
424 | "mac addr " MAC_FMT "\n", MAC_ARG(mac_addr)); | ||
425 | |||
426 | mutex_lock(&chip->mutex); | ||
427 | r = zd_iowrite32a_locked(chip, reqs, ARRAY_SIZE(reqs)); | ||
428 | #ifdef DEBUG | ||
429 | { | ||
430 | u8 tmp[ETH_ALEN]; | ||
431 | read_mac_addr(chip, tmp); | ||
432 | } | ||
433 | #endif /* DEBUG */ | ||
434 | mutex_unlock(&chip->mutex); | ||
435 | return r; | ||
436 | } | ||
437 | |||
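The MAC address handling in _read_mac_addr() and zd_write_mac_addr() packs the six bytes little-endian into two 32-bit words: P1 carries bytes 0-3 and P2 bytes 4-5. A standalone round-trip check of that packing, with an arbitrary example address:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x02, 0x72, 0xaa, 0xbb, 0xcc }; /* example */
	uint8_t out[6];
	uint32_t p1, p2;
	int i;

	/* pack, as in zd_write_mac_addr() */
	p1 = ((uint32_t)mac[3] << 24) | ((uint32_t)mac[2] << 16) |
	     ((uint32_t)mac[1] << 8)  | mac[0];
	p2 = ((uint32_t)mac[5] << 8)  | mac[4];

	/* unpack, as in _read_mac_addr() */
	out[0] = p1;       out[1] = p1 >> 8;
	out[2] = p1 >> 16; out[3] = p1 >> 24;
	out[4] = p2;       out[5] = p2 >> 8;

	printf("P1=%#010x P2=%#06x round-trip:", (unsigned)p1, (unsigned)p2);
	for (i = 0; i < 6; i++)
		printf(" %02x", out[i]);
	printf("\n");
	return 0;
}
```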
438 | int zd_read_regdomain(struct zd_chip *chip, u8 *regdomain) | ||
439 | { | ||
440 | int r; | ||
441 | u32 value; | ||
442 | |||
443 | mutex_lock(&chip->mutex); | ||
444 | r = zd_ioread32_locked(chip, &value, E2P_SUBID); | ||
445 | mutex_unlock(&chip->mutex); | ||
446 | if (r) | ||
447 | return r; | ||
448 | |||
449 | *regdomain = value >> 16; | ||
450 | dev_dbg_f(zd_chip_dev(chip), "regdomain: %#04x\n", *regdomain); | ||
451 | |||
452 | return 0; | ||
453 | } | ||
454 | |||
455 | static int read_values(struct zd_chip *chip, u8 *values, size_t count, | ||
456 | zd_addr_t e2p_addr, u32 guard) | ||
457 | { | ||
458 | int r; | ||
459 | int i; | ||
460 | u32 v; | ||
461 | |||
462 | ZD_ASSERT(mutex_is_locked(&chip->mutex)); | ||
463 | for (i = 0;;) { | ||
464 | r = zd_ioread32_locked(chip, &v, e2p_addr+i/2); | ||
465 | if (r) | ||
466 | return r; | ||
467 | v -= guard; | ||
468 | if (i+4 < count) { | ||
469 | values[i++] = v; | ||
470 | values[i++] = v >> 8; | ||
471 | values[i++] = v >> 16; | ||
472 | values[i++] = v >> 24; | ||
473 | continue; | ||
474 | } | ||
475 | for (;i < count; i++) | ||
476 | values[i] = v >> (8*(i%3)); | ||
477 | return 0; | ||
478 | } | ||
479 | } | ||
480 | |||
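Each 32-bit word read by read_values() carries four byte-wide calibration values, optionally protected by a guard constant that is subtracted from the raw word before the bytes are unpacked least-significant first. A standalone sketch of that unpacking, with invented numbers:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t raw   = 0x36353433;	/* hypothetical EEPROM word */
	uint32_t guard = 0x01010101;	/* hypothetical guard, 0 if unused */
	uint8_t values[4];
	uint32_t v = raw - guard;
	int i;

	for (i = 0; i < 4; i++)
		values[i] = v >> (8 * i);	/* low byte first */

	/* prints: cal bytes: 32 33 34 35 */
	printf("cal bytes: %02x %02x %02x %02x\n",
	       values[0], values[1], values[2], values[3]);
	return 0;
}
```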
481 | static int read_pwr_cal_values(struct zd_chip *chip) | ||
482 | { | ||
483 | return read_values(chip, chip->pwr_cal_values, | ||
484 | E2P_CHANNEL_COUNT, E2P_PWR_CAL_VALUE1, | ||
485 | 0); | ||
486 | } | ||
487 | |||
488 | static int read_pwr_int_values(struct zd_chip *chip) | ||
489 | { | ||
490 | return read_values(chip, chip->pwr_int_values, | ||
491 | E2P_CHANNEL_COUNT, E2P_PWR_INT_VALUE1, | ||
492 | E2P_PWR_INT_GUARD); | ||
493 | } | ||
494 | |||
495 | static int read_ofdm_cal_values(struct zd_chip *chip) | ||
496 | { | ||
497 | int r; | ||
498 | int i; | ||
499 | static const zd_addr_t addresses[] = { | ||
500 | E2P_36M_CAL_VALUE1, | ||
501 | E2P_48M_CAL_VALUE1, | ||
502 | E2P_54M_CAL_VALUE1, | ||
503 | }; | ||
504 | |||
505 | for (i = 0; i < 3; i++) { | ||
506 | r = read_values(chip, chip->ofdm_cal_values[i], | ||
507 | E2P_CHANNEL_COUNT, addresses[i], 0); | ||
508 | if (r) | ||
509 | return r; | ||
510 | } | ||
511 | return 0; | ||
512 | } | ||
513 | |||
514 | static int read_cal_int_tables(struct zd_chip *chip) | ||
515 | { | ||
516 | int r; | ||
517 | |||
518 | r = read_pwr_cal_values(chip); | ||
519 | if (r) | ||
520 | return r; | ||
521 | r = read_pwr_int_values(chip); | ||
522 | if (r) | ||
523 | return r; | ||
524 | r = read_ofdm_cal_values(chip); | ||
525 | if (r) | ||
526 | return r; | ||
527 | return 0; | ||
528 | } | ||
529 | |||
530 | /* phy means PHY (physical layer) registers */ | ||
531 | int zd_chip_lock_phy_regs(struct zd_chip *chip) | ||
532 | { | ||
533 | int r; | ||
534 | u32 tmp; | ||
535 | |||
536 | ZD_ASSERT(mutex_is_locked(&chip->mutex)); | ||
537 | r = zd_ioread32_locked(chip, &tmp, CR_REG1); | ||
538 | if (r) { | ||
539 | dev_err(zd_chip_dev(chip), "error ioread32(CR_REG1): %d\n", r); | ||
540 | return r; | ||
541 | } | ||
542 | |||
543 | dev_dbg_f(zd_chip_dev(chip), | ||
544 | "CR_REG1: 0x%02x -> 0x%02x\n", tmp, tmp & ~UNLOCK_PHY_REGS); | ||
545 | tmp &= ~UNLOCK_PHY_REGS; | ||
546 | |||
547 | r = zd_iowrite32_locked(chip, tmp, CR_REG1); | ||
548 | if (r) | ||
549 | dev_err(zd_chip_dev(chip), "error iowrite32(CR_REG1): %d\n", r); | ||
550 | return r; | ||
551 | } | ||
552 | |||
553 | int zd_chip_unlock_phy_regs(struct zd_chip *chip) | ||
554 | { | ||
555 | int r; | ||
556 | u32 tmp; | ||
557 | |||
558 | ZD_ASSERT(mutex_is_locked(&chip->mutex)); | ||
559 | r = zd_ioread32_locked(chip, &tmp, CR_REG1); | ||
560 | if (r) { | ||
561 | dev_err(zd_chip_dev(chip), | ||
562 | "error ioread32(CR_REG1): %d\n", r); | ||
563 | return r; | ||
564 | } | ||
565 | |||
566 | dev_dbg_f(zd_chip_dev(chip), | ||
567 | "CR_REG1: 0x%02x -> 0x%02x\n", tmp, tmp | UNLOCK_PHY_REGS); | ||
568 | tmp |= UNLOCK_PHY_REGS; | ||
569 | |||
570 | r = zd_iowrite32_locked(chip, tmp, CR_REG1); | ||
571 | if (r) | ||
572 | dev_err(zd_chip_dev(chip), "error iowrite32(CR_REG1): %d\n", r); | ||
573 | return r; | ||
574 | } | ||
575 | |||
576 | /* CR157 can be optionally patched by the EEPROM */ | ||
577 | static int patch_cr157(struct zd_chip *chip) | ||
578 | { | ||
579 | int r; | ||
580 | u32 value; | ||
581 | |||
582 | if (!chip->patch_cr157) | ||
583 | return 0; | ||
584 | |||
585 | r = zd_ioread32_locked(chip, &value, E2P_PHY_REG); | ||
586 | if (r) | ||
587 | return r; | ||
588 | |||
589 | dev_dbg_f(zd_chip_dev(chip), "patching value %x\n", value >> 8); | ||
590 | return zd_iowrite32_locked(chip, value >> 8, CR157); | ||
591 | } | ||
592 | |||
593 | /* | ||
594 | * 6M band edge can optionally be overwritten for certain RFs | ||
595 | * Vendor driver says: for FCC regulation, enabled per HWFeature 6M band edge | ||
596 | * bit (for AL2230, AL2230S) | ||
597 | */ | ||
598 | static int patch_6m_band_edge(struct zd_chip *chip, int channel) | ||
599 | { | ||
600 | struct zd_ioreq16 ioreqs[] = { | ||
601 | { CR128, 0x14 }, { CR129, 0x12 }, { CR130, 0x10 }, | ||
602 | { CR47, 0x1e }, | ||
603 | }; | ||
604 | |||
605 | if (!chip->patch_6m_band_edge || !chip->rf.patch_6m_band_edge) | ||
606 | return 0; | ||
607 | |||
608 | /* FIXME: Channel 11 is not the edge for all regulatory domains. */ | ||
609 | if (channel == 1 || channel == 11) | ||
610 | ioreqs[0].value = 0x12; | ||
611 | |||
612 | dev_dbg_f(zd_chip_dev(chip), "patching for channel %d\n", channel); | ||
613 | return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); | ||
614 | } | ||
615 | |||
616 | static int zd1211_hw_reset_phy(struct zd_chip *chip) | ||
617 | { | ||
618 | static const struct zd_ioreq16 ioreqs[] = { | ||
619 | { CR0, 0x0a }, { CR1, 0x06 }, { CR2, 0x26 }, | ||
620 | { CR3, 0x38 }, { CR4, 0x80 }, { CR9, 0xa0 }, | ||
621 | { CR10, 0x81 }, { CR11, 0x00 }, { CR12, 0x7f }, | ||
622 | { CR13, 0x8c }, { CR14, 0x80 }, { CR15, 0x3d }, | ||
623 | { CR16, 0x20 }, { CR17, 0x1e }, { CR18, 0x0a }, | ||
624 | { CR19, 0x48 }, { CR20, 0x0c }, { CR21, 0x0c }, | ||
625 | { CR22, 0x23 }, { CR23, 0x90 }, { CR24, 0x14 }, | ||
626 | { CR25, 0x40 }, { CR26, 0x10 }, { CR27, 0x19 }, | ||
627 | { CR28, 0x7f }, { CR29, 0x80 }, { CR30, 0x4b }, | ||
628 | { CR31, 0x60 }, { CR32, 0x43 }, { CR33, 0x08 }, | ||
629 | { CR34, 0x06 }, { CR35, 0x0a }, { CR36, 0x00 }, | ||
630 | { CR37, 0x00 }, { CR38, 0x38 }, { CR39, 0x0c }, | ||
631 | { CR40, 0x84 }, { CR41, 0x2a }, { CR42, 0x80 }, | ||
632 | { CR43, 0x10 }, { CR44, 0x12 }, { CR46, 0xff }, | ||
633 | { CR47, 0x1E }, { CR48, 0x26 }, { CR49, 0x5b }, | ||
634 | { CR64, 0xd0 }, { CR65, 0x04 }, { CR66, 0x58 }, | ||
635 | { CR67, 0xc9 }, { CR68, 0x88 }, { CR69, 0x41 }, | ||
636 | { CR70, 0x23 }, { CR71, 0x10 }, { CR72, 0xff }, | ||
637 | { CR73, 0x32 }, { CR74, 0x30 }, { CR75, 0x65 }, | ||
638 | { CR76, 0x41 }, { CR77, 0x1b }, { CR78, 0x30 }, | ||
639 | { CR79, 0x68 }, { CR80, 0x64 }, { CR81, 0x64 }, | ||
640 | { CR82, 0x00 }, { CR83, 0x00 }, { CR84, 0x00 }, | ||
641 | { CR85, 0x02 }, { CR86, 0x00 }, { CR87, 0x00 }, | ||
642 | { CR88, 0xff }, { CR89, 0xfc }, { CR90, 0x00 }, | ||
643 | { CR91, 0x00 }, { CR92, 0x00 }, { CR93, 0x08 }, | ||
644 | { CR94, 0x00 }, { CR95, 0x00 }, { CR96, 0xff }, | ||
645 | { CR97, 0xe7 }, { CR98, 0x00 }, { CR99, 0x00 }, | ||
646 | { CR100, 0x00 }, { CR101, 0xae }, { CR102, 0x02 }, | ||
647 | { CR103, 0x00 }, { CR104, 0x03 }, { CR105, 0x65 }, | ||
648 | { CR106, 0x04 }, { CR107, 0x00 }, { CR108, 0x0a }, | ||
649 | { CR109, 0xaa }, { CR110, 0xaa }, { CR111, 0x25 }, | ||
650 | { CR112, 0x25 }, { CR113, 0x00 }, { CR119, 0x1e }, | ||
651 | { CR125, 0x90 }, { CR126, 0x00 }, { CR127, 0x00 }, | ||
652 | { }, | ||
653 | { CR5, 0x00 }, { CR6, 0x00 }, { CR7, 0x00 }, | ||
654 | { CR8, 0x00 }, { CR9, 0x20 }, { CR12, 0xf0 }, | ||
655 | { CR20, 0x0e }, { CR21, 0x0e }, { CR27, 0x10 }, | ||
656 | { CR44, 0x33 }, { CR47, 0x1E }, { CR83, 0x24 }, | ||
657 | { CR84, 0x04 }, { CR85, 0x00 }, { CR86, 0x0C }, | ||
658 | { CR87, 0x12 }, { CR88, 0x0C }, { CR89, 0x00 }, | ||
659 | { CR90, 0x10 }, { CR91, 0x08 }, { CR93, 0x00 }, | ||
660 | { CR94, 0x01 }, { CR95, 0x00 }, { CR96, 0x50 }, | ||
661 | { CR97, 0x37 }, { CR98, 0x35 }, { CR101, 0x13 }, | ||
662 | { CR102, 0x27 }, { CR103, 0x27 }, { CR104, 0x18 }, | ||
663 | { CR105, 0x12 }, { CR109, 0x27 }, { CR110, 0x27 }, | ||
664 | { CR111, 0x27 }, { CR112, 0x27 }, { CR113, 0x27 }, | ||
665 | { CR114, 0x27 }, { CR115, 0x26 }, { CR116, 0x24 }, | ||
666 | { CR117, 0xfc }, { CR118, 0xfa }, { CR120, 0x4f }, | ||
667 | { CR123, 0x27 }, { CR125, 0xaa }, { CR127, 0x03 }, | ||
668 | { CR128, 0x14 }, { CR129, 0x12 }, { CR130, 0x10 }, | ||
669 | { CR131, 0x0C }, { CR136, 0xdf }, { CR137, 0x40 }, | ||
670 | { CR138, 0xa0 }, { CR139, 0xb0 }, { CR140, 0x99 }, | ||
671 | { CR141, 0x82 }, { CR142, 0x54 }, { CR143, 0x1c }, | ||
672 | { CR144, 0x6c }, { CR147, 0x07 }, { CR148, 0x4c }, | ||
673 | { CR149, 0x50 }, { CR150, 0x0e }, { CR151, 0x18 }, | ||
674 | { CR160, 0xfe }, { CR161, 0xee }, { CR162, 0xaa }, | ||
675 | { CR163, 0xfa }, { CR164, 0xfa }, { CR165, 0xea }, | ||
676 | { CR166, 0xbe }, { CR167, 0xbe }, { CR168, 0x6a }, | ||
677 | { CR169, 0xba }, { CR170, 0xba }, { CR171, 0xba }, | ||
678 | /* Note: CR204 must be written before CR203 */ | ||
679 | { CR204, 0x7d }, | ||
680 | { }, | ||
681 | { CR203, 0x30 }, | ||
682 | }; | ||
683 | |||
684 | int r, t; | ||
685 | |||
686 | dev_dbg_f(zd_chip_dev(chip), "\n"); | ||
687 | |||
688 | r = zd_chip_lock_phy_regs(chip); | ||
689 | if (r) | ||
690 | goto out; | ||
691 | |||
692 | r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); | ||
693 | if (r) | ||
694 | goto unlock; | ||
695 | |||
696 | r = patch_cr157(chip); | ||
697 | unlock: | ||
698 | t = zd_chip_unlock_phy_regs(chip); | ||
699 | if (t && !r) | ||
700 | r = t; | ||
701 | out: | ||
702 | return r; | ||
703 | } | ||
704 | |||
705 | static int zd1211b_hw_reset_phy(struct zd_chip *chip) | ||
706 | { | ||
707 | static const struct zd_ioreq16 ioreqs[] = { | ||
708 | { CR0, 0x14 }, { CR1, 0x06 }, { CR2, 0x26 }, | ||
709 | { CR3, 0x38 }, { CR4, 0x80 }, { CR9, 0xe0 }, | ||
710 | { CR10, 0x81 }, | ||
711 | /* power control { { CR11, 1 << 6 }, */ | ||
712 | { CR11, 0x00 }, | ||
713 | { CR12, 0xf0 }, { CR13, 0x8c }, { CR14, 0x80 }, | ||
714 | { CR15, 0x3d }, { CR16, 0x20 }, { CR17, 0x1e }, | ||
715 | { CR18, 0x0a }, { CR19, 0x48 }, | ||
716 | { CR20, 0x10 }, /* Org:0x0E, ComTrend:RalLink AP */ | ||
717 | { CR21, 0x0e }, { CR22, 0x23 }, { CR23, 0x90 }, | ||
718 | { CR24, 0x14 }, { CR25, 0x40 }, { CR26, 0x10 }, | ||
719 | { CR27, 0x10 }, { CR28, 0x7f }, { CR29, 0x80 }, | ||
720 | { CR30, 0x49 }, /* jointly decoder, no ASIC */ | ||
721 | { CR31, 0x60 }, { CR32, 0x43 }, { CR33, 0x08 }, | ||
722 | { CR34, 0x06 }, { CR35, 0x0a }, { CR36, 0x00 }, | ||
723 | { CR37, 0x00 }, { CR38, 0x38 }, { CR39, 0x0c }, | ||
724 | { CR40, 0x84 }, { CR41, 0x2a }, { CR42, 0x80 }, | ||
725 | { CR43, 0x10 }, { CR44, 0x33 }, { CR46, 0xff }, | ||
726 | { CR47, 0x1E }, { CR48, 0x26 }, { CR49, 0x5b }, | ||
727 | { CR64, 0xd0 }, { CR65, 0x04 }, { CR66, 0x58 }, | ||
728 | { CR67, 0xc9 }, { CR68, 0x88 }, { CR69, 0x41 }, | ||
729 | { CR70, 0x23 }, { CR71, 0x10 }, { CR72, 0xff }, | ||
730 | { CR73, 0x32 }, { CR74, 0x30 }, { CR75, 0x65 }, | ||
731 | { CR76, 0x41 }, { CR77, 0x1b }, { CR78, 0x30 }, | ||
732 | { CR79, 0xf0 }, { CR80, 0x64 }, { CR81, 0x64 }, | ||
733 | { CR82, 0x00 }, { CR83, 0x24 }, { CR84, 0x04 }, | ||
734 | { CR85, 0x00 }, { CR86, 0x0c }, { CR87, 0x12 }, | ||
735 | { CR88, 0x0c }, { CR89, 0x00 }, { CR90, 0x58 }, | ||
736 | { CR91, 0x04 }, { CR92, 0x00 }, { CR93, 0x00 }, | ||
737 | { CR94, 0x01 }, | ||
738 | { CR95, 0x20 }, /* ZD1211B */ | ||
739 | { CR96, 0x50 }, { CR97, 0x37 }, { CR98, 0x35 }, | ||
740 | { CR99, 0x00 }, { CR100, 0x01 }, { CR101, 0x13 }, | ||
741 | { CR102, 0x27 }, { CR103, 0x27 }, { CR104, 0x18 }, | ||
742 | { CR105, 0x12 }, { CR106, 0x04 }, { CR107, 0x00 }, | ||
743 | { CR108, 0x0a }, { CR109, 0x27 }, { CR110, 0x27 }, | ||
744 | { CR111, 0x27 }, { CR112, 0x27 }, { CR113, 0x27 }, | ||
745 | { CR114, 0x27 }, { CR115, 0x26 }, { CR116, 0x24 }, | ||
746 | { CR117, 0xfc }, { CR118, 0xfa }, { CR119, 0x1e }, | ||
747 | { CR125, 0x90 }, { CR126, 0x00 }, { CR127, 0x00 }, | ||
748 | { CR128, 0x14 }, { CR129, 0x12 }, { CR130, 0x10 }, | ||
749 | { CR131, 0x0c }, { CR136, 0xdf }, { CR137, 0xa0 }, | ||
750 | { CR138, 0xa8 }, { CR139, 0xb4 }, { CR140, 0x98 }, | ||
751 | { CR141, 0x82 }, { CR142, 0x53 }, { CR143, 0x1c }, | ||
752 | { CR144, 0x6c }, { CR147, 0x07 }, { CR148, 0x40 }, | ||
753 | { CR149, 0x40 }, /* Org:0x50 ComTrend:RalLink AP */ | ||
754 | { CR150, 0x14 }, /* Org:0x0E ComTrend:RalLink AP */ | ||
755 | { CR151, 0x18 }, { CR159, 0x70 }, { CR160, 0xfe }, | ||
756 | { CR161, 0xee }, { CR162, 0xaa }, { CR163, 0xfa }, | ||
757 | { CR164, 0xfa }, { CR165, 0xea }, { CR166, 0xbe }, | ||
758 | { CR167, 0xbe }, { CR168, 0x6a }, { CR169, 0xba }, | ||
759 | { CR170, 0xba }, { CR171, 0xba }, | ||
760 | /* Note: CR204 must be written before CR203 */ | ||
761 | { CR204, 0x7d }, | ||
762 | {}, | ||
763 | { CR203, 0x30 }, | ||
764 | }; | ||
765 | |||
766 | int r, t; | ||
767 | |||
768 | dev_dbg_f(zd_chip_dev(chip), "\n"); | ||
769 | |||
770 | r = zd_chip_lock_phy_regs(chip); | ||
771 | if (r) | ||
772 | goto out; | ||
773 | |||
774 | r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); | ||
775 | if (r) | ||
776 | goto unlock; | ||
777 | |||
778 | r = patch_cr157(chip); | ||
779 | unlock: | ||
780 | t = zd_chip_unlock_phy_regs(chip); | ||
781 | if (t && !r) | ||
782 | r = t; | ||
783 | out: | ||
784 | return r; | ||
785 | } | ||
786 | |||
787 | static int hw_reset_phy(struct zd_chip *chip) | ||
788 | { | ||
789 | return chip->is_zd1211b ? zd1211b_hw_reset_phy(chip) : | ||
790 | zd1211_hw_reset_phy(chip); | ||
791 | } | ||
792 | |||
793 | static int zd1211_hw_init_hmac(struct zd_chip *chip) | ||
794 | { | ||
795 | static const struct zd_ioreq32 ioreqs[] = { | ||
796 | { CR_ACK_TIMEOUT_EXT, 0x20 }, | ||
797 | { CR_ADDA_MBIAS_WARMTIME, 0x30000808 }, | ||
798 | { CR_ZD1211_RETRY_MAX, 0x2 }, | ||
799 | { CR_SNIFFER_ON, 0 }, | ||
800 | { CR_RX_FILTER, STA_RX_FILTER }, | ||
801 | { CR_GROUP_HASH_P1, 0x00 }, | ||
802 | { CR_GROUP_HASH_P2, 0x80000000 }, | ||
803 | { CR_REG1, 0xa4 }, | ||
804 | { CR_ADDA_PWR_DWN, 0x7f }, | ||
805 | { CR_BCN_PLCP_CFG, 0x00f00401 }, | ||
806 | { CR_PHY_DELAY, 0x00 }, | ||
807 | { CR_ACK_TIMEOUT_EXT, 0x80 }, | ||
808 | { CR_ADDA_PWR_DWN, 0x00 }, | ||
809 | { CR_ACK_TIME_80211, 0x100 }, | ||
810 | { CR_IFS_VALUE, 0x547c032 }, | ||
811 | { CR_RX_PE_DELAY, 0x70 }, | ||
812 | { CR_PS_CTRL, 0x10000000 }, | ||
813 | { CR_RTS_CTS_RATE, 0x02030203 }, | ||
814 | { CR_RX_THRESHOLD, 0x000c0640 }, | ||
815 | { CR_AFTER_PNP, 0x1 }, | ||
816 | { CR_WEP_PROTECT, 0x114 }, | ||
817 | }; | ||
818 | |||
819 | int r; | ||
820 | |||
821 | dev_dbg_f(zd_chip_dev(chip), "\n"); | ||
822 | ZD_ASSERT(mutex_is_locked(&chip->mutex)); | ||
823 | r = zd_iowrite32a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); | ||
824 | #ifdef DEBUG | ||
825 | if (r) { | ||
826 | dev_err(zd_chip_dev(chip), | ||
827 | "error in zd_iowrite32a_locked. Error number %d\n", r); | ||
828 | } | ||
829 | #endif /* DEBUG */ | ||
830 | return r; | ||
831 | } | ||
832 | |||
833 | static int zd1211b_hw_init_hmac(struct zd_chip *chip) | ||
834 | { | ||
835 | static const struct zd_ioreq32 ioreqs[] = { | ||
836 | { CR_ACK_TIMEOUT_EXT, 0x20 }, | ||
837 | { CR_ADDA_MBIAS_WARMTIME, 0x30000808 }, | ||
838 | { CR_ZD1211B_RETRY_MAX, 0x02020202 }, | ||
839 | { CR_ZD1211B_TX_PWR_CTL4, 0x007f003f }, | ||
840 | { CR_ZD1211B_TX_PWR_CTL3, 0x007f003f }, | ||
841 | { CR_ZD1211B_TX_PWR_CTL2, 0x003f001f }, | ||
842 | { CR_ZD1211B_TX_PWR_CTL1, 0x001f000f }, | ||
843 | { CR_ZD1211B_AIFS_CTL1, 0x00280028 }, | ||
844 | { CR_ZD1211B_AIFS_CTL2, 0x008C003C }, | ||
845 | { CR_ZD1211B_TXOP, 0x01800824 }, | ||
846 | { CR_SNIFFER_ON, 0 }, | ||
847 | { CR_RX_FILTER, STA_RX_FILTER }, | ||
848 | { CR_GROUP_HASH_P1, 0x00 }, | ||
849 | { CR_GROUP_HASH_P2, 0x80000000 }, | ||
850 | { CR_REG1, 0xa4 }, | ||
851 | { CR_ADDA_PWR_DWN, 0x7f }, | ||
852 | { CR_BCN_PLCP_CFG, 0x00f00401 }, | ||
853 | { CR_PHY_DELAY, 0x00 }, | ||
854 | { CR_ACK_TIMEOUT_EXT, 0x80 }, | ||
855 | { CR_ADDA_PWR_DWN, 0x00 }, | ||
856 | { CR_ACK_TIME_80211, 0x100 }, | ||
857 | { CR_IFS_VALUE, 0x547c032 }, | ||
858 | { CR_RX_PE_DELAY, 0x70 }, | ||
859 | { CR_PS_CTRL, 0x10000000 }, | ||
860 | { CR_RTS_CTS_RATE, 0x02030203 }, | ||
861 | { CR_RX_THRESHOLD, 0x000c0640 }, | ||
862 | { CR_AFTER_PNP, 0x1 }, | ||
863 | { CR_WEP_PROTECT, 0x114 }, | ||
864 | }; | ||
865 | |||
866 | int r; | ||
867 | |||
868 | dev_dbg_f(zd_chip_dev(chip), "\n"); | ||
869 | ZD_ASSERT(mutex_is_locked(&chip->mutex)); | ||
870 | r = zd_iowrite32a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); | ||
871 | if (r) { | ||
872 | dev_dbg_f(zd_chip_dev(chip), | ||
873 | "error in zd_iowrite32a_locked. Error number %d\n", r); | ||
874 | } | ||
875 | return r; | ||
876 | } | ||
877 | |||
878 | static int hw_init_hmac(struct zd_chip *chip) | ||
879 | { | ||
880 | return chip->is_zd1211b ? | ||
881 | zd1211b_hw_init_hmac(chip) : zd1211_hw_init_hmac(chip); | ||
882 | } | ||
883 | |||
884 | struct aw_pt_bi { | ||
885 | u32 atim_wnd_period; | ||
886 | u32 pre_tbtt; | ||
887 | u32 beacon_interval; | ||
888 | }; | ||
889 | |||
890 | static int get_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s) | ||
891 | { | ||
892 | int r; | ||
893 | static const zd_addr_t aw_pt_bi_addr[] = | ||
894 | { CR_ATIM_WND_PERIOD, CR_PRE_TBTT, CR_BCN_INTERVAL }; | ||
895 | u32 values[3]; | ||
896 | |||
897 | r = zd_ioread32v_locked(chip, values, (const zd_addr_t *)aw_pt_bi_addr, | ||
898 | ARRAY_SIZE(aw_pt_bi_addr)); | ||
899 | if (r) { | ||
900 | memset(s, 0, sizeof(*s)); | ||
901 | return r; | ||
902 | } | ||
903 | |||
904 | s->atim_wnd_period = values[0]; | ||
905 | s->pre_tbtt = values[1]; | ||
906 | s->beacon_interval = values[2]; | ||
907 | dev_dbg_f(zd_chip_dev(chip), "aw %u pt %u bi %u\n", | ||
908 | s->atim_wnd_period, s->pre_tbtt, s->beacon_interval); | ||
909 | return 0; | ||
910 | } | ||
911 | |||
912 | static int set_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s) | ||
913 | { | ||
914 | struct zd_ioreq32 reqs[3]; | ||
915 | |||
916 | if (s->beacon_interval <= 5) | ||
917 | s->beacon_interval = 5; | ||
918 | if (s->pre_tbtt < 4 || s->pre_tbtt >= s->beacon_interval) | ||
919 | s->pre_tbtt = s->beacon_interval - 1; | ||
920 | if (s->atim_wnd_period >= s->pre_tbtt) | ||
921 | s->atim_wnd_period = s->pre_tbtt - 1; | ||
922 | |||
923 | reqs[0].addr = CR_ATIM_WND_PERIOD; | ||
924 | reqs[0].value = s->atim_wnd_period; | ||
925 | reqs[1].addr = CR_PRE_TBTT; | ||
926 | reqs[1].value = s->pre_tbtt; | ||
927 | reqs[2].addr = CR_BCN_INTERVAL; | ||
928 | reqs[2].value = s->beacon_interval; | ||
929 | |||
930 | dev_dbg_f(zd_chip_dev(chip), | ||
931 | "aw %u pt %u bi %u\n", s->atim_wnd_period, s->pre_tbtt, | ||
932 | s->beacon_interval); | ||
933 | return zd_iowrite32a_locked(chip, reqs, ARRAY_SIZE(reqs)); | ||
934 | } | ||
935 | |||
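set_aw_pt_bi() silently repairs inconsistent settings so that atim window < pre-TBTT < beacon interval holds, with a minimum beacon interval of 5. The clamping can be checked in isolation; the struct name is reused for readability and the inputs are arbitrary examples.

```c
#include <stdint.h>
#include <stdio.h>

struct aw_pt_bi { uint32_t atim_wnd_period, pre_tbtt, beacon_interval; };

static void clamp(struct aw_pt_bi *s)
{
	if (s->beacon_interval <= 5)
		s->beacon_interval = 5;
	if (s->pre_tbtt < 4 || s->pre_tbtt >= s->beacon_interval)
		s->pre_tbtt = s->beacon_interval - 1;
	if (s->atim_wnd_period >= s->pre_tbtt)
		s->atim_wnd_period = s->pre_tbtt - 1;
}

int main(void)
{
	struct aw_pt_bi s = { .atim_wnd_period = 200, .pre_tbtt = 2,
			      .beacon_interval = 100 };
	clamp(&s);
	/* prints: aw 98 pt 99 bi 100 */
	printf("aw %u pt %u bi %u\n",
	       (unsigned)s.atim_wnd_period, (unsigned)s.pre_tbtt,
	       (unsigned)s.beacon_interval);
	return 0;
}
```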
936 | |||
937 | static int set_beacon_interval(struct zd_chip *chip, u32 interval) | ||
938 | { | ||
939 | int r; | ||
940 | struct aw_pt_bi s; | ||
941 | |||
942 | ZD_ASSERT(mutex_is_locked(&chip->mutex)); | ||
943 | r = get_aw_pt_bi(chip, &s); | ||
944 | if (r) | ||
945 | return r; | ||
946 | s.beacon_interval = interval; | ||
947 | return set_aw_pt_bi(chip, &s); | ||
948 | } | ||
949 | |||
950 | int zd_set_beacon_interval(struct zd_chip *chip, u32 interval) | ||
951 | { | ||
952 | int r; | ||
953 | |||
954 | mutex_lock(&chip->mutex); | ||
955 | r = set_beacon_interval(chip, interval); | ||
956 | mutex_unlock(&chip->mutex); | ||
957 | return r; | ||
958 | } | ||
959 | |||
960 | static int hw_init(struct zd_chip *chip) | ||
961 | { | ||
962 | int r; | ||
963 | |||
964 | dev_dbg_f(zd_chip_dev(chip), "\n"); | ||
965 | ZD_ASSERT(mutex_is_locked(&chip->mutex)); | ||
966 | r = hw_reset_phy(chip); | ||
967 | if (r) | ||
968 | return r; | ||
969 | |||
970 | r = hw_init_hmac(chip); | ||
971 | if (r) | ||
972 | return r; | ||
973 | r = set_beacon_interval(chip, 100); | ||
974 | if (r) | ||
975 | return r; | ||
976 | return 0; | ||
977 | } | ||
978 | |||
979 | #ifdef DEBUG | ||
980 | static int dump_cr(struct zd_chip *chip, const zd_addr_t addr, | ||
981 | const char *addr_string) | ||
982 | { | ||
983 | int r; | ||
984 | u32 value; | ||
985 | |||
986 | r = zd_ioread32_locked(chip, &value, addr); | ||
987 | if (r) { | ||
988 | dev_dbg_f(zd_chip_dev(chip), | ||
989 | "error reading %s. Error number %d\n", addr_string, r); | ||
990 | return r; | ||
991 | } | ||
992 | |||
993 | dev_dbg_f(zd_chip_dev(chip), "%s %#010x\n", | ||
994 | addr_string, (unsigned int)value); | ||
995 | return 0; | ||
996 | } | ||
997 | |||
998 | static int test_init(struct zd_chip *chip) | ||
999 | { | ||
1000 | int r; | ||
1001 | |||
1002 | r = dump_cr(chip, CR_AFTER_PNP, "CR_AFTER_PNP"); | ||
1003 | if (r) | ||
1004 | return r; | ||
1005 | r = dump_cr(chip, CR_GPI_EN, "CR_GPI_EN"); | ||
1006 | if (r) | ||
1007 | return r; | ||
1008 | return dump_cr(chip, CR_INTERRUPT, "CR_INTERRUPT"); | ||
1009 | } | ||
1010 | |||
1011 | static void dump_fw_registers(struct zd_chip *chip) | ||
1012 | { | ||
1013 | static const zd_addr_t addr[4] = { | ||
1014 | FW_FIRMWARE_VER, FW_USB_SPEED, FW_FIX_TX_RATE, | ||
1015 | FW_LINK_STATUS | ||
1016 | }; | ||
1017 | |||
1018 | int r; | ||
1019 | u16 values[4]; | ||
1020 | |||
1021 | r = zd_ioread16v_locked(chip, values, (const zd_addr_t*)addr, | ||
1022 | ARRAY_SIZE(addr)); | ||
1023 | if (r) { | ||
1024 | dev_dbg_f(zd_chip_dev(chip), "error %d zd_ioread16v_locked\n", | ||
1025 | r); | ||
1026 | return; | ||
1027 | } | ||
1028 | |||
1029 | dev_dbg_f(zd_chip_dev(chip), "FW_FIRMWARE_VER %#06hx\n", values[0]); | ||
1030 | dev_dbg_f(zd_chip_dev(chip), "FW_USB_SPEED %#06hx\n", values[1]); | ||
1031 | dev_dbg_f(zd_chip_dev(chip), "FW_FIX_TX_RATE %#06hx\n", values[2]); | ||
1032 | dev_dbg_f(zd_chip_dev(chip), "FW_LINK_STATUS %#06hx\n", values[3]); | ||
1033 | } | ||
1034 | #endif /* DEBUG */ | ||
1035 | |||
1036 | static int print_fw_version(struct zd_chip *chip) | ||
1037 | { | ||
1038 | int r; | ||
1039 | u16 version; | ||
1040 | |||
1041 | r = zd_ioread16_locked(chip, &version, FW_FIRMWARE_VER); | ||
1042 | if (r) | ||
1043 | return r; | ||
1044 | |||
1045 | dev_info(zd_chip_dev(chip),"firmware version %04hx\n", version); | ||
1046 | return 0; | ||
1047 | } | ||
1048 | |||
1049 | static int set_mandatory_rates(struct zd_chip *chip, enum ieee80211_std std) | ||
1050 | { | ||
1051 | u32 rates; | ||
1052 | ZD_ASSERT(mutex_is_locked(&chip->mutex)); | ||
1053 | /* This sets the mandatory rates, which depend only on the standard | ||
1054 | * that the device supports. Until further notice we should try | ||
1055 | * to support 802.11g also for full speed USB. | ||
1056 | */ | ||
1057 | switch (std) { | ||
1058 | case IEEE80211B: | ||
1059 | rates = CR_RATE_1M|CR_RATE_2M|CR_RATE_5_5M|CR_RATE_11M; | ||
1060 | break; | ||
1061 | case IEEE80211G: | ||
1062 | rates = CR_RATE_1M|CR_RATE_2M|CR_RATE_5_5M|CR_RATE_11M| | ||
1063 | CR_RATE_6M|CR_RATE_12M|CR_RATE_24M; | ||
1064 | break; | ||
1065 | default: | ||
1066 | return -EINVAL; | ||
1067 | } | ||
1068 | return zd_iowrite32_locked(chip, rates, CR_MANDATORY_RATE_TBL); | ||
1069 | } | ||
1070 | |||
1071 | int zd_chip_enable_hwint(struct zd_chip *chip) | ||
1072 | { | ||
1073 | int r; | ||
1074 | |||
1075 | mutex_lock(&chip->mutex); | ||
1076 | r = zd_iowrite32_locked(chip, HWINT_ENABLED, CR_INTERRUPT); | ||
1077 | mutex_unlock(&chip->mutex); | ||
1078 | return r; | ||
1079 | } | ||
1080 | |||
1081 | static int disable_hwint(struct zd_chip *chip) | ||
1082 | { | ||
1083 | return zd_iowrite32_locked(chip, HWINT_DISABLED, CR_INTERRUPT); | ||
1084 | } | ||
1085 | |||
1086 | int zd_chip_disable_hwint(struct zd_chip *chip) | ||
1087 | { | ||
1088 | int r; | ||
1089 | |||
1090 | mutex_lock(&chip->mutex); | ||
1091 | r = disable_hwint(chip); | ||
1092 | mutex_unlock(&chip->mutex); | ||
1093 | return r; | ||
1094 | } | ||
1095 | |||
1096 | int zd_chip_init_hw(struct zd_chip *chip, u8 device_type) | ||
1097 | { | ||
1098 | int r; | ||
1099 | u8 rf_type; | ||
1100 | |||
1101 | dev_dbg_f(zd_chip_dev(chip), "\n"); | ||
1102 | |||
1103 | mutex_lock(&chip->mutex); | ||
1104 | chip->is_zd1211b = (device_type == DEVICE_ZD1211B) != 0; | ||
1105 | |||
1106 | #ifdef DEBUG | ||
1107 | r = test_init(chip); | ||
1108 | if (r) | ||
1109 | goto out; | ||
1110 | #endif | ||
1111 | r = zd_iowrite32_locked(chip, 1, CR_AFTER_PNP); | ||
1112 | if (r) | ||
1113 | goto out; | ||
1114 | |||
1115 | r = zd_usb_init_hw(&chip->usb); | ||
1116 | if (r) | ||
1117 | goto out; | ||
1118 | |||
1119 | /* GPI is always disabled, also in the other driver. | ||
1120 | */ | ||
1121 | r = zd_iowrite32_locked(chip, 0, CR_GPI_EN); | ||
1122 | if (r) | ||
1123 | goto out; | ||
1124 | r = zd_iowrite32_locked(chip, CWIN_SIZE, CR_CWMIN_CWMAX); | ||
1125 | if (r) | ||
1126 | goto out; | ||
1127 | /* Currently we support IEEE 802.11g for full and high speed USB. | ||
1128 | * It is open to discussion whether we should support pure 802.11b mode for | ||
1129 | * full speed USB. | ||
1130 | */ | ||
1131 | r = set_mandatory_rates(chip, IEEE80211G); | ||
1132 | if (r) | ||
1133 | goto out; | ||
1134 | /* Disabling interrupts is certainly a smart thing here. | ||
1135 | */ | ||
1136 | r = disable_hwint(chip); | ||
1137 | if (r) | ||
1138 | goto out; | ||
1139 | r = read_pod(chip, &rf_type); | ||
1140 | if (r) | ||
1141 | goto out; | ||
1142 | r = hw_init(chip); | ||
1143 | if (r) | ||
1144 | goto out; | ||
1145 | r = zd_rf_init_hw(&chip->rf, rf_type); | ||
1146 | if (r) | ||
1147 | goto out; | ||
1148 | |||
1149 | r = print_fw_version(chip); | ||
1150 | if (r) | ||
1151 | goto out; | ||
1152 | |||
1153 | #ifdef DEBUG | ||
1154 | dump_fw_registers(chip); | ||
1155 | r = test_init(chip); | ||
1156 | if (r) | ||
1157 | goto out; | ||
1158 | #endif /* DEBUG */ | ||
1159 | |||
1160 | r = read_e2p_mac_addr(chip); | ||
1161 | if (r) | ||
1162 | goto out; | ||
1163 | |||
1164 | r = read_cal_int_tables(chip); | ||
1165 | if (r) | ||
1166 | goto out; | ||
1167 | |||
1168 | print_id(chip); | ||
1169 | out: | ||
1170 | mutex_unlock(&chip->mutex); | ||
1171 | return r; | ||
1172 | } | ||
1173 | |||
1174 | static int update_pwr_int(struct zd_chip *chip, u8 channel) | ||
1175 | { | ||
1176 | u8 value = chip->pwr_int_values[channel - 1]; | ||
1177 | dev_dbg_f(zd_chip_dev(chip), "channel %d pwr_int %#04x\n", | ||
1178 | channel, value); | ||
1179 | return zd_iowrite32_locked(chip, value, CR31); | ||
1180 | } | ||
1181 | |||
1182 | static int update_pwr_cal(struct zd_chip *chip, u8 channel) | ||
1183 | { | ||
1184 | u8 value = chip->pwr_cal_values[channel-1]; | ||
1185 | dev_dbg_f(zd_chip_dev(chip), "channel %d pwr_cal %#04x\n", | ||
1186 | channel, value); | ||
1187 | return zd_iowrite32_locked(chip, value, CR68); | ||
1188 | } | ||
1189 | |||
1190 | static int update_ofdm_cal(struct zd_chip *chip, u8 channel) | ||
1191 | { | ||
1192 | struct zd_ioreq32 ioreqs[3]; | ||
1193 | |||
1194 | ioreqs[0].addr = CR67; | ||
1195 | ioreqs[0].value = chip->ofdm_cal_values[OFDM_36M_INDEX][channel-1]; | ||
1196 | ioreqs[1].addr = CR66; | ||
1197 | ioreqs[1].value = chip->ofdm_cal_values[OFDM_48M_INDEX][channel-1]; | ||
1198 | ioreqs[2].addr = CR65; | ||
1199 | ioreqs[2].value = chip->ofdm_cal_values[OFDM_54M_INDEX][channel-1]; | ||
1200 | |||
1201 | dev_dbg_f(zd_chip_dev(chip), | ||
1202 | "channel %d ofdm_cal 36M %#04x 48M %#04x 54M %#04x\n", | ||
1203 | channel, ioreqs[0].value, ioreqs[1].value, ioreqs[2].value); | ||
1204 | return zd_iowrite32a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); | ||
1205 | } | ||
1206 | |||
1207 | static int update_channel_integration_and_calibration(struct zd_chip *chip, | ||
1208 | u8 channel) | ||
1209 | { | ||
1210 | int r; | ||
1211 | |||
1212 | r = update_pwr_int(chip, channel); | ||
1213 | if (r) | ||
1214 | return r; | ||
1215 | if (chip->is_zd1211b) { | ||
1216 | static const struct zd_ioreq32 ioreqs[] = { | ||
1217 | { CR69, 0x28 }, | ||
1218 | {}, | ||
1219 | { CR69, 0x2a }, | ||
1220 | }; | ||
1221 | |||
1222 | r = update_ofdm_cal(chip, channel); | ||
1223 | if (r) | ||
1224 | return r; | ||
1225 | r = update_pwr_cal(chip, channel); | ||
1226 | if (r) | ||
1227 | return r; | ||
1228 | r = zd_iowrite32a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); | ||
1229 | if (r) | ||
1230 | return r; | ||
1231 | } | ||
1232 | |||
1233 | return 0; | ||
1234 | } | ||
1235 | |||
1236 | /* The CCK baseband gain can be optionally patched by the EEPROM */ | ||
1237 | static int patch_cck_gain(struct zd_chip *chip) | ||
1238 | { | ||
1239 | int r; | ||
1240 | u32 value; | ||
1241 | |||
1242 | if (!chip->patch_cck_gain) | ||
1243 | return 0; | ||
1244 | |||
1245 | ZD_ASSERT(mutex_is_locked(&chip->mutex)); | ||
1246 | r = zd_ioread32_locked(chip, &value, E2P_PHY_REG); | ||
1247 | if (r) | ||
1248 | return r; | ||
1249 | dev_dbg_f(zd_chip_dev(chip), "patching value %x\n", value & 0xff); | ||
1250 | return zd_iowrite32_locked(chip, value & 0xff, CR47); | ||
1251 | } | ||
1252 | |||
1253 | int zd_chip_set_channel(struct zd_chip *chip, u8 channel) | ||
1254 | { | ||
1255 | int r, t; | ||
1256 | |||
1257 | mutex_lock(&chip->mutex); | ||
1258 | r = zd_chip_lock_phy_regs(chip); | ||
1259 | if (r) | ||
1260 | goto out; | ||
1261 | r = zd_rf_set_channel(&chip->rf, channel); | ||
1262 | if (r) | ||
1263 | goto unlock; | ||
1264 | r = update_channel_integration_and_calibration(chip, channel); | ||
1265 | if (r) | ||
1266 | goto unlock; | ||
1267 | r = patch_cck_gain(chip); | ||
1268 | if (r) | ||
1269 | goto unlock; | ||
1270 | r = patch_6m_band_edge(chip, channel); | ||
1271 | if (r) | ||
1272 | goto unlock; | ||
1273 | r = zd_iowrite32_locked(chip, 0, CR_CONFIG_PHILIPS); | ||
1274 | unlock: | ||
1275 | t = zd_chip_unlock_phy_regs(chip); | ||
1276 | if (t && !r) | ||
1277 | r = t; | ||
1278 | out: | ||
1279 | mutex_unlock(&chip->mutex); | ||
1280 | return r; | ||
1281 | } | ||
1282 | |||
1283 | u8 zd_chip_get_channel(struct zd_chip *chip) | ||
1284 | { | ||
1285 | u8 channel; | ||
1286 | |||
1287 | mutex_lock(&chip->mutex); | ||
1288 | channel = chip->rf.channel; | ||
1289 | mutex_unlock(&chip->mutex); | ||
1290 | return channel; | ||
1291 | } | ||
1292 | |||
1293 | static u16 led_mask(int led) | ||
1294 | { | ||
1295 | switch (led) { | ||
1296 | case 1: | ||
1297 | return LED1; | ||
1298 | case 2: | ||
1299 | return LED2; | ||
1300 | default: | ||
1301 | return 0; | ||
1302 | } | ||
1303 | } | ||
1304 | |||
1305 | static int read_led_reg(struct zd_chip *chip, u16 *status) | ||
1306 | { | ||
1307 | ZD_ASSERT(mutex_is_locked(&chip->mutex)); | ||
1308 | return zd_ioread16_locked(chip, status, CR_LED); | ||
1309 | } | ||
1310 | |||
1311 | static int write_led_reg(struct zd_chip *chip, u16 status) | ||
1312 | { | ||
1313 | ZD_ASSERT(mutex_is_locked(&chip->mutex)); | ||
1314 | return zd_iowrite16_locked(chip, status, CR_LED); | ||
1315 | } | ||
1316 | |||
1317 | int zd_chip_led_status(struct zd_chip *chip, int led, enum led_status status) | ||
1318 | { | ||
1319 | int r, ret; | ||
1320 | u16 mask = led_mask(led); | ||
1321 | u16 reg; | ||
1322 | |||
1323 | if (!mask) | ||
1324 | return -EINVAL; | ||
1325 | mutex_lock(&chip->mutex); | ||
1326 | r = read_led_reg(chip, &reg); | ||
1327 | if (r) | ||
1328 | goto out; | ||
1329 | switch (status) { | ||
1330 | case LED_STATUS: | ||
1331 | r = (reg & mask) ? LED_ON : LED_OFF; | ||
1332 | goto out; | ||
1333 | case LED_OFF: | ||
1334 | reg &= ~mask; | ||
1335 | ret = LED_OFF; | ||
1336 | break; | ||
1337 | case LED_FLIP: | ||
1338 | reg ^= mask; | ||
1339 | ret = (reg & mask) ? LED_ON : LED_OFF; | ||
1340 | break; | ||
1341 | case LED_ON: | ||
1342 | reg |= mask; | ||
1343 | ret = LED_ON; | ||
1344 | break; | ||
1345 | default: | ||
1346 | r = -EINVAL; | ||
1347 | goto out; | ||
1348 | } | ||
1349 | r = write_led_reg(chip, reg); | ||
1350 | if (!r) | ||
1351 | r = ret; | ||
1352 | out: | ||
1353 | mutex_unlock(&chip->mutex); | ||
1354 | return r; | ||
1355 | } | ||
1356 | |||
1357 | int zd_chip_led_flip(struct zd_chip *chip, int led, | ||
1358 | const unsigned int *phases_msecs, unsigned int count) | ||
1359 | { | ||
1360 | int i, r; | ||
1361 | enum led_status status; | ||
1362 | |||
1363 | r = zd_chip_led_status(chip, led, LED_STATUS); | ||
1364 | if (r < 0) | ||
1365 | return r; | ||
1366 | status = r; | ||
1367 | for (i = 0; i < count; i++) { | ||
1368 | r = zd_chip_led_status(chip, led, LED_FLIP); | ||
1369 | if (r < 0) | ||
1370 | goto out; | ||
1371 | msleep(phases_msecs[i]); | ||
1372 | } | ||
1373 | |||
1374 | out: | ||
1375 | zd_chip_led_status(chip, led, status); | ||
1376 | return r; | ||
1377 | } | ||
1378 | |||
1379 | int zd_chip_set_basic_rates(struct zd_chip *chip, u16 cr_rates) | ||
1380 | { | ||
1381 | int r; | ||
1382 | |||
1383 | if (cr_rates & ~(CR_RATES_80211B|CR_RATES_80211G)) | ||
1384 | return -EINVAL; | ||
1385 | |||
1386 | mutex_lock(&chip->mutex); | ||
1387 | r = zd_iowrite32_locked(chip, cr_rates, CR_BASIC_RATE_TBL); | ||
1388 | mutex_unlock(&chip->mutex); | ||
1389 | return r; | ||
1390 | } | ||
1391 | |||
1392 | static int ofdm_qual_db(u8 status_quality, u8 rate, unsigned int size) | ||
1393 | { | ||
1394 | static const u16 constants[] = { | ||
1395 | 715, 655, 585, 540, 470, 410, 360, 315, | ||
1396 | 270, 235, 205, 175, 150, 125, 105, 85, | ||
1397 | 65, 50, 40, 25, 15 | ||
1398 | }; | ||
1399 | |||
1400 | int i; | ||
1401 | u32 x; | ||
1402 | |||
1403 | /* It seems that their quality parameter is per signal | ||
1404 | * and is converted here to a per-bit value. | ||
1405 | */ | ||
1406 | switch (rate) { | ||
1407 | case ZD_OFDM_RATE_6M: | ||
1408 | case ZD_OFDM_RATE_12M: | ||
1409 | case ZD_OFDM_RATE_24M: | ||
1410 | size *= 2; | ||
1411 | break; | ||
1412 | case ZD_OFDM_RATE_9M: | ||
1413 | case ZD_OFDM_RATE_18M: | ||
1414 | case ZD_OFDM_RATE_36M: | ||
1415 | case ZD_OFDM_RATE_54M: | ||
1416 | size *= 4; | ||
1417 | size /= 3; | ||
1418 | break; | ||
1419 | case ZD_OFDM_RATE_48M: | ||
1420 | size *= 3; | ||
1421 | size /= 2; | ||
1422 | break; | ||
1423 | default: | ||
1424 | return -EINVAL; | ||
1425 | } | ||
1426 | |||
1427 | x = (10000 * status_quality)/size; | ||
1428 | for (i = 0; i < ARRAY_SIZE(constants); i++) { | ||
1429 | if (x > constants[i]) | ||
1430 | break; | ||
1431 | } | ||
1432 | |||
1433 | return i; | ||
1434 | } | ||
1435 | |||
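ofdm_qual_db() first rescales the frame size according to the coding rate of the OFDM mode (rate-1/2 modes double it, rate-3/4 modes multiply by 4/3, 48M by 3/2), then maps 10000*quality/size through the step table to a dB figure. The sketch below uses invented rate codes and reproduces the driver's constant table; the coding-rate remarks in the comments are an interpretation, not taken from the source.

```c
#include <stdio.h>

enum { RATE_6M, RATE_9M, RATE_48M };	/* illustrative rate codes */

static unsigned scaled_size(int rate, unsigned size)
{
	switch (rate) {
	case RATE_6M:  return size * 2;		/* rate-1/2 coding (6/12/24M) */
	case RATE_9M:  return size * 4 / 3;	/* rate-3/4 coding (9/18/36/54M) */
	case RATE_48M: return size * 3 / 2;	/* rate-2/3 coding (48M) */
	default:       return size;
	}
}

int main(void)
{
	static const unsigned short constants[] = {
		715, 655, 585, 540, 470, 410, 360, 315,
		270, 235, 205, 175, 150, 125, 105, 85,
		65, 50, 40, 25, 15
	};
	unsigned quality = 20, size = 500, i;
	unsigned x = (10000 * quality) / scaled_size(RATE_6M, size);

	for (i = 0; i < sizeof(constants) / sizeof(constants[0]); i++)
		if (x > constants[i])
			break;
	/* x = 200; the first constant below 200 is 175 at index 11 -> 11 dB */
	printf("x=%u -> %u dB\n", x, i);
	return 0;
}
```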
1436 | static unsigned int log10times100(unsigned int x) | ||
1437 | { | ||
1438 | static const u8 log10[] = { | ||
1439 | 0, | ||
1440 | 0, 30, 47, 60, 69, 77, 84, 90, 95, 100, | ||
1441 | 104, 107, 111, 114, 117, 120, 123, 125, 127, 130, | ||
1442 | 132, 134, 136, 138, 139, 141, 143, 144, 146, 147, | ||
1443 | 149, 150, 151, 153, 154, 155, 156, 157, 159, 160, | ||
1444 | 161, 162, 163, 164, 165, 166, 167, 168, 169, 169, | ||
1445 | 170, 171, 172, 173, 174, 174, 175, 176, 177, 177, | ||
1446 | 178, 179, 179, 180, 181, 181, 182, 183, 183, 184, | ||
1447 | 185, 185, 186, 186, 187, 188, 188, 189, 189, 190, | ||
1448 | 190, 191, 191, 192, 192, 193, 193, 194, 194, 195, | ||
1449 | 195, 196, 196, 197, 197, 198, 198, 199, 199, 200, | ||
1450 | 200, 200, 201, 201, 202, 202, 202, 203, 203, 204, | ||
1451 | 204, 204, 205, 205, 206, 206, 206, 207, 207, 207, | ||
1452 | 208, 208, 208, 209, 209, 210, 210, 210, 211, 211, | ||
1453 | 211, 212, 212, 212, 213, 213, 213, 213, 214, 214, | ||
1454 | 214, 215, 215, 215, 216, 216, 216, 217, 217, 217, | ||
1455 | 217, 218, 218, 218, 219, 219, 219, 219, 220, 220, | ||
1456 | 220, 220, 221, 221, 221, 222, 222, 222, 222, 223, | ||
1457 | 223, 223, 223, 224, 224, 224, 224, | ||
1458 | }; | ||
1459 | |||
1460 | return x < ARRAY_SIZE(log10) ? log10[x] : 225; | ||
1461 | } | ||
1462 | |||
1463 | enum { | ||
1464 | MAX_CCK_EVM_DB = 45, | ||
1465 | }; | ||
1466 | |||
1467 | static int cck_evm_db(u8 status_quality) | ||
1468 | { | ||
1469 | return (20 * log10times100(status_quality)) / 100; | ||
1470 | } | ||
1471 | |||
1472 | static int cck_snr_db(u8 status_quality) | ||
1473 | { | ||
1474 | int r = MAX_CCK_EVM_DB - cck_evm_db(status_quality); | ||
1475 | ZD_ASSERT(r >= 0); | ||
1476 | return r; | ||
1477 | } | ||
1478 | |||
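For CCK frames the driver derives an EVM figure of 20*log10(quality) dB from the scaled lookup table above and reports the SNR as the distance to MAX_CCK_EVM_DB. A worked example using three entries copied from that table:

```c
#include <stdio.h>

/* Only the entries needed for the example; they match the driver's
 * 100*log10(x) table above at indices 10, 50 and 100. */
static const unsigned char log10times100[] = {
	[10] = 100, [50] = 169, [100] = 200,
};
#define MAX_CCK_EVM_DB 45

static int cck_evm_db(int q) { return (20 * log10times100[q]) / 100; }
static int cck_snr_db(int q) { return MAX_CCK_EVM_DB - cck_evm_db(q); }

int main(void)
{
	/* q = 10  -> EVM 20 dB, SNR 25 dB
	 * q = 50  -> EVM 33 dB, SNR 12 dB
	 * q = 100 -> EVM 40 dB, SNR  5 dB */
	int samples[] = { 10, 50, 100 };
	int i;

	for (i = 0; i < 3; i++)
		printf("quality %3d -> evm %2d dB, snr %2d dB\n",
		       samples[i], cck_evm_db(samples[i]),
		       cck_snr_db(samples[i]));
	return 0;
}
```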
1479 | static int rx_qual_db(const void *rx_frame, unsigned int size, | ||
1480 | const struct rx_status *status) | ||
1481 | { | ||
1482 | return (status->frame_status&ZD_RX_OFDM) ? | ||
1483 | ofdm_qual_db(status->signal_quality_ofdm, | ||
1484 | zd_ofdm_plcp_header_rate(rx_frame), | ||
1485 | size) : | ||
1486 | cck_snr_db(status->signal_quality_cck); | ||
1487 | } | ||
1488 | |||
1489 | u8 zd_rx_qual_percent(const void *rx_frame, unsigned int size, | ||
1490 | const struct rx_status *status) | ||
1491 | { | ||
1492 | int r = rx_qual_db(rx_frame, size, status); | ||
1493 | if (r < 0) | ||
1494 | r = 0; | ||
1495 | r = (r * 100) / 14; | ||
1496 | if (r > 100) | ||
1497 | r = 100; | ||
1498 | return r; | ||
1499 | } | ||
1500 | |||
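zd_rx_qual_percent() then maps the dB figure linearly onto a percentage, treating 14 dB as 100% and clamping at both ends. A tiny standalone check of that arithmetic:

```c
#include <stdio.h>

static int qual_percent(int db)
{
	int r = db < 0 ? 0 : db;	/* clamp negative dB to 0 */
	r = (r * 100) / 14;		/* 14 dB corresponds to 100% */
	return r > 100 ? 100 : r;
}

int main(void)
{
	int samples[] = { -3, 0, 5, 7, 14, 20 };
	int i;
	/* prints: -3->0%, 0->0%, 5->35%, 7->50%, 14->100%, 20->100% */
	for (i = 0; i < 6; i++)
		printf("%d dB -> %d%%\n", samples[i], qual_percent(samples[i]));
	return 0;
}
```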
1501 | u8 zd_rx_strength_percent(u8 rssi) | ||
1502 | { | ||
1503 | int r = (rssi*100) / 30; | ||
1504 | if (r > 100) | ||
1505 | r = 100; | ||
1506 | return (u8) r; | ||
1507 | } | ||
1508 | |||
1509 | u16 zd_rx_rate(const void *rx_frame, const struct rx_status *status) | ||
1510 | { | ||
1511 | static const u16 ofdm_rates[] = { | ||
1512 | [ZD_OFDM_RATE_6M] = 60, | ||
1513 | [ZD_OFDM_RATE_9M] = 90, | ||
1514 | [ZD_OFDM_RATE_12M] = 120, | ||
1515 | [ZD_OFDM_RATE_18M] = 180, | ||
1516 | [ZD_OFDM_RATE_24M] = 240, | ||
1517 | [ZD_OFDM_RATE_36M] = 360, | ||
1518 | [ZD_OFDM_RATE_48M] = 480, | ||
1519 | [ZD_OFDM_RATE_54M] = 540, | ||
1520 | }; | ||
1521 | u16 rate; | ||
1522 | if (status->frame_status & ZD_RX_OFDM) { | ||
1523 | u8 ofdm_rate = zd_ofdm_plcp_header_rate(rx_frame); | ||
1524 | rate = ofdm_rates[ofdm_rate & 0xf]; | ||
1525 | } else { | ||
1526 | u8 cck_rate = zd_cck_plcp_header_rate(rx_frame); | ||
1527 | switch (cck_rate) { | ||
1528 | case ZD_CCK_SIGNAL_1M: | ||
1529 | rate = 10; | ||
1530 | break; | ||
1531 | case ZD_CCK_SIGNAL_2M: | ||
1532 | rate = 20; | ||
1533 | break; | ||
1534 | case ZD_CCK_SIGNAL_5M5: | ||
1535 | rate = 55; | ||
1536 | break; | ||
1537 | case ZD_CCK_SIGNAL_11M: | ||
1538 | rate = 110; | ||
1539 | break; | ||
1540 | default: | ||
1541 | rate = 0; | ||
1542 | } | ||
1543 | } | ||
1544 | |||
1545 | return rate; | ||
1546 | } | ||
1547 | |||
1548 | int zd_chip_switch_radio_on(struct zd_chip *chip) | ||
1549 | { | ||
1550 | int r; | ||
1551 | |||
1552 | mutex_lock(&chip->mutex); | ||
1553 | r = zd_switch_radio_on(&chip->rf); | ||
1554 | mutex_unlock(&chip->mutex); | ||
1555 | return r; | ||
1556 | } | ||
1557 | |||
1558 | int zd_chip_switch_radio_off(struct zd_chip *chip) | ||
1559 | { | ||
1560 | int r; | ||
1561 | |||
1562 | mutex_lock(&chip->mutex); | ||
1563 | r = zd_switch_radio_off(&chip->rf); | ||
1564 | mutex_unlock(&chip->mutex); | ||
1565 | return r; | ||
1566 | } | ||
1567 | |||
1568 | int zd_chip_enable_int(struct zd_chip *chip) | ||
1569 | { | ||
1570 | int r; | ||
1571 | |||
1572 | mutex_lock(&chip->mutex); | ||
1573 | r = zd_usb_enable_int(&chip->usb); | ||
1574 | mutex_unlock(&chip->mutex); | ||
1575 | return r; | ||
1576 | } | ||
1577 | |||
1578 | void zd_chip_disable_int(struct zd_chip *chip) | ||
1579 | { | ||
1580 | mutex_lock(&chip->mutex); | ||
1581 | zd_usb_disable_int(&chip->usb); | ||
1582 | mutex_unlock(&chip->mutex); | ||
1583 | } | ||
1584 | |||
1585 | int zd_chip_enable_rx(struct zd_chip *chip) | ||
1586 | { | ||
1587 | int r; | ||
1588 | |||
1589 | mutex_lock(&chip->mutex); | ||
1590 | r = zd_usb_enable_rx(&chip->usb); | ||
1591 | mutex_unlock(&chip->mutex); | ||
1592 | return r; | ||
1593 | } | ||
1594 | |||
1595 | void zd_chip_disable_rx(struct zd_chip *chip) | ||
1596 | { | ||
1597 | mutex_lock(&chip->mutex); | ||
1598 | zd_usb_disable_rx(&chip->usb); | ||
1599 | mutex_unlock(&chip->mutex); | ||
1600 | } | ||
1601 | |||
1602 | int zd_rfwritev_locked(struct zd_chip *chip, | ||
1603 | const u32* values, unsigned int count, u8 bits) | ||
1604 | { | ||
1605 | int r; | ||
1606 | unsigned int i; | ||
1607 | |||
1608 | for (i = 0; i < count; i++) { | ||
1609 | r = zd_rfwrite_locked(chip, values[i], bits); | ||
1610 | if (r) | ||
1611 | return r; | ||
1612 | } | ||
1613 | |||
1614 | return 0; | ||
1615 | } | ||
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h new file mode 100644 index 000000000000..069d2b467339 --- /dev/null +++ b/drivers/net/wireless/zd1211rw/zd_chip.h | |||
@@ -0,0 +1,827 @@ | |||
1 | /* zd_chip.h | ||
2 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License as published by | ||
5 | * the Free Software Foundation; either version 2 of the License, or | ||
6 | * (at your option) any later version. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program; if not, write to the Free Software | ||
15 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
16 | */ | ||
17 | |||
18 | #ifndef _ZD_CHIP_H | ||
19 | #define _ZD_CHIP_H | ||
20 | |||
21 | #include "zd_types.h" | ||
22 | #include "zd_rf.h" | ||
23 | #include "zd_usb.h" | ||
24 | |||
25 | /* Header for the Media Access Controller (MAC) and the Baseband Processor | ||
26 | * (BBP). It appears that the ZD1211 wraps the old ZD1205 with USB glue and | ||
27 | * adds a processor for handling the USB protocol. | ||
28 | */ | ||
29 | |||
30 | /* 8-bit hardware registers */ | ||
31 | #define CR0 CTL_REG(0x0000) | ||
32 | #define CR1 CTL_REG(0x0004) | ||
33 | #define CR2 CTL_REG(0x0008) | ||
34 | #define CR3 CTL_REG(0x000C) | ||
35 | |||
36 | #define CR5 CTL_REG(0x0010) | ||
37 | /* bit 5: if set, short preamble is used | ||
38 | * bit 6: filter band - Japan channel 14 on, else off | ||
39 | */ | ||
40 | #define CR6 CTL_REG(0x0014) | ||
41 | #define CR7 CTL_REG(0x0018) | ||
42 | #define CR8 CTL_REG(0x001C) | ||
43 | |||
44 | #define CR4 CTL_REG(0x0020) | ||
45 | |||
46 | #define CR9 CTL_REG(0x0024) | ||
47 | /* bit 2: antenna switch (together with CR10) */ | ||
48 | #define CR10 CTL_REG(0x0028) | ||
49 | /* bit 1: antenna switch (together with CR9) | ||
50 | * RF2959 controls radio on and off with CR11 | ||
51 | */ | ||
52 | #define CR11 CTL_REG(0x002C) | ||
53 | /* bit 6: TX power control for OFDM | ||
54 | * RF2959 controls radio on and off with CR10 | ||
55 | */ | ||
56 | #define CR12 CTL_REG(0x0030) | ||
57 | #define CR13 CTL_REG(0x0034) | ||
58 | #define CR14 CTL_REG(0x0038) | ||
59 | #define CR15 CTL_REG(0x003C) | ||
60 | #define CR16 CTL_REG(0x0040) | ||
61 | #define CR17 CTL_REG(0x0044) | ||
62 | #define CR18 CTL_REG(0x0048) | ||
63 | #define CR19 CTL_REG(0x004C) | ||
64 | #define CR20 CTL_REG(0x0050) | ||
65 | #define CR21 CTL_REG(0x0054) | ||
66 | #define CR22 CTL_REG(0x0058) | ||
67 | #define CR23 CTL_REG(0x005C) | ||
68 | #define CR24 CTL_REG(0x0060) /* CCA threshold */ | ||
69 | #define CR25 CTL_REG(0x0064) | ||
70 | #define CR26 CTL_REG(0x0068) | ||
71 | #define CR27 CTL_REG(0x006C) | ||
72 | #define CR28 CTL_REG(0x0070) | ||
73 | #define CR29 CTL_REG(0x0074) | ||
74 | #define CR30 CTL_REG(0x0078) | ||
75 | #define CR31 CTL_REG(0x007C) /* TX power control for RF in CCK mode */ | ||
76 | #define CR32 CTL_REG(0x0080) | ||
77 | #define CR33 CTL_REG(0x0084) | ||
78 | #define CR34 CTL_REG(0x0088) | ||
79 | #define CR35 CTL_REG(0x008C) | ||
80 | #define CR36 CTL_REG(0x0090) | ||
81 | #define CR37 CTL_REG(0x0094) | ||
82 | #define CR38 CTL_REG(0x0098) | ||
83 | #define CR39 CTL_REG(0x009C) | ||
84 | #define CR40 CTL_REG(0x00A0) | ||
85 | #define CR41 CTL_REG(0x00A4) | ||
86 | #define CR42 CTL_REG(0x00A8) | ||
87 | #define CR43 CTL_REG(0x00AC) | ||
88 | #define CR44 CTL_REG(0x00B0) | ||
89 | #define CR45 CTL_REG(0x00B4) | ||
90 | #define CR46 CTL_REG(0x00B8) | ||
91 | #define CR47 CTL_REG(0x00BC) /* CCK baseband gain | ||
92 | * (patch value might be in EEPROM) | ||
93 | */ | ||
94 | #define CR48 CTL_REG(0x00C0) | ||
95 | #define CR49 CTL_REG(0x00C4) | ||
96 | #define CR50 CTL_REG(0x00C8) | ||
97 | #define CR51 CTL_REG(0x00CC) /* TX power control for RF in 6-36M modes */ | ||
98 | #define CR52 CTL_REG(0x00D0) /* TX power control for RF in 48M mode */ | ||
99 | #define CR53 CTL_REG(0x00D4) /* TX power control for RF in 54M mode */ | ||
100 | #define CR54 CTL_REG(0x00D8) | ||
101 | #define CR55 CTL_REG(0x00DC) | ||
102 | #define CR56 CTL_REG(0x00E0) | ||
103 | #define CR57 CTL_REG(0x00E4) | ||
104 | #define CR58 CTL_REG(0x00E8) | ||
105 | #define CR59 CTL_REG(0x00EC) | ||
106 | #define CR60 CTL_REG(0x00F0) | ||
107 | #define CR61 CTL_REG(0x00F4) | ||
108 | #define CR62 CTL_REG(0x00F8) | ||
109 | #define CR63 CTL_REG(0x00FC) | ||
110 | #define CR64 CTL_REG(0x0100) | ||
111 | #define CR65 CTL_REG(0x0104) /* OFDM 54M calibration */ | ||
112 | #define CR66 CTL_REG(0x0108) /* OFDM 48M calibration */ | ||
113 | #define CR67 CTL_REG(0x010C) /* OFDM 36M calibration */ | ||
114 | #define CR68 CTL_REG(0x0110) /* CCK calibration */ | ||
115 | #define CR69 CTL_REG(0x0114) | ||
116 | #define CR70 CTL_REG(0x0118) | ||
117 | #define CR71 CTL_REG(0x011C) | ||
118 | #define CR72 CTL_REG(0x0120) | ||
119 | #define CR73 CTL_REG(0x0124) | ||
120 | #define CR74 CTL_REG(0x0128) | ||
121 | #define CR75 CTL_REG(0x012C) | ||
122 | #define CR76 CTL_REG(0x0130) | ||
123 | #define CR77 CTL_REG(0x0134) | ||
124 | #define CR78 CTL_REG(0x0138) | ||
125 | #define CR79 CTL_REG(0x013C) | ||
126 | #define CR80 CTL_REG(0x0140) | ||
127 | #define CR81 CTL_REG(0x0144) | ||
128 | #define CR82 CTL_REG(0x0148) | ||
129 | #define CR83 CTL_REG(0x014C) | ||
130 | #define CR84 CTL_REG(0x0150) | ||
131 | #define CR85 CTL_REG(0x0154) | ||
132 | #define CR86 CTL_REG(0x0158) | ||
133 | #define CR87 CTL_REG(0x015C) | ||
134 | #define CR88 CTL_REG(0x0160) | ||
135 | #define CR89 CTL_REG(0x0164) | ||
136 | #define CR90 CTL_REG(0x0168) | ||
137 | #define CR91 CTL_REG(0x016C) | ||
138 | #define CR92 CTL_REG(0x0170) | ||
139 | #define CR93 CTL_REG(0x0174) | ||
140 | #define CR94 CTL_REG(0x0178) | ||
141 | #define CR95 CTL_REG(0x017C) | ||
142 | #define CR96 CTL_REG(0x0180) | ||
143 | #define CR97 CTL_REG(0x0184) | ||
144 | #define CR98 CTL_REG(0x0188) | ||
145 | #define CR99 CTL_REG(0x018C) | ||
146 | #define CR100 CTL_REG(0x0190) | ||
147 | #define CR101 CTL_REG(0x0194) | ||
148 | #define CR102 CTL_REG(0x0198) | ||
149 | #define CR103 CTL_REG(0x019C) | ||
150 | #define CR104 CTL_REG(0x01A0) | ||
151 | #define CR105 CTL_REG(0x01A4) | ||
152 | #define CR106 CTL_REG(0x01A8) | ||
153 | #define CR107 CTL_REG(0x01AC) | ||
154 | #define CR108 CTL_REG(0x01B0) | ||
155 | #define CR109 CTL_REG(0x01B4) | ||
156 | #define CR110 CTL_REG(0x01B8) | ||
157 | #define CR111 CTL_REG(0x01BC) | ||
158 | #define CR112 CTL_REG(0x01C0) | ||
159 | #define CR113 CTL_REG(0x01C4) | ||
160 | #define CR114 CTL_REG(0x01C8) | ||
161 | #define CR115 CTL_REG(0x01CC) | ||
162 | #define CR116 CTL_REG(0x01D0) | ||
163 | #define CR117 CTL_REG(0x01D4) | ||
164 | #define CR118 CTL_REG(0x01D8) | ||
165 | #define CR119 CTL_REG(0x01DC) | ||
166 | #define CR120 CTL_REG(0x01E0) | ||
167 | #define CR121 CTL_REG(0x01E4) | ||
168 | #define CR122 CTL_REG(0x01E8) | ||
169 | #define CR123 CTL_REG(0x01EC) | ||
170 | #define CR124 CTL_REG(0x01F0) | ||
171 | #define CR125 CTL_REG(0x01F4) | ||
172 | #define CR126 CTL_REG(0x01F8) | ||
173 | #define CR127 CTL_REG(0x01FC) | ||
174 | #define CR128 CTL_REG(0x0200) | ||
175 | #define CR129 CTL_REG(0x0204) | ||
176 | #define CR130 CTL_REG(0x0208) | ||
177 | #define CR131 CTL_REG(0x020C) | ||
178 | #define CR132 CTL_REG(0x0210) | ||
179 | #define CR133 CTL_REG(0x0214) | ||
180 | #define CR134 CTL_REG(0x0218) | ||
181 | #define CR135 CTL_REG(0x021C) | ||
182 | #define CR136 CTL_REG(0x0220) | ||
183 | #define CR137 CTL_REG(0x0224) | ||
184 | #define CR138 CTL_REG(0x0228) | ||
185 | #define CR139 CTL_REG(0x022C) | ||
186 | #define CR140 CTL_REG(0x0230) | ||
187 | #define CR141 CTL_REG(0x0234) | ||
188 | #define CR142 CTL_REG(0x0238) | ||
189 | #define CR143 CTL_REG(0x023C) | ||
190 | #define CR144 CTL_REG(0x0240) | ||
191 | #define CR145 CTL_REG(0x0244) | ||
192 | #define CR146 CTL_REG(0x0248) | ||
193 | #define CR147 CTL_REG(0x024C) | ||
194 | #define CR148 CTL_REG(0x0250) | ||
195 | #define CR149 CTL_REG(0x0254) | ||
196 | #define CR150 CTL_REG(0x0258) | ||
197 | #define CR151 CTL_REG(0x025C) | ||
198 | #define CR152 CTL_REG(0x0260) | ||
199 | #define CR153 CTL_REG(0x0264) | ||
200 | #define CR154 CTL_REG(0x0268) | ||
201 | #define CR155 CTL_REG(0x026C) | ||
202 | #define CR156 CTL_REG(0x0270) | ||
203 | #define CR157 CTL_REG(0x0274) | ||
204 | #define CR158 CTL_REG(0x0278) | ||
205 | #define CR159 CTL_REG(0x027C) | ||
206 | #define CR160 CTL_REG(0x0280) | ||
207 | #define CR161 CTL_REG(0x0284) | ||
208 | #define CR162 CTL_REG(0x0288) | ||
209 | #define CR163 CTL_REG(0x028C) | ||
210 | #define CR164 CTL_REG(0x0290) | ||
211 | #define CR165 CTL_REG(0x0294) | ||
212 | #define CR166 CTL_REG(0x0298) | ||
213 | #define CR167 CTL_REG(0x029C) | ||
214 | #define CR168 CTL_REG(0x02A0) | ||
215 | #define CR169 CTL_REG(0x02A4) | ||
216 | #define CR170 CTL_REG(0x02A8) | ||
217 | #define CR171 CTL_REG(0x02AC) | ||
218 | #define CR172 CTL_REG(0x02B0) | ||
219 | #define CR173 CTL_REG(0x02B4) | ||
220 | #define CR174 CTL_REG(0x02B8) | ||
221 | #define CR175 CTL_REG(0x02BC) | ||
222 | #define CR176 CTL_REG(0x02C0) | ||
223 | #define CR177 CTL_REG(0x02C4) | ||
224 | #define CR178 CTL_REG(0x02C8) | ||
225 | #define CR179 CTL_REG(0x02CC) | ||
226 | #define CR180 CTL_REG(0x02D0) | ||
227 | #define CR181 CTL_REG(0x02D4) | ||
228 | #define CR182 CTL_REG(0x02D8) | ||
229 | #define CR183 CTL_REG(0x02DC) | ||
230 | #define CR184 CTL_REG(0x02E0) | ||
231 | #define CR185 CTL_REG(0x02E4) | ||
232 | #define CR186 CTL_REG(0x02E8) | ||
233 | #define CR187 CTL_REG(0x02EC) | ||
234 | #define CR188 CTL_REG(0x02F0) | ||
235 | #define CR189 CTL_REG(0x02F4) | ||
236 | #define CR190 CTL_REG(0x02F8) | ||
237 | #define CR191 CTL_REG(0x02FC) | ||
238 | #define CR192 CTL_REG(0x0300) | ||
239 | #define CR193 CTL_REG(0x0304) | ||
240 | #define CR194 CTL_REG(0x0308) | ||
241 | #define CR195 CTL_REG(0x030C) | ||
242 | #define CR196 CTL_REG(0x0310) | ||
243 | #define CR197 CTL_REG(0x0314) | ||
244 | #define CR198 CTL_REG(0x0318) | ||
245 | #define CR199 CTL_REG(0x031C) | ||
246 | #define CR200 CTL_REG(0x0320) | ||
247 | #define CR201 CTL_REG(0x0324) | ||
248 | #define CR202 CTL_REG(0x0328) | ||
249 | #define CR203 CTL_REG(0x032C) /* I2C bus template value & flash control */ | ||
250 | #define CR204 CTL_REG(0x0330) | ||
251 | #define CR205 CTL_REG(0x0334) | ||
252 | #define CR206 CTL_REG(0x0338) | ||
253 | #define CR207 CTL_REG(0x033C) | ||
254 | #define CR208 CTL_REG(0x0340) | ||
255 | #define CR209 CTL_REG(0x0344) | ||
256 | #define CR210 CTL_REG(0x0348) | ||
257 | #define CR211 CTL_REG(0x034C) | ||
258 | #define CR212 CTL_REG(0x0350) | ||
259 | #define CR213 CTL_REG(0x0354) | ||
260 | #define CR214 CTL_REG(0x0358) | ||
261 | #define CR215 CTL_REG(0x035C) | ||
262 | #define CR216 CTL_REG(0x0360) | ||
263 | #define CR217 CTL_REG(0x0364) | ||
264 | #define CR218 CTL_REG(0x0368) | ||
265 | #define CR219 CTL_REG(0x036C) | ||
266 | #define CR220 CTL_REG(0x0370) | ||
267 | #define CR221 CTL_REG(0x0374) | ||
268 | #define CR222 CTL_REG(0x0378) | ||
269 | #define CR223 CTL_REG(0x037C) | ||
270 | #define CR224 CTL_REG(0x0380) | ||
271 | #define CR225 CTL_REG(0x0384) | ||
272 | #define CR226 CTL_REG(0x0388) | ||
273 | #define CR227 CTL_REG(0x038C) | ||
274 | #define CR228 CTL_REG(0x0390) | ||
275 | #define CR229 CTL_REG(0x0394) | ||
276 | #define CR230 CTL_REG(0x0398) | ||
277 | #define CR231 CTL_REG(0x039C) | ||
278 | #define CR232 CTL_REG(0x03A0) | ||
279 | #define CR233 CTL_REG(0x03A4) | ||
280 | #define CR234 CTL_REG(0x03A8) | ||
281 | #define CR235 CTL_REG(0x03AC) | ||
282 | #define CR236 CTL_REG(0x03B0) | ||
283 | |||
284 | #define CR240 CTL_REG(0x03C0) | ||
285 | /* bit 7: host-controlled RF register writes | ||
286 | * CR241-CR245: used for hardware-controlled writing of RF bits; not needed | ||
287 | * for USB | ||
288 | */ | ||
289 | #define CR241 CTL_REG(0x03C4) | ||
290 | #define CR242 CTL_REG(0x03C8) | ||
291 | #define CR243 CTL_REG(0x03CC) | ||
292 | #define CR244 CTL_REG(0x03D0) | ||
293 | #define CR245 CTL_REG(0x03D4) | ||
294 | |||
295 | #define CR251 CTL_REG(0x03EC) /* only used for activation and deactivation of | ||
296 | * Airoha RFs AL2230 and AL7230B | ||
297 | */ | ||
298 | #define CR252 CTL_REG(0x03F0) | ||
299 | #define CR253 CTL_REG(0x03F4) | ||
300 | #define CR254 CTL_REG(0x03F8) | ||
301 | #define CR255 CTL_REG(0x03FC) | ||
302 | |||
303 | #define CR_MAX_PHY_REG 255 | ||
304 | |||
305 | /* Taken from the ZYDAS driver; not all of them are relevant for the | ||
306 | * ZD1211 driver. | ||
307 | */ | ||
308 | |||
309 | #define CR_RF_IF_CLK CTL_REG(0x0400) | ||
310 | #define CR_RF_IF_DATA CTL_REG(0x0404) | ||
311 | #define CR_PE1_PE2 CTL_REG(0x0408) | ||
312 | #define CR_PE2_DLY CTL_REG(0x040C) | ||
313 | #define CR_LE1 CTL_REG(0x0410) | ||
314 | #define CR_LE2 CTL_REG(0x0414) | ||
315 | /* Seems to enable/disable GPI (General Purpose IO?) */ | ||
316 | #define CR_GPI_EN CTL_REG(0x0418) | ||
317 | #define CR_RADIO_PD CTL_REG(0x042C) | ||
318 | #define CR_RF2948_PD CTL_REG(0x042C) | ||
319 | #define CR_ENABLE_PS_MANUAL_AGC CTL_REG(0x043C) | ||
320 | #define CR_CONFIG_PHILIPS CTL_REG(0x0440) | ||
321 | #define CR_SA2400_SER_AP CTL_REG(0x0444) | ||
322 | #define CR_I2C_WRITE CTL_REG(0x0444) | ||
323 | #define CR_SA2400_SER_RP CTL_REG(0x0448) | ||
324 | #define CR_RADIO_PE CTL_REG(0x0458) | ||
325 | #define CR_RST_BUS_MASTER CTL_REG(0x045C) | ||
326 | #define CR_RFCFG CTL_REG(0x0464) | ||
327 | #define CR_HSTSCHG CTL_REG(0x046C) | ||
328 | #define CR_PHY_ON CTL_REG(0x0474) | ||
329 | #define CR_RX_DELAY CTL_REG(0x0478) | ||
330 | #define CR_RX_PE_DELAY CTL_REG(0x047C) | ||
331 | #define CR_GPIO_1 CTL_REG(0x0490) | ||
332 | #define CR_GPIO_2 CTL_REG(0x0494) | ||
333 | #define CR_EncryBufMux CTL_REG(0x04A8) | ||
334 | #define CR_PS_CTRL CTL_REG(0x0500) | ||
335 | #define CR_ADDA_PWR_DWN CTL_REG(0x0504) | ||
336 | #define CR_ADDA_MBIAS_WARMTIME CTL_REG(0x0508) | ||
337 | #define CR_MAC_PS_STATE CTL_REG(0x050C) | ||
338 | |||
339 | #define CR_INTERRUPT CTL_REG(0x0510) | ||
340 | #define INT_TX_COMPLETE 0x00000001 | ||
341 | #define INT_RX_COMPLETE 0x00000002 | ||
342 | #define INT_RETRY_FAIL 0x00000004 | ||
343 | #define INT_WAKEUP 0x00000008 | ||
344 | #define INT_DTIM_NOTIFY 0x00000020 | ||
345 | #define INT_CFG_NEXT_BCN 0x00000040 | ||
346 | #define INT_BUS_ABORT 0x00000080 | ||
347 | #define INT_TX_FIFO_READY 0x00000100 | ||
348 | #define INT_UART 0x00000200 | ||
349 | #define INT_TX_COMPLETE_EN 0x00010000 | ||
350 | #define INT_RX_COMPLETE_EN 0x00020000 | ||
351 | #define INT_RETRY_FAIL_EN 0x00040000 | ||
352 | #define INT_WAKEUP_EN 0x00080000 | ||
353 | #define INT_DTIM_NOTIFY_EN 0x00200000 | ||
354 | #define INT_CFG_NEXT_BCN_EN 0x00400000 | ||
355 | #define INT_BUS_ABORT_EN 0x00800000 | ||
356 | #define INT_TX_FIFO_READY_EN 0x01000000 | ||
357 | #define INT_UART_EN 0x02000000 | ||
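Note the pattern in the interrupt masks above: each *_EN enable bit is the corresponding status bit shifted left by 16 (for example, INT_TX_COMPLETE_EN == INT_TX_COMPLETE << 16).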
358 | |||
359 | #define CR_TSF_LOW_PART CTL_REG(0x0514) | ||
360 | #define CR_TSF_HIGH_PART CTL_REG(0x0518) | ||
361 | |||
362 | /* The following three values are in time units (TU = 1024 us). | ||
363 | * The following condition must be met: | ||
364 | * atim < tbtt < bcn | ||
365 | */ | ||
366 | #define CR_ATIM_WND_PERIOD CTL_REG(0x051C) | ||
367 | #define CR_BCN_INTERVAL CTL_REG(0x0520) | ||
368 | #define CR_PRE_TBTT CTL_REG(0x0524) | ||
369 | /* in units of TU (1024 us) */ | ||
370 | |||
371 | /* for UART support */ | ||
372 | #define CR_UART_RBR_THR_DLL CTL_REG(0x0540) | ||
373 | #define CR_UART_DLM_IER CTL_REG(0x0544) | ||
374 | #define CR_UART_IIR_FCR CTL_REG(0x0548) | ||
375 | #define CR_UART_LCR CTL_REG(0x054c) | ||
376 | #define CR_UART_MCR CTL_REG(0x0550) | ||
377 | #define CR_UART_LSR CTL_REG(0x0554) | ||
378 | #define CR_UART_MSR CTL_REG(0x0558) | ||
379 | #define CR_UART_ECR CTL_REG(0x055c) | ||
380 | #define CR_UART_STATUS CTL_REG(0x0560) | ||
381 | |||
382 | #define CR_PCI_TX_ADDR_P1 CTL_REG(0x0600) | ||
383 | #define CR_PCI_TX_AddR_P2 CTL_REG(0x0604) | ||
384 | #define CR_PCI_RX_AddR_P1 CTL_REG(0x0608) | ||
385 | #define CR_PCI_RX_AddR_P2 CTL_REG(0x060C) | ||
386 | |||
387 | /* must be overwritten if custom MAC address will be used */ | ||
388 | #define CR_MAC_ADDR_P1 CTL_REG(0x0610) | ||
389 | #define CR_MAC_ADDR_P2 CTL_REG(0x0614) | ||
390 | #define CR_BSSID_P1 CTL_REG(0x0618) | ||
391 | #define CR_BSSID_P2 CTL_REG(0x061C) | ||
392 | #define CR_BCN_PLCP_CFG CTL_REG(0x0620) | ||
393 | #define CR_GROUP_HASH_P1 CTL_REG(0x0624) | ||
394 | #define CR_GROUP_HASH_P2 CTL_REG(0x0628) | ||
395 | #define CR_RX_TIMEOUT CTL_REG(0x062C) | ||
396 | |||
397 | /* Basic rates supported by the BSS. When producing ACK or CTS messages, the | ||
398 | * device will use a rate in this table that is less than or equal to the rate | ||
399 | * of the incoming frame which prompted the response */ | ||
400 | #define CR_BASIC_RATE_TBL CTL_REG(0x0630) | ||
401 | #define CR_RATE_1M 0x0001 /* 802.11b */ | ||
402 | #define CR_RATE_2M 0x0002 /* 802.11b */ | ||
403 | #define CR_RATE_5_5M 0x0004 /* 802.11b */ | ||
404 | #define CR_RATE_11M 0x0008 /* 802.11b */ | ||
405 | #define CR_RATE_6M 0x0100 /* 802.11g */ | ||
406 | #define CR_RATE_9M 0x0200 /* 802.11g */ | ||
407 | #define CR_RATE_12M 0x0400 /* 802.11g */ | ||
408 | #define CR_RATE_18M 0x0800 /* 802.11g */ | ||
409 | #define CR_RATE_24M 0x1000 /* 802.11g */ | ||
410 | #define CR_RATE_36M 0x2000 /* 802.11g */ | ||
411 | #define CR_RATE_48M 0x4000 /* 802.11g */ | ||
412 | #define CR_RATE_54M 0x8000 /* 802.11g */ | ||
413 | #define CR_RATES_80211G 0xff00 | ||
414 | #define CR_RATES_80211B 0x000f | ||
415 | |||
416 | /* Mandatory rates required in the BSS. When producing ACK or CTS messages, if | ||
417 | * the device could not find an appropriate rate in CR_BASIC_RATE_TBL, it will | ||
418 | * look for a rate in this table that is less than or equal to the rate of | ||
419 | * the incoming frame. */ | ||
420 | #define CR_MANDATORY_RATE_TBL CTL_REG(0x0634) | ||
421 | #define CR_RTS_CTS_RATE CTL_REG(0x0638) | ||
422 | |||
423 | #define CR_WEP_PROTECT CTL_REG(0x063C) | ||
424 | #define CR_RX_THRESHOLD CTL_REG(0x0640) | ||
425 | |||
426 | /* register for controlling the LEDS */ | ||
427 | #define CR_LED CTL_REG(0x0644) | ||
428 | /* masks for controlling LEDs */ | ||
429 | #define LED1 0x0100 | ||
430 | #define LED2 0x0200 | ||
431 | |||
432 | /* Seems to indicate that the configuration is complete. | ||
433 | */ | ||
434 | #define CR_AFTER_PNP CTL_REG(0x0648) | ||
435 | #define CR_ACK_TIME_80211 CTL_REG(0x0658) | ||
436 | |||
437 | #define CR_RX_OFFSET CTL_REG(0x065c) | ||
438 | |||
439 | #define CR_PHY_DELAY CTL_REG(0x066C) | ||
440 | #define CR_BCN_FIFO CTL_REG(0x0670) | ||
441 | #define CR_SNIFFER_ON CTL_REG(0x0674) | ||
442 | |||
443 | #define CR_ENCRYPTION_TYPE CTL_REG(0x0678) | ||
444 | #define NO_WEP 0 | ||
445 | #define WEP64 1 | ||
446 | #define WEP128 5 | ||
447 | #define WEP256 6 | ||
448 | #define ENC_SNIFFER 8 | ||
449 | |||
450 | #define CR_ZD1211_RETRY_MAX CTL_REG(0x067C) | ||
451 | |||
452 | #define CR_REG1 CTL_REG(0x0680) | ||
453 | /* Setting the UNLOCK_PHY_REGS bit disallows write access to the PHY | ||
454 | * registers, so one could argue it is really a LOCK bit. But calling it | ||
455 | * LOCK_PHY_REGS would be confusing. | ||
456 | */ | ||
457 | #define UNLOCK_PHY_REGS 0x0080 | ||
458 | |||
459 | #define CR_DEVICE_STATE CTL_REG(0x0684) | ||
460 | #define CR_UNDERRUN_CNT CTL_REG(0x0688) | ||
461 | |||
462 | #define CR_RX_FILTER CTL_REG(0x068c) | ||
463 | #define RX_FILTER_ASSOC_RESPONSE 0x0002 | ||
464 | #define RX_FILTER_REASSOC_RESPONSE 0x0008 | ||
465 | #define RX_FILTER_PROBE_RESPONSE 0x0020 | ||
466 | #define RX_FILTER_BEACON 0x0100 | ||
467 | #define RX_FILTER_DISASSOC 0x0400 | ||
468 | #define RX_FILTER_AUTH 0x0800 | ||
469 | #define AP_RX_FILTER 0x0400feff | ||
470 | #define STA_RX_FILTER 0x0000ffff | ||
471 | |||
472 | /* Monitor mode sets the filter to 0xffffffff */ | ||
473 | |||
474 | #define CR_ACK_TIMEOUT_EXT CTL_REG(0x0690) | ||
475 | #define CR_BCN_FIFO_SEMAPHORE CTL_REG(0x0694) | ||
476 | #define CR_IFS_VALUE CTL_REG(0x0698) | ||
477 | #define CR_RX_TIME_OUT CTL_REG(0x069C) | ||
478 | #define CR_TOTAL_RX_FRM CTL_REG(0x06A0) | ||
479 | #define CR_CRC32_CNT CTL_REG(0x06A4) | ||
480 | #define CR_CRC16_CNT CTL_REG(0x06A8) | ||
481 | #define CR_DECRYPTION_ERR_UNI CTL_REG(0x06AC) | ||
482 | #define CR_RX_FIFO_OVERRUN CTL_REG(0x06B0) | ||
483 | |||
484 | #define CR_DECRYPTION_ERR_MUL CTL_REG(0x06BC) | ||
485 | |||
486 | #define CR_NAV_CNT CTL_REG(0x06C4) | ||
487 | #define CR_NAV_CCA CTL_REG(0x06C8) | ||
488 | #define CR_RETRY_CNT CTL_REG(0x06CC) | ||
489 | |||
490 | #define CR_READ_TCB_ADDR CTL_REG(0x06E8) | ||
491 | #define CR_READ_RFD_ADDR CTL_REG(0x06EC) | ||
492 | #define CR_CWMIN_CWMAX CTL_REG(0x06F0) | ||
493 | #define CR_TOTAL_TX_FRM CTL_REG(0x06F4) | ||
494 | |||
495 | /* CAM: Continuous Access Mode (power management) */ | ||
496 | #define CR_CAM_MODE CTL_REG(0x0700) | ||
497 | #define CR_CAM_ROLL_TB_LOW CTL_REG(0x0704) | ||
498 | #define CR_CAM_ROLL_TB_HIGH CTL_REG(0x0708) | ||
499 | #define CR_CAM_ADDRESS CTL_REG(0x070C) | ||
500 | #define CR_CAM_DATA CTL_REG(0x0710) | ||
501 | |||
502 | #define CR_ROMDIR CTL_REG(0x0714) | ||
503 | |||
504 | #define CR_DECRY_ERR_FLG_LOW CTL_REG(0x0714) | ||
505 | #define CR_DECRY_ERR_FLG_HIGH CTL_REG(0x0718) | ||
506 | |||
507 | #define CR_WEPKEY0 CTL_REG(0x0720) | ||
508 | #define CR_WEPKEY1 CTL_REG(0x0724) | ||
509 | #define CR_WEPKEY2 CTL_REG(0x0728) | ||
510 | #define CR_WEPKEY3 CTL_REG(0x072C) | ||
511 | #define CR_WEPKEY4 CTL_REG(0x0730) | ||
512 | #define CR_WEPKEY5 CTL_REG(0x0734) | ||
513 | #define CR_WEPKEY6 CTL_REG(0x0738) | ||
514 | #define CR_WEPKEY7 CTL_REG(0x073C) | ||
515 | #define CR_WEPKEY8 CTL_REG(0x0740) | ||
516 | #define CR_WEPKEY9 CTL_REG(0x0744) | ||
517 | #define CR_WEPKEY10 CTL_REG(0x0748) | ||
518 | #define CR_WEPKEY11 CTL_REG(0x074C) | ||
519 | #define CR_WEPKEY12 CTL_REG(0x0750) | ||
520 | #define CR_WEPKEY13 CTL_REG(0x0754) | ||
521 | #define CR_WEPKEY14 CTL_REG(0x0758) | ||
522 | #define CR_WEPKEY15 CTL_REG(0x075c) | ||
523 | #define CR_TKIP_MODE CTL_REG(0x0760) | ||
524 | |||
525 | #define CR_EEPROM_PROTECT0 CTL_REG(0x0758) | ||
526 | #define CR_EEPROM_PROTECT1 CTL_REG(0x075C) | ||
527 | |||
528 | #define CR_DBG_FIFO_RD CTL_REG(0x0800) | ||
529 | #define CR_DBG_SELECT CTL_REG(0x0804) | ||
530 | #define CR_FIFO_Length CTL_REG(0x0808) | ||
531 | |||
532 | |||
533 | #define CR_RSSI_MGC CTL_REG(0x0810) | ||
534 | |||
535 | #define CR_PON CTL_REG(0x0818) | ||
536 | #define CR_RX_ON CTL_REG(0x081C) | ||
537 | #define CR_TX_ON CTL_REG(0x0820) | ||
538 | #define CR_CHIP_EN CTL_REG(0x0824) | ||
539 | #define CR_LO_SW CTL_REG(0x0828) | ||
540 | #define CR_TXRX_SW CTL_REG(0x082C) | ||
541 | #define CR_S_MD CTL_REG(0x0830) | ||
542 | |||
543 | #define CR_USB_DEBUG_PORT CTL_REG(0x0888) | ||
544 | |||
545 | #define CR_ZD1211B_TX_PWR_CTL1 CTL_REG(0x0b00) | ||
546 | #define CR_ZD1211B_TX_PWR_CTL2 CTL_REG(0x0b04) | ||
547 | #define CR_ZD1211B_TX_PWR_CTL3 CTL_REG(0x0b08) | ||
548 | #define CR_ZD1211B_TX_PWR_CTL4 CTL_REG(0x0b0c) | ||
549 | #define CR_ZD1211B_AIFS_CTL1 CTL_REG(0x0b10) | ||
550 | #define CR_ZD1211B_AIFS_CTL2 CTL_REG(0x0b14) | ||
551 | #define CR_ZD1211B_TXOP CTL_REG(0x0b20) | ||
552 | #define CR_ZD1211B_RETRY_MAX CTL_REG(0x0b28) | ||
553 | |||
554 | #define CWIN_SIZE 0x007f043f | ||
555 | |||
556 | |||
557 | #define HWINT_ENABLED 0x004f0000 | ||
558 | #define HWINT_DISABLED 0 | ||
559 | |||
560 | #define E2P_PWR_INT_GUARD 8 | ||
561 | #define E2P_CHANNEL_COUNT 14 | ||
562 | |||
563 | /* If you compare these addresses with the original ZYDAS driver, note | ||
564 | * that we use word mapping for the EEPROM. | ||
565 | */ | ||
566 | |||
567 | /* | ||
568 | * The upper 16 bits contain the regulatory domain. | ||
569 | */ | ||
570 | #define E2P_SUBID E2P_REG(0x00) | ||
571 | #define E2P_POD E2P_REG(0x02) | ||
572 | #define E2P_MAC_ADDR_P1 E2P_REG(0x04) | ||
573 | #define E2P_MAC_ADDR_P2 E2P_REG(0x06) | ||
574 | #define E2P_PWR_CAL_VALUE1 E2P_REG(0x08) | ||
575 | #define E2P_PWR_CAL_VALUE2 E2P_REG(0x0a) | ||
576 | #define E2P_PWR_CAL_VALUE3 E2P_REG(0x0c) | ||
577 | #define E2P_PWR_CAL_VALUE4 E2P_REG(0x0e) | ||
578 | #define E2P_PWR_INT_VALUE1 E2P_REG(0x10) | ||
579 | #define E2P_PWR_INT_VALUE2 E2P_REG(0x12) | ||
580 | #define E2P_PWR_INT_VALUE3 E2P_REG(0x14) | ||
581 | #define E2P_PWR_INT_VALUE4 E2P_REG(0x16) | ||
582 | |||
583 | /* Contains a bit for each allowed channel. Even for Europe (ETSI 0x30) it | ||
584 | * gives only 11 channels. */ | ||
585 | #define E2P_ALLOWED_CHANNEL E2P_REG(0x18) | ||
586 | |||
587 | #define E2P_PHY_REG E2P_REG(0x1a) | ||
588 | #define E2P_DEVICE_VER E2P_REG(0x20) | ||
589 | #define E2P_36M_CAL_VALUE1 E2P_REG(0x28) | ||
590 | #define E2P_36M_CAL_VALUE2 E2P_REG(0x2a) | ||
591 | #define E2P_36M_CAL_VALUE3 E2P_REG(0x2c) | ||
592 | #define E2P_36M_CAL_VALUE4 E2P_REG(0x2e) | ||
593 | #define E2P_11A_INT_VALUE1 E2P_REG(0x30) | ||
594 | #define E2P_11A_INT_VALUE2 E2P_REG(0x32) | ||
595 | #define E2P_11A_INT_VALUE3 E2P_REG(0x34) | ||
596 | #define E2P_11A_INT_VALUE4 E2P_REG(0x36) | ||
597 | #define E2P_48M_CAL_VALUE1 E2P_REG(0x38) | ||
598 | #define E2P_48M_CAL_VALUE2 E2P_REG(0x3a) | ||
599 | #define E2P_48M_CAL_VALUE3 E2P_REG(0x3c) | ||
600 | #define E2P_48M_CAL_VALUE4 E2P_REG(0x3e) | ||
601 | #define E2P_48M_INT_VALUE1 E2P_REG(0x40) | ||
602 | #define E2P_48M_INT_VALUE2 E2P_REG(0x42) | ||
603 | #define E2P_48M_INT_VALUE3 E2P_REG(0x44) | ||
604 | #define E2P_48M_INT_VALUE4 E2P_REG(0x46) | ||
605 | #define E2P_54M_CAL_VALUE1 E2P_REG(0x48) /* ??? */ | ||
606 | #define E2P_54M_CAL_VALUE2 E2P_REG(0x4a) | ||
607 | #define E2P_54M_CAL_VALUE3 E2P_REG(0x4c) | ||
608 | #define E2P_54M_CAL_VALUE4 E2P_REG(0x4e) | ||
609 | #define E2P_54M_INT_VALUE1 E2P_REG(0x50) | ||
610 | #define E2P_54M_INT_VALUE2 E2P_REG(0x52) | ||
611 | #define E2P_54M_INT_VALUE3 E2P_REG(0x54) | ||
612 | #define E2P_54M_INT_VALUE4 E2P_REG(0x56) | ||
613 | |||
614 | /* All 16 bit values */ | ||
615 | #define FW_FIRMWARE_VER FW_REG(0) | ||
616 | /* non-zero if USB high speed connection */ | ||
617 | #define FW_USB_SPEED FW_REG(1) | ||
618 | #define FW_FIX_TX_RATE FW_REG(2) | ||
619 | /* Seems to allow controlling the LEDs via the firmware */ | ||
620 | #define FW_LINK_STATUS FW_REG(3) | ||
621 | #define FW_SOFT_RESET FW_REG(4) | ||
622 | #define FW_FLASH_CHK FW_REG(5) | ||
623 | |||
624 | enum { | ||
625 | CR_BASE_OFFSET = 0x9000, | ||
626 | FW_START_OFFSET = 0xee00, | ||
627 | FW_BASE_ADDR_OFFSET = FW_START_OFFSET + 0x1d, | ||
628 | EEPROM_START_OFFSET = 0xf800, | ||
629 | EEPROM_SIZE = 0x800, /* words */ | ||
630 | LOAD_CODE_SIZE = 0xe, /* words */ | ||
631 | LOAD_VECT_SIZE = 0x10000 - 0xfff7, /* words */ | ||
632 | EEPROM_REGS_OFFSET = LOAD_CODE_SIZE + LOAD_VECT_SIZE, | ||
633 | E2P_BASE_OFFSET = EEPROM_START_OFFSET + | ||
634 | EEPROM_REGS_OFFSET, | ||
635 | }; | ||
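For reference, LOAD_VECT_SIZE works out to 0x10000 - 0xfff7 = 9 words, so EEPROM_REGS_OFFSET is 0xe + 9 = 0x17 words and E2P_BASE_OFFSET ends up at 0xf800 + 0x17 = 0xf817.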
636 | |||
637 | #define FW_REG_TABLE_ADDR USB_ADDR(FW_START_OFFSET + 0x1d) | ||
638 | |||
639 | enum { | ||
640 | /* indices for ofdm_cal_values */ | ||
641 | OFDM_36M_INDEX = 0, | ||
642 | OFDM_48M_INDEX = 1, | ||
643 | OFDM_54M_INDEX = 2, | ||
644 | }; | ||
645 | |||
646 | struct zd_chip { | ||
647 | struct zd_usb usb; | ||
648 | struct zd_rf rf; | ||
649 | struct mutex mutex; | ||
650 | u8 e2p_mac[ETH_ALEN]; | ||
651 | /* EepSetPoint in the vendor driver */ | ||
652 | u8 pwr_cal_values[E2P_CHANNEL_COUNT]; | ||
653 | /* integration values in the vendor driver */ | ||
654 | u8 pwr_int_values[E2P_CHANNEL_COUNT]; | ||
655 | /* SetPointOFDM in the vendor driver */ | ||
656 | u8 ofdm_cal_values[3][E2P_CHANNEL_COUNT]; | ||
657 | u8 pa_type:4, patch_cck_gain:1, patch_cr157:1, patch_6m_band_edge:1, | ||
658 | is_zd1211b:1; | ||
659 | }; | ||
660 | |||
661 | static inline struct zd_chip *zd_usb_to_chip(struct zd_usb *usb) | ||
662 | { | ||
663 | return container_of(usb, struct zd_chip, usb); | ||
664 | } | ||
665 | |||
666 | static inline struct zd_chip *zd_rf_to_chip(struct zd_rf *rf) | ||
667 | { | ||
668 | return container_of(rf, struct zd_chip, rf); | ||
669 | } | ||
670 | |||
671 | #define zd_chip_dev(chip) (&(chip)->usb.intf->dev) | ||
672 | |||
673 | void zd_chip_init(struct zd_chip *chip, | ||
674 | struct net_device *netdev, | ||
675 | struct usb_interface *intf); | ||
676 | void zd_chip_clear(struct zd_chip *chip); | ||
677 | int zd_chip_init_hw(struct zd_chip *chip, u8 device_type); | ||
678 | int zd_chip_reset(struct zd_chip *chip); | ||
679 | |||
680 | static inline int zd_ioread16v_locked(struct zd_chip *chip, u16 *values, | ||
681 | const zd_addr_t *addresses, | ||
682 | unsigned int count) | ||
683 | { | ||
684 | ZD_ASSERT(mutex_is_locked(&chip->mutex)); | ||
685 | return zd_usb_ioread16v(&chip->usb, values, addresses, count); | ||
686 | } | ||
687 | |||
688 | static inline int zd_ioread16_locked(struct zd_chip *chip, u16 *value, | ||
689 | const zd_addr_t addr) | ||
690 | { | ||
691 | ZD_ASSERT(mutex_is_locked(&chip->mutex)); | ||
692 | return zd_usb_ioread16(&chip->usb, value, addr); | ||
693 | } | ||
694 | |||
695 | int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, | ||
696 | const zd_addr_t *addresses, unsigned int count); | ||
697 | |||
698 | static inline int zd_ioread32_locked(struct zd_chip *chip, u32 *value, | ||
699 | const zd_addr_t addr) | ||
700 | { | ||
701 | return zd_ioread32v_locked(chip, value, (const zd_addr_t *)&addr, 1); | ||
702 | } | ||
703 | |||
704 | static inline int zd_iowrite16_locked(struct zd_chip *chip, u16 value, | ||
705 | zd_addr_t addr) | ||
706 | { | ||
707 | struct zd_ioreq16 ioreq; | ||
708 | |||
709 | ZD_ASSERT(mutex_is_locked(&chip->mutex)); | ||
710 | ioreq.addr = addr; | ||
711 | ioreq.value = value; | ||
712 | |||
713 | return zd_usb_iowrite16v(&chip->usb, &ioreq, 1); | ||
714 | } | ||
715 | |||
716 | int zd_iowrite16a_locked(struct zd_chip *chip, | ||
717 | const struct zd_ioreq16 *ioreqs, unsigned int count); | ||
718 | |||
719 | int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs, | ||
720 | unsigned int count); | ||
721 | |||
722 | static inline int zd_iowrite32_locked(struct zd_chip *chip, u32 value, | ||
723 | zd_addr_t addr) | ||
724 | { | ||
725 | struct zd_ioreq32 ioreq; | ||
726 | |||
727 | ioreq.addr = addr; | ||
728 | ioreq.value = value; | ||
729 | |||
730 | return _zd_iowrite32v_locked(chip, &ioreq, 1); | ||
731 | } | ||
732 | |||
733 | int zd_iowrite32a_locked(struct zd_chip *chip, | ||
734 | const struct zd_ioreq32 *ioreqs, unsigned int count); | ||
735 | |||
736 | static inline int zd_rfwrite_locked(struct zd_chip *chip, u32 value, u8 bits) | ||
737 | { | ||
738 | ZD_ASSERT(mutex_is_locked(&chip->mutex)); | ||
739 | return zd_usb_rfwrite(&chip->usb, value, bits); | ||
740 | } | ||
741 | |||
742 | int zd_rfwritev_locked(struct zd_chip *chip, | ||
743 | const u32* values, unsigned int count, u8 bits); | ||
744 | |||
745 | /* Locking functions for reading and writing registers. | ||
746 | * The different parameter order is intentional. | ||
747 | */ | ||
748 | int zd_ioread16(struct zd_chip *chip, zd_addr_t addr, u16 *value); | ||
749 | int zd_iowrite16(struct zd_chip *chip, zd_addr_t addr, u16 value); | ||
750 | int zd_ioread32(struct zd_chip *chip, zd_addr_t addr, u32 *value); | ||
751 | int zd_iowrite32(struct zd_chip *chip, zd_addr_t addr, u32 value); | ||
752 | int zd_ioread32v(struct zd_chip *chip, const zd_addr_t *addresses, | ||
753 | u32 *values, unsigned int count); | ||
754 | int zd_iowrite32a(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs, | ||
755 | unsigned int count); | ||
756 | |||
757 | int zd_chip_set_channel(struct zd_chip *chip, u8 channel); | ||
758 | static inline u8 _zd_chip_get_channel(struct zd_chip *chip) | ||
759 | { | ||
760 | return chip->rf.channel; | ||
761 | } | ||
762 | u8 zd_chip_get_channel(struct zd_chip *chip); | ||
763 | int zd_read_regdomain(struct zd_chip *chip, u8 *regdomain); | ||
764 | void zd_get_e2p_mac_addr(struct zd_chip *chip, u8 *mac_addr); | ||
765 | int zd_read_mac_addr(struct zd_chip *chip, u8 *mac_addr); | ||
766 | int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr); | ||
767 | int zd_chip_switch_radio_on(struct zd_chip *chip); | ||
768 | int zd_chip_switch_radio_off(struct zd_chip *chip); | ||
769 | int zd_chip_enable_int(struct zd_chip *chip); | ||
770 | void zd_chip_disable_int(struct zd_chip *chip); | ||
771 | int zd_chip_enable_rx(struct zd_chip *chip); | ||
772 | void zd_chip_disable_rx(struct zd_chip *chip); | ||
773 | int zd_chip_enable_hwint(struct zd_chip *chip); | ||
774 | int zd_chip_disable_hwint(struct zd_chip *chip); | ||
775 | |||
776 | static inline int zd_get_encryption_type(struct zd_chip *chip, u32 *type) | ||
777 | { | ||
778 | return zd_ioread32(chip, CR_ENCRYPTION_TYPE, type); | ||
779 | } | ||
780 | |||
781 | static inline int zd_set_encryption_type(struct zd_chip *chip, u32 type) | ||
782 | { | ||
783 | return zd_iowrite32(chip, CR_ENCRYPTION_TYPE, type); | ||
784 | } | ||
785 | |||
786 | static inline int zd_chip_get_basic_rates(struct zd_chip *chip, u16 *cr_rates) | ||
787 | { | ||
788 | return zd_ioread16(chip, CR_BASIC_RATE_TBL, cr_rates); | ||
789 | } | ||
790 | |||
791 | int zd_chip_set_basic_rates(struct zd_chip *chip, u16 cr_rates); | ||
792 | |||
793 | static inline int zd_chip_set_rx_filter(struct zd_chip *chip, u32 filter) | ||
794 | { | ||
795 | return zd_iowrite32(chip, CR_RX_FILTER, filter); | ||
796 | } | ||
797 | |||
798 | int zd_chip_lock_phy_regs(struct zd_chip *chip); | ||
799 | int zd_chip_unlock_phy_regs(struct zd_chip *chip); | ||
800 | |||
801 | enum led_status { | ||
802 | LED_OFF = 0, | ||
803 | LED_ON = 1, | ||
804 | LED_FLIP = 2, | ||
805 | LED_STATUS = 3, | ||
806 | }; | ||
807 | |||
808 | int zd_chip_led_status(struct zd_chip *chip, int led, enum led_status status); | ||
809 | int zd_chip_led_flip(struct zd_chip *chip, int led, | ||
810 | const unsigned int *phases_msecs, unsigned int count); | ||
811 | |||
812 | int zd_set_beacon_interval(struct zd_chip *chip, u32 interval); | ||
813 | |||
814 | static inline int zd_get_beacon_interval(struct zd_chip *chip, u32 *interval) | ||
815 | { | ||
816 | return zd_ioread32(chip, CR_BCN_INTERVAL, interval); | ||
817 | } | ||
818 | |||
819 | struct rx_status; | ||
820 | |||
821 | u8 zd_rx_qual_percent(const void *rx_frame, unsigned int size, | ||
822 | const struct rx_status *status); | ||
823 | u8 zd_rx_strength_percent(u8 rssi); | ||
824 | |||
825 | u16 zd_rx_rate(const void *rx_frame, const struct rx_status *status); | ||
826 | |||
827 | #endif /* _ZD_CHIP_H */ | ||
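The *_locked accessors declared above expect the caller to hold chip->mutex (the inline helpers assert exactly that). A minimal sketch of the intended pattern, using the CR_* and CR_RATES_* definitions from this header; the function name itself is made up for illustration:

static int example_set_80211b_rates(struct zd_chip *chip)
{
	int r;
	struct zd_ioreq16 ioreqs[] = {
		{ .addr = CR_BASIC_RATE_TBL,     .value = CR_RATES_80211B },
		{ .addr = CR_MANDATORY_RATE_TBL, .value = CR_RATES_80211B },
	};

	/* Take the chip mutex before calling any of the _locked helpers. */
	mutex_lock(&chip->mutex);
	r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
	mutex_unlock(&chip->mutex);
	return r;
}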
diff --git a/drivers/net/wireless/zd1211rw/zd_def.h b/drivers/net/wireless/zd1211rw/zd_def.h new file mode 100644 index 000000000000..465906812fc4 --- /dev/null +++ b/drivers/net/wireless/zd1211rw/zd_def.h | |||
@@ -0,0 +1,48 @@ | |||
1 | /* zd_def.h | ||
2 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License as published by | ||
5 | * the Free Software Foundation; either version 2 of the License, or | ||
6 | * (at your option) any later version. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program; if not, write to the Free Software | ||
15 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
16 | */ | ||
17 | |||
18 | #ifndef _ZD_DEF_H | ||
19 | #define _ZD_DEF_H | ||
20 | |||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/stringify.h> | ||
23 | #include <linux/device.h> | ||
24 | #include <linux/kernel.h> | ||
25 | |||
26 | #define dev_printk_f(level, dev, fmt, args...) \ | ||
27 | dev_printk(level, dev, "%s() " fmt, __func__, ##args) | ||
28 | |||
29 | #ifdef DEBUG | ||
30 | # define dev_dbg_f(dev, fmt, args...) \ | ||
31 | dev_printk_f(KERN_DEBUG, dev, fmt, ## args) | ||
32 | #else | ||
33 | # define dev_dbg_f(dev, fmt, args...) do { (void)(dev); } while (0) | ||
34 | #endif /* DEBUG */ | ||
35 | |||
36 | #ifdef DEBUG | ||
37 | # define ZD_ASSERT(x) \ | ||
38 | do { \ | ||
39 | if (!(x)) { \ | ||
40 | pr_debug("%s:%d ASSERT %s VIOLATED!\n", \ | ||
41 | __FILE__, __LINE__, __stringify(x)); \ | ||
42 | } \ | ||
43 | } while (0) | ||
44 | #else | ||
45 | # define ZD_ASSERT(x) do { } while (0) | ||
46 | #endif | ||
47 | |||
48 | #endif /* _ZD_DEF_H */ | ||
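A short illustration of how the two debug helpers above are meant to be used; the function below is made up for this sketch. dev_dbg_f() prefixes the message with the calling function's name, and ZD_ASSERT() only logs the violation when DEBUG is defined.

static void example_check_offset(struct device *dev, unsigned int offset)
{
	/* Logged via pr_debug() in DEBUG builds; compiled away otherwise. */
	ZD_ASSERT(offset < 0x800);

	/* Expands to dev_printk(KERN_DEBUG, dev, "%s() offset %u\n", ...) when
	 * DEBUG is defined, and to a no-op that only evaluates dev otherwise. */
	dev_dbg_f(dev, "offset %u\n", offset);
}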
diff --git a/drivers/net/wireless/zd1211rw/zd_ieee80211.c b/drivers/net/wireless/zd1211rw/zd_ieee80211.c new file mode 100644 index 000000000000..66905f7b61ff --- /dev/null +++ b/drivers/net/wireless/zd1211rw/zd_ieee80211.c | |||
@@ -0,0 +1,191 @@ | |||
1 | /* zd_ieee80211.c | ||
2 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License as published by | ||
5 | * the Free Software Foundation; either version 2 of the License, or | ||
6 | * (at your option) any later version. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program; if not, write to the Free Software | ||
15 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
16 | */ | ||
17 | |||
18 | /* | ||
19 | * A lot of this code is generic and should be moved into the upper layers | ||
20 | * at some point. | ||
21 | */ | ||
22 | |||
23 | #include <linux/errno.h> | ||
24 | #include <linux/wireless.h> | ||
25 | #include <linux/kernel.h> | ||
26 | #include <net/ieee80211.h> | ||
27 | |||
28 | #include "zd_def.h" | ||
29 | #include "zd_ieee80211.h" | ||
30 | #include "zd_mac.h" | ||
31 | |||
32 | static const struct channel_range channel_ranges[] = { | ||
33 | [0] = { 0, 0}, | ||
34 | [ZD_REGDOMAIN_FCC] = { 1, 12}, | ||
35 | [ZD_REGDOMAIN_IC] = { 1, 12}, | ||
36 | [ZD_REGDOMAIN_ETSI] = { 1, 14}, | ||
37 | [ZD_REGDOMAIN_JAPAN] = { 1, 14}, | ||
38 | [ZD_REGDOMAIN_SPAIN] = { 1, 14}, | ||
39 | [ZD_REGDOMAIN_FRANCE] = { 1, 14}, | ||
40 | [ZD_REGDOMAIN_JAPAN_ADD] = {14, 15}, | ||
41 | }; | ||
42 | |||
43 | const struct channel_range *zd_channel_range(u8 regdomain) | ||
44 | { | ||
45 | if (regdomain >= ARRAY_SIZE(channel_ranges)) | ||
46 | regdomain = 0; | ||
47 | return &channel_ranges[regdomain]; | ||
48 | } | ||
49 | |||
50 | int zd_regdomain_supports_channel(u8 regdomain, u8 channel) | ||
51 | { | ||
52 | const struct channel_range *range = zd_channel_range(regdomain); | ||
53 | return range->start <= channel && channel < range->end; | ||
54 | } | ||
55 | |||
56 | int zd_regdomain_supported(u8 regdomain) | ||
57 | { | ||
58 | const struct channel_range *range = zd_channel_range(regdomain); | ||
59 | return range->start != 0; | ||
60 | } | ||
61 | |||
62 | /* Stores channel frequencies in MHz. */ | ||
63 | static const u16 channel_frequencies[] = { | ||
64 | 2412, 2417, 2422, 2427, 2432, 2437, 2442, 2447, | ||
65 | 2452, 2457, 2462, 2467, 2472, 2484, | ||
66 | }; | ||
67 | |||
68 | #define NUM_CHANNELS ARRAY_SIZE(channel_frequencies) | ||
69 | |||
70 | static int compute_freq(struct iw_freq *freq, u32 mhz, u32 hz) | ||
71 | { | ||
72 | u32 factor; | ||
73 | |||
74 | freq->e = 0; | ||
75 | if (mhz >= 1000000000U) { | ||
76 | pr_debug("zd1211 mhz %u too large\n", mhz); | ||
77 | freq->m = 0; | ||
78 | return -EINVAL; | ||
79 | } | ||
80 | |||
81 | factor = 1000; | ||
82 | while (mhz >= factor) { | ||
83 | |||
84 | freq->e += 1; | ||
85 | factor *= 10; | ||
86 | } | ||
87 | |||
88 | factor /= 1000U; | ||
89 | freq->m = mhz * (1000000U/factor) + hz/factor; | ||
90 | |||
91 | return 0; | ||
92 | } | ||
93 | |||
94 | int zd_channel_to_freq(struct iw_freq *freq, u8 channel) | ||
95 | { | ||
96 | if (channel > NUM_CHANNELS) { | ||
97 | freq->m = 0; | ||
98 | freq->e = 0; | ||
99 | return -EINVAL; | ||
100 | } | ||
101 | if (!channel) { | ||
102 | freq->m = 0; | ||
103 | freq->e = 0; | ||
104 | return -EINVAL; | ||
105 | } | ||
106 | return compute_freq(freq, channel_frequencies[channel-1], 0); | ||
107 | } | ||
108 | |||
109 | static int freq_to_mhz(const struct iw_freq *freq) | ||
110 | { | ||
111 | u32 factor; | ||
112 | int e; | ||
113 | |||
114 | /* Such high frequencies are not supported. */ | ||
115 | if (freq->e > 6) | ||
116 | return -EINVAL; | ||
117 | |||
118 | factor = 1; | ||
119 | for (e = freq->e; e > 0; --e) { | ||
120 | factor *= 10; | ||
121 | } | ||
122 | factor = 1000000U / factor; | ||
123 | |||
124 | if (freq->m % factor) { | ||
125 | return -EINVAL; | ||
126 | } | ||
127 | |||
128 | return freq->m / factor; | ||
129 | } | ||
130 | |||
131 | int zd_find_channel(u8 *channel, const struct iw_freq *freq) | ||
132 | { | ||
133 | int i, r; | ||
134 | u32 mhz; | ||
135 | |||
136 | if (!(freq->flags & IW_FREQ_FIXED)) | ||
137 | return 0; | ||
138 | |||
139 | if (freq->m < 1000) { | ||
140 | if (freq->m > NUM_CHANNELS || freq->m == 0) | ||
141 | return -EINVAL; | ||
142 | *channel = freq->m; | ||
143 | return 1; | ||
144 | } | ||
145 | |||
146 | r = freq_to_mhz(freq); | ||
147 | if (r < 0) | ||
148 | return r; | ||
149 | mhz = r; | ||
150 | |||
151 | for (i = 0; i < NUM_CHANNELS; i++) { | ||
152 | if (mhz == channel_frequencies[i]) { | ||
153 | *channel = i+1; | ||
154 | return 1; | ||
155 | } | ||
156 | } | ||
157 | |||
158 | return -EINVAL; | ||
159 | } | ||
160 | |||
161 | int zd_geo_init(struct ieee80211_device *ieee, u8 regdomain) | ||
162 | { | ||
163 | struct ieee80211_geo geo; | ||
164 | const struct channel_range *range; | ||
165 | int i; | ||
166 | u8 channel; | ||
167 | |||
168 | dev_dbg(zd_mac_dev(zd_netdev_mac(ieee->dev)), | ||
169 | "regdomain %#04x\n", regdomain); | ||
170 | |||
171 | range = zd_channel_range(regdomain); | ||
172 | if (range->start == 0) { | ||
173 | dev_err(zd_mac_dev(zd_netdev_mac(ieee->dev)), | ||
174 | "zd1211 regdomain %#04x not supported\n", | ||
175 | regdomain); | ||
176 | return -EINVAL; | ||
177 | } | ||
178 | |||
179 | memset(&geo, 0, sizeof(geo)); | ||
180 | |||
181 | for (i = 0, channel = range->start; channel < range->end; channel++) { | ||
182 | struct ieee80211_channel *chan = &geo.bg[i++]; | ||
183 | chan->freq = channel_frequencies[channel - 1]; | ||
184 | chan->channel = channel; | ||
185 | } | ||
186 | |||
187 | geo.bg_channels = i; | ||
188 | memcpy(geo.name, "XX ", 4); | ||
189 | ieee80211_set_geo(ieee, &geo); | ||
190 | return 0; | ||
191 | } | ||
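To make the mantissa/exponent encoding built by compute_freq() concrete, a quick worked example: for channel 6, channel_frequencies[5] is 2437 MHz. The loop raises freq->e until factor exceeds 2437, leaving e = 1 and factor = 10 after the final division, so freq->m = 2437 * (1000000 / 10) = 243700000, and the reported frequency is 243700000 * 10^1 Hz = 2.437 GHz.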
diff --git a/drivers/net/wireless/zd1211rw/zd_ieee80211.h b/drivers/net/wireless/zd1211rw/zd_ieee80211.h new file mode 100644 index 000000000000..36329890dfec --- /dev/null +++ b/drivers/net/wireless/zd1211rw/zd_ieee80211.h | |||
@@ -0,0 +1,85 @@ | |||
1 | #ifndef _ZD_IEEE80211_H | ||
2 | #define _ZD_IEEE80211_H | ||
3 | |||
4 | #include <net/ieee80211.h> | ||
5 | #include "zd_types.h" | ||
6 | |||
7 | /* Additional definitions from the standards. | ||
8 | */ | ||
9 | |||
10 | #define ZD_REGDOMAIN_FCC 0x10 | ||
11 | #define ZD_REGDOMAIN_IC 0x20 | ||
12 | #define ZD_REGDOMAIN_ETSI 0x30 | ||
13 | #define ZD_REGDOMAIN_SPAIN 0x31 | ||
14 | #define ZD_REGDOMAIN_FRANCE 0x32 | ||
15 | #define ZD_REGDOMAIN_JAPAN_ADD 0x40 | ||
16 | #define ZD_REGDOMAIN_JAPAN 0x41 | ||
17 | |||
18 | enum { | ||
19 | MIN_CHANNEL24 = 1, | ||
20 | MAX_CHANNEL24 = 14, | ||
21 | }; | ||
22 | |||
23 | struct channel_range { | ||
24 | u8 start; | ||
25 | u8 end; /* exclusive (channel must be less than end) */ | ||
26 | }; | ||
27 | |||
28 | struct iw_freq; | ||
29 | |||
30 | int zd_geo_init(struct ieee80211_device *ieee, u8 regdomain); | ||
31 | |||
32 | const struct channel_range *zd_channel_range(u8 regdomain); | ||
33 | int zd_regdomain_supports_channel(u8 regdomain, u8 channel); | ||
34 | int zd_regdomain_supported(u8 regdomain); | ||
35 | |||
36 | /* for 2.4 GHz band */ | ||
37 | int zd_channel_to_freq(struct iw_freq *freq, u8 channel); | ||
38 | int zd_find_channel(u8 *channel, const struct iw_freq *freq); | ||
39 | |||
40 | #define ZD_PLCP_SERVICE_LENGTH_EXTENSION 0x80 | ||
41 | |||
42 | struct ofdm_plcp_header { | ||
43 | u8 prefix[3]; | ||
44 | __le16 service; | ||
45 | } __attribute__((packed)); | ||
46 | |||
47 | static inline u8 zd_ofdm_plcp_header_rate( | ||
48 | const struct ofdm_plcp_header *header) | ||
49 | { | ||
50 | return header->prefix[0] & 0xf; | ||
51 | } | ||
52 | |||
53 | #define ZD_OFDM_RATE_6M 0xb | ||
54 | #define ZD_OFDM_RATE_9M 0xf | ||
55 | #define ZD_OFDM_RATE_12M 0xa | ||
56 | #define ZD_OFDM_RATE_18M 0xe | ||
57 | #define ZD_OFDM_RATE_24M 0x9 | ||
58 | #define ZD_OFDM_RATE_36M 0xd | ||
59 | #define ZD_OFDM_RATE_48M 0x8 | ||
60 | #define ZD_OFDM_RATE_54M 0xc | ||
61 | |||
62 | struct cck_plcp_header { | ||
63 | u8 signal; | ||
64 | u8 service; | ||
65 | __le16 length; | ||
66 | __le16 crc16; | ||
67 | } __attribute__((packed)); | ||
68 | |||
69 | static inline u8 zd_cck_plcp_header_rate(const struct cck_plcp_header *header) | ||
70 | { | ||
71 | return header->signal; | ||
72 | } | ||
73 | |||
74 | #define ZD_CCK_SIGNAL_1M 0x0a | ||
75 | #define ZD_CCK_SIGNAL_2M 0x14 | ||
76 | #define ZD_CCK_SIGNAL_5M5 0x37 | ||
77 | #define ZD_CCK_SIGNAL_11M 0x6e | ||
78 | |||
79 | enum ieee80211_std { | ||
80 | IEEE80211B = 0x01, | ||
81 | IEEE80211A = 0x02, | ||
82 | IEEE80211G = 0x04, | ||
83 | }; | ||
84 | |||
85 | #endif /* _ZD_IEEE80211_H */ | ||
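As a sketch of how the PLCP definitions above can be consumed (the function name and the choice of 100 kbit/s units are illustrative, not part of the driver):

static u16 example_ofdm_rate_100kbps(const struct ofdm_plcp_header *header)
{
	/* zd_ofdm_plcp_header_rate() returns the low four bits of the prefix. */
	switch (zd_ofdm_plcp_header_rate(header)) {
	case ZD_OFDM_RATE_6M:  return 60;
	case ZD_OFDM_RATE_9M:  return 90;
	case ZD_OFDM_RATE_12M: return 120;
	case ZD_OFDM_RATE_18M: return 180;
	case ZD_OFDM_RATE_24M: return 240;
	case ZD_OFDM_RATE_36M: return 360;
	case ZD_OFDM_RATE_48M: return 480;
	case ZD_OFDM_RATE_54M: return 540;
	default:               return 0;
	}
}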
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c new file mode 100644 index 000000000000..d6f3e02a0b54 --- /dev/null +++ b/drivers/net/wireless/zd1211rw/zd_mac.c | |||
@@ -0,0 +1,1057 @@ | |||
1 | /* zd_mac.c | ||
2 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License as published by | ||
5 | * the Free Software Foundation; either version 2 of the License, or | ||
6 | * (at your option) any later version. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program; if not, write to the Free Software | ||
15 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
16 | */ | ||
17 | |||
18 | #include <linux/netdevice.h> | ||
19 | #include <linux/etherdevice.h> | ||
20 | #include <linux/wireless.h> | ||
21 | #include <linux/usb.h> | ||
22 | #include <linux/jiffies.h> | ||
23 | #include <net/ieee80211_radiotap.h> | ||
24 | |||
25 | #include "zd_def.h" | ||
26 | #include "zd_chip.h" | ||
27 | #include "zd_mac.h" | ||
28 | #include "zd_ieee80211.h" | ||
29 | #include "zd_netdev.h" | ||
30 | #include "zd_rf.h" | ||
31 | #include "zd_util.h" | ||
32 | |||
33 | static void ieee_init(struct ieee80211_device *ieee); | ||
34 | static void softmac_init(struct ieee80211softmac_device *sm); | ||
35 | |||
36 | int zd_mac_init(struct zd_mac *mac, | ||
37 | struct net_device *netdev, | ||
38 | struct usb_interface *intf) | ||
39 | { | ||
40 | struct ieee80211_device *ieee = zd_netdev_ieee80211(netdev); | ||
41 | |||
42 | memset(mac, 0, sizeof(*mac)); | ||
43 | spin_lock_init(&mac->lock); | ||
44 | mac->netdev = netdev; | ||
45 | |||
46 | ieee_init(ieee); | ||
47 | softmac_init(ieee80211_priv(netdev)); | ||
48 | zd_chip_init(&mac->chip, netdev, intf); | ||
49 | return 0; | ||
50 | } | ||
51 | |||
52 | static int reset_channel(struct zd_mac *mac) | ||
53 | { | ||
54 | int r; | ||
55 | unsigned long flags; | ||
56 | const struct channel_range *range; | ||
57 | |||
58 | spin_lock_irqsave(&mac->lock, flags); | ||
59 | range = zd_channel_range(mac->regdomain); | ||
60 | if (!range->start) { | ||
61 | r = -EINVAL; | ||
62 | goto out; | ||
63 | } | ||
64 | mac->requested_channel = range->start; | ||
65 | r = 0; | ||
66 | out: | ||
67 | spin_unlock_irqrestore(&mac->lock, flags); | ||
68 | return r; | ||
69 | } | ||
70 | |||
71 | int zd_mac_init_hw(struct zd_mac *mac, u8 device_type) | ||
72 | { | ||
73 | int r; | ||
74 | struct zd_chip *chip = &mac->chip; | ||
75 | u8 addr[ETH_ALEN]; | ||
76 | u8 default_regdomain; | ||
77 | |||
78 | r = zd_chip_enable_int(chip); | ||
79 | if (r) | ||
80 | goto out; | ||
81 | r = zd_chip_init_hw(chip, device_type); | ||
82 | if (r) | ||
83 | goto disable_int; | ||
84 | |||
85 | zd_get_e2p_mac_addr(chip, addr); | ||
86 | r = zd_write_mac_addr(chip, addr); | ||
87 | if (r) | ||
88 | goto disable_int; | ||
89 | ZD_ASSERT(!irqs_disabled()); | ||
90 | spin_lock_irq(&mac->lock); | ||
91 | memcpy(mac->netdev->dev_addr, addr, ETH_ALEN); | ||
92 | spin_unlock_irq(&mac->lock); | ||
93 | |||
94 | r = zd_read_regdomain(chip, &default_regdomain); | ||
95 | if (r) | ||
96 | goto disable_int; | ||
97 | if (!zd_regdomain_supported(default_regdomain)) { | ||
98 | dev_dbg_f(zd_mac_dev(mac), | ||
99 | "Regulatory Domain %#04x is not supported.\n", | ||
100 | default_regdomain); | ||
101 | r = -EINVAL; | ||
102 | goto disable_int; | ||
103 | } | ||
104 | spin_lock_irq(&mac->lock); | ||
105 | mac->regdomain = mac->default_regdomain = default_regdomain; | ||
106 | spin_unlock_irq(&mac->lock); | ||
107 | r = reset_channel(mac); | ||
108 | if (r) | ||
109 | goto disable_int; | ||
110 | |||
111 | /* We must inform the device that we are doing encryption/decryption in | ||
112 | * software at the moment. */ | ||
113 | r = zd_set_encryption_type(chip, ENC_SNIFFER); | ||
114 | if (r) | ||
115 | goto disable_int; | ||
116 | |||
117 | r = zd_geo_init(zd_mac_to_ieee80211(mac), mac->regdomain); | ||
118 | if (r) | ||
119 | goto disable_int; | ||
120 | |||
121 | r = 0; | ||
122 | disable_int: | ||
123 | zd_chip_disable_int(chip); | ||
124 | out: | ||
125 | return r; | ||
126 | } | ||
127 | |||
128 | void zd_mac_clear(struct zd_mac *mac) | ||
129 | { | ||
130 | /* Acquire and release the lock to make sure nobody else still holds it. */ | ||
131 | spin_lock(&mac->lock); | ||
132 | spin_unlock(&mac->lock); | ||
133 | zd_chip_clear(&mac->chip); | ||
134 | memset(mac, 0, sizeof(*mac)); | ||
135 | } | ||
136 | |||
137 | static int reset_mode(struct zd_mac *mac) | ||
138 | { | ||
139 | struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac); | ||
140 | struct zd_ioreq32 ioreqs[3] = { | ||
141 | { CR_RX_FILTER, STA_RX_FILTER }, | ||
142 | { CR_SNIFFER_ON, 0U }, | ||
143 | }; | ||
144 | |||
145 | if (ieee->iw_mode == IW_MODE_MONITOR) { | ||
146 | ioreqs[0].value = 0xffffffff; | ||
147 | ioreqs[1].value = 0x1; | ||
148 | ioreqs[2].value = ENC_SNIFFER; | ||
149 | } | ||
150 | |||
151 | return zd_iowrite32a(&mac->chip, ioreqs, 3); | ||
152 | } | ||
153 | |||
154 | int zd_mac_open(struct net_device *netdev) | ||
155 | { | ||
156 | struct zd_mac *mac = zd_netdev_mac(netdev); | ||
157 | struct zd_chip *chip = &mac->chip; | ||
158 | int r; | ||
159 | |||
160 | r = zd_chip_enable_int(chip); | ||
161 | if (r < 0) | ||
162 | goto out; | ||
163 | |||
164 | r = zd_chip_set_basic_rates(chip, CR_RATES_80211B | CR_RATES_80211G); | ||
165 | if (r < 0) | ||
166 | goto disable_int; | ||
167 | r = reset_mode(mac); | ||
168 | if (r) | ||
169 | goto disable_int; | ||
170 | r = zd_chip_switch_radio_on(chip); | ||
171 | if (r < 0) | ||
172 | goto disable_int; | ||
173 | r = zd_chip_set_channel(chip, mac->requested_channel); | ||
174 | if (r < 0) | ||
175 | goto disable_radio; | ||
176 | r = zd_chip_enable_rx(chip); | ||
177 | if (r < 0) | ||
178 | goto disable_radio; | ||
179 | r = zd_chip_enable_hwint(chip); | ||
180 | if (r < 0) | ||
181 | goto disable_rx; | ||
182 | |||
183 | ieee80211softmac_start(netdev); | ||
184 | return 0; | ||
185 | disable_rx: | ||
186 | zd_chip_disable_rx(chip); | ||
187 | disable_radio: | ||
188 | zd_chip_switch_radio_off(chip); | ||
189 | disable_int: | ||
190 | zd_chip_disable_int(chip); | ||
191 | out: | ||
192 | return r; | ||
193 | } | ||
194 | |||
195 | int zd_mac_stop(struct net_device *netdev) | ||
196 | { | ||
197 | struct zd_mac *mac = zd_netdev_mac(netdev); | ||
198 | struct zd_chip *chip = &mac->chip; | ||
199 | |||
200 | netif_stop_queue(netdev); | ||
201 | |||
202 | /* | ||
203 | * The order here deliberately is a little different from the open() | ||
204 | * method, since we need to make sure there is no opportunity for RX | ||
205 | * frames to be processed by softmac after we have stopped it. | ||
206 | */ | ||
207 | |||
208 | zd_chip_disable_rx(chip); | ||
209 | ieee80211softmac_stop(netdev); | ||
210 | |||
211 | zd_chip_disable_hwint(chip); | ||
212 | zd_chip_switch_radio_off(chip); | ||
213 | zd_chip_disable_int(chip); | ||
214 | |||
215 | return 0; | ||
216 | } | ||
217 | |||
218 | int zd_mac_set_mac_address(struct net_device *netdev, void *p) | ||
219 | { | ||
220 | int r; | ||
221 | unsigned long flags; | ||
222 | struct sockaddr *addr = p; | ||
223 | struct zd_mac *mac = zd_netdev_mac(netdev); | ||
224 | struct zd_chip *chip = &mac->chip; | ||
225 | |||
226 | if (!is_valid_ether_addr(addr->sa_data)) | ||
227 | return -EADDRNOTAVAIL; | ||
228 | |||
229 | dev_dbg_f(zd_mac_dev(mac), | ||
230 | "Setting MAC to " MAC_FMT "\n", MAC_ARG(addr->sa_data)); | ||
231 | |||
232 | r = zd_write_mac_addr(chip, addr->sa_data); | ||
233 | if (r) | ||
234 | return r; | ||
235 | |||
236 | spin_lock_irqsave(&mac->lock, flags); | ||
237 | memcpy(netdev->dev_addr, addr->sa_data, ETH_ALEN); | ||
238 | spin_unlock_irqrestore(&mac->lock, flags); | ||
239 | |||
240 | return 0; | ||
241 | } | ||
242 | |||
243 | int zd_mac_set_regdomain(struct zd_mac *mac, u8 regdomain) | ||
244 | { | ||
245 | int r; | ||
246 | u8 channel; | ||
247 | |||
248 | ZD_ASSERT(!irqs_disabled()); | ||
249 | spin_lock_irq(&mac->lock); | ||
250 | if (regdomain == 0) { | ||
251 | regdomain = mac->default_regdomain; | ||
252 | } | ||
253 | if (!zd_regdomain_supported(regdomain)) { | ||
254 | spin_unlock_irq(&mac->lock); | ||
255 | return -EINVAL; | ||
256 | } | ||
257 | mac->regdomain = regdomain; | ||
258 | channel = mac->requested_channel; | ||
259 | spin_unlock_irq(&mac->lock); | ||
260 | |||
261 | r = zd_geo_init(zd_mac_to_ieee80211(mac), regdomain); | ||
262 | if (r) | ||
263 | return r; | ||
264 | if (!zd_regdomain_supports_channel(regdomain, channel)) { | ||
265 | r = reset_channel(mac); | ||
266 | if (r) | ||
267 | return r; | ||
268 | } | ||
269 | |||
270 | return 0; | ||
271 | } | ||
272 | |||
273 | u8 zd_mac_get_regdomain(struct zd_mac *mac) | ||
274 | { | ||
275 | unsigned long flags; | ||
276 | u8 regdomain; | ||
277 | |||
278 | spin_lock_irqsave(&mac->lock, flags); | ||
279 | regdomain = mac->regdomain; | ||
280 | spin_unlock_irqrestore(&mac->lock, flags); | ||
281 | return regdomain; | ||
282 | } | ||
283 | |||
284 | static void set_channel(struct net_device *netdev, u8 channel) | ||
285 | { | ||
286 | struct zd_mac *mac = zd_netdev_mac(netdev); | ||
287 | |||
288 | dev_dbg_f(zd_mac_dev(mac), "channel %d\n", channel); | ||
289 | |||
290 | zd_chip_set_channel(&mac->chip, channel); | ||
291 | } | ||
292 | |||
293 | /* TODO: Should not work in Managed mode. */ | ||
294 | int zd_mac_request_channel(struct zd_mac *mac, u8 channel) | ||
295 | { | ||
296 | unsigned long lock_flags; | ||
297 | struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac); | ||
298 | |||
299 | if (ieee->iw_mode == IW_MODE_INFRA) | ||
300 | return -EPERM; | ||
301 | |||
302 | spin_lock_irqsave(&mac->lock, lock_flags); | ||
303 | if (!zd_regdomain_supports_channel(mac->regdomain, channel)) { | ||
304 | spin_unlock_irqrestore(&mac->lock, lock_flags); | ||
305 | return -EINVAL; | ||
306 | } | ||
307 | mac->requested_channel = channel; | ||
308 | spin_unlock_irqrestore(&mac->lock, lock_flags); | ||
309 | if (netif_running(mac->netdev)) | ||
310 | return zd_chip_set_channel(&mac->chip, channel); | ||
311 | else | ||
312 | return 0; | ||
313 | } | ||
314 | |||
315 | int zd_mac_get_channel(struct zd_mac *mac, u8 *channel, u8 *flags) | ||
316 | { | ||
317 | struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac); | ||
318 | |||
319 | *channel = zd_chip_get_channel(&mac->chip); | ||
320 | if (ieee->iw_mode != IW_MODE_INFRA) { | ||
321 | spin_lock_irq(&mac->lock); | ||
322 | *flags = *channel == mac->requested_channel ? | ||
323 | MAC_FIXED_CHANNEL : 0; | ||
324 | spin_unlock(&mac->lock); | ||
325 | } else { | ||
326 | *flags = 0; | ||
327 | } | ||
328 | dev_dbg_f(zd_mac_dev(mac), "channel %u flags %u\n", *channel, *flags); | ||
329 | return 0; | ||
330 | } | ||
331 | |||
332 | /* If a wrong rate is given, we fall back to the slowest rate: 1 MBit/s */ | ||
333 | static u8 cs_typed_rate(u8 cs_rate) | ||
334 | { | ||
335 | static const u8 typed_rates[16] = { | ||
336 | [ZD_CS_CCK_RATE_1M] = ZD_CS_CCK|ZD_CS_CCK_RATE_1M, | ||
337 | [ZD_CS_CCK_RATE_2M] = ZD_CS_CCK|ZD_CS_CCK_RATE_2M, | ||
338 | [ZD_CS_CCK_RATE_5_5M] = ZD_CS_CCK|ZD_CS_CCK_RATE_5_5M, | ||
339 | [ZD_CS_CCK_RATE_11M] = ZD_CS_CCK|ZD_CS_CCK_RATE_11M, | ||
340 | [ZD_OFDM_RATE_6M] = ZD_CS_OFDM|ZD_OFDM_RATE_6M, | ||
341 | [ZD_OFDM_RATE_9M] = ZD_CS_OFDM|ZD_OFDM_RATE_9M, | ||
342 | [ZD_OFDM_RATE_12M] = ZD_CS_OFDM|ZD_OFDM_RATE_12M, | ||
343 | [ZD_OFDM_RATE_18M] = ZD_CS_OFDM|ZD_OFDM_RATE_18M, | ||
344 | [ZD_OFDM_RATE_24M] = ZD_CS_OFDM|ZD_OFDM_RATE_24M, | ||
345 | [ZD_OFDM_RATE_36M] = ZD_CS_OFDM|ZD_OFDM_RATE_36M, | ||
346 | [ZD_OFDM_RATE_48M] = ZD_CS_OFDM|ZD_OFDM_RATE_48M, | ||
347 | [ZD_OFDM_RATE_54M] = ZD_CS_OFDM|ZD_OFDM_RATE_54M, | ||
348 | }; | ||
349 | |||
350 | ZD_ASSERT(ZD_CS_RATE_MASK == 0x0f); | ||
351 | return typed_rates[cs_rate & ZD_CS_RATE_MASK]; | ||
352 | } | ||
353 | |||
354 | /* Fall back to the lowest rate if the rate is unknown. */ | ||
355 | static u8 rate_to_cs_rate(u8 rate) | ||
356 | { | ||
357 | switch (rate) { | ||
358 | case IEEE80211_CCK_RATE_2MB: | ||
359 | return ZD_CS_CCK_RATE_2M; | ||
360 | case IEEE80211_CCK_RATE_5MB: | ||
361 | return ZD_CS_CCK_RATE_5_5M; | ||
362 | case IEEE80211_CCK_RATE_11MB: | ||
363 | return ZD_CS_CCK_RATE_11M; | ||
364 | case IEEE80211_OFDM_RATE_6MB: | ||
365 | return ZD_OFDM_RATE_6M; | ||
366 | case IEEE80211_OFDM_RATE_9MB: | ||
367 | return ZD_OFDM_RATE_9M; | ||
368 | case IEEE80211_OFDM_RATE_12MB: | ||
369 | return ZD_OFDM_RATE_12M; | ||
370 | case IEEE80211_OFDM_RATE_18MB: | ||
371 | return ZD_OFDM_RATE_18M; | ||
372 | case IEEE80211_OFDM_RATE_24MB: | ||
373 | return ZD_OFDM_RATE_24M; | ||
374 | case IEEE80211_OFDM_RATE_36MB: | ||
375 | return ZD_OFDM_RATE_36M; | ||
376 | case IEEE80211_OFDM_RATE_48MB: | ||
377 | return ZD_OFDM_RATE_48M; | ||
378 | case IEEE80211_OFDM_RATE_54MB: | ||
379 | return ZD_OFDM_RATE_54M; | ||
380 | } | ||
381 | return ZD_CS_CCK_RATE_1M; | ||
382 | } | ||
383 | |||
384 | int zd_mac_set_mode(struct zd_mac *mac, u32 mode) | ||
385 | { | ||
386 | struct ieee80211_device *ieee; | ||
387 | |||
388 | switch (mode) { | ||
389 | case IW_MODE_AUTO: | ||
390 | case IW_MODE_ADHOC: | ||
391 | case IW_MODE_INFRA: | ||
392 | mac->netdev->type = ARPHRD_ETHER; | ||
393 | break; | ||
394 | case IW_MODE_MONITOR: | ||
395 | mac->netdev->type = ARPHRD_IEEE80211_RADIOTAP; | ||
396 | break; | ||
397 | default: | ||
398 | dev_dbg_f(zd_mac_dev(mac), "wrong mode %u\n", mode); | ||
399 | return -EINVAL; | ||
400 | } | ||
401 | |||
402 | ieee = zd_mac_to_ieee80211(mac); | ||
403 | ZD_ASSERT(!irqs_disabled()); | ||
404 | spin_lock_irq(&ieee->lock); | ||
405 | ieee->iw_mode = mode; | ||
406 | spin_unlock_irq(&ieee->lock); | ||
407 | |||
408 | if (netif_running(mac->netdev)) | ||
409 | return reset_mode(mac); | ||
410 | |||
411 | return 0; | ||
412 | } | ||
413 | |||
414 | int zd_mac_get_mode(struct zd_mac *mac, u32 *mode) | ||
415 | { | ||
416 | unsigned long flags; | ||
417 | struct ieee80211_device *ieee; | ||
418 | |||
419 | ieee = zd_mac_to_ieee80211(mac); | ||
420 | spin_lock_irqsave(&ieee->lock, flags); | ||
421 | *mode = ieee->iw_mode; | ||
422 | spin_unlock_irqrestore(&ieee->lock, flags); | ||
423 | return 0; | ||
424 | } | ||
425 | |||
426 | int zd_mac_get_range(struct zd_mac *mac, struct iw_range *range) | ||
427 | { | ||
428 | int i; | ||
429 | const struct channel_range *channel_range; | ||
430 | u8 regdomain; | ||
431 | |||
432 | memset(range, 0, sizeof(*range)); | ||
433 | |||
434 | /* FIXME: Not so important and depends on the mode. For 802.11g this | ||
435 | * value is usually used. The number given here appears to be in | ||
436 | * bit/s. | ||
437 | */ | ||
438 | range->throughput = 27 * 1000 * 1000; | ||
439 | |||
440 | range->max_qual.qual = 100; | ||
441 | range->max_qual.level = 100; | ||
442 | |||
443 | /* FIXME: Still needs to be tuned. */ | ||
444 | range->avg_qual.qual = 71; | ||
445 | range->avg_qual.level = 80; | ||
446 | |||
447 | /* FIXME: depends on standard? */ | ||
448 | range->min_rts = 256; | ||
449 | range->max_rts = 2346; | ||
450 | |||
451 | range->min_frag = MIN_FRAG_THRESHOLD; | ||
452 | range->max_frag = MAX_FRAG_THRESHOLD; | ||
453 | |||
454 | range->max_encoding_tokens = WEP_KEYS; | ||
455 | range->num_encoding_sizes = 2; | ||
456 | range->encoding_size[0] = 5; | ||
457 | range->encoding_size[1] = WEP_KEY_LEN; | ||
458 | |||
459 | range->we_version_compiled = WIRELESS_EXT; | ||
460 | range->we_version_source = 20; | ||
461 | |||
462 | ZD_ASSERT(!irqs_disabled()); | ||
463 | spin_lock_irq(&mac->lock); | ||
464 | regdomain = mac->regdomain; | ||
465 | spin_unlock_irq(&mac->lock); | ||
466 | channel_range = zd_channel_range(regdomain); | ||
467 | |||
468 | range->num_channels = channel_range->end - channel_range->start; | ||
469 | range->old_num_channels = range->num_channels; | ||
470 | range->num_frequency = range->num_channels; | ||
471 | range->old_num_frequency = range->num_frequency; | ||
472 | |||
473 | for (i = 0; i < range->num_frequency; i++) { | ||
474 | struct iw_freq *freq = &range->freq[i]; | ||
475 | freq->i = channel_range->start + i; | ||
476 | zd_channel_to_freq(freq, freq->i); | ||
477 | } | ||
478 | |||
479 | return 0; | ||
480 | } | ||
481 | |||
482 | static int zd_calc_tx_length_us(u8 *service, u8 cs_rate, u16 tx_length) | ||
483 | { | ||
484 | static const u8 rate_divisor[] = { | ||
485 | [ZD_CS_CCK_RATE_1M] = 1, | ||
486 | [ZD_CS_CCK_RATE_2M] = 2, | ||
487 | [ZD_CS_CCK_RATE_5_5M] = 11, /* bits must be doubled */ | ||
488 | [ZD_CS_CCK_RATE_11M] = 11, | ||
489 | [ZD_OFDM_RATE_6M] = 6, | ||
490 | [ZD_OFDM_RATE_9M] = 9, | ||
491 | [ZD_OFDM_RATE_12M] = 12, | ||
492 | [ZD_OFDM_RATE_18M] = 18, | ||
493 | [ZD_OFDM_RATE_24M] = 24, | ||
494 | [ZD_OFDM_RATE_36M] = 36, | ||
495 | [ZD_OFDM_RATE_48M] = 48, | ||
496 | [ZD_OFDM_RATE_54M] = 54, | ||
497 | }; | ||
498 | |||
499 | u32 bits = (u32)tx_length * 8; | ||
500 | u32 divisor; | ||
501 | |||
502 | divisor = rate_divisor[cs_rate]; | ||
503 | if (divisor == 0) | ||
504 | return -EINVAL; | ||
505 | |||
506 | switch (cs_rate) { | ||
507 | case ZD_CS_CCK_RATE_5_5M: | ||
508 | bits = (2*bits) + 10; /* round up to the next integer */ | ||
509 | break; | ||
510 | case ZD_CS_CCK_RATE_11M: | ||
511 | if (service) { | ||
512 | u32 t = bits % 11; | ||
513 | *service &= ~ZD_PLCP_SERVICE_LENGTH_EXTENSION; | ||
514 | if (0 < t && t <= 3) { | ||
515 | *service |= ZD_PLCP_SERVICE_LENGTH_EXTENSION; | ||
516 | } | ||
517 | } | ||
518 | bits += 10; /* round up to the next integer */ | ||
519 | break; | ||
520 | } | ||
521 | |||
522 | return bits/divisor; | ||
523 | } | ||
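The function above converts a frame length into an on-air duration: the bit count is divided by the per-rate divisor, with 5.5M doubling the bit count and both 5.5M and 11M adding divisor-1 (10) so the integer division rounds up; at 11M the PLCP length-extension bit is additionally set when the remainder falls in 1..3. A minimal userspace sketch of the same arithmetic, assuming the CCK divisors from the table above and leaving the service-bit handling out (the names below are stand-ins, not the driver's identifiers):

    #include <stdio.h>
    #include <stdint.h>

    /* Stand-in rate indices mirroring the CCK entries of rate_divisor[] above. */
    enum { CCK_1M, CCK_2M, CCK_5_5M, CCK_11M };
    static const unsigned divisor[] = { 1, 2, 11, 11 };

    /* Transmit time in microseconds for a CCK frame of tx_length bytes. */
    static unsigned tx_length_us(unsigned rate, uint16_t tx_length)
    {
        unsigned bits = (unsigned)tx_length * 8;

        if (rate == CCK_5_5M)
            bits = 2 * bits + 10;   /* double the bits for 5.5M; +10 rounds up */
        else if (rate == CCK_11M)
            bits += 10;             /* +10 rounds the division up at 11M */
        return bits / divisor[rate];
    }

    int main(void)
    {
        /* A 1500-byte frame: 12000, 6000, 2182 and 1091 microseconds. */
        printf("%u %u %u %u\n",
               tx_length_us(CCK_1M, 1500), tx_length_us(CCK_2M, 1500),
               tx_length_us(CCK_5_5M, 1500), tx_length_us(CCK_11M, 1500));
        return 0;
    }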
524 | |||
525 | enum { | ||
526 | R2M_SHORT_PREAMBLE = 0x01, | ||
527 | R2M_11A = 0x02, | ||
528 | }; | ||
529 | |||
530 | static u8 cs_rate_to_modulation(u8 cs_rate, int flags) | ||
531 | { | ||
532 | u8 modulation; | ||
533 | |||
534 | modulation = cs_typed_rate(cs_rate); | ||
535 | if (flags & R2M_SHORT_PREAMBLE) { | ||
536 | switch (ZD_CS_RATE(modulation)) { | ||
537 | case ZD_CS_CCK_RATE_2M: | ||
538 | case ZD_CS_CCK_RATE_5_5M: | ||
539 | case ZD_CS_CCK_RATE_11M: | ||
540 | modulation |= ZD_CS_CCK_PREA_SHORT; | ||
541 | return modulation; | ||
542 | } | ||
543 | } | ||
544 | if (flags & R2M_11A) { | ||
545 | if (ZD_CS_TYPE(modulation) == ZD_CS_OFDM) | ||
546 | modulation |= ZD_CS_OFDM_MODE_11A; | ||
547 | } | ||
548 | return modulation; | ||
549 | } | ||
550 | |||
551 | static void cs_set_modulation(struct zd_mac *mac, struct zd_ctrlset *cs, | ||
552 | struct ieee80211_hdr_4addr *hdr) | ||
553 | { | ||
554 | struct ieee80211softmac_device *softmac = ieee80211_priv(mac->netdev); | ||
555 | u16 ftype = WLAN_FC_GET_TYPE(le16_to_cpu(hdr->frame_ctl)); | ||
556 | u8 rate, cs_rate; | ||
557 | int is_mgt = (ftype == IEEE80211_FTYPE_MGMT) != 0; | ||
558 | |||
559 | /* FIXME: 802.11a? short preamble? */ | ||
560 | rate = ieee80211softmac_suggest_txrate(softmac, | ||
561 | is_multicast_ether_addr(hdr->addr1), is_mgt); | ||
562 | |||
563 | cs_rate = rate_to_cs_rate(rate); | ||
564 | cs->modulation = cs_rate_to_modulation(cs_rate, 0); | ||
565 | } | ||
566 | |||
567 | static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs, | ||
568 | struct ieee80211_hdr_4addr *header) | ||
569 | { | ||
570 | unsigned int tx_length = le16_to_cpu(cs->tx_length); | ||
571 | u16 fctl = le16_to_cpu(header->frame_ctl); | ||
572 | u16 ftype = WLAN_FC_GET_TYPE(fctl); | ||
573 | u16 stype = WLAN_FC_GET_STYPE(fctl); | ||
574 | |||
575 | /* | ||
576 | * CONTROL: | ||
577 | * - start at 0x00 | ||
578 | * - if fragment 0, enable bit 0 | ||
579 | * - if backoff needed, enable bit 0 | ||
580 | * - if burst (backoff not needed) disable bit 0 | ||
581 | * - if multicast, enable bit 1 | ||
582 | * - if PS-POLL frame, enable bit 2 | ||
583 | * - if in INDEPENDENT_BSS mode and zd1205_DestPowerSave, then enable | ||
584 | * bit 4 (FIXME: purpose unclear) | ||
585 | * - if frag_len > RTS threshold, set bit 5 as long as it isn't | ||
586 | * multicast or mgt | ||
587 | * - if bit 5 is set, and we are in OFDM mode, unset bit 5 and set bit | ||
588 | * 7 | ||
589 | */ | ||
590 | |||
591 | cs->control = 0; | ||
592 | |||
593 | /* First fragment */ | ||
594 | if (WLAN_GET_SEQ_FRAG(le16_to_cpu(header->seq_ctl)) == 0) | ||
595 | cs->control |= ZD_CS_NEED_RANDOM_BACKOFF; | ||
596 | |||
597 | /* Multicast */ | ||
598 | if (is_multicast_ether_addr(header->addr1)) | ||
599 | cs->control |= ZD_CS_MULTICAST; | ||
600 | |||
601 | /* PS-POLL */ | ||
602 | if (stype == IEEE80211_STYPE_PSPOLL) | ||
603 | cs->control |= ZD_CS_PS_POLL_FRAME; | ||
604 | |||
605 | if (!is_multicast_ether_addr(header->addr1) && | ||
606 | ftype != IEEE80211_FTYPE_MGMT && | ||
607 | tx_length > zd_netdev_ieee80211(mac->netdev)->rts) | ||
608 | { | ||
609 | /* FIXME: check the logic */ | ||
610 | if (ZD_CS_TYPE(cs->modulation) == ZD_CS_OFDM) { | ||
611 | /* 802.11g */ | ||
612 | cs->control |= ZD_CS_SELF_CTS; | ||
613 | } else { /* 802.11b */ | ||
614 | cs->control |= ZD_CS_RTS; | ||
615 | } | ||
616 | } | ||
617 | |||
618 | /* FIXME: Management frame? */ | ||
619 | } | ||
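For a unicast data frame longer than the RTS threshold, the function above requests self-CTS protection when the frame is sent with OFDM (802.11g) and a full RTS/CTS exchange when it is sent with CCK (802.11b). A compact sketch of that decision, reusing the control bit values defined in zd_mac.h further down and ignoring the PS-POLL and management cases:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CS_NEED_RANDOM_BACKOFF 0x01   /* mirrors ZD_CS_NEED_RANDOM_BACKOFF */
    #define CS_MULTICAST           0x02   /* mirrors ZD_CS_MULTICAST */
    #define CS_RTS                 0x20   /* mirrors ZD_CS_RTS */
    #define CS_SELF_CTS            0x80   /* mirrors ZD_CS_SELF_CTS */

    /* Control byte for a data frame, ignoring PS-POLL and management frames. */
    static uint8_t data_frame_control(bool first_frag, bool multicast, bool ofdm,
                                      unsigned tx_length, unsigned rts_threshold)
    {
        uint8_t control = 0;

        if (first_frag)
            control |= CS_NEED_RANDOM_BACKOFF;
        if (multicast)
            control |= CS_MULTICAST;
        if (!multicast && tx_length > rts_threshold)
            control |= ofdm ? CS_SELF_CTS : CS_RTS;
        return control;
    }

    int main(void)
    {
        /* Long unicast first fragment: 0x81 on OFDM, 0x21 on CCK. */
        printf("%#x %#x\n",
               data_frame_control(true, false, true, 2400, 2346),
               data_frame_control(true, false, false, 2400, 2346));
        return 0;
    }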
620 | |||
621 | static int fill_ctrlset(struct zd_mac *mac, | ||
622 | struct ieee80211_txb *txb, | ||
623 | int frag_num) | ||
624 | { | ||
625 | int r; | ||
626 | struct sk_buff *skb = txb->fragments[frag_num]; | ||
627 | struct ieee80211_hdr_4addr *hdr = | ||
628 | (struct ieee80211_hdr_4addr *) skb->data; | ||
629 | unsigned int frag_len = skb->len + IEEE80211_FCS_LEN; | ||
630 | unsigned int next_frag_len; | ||
631 | unsigned int packet_length; | ||
632 | struct zd_ctrlset *cs = (struct zd_ctrlset *) | ||
633 | skb_push(skb, sizeof(struct zd_ctrlset)); | ||
634 | |||
635 | if (frag_num+1 < txb->nr_frags) { | ||
636 | next_frag_len = txb->fragments[frag_num+1]->len + | ||
637 | IEEE80211_FCS_LEN; | ||
638 | } else { | ||
639 | next_frag_len = 0; | ||
640 | } | ||
641 | ZD_ASSERT(frag_len <= 0xffff); | ||
642 | ZD_ASSERT(next_frag_len <= 0xffff); | ||
643 | |||
644 | cs_set_modulation(mac, cs, hdr); | ||
645 | |||
646 | cs->tx_length = cpu_to_le16(frag_len); | ||
647 | |||
648 | cs_set_control(mac, cs, hdr); | ||
649 | |||
650 | packet_length = frag_len + sizeof(struct zd_ctrlset) + 10; | ||
651 | ZD_ASSERT(packet_length <= 0xffff); | ||
652 | /* ZD1211B: Computing the length difference this way gives us | ||
653 | * flexibility to compute the packet length. | ||
654 | */ | ||
655 | cs->packet_length = cpu_to_le16(mac->chip.is_zd1211b ? | ||
656 | packet_length - frag_len : packet_length); | ||
657 | |||
658 | /* | ||
659 | * CURRENT LENGTH: | ||
660 | * - transmit frame length in microseconds | ||
661 | * - seems to be derived from frame length | ||
662 | * - see Cal_Us_Service() in zdinlinef.h | ||
663 | * - if macp->bTxBurstEnable is enabled, then multiply by 4 | ||
664 | * - bTxBurstEnable is never set in the vendor driver | ||
665 | * | ||
666 | * SERVICE: | ||
667 | * - "for PLCP configuration" | ||
668 | * - always 0 except in some situations at 802.11b 11M | ||
669 | * - see line 53 of zdinlinef.h | ||
670 | */ | ||
671 | cs->service = 0; | ||
672 | r = zd_calc_tx_length_us(&cs->service, ZD_CS_RATE(cs->modulation), | ||
673 | le16_to_cpu(cs->tx_length)); | ||
674 | if (r < 0) | ||
675 | return r; | ||
676 | cs->current_length = cpu_to_le16(r); | ||
677 | |||
678 | if (next_frag_len == 0) { | ||
679 | cs->next_frame_length = 0; | ||
680 | } else { | ||
681 | r = zd_calc_tx_length_us(NULL, ZD_CS_RATE(cs->modulation), | ||
682 | next_frag_len); | ||
683 | if (r < 0) | ||
684 | return r; | ||
685 | cs->next_frame_length = cpu_to_le16(r); | ||
686 | } | ||
687 | |||
688 | return 0; | ||
689 | } | ||
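With the packed layout declared in zd_mac.h below, sizeof(struct zd_ctrlset) is 11 bytes, so packet_length is frag_len + 21 on the ZD1211 and the constant difference 21 on the ZD1211B. A worked example for a 100-byte fragment, assuming that 11-byte size:

    #include <stdio.h>

    int main(void)
    {
        unsigned ctrlset_size = 11;   /* packed struct zd_ctrlset, see zd_mac.h */
        unsigned frag_len = 100;      /* fragment length including the FCS */
        unsigned packet_length = frag_len + ctrlset_size + 10;

        printf("ZD1211:  packet_length = %u\n", packet_length);            /* 121 */
        printf("ZD1211B: packet_length = %u\n", packet_length - frag_len); /* 21 */
        return 0;
    }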
690 | |||
691 | static int zd_mac_tx(struct zd_mac *mac, struct ieee80211_txb *txb, int pri) | ||
692 | { | ||
693 | int i, r; | ||
694 | |||
695 | for (i = 0; i < txb->nr_frags; i++) { | ||
696 | struct sk_buff *skb = txb->fragments[i]; | ||
697 | |||
698 | r = fill_ctrlset(mac, txb, i); | ||
699 | if (r) | ||
700 | return r; | ||
701 | r = zd_usb_tx(&mac->chip.usb, skb->data, skb->len); | ||
702 | if (r) | ||
703 | return r; | ||
704 | } | ||
705 | |||
706 | /* FIXME: shouldn't this be handled by the upper layers? */ | ||
707 | mac->netdev->trans_start = jiffies; | ||
708 | |||
709 | ieee80211_txb_free(txb); | ||
710 | return 0; | ||
711 | } | ||
712 | |||
713 | struct zd_rt_hdr { | ||
714 | struct ieee80211_radiotap_header rt_hdr; | ||
715 | u8 rt_flags; | ||
716 | u8 rt_rate; | ||
717 | u16 rt_channel; | ||
718 | u16 rt_chbitmask; | ||
719 | } __attribute__((packed)); | ||
720 | |||
721 | static void fill_rt_header(void *buffer, struct zd_mac *mac, | ||
722 | const struct ieee80211_rx_stats *stats, | ||
723 | const struct rx_status *status) | ||
724 | { | ||
725 | struct zd_rt_hdr *hdr = buffer; | ||
726 | |||
727 | hdr->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION; | ||
728 | hdr->rt_hdr.it_pad = 0; | ||
729 | hdr->rt_hdr.it_len = cpu_to_le16(sizeof(struct zd_rt_hdr)); | ||
730 | hdr->rt_hdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) | | ||
731 | (1 << IEEE80211_RADIOTAP_CHANNEL) | | ||
732 | (1 << IEEE80211_RADIOTAP_RATE)); | ||
733 | |||
734 | hdr->rt_flags = 0; | ||
735 | if (status->decryption_type & (ZD_RX_WEP64|ZD_RX_WEP128|ZD_RX_WEP256)) | ||
736 | hdr->rt_flags |= IEEE80211_RADIOTAP_F_WEP; | ||
737 | |||
738 | hdr->rt_rate = stats->rate / 5; | ||
739 | |||
740 | /* FIXME: 802.11a */ | ||
741 | hdr->rt_channel = cpu_to_le16(ieee80211chan2mhz( | ||
742 | _zd_chip_get_channel(&mac->chip))); | ||
743 | hdr->rt_chbitmask = cpu_to_le16(IEEE80211_CHAN_2GHZ | | ||
744 | ((status->frame_status & ZD_RX_FRAME_MODULATION_MASK) == | ||
745 | ZD_RX_OFDM ? IEEE80211_CHAN_OFDM : IEEE80211_CHAN_CCK)); | ||
746 | } | ||
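rt_rate in the radiotap header is expressed in units of 500 kb/s; the ieee80211 stack appears to report stats->rate in units of 100 kb/s, hence the division by 5, and rt_channel carries the channel's centre frequency in MHz. A small sketch of the resulting values for a 54 Mb/s OFDM frame on channel 6 (the concrete numbers are assumptions for illustration only):

    #include <stdio.h>

    int main(void)
    {
        unsigned stats_rate = 540;          /* 54 Mb/s in 100 kb/s units (assumed) */
        unsigned rt_rate = stats_rate / 5;  /* radiotap rate field, 500 kb/s units */
        unsigned rt_channel = 2437;         /* channel 6 centre frequency in MHz */

        printf("rt_rate = %u (x 500 kb/s), rt_channel = %u MHz\n",
               rt_rate, rt_channel);
        return 0;
    }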
747 | |||
748 | /* Returns 1 if the data packet is for us and 0 otherwise. */ | ||
749 | static int is_data_packet_for_us(struct ieee80211_device *ieee, | ||
750 | struct ieee80211_hdr_4addr *hdr) | ||
751 | { | ||
752 | struct net_device *netdev = ieee->dev; | ||
753 | u16 fc = le16_to_cpu(hdr->frame_ctl); | ||
754 | |||
755 | ZD_ASSERT(WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA); | ||
756 | |||
757 | switch (ieee->iw_mode) { | ||
758 | case IW_MODE_ADHOC: | ||
759 | if ((fc & (IEEE80211_FCTL_TODS|IEEE80211_FCTL_FROMDS)) != 0 || | ||
760 | memcmp(hdr->addr3, ieee->bssid, ETH_ALEN) != 0) | ||
761 | return 0; | ||
762 | break; | ||
763 | case IW_MODE_AUTO: | ||
764 | case IW_MODE_INFRA: | ||
765 | if ((fc & (IEEE80211_FCTL_TODS|IEEE80211_FCTL_FROMDS)) != | ||
766 | IEEE80211_FCTL_FROMDS || | ||
767 | memcmp(hdr->addr2, ieee->bssid, ETH_ALEN) != 0) | ||
768 | return 0; | ||
769 | break; | ||
770 | default: | ||
771 | ZD_ASSERT(ieee->iw_mode != IW_MODE_MONITOR); | ||
772 | return 0; | ||
773 | } | ||
774 | |||
775 | return memcmp(hdr->addr1, netdev->dev_addr, ETH_ALEN) == 0 || | ||
776 | is_multicast_ether_addr(hdr->addr1) || | ||
777 | (netdev->flags & IFF_PROMISC); | ||
778 | } | ||
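The ToDS/FromDS test above encodes the usual 802.11 addressing rules: IBSS data frames carry neither bit and put the BSSID in addr3, while frames from an AP to a station carry only FromDS and put the BSSID in addr2. A compact sketch of the same check on a bare frame-control word, using the standard 802.11 bit positions for the two flags:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FCTL_TODS   0x0100   /* standard 802.11 ToDS bit */
    #define FCTL_FROMDS 0x0200   /* standard 802.11 FromDS bit */

    /* Does the DS-bit pattern fit the current mode? IBSS frames carry neither
     * bit; frames from an AP to a station carry only FromDS. */
    static bool ds_bits_ok(uint16_t fc, bool adhoc)
    {
        uint16_t ds = fc & (FCTL_TODS | FCTL_FROMDS);

        return adhoc ? ds == 0 : ds == FCTL_FROMDS;
    }

    int main(void)
    {
        printf("%d %d\n",
               ds_bits_ok(0x0008, true),    /* IBSS data frame: accepted */
               ds_bits_ok(0x0208, false));  /* AP-to-STA data frame: accepted */
        return 0;
    }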
779 | |||
780 | /* Filters received packets. Returns 1 if the packet should be handed to | ||
781 | * ieee80211_rx(), 0 if it should be dropped, and -EINVAL if an error is | ||
782 | * detected. ieee80211_rx_mgt() is called from here for management frames. | ||
783 | * | ||
784 | * Based on ieee80211_rx_any. | ||
785 | */ | ||
786 | static int filter_rx(struct ieee80211_device *ieee, | ||
787 | const u8 *buffer, unsigned int length, | ||
788 | struct ieee80211_rx_stats *stats) | ||
789 | { | ||
790 | struct ieee80211_hdr_4addr *hdr; | ||
791 | u16 fc; | ||
792 | |||
793 | if (ieee->iw_mode == IW_MODE_MONITOR) | ||
794 | return 1; | ||
795 | |||
796 | hdr = (struct ieee80211_hdr_4addr *)buffer; | ||
797 | fc = le16_to_cpu(hdr->frame_ctl); | ||
798 | if ((fc & IEEE80211_FCTL_VERS) != 0) | ||
799 | return -EINVAL; | ||
800 | |||
801 | switch (WLAN_FC_GET_TYPE(fc)) { | ||
802 | case IEEE80211_FTYPE_MGMT: | ||
803 | if (length < sizeof(struct ieee80211_hdr_3addr)) | ||
804 | return -EINVAL; | ||
805 | ieee80211_rx_mgt(ieee, hdr, stats); | ||
806 | return 0; | ||
807 | case IEEE80211_FTYPE_CTL: | ||
808 | /* Ignore invalid short buffers */ | ||
809 | return 0; | ||
810 | case IEEE80211_FTYPE_DATA: | ||
811 | if (length < sizeof(struct ieee80211_hdr_3addr)) | ||
812 | return -EINVAL; | ||
813 | return is_data_packet_for_us(ieee, hdr); | ||
814 | } | ||
815 | |||
816 | return -EINVAL; | ||
817 | } | ||
818 | |||
819 | static void update_qual_rssi(struct zd_mac *mac, u8 qual_percent, u8 rssi) | ||
820 | { | ||
821 | unsigned long flags; | ||
822 | |||
823 | spin_lock_irqsave(&mac->lock, flags); | ||
824 | mac->qual_average = (7 * mac->qual_average + qual_percent) / 8; | ||
825 | mac->rssi_average = (7 * mac->rssi_average + rssi) / 8; | ||
826 | spin_unlock_irqrestore(&mac->lock, flags); | ||
827 | } | ||
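Both averages are exponentially weighted moving averages with weight 7/8 on the previous value, so a step change in the input closes roughly half of the remaining gap every five samples. A small sketch showing how the quality average tracks a jump from 0 to 80:

    #include <stdio.h>

    int main(void)
    {
        unsigned avg = 0;

        /* Feed a constant sample of 80 and watch the 7/8-weighted average. */
        for (int i = 1; i <= 20; i++) {
            avg = (7 * avg + 80) / 8;
            if (i % 5 == 0)
                printf("after %2d samples: %u\n", i, avg);
        }
        return 0;
    }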
828 | |||
829 | static int fill_rx_stats(struct ieee80211_rx_stats *stats, | ||
830 | const struct rx_status **pstatus, | ||
831 | struct zd_mac *mac, | ||
832 | const u8 *buffer, unsigned int length) | ||
833 | { | ||
834 | const struct rx_status *status; | ||
835 | |||
836 | *pstatus = status = zd_tail(buffer, length, sizeof(struct rx_status)); | ||
837 | if (status->frame_status & ZD_RX_ERROR) { | ||
838 | /* FIXME: update? */ | ||
839 | return -EINVAL; | ||
840 | } | ||
841 | memset(stats, 0, sizeof(struct ieee80211_rx_stats)); | ||
842 | stats->len = length - (ZD_PLCP_HEADER_SIZE + IEEE80211_FCS_LEN + | ||
843 | sizeof(struct rx_status)); | ||
844 | /* FIXME: 802.11a */ | ||
845 | stats->freq = IEEE80211_24GHZ_BAND; | ||
846 | stats->received_channel = _zd_chip_get_channel(&mac->chip); | ||
847 | stats->rssi = zd_rx_strength_percent(status->signal_strength); | ||
848 | stats->signal = zd_rx_qual_percent(buffer, | ||
849 | length - sizeof(struct rx_status), | ||
850 | status); | ||
851 | stats->mask = IEEE80211_STATMASK_RSSI | IEEE80211_STATMASK_SIGNAL; | ||
852 | stats->rate = zd_rx_rate(buffer, status); | ||
853 | if (stats->rate) | ||
854 | stats->mask |= IEEE80211_STATMASK_RATE; | ||
855 | |||
856 | update_qual_rssi(mac, stats->signal, stats->rssi); | ||
857 | return 0; | ||
858 | } | ||
859 | |||
860 | int zd_mac_rx(struct zd_mac *mac, const u8 *buffer, unsigned int length) | ||
861 | { | ||
862 | int r; | ||
863 | struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac); | ||
864 | struct ieee80211_rx_stats stats; | ||
865 | const struct rx_status *status; | ||
866 | struct sk_buff *skb; | ||
867 | |||
868 | if (length < ZD_PLCP_HEADER_SIZE + IEEE80211_1ADDR_LEN + | ||
869 | IEEE80211_FCS_LEN + sizeof(struct rx_status)) | ||
870 | return -EINVAL; | ||
871 | |||
872 | r = fill_rx_stats(&stats, &status, mac, buffer, length); | ||
873 | if (r) | ||
874 | return r; | ||
875 | |||
876 | length -= ZD_PLCP_HEADER_SIZE+IEEE80211_FCS_LEN+ | ||
877 | sizeof(struct rx_status); | ||
878 | buffer += ZD_PLCP_HEADER_SIZE; | ||
879 | |||
880 | r = filter_rx(ieee, buffer, length, &stats); | ||
881 | if (r <= 0) | ||
882 | return r; | ||
883 | |||
884 | skb = dev_alloc_skb(sizeof(struct zd_rt_hdr) + length); | ||
885 | if (!skb) | ||
886 | return -ENOMEM; | ||
887 | if (ieee->iw_mode == IW_MODE_MONITOR) | ||
888 | fill_rt_header(skb_put(skb, sizeof(struct zd_rt_hdr)), mac, | ||
889 | &stats, status); | ||
890 | memcpy(skb_put(skb, length), buffer, length); | ||
891 | |||
892 | r = ieee80211_rx(ieee, skb, &stats); | ||
893 | if (!r) { | ||
894 | ZD_ASSERT(in_irq()); | ||
895 | dev_kfree_skb_irq(skb); | ||
896 | } | ||
897 | return 0; | ||
898 | } | ||
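A received transfer therefore consists of a 5-byte PLCP header, the 802.11 frame, the 4-byte FCS and the 5-byte rx_status trailer; only the frame in the middle is handed to ieee80211_rx(). A sketch of the length bookkeeping, with the sizes taken from the constants used above and an arbitrary example transfer length:

    #include <stdio.h>

    int main(void)
    {
        unsigned plcp = 5, fcs = 4, status = 5;   /* PLCP header, FCS, rx_status */
        unsigned total = 114;                     /* example USB transfer length */
        unsigned frame = total - (plcp + fcs + status);

        /* 100 bytes of 802.11 header + payload are passed to ieee80211_rx() */
        printf("frame bytes: %u\n", frame);
        return 0;
    }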
899 | |||
900 | static int netdev_tx(struct ieee80211_txb *txb, struct net_device *netdev, | ||
901 | int pri) | ||
902 | { | ||
903 | return zd_mac_tx(zd_netdev_mac(netdev), txb, pri); | ||
904 | } | ||
905 | |||
906 | static void set_security(struct net_device *netdev, | ||
907 | struct ieee80211_security *sec) | ||
908 | { | ||
909 | struct ieee80211_device *ieee = zd_netdev_ieee80211(netdev); | ||
910 | struct ieee80211_security *secinfo = &ieee->sec; | ||
911 | int keyidx; | ||
912 | |||
913 | dev_dbg_f(zd_mac_dev(zd_netdev_mac(netdev)), "\n"); | ||
914 | |||
915 | for (keyidx = 0; keyidx<WEP_KEYS; keyidx++) | ||
916 | if (sec->flags & (1<<keyidx)) { | ||
917 | secinfo->encode_alg[keyidx] = sec->encode_alg[keyidx]; | ||
918 | secinfo->key_sizes[keyidx] = sec->key_sizes[keyidx]; | ||
919 | memcpy(secinfo->keys[keyidx], sec->keys[keyidx], | ||
920 | SCM_KEY_LEN); | ||
921 | } | ||
922 | |||
923 | if (sec->flags & SEC_ACTIVE_KEY) { | ||
924 | secinfo->active_key = sec->active_key; | ||
925 | dev_dbg_f(zd_mac_dev(zd_netdev_mac(netdev)), | ||
926 | " .active_key = %d\n", sec->active_key); | ||
927 | } | ||
928 | if (sec->flags & SEC_UNICAST_GROUP) { | ||
929 | secinfo->unicast_uses_group = sec->unicast_uses_group; | ||
930 | dev_dbg_f(zd_mac_dev(zd_netdev_mac(netdev)), | ||
931 | " .unicast_uses_group = %d\n", | ||
932 | sec->unicast_uses_group); | ||
933 | } | ||
934 | if (sec->flags & SEC_LEVEL) { | ||
935 | secinfo->level = sec->level; | ||
936 | dev_dbg_f(zd_mac_dev(zd_netdev_mac(netdev)), | ||
937 | " .level = %d\n", sec->level); | ||
938 | } | ||
939 | if (sec->flags & SEC_ENABLED) { | ||
940 | secinfo->enabled = sec->enabled; | ||
941 | dev_dbg_f(zd_mac_dev(zd_netdev_mac(netdev)), | ||
942 | " .enabled = %d\n", sec->enabled); | ||
943 | } | ||
944 | if (sec->flags & SEC_ENCRYPT) { | ||
945 | secinfo->encrypt = sec->encrypt; | ||
946 | dev_dbg_f(zd_mac_dev(zd_netdev_mac(netdev)), | ||
947 | " .encrypt = %d\n", sec->encrypt); | ||
948 | } | ||
949 | if (sec->flags & SEC_AUTH_MODE) { | ||
950 | secinfo->auth_mode = sec->auth_mode; | ||
951 | dev_dbg_f(zd_mac_dev(zd_netdev_mac(netdev)), | ||
952 | " .auth_mode = %d\n", sec->auth_mode); | ||
953 | } | ||
954 | } | ||
955 | |||
956 | static void ieee_init(struct ieee80211_device *ieee) | ||
957 | { | ||
958 | ieee->mode = IEEE_B | IEEE_G; | ||
959 | ieee->freq_band = IEEE80211_24GHZ_BAND; | ||
960 | ieee->modulation = IEEE80211_OFDM_MODULATION | IEEE80211_CCK_MODULATION; | ||
961 | ieee->tx_headroom = sizeof(struct zd_ctrlset); | ||
962 | ieee->set_security = set_security; | ||
963 | ieee->hard_start_xmit = netdev_tx; | ||
964 | |||
965 | /* Software encryption/decryption for now */ | ||
966 | ieee->host_build_iv = 0; | ||
967 | ieee->host_encrypt = 1; | ||
968 | ieee->host_decrypt = 1; | ||
969 | |||
970 | /* FIXME: default to managed mode, until ieee80211 and zd1211rw can | ||
971 | * correctly support AUTO */ | ||
972 | ieee->iw_mode = IW_MODE_INFRA; | ||
973 | } | ||
974 | |||
975 | static void softmac_init(struct ieee80211softmac_device *sm) | ||
976 | { | ||
977 | sm->set_channel = set_channel; | ||
978 | } | ||
979 | |||
980 | struct iw_statistics *zd_mac_get_wireless_stats(struct net_device *ndev) | ||
981 | { | ||
982 | struct zd_mac *mac = zd_netdev_mac(ndev); | ||
983 | struct iw_statistics *iw_stats = &mac->iw_stats; | ||
984 | |||
985 | memset(iw_stats, 0, sizeof(struct iw_statistics)); | ||
986 | /* We are not setting the status, because ieee->state is not updated | ||
987 | * at all and this driver doesn't track authentication state. | ||
988 | */ | ||
989 | spin_lock_irq(&mac->lock); | ||
990 | iw_stats->qual.qual = mac->qual_average; | ||
991 | iw_stats->qual.level = mac->rssi_average; | ||
992 | iw_stats->qual.updated = IW_QUAL_QUAL_UPDATED|IW_QUAL_LEVEL_UPDATED| | ||
993 | IW_QUAL_NOISE_INVALID; | ||
994 | spin_unlock_irq(&mac->lock); | ||
995 | /* TODO: update counter */ | ||
996 | return iw_stats; | ||
997 | } | ||
998 | |||
999 | #ifdef DEBUG | ||
1000 | static const char* decryption_types[] = { | ||
1001 | [ZD_RX_NO_WEP] = "none", | ||
1002 | [ZD_RX_WEP64] = "WEP64", | ||
1003 | [ZD_RX_TKIP] = "TKIP", | ||
1004 | [ZD_RX_AES] = "AES", | ||
1005 | [ZD_RX_WEP128] = "WEP128", | ||
1006 | [ZD_RX_WEP256] = "WEP256", | ||
1007 | }; | ||
1008 | |||
1009 | static const char *decryption_type_string(u8 type) | ||
1010 | { | ||
1011 | const char *s; | ||
1012 | |||
1013 | if (type < ARRAY_SIZE(decryption_types)) { | ||
1014 | s = decryption_types[type]; | ||
1015 | } else { | ||
1016 | s = NULL; | ||
1017 | } | ||
1018 | return s ? s : "unknown"; | ||
1019 | } | ||
1020 | |||
1021 | static int is_ofdm(u8 frame_status) | ||
1022 | { | ||
1023 | return (frame_status & ZD_RX_OFDM); | ||
1024 | } | ||
1025 | |||
1026 | void zd_dump_rx_status(const struct rx_status *status) | ||
1027 | { | ||
1028 | const char* modulation; | ||
1029 | u8 quality; | ||
1030 | |||
1031 | if (is_ofdm(status->frame_status)) { | ||
1032 | modulation = "ofdm"; | ||
1033 | quality = status->signal_quality_ofdm; | ||
1034 | } else { | ||
1035 | modulation = "cck"; | ||
1036 | quality = status->signal_quality_cck; | ||
1037 | } | ||
1038 | pr_debug("rx status %s strength %#04x qual %#04x decryption %s\n", | ||
1039 | modulation, status->signal_strength, quality, | ||
1040 | decryption_type_string(status->decryption_type)); | ||
1041 | if (status->frame_status & ZD_RX_ERROR) { | ||
1042 | pr_debug("rx error %s%s%s%s%s%s\n", | ||
1043 | (status->frame_status & ZD_RX_TIMEOUT_ERROR) ? | ||
1044 | "timeout " : "", | ||
1045 | (status->frame_status & ZD_RX_FIFO_OVERRUN_ERROR) ? | ||
1046 | "fifo " : "", | ||
1047 | (status->frame_status & ZD_RX_DECRYPTION_ERROR) ? | ||
1048 | "decryption " : "", | ||
1049 | (status->frame_status & ZD_RX_CRC32_ERROR) ? | ||
1050 | "crc32 " : "", | ||
1051 | (status->frame_status & ZD_RX_NO_ADDR1_MATCH_ERROR) ? | ||
1052 | "addr1 " : "", | ||
1053 | (status->frame_status & ZD_RX_CRC16_ERROR) ? | ||
1054 | "crc16" : ""); | ||
1055 | } | ||
1056 | } | ||
1057 | #endif /* DEBUG */ | ||
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h new file mode 100644 index 000000000000..71e382c589ee --- /dev/null +++ b/drivers/net/wireless/zd1211rw/zd_mac.h | |||
@@ -0,0 +1,190 @@ | |||
1 | /* zd_mac.h | ||
2 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License as published by | ||
5 | * the Free Software Foundation; either version 2 of the License, or | ||
6 | * (at your option) any later version. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program; if not, write to the Free Software | ||
15 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
16 | */ | ||
17 | |||
18 | #ifndef _ZD_MAC_H | ||
19 | #define _ZD_MAC_H | ||
20 | |||
21 | #include <linux/wireless.h> | ||
22 | #include <linux/kernel.h> | ||
23 | #include <net/ieee80211.h> | ||
24 | #include <net/ieee80211softmac.h> | ||
25 | |||
26 | #include "zd_chip.h" | ||
27 | #include "zd_netdev.h" | ||
28 | |||
29 | struct zd_ctrlset { | ||
30 | u8 modulation; | ||
31 | __le16 tx_length; | ||
32 | u8 control; | ||
33 | /* stores only the difference to tx_length on ZD1211B */ | ||
34 | __le16 packet_length; | ||
35 | __le16 current_length; | ||
36 | u8 service; | ||
37 | __le16 next_frame_length; | ||
38 | } __attribute__((packed)); | ||
39 | |||
40 | #define ZD_CS_RESERVED_SIZE 25 | ||
41 | |||
42 | /* zd_ctrlset field modulation */ | ||
43 | #define ZD_CS_RATE_MASK 0x0f | ||
44 | #define ZD_CS_TYPE_MASK 0x10 | ||
45 | #define ZD_CS_RATE(modulation) ((modulation) & ZD_CS_RATE_MASK) | ||
46 | #define ZD_CS_TYPE(modulation) ((modulation) & ZD_CS_TYPE_MASK) | ||
47 | |||
48 | #define ZD_CS_CCK 0x00 | ||
49 | #define ZD_CS_OFDM 0x10 | ||
50 | |||
51 | #define ZD_CS_CCK_RATE_1M 0x00 | ||
52 | #define ZD_CS_CCK_RATE_2M 0x01 | ||
53 | #define ZD_CS_CCK_RATE_5_5M 0x02 | ||
54 | #define ZD_CS_CCK_RATE_11M 0x03 | ||
55 | /* The rates for OFDM are encoded as in the PLCP header. Use ZD_OFDM_RATE_*. | ||
56 | */ | ||
57 | |||
58 | /* bit 5 is preamble (when in CCK mode), or a/g selection (when in OFDM mode) */ | ||
59 | #define ZD_CS_CCK_PREA_LONG 0x00 | ||
60 | #define ZD_CS_CCK_PREA_SHORT 0x20 | ||
61 | #define ZD_CS_OFDM_MODE_11G 0x00 | ||
62 | #define ZD_CS_OFDM_MODE_11A 0x20 | ||
63 | |||
64 | /* zd_ctrlset control field */ | ||
65 | #define ZD_CS_NEED_RANDOM_BACKOFF 0x01 | ||
66 | #define ZD_CS_MULTICAST 0x02 | ||
67 | |||
68 | #define ZD_CS_FRAME_TYPE_MASK 0x0c | ||
69 | #define ZD_CS_DATA_FRAME 0x00 | ||
70 | #define ZD_CS_PS_POLL_FRAME 0x04 | ||
71 | #define ZD_CS_MANAGEMENT_FRAME 0x08 | ||
72 | #define ZD_CS_NO_SEQUENCE_CTL_FRAME 0x0c | ||
73 | |||
74 | #define ZD_CS_WAKE_DESTINATION 0x10 | ||
75 | #define ZD_CS_RTS 0x20 | ||
76 | #define ZD_CS_ENCRYPT 0x40 | ||
77 | #define ZD_CS_SELF_CTS 0x80 | ||
78 | |||
79 | /* Incoming frames are prepended by a PLCP header */ | ||
80 | #define ZD_PLCP_HEADER_SIZE 5 | ||
81 | |||
82 | struct rx_length_info { | ||
83 | __le16 length[3]; | ||
84 | __le16 tag; | ||
85 | } __attribute__((packed)); | ||
86 | |||
87 | #define RX_LENGTH_INFO_TAG 0x697e | ||
88 | |||
89 | struct rx_status { | ||
90 | /* rssi */ | ||
91 | u8 signal_strength; | ||
92 | u8 signal_quality_cck; | ||
93 | u8 signal_quality_ofdm; | ||
94 | u8 decryption_type; | ||
95 | u8 frame_status; | ||
96 | } __attribute__((packed)); | ||
97 | |||
98 | /* rx_status field decryption_type */ | ||
99 | #define ZD_RX_NO_WEP 0 | ||
100 | #define ZD_RX_WEP64 1 | ||
101 | #define ZD_RX_TKIP 2 | ||
102 | #define ZD_RX_AES 4 | ||
103 | #define ZD_RX_WEP128 5 | ||
104 | #define ZD_RX_WEP256 6 | ||
105 | |||
106 | /* rx_status field frame_status */ | ||
107 | #define ZD_RX_FRAME_MODULATION_MASK 0x01 | ||
108 | #define ZD_RX_CCK 0x00 | ||
109 | #define ZD_RX_OFDM 0x01 | ||
110 | |||
111 | #define ZD_RX_TIMEOUT_ERROR 0x02 | ||
112 | #define ZD_RX_FIFO_OVERRUN_ERROR 0x04 | ||
113 | #define ZD_RX_DECRYPTION_ERROR 0x08 | ||
114 | #define ZD_RX_CRC32_ERROR 0x10 | ||
115 | #define ZD_RX_NO_ADDR1_MATCH_ERROR 0x20 | ||
116 | #define ZD_RX_CRC16_ERROR 0x40 | ||
117 | #define ZD_RX_ERROR 0x80 | ||
118 | |||
119 | enum mac_flags { | ||
120 | MAC_FIXED_CHANNEL = 0x01, | ||
121 | }; | ||
122 | |||
123 | struct zd_mac { | ||
124 | struct net_device *netdev; | ||
125 | struct zd_chip chip; | ||
126 | spinlock_t lock; | ||
127 | /* Unlocked reading possible */ | ||
128 | struct iw_statistics iw_stats; | ||
129 | u8 qual_average; | ||
130 | u8 rssi_average; | ||
131 | u8 regdomain; | ||
132 | u8 default_regdomain; | ||
133 | u8 requested_channel; | ||
134 | }; | ||
135 | |||
136 | static inline struct ieee80211_device *zd_mac_to_ieee80211(struct zd_mac *mac) | ||
137 | { | ||
138 | return zd_netdev_ieee80211(mac->netdev); | ||
139 | } | ||
140 | |||
141 | static inline struct zd_mac *zd_netdev_mac(struct net_device *netdev) | ||
142 | { | ||
143 | return ieee80211softmac_priv(netdev); | ||
144 | } | ||
145 | |||
146 | static inline struct zd_mac *zd_chip_to_mac(struct zd_chip *chip) | ||
147 | { | ||
148 | return container_of(chip, struct zd_mac, chip); | ||
149 | } | ||
150 | |||
151 | static inline struct zd_mac *zd_usb_to_mac(struct zd_usb *usb) | ||
152 | { | ||
153 | return zd_chip_to_mac(zd_usb_to_chip(usb)); | ||
154 | } | ||
155 | |||
156 | #define zd_mac_dev(mac) (zd_chip_dev(&(mac)->chip)) | ||
157 | |||
158 | int zd_mac_init(struct zd_mac *mac, | ||
159 | struct net_device *netdev, | ||
160 | struct usb_interface *intf); | ||
161 | void zd_mac_clear(struct zd_mac *mac); | ||
162 | |||
163 | int zd_mac_init_hw(struct zd_mac *mac, u8 device_type); | ||
164 | |||
165 | int zd_mac_open(struct net_device *netdev); | ||
166 | int zd_mac_stop(struct net_device *netdev); | ||
167 | int zd_mac_set_mac_address(struct net_device *dev, void *p); | ||
168 | |||
169 | int zd_mac_rx(struct zd_mac *mac, const u8 *buffer, unsigned int length); | ||
170 | |||
171 | int zd_mac_set_regdomain(struct zd_mac *zd_mac, u8 regdomain); | ||
172 | u8 zd_mac_get_regdomain(struct zd_mac *zd_mac); | ||
173 | |||
174 | int zd_mac_request_channel(struct zd_mac *mac, u8 channel); | ||
175 | int zd_mac_get_channel(struct zd_mac *mac, u8 *channel, u8 *flags); | ||
176 | |||
177 | int zd_mac_set_mode(struct zd_mac *mac, u32 mode); | ||
178 | int zd_mac_get_mode(struct zd_mac *mac, u32 *mode); | ||
179 | |||
180 | int zd_mac_get_range(struct zd_mac *mac, struct iw_range *range); | ||
181 | |||
182 | struct iw_statistics *zd_mac_get_wireless_stats(struct net_device *ndev); | ||
183 | |||
184 | #ifdef DEBUG | ||
185 | void zd_dump_rx_status(const struct rx_status *status); | ||
186 | #else | ||
187 | #define zd_dump_rx_status(status) | ||
188 | #endif /* DEBUG */ | ||
189 | |||
190 | #endif /* _ZD_MAC_H */ | ||
diff --git a/drivers/net/wireless/zd1211rw/zd_netdev.c b/drivers/net/wireless/zd1211rw/zd_netdev.c new file mode 100644 index 000000000000..9df232c2c863 --- /dev/null +++ b/drivers/net/wireless/zd1211rw/zd_netdev.c | |||
@@ -0,0 +1,267 @@ | |||
1 | /* zd_netdev.c | ||
2 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License as published by | ||
5 | * the Free Software Foundation; either version 2 of the License, or | ||
6 | * (at your option) any later version. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program; if not, write to the Free Software | ||
15 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
16 | */ | ||
17 | |||
18 | #include <linux/netdevice.h> | ||
19 | #include <linux/etherdevice.h> | ||
20 | #include <linux/skbuff.h> | ||
21 | #include <net/ieee80211.h> | ||
22 | #include <net/ieee80211softmac.h> | ||
23 | #include <net/ieee80211softmac_wx.h> | ||
24 | #include <net/iw_handler.h> | ||
25 | |||
26 | #include "zd_def.h" | ||
27 | #include "zd_netdev.h" | ||
28 | #include "zd_mac.h" | ||
29 | #include "zd_ieee80211.h" | ||
30 | |||
31 | /* Region 0 means reset regdomain to default. */ | ||
32 | static int zd_set_regdomain(struct net_device *netdev, | ||
33 | struct iw_request_info *info, | ||
34 | union iwreq_data *req, char *extra) | ||
35 | { | ||
36 | const u8 *regdomain = (u8 *)req; | ||
37 | return zd_mac_set_regdomain(zd_netdev_mac(netdev), *regdomain); | ||
38 | } | ||
39 | |||
40 | static int zd_get_regdomain(struct net_device *netdev, | ||
41 | struct iw_request_info *info, | ||
42 | union iwreq_data *req, char *extra) | ||
43 | { | ||
44 | u8 *regdomain = (u8 *)req; | ||
45 | if (!regdomain) | ||
46 | return -EINVAL; | ||
47 | *regdomain = zd_mac_get_regdomain(zd_netdev_mac(netdev)); | ||
48 | return 0; | ||
49 | } | ||
50 | |||
51 | static const struct iw_priv_args zd_priv_args[] = { | ||
52 | { | ||
53 | .cmd = ZD_PRIV_SET_REGDOMAIN, | ||
54 | .set_args = IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, | ||
55 | .name = "set_regdomain", | ||
56 | }, | ||
57 | { | ||
58 | .cmd = ZD_PRIV_GET_REGDOMAIN, | ||
59 | .get_args = IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, | ||
60 | .name = "get_regdomain", | ||
61 | }, | ||
62 | }; | ||
63 | |||
64 | #define PRIV_OFFSET(x) [(x)-SIOCIWFIRSTPRIV] | ||
65 | |||
66 | static const iw_handler zd_priv_handler[] = { | ||
67 | PRIV_OFFSET(ZD_PRIV_SET_REGDOMAIN) = zd_set_regdomain, | ||
68 | PRIV_OFFSET(ZD_PRIV_GET_REGDOMAIN) = zd_get_regdomain, | ||
69 | }; | ||
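PRIV_OFFSET() turns the private ioctl numbers into array indices, so ZD_PRIV_SET_REGDOMAIN lands in slot 0 and ZD_PRIV_GET_REGDOMAIN in slot 1 of zd_priv_handler. A minimal illustration of the same designated-initializer trick, with a stand-in value for SIOCIWFIRSTPRIV:

    #include <stdio.h>

    #define FIRST_PRIV 0x8be0                 /* stand-in for SIOCIWFIRSTPRIV */
    #define SET_REGDOMAIN (FIRST_PRIV)        /* mirrors ZD_PRIV_SET_REGDOMAIN */
    #define GET_REGDOMAIN (FIRST_PRIV + 1)    /* mirrors ZD_PRIV_GET_REGDOMAIN */
    #define PRIV_OFFSET(x) [(x) - FIRST_PRIV]

    static const char *handlers[] = {
        PRIV_OFFSET(SET_REGDOMAIN) = "set_regdomain",
        PRIV_OFFSET(GET_REGDOMAIN) = "get_regdomain",
    };

    int main(void)
    {
        printf("slot 0: %s, slot 1: %s\n", handlers[0], handlers[1]);
        return 0;
    }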
70 | |||
71 | static int iw_get_name(struct net_device *netdev, | ||
72 | struct iw_request_info *info, | ||
73 | union iwreq_data *req, char *extra) | ||
74 | { | ||
75 | /* FIXME: check whether 802.11a will also be supported; also add | ||
76 | * ZD1211B if we support it. | ||
77 | */ | ||
78 | strlcpy(req->name, "802.11g zd1211", IFNAMSIZ); | ||
79 | return 0; | ||
80 | } | ||
81 | |||
82 | static int iw_set_freq(struct net_device *netdev, | ||
83 | struct iw_request_info *info, | ||
84 | union iwreq_data *req, char *extra) | ||
85 | { | ||
86 | int r; | ||
87 | struct zd_mac *mac = zd_netdev_mac(netdev); | ||
88 | struct iw_freq *freq = &req->freq; | ||
89 | u8 channel; | ||
90 | |||
91 | r = zd_find_channel(&channel, freq); | ||
92 | if (r < 0) | ||
93 | return r; | ||
94 | r = zd_mac_request_channel(mac, channel); | ||
95 | return r; | ||
96 | } | ||
97 | |||
98 | static int iw_get_freq(struct net_device *netdev, | ||
99 | struct iw_request_info *info, | ||
100 | union iwreq_data *req, char *extra) | ||
101 | { | ||
102 | int r; | ||
103 | struct zd_mac *mac = zd_netdev_mac(netdev); | ||
104 | struct iw_freq *freq = &req->freq; | ||
105 | u8 channel; | ||
106 | u8 flags; | ||
107 | |||
108 | r = zd_mac_get_channel(mac, &channel, &flags); | ||
109 | if (r) | ||
110 | return r; | ||
111 | |||
112 | freq->flags = (flags & MAC_FIXED_CHANNEL) ? | ||
113 | IW_FREQ_FIXED : IW_FREQ_AUTO; | ||
114 | dev_dbg_f(zd_mac_dev(mac), "channel %s\n", | ||
115 | (flags & MAC_FIXED_CHANNEL) ? "fixed" : "auto"); | ||
116 | return zd_channel_to_freq(freq, channel); | ||
117 | } | ||
118 | |||
119 | static int iw_set_mode(struct net_device *netdev, | ||
120 | struct iw_request_info *info, | ||
121 | union iwreq_data *req, char *extra) | ||
122 | { | ||
123 | return zd_mac_set_mode(zd_netdev_mac(netdev), req->mode); | ||
124 | } | ||
125 | |||
126 | static int iw_get_mode(struct net_device *netdev, | ||
127 | struct iw_request_info *info, | ||
128 | union iwreq_data *req, char *extra) | ||
129 | { | ||
130 | return zd_mac_get_mode(zd_netdev_mac(netdev), &req->mode); | ||
131 | } | ||
132 | |||
133 | static int iw_get_range(struct net_device *netdev, | ||
134 | struct iw_request_info *info, | ||
135 | union iwreq_data *req, char *extra) | ||
136 | { | ||
137 | struct iw_range *range = (struct iw_range *)extra; | ||
138 | |||
139 | dev_dbg_f(zd_mac_dev(zd_netdev_mac(netdev)), "\n"); | ||
140 | req->data.length = sizeof(*range); | ||
141 | return zd_mac_get_range(zd_netdev_mac(netdev), range); | ||
142 | } | ||
143 | |||
144 | static int iw_set_encode(struct net_device *netdev, | ||
145 | struct iw_request_info *info, | ||
146 | union iwreq_data *data, | ||
147 | char *extra) | ||
148 | { | ||
149 | return ieee80211_wx_set_encode(zd_netdev_ieee80211(netdev), info, | ||
150 | data, extra); | ||
151 | } | ||
152 | |||
153 | static int iw_get_encode(struct net_device *netdev, | ||
154 | struct iw_request_info *info, | ||
155 | union iwreq_data *data, | ||
156 | char *extra) | ||
157 | { | ||
158 | return ieee80211_wx_get_encode(zd_netdev_ieee80211(netdev), info, | ||
159 | data, extra); | ||
160 | } | ||
161 | |||
162 | static int iw_set_encodeext(struct net_device *netdev, | ||
163 | struct iw_request_info *info, | ||
164 | union iwreq_data *data, | ||
165 | char *extra) | ||
166 | { | ||
167 | return ieee80211_wx_set_encodeext(zd_netdev_ieee80211(netdev), info, | ||
168 | data, extra); | ||
169 | } | ||
170 | |||
171 | static int iw_get_encodeext(struct net_device *netdev, | ||
172 | struct iw_request_info *info, | ||
173 | union iwreq_data *data, | ||
174 | char *extra) | ||
175 | { | ||
176 | return ieee80211_wx_get_encodeext(zd_netdev_ieee80211(netdev), info, | ||
177 | data, extra); | ||
178 | } | ||
179 | |||
180 | #define WX(x) [(x)-SIOCIWFIRST] | ||
181 | |||
182 | static const iw_handler zd_standard_iw_handlers[] = { | ||
183 | WX(SIOCGIWNAME) = iw_get_name, | ||
184 | WX(SIOCSIWFREQ) = iw_set_freq, | ||
185 | WX(SIOCGIWFREQ) = iw_get_freq, | ||
186 | WX(SIOCSIWMODE) = iw_set_mode, | ||
187 | WX(SIOCGIWMODE) = iw_get_mode, | ||
188 | WX(SIOCGIWRANGE) = iw_get_range, | ||
189 | WX(SIOCSIWENCODE) = iw_set_encode, | ||
190 | WX(SIOCGIWENCODE) = iw_get_encode, | ||
191 | WX(SIOCSIWENCODEEXT) = iw_set_encodeext, | ||
192 | WX(SIOCGIWENCODEEXT) = iw_get_encodeext, | ||
193 | WX(SIOCSIWAUTH) = ieee80211_wx_set_auth, | ||
194 | WX(SIOCGIWAUTH) = ieee80211_wx_get_auth, | ||
195 | WX(SIOCSIWSCAN) = ieee80211softmac_wx_trigger_scan, | ||
196 | WX(SIOCGIWSCAN) = ieee80211softmac_wx_get_scan_results, | ||
197 | WX(SIOCSIWESSID) = ieee80211softmac_wx_set_essid, | ||
198 | WX(SIOCGIWESSID) = ieee80211softmac_wx_get_essid, | ||
199 | WX(SIOCSIWAP) = ieee80211softmac_wx_set_wap, | ||
200 | WX(SIOCGIWAP) = ieee80211softmac_wx_get_wap, | ||
201 | WX(SIOCSIWRATE) = ieee80211softmac_wx_set_rate, | ||
202 | WX(SIOCGIWRATE) = ieee80211softmac_wx_get_rate, | ||
203 | WX(SIOCSIWGENIE) = ieee80211softmac_wx_set_genie, | ||
204 | WX(SIOCGIWGENIE) = ieee80211softmac_wx_get_genie, | ||
205 | WX(SIOCSIWMLME) = ieee80211softmac_wx_set_mlme, | ||
206 | }; | ||
207 | |||
208 | static const struct iw_handler_def iw_handler_def = { | ||
209 | .standard = zd_standard_iw_handlers, | ||
210 | .num_standard = ARRAY_SIZE(zd_standard_iw_handlers), | ||
211 | .private = zd_priv_handler, | ||
212 | .num_private = ARRAY_SIZE(zd_priv_handler), | ||
213 | .private_args = zd_priv_args, | ||
214 | .num_private_args = ARRAY_SIZE(zd_priv_args), | ||
215 | .get_wireless_stats = zd_mac_get_wireless_stats, | ||
216 | }; | ||
217 | |||
218 | struct net_device *zd_netdev_alloc(struct usb_interface *intf) | ||
219 | { | ||
220 | int r; | ||
221 | struct net_device *netdev; | ||
222 | struct zd_mac *mac; | ||
223 | |||
224 | netdev = alloc_ieee80211softmac(sizeof(struct zd_mac)); | ||
225 | if (!netdev) { | ||
226 | dev_dbg_f(&intf->dev, "out of memory\n"); | ||
227 | return NULL; | ||
228 | } | ||
229 | |||
230 | mac = zd_netdev_mac(netdev); | ||
231 | r = zd_mac_init(mac, netdev, intf); | ||
232 | if (r) { | ||
233 | usb_set_intfdata(intf, NULL); | ||
234 | free_ieee80211(netdev); | ||
235 | return NULL; | ||
236 | } | ||
237 | |||
238 | SET_MODULE_OWNER(netdev); | ||
239 | SET_NETDEV_DEV(netdev, &intf->dev); | ||
240 | |||
241 | dev_dbg_f(&intf->dev, "netdev->flags %#06hx\n", netdev->flags); | ||
242 | dev_dbg_f(&intf->dev, "netdev->features %#010lx\n", netdev->features); | ||
243 | |||
244 | netdev->open = zd_mac_open; | ||
245 | netdev->stop = zd_mac_stop; | ||
246 | /* netdev->get_stats = */ | ||
247 | /* netdev->set_multicast_list = */ | ||
248 | netdev->set_mac_address = zd_mac_set_mac_address; | ||
249 | netdev->wireless_handlers = &iw_handler_def; | ||
250 | /* netdev->ethtool_ops = */ | ||
251 | |||
252 | return netdev; | ||
253 | } | ||
254 | |||
255 | void zd_netdev_free(struct net_device *netdev) | ||
256 | { | ||
257 | if (!netdev) | ||
258 | return; | ||
259 | |||
260 | zd_mac_clear(zd_netdev_mac(netdev)); | ||
261 | free_ieee80211(netdev); | ||
262 | } | ||
263 | |||
264 | void zd_netdev_disconnect(struct net_device *netdev) | ||
265 | { | ||
266 | unregister_netdev(netdev); | ||
267 | } | ||
diff --git a/drivers/net/wireless/zd1211rw/zd_netdev.h b/drivers/net/wireless/zd1211rw/zd_netdev.h new file mode 100644 index 000000000000..374a957073c1 --- /dev/null +++ b/drivers/net/wireless/zd1211rw/zd_netdev.h | |||
@@ -0,0 +1,45 @@ | |||
1 | /* zd_netdev.h: Header for net device related functions. | ||
2 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License as published by | ||
5 | * the Free Software Foundation; either version 2 of the License, or | ||
6 | * (at your option) any later version. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program; if not, write to the Free Software | ||
15 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
16 | */ | ||
17 | |||
18 | #ifndef _ZD_NETDEV_H | ||
19 | #define _ZD_NETDEV_H | ||
20 | |||
21 | #include <linux/usb.h> | ||
22 | #include <linux/netdevice.h> | ||
23 | #include <net/ieee80211.h> | ||
24 | |||
25 | #define ZD_PRIV_SET_REGDOMAIN (SIOCIWFIRSTPRIV) | ||
26 | #define ZD_PRIV_GET_REGDOMAIN (SIOCIWFIRSTPRIV+1) | ||
27 | |||
28 | static inline struct ieee80211_device *zd_netdev_ieee80211( | ||
29 | struct net_device *ndev) | ||
30 | { | ||
31 | return netdev_priv(ndev); | ||
32 | } | ||
33 | |||
34 | static inline struct net_device *zd_ieee80211_to_netdev( | ||
35 | struct ieee80211_device *ieee) | ||
36 | { | ||
37 | return ieee->dev; | ||
38 | } | ||
39 | |||
40 | struct net_device *zd_netdev_alloc(struct usb_interface *intf); | ||
41 | void zd_netdev_free(struct net_device *netdev); | ||
42 | |||
43 | void zd_netdev_disconnect(struct net_device *netdev); | ||
44 | |||
45 | #endif /* _ZD_NETDEV_H */ | ||
diff --git a/drivers/net/wireless/zd1211rw/zd_rf.c b/drivers/net/wireless/zd1211rw/zd_rf.c new file mode 100644 index 000000000000..d3770d2c61bc --- /dev/null +++ b/drivers/net/wireless/zd1211rw/zd_rf.c | |||
@@ -0,0 +1,151 @@ | |||
1 | /* zd_rf.c | ||
2 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License as published by | ||
5 | * the Free Software Foundation; either version 2 of the License, or | ||
6 | * (at your option) any later version. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program; if not, write to the Free Software | ||
15 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
16 | */ | ||
17 | |||
18 | #include <linux/errno.h> | ||
19 | #include <linux/string.h> | ||
20 | |||
21 | #include "zd_def.h" | ||
22 | #include "zd_rf.h" | ||
23 | #include "zd_ieee80211.h" | ||
24 | #include "zd_chip.h" | ||
25 | |||
26 | static const char *rfs[] = { | ||
27 | [0] = "unknown RF0", | ||
28 | [1] = "unknown RF1", | ||
29 | [UW2451_RF] = "UW2451_RF", | ||
30 | [UCHIP_RF] = "UCHIP_RF", | ||
31 | [AL2230_RF] = "AL2230_RF", | ||
32 | [AL7230B_RF] = "AL7230B_RF", | ||
33 | [THETA_RF] = "THETA_RF", | ||
34 | [AL2210_RF] = "AL2210_RF", | ||
35 | [MAXIM_NEW_RF] = "MAXIM_NEW_RF", | ||
36 | [UW2453_RF] = "UW2453_RF", | ||
37 | [AL2230S_RF] = "AL2230S_RF", | ||
38 | [RALINK_RF] = "RALINK_RF", | ||
39 | [INTERSIL_RF] = "INTERSIL_RF", | ||
40 | [RF2959_RF] = "RF2959_RF", | ||
41 | [MAXIM_NEW2_RF] = "MAXIM_NEW2_RF", | ||
42 | [PHILIPS_RF] = "PHILIPS_RF", | ||
43 | }; | ||
44 | |||
45 | const char *zd_rf_name(u8 type) | ||
46 | { | ||
47 | if (type & 0xf0) | ||
48 | type = 0; | ||
49 | return rfs[type]; | ||
50 | } | ||
51 | |||
52 | void zd_rf_init(struct zd_rf *rf) | ||
53 | { | ||
54 | memset(rf, 0, sizeof(*rf)); | ||
55 | } | ||
56 | |||
57 | void zd_rf_clear(struct zd_rf *rf) | ||
58 | { | ||
59 | memset(rf, 0, sizeof(*rf)); | ||
60 | } | ||
61 | |||
62 | int zd_rf_init_hw(struct zd_rf *rf, u8 type) | ||
63 | { | ||
64 | int r, t; | ||
65 | struct zd_chip *chip = zd_rf_to_chip(rf); | ||
66 | |||
67 | ZD_ASSERT(mutex_is_locked(&chip->mutex)); | ||
68 | switch (type) { | ||
69 | case RF2959_RF: | ||
70 | r = zd_rf_init_rf2959(rf); | ||
71 | if (r) | ||
72 | return r; | ||
73 | break; | ||
74 | case AL2230_RF: | ||
75 | r = zd_rf_init_al2230(rf); | ||
76 | if (r) | ||
77 | return r; | ||
78 | break; | ||
79 | default: | ||
80 | dev_err(zd_chip_dev(chip), | ||
81 | "RF %s %#x is not supported\n", zd_rf_name(type), type); | ||
82 | rf->type = 0; | ||
83 | return -ENODEV; | ||
84 | } | ||
85 | |||
86 | rf->type = type; | ||
87 | |||
88 | r = zd_chip_lock_phy_regs(chip); | ||
89 | if (r) | ||
90 | return r; | ||
91 | t = rf->init_hw(rf); | ||
92 | r = zd_chip_unlock_phy_regs(chip); | ||
93 | if (t) | ||
94 | r = t; | ||
95 | return r; | ||
96 | } | ||
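zd_rf_init_hw() and the two radio-switch helpers below share the same shape: take the PHY-register lock, run the RF-specific callback, always unlock, and report the callback's error in preference to any unlock error. The pattern in isolation, with hypothetical stand-in functions:

    #include <stdio.h>

    static int lock(void)    { return 0; }   /* hypothetical stand-ins */
    static int unlock(void)  { return 0; }
    static int do_work(void) { return -5; }  /* pretend the callback failed */

    /* Run do_work() with the lock held; report its error in preference to
     * any unlock error, but always attempt the unlock. */
    static int with_lock(void)
    {
        int r, t;

        r = lock();
        if (r)
            return r;
        t = do_work();
        r = unlock();
        if (t)
            r = t;
        return r;
    }

    int main(void)
    {
        printf("with_lock() = %d\n", with_lock());   /* prints -5 */
        return 0;
    }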
97 | |||
98 | int zd_rf_scnprint_id(struct zd_rf *rf, char *buffer, size_t size) | ||
99 | { | ||
100 | return scnprintf(buffer, size, "%s", zd_rf_name(rf->type)); | ||
101 | } | ||
102 | |||
103 | int zd_rf_set_channel(struct zd_rf *rf, u8 channel) | ||
104 | { | ||
105 | int r; | ||
106 | |||
107 | ZD_ASSERT(mutex_is_locked(&zd_rf_to_chip(rf)->mutex)); | ||
108 | if (channel < MIN_CHANNEL24) | ||
109 | return -EINVAL; | ||
110 | if (channel > MAX_CHANNEL24) | ||
111 | return -EINVAL; | ||
112 | dev_dbg_f(zd_chip_dev(zd_rf_to_chip(rf)), "channel: %d\n", channel); | ||
113 | |||
114 | r = rf->set_channel(rf, channel); | ||
115 | if (r >= 0) | ||
116 | rf->channel = channel; | ||
117 | return r; | ||
118 | } | ||
119 | |||
120 | int zd_switch_radio_on(struct zd_rf *rf) | ||
121 | { | ||
122 | int r, t; | ||
123 | struct zd_chip *chip = zd_rf_to_chip(rf); | ||
124 | |||
125 | ZD_ASSERT(mutex_is_locked(&chip->mutex)); | ||
126 | r = zd_chip_lock_phy_regs(chip); | ||
127 | if (r) | ||
128 | return r; | ||
129 | t = rf->switch_radio_on(rf); | ||
130 | r = zd_chip_unlock_phy_regs(chip); | ||
131 | if (t) | ||
132 | r = t; | ||
133 | return r; | ||
134 | } | ||
135 | |||
136 | int zd_switch_radio_off(struct zd_rf *rf) | ||
137 | { | ||
138 | int r, t; | ||
139 | struct zd_chip *chip = zd_rf_to_chip(rf); | ||
140 | |||
141 | /* TODO: move phy regs handling to zd_chip */ | ||
142 | ZD_ASSERT(mutex_is_locked(&chip->mutex)); | ||
143 | r = zd_chip_lock_phy_regs(chip); | ||
144 | if (r) | ||
145 | return r; | ||
146 | t = rf->switch_radio_off(rf); | ||
147 | r = zd_chip_unlock_phy_regs(chip); | ||
148 | if (t) | ||
149 | r = t; | ||
150 | return r; | ||
151 | } | ||
diff --git a/drivers/net/wireless/zd1211rw/zd_rf.h b/drivers/net/wireless/zd1211rw/zd_rf.h new file mode 100644 index 000000000000..ea30f693fcc8 --- /dev/null +++ b/drivers/net/wireless/zd1211rw/zd_rf.h | |||
@@ -0,0 +1,82 @@ | |||
1 | /* zd_rf.h | ||
2 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License as published by | ||
5 | * the Free Software Foundation; either version 2 of the License, or | ||
6 | * (at your option) any later version. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program; if not, write to the Free Software | ||
15 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
16 | */ | ||
17 | |||
18 | #ifndef _ZD_RF_H | ||
19 | #define _ZD_RF_H | ||
20 | |||
21 | #include "zd_types.h" | ||
22 | |||
23 | #define UW2451_RF 0x2 | ||
24 | #define UCHIP_RF 0x3 | ||
25 | #define AL2230_RF 0x4 | ||
26 | #define AL7230B_RF 0x5 /* a,b,g */ | ||
27 | #define THETA_RF 0x6 | ||
28 | #define AL2210_RF 0x7 | ||
29 | #define MAXIM_NEW_RF 0x8 | ||
30 | #define UW2453_RF 0x9 | ||
31 | #define AL2230S_RF 0xa | ||
32 | #define RALINK_RF 0xb | ||
33 | #define INTERSIL_RF 0xc | ||
34 | #define RF2959_RF 0xd | ||
35 | #define MAXIM_NEW2_RF 0xe | ||
36 | #define PHILIPS_RF 0xf | ||
37 | |||
38 | #define RF_CHANNEL(ch) [(ch)-1] | ||
39 | |||
40 | /* Provides functions of the RF transceiver. */ | ||
41 | |||
42 | enum { | ||
43 | RF_REG_BITS = 6, | ||
44 | RF_VALUE_BITS = 18, | ||
45 | RF_RV_BITS = RF_REG_BITS + RF_VALUE_BITS, | ||
46 | }; | ||
47 | |||
48 | struct zd_rf { | ||
49 | u8 type; | ||
50 | |||
51 | u8 channel; | ||
52 | /* | ||
53 | * Whether this RF should patch the 6M band edge | ||
54 | * (assuming E2P_POD agrees) | ||
55 | */ | ||
56 | u8 patch_6m_band_edge:1; | ||
57 | |||
58 | /* RF-specific functions */ | ||
59 | int (*init_hw)(struct zd_rf *rf); | ||
60 | int (*set_channel)(struct zd_rf *rf, u8 channel); | ||
61 | int (*switch_radio_on)(struct zd_rf *rf); | ||
62 | int (*switch_radio_off)(struct zd_rf *rf); | ||
63 | }; | ||
64 | |||
65 | const char *zd_rf_name(u8 type); | ||
66 | void zd_rf_init(struct zd_rf *rf); | ||
67 | void zd_rf_clear(struct zd_rf *rf); | ||
68 | int zd_rf_init_hw(struct zd_rf *rf, u8 type); | ||
69 | |||
70 | int zd_rf_scnprint_id(struct zd_rf *rf, char *buffer, size_t size); | ||
71 | |||
72 | int zd_rf_set_channel(struct zd_rf *rf, u8 channel); | ||
73 | |||
74 | int zd_switch_radio_on(struct zd_rf *rf); | ||
75 | int zd_switch_radio_off(struct zd_rf *rf); | ||
76 | |||
77 | /* Functions for individual RF chips */ | ||
78 | |||
79 | int zd_rf_init_rf2959(struct zd_rf *rf); | ||
80 | int zd_rf_init_al2230(struct zd_rf *rf); | ||
81 | |||
82 | #endif /* _ZD_RF_H */ | ||
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_al2230.c b/drivers/net/wireless/zd1211rw/zd_rf_al2230.c new file mode 100644 index 000000000000..0948b25f660d --- /dev/null +++ b/drivers/net/wireless/zd1211rw/zd_rf_al2230.c | |||
@@ -0,0 +1,308 @@ | |||
1 | /* zd_rf_al2230.c: Functions for the AL2230 RF controller | ||
2 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License as published by | ||
5 | * the Free Software Foundation; either version 2 of the License, or | ||
6 | * (at your option) any later version. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program; if not, write to the Free Software | ||
15 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
16 | */ | ||
17 | |||
18 | #include <linux/kernel.h> | ||
19 | |||
20 | #include "zd_rf.h" | ||
21 | #include "zd_usb.h" | ||
22 | #include "zd_chip.h" | ||
23 | |||
24 | static const u32 al2230_table[][3] = { | ||
25 | RF_CHANNEL( 1) = { 0x03f790, 0x033331, 0x00000d, }, | ||
26 | RF_CHANNEL( 2) = { 0x03f790, 0x0b3331, 0x00000d, }, | ||
27 | RF_CHANNEL( 3) = { 0x03e790, 0x033331, 0x00000d, }, | ||
28 | RF_CHANNEL( 4) = { 0x03e790, 0x0b3331, 0x00000d, }, | ||
29 | RF_CHANNEL( 5) = { 0x03f7a0, 0x033331, 0x00000d, }, | ||
30 | RF_CHANNEL( 6) = { 0x03f7a0, 0x0b3331, 0x00000d, }, | ||
31 | RF_CHANNEL( 7) = { 0x03e7a0, 0x033331, 0x00000d, }, | ||
32 | RF_CHANNEL( 8) = { 0x03e7a0, 0x0b3331, 0x00000d, }, | ||
33 | RF_CHANNEL( 9) = { 0x03f7b0, 0x033331, 0x00000d, }, | ||
34 | RF_CHANNEL(10) = { 0x03f7b0, 0x0b3331, 0x00000d, }, | ||
35 | RF_CHANNEL(11) = { 0x03e7b0, 0x033331, 0x00000d, }, | ||
36 | RF_CHANNEL(12) = { 0x03e7b0, 0x0b3331, 0x00000d, }, | ||
37 | RF_CHANNEL(13) = { 0x03f7c0, 0x033331, 0x00000d, }, | ||
38 | RF_CHANNEL(14) = { 0x03e7c0, 0x066661, 0x00000d, }, | ||
39 | }; | ||
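RF_CHANNEL(n) expands to the designated initializer [n-1], so the table above is indexed directly with channel - 1 for channels 1..14, which is exactly how al2230_set_channel() reads it. A stripped-down illustration using two of the values from the table:

    #include <stdio.h>

    #define RF_CHANNEL(ch) [(ch) - 1]   /* same macro as in zd_rf.h */

    static const unsigned first_word[] = {
        RF_CHANNEL(1)  = 0x03f790,
        RF_CHANNEL(14) = 0x03e7c0,
    };

    int main(void)
    {
        unsigned channel = 14;

        /* Look a channel up the same way al2230_set_channel() does. */
        printf("channel %u -> 0x%06x\n", channel, first_word[channel - 1]);
        return 0;
    }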
40 | |||
41 | static int zd1211_al2230_init_hw(struct zd_rf *rf) | ||
42 | { | ||
43 | int r; | ||
44 | struct zd_chip *chip = zd_rf_to_chip(rf); | ||
45 | |||
46 | static const struct zd_ioreq16 ioreqs[] = { | ||
47 | { CR15, 0x20 }, { CR23, 0x40 }, { CR24, 0x20 }, | ||
48 | { CR26, 0x11 }, { CR28, 0x3e }, { CR29, 0x00 }, | ||
49 | { CR44, 0x33 }, { CR106, 0x2a }, { CR107, 0x1a }, | ||
50 | { CR109, 0x09 }, { CR110, 0x27 }, { CR111, 0x2b }, | ||
51 | { CR112, 0x2b }, { CR119, 0x0a }, { CR10, 0x89 }, | ||
52 | /* for newest (3rd cut) AL2230 */ | ||
53 | { CR17, 0x28 }, | ||
54 | { CR26, 0x93 }, { CR34, 0x30 }, | ||
55 | /* for newest (3rd cut) AL2230 */ | ||
56 | { CR35, 0x3e }, | ||
57 | { CR41, 0x24 }, { CR44, 0x32 }, | ||
58 | /* for newest (3rd cut) AL2230 */ | ||
59 | { CR46, 0x96 }, | ||
60 | { CR47, 0x1e }, { CR79, 0x58 }, { CR80, 0x30 }, | ||
61 | { CR81, 0x30 }, { CR87, 0x0a }, { CR89, 0x04 }, | ||
62 | { CR92, 0x0a }, { CR99, 0x28 }, { CR100, 0x00 }, | ||
63 | { CR101, 0x13 }, { CR102, 0x27 }, { CR106, 0x24 }, | ||
64 | { CR107, 0x2a }, { CR109, 0x09 }, { CR110, 0x13 }, | ||
65 | { CR111, 0x1f }, { CR112, 0x1f }, { CR113, 0x27 }, | ||
66 | { CR114, 0x27 }, | ||
67 | /* for newest (3rd cut) AL2230 */ | ||
68 | { CR115, 0x24 }, | ||
69 | { CR116, 0x24 }, { CR117, 0xf4 }, { CR118, 0xfc }, | ||
70 | { CR119, 0x10 }, { CR120, 0x4f }, { CR121, 0x77 }, | ||
71 | { CR122, 0xe0 }, { CR137, 0x88 }, { CR252, 0xff }, | ||
72 | { CR253, 0xff }, | ||
73 | |||
74 | /* The following writes happen separately in the vendor driver */ | ||
75 | { }, | ||
76 | |||
77 | /* shdnb(PLL_ON)=0 */ | ||
78 | { CR251, 0x2f }, | ||
79 | /* shdnb(PLL_ON)=1 */ | ||
80 | { CR251, 0x3f }, | ||
81 | { CR138, 0x28 }, { CR203, 0x06 }, | ||
82 | }; | ||
83 | |||
84 | static const u32 rv[] = { | ||
85 | /* Channel 1 */ | ||
86 | 0x03f790, | ||
87 | 0x033331, | ||
88 | 0x00000d, | ||
89 | |||
90 | 0x0b3331, | ||
91 | 0x03b812, | ||
92 | 0x00fff3, | ||
93 | 0x000da4, | ||
94 | 0x0f4dc5, /* fix freq shift, 0x04edc5 */ | ||
95 | 0x0805b6, | ||
96 | 0x011687, | ||
97 | 0x000688, | ||
98 | 0x0403b9, /* external control TX power (CR31) */ | ||
99 | 0x00dbba, | ||
100 | 0x00099b, | ||
101 | 0x0bdffc, | ||
102 | 0x00000d, | ||
103 | 0x00500f, | ||
104 | |||
105 | /* These writes happen separately in the vendor driver */ | ||
106 | 0x00d00f, | ||
107 | 0x004c0f, | ||
108 | 0x00540f, | ||
109 | 0x00700f, | ||
110 | 0x00500f, | ||
111 | }; | ||
112 | |||
113 | r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); | ||
114 | if (r) | ||
115 | return r; | ||
116 | |||
117 | r = zd_rfwritev_locked(chip, rv, ARRAY_SIZE(rv), RF_RV_BITS); | ||
118 | if (r) | ||
119 | return r; | ||
120 | |||
121 | return 0; | ||
122 | } | ||
123 | |||
124 | static int zd1211b_al2230_init_hw(struct zd_rf *rf) | ||
125 | { | ||
126 | int r; | ||
127 | struct zd_chip *chip = zd_rf_to_chip(rf); | ||
128 | |||
129 | static const struct zd_ioreq16 ioreqs1[] = { | ||
130 | { CR10, 0x89 }, { CR15, 0x20 }, | ||
131 | { CR17, 0x2B }, /* for newest(3rd cut) AL2230 */ | ||
132 | { CR23, 0x40 }, { CR24, 0x20 }, { CR26, 0x93 }, | ||
133 | { CR28, 0x3e }, { CR29, 0x00 }, | ||
134 | { CR33, 0x28 }, /* 5621 */ | ||
135 | { CR34, 0x30 }, | ||
136 | { CR35, 0x3e }, /* for newest(3rd cut) AL2230 */ | ||
137 | { CR41, 0x24 }, { CR44, 0x32 }, | ||
138 | { CR46, 0x99 }, /* for newest(3rd cut) AL2230 */ | ||
139 | { CR47, 0x1e }, | ||
140 | |||
141 | /* ZD1211B 05.06.10 */ | ||
142 | { CR48, 0x00 }, { CR49, 0x00 }, { CR51, 0x01 }, | ||
143 | { CR52, 0x80 }, { CR53, 0x7e }, { CR65, 0x00 }, | ||
144 | { CR66, 0x00 }, { CR67, 0x00 }, { CR68, 0x00 }, | ||
145 | { CR69, 0x28 }, | ||
146 | |||
147 | { CR79, 0x58 }, { CR80, 0x30 }, { CR81, 0x30 }, | ||
148 | { CR87, 0x0a }, { CR89, 0x04 }, | ||
149 | { CR91, 0x00 }, /* 5621 */ | ||
150 | { CR92, 0x0a }, | ||
151 | { CR98, 0x8d }, /* 4804, for 1212 new algorithm */ | ||
152 | { CR99, 0x00 }, /* 5621 */ | ||
153 | { CR101, 0x13 }, { CR102, 0x27 }, | ||
154 | { CR106, 0x24 }, /* for newest(3rd cut) AL2230 */ | ||
155 | { CR107, 0x2a }, | ||
156 | { CR109, 0x13 }, /* 4804, for 1212 new algorithm */ | ||
157 | { CR110, 0x1f }, /* 4804, for 1212 new algorithm */ | ||
158 | { CR111, 0x1f }, { CR112, 0x1f }, { CR113, 0x27 }, | ||
159 | { CR114, 0x27 }, | ||
160 | { CR115, 0x26 }, /* 24->26 at 4902 for newest(3rd cut) AL2230 */ | ||
161 | { CR116, 0x24 }, | ||
162 | { CR117, 0xfa }, /* for 1211b */ | ||
163 | { CR118, 0xfa }, /* for 1211b */ | ||
164 | { CR119, 0x10 }, | ||
165 | { CR120, 0x4f }, | ||
166 | { CR121, 0x6c }, /* for 1211b */ | ||
167 | { CR122, 0xfc }, /* E0->FC at 4902 */ | ||
168 | { CR123, 0x57 }, /* 5623 */ | ||
169 | { CR125, 0xad }, /* 4804, for 1212 new algorithm */ | ||
170 | { CR126, 0x6c }, /* 5614 */ | ||
171 | { CR127, 0x03 }, /* 4804, for 1212 new algorithm */ | ||
172 | { CR137, 0x50 }, /* 5614 */ | ||
173 | { CR138, 0xa8 }, | ||
174 | { CR144, 0xac }, /* 5621 */ | ||
175 | { CR150, 0x0d }, { CR252, 0x00 }, { CR253, 0x00 }, | ||
176 | }; | ||
177 | |||
178 | static const u32 rv1[] = { | ||
179 | /* channel 1 */ | ||
180 | 0x03f790, | ||
181 | 0x033331, | ||
182 | 0x00000d, | ||
183 | |||
184 | 0x0b3331, | ||
185 | 0x03b812, | ||
186 | 0x00fff3, | ||
187 | 0x0005a4, | ||
188 | 0x0f4dc5, /* fix freq shift 0x044dc5 */ | ||
189 | 0x0805b6, | ||
190 | 0x0146c7, | ||
191 | 0x000688, | ||
192 | 0x0403b9, /* External control TX power (CR31) */ | ||
193 | 0x00dbba, | ||
194 | 0x00099b, | ||
195 | 0x0bdffc, | ||
196 | 0x00000d, | ||
197 | 0x00580f, | ||
198 | }; | ||
199 | |||
200 | static const struct zd_ioreq16 ioreqs2[] = { | ||
201 | { CR47, 0x1e }, { CR_RFCFG, 0x03 }, | ||
202 | }; | ||
203 | |||
204 | static const u32 rv2[] = { | ||
205 | 0x00880f, | ||
206 | 0x00080f, | ||
207 | }; | ||
208 | |||
209 | static const struct zd_ioreq16 ioreqs3[] = { | ||
210 | { CR_RFCFG, 0x00 }, { CR47, 0x1e }, { CR251, 0x7f }, | ||
211 | }; | ||
212 | |||
213 | static const u32 rv3[] = { | ||
214 | 0x00d80f, | ||
215 | 0x00780f, | ||
216 | 0x00580f, | ||
217 | }; | ||
218 | |||
219 | static const struct zd_ioreq16 ioreqs4[] = { | ||
220 | { CR138, 0x28 }, { CR203, 0x06 }, | ||
221 | }; | ||
222 | |||
223 | r = zd_iowrite16a_locked(chip, ioreqs1, ARRAY_SIZE(ioreqs1)); | ||
224 | if (r) | ||
225 | return r; | ||
226 | r = zd_rfwritev_locked(chip, rv1, ARRAY_SIZE(rv1), RF_RV_BITS); | ||
227 | if (r) | ||
228 | return r; | ||
229 | r = zd_iowrite16a_locked(chip, ioreqs2, ARRAY_SIZE(ioreqs2)); | ||
230 | if (r) | ||
231 | return r; | ||
232 | r = zd_rfwritev_locked(chip, rv2, ARRAY_SIZE(rv2), RF_RV_BITS); | ||
233 | if (r) | ||
234 | return r; | ||
235 | r = zd_iowrite16a_locked(chip, ioreqs3, ARRAY_SIZE(ioreqs3)); | ||
236 | if (r) | ||
237 | return r; | ||
238 | r = zd_rfwritev_locked(chip, rv3, ARRAY_SIZE(rv3), RF_RV_BITS); | ||
239 | if (r) | ||
240 | return r; | ||
241 | return zd_iowrite16a_locked(chip, ioreqs4, ARRAY_SIZE(ioreqs4)); | ||
242 | } | ||
243 | |||
244 | static int al2230_set_channel(struct zd_rf *rf, u8 channel) | ||
245 | { | ||
246 | int r; | ||
247 | const u32 *rv = al2230_table[channel-1]; | ||
248 | struct zd_chip *chip = zd_rf_to_chip(rf); | ||
249 | static const struct zd_ioreq16 ioreqs[] = { | ||
250 | { CR138, 0x28 }, | ||
251 | { CR203, 0x06 }, | ||
252 | }; | ||
253 | |||
254 | r = zd_rfwritev_locked(chip, rv, 3, RF_RV_BITS); | ||
255 | if (r) | ||
256 | return r; | ||
257 | return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); | ||
258 | } | ||
259 | |||
260 | static int zd1211_al2230_switch_radio_on(struct zd_rf *rf) | ||
261 | { | ||
262 | struct zd_chip *chip = zd_rf_to_chip(rf); | ||
263 | static const struct zd_ioreq16 ioreqs[] = { | ||
264 | { CR11, 0x00 }, | ||
265 | { CR251, 0x3f }, | ||
266 | }; | ||
267 | |||
268 | return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); | ||
269 | } | ||
270 | |||
271 | static int zd1211b_al2230_switch_radio_on(struct zd_rf *rf) | ||
272 | { | ||
273 | struct zd_chip *chip = zd_rf_to_chip(rf); | ||
274 | static const struct zd_ioreq16 ioreqs[] = { | ||
275 | { CR11, 0x00 }, | ||
276 | { CR251, 0x7f }, | ||
277 | }; | ||
278 | |||
279 | return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); | ||
280 | } | ||
281 | |||
282 | static int al2230_switch_radio_off(struct zd_rf *rf) | ||
283 | { | ||
284 | struct zd_chip *chip = zd_rf_to_chip(rf); | ||
285 | static const struct zd_ioreq16 ioreqs[] = { | ||
286 | { CR11, 0x04 }, | ||
287 | { CR251, 0x2f }, | ||
288 | }; | ||
289 | |||
290 | return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); | ||
291 | } | ||
292 | |||
293 | int zd_rf_init_al2230(struct zd_rf *rf) | ||
294 | { | ||
295 | struct zd_chip *chip = zd_rf_to_chip(rf); | ||
296 | |||
297 | rf->set_channel = al2230_set_channel; | ||
298 | rf->switch_radio_off = al2230_switch_radio_off; | ||
299 | if (chip->is_zd1211b) { | ||
300 | rf->init_hw = zd1211b_al2230_init_hw; | ||
301 | rf->switch_radio_on = zd1211b_al2230_switch_radio_on; | ||
302 | } else { | ||
303 | rf->init_hw = zd1211_al2230_init_hw; | ||
304 | rf->switch_radio_on = zd1211_al2230_switch_radio_on; | ||
305 | } | ||
306 | rf->patch_6m_band_edge = 1; | ||
307 | return 0; | ||
308 | } | ||
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c b/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c new file mode 100644 index 000000000000..58247271cc24 --- /dev/null +++ b/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c | |||
@@ -0,0 +1,279 @@ | |||
1 | /* zd_rf_rf2959.c: Functions for the RFMD RF2959 controller | ||
2 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License as published by | ||
5 | * the Free Software Foundation; either version 2 of the License, or | ||
6 | * (at your option) any later version. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program; if not, write to the Free Software | ||
15 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
16 | */ | ||
17 | |||
18 | #include <linux/kernel.h> | ||
19 | |||
20 | #include "zd_rf.h" | ||
21 | #include "zd_usb.h" | ||
22 | #include "zd_chip.h" | ||
23 | |||
24 | static u32 rf2959_table[][2] = { | ||
25 | RF_CHANNEL( 1) = { 0x181979, 0x1e6666 }, | ||
26 | RF_CHANNEL( 2) = { 0x181989, 0x1e6666 }, | ||
27 | RF_CHANNEL( 3) = { 0x181999, 0x1e6666 }, | ||
28 | RF_CHANNEL( 4) = { 0x1819a9, 0x1e6666 }, | ||
29 | RF_CHANNEL( 5) = { 0x1819b9, 0x1e6666 }, | ||
30 | RF_CHANNEL( 6) = { 0x1819c9, 0x1e6666 }, | ||
31 | RF_CHANNEL( 7) = { 0x1819d9, 0x1e6666 }, | ||
32 | RF_CHANNEL( 8) = { 0x1819e9, 0x1e6666 }, | ||
33 | RF_CHANNEL( 9) = { 0x1819f9, 0x1e6666 }, | ||
34 | RF_CHANNEL(10) = { 0x181a09, 0x1e6666 }, | ||
35 | RF_CHANNEL(11) = { 0x181a19, 0x1e6666 }, | ||
36 | RF_CHANNEL(12) = { 0x181a29, 0x1e6666 }, | ||
37 | RF_CHANNEL(13) = { 0x181a39, 0x1e6666 }, | ||
38 | RF_CHANNEL(14) = { 0x181a60, 0x1c0000 }, | ||
39 | }; | ||
40 | |||
41 | #if 0 | ||
42 | static int bits(u32 rw, int from, int to) | ||
43 | { | ||
44 | rw &= ~(0xffffffffU << (to+1)); | ||
45 | rw >>= from; | ||
46 | return rw; | ||
47 | } | ||
48 | |||
49 | static int bit(u32 rw, int bit) | ||
50 | { | ||
51 | return bits(rw, bit, bit); | ||
52 | } | ||
53 | |||
54 | static void dump_regwrite(u32 rw) | ||
55 | { | ||
56 | int reg = bits(rw, 18, 22); | ||
57 | int rw_flag = bits(rw, 23, 23); | ||
58 | PDEBUG("rf2959 %#010x reg %d rw %d", rw, reg, rw_flag); | ||
59 | |||
60 | switch (reg) { | ||
61 | case 0: | ||
62 | PDEBUG("reg0 CFG1 ref_sel %d hybernate %d rf_vco_reg_en %d" | ||
63 | " if_vco_reg_en %d if_vga_en %d", | ||
64 | bits(rw, 14, 15), bit(rw, 3), bit(rw, 2), bit(rw, 1), | ||
65 | bit(rw, 0)); | ||
66 | break; | ||
67 | case 1: | ||
68 | PDEBUG("reg1 IFPLL1 pll_en1 %d kv_en1 %d vtc_en1 %d lpf1 %d" | ||
69 | " cpl1 %d pdp1 %d autocal_en1 %d ld_en1 %d ifloopr %d" | ||
70 | " ifloopc %d dac1 %d", | ||
71 | bit(rw, 17), bit(rw, 16), bit(rw, 15), bit(rw, 14), | ||
72 | bit(rw, 13), bit(rw, 12), bit(rw, 11), bit(rw, 10), | ||
73 | bits(rw, 7, 9), bits(rw, 4, 6), bits(rw, 0, 3)); | ||
74 | break; | ||
75 | case 2: | ||
76 | PDEBUG("reg2 IFPLL2 n1 %d num1 %d", | ||
77 | bits(rw, 6, 17), bits(rw, 0, 5)); | ||
78 | break; | ||
79 | case 3: | ||
80 | PDEBUG("reg3 IFPLL3 num %d", bits(rw, 0, 17)); | ||
81 | break; | ||
82 | case 4: | ||
83 | PDEBUG("reg4 IFPLL4 dn1 %#04x ct_def1 %d kv_def1 %d", | ||
84 | bits(rw, 8, 16), bits(rw, 4, 7), bits(rw, 0, 3)); | ||
85 | break; | ||
86 | case 5: | ||
87 | PDEBUG("reg5 RFPLL1 pll_en %d kv_en %d vtc_en %d lpf %d cpl %d" | ||
88 | " pdp %d autocal_en %d ld_en %d rfloopr %d rfloopc %d" | ||
89 | " dac %d", | ||
90 | bit(rw, 17), bit(rw, 16), bit(rw, 15), bit(rw, 14), | ||
91 | bit(rw, 13), bit(rw, 12), bit(rw, 11), bit(rw, 10), | ||
92 | bits(rw, 7, 9), bits(rw, 4, 6), bits(rw, 0,3)); | ||
93 | break; | ||
94 | case 6: | ||
95 | PDEBUG("reg6 RFPLL2 n %d num %d", | ||
96 | bits(rw, 6, 17), bits(rw, 0, 5)); | ||
97 | break; | ||
98 | case 7: | ||
99 | PDEBUG("reg7 RFPLL3 num2 %d", bits(rw, 0, 17)); | ||
100 | break; | ||
101 | case 8: | ||
102 | PDEBUG("reg8 RFPLL4 dn %#06x ct_def %d kv_def %d", | ||
103 | bits(rw, 8, 16), bits(rw, 4, 7), bits(rw, 0, 3)); | ||
104 | break; | ||
105 | case 9: | ||
106 | PDEBUG("reg9 CAL1 tvco %d tlock %d m_ct_value %d ld_window %d", | ||
107 | bits(rw, 13, 17), bits(rw, 8, 12), bits(rw, 3, 7), | ||
108 | bits(rw, 0, 2)); | ||
109 | break; | ||
110 | case 10: | ||
111 | PDEBUG("reg10 TXRX1 rxdcfbbyps %d pcontrol %d txvgc %d" | ||
112 | " rxlpfbw %d txlpfbw %d txdiffmode %d txenmode %d" | ||
113 | " intbiasen %d tybypass %d", | ||
114 | bit(rw, 17), bits(rw, 15, 16), bits(rw, 10, 14), | ||
115 | bits(rw, 7, 9), bits(rw, 4, 6), bit(rw, 3), bit(rw, 2), | ||
116 | bit(rw, 1), bit(rw, 0)); | ||
117 | break; | ||
118 | case 11: | ||
119 | PDEBUG("reg11 PCNT1 mid_bias %d p_desired %d pc_offset %d" | ||
120 | " tx_delay %d", | ||
121 | bits(rw, 15, 17), bits(rw, 9, 14), bits(rw, 3, 8), | ||
122 | bits(rw, 0, 2)); | ||
123 | break; | ||
124 | case 12: | ||
125 | PDEBUG("reg12 PCNT2 max_power %d mid_power %d min_power %d", | ||
126 | bits(rw, 12, 17), bits(rw, 6, 11), bits(rw, 0, 5)); | ||
127 | break; | ||
128 | case 13: | ||
129 | PDEBUG("reg13 VCOT1 rfpll vco comp %d ifpll vco comp %d" | ||
130 | " lobias %d if_biasbuf %d if_biasvco %d rf_biasbuf %d" | ||
131 | " rf_biasvco %d", | ||
132 | bit(rw, 17), bit(rw, 16), bit(rw, 15), | ||
133 | bits(rw, 8, 9), bits(rw, 5, 7), bits(rw, 3, 4), | ||
134 | bits(rw, 0, 2)); | ||
135 | break; | ||
136 | case 14: | ||
137 | PDEBUG("reg14 IQCAL rx_acal %d rx_pcal %d" | ||
138 | " tx_acal %d tx_pcal %d", | ||
139 | bits(rw, 13, 17), bits(rw, 9, 12), bits(rw, 4, 8), | ||
140 | bits(rw, 0, 3)); | ||
141 | break; | ||
142 | } | ||
143 | } | ||
144 | #endif /* 0 */ | ||
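The helpers in the #if 0 block above decode the 24-bit words written to the RF2959: the register number sits in bits 18-22, a read/write flag in bit 23, and the payload in the low 18 bits. A minimal standalone sketch of that decoding (illustrative only, not part of the driver), applied to the value 0x1819f9 from the rv[] table below:

#include <stdint.h>
#include <stdio.h>

/* same extraction as bits()/bit() in the #if 0 block above */
static unsigned int rf_bits(uint32_t rw, int from, int to)
{
	rw &= ~(0xffffffffU << (to + 1));
	return rw >> from;
}

int main(void)
{
	uint32_t rw = 0x1819f9;	/* labelled REG6 (RFPLL2) in rv[] below */

	printf("reg %u rw-flag %u payload %#07x\n",
	       rf_bits(rw, 18, 22), rf_bits(rw, 23, 23), rf_bits(rw, 0, 17));
	/* prints: reg 6 rw-flag 0 payload 0x019f9 */
	return 0;
}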
145 | |||
146 | static int rf2959_init_hw(struct zd_rf *rf) | ||
147 | { | ||
148 | int r; | ||
149 | struct zd_chip *chip = zd_rf_to_chip(rf); | ||
150 | |||
151 | static const struct zd_ioreq16 ioreqs[] = { | ||
152 | { CR2, 0x1E }, { CR9, 0x20 }, { CR10, 0x89 }, | ||
153 | { CR11, 0x00 }, { CR15, 0xD0 }, { CR17, 0x68 }, | ||
154 | { CR19, 0x4a }, { CR20, 0x0c }, { CR21, 0x0E }, | ||
155 | { CR23, 0x48 }, | ||
156 | /* normal size for cca threshold */ | ||
157 | { CR24, 0x14 }, | ||
158 | /* { CR24, 0x20 }, */ | ||
159 | { CR26, 0x90 }, { CR27, 0x30 }, { CR29, 0x20 }, | ||
160 | { CR31, 0xb2 }, { CR32, 0x43 }, { CR33, 0x28 }, | ||
161 | { CR38, 0x30 }, { CR34, 0x0f }, { CR35, 0xF0 }, | ||
162 | { CR41, 0x2a }, { CR46, 0x7F }, { CR47, 0x1E }, | ||
163 | { CR51, 0xc5 }, { CR52, 0xc5 }, { CR53, 0xc5 }, | ||
164 | { CR79, 0x58 }, { CR80, 0x30 }, { CR81, 0x30 }, | ||
165 | { CR82, 0x00 }, { CR83, 0x24 }, { CR84, 0x04 }, | ||
166 | { CR85, 0x00 }, { CR86, 0x10 }, { CR87, 0x2A }, | ||
167 | { CR88, 0x10 }, { CR89, 0x24 }, { CR90, 0x18 }, | ||
168 | /* { CR91, 0x18 }, */ | ||
169 | /* should solve continuous CTS frame problems */ | ||
170 | { CR91, 0x00 }, | ||
171 | { CR92, 0x0a }, { CR93, 0x00 }, { CR94, 0x01 }, | ||
172 | { CR95, 0x00 }, { CR96, 0x40 }, { CR97, 0x37 }, | ||
173 | { CR98, 0x05 }, { CR99, 0x28 }, { CR100, 0x00 }, | ||
174 | { CR101, 0x13 }, { CR102, 0x27 }, { CR103, 0x27 }, | ||
175 | { CR104, 0x18 }, { CR105, 0x12 }, | ||
176 | /* normal size */ | ||
177 | { CR106, 0x1a }, | ||
178 | /* { CR106, 0x22 }, */ | ||
179 | { CR107, 0x24 }, { CR108, 0x0a }, { CR109, 0x13 }, | ||
180 | { CR110, 0x2F }, { CR111, 0x27 }, { CR112, 0x27 }, | ||
181 | { CR113, 0x27 }, { CR114, 0x27 }, { CR115, 0x40 }, | ||
182 | { CR116, 0x40 }, { CR117, 0xF0 }, { CR118, 0xF0 }, | ||
183 | { CR119, 0x16 }, | ||
184 | /* no TX continuation */ | ||
185 | { CR122, 0x00 }, | ||
186 | /* { CR122, 0xff }, */ | ||
187 | { CR127, 0x03 }, { CR131, 0x08 }, { CR138, 0x28 }, | ||
188 | { CR148, 0x44 }, { CR150, 0x10 }, { CR169, 0xBB }, | ||
189 | { CR170, 0xBB }, | ||
190 | }; | ||
191 | |||
192 | static const u32 rv[] = { | ||
193 | 0x000007, /* REG0(CFG1) */ | ||
194 | 0x07dd43, /* REG1(IFPLL1) */ | ||
195 | 0x080959, /* REG2(IFPLL2) */ | ||
196 | 0x0e6666, | ||
197 | 0x116a57, /* REG4 */ | ||
198 | 0x17dd43, /* REG5 */ | ||
199 | 0x1819f9, /* REG6 */ | ||
200 | 0x1e6666, | ||
201 | 0x214554, | ||
202 | 0x25e7fa, | ||
203 | 0x27fffa, | ||
204 | /* The Zydas driver somehow forgets to set this value. It's | ||
205 | * only set for Japan. We are using internal power control | ||
206 | * for now. | ||
207 | */ | ||
208 | 0x294128, /* internal power */ | ||
209 | /* 0x28252c, */ /* External control TX power */ | ||
210 | /* CR31_CCK, CR51_6-36M, CR52_48M, CR53_54M */ | ||
211 | 0x2c0000, | ||
212 | 0x300000, | ||
213 | 0x340000, /* REG13(0xD) */ | ||
214 | 0x381e0f, /* REG14(0xE) */ | ||
215 | /* Bogus: the RF2959 data sheet does not document register 27, which is | ||
216 | * what this value actually addresses. The 0x11 in the comment below is | ||
217 | * decimal 17, not 27. */ | ||
218 | 0x6c180f, /* REG27(0x11) */ | ||
219 | }; | ||
220 | |||
221 | r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); | ||
222 | if (r) | ||
223 | return r; | ||
224 | |||
225 | return zd_rfwritev_locked(chip, rv, ARRAY_SIZE(rv), RF_RV_BITS); | ||
226 | } | ||
227 | |||
228 | static int rf2959_set_channel(struct zd_rf *rf, u8 channel) | ||
229 | { | ||
230 | int i, r; | ||
231 | u32 *rv = rf2959_table[channel-1]; | ||
232 | struct zd_chip *chip = zd_rf_to_chip(rf); | ||
233 | |||
234 | for (i = 0; i < 2; i++) { | ||
235 | r = zd_rfwrite_locked(chip, rv[i], RF_RV_BITS); | ||
236 | if (r) | ||
237 | return r; | ||
238 | } | ||
239 | return 0; | ||
240 | } | ||
241 | |||
242 | static int rf2959_switch_radio_on(struct zd_rf *rf) | ||
243 | { | ||
244 | static const struct zd_ioreq16 ioreqs[] = { | ||
245 | { CR10, 0x89 }, | ||
246 | { CR11, 0x00 }, | ||
247 | }; | ||
248 | struct zd_chip *chip = zd_rf_to_chip(rf); | ||
249 | |||
250 | return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); | ||
251 | } | ||
252 | |||
253 | static int rf2959_switch_radio_off(struct zd_rf *rf) | ||
254 | { | ||
255 | static const struct zd_ioreq16 ioreqs[] = { | ||
256 | { CR10, 0x15 }, | ||
257 | { CR11, 0x81 }, | ||
258 | }; | ||
259 | struct zd_chip *chip = zd_rf_to_chip(rf); | ||
260 | |||
261 | return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); | ||
262 | } | ||
263 | |||
264 | int zd_rf_init_rf2959(struct zd_rf *rf) | ||
265 | { | ||
266 | struct zd_chip *chip = zd_rf_to_chip(rf); | ||
267 | |||
268 | if (chip->is_zd1211b) { | ||
269 | dev_err(zd_chip_dev(chip), | ||
270 | "RF2959 is currently not supported for ZD1211B" | ||
271 | " devices\n"); | ||
272 | return -ENODEV; | ||
273 | } | ||
274 | rf->init_hw = rf2959_init_hw; | ||
275 | rf->set_channel = rf2959_set_channel; | ||
276 | rf->switch_radio_on = rf2959_switch_radio_on; | ||
277 | rf->switch_radio_off = rf2959_switch_radio_off; | ||
278 | return 0; | ||
279 | } | ||
diff --git a/drivers/net/wireless/zd1211rw/zd_types.h b/drivers/net/wireless/zd1211rw/zd_types.h new file mode 100644 index 000000000000..0155a1584ed3 --- /dev/null +++ b/drivers/net/wireless/zd1211rw/zd_types.h | |||
@@ -0,0 +1,71 @@ | |||
1 | /* zd_types.h | ||
2 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License as published by | ||
5 | * the Free Software Foundation; either version 2 of the License, or | ||
6 | * (at your option) any later version. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program; if not, write to the Free Software | ||
15 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
16 | */ | ||
17 | |||
18 | #ifndef _ZD_TYPES_H | ||
19 | #define _ZD_TYPES_H | ||
20 | |||
21 | #include <linux/types.h> | ||
22 | |||
23 | /* We have three register spaces mapped into the overall USB address space of | ||
24 | * 64K words (16-bit values). There is the control register space of | ||
25 | * double-word registers, the eeprom register space and the firmware register | ||
26 | * space. The control register space is byte mapped, the others are word | ||
27 | * mapped. | ||
28 | * | ||
29 | * For that reason, we are using byte offsets for control registers and word | ||
30 | * offsets for everything else. | ||
31 | */ | ||
32 | |||
33 | typedef u32 __nocast zd_addr_t; | ||
34 | |||
35 | enum { | ||
36 | ADDR_BASE_MASK = 0xff000000, | ||
37 | ADDR_OFFSET_MASK = 0x0000ffff, | ||
38 | ADDR_ZERO_MASK = 0x00ff0000, | ||
39 | NULL_BASE = 0x00000000, | ||
40 | USB_BASE = 0x01000000, | ||
41 | CR_BASE = 0x02000000, | ||
42 | CR_MAX_OFFSET = 0x0b30, | ||
43 | E2P_BASE = 0x03000000, | ||
44 | E2P_MAX_OFFSET = 0x007e, | ||
45 | FW_BASE = 0x04000000, | ||
46 | FW_MAX_OFFSET = 0x0005, | ||
47 | }; | ||
48 | |||
49 | #define ZD_ADDR_BASE(addr) ((u32)(addr) & ADDR_BASE_MASK) | ||
50 | #define ZD_OFFSET(addr) ((u32)(addr) & ADDR_OFFSET_MASK) | ||
51 | |||
52 | #define ZD_ADDR(base, offset) \ | ||
53 | ((zd_addr_t)(((base) & ADDR_BASE_MASK) | ((offset) & ADDR_OFFSET_MASK))) | ||
54 | |||
55 | #define ZD_NULL_ADDR ((zd_addr_t)0) | ||
56 | #define USB_REG(offset) ZD_ADDR(USB_BASE, offset) /* word addressing */ | ||
57 | #define CTL_REG(offset) ZD_ADDR(CR_BASE, offset) /* byte addressing */ | ||
58 | #define E2P_REG(offset) ZD_ADDR(E2P_BASE, offset) /* word addressing */ | ||
59 | #define FW_REG(offset) ZD_ADDR(FW_BASE, offset) /* word addressing */ | ||
60 | |||
61 | static inline zd_addr_t zd_inc_word(zd_addr_t addr) | ||
62 | { | ||
63 | u32 base = ZD_ADDR_BASE(addr); | ||
64 | u32 offset = ZD_OFFSET(addr); | ||
65 | |||
66 | offset += base == CR_BASE ? 2 : 1; | ||
67 | |||
68 | return base | offset; | ||
69 | } | ||
70 | |||
71 | #endif /* _ZD_TYPES_H */ | ||
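The comment at the top of this header is the key to the address handling: a zd_addr_t packs the register space into its top byte and an offset into its low 16 bits, and zd_inc_word() advances by 2 in the byte-mapped control-register space but by 1 in the word-mapped spaces. A small standalone illustration mirroring those macros (the 0x0010 offsets are arbitrary examples):

#include <stdint.h>
#include <stdio.h>

#define CR_BASE  0x02000000u	/* as in the enum above */
#define E2P_BASE 0x03000000u
#define ZD_ADDR(base, offset) ((base) | ((offset) & 0x0000ffffu))

static uint32_t inc_word(uint32_t addr)
{
	uint32_t base = addr & 0xff000000u;
	uint32_t offset = addr & 0x0000ffffu;

	offset += (base == CR_BASE) ? 2 : 1;	/* CRs are byte mapped */
	return base | offset;
}

int main(void)
{
	uint32_t cr = ZD_ADDR(CR_BASE, 0x0010);
	uint32_t e2p = ZD_ADDR(E2P_BASE, 0x0010);

	printf("%#010x -> %#010x\n", (unsigned int)cr, (unsigned int)inc_word(cr));
	/* 0x02000010 -> 0x02000012 */
	printf("%#010x -> %#010x\n", (unsigned int)e2p, (unsigned int)inc_word(e2p));
	/* 0x03000010 -> 0x03000011 */
	return 0;
}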
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c new file mode 100644 index 000000000000..6320984126c7 --- /dev/null +++ b/drivers/net/wireless/zd1211rw/zd_usb.c | |||
@@ -0,0 +1,1309 @@ | |||
1 | /* zd_usb.c | ||
2 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License as published by | ||
5 | * the Free Software Foundation; either version 2 of the License, or | ||
6 | * (at your option) any later version. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program; if not, write to the Free Software | ||
15 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
16 | */ | ||
17 | |||
18 | #include <asm/unaligned.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/module.h> | ||
21 | #include <linux/firmware.h> | ||
22 | #include <linux/device.h> | ||
23 | #include <linux/errno.h> | ||
24 | #include <linux/skbuff.h> | ||
25 | #include <linux/usb.h> | ||
26 | #include <net/ieee80211.h> | ||
27 | |||
28 | #include "zd_def.h" | ||
29 | #include "zd_netdev.h" | ||
30 | #include "zd_mac.h" | ||
31 | #include "zd_usb.h" | ||
32 | #include "zd_util.h" | ||
33 | |||
34 | static struct usb_device_id usb_ids[] = { | ||
35 | /* ZD1211 */ | ||
36 | { USB_DEVICE(0x0ace, 0x1211), .driver_info = DEVICE_ZD1211 }, | ||
37 | { USB_DEVICE(0x07b8, 0x6001), .driver_info = DEVICE_ZD1211 }, | ||
38 | { USB_DEVICE(0x126f, 0xa006), .driver_info = DEVICE_ZD1211 }, | ||
39 | { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 }, | ||
40 | { USB_DEVICE(0x0df6, 0x9071), .driver_info = DEVICE_ZD1211 }, | ||
41 | { USB_DEVICE(0x157e, 0x300b), .driver_info = DEVICE_ZD1211 }, | ||
42 | /* ZD1211B */ | ||
43 | { USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B }, | ||
44 | { USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B }, | ||
45 | {} | ||
46 | }; | ||
47 | |||
48 | MODULE_LICENSE("GPL"); | ||
49 | MODULE_DESCRIPTION("USB driver for devices with the ZD1211 chip."); | ||
50 | MODULE_AUTHOR("Ulrich Kunitz"); | ||
51 | MODULE_AUTHOR("Daniel Drake"); | ||
52 | MODULE_VERSION("1.0"); | ||
53 | MODULE_DEVICE_TABLE(usb, usb_ids); | ||
54 | |||
55 | #define FW_ZD1211_PREFIX "zd1211/zd1211_" | ||
56 | #define FW_ZD1211B_PREFIX "zd1211/zd1211b_" | ||
57 | |||
58 | /* register address handling */ | ||
59 | |||
60 | #ifdef DEBUG | ||
61 | static int check_addr(struct zd_usb *usb, zd_addr_t addr) | ||
62 | { | ||
63 | u32 base = ZD_ADDR_BASE(addr); | ||
64 | u32 offset = ZD_OFFSET(addr); | ||
65 | |||
66 | if ((u32)addr & ADDR_ZERO_MASK) | ||
67 | goto invalid_address; | ||
68 | switch (base) { | ||
69 | case USB_BASE: | ||
70 | break; | ||
71 | case CR_BASE: | ||
72 | if (offset > CR_MAX_OFFSET) { | ||
73 | dev_dbg(zd_usb_dev(usb), | ||
74 | "CR offset %#010x larger than" | ||
75 | " CR_MAX_OFFSET %#10x\n", | ||
76 | offset, CR_MAX_OFFSET); | ||
77 | goto invalid_address; | ||
78 | } | ||
79 | if (offset & 1) { | ||
80 | dev_dbg(zd_usb_dev(usb), | ||
81 | "CR offset %#010x is not a multiple of 2\n", | ||
82 | offset); | ||
83 | goto invalid_address; | ||
84 | } | ||
85 | break; | ||
86 | case E2P_BASE: | ||
87 | if (offset > E2P_MAX_OFFSET) { | ||
88 | dev_dbg(zd_usb_dev(usb), | ||
89 | "E2P offset %#010x larger than" | ||
90 | " E2P_MAX_OFFSET %#010x\n", | ||
91 | offset, E2P_MAX_OFFSET); | ||
92 | goto invalid_address; | ||
93 | } | ||
94 | break; | ||
95 | case FW_BASE: | ||
96 | if (!usb->fw_base_offset) { | ||
97 | dev_dbg(zd_usb_dev(usb), | ||
98 | "ERROR: fw base offset has not been set\n"); | ||
99 | return -EAGAIN; | ||
100 | } | ||
101 | if (offset > FW_MAX_OFFSET) { | ||
102 | dev_dbg(zd_usb_dev(usb), | ||
103 | "FW offset %#10x is larger than" | ||
104 | " FW_MAX_OFFSET %#010x\n", | ||
105 | offset, FW_MAX_OFFSET); | ||
106 | goto invalid_address; | ||
107 | } | ||
108 | break; | ||
109 | default: | ||
110 | dev_dbg(zd_usb_dev(usb), | ||
111 | "address has unsupported base %#010x\n", addr); | ||
112 | goto invalid_address; | ||
113 | } | ||
114 | |||
115 | return 0; | ||
116 | invalid_address: | ||
117 | dev_dbg(zd_usb_dev(usb), | ||
118 | "ERROR: invalid address: %#010x\n", addr); | ||
119 | return -EINVAL; | ||
120 | } | ||
121 | #endif /* DEBUG */ | ||
122 | |||
123 | static u16 usb_addr(struct zd_usb *usb, zd_addr_t addr) | ||
124 | { | ||
125 | u32 base; | ||
126 | u16 offset; | ||
127 | |||
128 | base = ZD_ADDR_BASE(addr); | ||
129 | offset = ZD_OFFSET(addr); | ||
130 | |||
131 | ZD_ASSERT(check_addr(usb, addr) == 0); | ||
132 | |||
133 | switch (base) { | ||
134 | case CR_BASE: | ||
135 | offset += CR_BASE_OFFSET; | ||
136 | break; | ||
137 | case E2P_BASE: | ||
138 | offset += E2P_BASE_OFFSET; | ||
139 | break; | ||
140 | case FW_BASE: | ||
141 | offset += usb->fw_base_offset; | ||
142 | break; | ||
143 | } | ||
144 | |||
145 | return offset; | ||
146 | } | ||
147 | |||
148 | /* USB device initialization */ | ||
149 | |||
150 | static int request_fw_file( | ||
151 | const struct firmware **fw, const char *name, struct device *device) | ||
152 | { | ||
153 | int r; | ||
154 | |||
155 | dev_dbg_f(device, "fw name %s\n", name); | ||
156 | |||
157 | r = request_firmware(fw, name, device); | ||
158 | if (r) | ||
159 | dev_err(device, | ||
160 | "Could not load firmware file %s. Error number %d\n", | ||
161 | name, r); | ||
162 | return r; | ||
163 | } | ||
164 | |||
165 | static inline u16 get_bcdDevice(const struct usb_device *udev) | ||
166 | { | ||
167 | return le16_to_cpu(udev->descriptor.bcdDevice); | ||
168 | } | ||
169 | |||
170 | enum upload_code_flags { | ||
171 | REBOOT = 1, | ||
172 | }; | ||
173 | |||
174 | /* Ensures that MAX_TRANSFER_SIZE is even. */ | ||
175 | #define MAX_TRANSFER_SIZE (USB_MAX_TRANSFER_SIZE & ~1) | ||
176 | |||
177 | static int upload_code(struct usb_device *udev, | ||
178 | const u8 *data, size_t size, u16 code_offset, int flags) | ||
179 | { | ||
180 | u8 *p; | ||
181 | int r; | ||
182 | |||
183 | /* USB request blocks need "kmalloced" buffers. | ||
184 | */ | ||
185 | p = kmalloc(MAX_TRANSFER_SIZE, GFP_KERNEL); | ||
186 | if (!p) { | ||
187 | dev_err(&udev->dev, "out of memory\n"); | ||
188 | r = -ENOMEM; | ||
189 | goto error; | ||
190 | } | ||
191 | |||
192 | size &= ~1; | ||
193 | while (size > 0) { | ||
194 | size_t transfer_size = size <= MAX_TRANSFER_SIZE ? | ||
195 | size : MAX_TRANSFER_SIZE; | ||
196 | |||
197 | dev_dbg_f(&udev->dev, "transfer size %zu\n", transfer_size); | ||
198 | |||
199 | memcpy(p, data, transfer_size); | ||
200 | r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), | ||
201 | USB_REQ_FIRMWARE_DOWNLOAD, | ||
202 | USB_DIR_OUT | USB_TYPE_VENDOR, | ||
203 | code_offset, 0, p, transfer_size, 1000 /* ms */); | ||
204 | if (r < 0) { | ||
205 | dev_err(&udev->dev, | ||
206 | "USB control request for firmware upload" | ||
207 | " failed. Error number %d\n", r); | ||
208 | goto error; | ||
209 | } | ||
210 | transfer_size = r & ~1; | ||
211 | |||
212 | size -= transfer_size; | ||
213 | data += transfer_size; | ||
214 | code_offset += transfer_size/sizeof(u16); | ||
215 | } | ||
216 | |||
217 | if (flags & REBOOT) { | ||
218 | u8 ret; | ||
219 | |||
220 | r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), | ||
221 | USB_REQ_FIRMWARE_CONFIRM, | ||
222 | USB_DIR_IN | USB_TYPE_VENDOR, | ||
223 | 0, 0, &ret, sizeof(ret), 5000 /* ms */); | ||
224 | if (r != sizeof(ret)) { | ||
225 | dev_err(&udev->dev, | ||
226 | "control request firmeware confirmation failed." | ||
227 | " Return value %d\n", r); | ||
228 | if (r >= 0) | ||
229 | r = -ENODEV; | ||
230 | goto error; | ||
231 | } | ||
232 | if (ret & 0x80) { | ||
233 | dev_err(&udev->dev, | ||
234 | "Internal error while downloading." | ||
235 | " Firmware confirm return value %#04x\n", | ||
236 | (unsigned int)ret); | ||
237 | r = -ENODEV; | ||
238 | goto error; | ||
239 | } | ||
240 | dev_dbg_f(&udev->dev, "firmware confirm return value %#04x\n", | ||
241 | (unsigned int)ret); | ||
242 | } | ||
243 | |||
244 | r = 0; | ||
245 | error: | ||
246 | kfree(p); | ||
247 | return r; | ||
248 | } | ||
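upload_code() pushes the firmware image through vendor control requests in even-sized chunks and keeps code_offset in 16-bit words (transfer_size / sizeof(u16)), so the device-side offset counts words while the host pointer advances in bytes. A sketch of that bookkeeping, assuming a hypothetical 4096-byte transfer limit and a 10000-byte image:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int size = 10000 & ~1u;	/* image size in bytes (example) */
	unsigned int max = 4096;		/* assumed MAX_TRANSFER_SIZE */
	unsigned int code_offset = 0;		/* device offset, in 16-bit words */

	while (size > 0) {
		unsigned int chunk = size <= max ? size : max;

		printf("send %u bytes at word offset %u\n", chunk, code_offset);
		size -= chunk;
		code_offset += chunk / sizeof(uint16_t);
	}
	/* sends 4096, 4096 and 1808 bytes at word offsets 0, 2048 and 4096 */
	return 0;
}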
249 | |||
250 | static u16 get_word(const void *data, u16 offset) | ||
251 | { | ||
252 | const __le16 *p = data; | ||
253 | return le16_to_cpu(p[offset]); | ||
254 | } | ||
255 | |||
256 | static char *get_fw_name(char *buffer, size_t size, u8 device_type, | ||
257 | const char* postfix) | ||
258 | { | ||
259 | scnprintf(buffer, size, "%s%s", | ||
260 | device_type == DEVICE_ZD1211B ? | ||
261 | FW_ZD1211B_PREFIX : FW_ZD1211_PREFIX, | ||
262 | postfix); | ||
263 | return buffer; | ||
264 | } | ||
265 | |||
266 | static int upload_firmware(struct usb_device *udev, u8 device_type) | ||
267 | { | ||
268 | int r; | ||
269 | u16 fw_bcdDevice; | ||
270 | u16 bcdDevice; | ||
271 | const struct firmware *ub_fw = NULL; | ||
272 | const struct firmware *uph_fw = NULL; | ||
273 | char fw_name[128]; | ||
274 | |||
275 | bcdDevice = get_bcdDevice(udev); | ||
276 | |||
277 | r = request_fw_file(&ub_fw, | ||
278 | get_fw_name(fw_name, sizeof(fw_name), device_type, "ub"), | ||
279 | &udev->dev); | ||
280 | if (r) | ||
281 | goto error; | ||
282 | |||
283 | fw_bcdDevice = get_word(ub_fw->data, EEPROM_REGS_OFFSET); | ||
284 | |||
285 | /* FIXME: do we have any reason to perform the kludge that the vendor | ||
286 | * driver does when there is a version mismatch? (their driver uploads | ||
287 | * different firmware images in that case) | ||
288 | */ | ||
289 | if (fw_bcdDevice != bcdDevice) { | ||
290 | dev_info(&udev->dev, | ||
291 | "firmware device id %#06x and actual device id " | ||
292 | "%#06x differ, continuing anyway\n", | ||
293 | fw_bcdDevice, bcdDevice); | ||
294 | } else { | ||
295 | dev_dbg_f(&udev->dev, | ||
296 | "firmware device id %#06x is equal to the " | ||
297 | "actual device id\n", fw_bcdDevice); | ||
298 | } | ||
299 | |||
300 | |||
301 | r = request_fw_file(&uph_fw, | ||
302 | get_fw_name(fw_name, sizeof(fw_name), device_type, "uphr"), | ||
303 | &udev->dev); | ||
304 | if (r) | ||
305 | goto error; | ||
306 | |||
307 | r = upload_code(udev, uph_fw->data, uph_fw->size, FW_START_OFFSET, | ||
308 | REBOOT); | ||
309 | if (r) { | ||
310 | dev_err(&udev->dev, | ||
311 | "Could not upload firmware code uph. Error number %d\n", | ||
312 | r); | ||
313 | } | ||
314 | |||
315 | /* FALL-THROUGH */ | ||
316 | error: | ||
317 | release_firmware(ub_fw); | ||
318 | release_firmware(uph_fw); | ||
319 | return r; | ||
320 | } | ||
321 | |||
322 | static void disable_read_regs_int(struct zd_usb *usb) | ||
323 | { | ||
324 | struct zd_usb_interrupt *intr = &usb->intr; | ||
325 | |||
326 | spin_lock(&intr->lock); | ||
327 | intr->read_regs_enabled = 0; | ||
328 | spin_unlock(&intr->lock); | ||
329 | } | ||
330 | |||
331 | #define urb_dev(urb) (&(urb)->dev->dev) | ||
332 | |||
333 | static inline void handle_regs_int(struct urb *urb) | ||
334 | { | ||
335 | struct zd_usb *usb = urb->context; | ||
336 | struct zd_usb_interrupt *intr = &usb->intr; | ||
337 | int len; | ||
338 | |||
339 | ZD_ASSERT(in_interrupt()); | ||
340 | spin_lock(&intr->lock); | ||
341 | |||
342 | if (intr->read_regs_enabled) { | ||
343 | intr->read_regs.length = len = urb->actual_length; | ||
344 | |||
345 | if (len > sizeof(intr->read_regs.buffer)) | ||
346 | len = sizeof(intr->read_regs.buffer); | ||
347 | memcpy(intr->read_regs.buffer, urb->transfer_buffer, len); | ||
348 | intr->read_regs_enabled = 0; | ||
349 | complete(&intr->read_regs.completion); | ||
350 | goto out; | ||
351 | } | ||
352 | |||
353 | dev_dbg_f(urb_dev(urb), "regs interrupt ignored\n"); | ||
354 | out: | ||
355 | spin_unlock(&intr->lock); | ||
356 | } | ||
357 | |||
358 | static inline void handle_retry_failed_int(struct urb *urb) | ||
359 | { | ||
360 | dev_dbg_f(urb_dev(urb), "retry failed interrupt\n"); | ||
361 | } | ||
362 | |||
363 | |||
364 | static void int_urb_complete(struct urb *urb, struct pt_regs *pt_regs) | ||
365 | { | ||
366 | int r; | ||
367 | struct usb_int_header *hdr; | ||
368 | |||
369 | switch (urb->status) { | ||
370 | case 0: | ||
371 | break; | ||
372 | case -ESHUTDOWN: | ||
373 | case -EINVAL: | ||
374 | case -ENODEV: | ||
375 | case -ENOENT: | ||
376 | case -ECONNRESET: | ||
377 | case -EPIPE: | ||
378 | goto kfree; | ||
379 | default: | ||
380 | goto resubmit; | ||
381 | } | ||
382 | |||
383 | if (urb->actual_length < sizeof(*hdr)) { | ||
384 | dev_dbg_f(urb_dev(urb), "error: urb %p too small\n", urb); | ||
385 | goto resubmit; | ||
386 | } | ||
387 | |||
388 | hdr = urb->transfer_buffer; | ||
389 | if (hdr->type != USB_INT_TYPE) { | ||
390 | dev_dbg_f(urb_dev(urb), "error: urb %p wrong type\n", urb); | ||
391 | goto resubmit; | ||
392 | } | ||
393 | |||
394 | switch (hdr->id) { | ||
395 | case USB_INT_ID_REGS: | ||
396 | handle_regs_int(urb); | ||
397 | break; | ||
398 | case USB_INT_ID_RETRY_FAILED: | ||
399 | handle_retry_failed_int(urb); | ||
400 | break; | ||
401 | default: | ||
402 | dev_dbg_f(urb_dev(urb), "error: urb %p unknown id %x\n", urb, | ||
403 | (unsigned int)hdr->id); | ||
404 | goto resubmit; | ||
405 | } | ||
406 | |||
407 | resubmit: | ||
408 | r = usb_submit_urb(urb, GFP_ATOMIC); | ||
409 | if (r) { | ||
410 | dev_dbg_f(urb_dev(urb), "error: resubmit of urb %p failed\n", urb); | ||
411 | goto kfree; | ||
412 | } | ||
413 | return; | ||
414 | kfree: | ||
415 | kfree(urb->transfer_buffer); | ||
416 | } | ||
417 | |||
418 | static inline int int_urb_interval(struct usb_device *udev) | ||
419 | { | ||
420 | switch (udev->speed) { | ||
421 | case USB_SPEED_HIGH: | ||
422 | return 4; | ||
423 | case USB_SPEED_LOW: | ||
424 | return 10; | ||
425 | case USB_SPEED_FULL: | ||
426 | default: | ||
427 | return 1; | ||
428 | } | ||
429 | } | ||
430 | |||
431 | static inline int usb_int_enabled(struct zd_usb *usb) | ||
432 | { | ||
433 | unsigned long flags; | ||
434 | struct zd_usb_interrupt *intr = &usb->intr; | ||
435 | struct urb *urb; | ||
436 | |||
437 | spin_lock_irqsave(&intr->lock, flags); | ||
438 | urb = intr->urb; | ||
439 | spin_unlock_irqrestore(&intr->lock, flags); | ||
440 | return urb != NULL; | ||
441 | } | ||
442 | |||
443 | int zd_usb_enable_int(struct zd_usb *usb) | ||
444 | { | ||
445 | int r; | ||
446 | struct usb_device *udev; | ||
447 | struct zd_usb_interrupt *intr = &usb->intr; | ||
448 | void *transfer_buffer = NULL; | ||
449 | struct urb *urb; | ||
450 | |||
451 | dev_dbg_f(zd_usb_dev(usb), "\n"); | ||
452 | |||
453 | urb = usb_alloc_urb(0, GFP_NOFS); | ||
454 | if (!urb) { | ||
455 | r = -ENOMEM; | ||
456 | goto out; | ||
457 | } | ||
458 | |||
459 | ZD_ASSERT(!irqs_disabled()); | ||
460 | spin_lock_irq(&intr->lock); | ||
461 | if (intr->urb) { | ||
462 | spin_unlock_irq(&intr->lock); | ||
463 | r = 0; | ||
464 | goto error_free_urb; | ||
465 | } | ||
466 | intr->urb = urb; | ||
467 | spin_unlock_irq(&intr->lock); | ||
468 | |||
469 | /* TODO: make it a DMA buffer */ | ||
470 | r = -ENOMEM; | ||
471 | transfer_buffer = kmalloc(USB_MAX_EP_INT_BUFFER, GFP_NOFS); | ||
472 | if (!transfer_buffer) { | ||
473 | dev_dbg_f(zd_usb_dev(usb), | ||
474 | "couldn't allocate transfer_buffer\n"); | ||
475 | goto error_set_urb_null; | ||
476 | } | ||
477 | |||
478 | udev = zd_usb_to_usbdev(usb); | ||
479 | usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, EP_INT_IN), | ||
480 | transfer_buffer, USB_MAX_EP_INT_BUFFER, | ||
481 | int_urb_complete, usb, | ||
482 | intr->interval); | ||
483 | |||
484 | dev_dbg_f(zd_usb_dev(usb), "submit urb %p\n", intr->urb); | ||
485 | r = usb_submit_urb(urb, GFP_NOFS); | ||
486 | if (r) { | ||
487 | dev_dbg_f(zd_usb_dev(usb), | ||
488 | "Couldn't submit urb. Error number %d\n", r); | ||
489 | goto error; | ||
490 | } | ||
491 | |||
492 | return 0; | ||
493 | error: | ||
494 | kfree(transfer_buffer); | ||
495 | error_set_urb_null: | ||
496 | spin_lock_irq(&intr->lock); | ||
497 | intr->urb = NULL; | ||
498 | spin_unlock_irq(&intr->lock); | ||
499 | error_free_urb: | ||
500 | usb_free_urb(urb); | ||
501 | out: | ||
502 | return r; | ||
503 | } | ||
504 | |||
505 | void zd_usb_disable_int(struct zd_usb *usb) | ||
506 | { | ||
507 | unsigned long flags; | ||
508 | struct zd_usb_interrupt *intr = &usb->intr; | ||
509 | struct urb *urb; | ||
510 | |||
511 | spin_lock_irqsave(&intr->lock, flags); | ||
512 | urb = intr->urb; | ||
513 | if (!urb) { | ||
514 | spin_unlock_irqrestore(&intr->lock, flags); | ||
515 | return; | ||
516 | } | ||
517 | intr->urb = NULL; | ||
518 | spin_unlock_irqrestore(&intr->lock, flags); | ||
519 | |||
520 | usb_kill_urb(urb); | ||
521 | dev_dbg_f(zd_usb_dev(usb), "urb %p killed\n", urb); | ||
522 | usb_free_urb(urb); | ||
523 | } | ||
524 | |||
525 | static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer, | ||
526 | unsigned int length) | ||
527 | { | ||
528 | int i; | ||
529 | struct zd_mac *mac = zd_usb_to_mac(usb); | ||
530 | const struct rx_length_info *length_info; | ||
531 | |||
532 | if (length < sizeof(struct rx_length_info)) { | ||
533 | /* It's not a complete packet anyhow. */ | ||
534 | return; | ||
535 | } | ||
536 | length_info = (struct rx_length_info *) | ||
537 | (buffer + length - sizeof(struct rx_length_info)); | ||
538 | |||
539 | /* It might be that three frames are merged into a single URB | ||
540 | * transaction. We have to check for the length info tag. | ||
541 | * | ||
542 | * While testing we discovered that length_info might be unaligned, | ||
543 | * because if USB transactions are merged, the last packet will not | ||
544 | * be padded. Unaligned access might also happen if the length_info | ||
545 | * structure is not present. | ||
546 | */ | ||
547 | if (get_unaligned(&length_info->tag) == cpu_to_le16(RX_LENGTH_INFO_TAG)) | ||
548 | { | ||
549 | unsigned int l, k, n; | ||
550 | for (i = 0, l = 0;; i++) { | ||
551 | k = le16_to_cpu(get_unaligned(&length_info->length[i])); | ||
552 | n = l+k; | ||
553 | if (n > length) | ||
554 | return; | ||
555 | zd_mac_rx(mac, buffer+l, k); | ||
556 | if (i >= 2) | ||
557 | return; | ||
558 | l = (n+3) & ~3; | ||
559 | } | ||
560 | } else { | ||
561 | zd_mac_rx(mac, buffer, length); | ||
562 | } | ||
563 | } | ||
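handle_rx_packet() walks a merged buffer frame by frame: each length comes from the length_info block at the tail, and the next frame starts at the previous end rounded up to a multiple of four, exactly as the l = (n+3) & ~3 step does. A worked sketch with three hypothetical frame lengths:

#include <stdio.h>

int main(void)
{
	unsigned int frame_len[3] = { 70, 60, 50 };	/* hypothetical lengths */
	unsigned int i, l = 0, k, n;

	for (i = 0; i < 3; i++) {
		k = frame_len[i];
		n = l + k;
		printf("frame %u: offset %u, %u bytes\n", i, l, k);
		l = (n + 3) & ~3;	/* frames are padded to 4 bytes */
	}
	/* offsets 0, 72 and 132 */
	return 0;
}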
564 | |||
565 | static void rx_urb_complete(struct urb *urb, struct pt_regs *pt_regs) | ||
566 | { | ||
567 | struct zd_usb *usb; | ||
568 | struct zd_usb_rx *rx; | ||
569 | const u8 *buffer; | ||
570 | unsigned int length; | ||
571 | |||
572 | switch (urb->status) { | ||
573 | case 0: | ||
574 | break; | ||
575 | case -ESHUTDOWN: | ||
576 | case -EINVAL: | ||
577 | case -ENODEV: | ||
578 | case -ENOENT: | ||
579 | case -ECONNRESET: | ||
580 | case -EPIPE: | ||
581 | return; | ||
582 | default: | ||
583 | dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status); | ||
584 | goto resubmit; | ||
585 | } | ||
586 | |||
587 | buffer = urb->transfer_buffer; | ||
588 | length = urb->actual_length; | ||
589 | usb = urb->context; | ||
590 | rx = &usb->rx; | ||
591 | |||
592 | if (length%rx->usb_packet_size > rx->usb_packet_size-4) { | ||
593 | /* If there is an old first fragment, we don't care. */ | ||
594 | dev_dbg_f(urb_dev(urb), "*** first fragment ***\n"); | ||
595 | ZD_ASSERT(length <= ARRAY_SIZE(rx->fragment)); | ||
596 | spin_lock(&rx->lock); | ||
597 | memcpy(rx->fragment, buffer, length); | ||
598 | rx->fragment_length = length; | ||
599 | spin_unlock(&rx->lock); | ||
600 | goto resubmit; | ||
601 | } | ||
602 | |||
603 | spin_lock(&rx->lock); | ||
604 | if (rx->fragment_length > 0) { | ||
605 | /* This is presumably the second fragment. */ | ||
606 | ZD_ASSERT(length + rx->fragment_length <= | ||
607 | ARRAY_SIZE(rx->fragment)); | ||
608 | dev_dbg_f(urb_dev(urb), "*** second fragment ***\n"); | ||
609 | memcpy(rx->fragment+rx->fragment_length, buffer, length); | ||
610 | handle_rx_packet(usb, rx->fragment, | ||
611 | rx->fragment_length + length); | ||
612 | rx->fragment_length = 0; | ||
613 | spin_unlock(&rx->lock); | ||
614 | } else { | ||
615 | spin_unlock(&rx->lock); | ||
616 | handle_rx_packet(usb, buffer, length); | ||
617 | } | ||
618 | |||
619 | resubmit: | ||
620 | usb_submit_urb(urb, GFP_ATOMIC); | ||
621 | } | ||
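The first-fragment test above treats a transfer as incomplete when its length modulo usb_packet_size lands in the last three bytes of a USB packet; such a buffer is stored and combined with the next URB. A small sketch of the check itself (the rationale for the threshold is the driver's; the transfer lengths here are hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned int usb_packet_size = 512;		/* high-speed bulk packets */
	unsigned int length[2] = { 1022, 1024 };	/* hypothetical transfers */
	unsigned int i;

	for (i = 0; i < 2; i++) {
		int first_fragment =
			length[i] % usb_packet_size > usb_packet_size - 4;
		printf("%u bytes: %s\n", length[i],
		       first_fragment ? "first fragment, buffer it"
				      : "complete, pass to handle_rx_packet");
	}
	/* 1022 % 512 = 510 > 508: fragment; 1024 % 512 = 0: complete */
	return 0;
}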
622 | |||
623 | struct urb *alloc_urb(struct zd_usb *usb) | ||
624 | { | ||
625 | struct usb_device *udev = zd_usb_to_usbdev(usb); | ||
626 | struct urb *urb; | ||
627 | void *buffer; | ||
628 | |||
629 | urb = usb_alloc_urb(0, GFP_NOFS); | ||
630 | if (!urb) | ||
631 | return NULL; | ||
632 | buffer = usb_buffer_alloc(udev, USB_MAX_RX_SIZE, GFP_NOFS, | ||
633 | &urb->transfer_dma); | ||
634 | if (!buffer) { | ||
635 | usb_free_urb(urb); | ||
636 | return NULL; | ||
637 | } | ||
638 | |||
639 | usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, EP_DATA_IN), | ||
640 | buffer, USB_MAX_RX_SIZE, | ||
641 | rx_urb_complete, usb); | ||
642 | urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; | ||
643 | |||
644 | return urb; | ||
645 | } | ||
646 | |||
647 | void free_urb(struct urb *urb) | ||
648 | { | ||
649 | if (!urb) | ||
650 | return; | ||
651 | usb_buffer_free(urb->dev, urb->transfer_buffer_length, | ||
652 | urb->transfer_buffer, urb->transfer_dma); | ||
653 | usb_free_urb(urb); | ||
654 | } | ||
655 | |||
656 | int zd_usb_enable_rx(struct zd_usb *usb) | ||
657 | { | ||
658 | int i, r; | ||
659 | struct zd_usb_rx *rx = &usb->rx; | ||
660 | struct urb **urbs; | ||
661 | |||
662 | dev_dbg_f(zd_usb_dev(usb), "\n"); | ||
663 | |||
664 | r = -ENOMEM; | ||
665 | urbs = kcalloc(URBS_COUNT, sizeof(struct urb *), GFP_NOFS); | ||
666 | if (!urbs) | ||
667 | goto error; | ||
668 | for (i = 0; i < URBS_COUNT; i++) { | ||
669 | urbs[i] = alloc_urb(usb); | ||
670 | if (!urbs[i]) | ||
671 | goto error; | ||
672 | } | ||
673 | |||
674 | ZD_ASSERT(!irqs_disabled()); | ||
675 | spin_lock_irq(&rx->lock); | ||
676 | if (rx->urbs) { | ||
677 | spin_unlock_irq(&rx->lock); | ||
678 | r = 0; | ||
679 | goto error; | ||
680 | } | ||
681 | rx->urbs = urbs; | ||
682 | rx->urbs_count = URBS_COUNT; | ||
683 | spin_unlock_irq(&rx->lock); | ||
684 | |||
685 | for (i = 0; i < URBS_COUNT; i++) { | ||
686 | r = usb_submit_urb(urbs[i], GFP_NOFS); | ||
687 | if (r) | ||
688 | goto error_submit; | ||
689 | } | ||
690 | |||
691 | return 0; | ||
692 | error_submit: | ||
693 | for (i = 0; i < URBS_COUNT; i++) { | ||
694 | usb_kill_urb(urbs[i]); | ||
695 | } | ||
696 | spin_lock_irq(&rx->lock); | ||
697 | rx->urbs = NULL; | ||
698 | rx->urbs_count = 0; | ||
699 | spin_unlock_irq(&rx->lock); | ||
700 | error: | ||
701 | if (urbs) { | ||
702 | for (i = 0; i < URBS_COUNT; i++) | ||
703 | free_urb(urbs[i]); | ||
704 | } | ||
705 | return r; | ||
706 | } | ||
707 | |||
708 | void zd_usb_disable_rx(struct zd_usb *usb) | ||
709 | { | ||
710 | int i; | ||
711 | unsigned long flags; | ||
712 | struct urb **urbs; | ||
713 | unsigned int count; | ||
714 | struct zd_usb_rx *rx = &usb->rx; | ||
715 | |||
716 | spin_lock_irqsave(&rx->lock, flags); | ||
717 | urbs = rx->urbs; | ||
718 | count = rx->urbs_count; | ||
719 | spin_unlock_irqrestore(&rx->lock, flags); | ||
720 | if (!urbs) | ||
721 | return; | ||
722 | |||
723 | for (i = 0; i < count; i++) { | ||
724 | usb_kill_urb(urbs[i]); | ||
725 | free_urb(urbs[i]); | ||
726 | } | ||
727 | kfree(urbs); | ||
728 | |||
729 | spin_lock_irqsave(&rx->lock, flags); | ||
730 | rx->urbs = NULL; | ||
731 | rx->urbs_count = 0; | ||
732 | spin_unlock_irqrestore(&rx->lock, flags); | ||
733 | } | ||
734 | |||
735 | static void tx_urb_complete(struct urb *urb, struct pt_regs *pt_regs) | ||
736 | { | ||
737 | int r; | ||
738 | |||
739 | switch (urb->status) { | ||
740 | case 0: | ||
741 | break; | ||
742 | case -ESHUTDOWN: | ||
743 | case -EINVAL: | ||
744 | case -ENODEV: | ||
745 | case -ENOENT: | ||
746 | case -ECONNRESET: | ||
747 | case -EPIPE: | ||
748 | dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status); | ||
749 | break; | ||
750 | default: | ||
751 | dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status); | ||
752 | goto resubmit; | ||
753 | } | ||
754 | free_urb: | ||
755 | usb_buffer_free(urb->dev, urb->transfer_buffer_length, | ||
756 | urb->transfer_buffer, urb->transfer_dma); | ||
757 | usb_free_urb(urb); | ||
758 | return; | ||
759 | resubmit: | ||
760 | r = usb_submit_urb(urb, GFP_ATOMIC); | ||
761 | if (r) { | ||
762 | dev_dbg_f(urb_dev(urb), "error resubmit urb %p %d\n", urb, r); | ||
763 | goto free_urb; | ||
764 | } | ||
765 | } | ||
766 | |||
767 | /* Puts the frame on the USB endpoint. It doesn't wait for | ||
768 | * completion. The frame must contain the control set. | ||
769 | */ | ||
770 | int zd_usb_tx(struct zd_usb *usb, const u8 *frame, unsigned int length) | ||
771 | { | ||
772 | int r; | ||
773 | struct usb_device *udev = zd_usb_to_usbdev(usb); | ||
774 | struct urb *urb; | ||
775 | void *buffer; | ||
776 | |||
777 | urb = usb_alloc_urb(0, GFP_ATOMIC); | ||
778 | if (!urb) { | ||
779 | r = -ENOMEM; | ||
780 | goto out; | ||
781 | } | ||
782 | |||
783 | buffer = usb_buffer_alloc(zd_usb_to_usbdev(usb), length, GFP_ATOMIC, | ||
784 | &urb->transfer_dma); | ||
785 | if (!buffer) { | ||
786 | r = -ENOMEM; | ||
787 | goto error_free_urb; | ||
788 | } | ||
789 | memcpy(buffer, frame, length); | ||
790 | |||
791 | usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, EP_DATA_OUT), | ||
792 | buffer, length, tx_urb_complete, NULL); | ||
793 | urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; | ||
794 | |||
795 | r = usb_submit_urb(urb, GFP_ATOMIC); | ||
796 | if (r) | ||
797 | goto error; | ||
798 | return 0; | ||
799 | error: | ||
800 | usb_buffer_free(zd_usb_to_usbdev(usb), length, buffer, | ||
801 | urb->transfer_dma); | ||
802 | error_free_urb: | ||
803 | usb_free_urb(urb); | ||
804 | out: | ||
805 | return r; | ||
806 | } | ||
807 | |||
808 | static inline void init_usb_interrupt(struct zd_usb *usb) | ||
809 | { | ||
810 | struct zd_usb_interrupt *intr = &usb->intr; | ||
811 | |||
812 | spin_lock_init(&intr->lock); | ||
813 | intr->interval = int_urb_interval(zd_usb_to_usbdev(usb)); | ||
814 | init_completion(&intr->read_regs.completion); | ||
815 | intr->read_regs.cr_int_addr = cpu_to_le16(usb_addr(usb, CR_INTERRUPT)); | ||
816 | } | ||
817 | |||
818 | static inline void init_usb_rx(struct zd_usb *usb) | ||
819 | { | ||
820 | struct zd_usb_rx *rx = &usb->rx; | ||
821 | spin_lock_init(&rx->lock); | ||
822 | if (interface_to_usbdev(usb->intf)->speed == USB_SPEED_HIGH) { | ||
823 | rx->usb_packet_size = 512; | ||
824 | } else { | ||
825 | rx->usb_packet_size = 64; | ||
826 | } | ||
827 | ZD_ASSERT(rx->fragment_length == 0); | ||
828 | } | ||
829 | |||
830 | static inline void init_usb_tx(struct zd_usb *usb) | ||
831 | { | ||
832 | /* FIXME: at this point we will allocate a fixed number of URBs for | ||
833 | * use in a cyclic scheme */ | ||
834 | } | ||
835 | |||
836 | void zd_usb_init(struct zd_usb *usb, struct net_device *netdev, | ||
837 | struct usb_interface *intf) | ||
838 | { | ||
839 | memset(usb, 0, sizeof(*usb)); | ||
840 | usb->intf = usb_get_intf(intf); | ||
841 | usb_set_intfdata(usb->intf, netdev); | ||
842 | init_usb_interrupt(usb); | ||
843 | init_usb_tx(usb); | ||
844 | init_usb_rx(usb); | ||
845 | } | ||
846 | |||
847 | int zd_usb_init_hw(struct zd_usb *usb) | ||
848 | { | ||
849 | int r; | ||
850 | struct zd_chip *chip = zd_usb_to_chip(usb); | ||
851 | |||
852 | ZD_ASSERT(mutex_is_locked(&chip->mutex)); | ||
853 | r = zd_ioread16_locked(chip, &usb->fw_base_offset, | ||
854 | USB_REG((u16)FW_BASE_ADDR_OFFSET)); | ||
855 | if (r) | ||
856 | return r; | ||
857 | dev_dbg_f(zd_usb_dev(usb), "fw_base_offset: %#06hx\n", | ||
858 | usb->fw_base_offset); | ||
859 | |||
860 | return 0; | ||
861 | } | ||
862 | |||
863 | void zd_usb_clear(struct zd_usb *usb) | ||
864 | { | ||
865 | usb_set_intfdata(usb->intf, NULL); | ||
866 | usb_put_intf(usb->intf); | ||
867 | memset(usb, 0, sizeof(*usb)); | ||
868 | /* FIXME: usb_interrupt, usb_tx, usb_rx? */ | ||
869 | } | ||
870 | |||
871 | static const char *speed(enum usb_device_speed speed) | ||
872 | { | ||
873 | switch (speed) { | ||
874 | case USB_SPEED_LOW: | ||
875 | return "low"; | ||
876 | case USB_SPEED_FULL: | ||
877 | return "full"; | ||
878 | case USB_SPEED_HIGH: | ||
879 | return "high"; | ||
880 | default: | ||
881 | return "unknown speed"; | ||
882 | } | ||
883 | } | ||
884 | |||
885 | static int scnprint_id(struct usb_device *udev, char *buffer, size_t size) | ||
886 | { | ||
887 | return scnprintf(buffer, size, "%04hx:%04hx v%04hx %s", | ||
888 | le16_to_cpu(udev->descriptor.idVendor), | ||
889 | le16_to_cpu(udev->descriptor.idProduct), | ||
890 | get_bcdDevice(udev), | ||
891 | speed(udev->speed)); | ||
892 | } | ||
893 | |||
894 | int zd_usb_scnprint_id(struct zd_usb *usb, char *buffer, size_t size) | ||
895 | { | ||
896 | struct usb_device *udev = interface_to_usbdev(usb->intf); | ||
897 | return scnprint_id(udev, buffer, size); | ||
898 | } | ||
899 | |||
900 | #ifdef DEBUG | ||
901 | static void print_id(struct usb_device *udev) | ||
902 | { | ||
903 | char buffer[40]; | ||
904 | |||
905 | scnprint_id(udev, buffer, sizeof(buffer)); | ||
906 | buffer[sizeof(buffer)-1] = 0; | ||
907 | dev_dbg_f(&udev->dev, "%s\n", buffer); | ||
908 | } | ||
909 | #else | ||
910 | #define print_id(udev) do { } while (0) | ||
911 | #endif | ||
912 | |||
913 | static int probe(struct usb_interface *intf, const struct usb_device_id *id) | ||
914 | { | ||
915 | int r; | ||
916 | struct usb_device *udev = interface_to_usbdev(intf); | ||
917 | struct net_device *netdev = NULL; | ||
918 | |||
919 | print_id(udev); | ||
920 | |||
921 | switch (udev->speed) { | ||
922 | case USB_SPEED_LOW: | ||
923 | case USB_SPEED_FULL: | ||
924 | case USB_SPEED_HIGH: | ||
925 | break; | ||
926 | default: | ||
927 | dev_dbg_f(&intf->dev, "Unknown USB speed\n"); | ||
928 | r = -ENODEV; | ||
929 | goto error; | ||
930 | } | ||
931 | |||
932 | netdev = zd_netdev_alloc(intf); | ||
933 | if (netdev == NULL) { | ||
934 | r = -ENOMEM; | ||
935 | goto error; | ||
936 | } | ||
937 | |||
938 | r = upload_firmware(udev, id->driver_info); | ||
939 | if (r) { | ||
940 | dev_err(&intf->dev, | ||
941 | "couldn't load firmware. Error number %d\n", r); | ||
942 | goto error; | ||
943 | } | ||
944 | |||
945 | r = usb_reset_configuration(udev); | ||
946 | if (r) { | ||
947 | dev_dbg_f(&intf->dev, | ||
948 | "couldn't reset configuration. Error number %d\n", r); | ||
949 | goto error; | ||
950 | } | ||
951 | |||
952 | /* At this point the interrupt endpoint is not enabled yet. We save | ||
953 | * the USB bandwidth until the network device is opened. Note, | ||
954 | * however, that initializing the MAC below requires the interrupt | ||
955 | * to be enabled temporarily. | ||
956 | */ | ||
957 | r = zd_mac_init_hw(zd_netdev_mac(netdev), id->driver_info); | ||
958 | if (r) { | ||
959 | dev_dbg_f(&intf->dev, | ||
960 | "couldn't initialize mac. Error number %d\n", r); | ||
961 | goto error; | ||
962 | } | ||
963 | |||
964 | r = register_netdev(netdev); | ||
965 | if (r) { | ||
966 | dev_dbg_f(&intf->dev, | ||
967 | "couldn't register netdev. Error number %d\n", r); | ||
968 | goto error; | ||
969 | } | ||
970 | |||
971 | dev_dbg_f(&intf->dev, "successful\n"); | ||
972 | dev_info(&intf->dev,"%s\n", netdev->name); | ||
973 | return 0; | ||
974 | error: | ||
975 | usb_reset_device(interface_to_usbdev(intf)); | ||
976 | zd_netdev_free(netdev); | ||
977 | return r; | ||
978 | } | ||
979 | |||
980 | static void disconnect(struct usb_interface *intf) | ||
981 | { | ||
982 | struct net_device *netdev = zd_intf_to_netdev(intf); | ||
983 | struct zd_mac *mac = zd_netdev_mac(netdev); | ||
984 | struct zd_usb *usb = &mac->chip.usb; | ||
985 | |||
986 | dev_dbg_f(zd_usb_dev(usb), "\n"); | ||
987 | |||
988 | zd_netdev_disconnect(netdev); | ||
989 | |||
990 | /* Just in case something has gone wrong! */ | ||
991 | zd_usb_disable_rx(usb); | ||
992 | zd_usb_disable_int(usb); | ||
993 | |||
994 | /* If the disconnect was caused by removing the driver module, the | ||
995 | * reset allows the driver to be reloaded. Without the reset here, | ||
996 | * the firmware upload in the probe function would fail when the | ||
997 | * driver is reloaded. | ||
998 | */ | ||
999 | usb_reset_device(interface_to_usbdev(intf)); | ||
1000 | |||
1001 | /* If somebody is still waiting on this lock now, that is an error. */ | ||
1002 | zd_netdev_free(netdev); | ||
1003 | dev_dbg(&intf->dev, "disconnected\n"); | ||
1004 | } | ||
1005 | |||
1006 | static struct usb_driver driver = { | ||
1007 | .name = "zd1211rw", | ||
1008 | .id_table = usb_ids, | ||
1009 | .probe = probe, | ||
1010 | .disconnect = disconnect, | ||
1011 | }; | ||
1012 | |||
1013 | static int __init usb_init(void) | ||
1014 | { | ||
1015 | int r; | ||
1016 | |||
1017 | pr_debug("usb_init()\n"); | ||
1018 | |||
1019 | r = usb_register(&driver); | ||
1020 | if (r) { | ||
1021 | printk(KERN_ERR "usb_register() failed. Error number %d\n", r); | ||
1022 | return r; | ||
1023 | } | ||
1024 | |||
1025 | pr_debug("zd1211rw initialized\n"); | ||
1026 | return 0; | ||
1027 | } | ||
1028 | |||
1029 | static void __exit usb_exit(void) | ||
1030 | { | ||
1031 | pr_debug("usb_exit()\n"); | ||
1032 | usb_deregister(&driver); | ||
1033 | } | ||
1034 | |||
1035 | module_init(usb_init); | ||
1036 | module_exit(usb_exit); | ||
1037 | |||
1038 | static int usb_int_regs_length(unsigned int count) | ||
1039 | { | ||
1040 | return sizeof(struct usb_int_regs) + count * sizeof(struct reg_data); | ||
1041 | } | ||
1042 | |||
1043 | static void prepare_read_regs_int(struct zd_usb *usb) | ||
1044 | { | ||
1045 | struct zd_usb_interrupt *intr = &usb->intr; | ||
1046 | |||
1047 | spin_lock(&intr->lock); | ||
1048 | intr->read_regs_enabled = 1; | ||
1049 | INIT_COMPLETION(intr->read_regs.completion); | ||
1050 | spin_unlock(&intr->lock); | ||
1051 | } | ||
1052 | |||
1053 | static int get_results(struct zd_usb *usb, u16 *values, | ||
1054 | struct usb_req_read_regs *req, unsigned int count) | ||
1055 | { | ||
1056 | int r; | ||
1057 | int i; | ||
1058 | struct zd_usb_interrupt *intr = &usb->intr; | ||
1059 | struct read_regs_int *rr = &intr->read_regs; | ||
1060 | struct usb_int_regs *regs = (struct usb_int_regs *)rr->buffer; | ||
1061 | |||
1062 | spin_lock(&intr->lock); | ||
1063 | |||
1064 | r = -EIO; | ||
1065 | /* The created block size seems to be larger than expected. However, | ||
1066 | * the results appear to be correct. | ||
1067 | */ | ||
1068 | if (rr->length < usb_int_regs_length(count)) { | ||
1069 | dev_dbg_f(zd_usb_dev(usb), | ||
1070 | "error: actual length %d less than expected %d\n", | ||
1071 | rr->length, usb_int_regs_length(count)); | ||
1072 | goto error_unlock; | ||
1073 | } | ||
1074 | if (rr->length > sizeof(rr->buffer)) { | ||
1075 | dev_dbg_f(zd_usb_dev(usb), | ||
1076 | "error: actual length %d exceeds buffer size %zu\n", | ||
1077 | rr->length, sizeof(rr->buffer)); | ||
1078 | goto error_unlock; | ||
1079 | } | ||
1080 | |||
1081 | for (i = 0; i < count; i++) { | ||
1082 | struct reg_data *rd = ®s->regs[i]; | ||
1083 | if (rd->addr != req->addr[i]) { | ||
1084 | dev_dbg_f(zd_usb_dev(usb), | ||
1085 | "rd[%d] addr %#06hx expected %#06hx\n", i, | ||
1086 | le16_to_cpu(rd->addr), | ||
1087 | le16_to_cpu(req->addr[i])); | ||
1088 | goto error_unlock; | ||
1089 | } | ||
1090 | values[i] = le16_to_cpu(rd->value); | ||
1091 | } | ||
1092 | |||
1093 | r = 0; | ||
1094 | error_unlock: | ||
1095 | spin_unlock(&intr->lock); | ||
1096 | return r; | ||
1097 | } | ||
1098 | |||
1099 | int zd_usb_ioread16v(struct zd_usb *usb, u16 *values, | ||
1100 | const zd_addr_t *addresses, unsigned int count) | ||
1101 | { | ||
1102 | int r; | ||
1103 | int i, req_len, actual_req_len; | ||
1104 | struct usb_device *udev; | ||
1105 | struct usb_req_read_regs *req = NULL; | ||
1106 | unsigned long timeout; | ||
1107 | |||
1108 | if (count < 1) { | ||
1109 | dev_dbg_f(zd_usb_dev(usb), "error: count is zero\n"); | ||
1110 | return -EINVAL; | ||
1111 | } | ||
1112 | if (count > USB_MAX_IOREAD16_COUNT) { | ||
1113 | dev_dbg_f(zd_usb_dev(usb), | ||
1114 | "error: count %u exceeds possible max %u\n", | ||
1115 | count, USB_MAX_IOREAD16_COUNT); | ||
1116 | return -EINVAL; | ||
1117 | } | ||
1118 | if (in_atomic()) { | ||
1119 | dev_dbg_f(zd_usb_dev(usb), | ||
1120 | "error: io in atomic context not supported\n"); | ||
1121 | return -EWOULDBLOCK; | ||
1122 | } | ||
1123 | if (!usb_int_enabled(usb)) { | ||
1124 | dev_dbg_f(zd_usb_dev(usb), | ||
1125 | "error: usb interrupt not enabled\n"); | ||
1126 | return -EWOULDBLOCK; | ||
1127 | } | ||
1128 | |||
1129 | req_len = sizeof(struct usb_req_read_regs) + count * sizeof(__le16); | ||
1130 | req = kmalloc(req_len, GFP_NOFS); | ||
1131 | if (!req) | ||
1132 | return -ENOMEM; | ||
1133 | req->id = cpu_to_le16(USB_REQ_READ_REGS); | ||
1134 | for (i = 0; i < count; i++) | ||
1135 | req->addr[i] = cpu_to_le16(usb_addr(usb, addresses[i])); | ||
1136 | |||
1137 | udev = zd_usb_to_usbdev(usb); | ||
1138 | prepare_read_regs_int(usb); | ||
1139 | r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, EP_REGS_OUT), | ||
1140 | req, req_len, &actual_req_len, 1000 /* ms */); | ||
1141 | if (r) { | ||
1142 | dev_dbg_f(zd_usb_dev(usb), | ||
1143 | "error in usb_bulk_msg(). Error number %d\n", r); | ||
1144 | goto error; | ||
1145 | } | ||
1146 | if (req_len != actual_req_len) { | ||
1147 | dev_dbg_f(zd_usb_dev(usb), "error in usb_bulk_msg()\n" | ||
1148 | " req_len %d != actual_req_len %d\n", | ||
1149 | req_len, actual_req_len); | ||
1150 | r = -EIO; | ||
1151 | goto error; | ||
1152 | } | ||
1153 | |||
1154 | timeout = wait_for_completion_timeout(&usb->intr.read_regs.completion, | ||
1155 | msecs_to_jiffies(1000)); | ||
1156 | if (!timeout) { | ||
1157 | disable_read_regs_int(usb); | ||
1158 | dev_dbg_f(zd_usb_dev(usb), "read timed out\n"); | ||
1159 | r = -ETIMEDOUT; | ||
1160 | goto error; | ||
1161 | } | ||
1162 | |||
1163 | r = get_results(usb, values, req, count); | ||
1164 | error: | ||
1165 | kfree(req); | ||
1166 | return r; | ||
1167 | } | ||
1168 | |||
1169 | int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs, | ||
1170 | unsigned int count) | ||
1171 | { | ||
1172 | int r; | ||
1173 | struct usb_device *udev; | ||
1174 | struct usb_req_write_regs *req = NULL; | ||
1175 | int i, req_len, actual_req_len; | ||
1176 | |||
1177 | if (count == 0) | ||
1178 | return 0; | ||
1179 | if (count > USB_MAX_IOWRITE16_COUNT) { | ||
1180 | dev_dbg_f(zd_usb_dev(usb), | ||
1181 | "error: count %u exceeds possible max %u\n", | ||
1182 | count, USB_MAX_IOWRITE16_COUNT); | ||
1183 | return -EINVAL; | ||
1184 | } | ||
1185 | if (in_atomic()) { | ||
1186 | dev_dbg_f(zd_usb_dev(usb), | ||
1187 | "error: io in atomic context not supported\n"); | ||
1188 | return -EWOULDBLOCK; | ||
1189 | } | ||
1190 | |||
1191 | req_len = sizeof(struct usb_req_write_regs) + | ||
1192 | count * sizeof(struct reg_data); | ||
1193 | req = kmalloc(req_len, GFP_NOFS); | ||
1194 | if (!req) | ||
1195 | return -ENOMEM; | ||
1196 | |||
1197 | req->id = cpu_to_le16(USB_REQ_WRITE_REGS); | ||
1198 | for (i = 0; i < count; i++) { | ||
1199 | struct reg_data *rw = &req->reg_writes[i]; | ||
1200 | rw->addr = cpu_to_le16(usb_addr(usb, ioreqs[i].addr)); | ||
1201 | rw->value = cpu_to_le16(ioreqs[i].value); | ||
1202 | } | ||
1203 | |||
1204 | udev = zd_usb_to_usbdev(usb); | ||
1205 | r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, EP_REGS_OUT), | ||
1206 | req, req_len, &actual_req_len, 1000 /* ms */); | ||
1207 | if (r) { | ||
1208 | dev_dbg_f(zd_usb_dev(usb), | ||
1209 | "error in usb_bulk_msg(). Error number %d\n", r); | ||
1210 | goto error; | ||
1211 | } | ||
1212 | if (req_len != actual_req_len) { | ||
1213 | dev_dbg_f(zd_usb_dev(usb), | ||
1214 | "error in usb_bulk_msg()" | ||
1215 | " req_len %d != actual_req_len %d\n", | ||
1216 | req_len, actual_req_len); | ||
1217 | r = -EIO; | ||
1218 | goto error; | ||
1219 | } | ||
1220 | |||
1221 | /* FALL-THROUGH with r == 0 */ | ||
1222 | error: | ||
1223 | kfree(req); | ||
1224 | return r; | ||
1225 | } | ||
1226 | |||
1227 | int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits) | ||
1228 | { | ||
1229 | int r; | ||
1230 | struct usb_device *udev; | ||
1231 | struct usb_req_rfwrite *req = NULL; | ||
1232 | int i, req_len, actual_req_len; | ||
1233 | u16 bit_value_template; | ||
1234 | |||
1235 | if (in_atomic()) { | ||
1236 | dev_dbg_f(zd_usb_dev(usb), | ||
1237 | "error: io in atomic context not supported\n"); | ||
1238 | return -EWOULDBLOCK; | ||
1239 | } | ||
1240 | if (bits < USB_MIN_RFWRITE_BIT_COUNT) { | ||
1241 | dev_dbg_f(zd_usb_dev(usb), | ||
1242 | "error: bits %d are smaller than" | ||
1243 | " USB_MIN_RFWRITE_BIT_COUNT %d\n", | ||
1244 | bits, USB_MIN_RFWRITE_BIT_COUNT); | ||
1245 | return -EINVAL; | ||
1246 | } | ||
1247 | if (bits > USB_MAX_RFWRITE_BIT_COUNT) { | ||
1248 | dev_dbg_f(zd_usb_dev(usb), | ||
1249 | "error: bits %d exceed USB_MAX_RFWRITE_BIT_COUNT %d\n", | ||
1250 | bits, USB_MAX_RFWRITE_BIT_COUNT); | ||
1251 | return -EINVAL; | ||
1252 | } | ||
1253 | #ifdef DEBUG | ||
1254 | if (value & (~0UL << bits)) { | ||
1255 | dev_dbg_f(zd_usb_dev(usb), | ||
1256 | "error: value %#09x has bits >= %d set\n", | ||
1257 | value, bits); | ||
1258 | return -EINVAL; | ||
1259 | } | ||
1260 | #endif /* DEBUG */ | ||
1261 | |||
1262 | dev_dbg_f(zd_usb_dev(usb), "value %#09x bits %d\n", value, bits); | ||
1263 | |||
1264 | r = zd_usb_ioread16(usb, &bit_value_template, CR203); | ||
1265 | if (r) { | ||
1266 | dev_dbg_f(zd_usb_dev(usb), | ||
1267 | "error %d: Couldn't read CR203\n", r); | ||
1268 | goto out; | ||
1269 | } | ||
1270 | bit_value_template &= ~(RF_IF_LE|RF_CLK|RF_DATA); | ||
1271 | |||
1272 | req_len = sizeof(struct usb_req_rfwrite) + bits * sizeof(__le16); | ||
1273 | req = kmalloc(req_len, GFP_NOFS); | ||
1274 | if (!req) | ||
1275 | return -ENOMEM; | ||
1276 | |||
1277 | req->id = cpu_to_le16(USB_REQ_WRITE_RF); | ||
1278 | /* 1: 3683a, but not used in ZYDAS driver */ | ||
1279 | req->value = cpu_to_le16(2); | ||
1280 | req->bits = cpu_to_le16(bits); | ||
1281 | |||
1282 | for (i = 0; i < bits; i++) { | ||
1283 | u16 bv = bit_value_template; | ||
1284 | if (value & (1 << (bits-1-i))) | ||
1285 | bv |= RF_DATA; | ||
1286 | req->bit_values[i] = cpu_to_le16(bv); | ||
1287 | } | ||
1288 | |||
1289 | udev = zd_usb_to_usbdev(usb); | ||
1290 | r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, EP_REGS_OUT), | ||
1291 | req, req_len, &actual_req_len, 1000 /* ms */); | ||
1292 | if (r) { | ||
1293 | dev_dbg_f(zd_usb_dev(usb), | ||
1294 | "error in usb_bulk_msg(). Error number %d\n", r); | ||
1295 | goto out; | ||
1296 | } | ||
1297 | if (req_len != actual_req_len) { | ||
1298 | dev_dbg_f(zd_usb_dev(usb), "error in usb_bulk_msg()" | ||
1299 | " req_len %d != actual_req_len %d\n", | ||
1300 | req_len, actual_req_len); | ||
1301 | r = -EIO; | ||
1302 | goto out; | ||
1303 | } | ||
1304 | |||
1305 | /* FALL-THROUGH with r == 0 */ | ||
1306 | out: | ||
1307 | kfree(req); | ||
1308 | return r; | ||
1309 | } | ||
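To make the serialization above concrete, here is a small userspace sketch (not driver code) of the per-bit expansion: each bit of the RF word, MSB first, becomes a copy of the masked CR203 template with RF_DATA set or cleared. The CR203 value passed in main() is a made-up example; the RF_* constants mirror the definitions in zd_usb.h below, and the driver additionally converts each entry with cpu_to_le16().

#include <stdint.h>
#include <stdio.h>

#define RF_IF_LE 0x02
#define RF_CLK   0x04
#define RF_DATA  0x08

/* Expand an RF word of 'bits' bits into per-bit CR203-style values. */
static void expand_rf_bits(uint32_t value, unsigned int bits, uint16_t cr203,
			   uint16_t *bit_values)
{
	uint16_t tmpl = cr203 & ~(RF_IF_LE | RF_CLK | RF_DATA);
	unsigned int i;

	for (i = 0; i < bits; i++) {
		uint16_t bv = tmpl;

		if (value & (1u << (bits - 1 - i)))	/* MSB first */
			bv |= RF_DATA;
		bit_values[i] = bv;
	}
}

int main(void)
{
	uint16_t out[24];
	unsigned int i;

	expand_rf_bits(0x123456, 24, 0x0030, out);	/* hypothetical CR203 value */
	for (i = 0; i < 24; i++)
		printf("%#x ", (unsigned)out[i]);
	printf("\n");
	return 0;
}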
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zd1211rw/zd_usb.h new file mode 100644 index 000000000000..d6420283bd5a --- /dev/null +++ b/drivers/net/wireless/zd1211rw/zd_usb.h | |||
@@ -0,0 +1,240 @@ | |||
1 | /* zd_usb.h: Header for USB interface implemented by ZD1211 chip | ||
2 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License as published by | ||
5 | * the Free Software Foundation; either version 2 of the License, or | ||
6 | * (at your option) any later version. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program; if not, write to the Free Software | ||
15 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
16 | */ | ||
17 | |||
18 | #ifndef _ZD_USB_H | ||
19 | #define _ZD_USB_H | ||
20 | |||
21 | #include <linux/completion.h> | ||
22 | #include <linux/netdevice.h> | ||
23 | #include <linux/spinlock.h> | ||
24 | #include <linux/skbuff.h> | ||
25 | #include <linux/usb.h> | ||
26 | |||
27 | #include "zd_def.h" | ||
28 | #include "zd_types.h" | ||
29 | |||
30 | enum devicetype { | ||
31 | DEVICE_ZD1211 = 0, | ||
32 | DEVICE_ZD1211B = 1, | ||
33 | }; | ||
34 | |||
35 | enum endpoints { | ||
36 | EP_CTRL = 0, | ||
37 | EP_DATA_OUT = 1, | ||
38 | EP_DATA_IN = 2, | ||
39 | EP_INT_IN = 3, | ||
40 | EP_REGS_OUT = 4, | ||
41 | }; | ||
42 | |||
43 | enum { | ||
44 | USB_MAX_TRANSFER_SIZE = 4096, /* bytes */ | ||
45 | /* FIXME: The original driver uses this value. We still have to check | ||
46 | * whether USB_MAX_TRANSFER_SIZE is sufficient and whether this larger | ||
47 | * size is needed only when one combined frame is split over two USB | ||
48 | * transactions. */ | ||
49 | USB_MAX_RX_SIZE = 4800, /* bytes */ | ||
50 | USB_MAX_IOWRITE16_COUNT = 15, | ||
51 | USB_MAX_IOWRITE32_COUNT = USB_MAX_IOWRITE16_COUNT/2, | ||
52 | USB_MAX_IOREAD16_COUNT = 15, | ||
53 | USB_MAX_IOREAD32_COUNT = USB_MAX_IOREAD16_COUNT/2, | ||
54 | USB_MIN_RFWRITE_BIT_COUNT = 16, | ||
55 | USB_MAX_RFWRITE_BIT_COUNT = 28, | ||
56 | USB_MAX_EP_INT_BUFFER = 64, | ||
57 | USB_ZD1211B_BCD_DEVICE = 0x4810, | ||
58 | }; | ||
59 | |||
60 | enum control_requests { | ||
61 | USB_REQ_WRITE_REGS = 0x21, | ||
62 | USB_REQ_READ_REGS = 0x22, | ||
63 | USB_REQ_WRITE_RF = 0x23, | ||
64 | USB_REQ_PROG_FLASH = 0x24, | ||
65 | USB_REQ_EEPROM_START = 0x0128, /* ? request is a byte */ | ||
66 | USB_REQ_EEPROM_MID = 0x28, | ||
67 | USB_REQ_EEPROM_END = 0x0228, /* ? request is a byte */ | ||
68 | USB_REQ_FIRMWARE_DOWNLOAD = 0x30, | ||
69 | USB_REQ_FIRMWARE_CONFIRM = 0x31, | ||
70 | USB_REQ_FIRMWARE_READ_DATA = 0x32, | ||
71 | }; | ||
72 | |||
73 | struct usb_req_read_regs { | ||
74 | __le16 id; | ||
75 | __le16 addr[0]; | ||
76 | } __attribute__((packed)); | ||
77 | |||
78 | struct reg_data { | ||
79 | __le16 addr; | ||
80 | __le16 value; | ||
81 | } __attribute__((packed)); | ||
82 | |||
83 | struct usb_req_write_regs { | ||
84 | __le16 id; | ||
85 | struct reg_data reg_writes[0]; | ||
86 | } __attribute__((packed)); | ||
87 | |||
88 | enum { | ||
89 | RF_IF_LE = 0x02, | ||
90 | RF_CLK = 0x04, | ||
91 | RF_DATA = 0x08, | ||
92 | }; | ||
93 | |||
94 | struct usb_req_rfwrite { | ||
95 | __le16 id; | ||
96 | __le16 value; | ||
97 | /* 1: 3683a */ | ||
98 | /* 2: other (default) */ | ||
99 | __le16 bits; | ||
100 | /* RF2595: 24 */ | ||
101 | __le16 bit_values[0]; | ||
102 | /* (CR203 & ~(RF_IF_LE | RF_CLK | RF_DATA)) | (bit ? RF_DATA : 0) */ | ||
103 | } __attribute__((packed)); | ||
104 | |||
105 | /* USB interrupt */ | ||
106 | |||
107 | enum usb_int_id { | ||
108 | USB_INT_TYPE = 0x01, | ||
109 | USB_INT_ID_REGS = 0x90, | ||
110 | USB_INT_ID_RETRY_FAILED = 0xa0, | ||
111 | }; | ||
112 | |||
113 | enum usb_int_flags { | ||
114 | USB_INT_READ_REGS_EN = 0x01, | ||
115 | }; | ||
116 | |||
117 | struct usb_int_header { | ||
118 | u8 type; /* must always be 1 */ | ||
119 | u8 id; | ||
120 | } __attribute__((packed)); | ||
121 | |||
122 | struct usb_int_regs { | ||
123 | struct usb_int_header hdr; | ||
124 | struct reg_data regs[0]; | ||
125 | } __attribute__((packed)); | ||
126 | |||
127 | struct usb_int_retry_fail { | ||
128 | struct usb_int_header hdr; | ||
129 | u8 new_rate; | ||
130 | u8 _dummy; | ||
131 | u8 addr[ETH_ALEN]; | ||
132 | u8 ibss_wakeup_dest; | ||
133 | } __attribute__((packed)); | ||
134 | |||
135 | struct read_regs_int { | ||
136 | struct completion completion; | ||
137 | /* Stores the USB interrupt structure; before the request is issued, it | ||
138 | * contains the USB address of the first requested register. | ||
139 | */ | ||
140 | u8 buffer[USB_MAX_EP_INT_BUFFER]; | ||
141 | int length; | ||
142 | __le16 cr_int_addr; | ||
143 | }; | ||
144 | |||
145 | struct zd_ioreq16 { | ||
146 | zd_addr_t addr; | ||
147 | u16 value; | ||
148 | }; | ||
149 | |||
150 | struct zd_ioreq32 { | ||
151 | zd_addr_t addr; | ||
152 | u32 value; | ||
153 | }; | ||
154 | |||
155 | struct zd_usb_interrupt { | ||
156 | struct read_regs_int read_regs; | ||
157 | spinlock_t lock; | ||
158 | struct urb *urb; | ||
159 | int interval; | ||
160 | u8 read_regs_enabled:1; | ||
161 | }; | ||
162 | |||
163 | static inline struct usb_int_regs *get_read_regs(struct zd_usb_interrupt *intr) | ||
164 | { | ||
165 | return (struct usb_int_regs *)intr->read_regs.buffer; | ||
166 | } | ||
167 | |||
168 | #define URBS_COUNT 5 | ||
169 | |||
170 | struct zd_usb_rx { | ||
171 | spinlock_t lock; | ||
172 | u8 fragment[2*USB_MAX_RX_SIZE]; | ||
173 | unsigned int fragment_length; | ||
174 | unsigned int usb_packet_size; | ||
175 | struct urb **urbs; | ||
176 | int urbs_count; | ||
177 | }; | ||
178 | |||
179 | struct zd_usb_tx { | ||
180 | spinlock_t lock; | ||
181 | }; | ||
182 | |||
183 | /* Contains the USB parts. The structure doesn't require a lock because | ||
184 | * intf and fw_base_offset will not be changed after initialization. | ||
185 | */ | ||
186 | struct zd_usb { | ||
187 | struct zd_usb_interrupt intr; | ||
188 | struct zd_usb_rx rx; | ||
189 | struct zd_usb_tx tx; | ||
190 | struct usb_interface *intf; | ||
191 | u16 fw_base_offset; | ||
192 | }; | ||
193 | |||
194 | #define zd_usb_dev(usb) (&(usb)->intf->dev) | ||
195 | |||
196 | static inline struct usb_device *zd_usb_to_usbdev(struct zd_usb *usb) | ||
197 | { | ||
198 | return interface_to_usbdev(usb->intf); | ||
199 | } | ||
200 | |||
201 | static inline struct net_device *zd_intf_to_netdev(struct usb_interface *intf) | ||
202 | { | ||
203 | return usb_get_intfdata(intf); | ||
204 | } | ||
205 | |||
206 | static inline struct net_device *zd_usb_to_netdev(struct zd_usb *usb) | ||
207 | { | ||
208 | return zd_intf_to_netdev(usb->intf); | ||
209 | } | ||
210 | |||
211 | void zd_usb_init(struct zd_usb *usb, struct net_device *netdev, | ||
212 | struct usb_interface *intf); | ||
213 | int zd_usb_init_hw(struct zd_usb *usb); | ||
214 | void zd_usb_clear(struct zd_usb *usb); | ||
215 | |||
216 | int zd_usb_scnprint_id(struct zd_usb *usb, char *buffer, size_t size); | ||
217 | |||
218 | int zd_usb_enable_int(struct zd_usb *usb); | ||
219 | void zd_usb_disable_int(struct zd_usb *usb); | ||
220 | |||
221 | int zd_usb_enable_rx(struct zd_usb *usb); | ||
222 | void zd_usb_disable_rx(struct zd_usb *usb); | ||
223 | |||
224 | int zd_usb_tx(struct zd_usb *usb, const u8 *frame, unsigned int length); | ||
225 | |||
226 | int zd_usb_ioread16v(struct zd_usb *usb, u16 *values, | ||
227 | const zd_addr_t *addresses, unsigned int count); | ||
228 | |||
229 | static inline int zd_usb_ioread16(struct zd_usb *usb, u16 *value, | ||
230 | const zd_addr_t addr) | ||
231 | { | ||
232 | return zd_usb_ioread16v(usb, value, (const zd_addr_t *)&addr, 1); | ||
233 | } | ||
234 | |||
235 | int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs, | ||
236 | unsigned int count); | ||
237 | |||
238 | int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits); | ||
239 | |||
240 | #endif /* _ZD_USB_H */ | ||
diff --git a/drivers/net/wireless/zd1211rw/zd_util.c b/drivers/net/wireless/zd1211rw/zd_util.c new file mode 100644 index 000000000000..d20036c15d11 --- /dev/null +++ b/drivers/net/wireless/zd1211rw/zd_util.c | |||
@@ -0,0 +1,82 @@ | |||
1 | /* zd_util.c | ||
2 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License as published by | ||
5 | * the Free Software Foundation; either version 2 of the License, or | ||
6 | * (at your option) any later version. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program; if not, write to the Free Software | ||
15 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
16 | * | ||
17 | * Utility functions | ||
18 | */ | ||
19 | |||
20 | #include "zd_def.h" | ||
21 | #include "zd_util.h" | ||
22 | |||
23 | #ifdef DEBUG | ||
24 | static char hex(u8 v) | ||
25 | { | ||
26 | v &= 0xf; | ||
27 | return (v < 10 ? '0' : 'a' - 10) + v; /* 0-9 -> '0'-'9', 10-15 -> 'a'-'f' */ | ||
28 | } | ||
29 | |||
30 | static char hex_print(u8 c) | ||
31 | { | ||
32 | return (0x20 <= c && c < 0x7f) ? c : '.'; | ||
33 | } | ||
34 | |||
35 | static void dump_line(const u8 *bytes, size_t size) | ||
36 | { | ||
37 | char c; | ||
38 | size_t i; | ||
39 | |||
40 | size = size <= 8 ? size : 8; | ||
41 | printk(KERN_DEBUG "zd1211 %p ", bytes); | ||
42 | for (i = 0; i < 8; i++) { | ||
43 | switch (i) { | ||
44 | case 1: | ||
45 | case 5: | ||
46 | c = '.'; | ||
47 | break; | ||
48 | case 3: | ||
49 | c = ':'; | ||
50 | break; | ||
51 | default: | ||
52 | c = ' '; | ||
53 | } | ||
54 | if (i < size) { | ||
55 | printk("%c%c%c", hex(bytes[i] >> 4), hex(bytes[i]), c); | ||
56 | } else { | ||
57 | printk(" %c", c); | ||
58 | } | ||
59 | } | ||
60 | |||
61 | for (i = 0; i < size; i++) | ||
62 | printk("%c", hex_print(bytes[i])); | ||
63 | printk("\n"); | ||
64 | } | ||
65 | |||
66 | void zd_hexdump(const void *bytes, size_t size) | ||
67 | { | ||
68 | size_t i = 0; | ||
69 | |||
70 | do { | ||
71 | dump_line((u8 *)bytes + i, size-i); | ||
72 | i += 8; | ||
73 | } while (i < size); | ||
74 | } | ||
75 | #endif /* DEBUG */ | ||
76 | |||
77 | void *zd_tail(const void *buffer, size_t buffer_size, size_t tail_size) | ||
78 | { | ||
79 | if (buffer_size < tail_size) | ||
80 | return NULL; | ||
81 | return (u8 *)buffer + (buffer_size - tail_size); | ||
82 | } | ||
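A hedged usage note: a helper like zd_tail() above is typically used to locate a fixed-size trailer (for example an RX status block) at the end of a received buffer, with NULL signalling that the buffer is too short. A hypothetical caller, assuming the usual kernel headers, might look like this (read_trailer and the 5-byte trailer size are illustrative, not taken from the driver):

/* Hypothetical caller: copy out a 5-byte status trailer, if present. */
static int read_trailer(const u8 *frame, size_t frame_len, u8 trailer[5])
{
	const void *tail = zd_tail(frame, frame_len, 5);

	if (!tail)
		return -EINVAL;		/* frame shorter than the trailer */
	memcpy(trailer, tail, 5);
	return 0;
}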
diff --git a/drivers/net/wireless/zd1211rw/zd_util.h b/drivers/net/wireless/zd1211rw/zd_util.h new file mode 100644 index 000000000000..ce26f7adea92 --- /dev/null +++ b/drivers/net/wireless/zd1211rw/zd_util.h | |||
@@ -0,0 +1,29 @@ | |||
1 | /* zd_util.h | ||
2 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License as published by | ||
5 | * the Free Software Foundation; either version 2 of the License, or | ||
6 | * (at your option) any later version. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program; if not, write to the Free Software | ||
15 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
16 | */ | ||
17 | |||
18 | #ifndef _ZD_UTIL_H | ||
19 | #define _ZD_UTIL_H | ||
20 | |||
21 | void *zd_tail(const void *buffer, size_t buffer_size, size_t tail_size); | ||
22 | |||
23 | #ifdef DEBUG | ||
24 | void zd_hexdump(const void *bytes, size_t size); | ||
25 | #else | ||
26 | #define zd_hexdump(bytes, size) | ||
27 | #endif /* DEBUG */ | ||
28 | |||
29 | #endif /* _ZD_UTIL_H */ | ||
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c index bbbf7e274a2a..8459a18254a4 100644 --- a/drivers/net/yellowfin.c +++ b/drivers/net/yellowfin.c | |||
@@ -19,37 +19,13 @@ | |||
19 | 19 | ||
20 | Support and updates available at | 20 | Support and updates available at |
21 | http://www.scyld.com/network/yellowfin.html | 21 | http://www.scyld.com/network/yellowfin.html |
22 | [link no longer provides useful info -jgarzik] | ||
22 | 23 | ||
23 | |||
24 | Linux kernel changelog: | ||
25 | ----------------------- | ||
26 | |||
27 | LK1.1.1 (jgarzik): Port to 2.4 kernel | ||
28 | |||
29 | LK1.1.2 (jgarzik): | ||
30 | * Merge in becker version 1.05 | ||
31 | |||
32 | LK1.1.3 (jgarzik): | ||
33 | * Various cleanups | ||
34 | * Update yellowfin_timer to correctly calculate duplex. | ||
35 | (suggested by Manfred Spraul) | ||
36 | |||
37 | LK1.1.4 (val@nmt.edu): | ||
38 | * Fix three endian-ness bugs | ||
39 | * Support dual function SYM53C885E ethernet chip | ||
40 | |||
41 | LK1.1.5 (val@nmt.edu): | ||
42 | * Fix forced full-duplex bug I introduced | ||
43 | |||
44 | LK1.1.6 (val@nmt.edu): | ||
45 | * Only print warning on truly "oversized" packets | ||
46 | * Fix theoretical bug on gigabit cards - return to 1.1.3 behavior | ||
47 | |||
48 | */ | 24 | */ |
49 | 25 | ||
50 | #define DRV_NAME "yellowfin" | 26 | #define DRV_NAME "yellowfin" |
51 | #define DRV_VERSION "1.05+LK1.1.6" | 27 | #define DRV_VERSION "2.0" |
52 | #define DRV_RELDATE "Feb 11, 2002" | 28 | #define DRV_RELDATE "Jun 27, 2006" |
53 | 29 | ||
54 | #define PFX DRV_NAME ": " | 30 | #define PFX DRV_NAME ": " |
55 | 31 | ||
@@ -239,8 +215,11 @@ enum capability_flags { | |||
239 | HasMACAddrBug=32, /* Only on early revs. */ | 215 | HasMACAddrBug=32, /* Only on early revs. */ |
240 | DontUseEeprom=64, /* Don't read the MAC from the EEPROm. */ | 216 | DontUseEeprom=64, /* Don't read the MAC from the EEPROm. */ |
241 | }; | 217 | }; |
218 | |||
242 | /* The PCI I/O space extent. */ | 219 | /* The PCI I/O space extent. */ |
243 | #define YELLOWFIN_SIZE 0x100 | 220 | enum { |
221 | YELLOWFIN_SIZE = 0x100, | ||
222 | }; | ||
244 | 223 | ||
245 | struct pci_id_info { | 224 | struct pci_id_info { |
246 | const char *name; | 225 | const char *name; |
@@ -248,16 +227,14 @@ struct pci_id_info { | |||
248 | int pci, pci_mask, subsystem, subsystem_mask; | 227 | int pci, pci_mask, subsystem, subsystem_mask; |
249 | int revision, revision_mask; /* Only 8 bits. */ | 228 | int revision, revision_mask; /* Only 8 bits. */ |
250 | } id; | 229 | } id; |
251 | int io_size; /* Needed for I/O region check or ioremap(). */ | ||
252 | int drv_flags; /* Driver use, intended as capability flags. */ | 230 | int drv_flags; /* Driver use, intended as capability flags. */ |
253 | }; | 231 | }; |
254 | 232 | ||
255 | static const struct pci_id_info pci_id_tbl[] = { | 233 | static const struct pci_id_info pci_id_tbl[] = { |
256 | {"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff}, | 234 | {"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff}, |
257 | YELLOWFIN_SIZE, | ||
258 | FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom}, | 235 | FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom}, |
259 | {"Symbios SYM83C885", { 0x07011000, 0xffffffff}, | 236 | {"Symbios SYM83C885", { 0x07011000, 0xffffffff}, |
260 | YELLOWFIN_SIZE, HasMII | DontUseEeprom }, | 237 | HasMII | DontUseEeprom }, |
261 | { } | 238 | { } |
262 | }; | 239 | }; |
263 | 240 | ||
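The YELLOWFIN_SIZE hunk above is a common kernel cleanup: a bare #define becomes an anonymous enum, which keeps the same value but gives the constant a real compile-time symbol (typed, scoped, visible to debuggers) instead of a textual macro. A minimal before/after sketch of the idiom (the comment text is illustrative):

/* Before: preprocessor constant, invisible to the compiler and debugger.
 *
 *	#define YELLOWFIN_SIZE 0x100
 *
 * After: anonymous enum member with the same value.
 */
enum {
	YELLOWFIN_SIZE = 0x100,		/* PCI I/O space extent, in bytes */
};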