author     Paul Mackerras <paulus@samba.org>	2006-07-31 20:37:25 -0400
committer  Paul Mackerras <paulus@samba.org>	2006-07-31 20:37:25 -0400
commit     57cad8084e0837e0f2c97da789ec9b3f36809be9 (patch)
tree       e9c790afb4286f78cb08d9664f58baa7e876fe55 /drivers/net
parent     cb18bd40030c879cd93fef02fd579f74dbab473d (diff)
parent     49b1e3ea19b1c95c2f012b8331ffb3b169e4c042 (diff)
Merge branch 'merge'
Diffstat (limited to 'drivers/net')
53 files changed, 3325 insertions(+), 1316 deletions(-)
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 2819de79442c..80e8ca013e44 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -17,172 +17,6 @@
 410 Severn Ave., Suite 210
 Annapolis MD 21403

-Linux Kernel Additions:
-
-0.99H+lk0.9 - David S. Miller - softnet, PCI DMA updates
-0.99H+lk1.0 - Jeff Garzik <jgarzik@pobox.com>
-Remove compatibility defines for kernel versions < 2.2.x.
-Update for new 2.3.x module interface
-LK1.1.2 (March 19, 2000)
-* New PCI interface (jgarzik)
-
-LK1.1.3 25 April 2000, Andrew Morton <andrewm@uow.edu.au>
-- Merged with 3c575_cb.c
-- Don't set RxComplete in boomerang interrupt enable reg
-- spinlock in vortex_timer to protect mdio functions
-- disable local interrupts around call to vortex_interrupt in
-vortex_tx_timeout() (So vortex_interrupt can use spin_lock())
-- Select window 3 in vortex_timer()'s write to Wn3_MAC_Ctrl
-- In vortex_start_xmit(), move the lock to _after_ we've altered
-vp->cur_tx and vp->tx_full. This defeats the race between
-vortex_start_xmit() and vortex_interrupt which was identified
-by Bogdan Costescu.
-- Merged back support for six new cards from various sources
-- Set vortex_have_pci if pci_module_init returns zero (fixes cardbus
-insertion oops)
-- Tell it that 3c905C has NWAY for 100bT autoneg
-- Fix handling of SetStatusEnd in 'Too much work..' code, as
-per 2.3.99's 3c575_cb (Dave Hinds).
-- Split ISR into two for vortex & boomerang
-- Fix MOD_INC/DEC races
-- Handle resource allocation failures.
-- Fix 3CCFE575CT LED polarity
-- Make tx_interrupt_mitigation the default
-
-LK1.1.4 25 April 2000, Andrew Morton <andrewm@uow.edu.au>
-- Add extra TxReset to vortex_up() to fix 575_cb hotplug initialisation probs.
-- Put vortex_info_tbl into __devinitdata
-- In the vortex_error StatsFull HACK, disable stats in vp->intr_enable as well
-as in the hardware.
-- Increased the loop counter in issue_and_wait from 2,000 to 4,000.
-
-LK1.1.5 28 April 2000, andrewm
-- Added powerpc defines (John Daniel <jdaniel@etresoft.com> said these work...)
-- Some extra diagnostics
-- In vortex_error(), reset the Tx on maxCollisions. Otherwise most
-chips usually get a Tx timeout.
-- Added extra_reset module parm
-- Replaced some inline timer manip with mod_timer
-(François romieu <Francois.Romieu@nic.fr>)
-- In vortex_up(), don't make Wn3_config initialisation dependent upon has_nway
-(this came across from 3c575_cb).
-
-LK1.1.6 06 Jun 2000, andrewm
-- Backed out the PPC defines.
-- Use del_timer_sync(), mod_timer().
-- Fix wrapped ulong comparison in boomerang_rx()
-- Add IS_TORNADO, use it to suppress 3c905C checksum error msg
-(Donald Becker, I Lee Hetherington <ilh@sls.lcs.mit.edu>)
-- Replace union wn3_config with BFINS/BFEXT manipulation for
-sparc64 (Pete Zaitcev, Peter Jones)
-- In vortex_error, do_tx_reset and vortex_tx_timeout(Vortex):
-do a netif_wake_queue() to better recover from errors. (Anders Pedersen,
-Donald Becker)
-- Print a warning on out-of-memory (rate limited to 1 per 10 secs)
-- Added two more Cardbus 575 NICs: 5b57 and 6564 (Paul Wagland)
-
-LK1.1.7 2 Jul 2000 andrewm
-- Better handling of shared IRQs
-- Reset the transmitter on a Tx reclaim error
-- Fixed crash under OOM during vortex_open() (Mark Hemment)
-- Fix Rx cessation problem during OOM (help from Mark Hemment)
-- The spinlocks around the mdio access were blocking interrupts for 300uS.
-Fix all this to use spin_lock_bh() within mdio_read/write
-- Only write to TxFreeThreshold if it's a boomerang - other NICs don't
-have one.
-- Added 802.3x MAC-layer flow control support
-
-LK1.1.8 13 Aug 2000 andrewm
-- Ignore request_region() return value - already reserved if Cardbus.
-- Merged some additional Cardbus flags from Don's 0.99Qk
-- Some fixes for 3c556 (Fred Maciel)
-- Fix for EISA initialisation (Jan Rekorajski)
-- Renamed MII_XCVR_PWR and EEPROM_230 to align with 3c575_cb and D. Becker's drivers
-- Fixed MII_XCVR_PWR for 3CCFE575CT
-- Added INVERT_LED_PWR, used it.
-- Backed out the extra_reset stuff
-
-LK1.1.9 12 Sep 2000 andrewm
-- Backed out the tx_reset_resume flags. It was a no-op.
-- In vortex_error, don't reset the Tx on txReclaim errors
-- In vortex_error, don't reset the Tx on maxCollisions errors.
-Hence backed out all the DownListPtr logic here.
-- In vortex_error, give Tornado cards a partial TxReset on
-maxCollisions (David Hinds). Defined MAX_COLLISION_RESET for this.
-- Redid some driver flags and device names based on pcmcia_cs-3.1.20.
-- Fixed a bug where, if vp->tx_full is set when the interface
-is downed, it remains set when the interface is upped. Bad
-things happen.
-
-LK1.1.10 17 Sep 2000 andrewm
-- Added EEPROM_8BIT for 3c555 (Fred Maciel)
-- Added experimental support for the 3c556B Laptop Hurricane (Louis Gerbarg)
-- Add HAS_NWAY to "3c900 Cyclone 10Mbps TPO"
-
-LK1.1.11 13 Nov 2000 andrewm
-- Dump MOD_INC/DEC_USE_COUNT, use SET_MODULE_OWNER
-
-LK1.1.12 1 Jan 2001 andrewm (2.4.0-pre1)
-- Call pci_enable_device before we request our IRQ (Tobias Ringstrom)
-- Add 3c590 PCI latency timer hack to vortex_probe1 (from 0.99Ra)
-- Added extended issue_and_wait for the 3c905CX.
-- Look for an MII on PHY index 24 first (3c905CX oddity).
-- Add HAS_NWAY to 3cSOHO100-TX (Brett Frankenberger)
-- Don't free skbs we don't own on oom path in vortex_open().
-
-LK1.1.13 27 Jan 2001
-- Added explicit `medialock' flag so we can truly
-lock the media type down with `options'.
-- "check ioremap return and some tidbits" (Arnaldo Carvalho de Melo <acme@conectiva.com.br>)
-- Added and used EEPROM_NORESET for 3c556B PM resumes.
-- Fixed leakage of vp->rx_ring.
-- Break out separate HAS_HWCKSM device capability flag.
-- Kill vp->tx_full (ANK)
-- Merge zerocopy fragment handling (ANK?)
-
-LK1.1.14 15 Feb 2001
-- Enable WOL. Can be turned on with `enable_wol' module option.
-- EISA and PCI initialisation fixes (jgarzik, Manfred Spraul)
-- If a device's internalconfig register reports it has NWAY,
-use it, even if autoselect is enabled.
-
-LK1.1.15 6 June 2001 akpm
-- Prevent double counting of received bytes (Lars Christensen)
-- Add ethtool support (jgarzik)
-- Add module parm descriptions (Andrzej M. Krzysztofowicz)
-- Implemented alloc_etherdev() API
-- Special-case the 'Tx error 82' message.
-
-LK1.1.16 18 July 2001 akpm
-- Make NETIF_F_SG dependent upon nr_free_highpages(), not on CONFIG_HIGHMEM
-- Lessen verbosity of bootup messages
-- Fix WOL - use new PM API functions.
-- Use netif_running() instead of vp->open in suspend/resume.
-- Don't reset the interface logic on open/close/rmmod. It upsets
-autonegotiation, and hence DHCP (from 0.99T).
-- Back out EEPROM_NORESET flag because of the above (we do it for all
-NICs).
-- Correct 3c982 identification string
-- Rename wait_for_completion() to issue_and_wait() to avoid completion.h
-clash.
-
-LK1.1.17 18Dec01 akpm
-- PCI ID 9805 is a Python-T, not a dual-port Cyclone. Apparently.
-And it has NWAY.
-- Mask our advertised modes (vp->advertising) with our capabilities
-(MII reg5) when deciding which duplex mode to use.
-- Add `global_options' as default for options[]. Ditto global_enable_wol,
-global_full_duplex.
-
-LK1.1.18 01Jul02 akpm
-- Fix for undocumented transceiver power-up bit on some 3c566B's
-(Donald Becker, Rahul Karnik)
-
-- See http://www.zip.com.au/~akpm/linux/#3c59x-2.3 for more details.
-- Also see Documentation/networking/vortex.txt
-
-LK1.1.19 10Nov02 Marc Zyngier <maz@wild-wind.fr.eu.org>
-- EISA sysfs integration.
 */

 /*
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index d2150baa7e35..1428bb7715af 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -1916,7 +1916,7 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 regs = ioremap(pciaddr, CP_REGS_SIZE);
 if (!regs) {
 rc = -EIO;
-dev_err(&pdev->dev, "Cannot map PCI MMIO (%lx@%lx)\n",
+dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
 (unsigned long long)pci_resource_len(pdev, 1),
 (unsigned long long)pciaddr);
 goto err_out_res;
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index cd9718512d1c..e4f4eaff7679 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -1709,6 +1709,7 @@ static int rtl8139_start_xmit (struct sk_buff *skb, struct net_device *dev)
 void __iomem *ioaddr = tp->mmio_addr;
 unsigned int entry;
 unsigned int len = skb->len;
+unsigned long flags;

 /* Calculate the next Tx descriptor entry. */
 entry = tp->cur_tx % NUM_TX_DESC;
@@ -1725,7 +1726,7 @@ static int rtl8139_start_xmit (struct sk_buff *skb, struct net_device *dev)
 return 0;
 }

-spin_lock_irq(&tp->lock);
+spin_lock_irqsave(&tp->lock, flags);
 RTL_W32_F (TxStatus0 + (entry * sizeof (u32)),
 tp->tx_flag | max(len, (unsigned int)ETH_ZLEN));

@@ -1736,7 +1737,7 @@ static int rtl8139_start_xmit (struct sk_buff *skb, struct net_device *dev)

 if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx)
 netif_stop_queue (dev);
-spin_unlock_irq(&tp->lock);
+spin_unlock_irqrestore(&tp->lock, flags);

 if (netif_msg_tx_queued(tp))
 printk (KERN_DEBUG "%s: Queued Tx packet size %u to slot %d.\n",
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 64b6a72b4f6a..db73de0d2511 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -1639,7 +1639,7 @@ bnx2_tx_int(struct bnx2 *bp)
 skb = tx_buf->skb;
 #ifdef BCM_TSO
 /* partial BD completions possible with TSO packets */
-if (skb_shinfo(skb)->gso_size) {
+if (skb_is_gso(skb)) {
 u16 last_idx, last_ring_idx;

 last_idx = sw_cons +
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 87f94d939ff8..61b3754f50ff 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1417,7 +1417,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
 struct cpl_tx_pkt *cpl;

 #ifdef NETIF_F_TSO
-if (skb_shinfo(skb)->gso_size) {
+if (skb_is_gso(skb)) {
 int eth_type;
 struct cpl_tx_pkt_lso *hdr;

diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 36d511729f71..2146cf74425e 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -132,6 +132,7 @@ static int __init dummy_init_module(void)
 for (i = 0; i < numdummies && !err; i++)
 err = dummy_init_one(i);
 if (err) {
+i--;
 while (--i >= 0)
 dummy_free_one(i);
 }
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 3042d33e2d4d..d304297c496c 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -68,7 +68,6 @@
 #ifdef NETIF_F_TSO
 #include <net/checksum.h>
 #endif
-#include <linux/workqueue.h>
 #include <linux/mii.h>
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
@@ -111,6 +110,9 @@ struct e1000_adapter;
 #define E1000_MIN_RXD 80
 #define E1000_MAX_82544_RXD 4096

+/* this is the size past which hardware will drop packets when setting LPE=0 */
+#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+
 /* Supported Rx Buffer Sizes */
 #define E1000_RXBUFFER_128 128 /* Used for packet split */
 #define E1000_RXBUFFER_256 256 /* Used for packet split */
@@ -143,6 +145,7 @@ struct e1000_adapter;

 #define AUTO_ALL_MODES 0
 #define E1000_EEPROM_82544_APM 0x0004
+#define E1000_EEPROM_ICH8_APME 0x0004
 #define E1000_EEPROM_APME 0x0400

 #ifndef E1000_MASTER_SLAVE
@@ -254,7 +257,6 @@ struct e1000_adapter {
 spinlock_t tx_queue_lock;
 #endif
 atomic_t irq_sem;
-struct work_struct watchdog_task;
 struct work_struct reset_task;
 uint8_t fc_autoneg;

@@ -339,8 +341,14 @@ struct e1000_adapter {
 #ifdef NETIF_F_TSO
 boolean_t tso_force;
 #endif
+boolean_t smart_power_down; /* phy smart power down */
+unsigned long flags;
 };

+enum e1000_state_t {
+__E1000_DRIVER_TESTING,
+__E1000_RESETTING,
+};

 /* e1000_main.c */
 extern char e1000_driver_name[];
@@ -348,6 +356,7 @@ extern char e1000_driver_version[];
 int e1000_up(struct e1000_adapter *adapter);
 void e1000_down(struct e1000_adapter *adapter);
 void e1000_reset(struct e1000_adapter *adapter);
+void e1000_reinit_locked(struct e1000_adapter *adapter);
 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
 void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index d19664891768..88a82ba88f57 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -109,7 +109,8 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 SUPPORTED_1000baseT_Full|
 SUPPORTED_Autoneg |
 SUPPORTED_TP);
-
+if (hw->phy_type == e1000_phy_ife)
+ecmd->supported &= ~SUPPORTED_1000baseT_Full;
 ecmd->advertising = ADVERTISED_TP;

 if (hw->autoneg == 1) {
@@ -203,11 +204,9 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)

 /* reset the link */

-if (netif_running(adapter->netdev)) {
-e1000_down(adapter);
-e1000_reset(adapter);
-e1000_up(adapter);
-} else
+if (netif_running(adapter->netdev))
+e1000_reinit_locked(adapter);
+else
 e1000_reset(adapter);

 return 0;
@@ -254,10 +253,9 @@ e1000_set_pauseparam(struct net_device *netdev,
 hw->original_fc = hw->fc;

 if (adapter->fc_autoneg == AUTONEG_ENABLE) {
-if (netif_running(adapter->netdev)) {
-e1000_down(adapter);
-e1000_up(adapter);
-} else
+if (netif_running(adapter->netdev))
+e1000_reinit_locked(adapter);
+else
 e1000_reset(adapter);
 } else
 return ((hw->media_type == e1000_media_type_fiber) ?
@@ -279,10 +277,9 @@ e1000_set_rx_csum(struct net_device *netdev, uint32_t data)
 struct e1000_adapter *adapter = netdev_priv(netdev);
 adapter->rx_csum = data;

-if (netif_running(netdev)) {
-e1000_down(adapter);
-e1000_up(adapter);
-} else
+if (netif_running(netdev))
+e1000_reinit_locked(adapter);
+else
 e1000_reset(adapter);
 return 0;
 }
@@ -577,6 +574,7 @@ e1000_get_drvinfo(struct net_device *netdev,
 case e1000_82572:
 case e1000_82573:
 case e1000_80003es2lan:
+case e1000_ich8lan:
 sprintf(firmware_version, "%d.%d-%d",
 (eeprom_data & 0xF000) >> 12,
 (eeprom_data & 0x0FF0) >> 4,
@@ -631,6 +629,9 @@ e1000_set_ringparam(struct net_device *netdev,
 tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues;
 rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues;

+while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+msleep(1);
+
 if (netif_running(adapter->netdev))
 e1000_down(adapter);

@@ -691,9 +692,11 @@
 adapter->rx_ring = rx_new;
 adapter->tx_ring = tx_new;
 if ((err = e1000_up(adapter)))
-return err;
+goto err_setup;
 }

+clear_bit(__E1000_RESETTING, &adapter->flags);
+
 return 0;
 err_setup_tx:
 e1000_free_all_rx_resources(adapter);
@@ -701,6 +704,8 @@ err_setup_rx:
 adapter->rx_ring = rx_old;
 adapter->tx_ring = tx_old;
 e1000_up(adapter);
+err_setup:
+clear_bit(__E1000_RESETTING, &adapter->flags);
 return err;
 }

@@ -754,6 +759,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
 toggle = 0x7FFFF3FF;
 break;
 case e1000_82573:
+case e1000_ich8lan:
 toggle = 0x7FFFF033;
 break;
 default:
@@ -773,11 +779,12 @@
 }
 /* restore previous status */
 E1000_WRITE_REG(&adapter->hw, STATUS, before);
-
+if (adapter->hw.mac_type != e1000_ich8lan) {
 REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
 REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF);
 REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF);
 REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF);
+}
 REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF);
 REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
 REG_PATTERN_TEST(RDLEN, 0x000FFF80, 0x000FFFFF);
@@ -790,20 +797,22 @@
 REG_PATTERN_TEST(TDLEN, 0x000FFF80, 0x000FFFFF);

 REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000);
-REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0x003FFFFB);
+before = (adapter->hw.mac_type == e1000_ich8lan ?
+0x06C3B33E : 0x06DFB3FE);
+REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB);
 REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);

 if (adapter->hw.mac_type >= e1000_82543) {

-REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0xFFFFFFFF);
+REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF);
 REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
-REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF);
+if (adapter->hw.mac_type != e1000_ich8lan)
+REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF);
 REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
 REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF);
-
-for (i = 0; i < E1000_RAR_ENTRIES; i++) {
-REG_PATTERN_TEST(RA + ((i << 1) << 2), 0xFFFFFFFF,
-0xFFFFFFFF);
+value = (adapter->hw.mac_type == e1000_ich8lan ?
+E1000_RAR_ENTRIES_ICH8LAN : E1000_RAR_ENTRIES);
+for (i = 0; i < value; i++) {
 REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
 0xFFFFFFFF);
 }
@@ -817,7 +826,9 @@

 }

-for (i = 0; i < E1000_MC_TBL_SIZE; i++)
+value = (adapter->hw.mac_type == e1000_ich8lan ?
+E1000_MC_TBL_SIZE_ICH8LAN : E1000_MC_TBL_SIZE);
+for (i = 0; i < value; i++)
 REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF);

 *data = 0;
@@ -889,6 +900,8 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
 /* Test each interrupt */
 for (; i < 10; i++) {

+if (adapter->hw.mac_type == e1000_ich8lan && i == 8)
+continue;
 /* Interrupt to test */
 mask = 1 << i;

@@ -1246,18 +1259,33 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
 } else if (adapter->hw.phy_type == e1000_phy_gg82563) {
 e1000_write_phy_reg(&adapter->hw,
 GG82563_PHY_KMRN_MODE_CTRL,
-0x1CE);
+0x1CC);
 }
-/* force 1000, set loopback */
-e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x4140);

-/* Now set up the MAC to the same speed/duplex as the PHY. */
 ctrl_reg = E1000_READ_REG(&adapter->hw, CTRL);
-ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
-ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
-E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
-E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
-E1000_CTRL_FD); /* Force Duplex to FULL */
+
+if (adapter->hw.phy_type == e1000_phy_ife) {
+/* force 100, set loopback */
+e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x6100);
+
+/* Now set up the MAC to the same speed/duplex as the PHY. */
+ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
+ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+E1000_CTRL_SPD_100 |/* Force Speed to 100 */
+E1000_CTRL_FD); /* Force Duplex to FULL */
+} else {
+/* force 1000, set loopback */
+e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x4140);
+
+/* Now set up the MAC to the same speed/duplex as the PHY. */
+ctrl_reg = E1000_READ_REG(&adapter->hw, CTRL);
+ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
+ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
+E1000_CTRL_FD); /* Force Duplex to FULL */
+}

 if (adapter->hw.media_type == e1000_media_type_copper &&
 adapter->hw.phy_type == e1000_phy_m88) {
@@ -1317,6 +1345,7 @@ e1000_set_phy_loopback(struct e1000_adapter *adapter)
 case e1000_82572:
 case e1000_82573:
 case e1000_80003es2lan:
+case e1000_ich8lan:
 return e1000_integrated_phy_loopback(adapter);
 break;

@@ -1568,6 +1597,7 @@ e1000_diag_test(struct net_device *netdev,
 struct e1000_adapter *adapter = netdev_priv(netdev);
 boolean_t if_running = netif_running(netdev);

+set_bit(__E1000_DRIVER_TESTING, &adapter->flags);
 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
 /* Offline tests */

@@ -1582,7 +1612,8 @@
 eth_test->flags |= ETH_TEST_FL_FAILED;

 if (if_running)
-e1000_down(adapter);
+/* indicate we're in test mode */
+dev_close(netdev);
 else
 e1000_reset(adapter);

@@ -1607,8 +1638,9 @@
 adapter->hw.autoneg = autoneg;

 e1000_reset(adapter);
+clear_bit(__E1000_DRIVER_TESTING, &adapter->flags);
 if (if_running)
-e1000_up(adapter);
+dev_open(netdev);
 } else {
 /* Online tests */
 if (e1000_link_test(adapter, &data[4]))
@@ -1619,6 +1651,8 @@
 data[1] = 0;
 data[2] = 0;
 data[3] = 0;
+
+clear_bit(__E1000_DRIVER_TESTING, &adapter->flags);
 }
 msleep_interruptible(4 * 1000);
 }
@@ -1778,21 +1812,18 @@ e1000_phys_id(struct net_device *netdev, uint32_t data)
 mod_timer(&adapter->blink_timer, jiffies);
 msleep_interruptible(data * 1000);
 del_timer_sync(&adapter->blink_timer);
-} else if (adapter->hw.mac_type < e1000_82573) {
-E1000_WRITE_REG(&adapter->hw, LEDCTL,
-(E1000_LEDCTL_LED2_BLINK_RATE |
-E1000_LEDCTL_LED0_BLINK | E1000_LEDCTL_LED2_BLINK |
-(E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) |
-(E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED0_MODE_SHIFT) |
-(E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED1_MODE_SHIFT)));
-msleep_interruptible(data * 1000);
+} else if (adapter->hw.phy_type == e1000_phy_ife) {
+if (!adapter->blink_timer.function) {
+init_timer(&adapter->blink_timer);
+adapter->blink_timer.function = e1000_led_blink_callback;
+adapter->blink_timer.data = (unsigned long) adapter;
+}
+mod_timer(&adapter->blink_timer, jiffies);
+msleep_interruptible(data * 1000);
+del_timer_sync(&adapter->blink_timer);
+e1000_write_phy_reg(&(adapter->hw), IFE_PHY_SPECIAL_CONTROL_LED, 0);
 } else {
-E1000_WRITE_REG(&adapter->hw, LEDCTL,
-(E1000_LEDCTL_LED2_BLINK_RATE |
-E1000_LEDCTL_LED1_BLINK | E1000_LEDCTL_LED2_BLINK |
-(E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) |
-(E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED1_MODE_SHIFT) |
-(E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED0_MODE_SHIFT)));
+e1000_blink_led_start(&adapter->hw);
 msleep_interruptible(data * 1000);
 }

@@ -1807,10 +1838,8 @@ static int
 e1000_nway_reset(struct net_device *netdev)
 {
 struct e1000_adapter *adapter = netdev_priv(netdev);
-if (netif_running(netdev)) {
-e1000_down(adapter);
-e1000_up(adapter);
-}
+if (netif_running(netdev))
+e1000_reinit_locked(adapter);
 return 0;
 }

diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 3959039b16ec..583518ae49ce 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -101,7 +101,8 @@ static void e1000_write_reg_io(struct e1000_hw *hw, uint32_t offset,

 #define E1000_WRITE_REG_IO(a, reg, val) \
 e1000_write_reg_io((a), E1000_##reg, val)
-static int32_t e1000_configure_kmrn_for_10_100(struct e1000_hw *hw);
+static int32_t e1000_configure_kmrn_for_10_100(struct e1000_hw *hw,
+uint16_t duplex);
 static int32_t e1000_configure_kmrn_for_1000(struct e1000_hw *hw);

 /* IGP cable length table */
@@ -156,6 +157,14 @@ e1000_set_phy_type(struct e1000_hw *hw)
 hw->phy_type = e1000_phy_igp;
 break;
 }
+case IGP03E1000_E_PHY_ID:
+hw->phy_type = e1000_phy_igp_3;
+break;
+case IFE_E_PHY_ID:
+case IFE_PLUS_E_PHY_ID:
+case IFE_C_E_PHY_ID:
+hw->phy_type = e1000_phy_ife;
+break;
 case GG82563_E_PHY_ID:
 if (hw->mac_type == e1000_80003es2lan) {
 hw->phy_type = e1000_phy_gg82563;
@@ -332,6 +341,7 @@ e1000_set_mac_type(struct e1000_hw *hw)
 break;
 case E1000_DEV_ID_82541EI:
 case E1000_DEV_ID_82541EI_MOBILE:
+case E1000_DEV_ID_82541ER_LOM:
 hw->mac_type = e1000_82541;
 break;
 case E1000_DEV_ID_82541ER:
@@ -341,6 +351,7 @@
 hw->mac_type = e1000_82541_rev_2;
 break;
 case E1000_DEV_ID_82547EI:
+case E1000_DEV_ID_82547EI_MOBILE:
 hw->mac_type = e1000_82547;
 break;
 case E1000_DEV_ID_82547GI:
@@ -354,6 +365,7 @@
 case E1000_DEV_ID_82572EI_COPPER:
 case E1000_DEV_ID_82572EI_FIBER:
 case E1000_DEV_ID_82572EI_SERDES:
+case E1000_DEV_ID_82572EI:
 hw->mac_type = e1000_82572;
 break;
 case E1000_DEV_ID_82573E:
@@ -361,16 +373,29 @@
 case E1000_DEV_ID_82573L:
 hw->mac_type = e1000_82573;
 break;
+case E1000_DEV_ID_80003ES2LAN_COPPER_SPT:
+case E1000_DEV_ID_80003ES2LAN_SERDES_SPT:
 case E1000_DEV_ID_80003ES2LAN_COPPER_DPT:
 case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
 hw->mac_type = e1000_80003es2lan;
 break;
+case E1000_DEV_ID_ICH8_IGP_M_AMT:
+case E1000_DEV_ID_ICH8_IGP_AMT:
+case E1000_DEV_ID_ICH8_IGP_C:
+case E1000_DEV_ID_ICH8_IFE:
+case E1000_DEV_ID_ICH8_IGP_M:
+hw->mac_type = e1000_ich8lan;
+break;
 default:
 /* Should never have loaded on this device */
 return -E1000_ERR_MAC_TYPE;
 }

 switch(hw->mac_type) {
+case e1000_ich8lan:
+hw->swfwhw_semaphore_present = TRUE;
+hw->asf_firmware_present = TRUE;
+break;
 case e1000_80003es2lan:
 hw->swfw_sync_present = TRUE;
 /* fall through */
@@ -423,6 +448,7 @@ e1000_set_media_type(struct e1000_hw *hw)
 case e1000_82542_rev2_1:
 hw->media_type = e1000_media_type_fiber;
 break;
+case e1000_ich8lan:
 case e1000_82573:
 /* The STATUS_TBIMODE bit is reserved or reused for the this
 * device.
@@ -527,6 +553,14 @@ e1000_reset_hw(struct e1000_hw *hw)
 } while(timeout);
 }

+/* Workaround for ICH8 bit corruption issue in FIFO memory */
+if (hw->mac_type == e1000_ich8lan) {
+/* Set Tx and Rx buffer allocation to 8k apiece. */
+E1000_WRITE_REG(hw, PBA, E1000_PBA_8K);
+/* Set Packet Buffer Size to 16k. */
+E1000_WRITE_REG(hw, PBS, E1000_PBS_16K);
+}
+
 /* Issue a global reset to the MAC. This will reset the chip's
 * transmit, receive, DMA, and link units. It will not effect
 * the current PCI configuration. The global reset bit is self-
@@ -550,6 +584,20 @@
 /* Reset is performed on a shadow of the control register */
 E1000_WRITE_REG(hw, CTRL_DUP, (ctrl | E1000_CTRL_RST));
 break;
+case e1000_ich8lan:
+if (!hw->phy_reset_disable &&
+e1000_check_phy_reset_block(hw) == E1000_SUCCESS) {
+/* e1000_ich8lan PHY HW reset requires MAC CORE reset
+* at the same time to make sure the interface between
+* MAC and the external PHY is reset.
+*/
+ctrl |= E1000_CTRL_PHY_RST;
+}
+
+e1000_get_software_flag(hw);
+E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_RST));
+msec_delay(5);
+break;
 default:
 E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_RST));
 break;
@@ -591,6 +639,7 @@
 /* fall through */
 case e1000_82571:
 case e1000_82572:
+case e1000_ich8lan:
 case e1000_80003es2lan:
 ret_val = e1000_get_auto_rd_done(hw);
 if(ret_val)
@@ -633,6 +682,12 @@
 e1000_pci_set_mwi(hw);
 }

+if (hw->mac_type == e1000_ich8lan) {
+uint32_t kab = E1000_READ_REG(hw, KABGTXD);
+kab |= E1000_KABGTXD_BGSQLBIAS;
+E1000_WRITE_REG(hw, KABGTXD, kab);
+}
+
 return E1000_SUCCESS;
 }

@@ -675,9 +730,12 @@ e1000_init_hw(struct e1000_hw *hw)

 /* Disabling VLAN filtering. */
 DEBUGOUT("Initializing the IEEE VLAN\n");
-if (hw->mac_type < e1000_82545_rev_3)
-E1000_WRITE_REG(hw, VET, 0);
-e1000_clear_vfta(hw);
+/* VET hardcoded to standard value and VFTA removed in ICH8 LAN */
+if (hw->mac_type != e1000_ich8lan) {
+if (hw->mac_type < e1000_82545_rev_3)
+E1000_WRITE_REG(hw, VET, 0);
+e1000_clear_vfta(hw);
+}

 /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */
 if(hw->mac_type == e1000_82542_rev2_0) {
@@ -705,8 +763,14 @@
 /* Zero out the Multicast HASH table */
 DEBUGOUT("Zeroing the MTA\n");
 mta_size = E1000_MC_TBL_SIZE;
-for(i = 0; i < mta_size; i++)
+if (hw->mac_type == e1000_ich8lan)
+mta_size = E1000_MC_TBL_SIZE_ICH8LAN;
+for(i = 0; i < mta_size; i++) {
 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
+/* use write flush to prevent Memory Write Block (MWB) from
+* occuring when accessing our register space */
+E1000_WRITE_FLUSH(hw);
+}

 /* Set the PCI priority bit correctly in the CTRL register. This
 * determines if the adapter gives priority to receives, or if it
@@ -744,6 +808,10 @@
 break;
 }

+/* More time needed for PHY to initialize */
+if (hw->mac_type == e1000_ich8lan)
+msec_delay(15);
+
 /* Call a subroutine to configure the link and setup flow control. */
 ret_val = e1000_setup_link(hw);

@@ -757,6 +825,7 @@
 case e1000_82571:
 case e1000_82572:
 case e1000_82573:
+case e1000_ich8lan:
 case e1000_80003es2lan:
 ctrl |= E1000_TXDCTL_COUNT_DESC;
 break;
@@ -795,6 +864,7 @@
 /* Fall through */
 case e1000_82571:
 case e1000_82572:
+case e1000_ich8lan:
 ctrl = E1000_READ_REG(hw, TXDCTL1);
 ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB;
 if(hw->mac_type >= e1000_82571)
@@ -818,6 +888,11 @@
 */
 e1000_clear_hw_cntrs(hw);

+/* ICH8 No-snoop bits are opposite polarity.
+* Set to snoop by default after reset. */
+if (hw->mac_type == e1000_ich8lan)
+e1000_set_pci_ex_no_snoop(hw, PCI_EX_82566_SNOOP_ALL);
+
 if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER ||
 hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) {
 ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
@@ -905,6 +980,7 @@ e1000_setup_link(struct e1000_hw *hw)
 */
 if (hw->fc == e1000_fc_default) {
 switch (hw->mac_type) {
+case e1000_ich8lan:
 case e1000_82573:
 hw->fc = e1000_fc_full;
 break;
@@ -971,9 +1047,12 @@
 */
 DEBUGOUT("Initializing the Flow Control address, type and timer regs\n");

-E1000_WRITE_REG(hw, FCAL, FLOW_CONTROL_ADDRESS_LOW);
-E1000_WRITE_REG(hw, FCAH, FLOW_CONTROL_ADDRESS_HIGH);
-E1000_WRITE_REG(hw, FCT, FLOW_CONTROL_TYPE);
+/* FCAL/H and FCT are hardcoded to standard values in e1000_ich8lan. */
+if (hw->mac_type != e1000_ich8lan) {
+E1000_WRITE_REG(hw, FCT, FLOW_CONTROL_TYPE);
+E1000_WRITE_REG(hw, FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+E1000_WRITE_REG(hw, FCAL, FLOW_CONTROL_ADDRESS_LOW);
+}

 E1000_WRITE_REG(hw, FCTTV, hw->fc_pause_time);

@@ -1237,12 +1316,13 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw)

 /* Wait 10ms for MAC to configure PHY from eeprom settings */
 msec_delay(15);
-
+if (hw->mac_type != e1000_ich8lan) {
 /* Configure activity LED after PHY reset */
 led_ctrl = E1000_READ_REG(hw, LEDCTL);
 led_ctrl &= IGP_ACTIVITY_LED_MASK;
 led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
 E1000_WRITE_REG(hw, LEDCTL, led_ctrl);
+}

 /* disable lplu d3 during driver init */
 ret_val = e1000_set_d3_lplu_state(hw, FALSE);
@@ -1478,8 +1558,7 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw)
 if (ret_val)
 return ret_val;

-/* Enable Pass False Carrier on the PHY */
-phy_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
+phy_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;

 ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
 phy_data);
@@ -1561,28 +1640,40 @@ e1000_copper_link_mgp_setup(struct e1000_hw *hw)
 phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
 if(hw->disable_polarity_correction == 1)
 phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
-if(ret_val)
-return ret_val;
-
-/* Force TX_CLK in the Extended PHY Specific Control Register
-* to 25MHz clock.
-*/
-ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
-if(ret_val)
+if (ret_val)
 return ret_val;

-phy_data |= M88E1000_EPSCR_TX_CLK_25;
-
 if (hw->phy_revision < M88E1011_I_REV_4) {
-/* Configure Master and Slave downshift values */
-phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
+/* Force TX_CLK in the Extended PHY Specific Control Register
+* to 25MHz clock.
+*/
+ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+if (ret_val)
+return ret_val;
+
+phy_data |= M88E1000_EPSCR_TX_CLK_25;
+
+if ((hw->phy_revision == E1000_REVISION_2) &&
+(hw->phy_id == M88E1111_I_PHY_ID)) {
+/* Vidalia Phy, set the downshift counter to 5x */
+phy_data &= ~(M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK);
+phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
+ret_val = e1000_write_phy_reg(hw,
+M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+if (ret_val)
+return ret_val;
+} else {
+/* Configure Master and Slave downshift values */
+phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
 M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
 phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
 M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
-ret_val = e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
-if(ret_val)
-return ret_val;
+ret_val = e1000_write_phy_reg(hw,
+M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+if (ret_val)
+return ret_val;
+}
 }

 /* SW Reset the PHY so all changes take effect */
@@ -1620,6 +1711,10 @@ e1000_copper_link_autoneg(struct e1000_hw *hw) | |||
1620 | if(hw->autoneg_advertised == 0) | 1711 | if(hw->autoneg_advertised == 0) |
1621 | hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; | 1712 | hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; |
1622 | 1713 | ||
1714 | /* IFE phy only supports 10/100 */ | ||
1715 | if (hw->phy_type == e1000_phy_ife) | ||
1716 | hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL; | ||
1717 | |||
1623 | DEBUGOUT("Reconfiguring auto-neg advertisement params\n"); | 1718 | DEBUGOUT("Reconfiguring auto-neg advertisement params\n"); |
1624 | ret_val = e1000_phy_setup_autoneg(hw); | 1719 | ret_val = e1000_phy_setup_autoneg(hw); |
1625 | if(ret_val) { | 1720 | if(ret_val) { |
@@ -1717,6 +1812,26 @@ e1000_setup_copper_link(struct e1000_hw *hw) | |||
1717 | 1812 | ||
1718 | DEBUGFUNC("e1000_setup_copper_link"); | 1813 | DEBUGFUNC("e1000_setup_copper_link"); |
1719 | 1814 | ||
1815 | switch (hw->mac_type) { | ||
1816 | case e1000_80003es2lan: | ||
1817 | case e1000_ich8lan: | ||
1818 | /* Set the mac to wait the maximum time between each | ||
1819 | * iteration and increase the max iterations when | ||
1820 | * polling the phy; this fixes erroneous timeouts at 10Mbps. */ | ||
1821 | ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF); | ||
1822 | if (ret_val) | ||
1823 | return ret_val; | ||
1824 | ret_val = e1000_read_kmrn_reg(hw, GG82563_REG(0x34, 9), ®_data); | ||
1825 | if (ret_val) | ||
1826 | return ret_val; | ||
1827 | reg_data |= 0x3F; | ||
1828 | ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 9), reg_data); | ||
1829 | if (ret_val) | ||
1830 | return ret_val; | ||
1831 | default: | ||
1832 | break; | ||
1833 | } | ||
1834 | |||
1720 | /* Check if it is a valid PHY and set PHY mode if necessary. */ | 1835 | /* Check if it is a valid PHY and set PHY mode if necessary. */ |
1721 | ret_val = e1000_copper_link_preconfig(hw); | 1836 | ret_val = e1000_copper_link_preconfig(hw); |
1722 | if(ret_val) | 1837 | if(ret_val) |
@@ -1724,10 +1839,8 @@ e1000_setup_copper_link(struct e1000_hw *hw) | |||
1724 | 1839 | ||
1725 | switch (hw->mac_type) { | 1840 | switch (hw->mac_type) { |
1726 | case e1000_80003es2lan: | 1841 | case e1000_80003es2lan: |
1727 | ret_val = e1000_read_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_INB_CTRL, | 1842 | /* Kumeran registers are written-only */ |
1728 | ®_data); | 1843 | reg_data = E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT; |
1729 | if (ret_val) | ||
1730 | return ret_val; | ||
1731 | reg_data |= E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING; | 1844 | reg_data |= E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING; |
1732 | ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_INB_CTRL, | 1845 | ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_INB_CTRL, |
1733 | reg_data); | 1846 | reg_data); |
@@ -1739,6 +1852,7 @@ e1000_setup_copper_link(struct e1000_hw *hw) | |||
1739 | } | 1852 | } |
1740 | 1853 | ||
1741 | if (hw->phy_type == e1000_phy_igp || | 1854 | if (hw->phy_type == e1000_phy_igp || |
1855 | hw->phy_type == e1000_phy_igp_3 || | ||
1742 | hw->phy_type == e1000_phy_igp_2) { | 1856 | hw->phy_type == e1000_phy_igp_2) { |
1743 | ret_val = e1000_copper_link_igp_setup(hw); | 1857 | ret_val = e1000_copper_link_igp_setup(hw); |
1744 | if(ret_val) | 1858 | if(ret_val) |
@@ -1803,7 +1917,7 @@ e1000_setup_copper_link(struct e1000_hw *hw) | |||
1803 | * hw - Struct containing variables accessed by shared code | 1917 | * hw - Struct containing variables accessed by shared code |
1804 | ******************************************************************************/ | 1918 | ******************************************************************************/ |
1805 | static int32_t | 1919 | static int32_t |
1806 | e1000_configure_kmrn_for_10_100(struct e1000_hw *hw) | 1920 | e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, uint16_t duplex) |
1807 | { | 1921 | { |
1808 | int32_t ret_val = E1000_SUCCESS; | 1922 | int32_t ret_val = E1000_SUCCESS; |
1809 | uint32_t tipg; | 1923 | uint32_t tipg; |
@@ -1823,6 +1937,18 @@ e1000_configure_kmrn_for_10_100(struct e1000_hw *hw) | |||
1823 | tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_10_100; | 1937 | tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_10_100; |
1824 | E1000_WRITE_REG(hw, TIPG, tipg); | 1938 | E1000_WRITE_REG(hw, TIPG, tipg); |
1825 | 1939 | ||
1940 | ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data); | ||
1941 | |||
1942 | if (ret_val) | ||
1943 | return ret_val; | ||
1944 | |||
1945 | if (duplex == HALF_DUPLEX) | ||
1946 | reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER; | ||
1947 | else | ||
1948 | reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; | ||
1949 | |||
1950 | ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data); | ||
1951 | |||
1826 | return ret_val; | 1952 | return ret_val; |
1827 | } | 1953 | } |
1828 | 1954 | ||
@@ -1847,6 +1973,14 @@ e1000_configure_kmrn_for_1000(struct e1000_hw *hw) | |||
1847 | tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000; | 1973 | tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000; |
1848 | E1000_WRITE_REG(hw, TIPG, tipg); | 1974 | E1000_WRITE_REG(hw, TIPG, tipg); |
1849 | 1975 | ||
1976 | ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data); | ||
1977 | |||
1978 | if (ret_val) | ||
1979 | return ret_val; | ||
1980 | |||
1981 | reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; | ||
1982 | ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data); | ||
1983 | |||
1850 | return ret_val; | 1984 | return ret_val; |
1851 | } | 1985 | } |
1852 | 1986 | ||
@@ -1869,10 +2003,13 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw) | |||
1869 | if(ret_val) | 2003 | if(ret_val) |
1870 | return ret_val; | 2004 | return ret_val; |
1871 | 2005 | ||
1872 | /* Read the MII 1000Base-T Control Register (Address 9). */ | 2006 | if (hw->phy_type != e1000_phy_ife) { |
1873 | ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); | 2007 | /* Read the MII 1000Base-T Control Register (Address 9). */ |
1874 | if(ret_val) | 2008 | ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); |
1875 | return ret_val; | 2009 | if (ret_val) |
2010 | return ret_val; | ||
2011 | } else | ||
2012 | mii_1000t_ctrl_reg = 0; | ||
1876 | 2013 | ||
1877 | /* Need to parse both autoneg_advertised and fc and set up | 2014 | /* Need to parse both autoneg_advertised and fc and set up |
1878 | * the appropriate PHY registers. First we will parse for | 2015 | * the appropriate PHY registers. First we will parse for |
@@ -1923,6 +2060,9 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw) | |||
1923 | if(hw->autoneg_advertised & ADVERTISE_1000_FULL) { | 2060 | if(hw->autoneg_advertised & ADVERTISE_1000_FULL) { |
1924 | DEBUGOUT("Advertise 1000mb Full duplex\n"); | 2061 | DEBUGOUT("Advertise 1000mb Full duplex\n"); |
1925 | mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; | 2062 | mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; |
2063 | if (hw->phy_type == e1000_phy_ife) { | ||
2064 | DEBUGOUT("e1000_phy_ife is a 10/100 PHY. Gigabit speed is not supported.\n"); | ||
2065 | } | ||
1926 | } | 2066 | } |
1927 | 2067 | ||
1928 | /* Check for a software override of the flow control settings, and | 2068 | /* Check for a software override of the flow control settings, and |
@@ -1984,9 +2124,11 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw) | |||
1984 | 2124 | ||
1985 | DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); | 2125 | DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); |
1986 | 2126 | ||
1987 | ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); | 2127 | if (hw->phy_type != e1000_phy_ife) { |
1988 | if(ret_val) | 2128 | ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); |
1989 | return ret_val; | 2129 | if (ret_val) |
2130 | return ret_val; | ||
2131 | } | ||
1990 | 2132 | ||
1991 | return E1000_SUCCESS; | 2133 | return E1000_SUCCESS; |
1992 | } | 2134 | } |
@@ -2089,6 +2231,18 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw) | |||
2089 | 2231 | ||
2090 | /* Need to reset the PHY or these changes will be ignored */ | 2232 | /* Need to reset the PHY or these changes will be ignored */ |
2091 | mii_ctrl_reg |= MII_CR_RESET; | 2233 | mii_ctrl_reg |= MII_CR_RESET; |
2234 | /* Disable MDI-X support for 10/100 */ | ||
2235 | } else if (hw->phy_type == e1000_phy_ife) { | ||
2236 | ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &phy_data); | ||
2237 | if (ret_val) | ||
2238 | return ret_val; | ||
2239 | |||
2240 | phy_data &= ~IFE_PMC_AUTO_MDIX; | ||
2241 | phy_data &= ~IFE_PMC_FORCE_MDIX; | ||
2242 | |||
2243 | ret_val = e1000_write_phy_reg(hw, IFE_PHY_MDIX_CONTROL, phy_data); | ||
2244 | if (ret_val) | ||
2245 | return ret_val; | ||
2092 | } else { | 2246 | } else { |
2093 | /* Clear Auto-Crossover to force MDI manually. IGP requires MDI | 2247 | /* Clear Auto-Crossover to force MDI manually. IGP requires MDI |
2094 | * forced whenever speed or duplex are forced. | 2248 | * forced whenever speed or duplex are forced. |
@@ -2721,8 +2875,12 @@ e1000_check_for_link(struct e1000_hw *hw) | |||
2721 | */ | 2875 | */ |
2722 | if(hw->tbi_compatibility_en) { | 2876 | if(hw->tbi_compatibility_en) { |
2723 | uint16_t speed, duplex; | 2877 | uint16_t speed, duplex; |
2724 | e1000_get_speed_and_duplex(hw, &speed, &duplex); | 2878 | ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex); |
2725 | if(speed != SPEED_1000) { | 2879 | if (ret_val) { |
2880 | DEBUGOUT("Error getting link speed and duplex\n"); | ||
2881 | return ret_val; | ||
2882 | } | ||
2883 | if (speed != SPEED_1000) { | ||
2726 | /* If link speed is not set to gigabit speed, we do not need | 2884 | /* If link speed is not set to gigabit speed, we do not need |
2727 | * to enable TBI compatibility. | 2885 | * to enable TBI compatibility. |
2728 | */ | 2886 | */ |
@@ -2889,7 +3047,13 @@ e1000_get_speed_and_duplex(struct e1000_hw *hw, | |||
2889 | if (*speed == SPEED_1000) | 3047 | if (*speed == SPEED_1000) |
2890 | ret_val = e1000_configure_kmrn_for_1000(hw); | 3048 | ret_val = e1000_configure_kmrn_for_1000(hw); |
2891 | else | 3049 | else |
2892 | ret_val = e1000_configure_kmrn_for_10_100(hw); | 3050 | ret_val = e1000_configure_kmrn_for_10_100(hw, *duplex); |
3051 | if (ret_val) | ||
3052 | return ret_val; | ||
3053 | } | ||
3054 | |||
3055 | if ((hw->phy_type == e1000_phy_igp_3) && (*speed == SPEED_1000)) { | ||
3056 | ret_val = e1000_kumeran_lock_loss_workaround(hw); | ||
2893 | if (ret_val) | 3057 | if (ret_val) |
2894 | return ret_val; | 3058 | return ret_val; |
2895 | } | 3059 | } |
@@ -3079,6 +3243,9 @@ e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask) | |||
3079 | 3243 | ||
3080 | DEBUGFUNC("e1000_swfw_sync_acquire"); | 3244 | DEBUGFUNC("e1000_swfw_sync_acquire"); |
3081 | 3245 | ||
3246 | if (hw->swfwhw_semaphore_present) | ||
3247 | return e1000_get_software_flag(hw); | ||
3248 | |||
3082 | if (!hw->swfw_sync_present) | 3249 | if (!hw->swfw_sync_present) |
3083 | return e1000_get_hw_eeprom_semaphore(hw); | 3250 | return e1000_get_hw_eeprom_semaphore(hw); |
3084 | 3251 | ||
@@ -3118,6 +3285,11 @@ e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask) | |||
3118 | 3285 | ||
3119 | DEBUGFUNC("e1000_swfw_sync_release"); | 3286 | DEBUGFUNC("e1000_swfw_sync_release"); |
3120 | 3287 | ||
3288 | if (hw->swfwhw_semaphore_present) { | ||
3289 | e1000_release_software_flag(hw); | ||
3290 | return; | ||
3291 | } | ||
3292 | |||
3121 | if (!hw->swfw_sync_present) { | 3293 | if (!hw->swfw_sync_present) { |
3122 | e1000_put_hw_eeprom_semaphore(hw); | 3294 | e1000_put_hw_eeprom_semaphore(hw); |
3123 | return; | 3295 | return; |
@@ -3160,7 +3332,8 @@ e1000_read_phy_reg(struct e1000_hw *hw, | |||
3160 | if (e1000_swfw_sync_acquire(hw, swfw)) | 3332 | if (e1000_swfw_sync_acquire(hw, swfw)) |
3161 | return -E1000_ERR_SWFW_SYNC; | 3333 | return -E1000_ERR_SWFW_SYNC; |
3162 | 3334 | ||
3163 | if((hw->phy_type == e1000_phy_igp || | 3335 | if ((hw->phy_type == e1000_phy_igp || |
3336 | hw->phy_type == e1000_phy_igp_3 || | ||
3164 | hw->phy_type == e1000_phy_igp_2) && | 3337 | hw->phy_type == e1000_phy_igp_2) && |
3165 | (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { | 3338 | (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { |
3166 | ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, | 3339 | ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, |
@@ -3299,7 +3472,8 @@ e1000_write_phy_reg(struct e1000_hw *hw, | |||
3299 | if (e1000_swfw_sync_acquire(hw, swfw)) | 3472 | if (e1000_swfw_sync_acquire(hw, swfw)) |
3300 | return -E1000_ERR_SWFW_SYNC; | 3473 | return -E1000_ERR_SWFW_SYNC; |
3301 | 3474 | ||
3302 | if((hw->phy_type == e1000_phy_igp || | 3475 | if ((hw->phy_type == e1000_phy_igp || |
3476 | hw->phy_type == e1000_phy_igp_3 || | ||
3303 | hw->phy_type == e1000_phy_igp_2) && | 3477 | hw->phy_type == e1000_phy_igp_2) && |
3304 | (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { | 3478 | (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { |
3305 | ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, | 3479 | ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, |
@@ -3514,7 +3688,7 @@ e1000_phy_hw_reset(struct e1000_hw *hw) | |||
3514 | E1000_WRITE_FLUSH(hw); | 3688 | E1000_WRITE_FLUSH(hw); |
3515 | 3689 | ||
3516 | if (hw->mac_type >= e1000_82571) | 3690 | if (hw->mac_type >= e1000_82571) |
3517 | msec_delay(10); | 3691 | msec_delay_irq(10); |
3518 | e1000_swfw_sync_release(hw, swfw); | 3692 | e1000_swfw_sync_release(hw, swfw); |
3519 | } else { | 3693 | } else { |
3520 | /* Read the Extended Device Control Register, assert the PHY_RESET_DIR | 3694 | /* Read the Extended Device Control Register, assert the PHY_RESET_DIR |
@@ -3544,6 +3718,12 @@ e1000_phy_hw_reset(struct e1000_hw *hw) | |||
3544 | ret_val = e1000_get_phy_cfg_done(hw); | 3718 | ret_val = e1000_get_phy_cfg_done(hw); |
3545 | e1000_release_software_semaphore(hw); | 3719 | e1000_release_software_semaphore(hw); |
3546 | 3720 | ||
3721 | if ((hw->mac_type == e1000_ich8lan) && | ||
3722 | (hw->phy_type == e1000_phy_igp_3)) { | ||
3723 | ret_val = e1000_init_lcd_from_nvm(hw); | ||
3724 | if (ret_val) | ||
3725 | return ret_val; | ||
3726 | } | ||
3547 | return ret_val; | 3727 | return ret_val; |
3548 | } | 3728 | } |
3549 | 3729 | ||
@@ -3572,9 +3752,11 @@ e1000_phy_reset(struct e1000_hw *hw) | |||
3572 | case e1000_82541_rev_2: | 3752 | case e1000_82541_rev_2: |
3573 | case e1000_82571: | 3753 | case e1000_82571: |
3574 | case e1000_82572: | 3754 | case e1000_82572: |
3755 | case e1000_ich8lan: | ||
3575 | ret_val = e1000_phy_hw_reset(hw); | 3756 | ret_val = e1000_phy_hw_reset(hw); |
3576 | if(ret_val) | 3757 | if(ret_val) |
3577 | return ret_val; | 3758 | return ret_val; |
3759 | |||
3578 | break; | 3760 | break; |
3579 | default: | 3761 | default: |
3580 | ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); | 3762 | ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); |
@@ -3597,11 +3779,120 @@ e1000_phy_reset(struct e1000_hw *hw) | |||
3597 | } | 3779 | } |
3598 | 3780 | ||
3599 | /****************************************************************************** | 3781 | /****************************************************************************** |
3782 | * Work-around for 82566 power-down: on D3 entry- | ||
3783 | * 1) disable gigabit link | ||
3784 | * 2) write VR power-down enable | ||
3785 | * 3) read it back | ||
3786 | * if successful continue, else issue LCD reset and repeat | ||
3787 | * | ||
3788 | * hw - struct containing variables accessed by shared code | ||
3789 | ******************************************************************************/ | ||
3790 | void | ||
3791 | e1000_phy_powerdown_workaround(struct e1000_hw *hw) | ||
3792 | { | ||
3793 | int32_t reg; | ||
3794 | uint16_t phy_data; | ||
3795 | int32_t retry = 0; | ||
3796 | |||
3797 | DEBUGFUNC("e1000_phy_powerdown_workaround"); | ||
3798 | |||
3799 | if (hw->phy_type != e1000_phy_igp_3) | ||
3800 | return; | ||
3801 | |||
3802 | do { | ||
3803 | /* Disable link */ | ||
3804 | reg = E1000_READ_REG(hw, PHY_CTRL); | ||
3805 | E1000_WRITE_REG(hw, PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE | | ||
3806 | E1000_PHY_CTRL_NOND0A_GBE_DISABLE); | ||
3807 | |||
3808 | /* Write VR power-down enable */ | ||
3809 | e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data); | ||
3810 | e1000_write_phy_reg(hw, IGP3_VR_CTRL, phy_data | | ||
3811 | IGP3_VR_CTRL_MODE_SHUT); | ||
3812 | |||
3813 | /* Read it back and test */ | ||
3814 | e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data); | ||
3815 | if ((phy_data & IGP3_VR_CTRL_MODE_SHUT) || retry) | ||
3816 | break; | ||
3817 | |||
3818 | /* Issue PHY reset and repeat at most one more time */ | ||
3819 | reg = E1000_READ_REG(hw, CTRL); | ||
3820 | E1000_WRITE_REG(hw, CTRL, reg | E1000_CTRL_PHY_RST); | ||
3821 | retry++; | ||
3822 | } while (retry); | ||
3823 | |||
3824 | return; | ||
3825 | |||
3826 | } | ||
3827 | |||
3828 | /****************************************************************************** | ||
3829 | * Work-around for 82566 Kumeran PCS lock loss: | ||
3830 | * On link status change (i.e. PCI reset, speed change) and link is up and | ||
3831 | * speed is gigabit- | ||
3832 | * 0) if workaround is optionally disabled do nothing | ||
3833 | * 1) wait 1ms for Kumeran link to come up | ||
3834 | * 2) check Kumeran Diagnostic register PCS lock loss bit | ||
3835 | * 3) if not set the link is locked (all is good), otherwise... | ||
3836 | * 4) reset the PHY | ||
3837 | * 5) repeat up to 10 times | ||
3838 | * Note: this is only called for IGP3 copper when speed is 1gb. | ||
3839 | * | ||
3840 | * hw - struct containing variables accessed by shared code | ||
3841 | ******************************************************************************/ | ||
3842 | int32_t | ||
3843 | e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw) | ||
3844 | { | ||
3845 | int32_t ret_val; | ||
3846 | int32_t reg; | ||
3847 | int32_t cnt; | ||
3848 | uint16_t phy_data; | ||
3849 | |||
3850 | if (hw->kmrn_lock_loss_workaround_disabled) | ||
3851 | return E1000_SUCCESS; | ||
3852 | |||
3853 | /* Make sure link is up before proceeding. If not just return. | ||
3854 | * Attempting this while link is negotiating fouls up link | ||
3855 | * stability */ | ||
3856 | ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); | ||
3857 | ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); | ||
3858 | |||
3859 | if (phy_data & MII_SR_LINK_STATUS) { | ||
3860 | for (cnt = 0; cnt < 10; cnt++) { | ||
3861 | /* read once to clear */ | ||
3862 | ret_val = e1000_read_phy_reg(hw, IGP3_KMRN_DIAG, &phy_data); | ||
3863 | if (ret_val) | ||
3864 | return ret_val; | ||
3865 | /* and again to get new status */ | ||
3866 | ret_val = e1000_read_phy_reg(hw, IGP3_KMRN_DIAG, &phy_data); | ||
3867 | if (ret_val) | ||
3868 | return ret_val; | ||
3869 | |||
3870 | /* check for PCS lock */ | ||
3871 | if (!(phy_data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS)) | ||
3872 | return E1000_SUCCESS; | ||
3873 | |||
3874 | /* Issue PHY reset */ | ||
3875 | e1000_phy_hw_reset(hw); | ||
3876 | msec_delay_irq(5); | ||
3877 | } | ||
3878 | /* Disable GigE link negotiation */ | ||
3879 | reg = E1000_READ_REG(hw, PHY_CTRL); | ||
3880 | E1000_WRITE_REG(hw, PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE | | ||
3881 | E1000_PHY_CTRL_NOND0A_GBE_DISABLE); | ||
3882 | |||
3883 | /* unable to acquire PCS lock */ | ||
3884 | return E1000_ERR_PHY; | ||
3885 | } | ||
3886 | |||
3887 | return E1000_SUCCESS; | ||
3888 | } | ||
3889 | |||
3890 | /****************************************************************************** | ||
3600 | * Probes the expected PHY address for known PHY IDs | 3891 | * Probes the expected PHY address for known PHY IDs |
3601 | * | 3892 | * |
3602 | * hw - Struct containing variables accessed by shared code | 3893 | * hw - Struct containing variables accessed by shared code |
3603 | ******************************************************************************/ | 3894 | ******************************************************************************/ |
3604 | static int32_t | 3895 | int32_t |
3605 | e1000_detect_gig_phy(struct e1000_hw *hw) | 3896 | e1000_detect_gig_phy(struct e1000_hw *hw) |
3606 | { | 3897 | { |
3607 | int32_t phy_init_status, ret_val; | 3898 | int32_t phy_init_status, ret_val; |
@@ -3613,8 +3904,8 @@ e1000_detect_gig_phy(struct e1000_hw *hw) | |||
3613 | /* The 82571 firmware may still be configuring the PHY. In this | 3904 | /* The 82571 firmware may still be configuring the PHY. In this |
3614 | * case, we cannot access the PHY until the configuration is done. So | 3905 | * case, we cannot access the PHY until the configuration is done. So |
3615 | * we explicitly set the PHY values. */ | 3906 | * we explicitly set the PHY values. */ |
3616 | if(hw->mac_type == e1000_82571 || | 3907 | if (hw->mac_type == e1000_82571 || |
3617 | hw->mac_type == e1000_82572) { | 3908 | hw->mac_type == e1000_82572) { |
3618 | hw->phy_id = IGP01E1000_I_PHY_ID; | 3909 | hw->phy_id = IGP01E1000_I_PHY_ID; |
3619 | hw->phy_type = e1000_phy_igp_2; | 3910 | hw->phy_type = e1000_phy_igp_2; |
3620 | return E1000_SUCCESS; | 3911 | return E1000_SUCCESS; |
@@ -3631,7 +3922,7 @@ e1000_detect_gig_phy(struct e1000_hw *hw) | |||
3631 | 3922 | ||
3632 | /* Read the PHY ID Registers to identify which PHY is onboard. */ | 3923 | /* Read the PHY ID Registers to identify which PHY is onboard. */ |
3633 | ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high); | 3924 | ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high); |
3634 | if(ret_val) | 3925 | if (ret_val) |
3635 | return ret_val; | 3926 | return ret_val; |
3636 | 3927 | ||
3637 | hw->phy_id = (uint32_t) (phy_id_high << 16); | 3928 | hw->phy_id = (uint32_t) (phy_id_high << 16); |
@@ -3669,6 +3960,12 @@ e1000_detect_gig_phy(struct e1000_hw *hw) | |||
3669 | case e1000_80003es2lan: | 3960 | case e1000_80003es2lan: |
3670 | if (hw->phy_id == GG82563_E_PHY_ID) match = TRUE; | 3961 | if (hw->phy_id == GG82563_E_PHY_ID) match = TRUE; |
3671 | break; | 3962 | break; |
3963 | case e1000_ich8lan: | ||
3964 | if (hw->phy_id == IGP03E1000_E_PHY_ID) match = TRUE; | ||
3965 | if (hw->phy_id == IFE_E_PHY_ID) match = TRUE; | ||
3966 | if (hw->phy_id == IFE_PLUS_E_PHY_ID) match = TRUE; | ||
3967 | if (hw->phy_id == IFE_C_E_PHY_ID) match = TRUE; | ||
3968 | break; | ||
3672 | default: | 3969 | default: |
3673 | DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type); | 3970 | DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type); |
3674 | return -E1000_ERR_CONFIG; | 3971 | return -E1000_ERR_CONFIG; |
@@ -3784,6 +4081,53 @@ e1000_phy_igp_get_info(struct e1000_hw *hw, | |||
3784 | } | 4081 | } |
3785 | 4082 | ||
3786 | /****************************************************************************** | 4083 | /****************************************************************************** |
4084 | * Get PHY information from various PHY registers for ife PHY only. | ||
4085 | * | ||
4086 | * hw - Struct containing variables accessed by shared code | ||
4087 | * phy_info - PHY information structure | ||
4088 | ******************************************************************************/ | ||
4089 | int32_t | ||
4090 | e1000_phy_ife_get_info(struct e1000_hw *hw, | ||
4091 | struct e1000_phy_info *phy_info) | ||
4092 | { | ||
4093 | int32_t ret_val; | ||
4094 | uint16_t phy_data, polarity; | ||
4095 | |||
4096 | DEBUGFUNC("e1000_phy_ife_get_info"); | ||
4097 | |||
4098 | phy_info->downshift = (e1000_downshift)hw->speed_downgraded; | ||
4099 | phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal; | ||
4100 | |||
4101 | ret_val = e1000_read_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, &phy_data); | ||
4102 | if (ret_val) | ||
4103 | return ret_val; | ||
4104 | phy_info->polarity_correction = | ||
4105 | (phy_data & IFE_PSC_AUTO_POLARITY_DISABLE) >> | ||
4106 | IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT; | ||
4107 | |||
4108 | if (phy_info->polarity_correction == e1000_polarity_reversal_enabled) { | ||
4109 | ret_val = e1000_check_polarity(hw, &polarity); | ||
4110 | if (ret_val) | ||
4111 | return ret_val; | ||
4112 | } else { | ||
4113 | /* Polarity is forced. */ | ||
4114 | polarity = (phy_data & IFE_PSC_FORCE_POLARITY) >> | ||
4115 | IFE_PSC_FORCE_POLARITY_SHIFT; | ||
4116 | } | ||
4117 | phy_info->cable_polarity = polarity; | ||
4118 | |||
4119 | ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &phy_data); | ||
4120 | if (ret_val) | ||
4121 | return ret_val; | ||
4122 | |||
4123 | phy_info->mdix_mode = | ||
4124 | (phy_data & (IFE_PMC_AUTO_MDIX | IFE_PMC_FORCE_MDIX)) >> | ||
4125 | IFE_PMC_MDIX_MODE_SHIFT; | ||
4126 | |||
4127 | return E1000_SUCCESS; | ||
4128 | } | ||
4129 | |||
4130 | /****************************************************************************** | ||
3787 | * Get PHY information from various PHY registers for m88 PHY only. | 4131 | * Get PHY information from various PHY registers for m88 PHY only. |
3788 | * | 4132 | * |
3789 | * hw - Struct containing variables accessed by shared code | 4133 | * hw - Struct containing variables accessed by shared code |
@@ -3898,9 +4242,12 @@ e1000_phy_get_info(struct e1000_hw *hw, | |||
3898 | return -E1000_ERR_CONFIG; | 4242 | return -E1000_ERR_CONFIG; |
3899 | } | 4243 | } |
3900 | 4244 | ||
3901 | if(hw->phy_type == e1000_phy_igp || | 4245 | if (hw->phy_type == e1000_phy_igp || |
4246 | hw->phy_type == e1000_phy_igp_3 || | ||
3902 | hw->phy_type == e1000_phy_igp_2) | 4247 | hw->phy_type == e1000_phy_igp_2) |
3903 | return e1000_phy_igp_get_info(hw, phy_info); | 4248 | return e1000_phy_igp_get_info(hw, phy_info); |
4249 | else if (hw->phy_type == e1000_phy_ife) | ||
4250 | return e1000_phy_ife_get_info(hw, phy_info); | ||
3904 | else | 4251 | else |
3905 | return e1000_phy_m88_get_info(hw, phy_info); | 4252 | return e1000_phy_m88_get_info(hw, phy_info); |
3906 | } | 4253 | } |
@@ -4049,6 +4396,35 @@ e1000_init_eeprom_params(struct e1000_hw *hw) | |||
4049 | eeprom->use_eerd = TRUE; | 4396 | eeprom->use_eerd = TRUE; |
4050 | eeprom->use_eewr = FALSE; | 4397 | eeprom->use_eewr = FALSE; |
4051 | break; | 4398 | break; |
4399 | case e1000_ich8lan: | ||
4400 | { | ||
4401 | int32_t i = 0; | ||
4402 | uint32_t flash_size = E1000_READ_ICH8_REG(hw, ICH8_FLASH_GFPREG); | ||
4403 | |||
4404 | eeprom->type = e1000_eeprom_ich8; | ||
4405 | eeprom->use_eerd = FALSE; | ||
4406 | eeprom->use_eewr = FALSE; | ||
4407 | eeprom->word_size = E1000_SHADOW_RAM_WORDS; | ||
4408 | |||
4409 | /* Zero the shadow RAM structure. But don't load it from NVM | ||
4410 | * so as to save time for driver init */ | ||
4411 | if (hw->eeprom_shadow_ram != NULL) { | ||
4412 | for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) { | ||
4413 | hw->eeprom_shadow_ram[i].modified = FALSE; | ||
4414 | hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF; | ||
4415 | } | ||
4416 | } | ||
4417 | |||
4418 | hw->flash_base_addr = (flash_size & ICH8_GFPREG_BASE_MASK) * | ||
4419 | ICH8_FLASH_SECTOR_SIZE; | ||
4420 | |||
4421 | hw->flash_bank_size = ((flash_size >> 16) & ICH8_GFPREG_BASE_MASK) + 1; | ||
4422 | hw->flash_bank_size -= (flash_size & ICH8_GFPREG_BASE_MASK); | ||
4423 | hw->flash_bank_size *= ICH8_FLASH_SECTOR_SIZE; | ||
4424 | hw->flash_bank_size /= 2 * sizeof(uint16_t); | ||
4425 | |||
4426 | break; | ||
4427 | } | ||
4052 | default: | 4428 | default: |
4053 | break; | 4429 | break; |
4054 | } | 4430 | } |
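The GFPREG decode in the e1000_ich8lan case above packs two sector numbers into one register: the first NVM sector in the low bits and the last one in the upper half. A minimal sketch of the same arithmetic, assuming a 4 KB sector size and a 13-bit field mask as the ICH8_FLASH_SECTOR_SIZE and ICH8_GFPREG_BASE_MASK names suggest (the register value below is hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t gfpreg      = 0x00200001; /* hypothetical: first sector 1, last sector 0x20 */
        uint32_t sector_size = 4096;       /* assumed ICH8_FLASH_SECTOR_SIZE */
        uint32_t base_mask   = 0x1FFF;     /* assumed ICH8_GFPREG_BASE_MASK */

        uint32_t flash_base = (gfpreg & base_mask) * sector_size;
        uint32_t region     = (((gfpreg >> 16) & base_mask) + 1 - (gfpreg & base_mask)) * sector_size;
        uint32_t bank_words = region / (2 * sizeof(uint16_t)); /* two banks, sized in 16-bit words */

        printf("base 0x%X, bank size %u words\n", (unsigned)flash_base, (unsigned)bank_words);
        return 0;
    }

The divide by 2 * sizeof(uint16_t) is what turns the byte size of the whole NVM region into the word size of a single bank, since the region holds two banks of 16-bit words.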
@@ -4469,7 +4845,10 @@ e1000_read_eeprom(struct e1000_hw *hw, | |||
4469 | return ret_val; | 4845 | return ret_val; |
4470 | } | 4846 | } |
4471 | 4847 | ||
4472 | if(eeprom->type == e1000_eeprom_spi) { | 4848 | if (eeprom->type == e1000_eeprom_ich8) |
4849 | return e1000_read_eeprom_ich8(hw, offset, words, data); | ||
4850 | |||
4851 | if (eeprom->type == e1000_eeprom_spi) { | ||
4473 | uint16_t word_in; | 4852 | uint16_t word_in; |
4474 | uint8_t read_opcode = EEPROM_READ_OPCODE_SPI; | 4853 | uint8_t read_opcode = EEPROM_READ_OPCODE_SPI; |
4475 | 4854 | ||
@@ -4636,7 +5015,10 @@ e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw) | |||
4636 | 5015 | ||
4637 | DEBUGFUNC("e1000_is_onboard_nvm_eeprom"); | 5016 | DEBUGFUNC("e1000_is_onboard_nvm_eeprom"); |
4638 | 5017 | ||
4639 | if(hw->mac_type == e1000_82573) { | 5018 | if (hw->mac_type == e1000_ich8lan) |
5019 | return FALSE; | ||
5020 | |||
5021 | if (hw->mac_type == e1000_82573) { | ||
4640 | eecd = E1000_READ_REG(hw, EECD); | 5022 | eecd = E1000_READ_REG(hw, EECD); |
4641 | 5023 | ||
4642 | /* Isolate bits 15 & 16 */ | 5024 | /* Isolate bits 15 & 16 */ |
@@ -4686,8 +5068,22 @@ e1000_validate_eeprom_checksum(struct e1000_hw *hw) | |||
4686 | } | 5068 | } |
4687 | } | 5069 | } |
4688 | 5070 | ||
4689 | for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { | 5071 | if (hw->mac_type == e1000_ich8lan) { |
4690 | if(e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { | 5072 | /* Drivers must allocate the shadow ram structure for the |
5073 | * EEPROM checksum to be updated. Otherwise, this bit as well | ||
5074 | * as the checksum must both be set correctly for this | ||
5075 | * validation to pass. | ||
5076 | */ | ||
5077 | e1000_read_eeprom(hw, 0x19, 1, &eeprom_data); | ||
5078 | if ((eeprom_data & 0x40) == 0) { | ||
5079 | eeprom_data |= 0x40; | ||
5080 | e1000_write_eeprom(hw, 0x19, 1, &eeprom_data); | ||
5081 | e1000_update_eeprom_checksum(hw); | ||
5082 | } | ||
5083 | } | ||
5084 | |||
5085 | for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { | ||
5086 | if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { | ||
4691 | DEBUGOUT("EEPROM Read Error\n"); | 5087 | DEBUGOUT("EEPROM Read Error\n"); |
4692 | return -E1000_ERR_EEPROM; | 5088 | return -E1000_ERR_EEPROM; |
4693 | } | 5089 | } |
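For context on what the validation loop above is accumulating (the summation itself lies outside this hunk): the e1000 NVM is treated as valid when the first EEPROM_CHECKSUM_REG + 1 words sum to a fixed signature, and e1000_update_eeprom_checksum() rewrites the checksum word to restore that property. A rough sketch of that recomputation, assuming the usual 0x3F checksum offset and 0xBABA signature from the e1000 headers (both values are assumptions here, not taken from this hunk):

    #include <stdint.h>

    #define CHECKSUM_REG 0x3F   /* assumed EEPROM_CHECKSUM_REG */
    #define CHECKSUM_SUM 0xBABA /* assumed EEPROM_SUM */

    /* Return the value the checksum word must hold so that words
     * 0..CHECKSUM_REG sum (mod 2^16) to the signature. */
    static uint16_t nvm_checksum(const uint16_t words[CHECKSUM_REG])
    {
        uint16_t sum = 0;
        int i;

        for (i = 0; i < CHECKSUM_REG; i++)
            sum += words[i];
        return (uint16_t)(CHECKSUM_SUM - sum);
    }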
@@ -4713,6 +5109,7 @@ e1000_validate_eeprom_checksum(struct e1000_hw *hw) | |||
4713 | int32_t | 5109 | int32_t |
4714 | e1000_update_eeprom_checksum(struct e1000_hw *hw) | 5110 | e1000_update_eeprom_checksum(struct e1000_hw *hw) |
4715 | { | 5111 | { |
5112 | uint32_t ctrl_ext; | ||
4716 | uint16_t checksum = 0; | 5113 | uint16_t checksum = 0; |
4717 | uint16_t i, eeprom_data; | 5114 | uint16_t i, eeprom_data; |
4718 | 5115 | ||
@@ -4731,6 +5128,14 @@ e1000_update_eeprom_checksum(struct e1000_hw *hw) | |||
4731 | return -E1000_ERR_EEPROM; | 5128 | return -E1000_ERR_EEPROM; |
4732 | } else if (hw->eeprom.type == e1000_eeprom_flash) { | 5129 | } else if (hw->eeprom.type == e1000_eeprom_flash) { |
4733 | e1000_commit_shadow_ram(hw); | 5130 | e1000_commit_shadow_ram(hw); |
5131 | } else if (hw->eeprom.type == e1000_eeprom_ich8) { | ||
5132 | e1000_commit_shadow_ram(hw); | ||
5133 | /* Reload the EEPROM, or else modifications will not appear | ||
5134 | * until after next adapter reset. */ | ||
5135 | ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); | ||
5136 | ctrl_ext |= E1000_CTRL_EXT_EE_RST; | ||
5137 | E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); | ||
5138 | msec_delay(10); | ||
4734 | } | 5139 | } |
4735 | return E1000_SUCCESS; | 5140 | return E1000_SUCCESS; |
4736 | } | 5141 | } |
@@ -4770,6 +5175,9 @@ e1000_write_eeprom(struct e1000_hw *hw, | |||
4770 | if(eeprom->use_eewr == TRUE) | 5175 | if(eeprom->use_eewr == TRUE) |
4771 | return e1000_write_eeprom_eewr(hw, offset, words, data); | 5176 | return e1000_write_eeprom_eewr(hw, offset, words, data); |
4772 | 5177 | ||
5178 | if (eeprom->type == e1000_eeprom_ich8) | ||
5179 | return e1000_write_eeprom_ich8(hw, offset, words, data); | ||
5180 | |||
4773 | /* Prepare the EEPROM for writing */ | 5181 | /* Prepare the EEPROM for writing */ |
4774 | if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) | 5182 | if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) |
4775 | return -E1000_ERR_EEPROM; | 5183 | return -E1000_ERR_EEPROM; |
@@ -4957,11 +5365,17 @@ e1000_commit_shadow_ram(struct e1000_hw *hw) | |||
4957 | uint32_t flop = 0; | 5365 | uint32_t flop = 0; |
4958 | uint32_t i = 0; | 5366 | uint32_t i = 0; |
4959 | int32_t error = E1000_SUCCESS; | 5367 | int32_t error = E1000_SUCCESS; |
4960 | 5368 | uint32_t old_bank_offset = 0; | |
4961 | /* The flop register will be used to determine if flash type is STM */ | 5369 | uint32_t new_bank_offset = 0; |
4962 | flop = E1000_READ_REG(hw, FLOP); | 5370 | uint32_t sector_retries = 0; |
5371 | uint8_t low_byte = 0; | ||
5372 | uint8_t high_byte = 0; | ||
5373 | uint8_t temp_byte = 0; | ||
5374 | boolean_t sector_write_failed = FALSE; | ||
4963 | 5375 | ||
4964 | if (hw->mac_type == e1000_82573) { | 5376 | if (hw->mac_type == e1000_82573) { |
5377 | /* The flop register will be used to determine if flash type is STM */ | ||
5378 | flop = E1000_READ_REG(hw, FLOP); | ||
4965 | for (i=0; i < attempts; i++) { | 5379 | for (i=0; i < attempts; i++) { |
4966 | eecd = E1000_READ_REG(hw, EECD); | 5380 | eecd = E1000_READ_REG(hw, EECD); |
4967 | if ((eecd & E1000_EECD_FLUPD) == 0) { | 5381 | if ((eecd & E1000_EECD_FLUPD) == 0) { |
@@ -4995,6 +5409,106 @@ e1000_commit_shadow_ram(struct e1000_hw *hw) | |||
4995 | } | 5409 | } |
4996 | } | 5410 | } |
4997 | 5411 | ||
5412 | if (hw->mac_type == e1000_ich8lan && hw->eeprom_shadow_ram != NULL) { | ||
5413 | /* We're writing to the opposite bank so if we're on bank 1, | ||
5414 | * write to bank 0 etc. We also need to erase the segment that | ||
5415 | * is going to be written */ | ||
5416 | if (!(E1000_READ_REG(hw, EECD) & E1000_EECD_SEC1VAL)) { | ||
5417 | new_bank_offset = hw->flash_bank_size * 2; | ||
5418 | old_bank_offset = 0; | ||
5419 | e1000_erase_ich8_4k_segment(hw, 1); | ||
5420 | } else { | ||
5421 | old_bank_offset = hw->flash_bank_size * 2; | ||
5422 | new_bank_offset = 0; | ||
5423 | e1000_erase_ich8_4k_segment(hw, 0); | ||
5424 | } | ||
5425 | |||
5426 | do { | ||
5427 | sector_write_failed = FALSE; | ||
5428 | /* Loop for every byte in the shadow RAM, | ||
5429 | * which is in units of words. */ | ||
5430 | for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) { | ||
5431 | /* Determine whether to write the value stored | ||
5432 | * in the other NVM bank or a modified value stored | ||
5433 | * in the shadow RAM */ | ||
5434 | if (hw->eeprom_shadow_ram[i].modified == TRUE) { | ||
5435 | low_byte = (uint8_t)hw->eeprom_shadow_ram[i].eeprom_word; | ||
5436 | e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset, | ||
5437 | &temp_byte); | ||
5438 | udelay(100); | ||
5439 | error = e1000_verify_write_ich8_byte(hw, | ||
5440 | (i << 1) + new_bank_offset, | ||
5441 | low_byte); | ||
5442 | if (error != E1000_SUCCESS) | ||
5443 | sector_write_failed = TRUE; | ||
5444 | high_byte = | ||
5445 | (uint8_t)(hw->eeprom_shadow_ram[i].eeprom_word >> 8); | ||
5446 | e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset + 1, | ||
5447 | &temp_byte); | ||
5448 | udelay(100); | ||
5449 | } else { | ||
5450 | e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset, | ||
5451 | &low_byte); | ||
5452 | udelay(100); | ||
5453 | error = e1000_verify_write_ich8_byte(hw, | ||
5454 | (i << 1) + new_bank_offset, low_byte); | ||
5455 | if (error != E1000_SUCCESS) | ||
5456 | sector_write_failed = TRUE; | ||
5457 | e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset + 1, | ||
5458 | &high_byte); | ||
5459 | } | ||
5460 | |||
5461 | /* If the word is 0x13, then make sure the signature bits | ||
5462 | * (15:14) are 11b until the commit has completed. | ||
5463 | * This will allow us to write 10b which indicates the | ||
5464 | * signature is valid. We want to do this after the write | ||
5465 | * has completed so that we don't mark the segment valid | ||
5466 | * while the write is still in progress */ | ||
5467 | if (i == E1000_ICH8_NVM_SIG_WORD) | ||
5468 | high_byte = E1000_ICH8_NVM_SIG_MASK | high_byte; | ||
5469 | |||
5470 | error = e1000_verify_write_ich8_byte(hw, | ||
5471 | (i << 1) + new_bank_offset + 1, high_byte); | ||
5472 | if (error != E1000_SUCCESS) | ||
5473 | sector_write_failed = TRUE; | ||
5474 | |||
5475 | if (sector_write_failed == FALSE) { | ||
5476 | /* Clear the now not used entry in the cache */ | ||
5477 | hw->eeprom_shadow_ram[i].modified = FALSE; | ||
5478 | hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF; | ||
5479 | } | ||
5480 | } | ||
5481 | |||
5482 | /* Don't bother writing the segment valid bits if sector | ||
5483 | * programming failed. */ | ||
5484 | if (sector_write_failed == FALSE) { | ||
5485 | /* Finally validate the new segment by setting bits 15:14 | ||
5486 | * to 10b in word 0x13; this can be done without an | ||
5487 | * erase as well since these bits are 11 to start with | ||
5488 | * and we need to change bit 14 to 0b */ | ||
5489 | e1000_read_ich8_byte(hw, | ||
5490 | E1000_ICH8_NVM_SIG_WORD * 2 + 1 + new_bank_offset, | ||
5491 | &high_byte); | ||
5492 | high_byte &= 0xBF; | ||
5493 | error = e1000_verify_write_ich8_byte(hw, | ||
5494 | E1000_ICH8_NVM_SIG_WORD * 2 + 1 + new_bank_offset, | ||
5495 | high_byte); | ||
5496 | if (error != E1000_SUCCESS) | ||
5497 | sector_write_failed = TRUE; | ||
5498 | |||
5499 | /* And invalidate the previously valid segment by setting | ||
5500 | * its signature word (0x13) high_byte to 0b. This can be | ||
5501 | * done without an erase because flash erase sets all bits | ||
5502 | * to 1's. We can write 1's to 0's without an erase */ | ||
5503 | error = e1000_verify_write_ich8_byte(hw, | ||
5504 | E1000_ICH8_NVM_SIG_WORD * 2 + 1 + old_bank_offset, | ||
5505 | 0); | ||
5506 | if (error != E1000_SUCCESS) | ||
5507 | sector_write_failed = TRUE; | ||
5508 | } | ||
5509 | } while (++sector_retries < 10 && sector_write_failed == TRUE); | ||
5510 | } | ||
5511 | |||
4998 | return error; | 5512 | return error; |
4999 | } | 5513 | } |
5000 | 5514 | ||
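Stripped of the byte-granular flash helpers, the ich8lan branch added above is a double-bank update: copy every word into the currently invalid bank (taking modified words from the shadow RAM), keep the signature bits of word 0x13 at 11b while copying, then mark the new bank valid (10b) and clear the old bank's signature. A condensed sketch of that control flow over plain byte arrays; the signature word and bit values come from the code above, the word count and everything else are simplifying assumptions:

    #include <stdint.h>
    #include <string.h>

    #define NVM_WORDS 2048 /* stands in for E1000_SHADOW_RAM_WORDS; any bank size works for the sketch */
    #define SIG_WORD  0x13 /* word carrying the bank-valid signature in bits 15:14 */

    struct shadow_word { int modified; uint16_t value; };

    static void commit_bank(uint8_t old_bank[], uint8_t new_bank[],
                            const struct shadow_word shadow[NVM_WORDS])
    {
        int i;

        memset(new_bank, 0xFF, NVM_WORDS * 2); /* flash erase leaves all bits set */

        for (i = 0; i < NVM_WORDS; i++) {
            uint16_t w = shadow[i].modified ? shadow[i].value :
                         (uint16_t)(old_bank[i * 2] | (old_bank[i * 2 + 1] << 8));
            if (i == SIG_WORD)
                w |= 0xC000;               /* keep 15:14 = 11b until the copy is done */
            new_bank[i * 2]     = (uint8_t)w;
            new_bank[i * 2 + 1] = (uint8_t)(w >> 8);
        }

        new_bank[SIG_WORD * 2 + 1] &= 0xBF; /* 15:14 -> 10b: new bank becomes valid */
        old_bank[SIG_WORD * 2 + 1]  = 0;    /* old signature cleared: 1 -> 0 needs no erase */
    }

The real code additionally verifies each byte write and retries the whole sector up to ten times; the sketch leaves that out.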
@@ -5102,15 +5616,19 @@ e1000_init_rx_addrs(struct e1000_hw *hw) | |||
5102 | * the other port. */ | 5616 | * the other port. */ |
5103 | if ((hw->mac_type == e1000_82571) && (hw->laa_is_present == TRUE)) | 5617 | if ((hw->mac_type == e1000_82571) && (hw->laa_is_present == TRUE)) |
5104 | rar_num -= 1; | 5618 | rar_num -= 1; |
5619 | if (hw->mac_type == e1000_ich8lan) | ||
5620 | rar_num = E1000_RAR_ENTRIES_ICH8LAN; | ||
5621 | |||
5105 | /* Zero out the other 15 receive addresses. */ | 5622 | /* Zero out the other 15 receive addresses. */ |
5106 | DEBUGOUT("Clearing RAR[1-15]\n"); | 5623 | DEBUGOUT("Clearing RAR[1-15]\n"); |
5107 | for(i = 1; i < rar_num; i++) { | 5624 | for(i = 1; i < rar_num; i++) { |
5108 | E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); | 5625 | E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); |
5626 | E1000_WRITE_FLUSH(hw); | ||
5109 | E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); | 5627 | E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); |
5628 | E1000_WRITE_FLUSH(hw); | ||
5110 | } | 5629 | } |
5111 | } | 5630 | } |
5112 | 5631 | ||
5113 | #if 0 | ||
5114 | /****************************************************************************** | 5632 | /****************************************************************************** |
5115 | * Updates the MAC's list of multicast addresses. | 5633 | * Updates the MAC's list of multicast addresses. |
5116 | * | 5634 | * |
@@ -5145,6 +5663,8 @@ e1000_mc_addr_list_update(struct e1000_hw *hw, | |||
5145 | /* Clear RAR[1-15] */ | 5663 | /* Clear RAR[1-15] */ |
5146 | DEBUGOUT(" Clearing RAR[1-15]\n"); | 5664 | DEBUGOUT(" Clearing RAR[1-15]\n"); |
5147 | num_rar_entry = E1000_RAR_ENTRIES; | 5665 | num_rar_entry = E1000_RAR_ENTRIES; |
5666 | if (hw->mac_type == e1000_ich8lan) | ||
5667 | num_rar_entry = E1000_RAR_ENTRIES_ICH8LAN; | ||
5148 | /* Reserve a spot for the Locally Administered Address to work around | 5668 | /* Reserve a spot for the Locally Administered Address to work around |
5149 | * an 82571 issue in which a reset on one port will reload the MAC on | 5669 | * an 82571 issue in which a reset on one port will reload the MAC on |
5150 | * the other port. */ | 5670 | * the other port. */ |
@@ -5153,14 +5673,19 @@ e1000_mc_addr_list_update(struct e1000_hw *hw, | |||
5153 | 5673 | ||
5154 | for(i = rar_used_count; i < num_rar_entry; i++) { | 5674 | for(i = rar_used_count; i < num_rar_entry; i++) { |
5155 | E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); | 5675 | E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); |
5676 | E1000_WRITE_FLUSH(hw); | ||
5156 | E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); | 5677 | E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); |
5678 | E1000_WRITE_FLUSH(hw); | ||
5157 | } | 5679 | } |
5158 | 5680 | ||
5159 | /* Clear the MTA */ | 5681 | /* Clear the MTA */ |
5160 | DEBUGOUT(" Clearing MTA\n"); | 5682 | DEBUGOUT(" Clearing MTA\n"); |
5161 | num_mta_entry = E1000_NUM_MTA_REGISTERS; | 5683 | num_mta_entry = E1000_NUM_MTA_REGISTERS; |
5684 | if (hw->mac_type == e1000_ich8lan) | ||
5685 | num_mta_entry = E1000_NUM_MTA_REGISTERS_ICH8LAN; | ||
5162 | for(i = 0; i < num_mta_entry; i++) { | 5686 | for(i = 0; i < num_mta_entry; i++) { |
5163 | E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); | 5687 | E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); |
5688 | E1000_WRITE_FLUSH(hw); | ||
5164 | } | 5689 | } |
5165 | 5690 | ||
5166 | /* Add the new addresses */ | 5691 | /* Add the new addresses */ |
@@ -5194,7 +5719,6 @@ e1000_mc_addr_list_update(struct e1000_hw *hw, | |||
5194 | } | 5719 | } |
5195 | DEBUGOUT("MC Update Complete\n"); | 5720 | DEBUGOUT("MC Update Complete\n"); |
5196 | } | 5721 | } |
5197 | #endif /* 0 */ | ||
5198 | 5722 | ||
5199 | /****************************************************************************** | 5723 | /****************************************************************************** |
5200 | * Hashes an address to determine its location in the multicast table | 5724 | * Hashes an address to determine its location in the multicast table |
@@ -5217,24 +5741,46 @@ e1000_hash_mc_addr(struct e1000_hw *hw, | |||
5217 | * LSB MSB | 5741 | * LSB MSB |
5218 | */ | 5742 | */ |
5219 | case 0: | 5743 | case 0: |
5220 | /* [47:36] i.e. 0x563 for above example address */ | 5744 | if (hw->mac_type == e1000_ich8lan) { |
5221 | hash_value = ((mc_addr[4] >> 4) | (((uint16_t) mc_addr[5]) << 4)); | 5745 | /* [47:38] i.e. 0x158 for above example address */ |
5746 | hash_value = ((mc_addr[4] >> 6) | (((uint16_t) mc_addr[5]) << 2)); | ||
5747 | } else { | ||
5748 | /* [47:36] i.e. 0x563 for above example address */ | ||
5749 | hash_value = ((mc_addr[4] >> 4) | (((uint16_t) mc_addr[5]) << 4)); | ||
5750 | } | ||
5222 | break; | 5751 | break; |
5223 | case 1: | 5752 | case 1: |
5224 | /* [46:35] i.e. 0xAC6 for above example address */ | 5753 | if (hw->mac_type == e1000_ich8lan) { |
5225 | hash_value = ((mc_addr[4] >> 3) | (((uint16_t) mc_addr[5]) << 5)); | 5754 | /* [46:37] i.e. 0x2B1 for above example address */ |
5755 | hash_value = ((mc_addr[4] >> 5) | (((uint16_t) mc_addr[5]) << 3)); | ||
5756 | } else { | ||
5757 | /* [46:35] i.e. 0xAC6 for above example address */ | ||
5758 | hash_value = ((mc_addr[4] >> 3) | (((uint16_t) mc_addr[5]) << 5)); | ||
5759 | } | ||
5226 | break; | 5760 | break; |
5227 | case 2: | 5761 | case 2: |
5228 | /* [45:34] i.e. 0x5D8 for above example address */ | 5762 | if (hw->mac_type == e1000_ich8lan) { |
5229 | hash_value = ((mc_addr[4] >> 2) | (((uint16_t) mc_addr[5]) << 6)); | 5763 | /*[45:36] i.e. 0x163 for above example address */ |
5764 | hash_value = ((mc_addr[4] >> 4) | (((uint16_t) mc_addr[5]) << 4)); | ||
5765 | } else { | ||
5766 | /* [45:34] i.e. 0x5D8 for above example address */ | ||
5767 | hash_value = ((mc_addr[4] >> 2) | (((uint16_t) mc_addr[5]) << 6)); | ||
5768 | } | ||
5230 | break; | 5769 | break; |
5231 | case 3: | 5770 | case 3: |
5232 | /* [43:32] i.e. 0x634 for above example address */ | 5771 | if (hw->mac_type == e1000_ich8lan) { |
5233 | hash_value = ((mc_addr[4]) | (((uint16_t) mc_addr[5]) << 8)); | 5772 | /* [43:34] i.e. 0x18D for above example address */ |
5773 | hash_value = ((mc_addr[4] >> 2) | (((uint16_t) mc_addr[5]) << 6)); | ||
5774 | } else { | ||
5775 | /* [43:32] i.e. 0x634 for above example address */ | ||
5776 | hash_value = ((mc_addr[4]) | (((uint16_t) mc_addr[5]) << 8)); | ||
5777 | } | ||
5234 | break; | 5778 | break; |
5235 | } | 5779 | } |
5236 | 5780 | ||
5237 | hash_value &= 0xFFF; | 5781 | hash_value &= 0xFFF; |
5782 | if (hw->mac_type == e1000_ich8lan) | ||
5783 | hash_value &= 0x3FF; | ||
5238 | 5784 | ||
5239 | return hash_value; | 5785 | return hash_value; |
5240 | } | 5786 | } |
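The ich8lan branches added to the hash above exist because that MAC has a quarter of the usual multicast table (hence the extra 0x3FF mask a few lines up), so the 12-bit hash shrinks to 10 bits and each mc_filter_type case slides its bit window down accordingly. A self-contained sketch of the ich8lan variant; only the last two address bytes feed the hash, and 0x34/0x56 are chosen so the results line up with the 0x158/0x2B1/0x163/0x18D values quoted in the comments:

    #include <stdint.h>
    #include <stdio.h>

    /* ICH8LAN-style hash: a 10-bit window taken from the top of the
     * destination address, position selected by mc_filter_type (0..3). */
    static uint16_t ich8_mc_hash(const uint8_t mc_addr[6], int filter_type)
    {
        static const int shift[4] = { 6, 5, 4, 2 }; /* right shift applied to mc_addr[4] */
        uint16_t hash = (uint16_t)((mc_addr[4] >> shift[filter_type]) |
                                   ((uint16_t)mc_addr[5] << (8 - shift[filter_type])));
        return hash & 0x3FF; /* 32 MTA registers * 32 bits = 1024 filter bits */
    }

    int main(void)
    {
        /* first four bytes are irrelevant to the hash; last two match the worked example */
        const uint8_t addr[6] = { 0x00, 0x00, 0x00, 0x00, 0x34, 0x56 };
        int type;

        for (type = 0; type < 4; type++)
            printf("filter type %d -> 0x%03X\n", type, ich8_mc_hash(addr, type));
        return 0;
    }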
@@ -5262,6 +5808,8 @@ e1000_mta_set(struct e1000_hw *hw, | |||
5262 | * register are determined by the lower 5 bits of the value. | 5808 | * register are determined by the lower 5 bits of the value. |
5263 | */ | 5809 | */ |
5264 | hash_reg = (hash_value >> 5) & 0x7F; | 5810 | hash_reg = (hash_value >> 5) & 0x7F; |
5811 | if (hw->mac_type == e1000_ich8lan) | ||
5812 | hash_reg &= 0x1F; | ||
5265 | hash_bit = hash_value & 0x1F; | 5813 | hash_bit = hash_value & 0x1F; |
5266 | 5814 | ||
5267 | mta = E1000_READ_REG_ARRAY(hw, MTA, hash_reg); | 5815 | mta = E1000_READ_REG_ARRAY(hw, MTA, hash_reg); |
@@ -5275,9 +5823,12 @@ e1000_mta_set(struct e1000_hw *hw, | |||
5275 | if((hw->mac_type == e1000_82544) && ((hash_reg & 0x1) == 1)) { | 5823 | if((hw->mac_type == e1000_82544) && ((hash_reg & 0x1) == 1)) { |
5276 | temp = E1000_READ_REG_ARRAY(hw, MTA, (hash_reg - 1)); | 5824 | temp = E1000_READ_REG_ARRAY(hw, MTA, (hash_reg - 1)); |
5277 | E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta); | 5825 | E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta); |
5826 | E1000_WRITE_FLUSH(hw); | ||
5278 | E1000_WRITE_REG_ARRAY(hw, MTA, (hash_reg - 1), temp); | 5827 | E1000_WRITE_REG_ARRAY(hw, MTA, (hash_reg - 1), temp); |
5828 | E1000_WRITE_FLUSH(hw); | ||
5279 | } else { | 5829 | } else { |
5280 | E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta); | 5830 | E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta); |
5831 | E1000_WRITE_FLUSH(hw); | ||
5281 | } | 5832 | } |
5282 | } | 5833 | } |
5283 | 5834 | ||
@@ -5334,7 +5885,9 @@ e1000_rar_set(struct e1000_hw *hw, | |||
5334 | } | 5885 | } |
5335 | 5886 | ||
5336 | E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low); | 5887 | E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low); |
5888 | E1000_WRITE_FLUSH(hw); | ||
5337 | E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high); | 5889 | E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high); |
5890 | E1000_WRITE_FLUSH(hw); | ||
5338 | } | 5891 | } |
5339 | 5892 | ||
5340 | /****************************************************************************** | 5893 | /****************************************************************************** |
@@ -5351,12 +5904,18 @@ e1000_write_vfta(struct e1000_hw *hw, | |||
5351 | { | 5904 | { |
5352 | uint32_t temp; | 5905 | uint32_t temp; |
5353 | 5906 | ||
5354 | if((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) { | 5907 | if (hw->mac_type == e1000_ich8lan) |
5908 | return; | ||
5909 | |||
5910 | if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) { | ||
5355 | temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1)); | 5911 | temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1)); |
5356 | E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); | 5912 | E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); |
5913 | E1000_WRITE_FLUSH(hw); | ||
5357 | E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp); | 5914 | E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp); |
5915 | E1000_WRITE_FLUSH(hw); | ||
5358 | } else { | 5916 | } else { |
5359 | E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); | 5917 | E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); |
5918 | E1000_WRITE_FLUSH(hw); | ||
5360 | } | 5919 | } |
5361 | } | 5920 | } |
5362 | 5921 | ||
@@ -5373,6 +5932,9 @@ e1000_clear_vfta(struct e1000_hw *hw) | |||
5373 | uint32_t vfta_offset = 0; | 5932 | uint32_t vfta_offset = 0; |
5374 | uint32_t vfta_bit_in_reg = 0; | 5933 | uint32_t vfta_bit_in_reg = 0; |
5375 | 5934 | ||
5935 | if (hw->mac_type == e1000_ich8lan) | ||
5936 | return; | ||
5937 | |||
5376 | if (hw->mac_type == e1000_82573) { | 5938 | if (hw->mac_type == e1000_82573) { |
5377 | if (hw->mng_cookie.vlan_id != 0) { | 5939 | if (hw->mng_cookie.vlan_id != 0) { |
5378 | /* The VFTA is a 4096b bit-field, each identifying a single VLAN | 5940 | /* The VFTA is a 4096b bit-field, each identifying a single VLAN |
@@ -5392,6 +5954,7 @@ e1000_clear_vfta(struct e1000_hw *hw) | |||
5392 | * manageability unit */ | 5954 | * manageability unit */ |
5393 | vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0; | 5955 | vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0; |
5394 | E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value); | 5956 | E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value); |
5957 | E1000_WRITE_FLUSH(hw); | ||
5395 | } | 5958 | } |
5396 | } | 5959 | } |
5397 | 5960 | ||
@@ -5421,9 +5984,18 @@ e1000_id_led_init(struct e1000_hw * hw) | |||
5421 | DEBUGOUT("EEPROM Read Error\n"); | 5984 | DEBUGOUT("EEPROM Read Error\n"); |
5422 | return -E1000_ERR_EEPROM; | 5985 | return -E1000_ERR_EEPROM; |
5423 | } | 5986 | } |
5424 | if((eeprom_data== ID_LED_RESERVED_0000) || | 5987 | |
5425 | (eeprom_data == ID_LED_RESERVED_FFFF)) eeprom_data = ID_LED_DEFAULT; | 5988 | if ((hw->mac_type == e1000_82573) && |
5426 | for(i = 0; i < 4; i++) { | 5989 | (eeprom_data == ID_LED_RESERVED_82573)) |
5990 | eeprom_data = ID_LED_DEFAULT_82573; | ||
5991 | else if ((eeprom_data == ID_LED_RESERVED_0000) || | ||
5992 | (eeprom_data == ID_LED_RESERVED_FFFF)) { | ||
5993 | if (hw->mac_type == e1000_ich8lan) | ||
5994 | eeprom_data = ID_LED_DEFAULT_ICH8LAN; | ||
5995 | else | ||
5996 | eeprom_data = ID_LED_DEFAULT; | ||
5997 | } | ||
5998 | for (i = 0; i < 4; i++) { | ||
5427 | temp = (eeprom_data >> (i << 2)) & led_mask; | 5999 | temp = (eeprom_data >> (i << 2)) & led_mask; |
5428 | switch(temp) { | 6000 | switch(temp) { |
5429 | case ID_LED_ON1_DEF2: | 6001 | case ID_LED_ON1_DEF2: |
@@ -5519,6 +6091,44 @@ e1000_setup_led(struct e1000_hw *hw) | |||
5519 | } | 6091 | } |
5520 | 6092 | ||
5521 | /****************************************************************************** | 6093 | /****************************************************************************** |
6094 | * Used on 82571 and later Si that has LED blink bits. | ||
6095 | * Callers must use their own timer and should have already called | ||
6096 | * e1000_id_led_init() | ||
6097 | * Call e1000_cleanup_led() to stop blinking | ||
6098 | * | ||
6099 | * hw - Struct containing variables accessed by shared code | ||
6100 | *****************************************************************************/ | ||
6101 | int32_t | ||
6102 | e1000_blink_led_start(struct e1000_hw *hw) | ||
6103 | { | ||
6104 | int16_t i; | ||
6105 | uint32_t ledctl_blink = 0; | ||
6106 | |||
6107 | DEBUGFUNC("e1000_id_led_blink_on"); | ||
6108 | |||
6109 | if (hw->mac_type < e1000_82571) { | ||
6110 | /* Nothing to do */ | ||
6111 | return E1000_SUCCESS; | ||
6112 | } | ||
6113 | if (hw->media_type == e1000_media_type_fiber) { | ||
6114 | /* always blink LED0 for PCI-E fiber */ | ||
6115 | ledctl_blink = E1000_LEDCTL_LED0_BLINK | | ||
6116 | (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); | ||
6117 | } else { | ||
6118 | /* set the blink bit for each LED that's "on" (0x0E) in ledctl_mode2 */ | ||
6119 | ledctl_blink = hw->ledctl_mode2; | ||
6120 | for (i=0; i < 4; i++) | ||
6121 | if (((hw->ledctl_mode2 >> (i * 8)) & 0xFF) == | ||
6122 | E1000_LEDCTL_MODE_LED_ON) | ||
6123 | ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << (i * 8)); | ||
6124 | } | ||
6125 | |||
6126 | E1000_WRITE_REG(hw, LEDCTL, ledctl_blink); | ||
6127 | |||
6128 | return E1000_SUCCESS; | ||
6129 | } | ||
6130 | |||
6131 | /****************************************************************************** | ||
5522 | * Restores the saved state of the SW controllable LED. | 6132 | * Restores the saved state of the SW controllable LED. |
5523 | * | 6133 | * |
5524 | * hw - Struct containing variables accessed by shared code | 6134 | * hw - Struct containing variables accessed by shared code |
@@ -5548,6 +6158,10 @@ e1000_cleanup_led(struct e1000_hw *hw) | |||
5548 | return ret_val; | 6158 | return ret_val; |
5549 | /* Fall Through */ | 6159 | /* Fall Through */ |
5550 | default: | 6160 | default: |
6161 | if (hw->phy_type == e1000_phy_ife) { | ||
6162 | e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0); | ||
6163 | break; | ||
6164 | } | ||
5551 | /* Restore LEDCTL settings */ | 6165 | /* Restore LEDCTL settings */ |
5552 | E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_default); | 6166 | E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_default); |
5553 | break; | 6167 | break; |
@@ -5592,7 +6206,10 @@ e1000_led_on(struct e1000_hw *hw) | |||
5592 | /* Clear SW Definable Pin 0 to turn on the LED */ | 6206 | /* Clear SW Definable Pin 0 to turn on the LED */ |
5593 | ctrl &= ~E1000_CTRL_SWDPIN0; | 6207 | ctrl &= ~E1000_CTRL_SWDPIN0; |
5594 | ctrl |= E1000_CTRL_SWDPIO0; | 6208 | ctrl |= E1000_CTRL_SWDPIO0; |
5595 | } else if(hw->media_type == e1000_media_type_copper) { | 6209 | } else if (hw->phy_type == e1000_phy_ife) { |
6210 | e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, | ||
6211 | (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON)); | ||
6212 | } else if (hw->media_type == e1000_media_type_copper) { | ||
5596 | E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode2); | 6213 | E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode2); |
5597 | return E1000_SUCCESS; | 6214 | return E1000_SUCCESS; |
5598 | } | 6215 | } |
@@ -5640,7 +6257,10 @@ e1000_led_off(struct e1000_hw *hw) | |||
5640 | /* Set SW Definable Pin 0 to turn off the LED */ | 6257 | /* Set SW Definable Pin 0 to turn off the LED */ |
5641 | ctrl |= E1000_CTRL_SWDPIN0; | 6258 | ctrl |= E1000_CTRL_SWDPIN0; |
5642 | ctrl |= E1000_CTRL_SWDPIO0; | 6259 | ctrl |= E1000_CTRL_SWDPIO0; |
5643 | } else if(hw->media_type == e1000_media_type_copper) { | 6260 | } else if (hw->phy_type == e1000_phy_ife) { |
6261 | e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, | ||
6262 | (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF)); | ||
6263 | } else if (hw->media_type == e1000_media_type_copper) { | ||
5644 | E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode1); | 6264 | E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode1); |
5645 | return E1000_SUCCESS; | 6265 | return E1000_SUCCESS; |
5646 | } | 6266 | } |
@@ -5678,12 +6298,16 @@ e1000_clear_hw_cntrs(struct e1000_hw *hw) | |||
5678 | temp = E1000_READ_REG(hw, XOFFRXC); | 6298 | temp = E1000_READ_REG(hw, XOFFRXC); |
5679 | temp = E1000_READ_REG(hw, XOFFTXC); | 6299 | temp = E1000_READ_REG(hw, XOFFTXC); |
5680 | temp = E1000_READ_REG(hw, FCRUC); | 6300 | temp = E1000_READ_REG(hw, FCRUC); |
6301 | |||
6302 | if (hw->mac_type != e1000_ich8lan) { | ||
5681 | temp = E1000_READ_REG(hw, PRC64); | 6303 | temp = E1000_READ_REG(hw, PRC64); |
5682 | temp = E1000_READ_REG(hw, PRC127); | 6304 | temp = E1000_READ_REG(hw, PRC127); |
5683 | temp = E1000_READ_REG(hw, PRC255); | 6305 | temp = E1000_READ_REG(hw, PRC255); |
5684 | temp = E1000_READ_REG(hw, PRC511); | 6306 | temp = E1000_READ_REG(hw, PRC511); |
5685 | temp = E1000_READ_REG(hw, PRC1023); | 6307 | temp = E1000_READ_REG(hw, PRC1023); |
5686 | temp = E1000_READ_REG(hw, PRC1522); | 6308 | temp = E1000_READ_REG(hw, PRC1522); |
6309 | } | ||
6310 | |||
5687 | temp = E1000_READ_REG(hw, GPRC); | 6311 | temp = E1000_READ_REG(hw, GPRC); |
5688 | temp = E1000_READ_REG(hw, BPRC); | 6312 | temp = E1000_READ_REG(hw, BPRC); |
5689 | temp = E1000_READ_REG(hw, MPRC); | 6313 | temp = E1000_READ_REG(hw, MPRC); |
@@ -5703,12 +6327,16 @@ e1000_clear_hw_cntrs(struct e1000_hw *hw) | |||
5703 | temp = E1000_READ_REG(hw, TOTH); | 6327 | temp = E1000_READ_REG(hw, TOTH); |
5704 | temp = E1000_READ_REG(hw, TPR); | 6328 | temp = E1000_READ_REG(hw, TPR); |
5705 | temp = E1000_READ_REG(hw, TPT); | 6329 | temp = E1000_READ_REG(hw, TPT); |
6330 | |||
6331 | if (hw->mac_type != e1000_ich8lan) { | ||
5706 | temp = E1000_READ_REG(hw, PTC64); | 6332 | temp = E1000_READ_REG(hw, PTC64); |
5707 | temp = E1000_READ_REG(hw, PTC127); | 6333 | temp = E1000_READ_REG(hw, PTC127); |
5708 | temp = E1000_READ_REG(hw, PTC255); | 6334 | temp = E1000_READ_REG(hw, PTC255); |
5709 | temp = E1000_READ_REG(hw, PTC511); | 6335 | temp = E1000_READ_REG(hw, PTC511); |
5710 | temp = E1000_READ_REG(hw, PTC1023); | 6336 | temp = E1000_READ_REG(hw, PTC1023); |
5711 | temp = E1000_READ_REG(hw, PTC1522); | 6337 | temp = E1000_READ_REG(hw, PTC1522); |
6338 | } | ||
6339 | |||
5712 | temp = E1000_READ_REG(hw, MPTC); | 6340 | temp = E1000_READ_REG(hw, MPTC); |
5713 | temp = E1000_READ_REG(hw, BPTC); | 6341 | temp = E1000_READ_REG(hw, BPTC); |
5714 | 6342 | ||
@@ -5731,6 +6359,9 @@ e1000_clear_hw_cntrs(struct e1000_hw *hw) | |||
5731 | 6359 | ||
5732 | temp = E1000_READ_REG(hw, IAC); | 6360 | temp = E1000_READ_REG(hw, IAC); |
5733 | temp = E1000_READ_REG(hw, ICRXOC); | 6361 | temp = E1000_READ_REG(hw, ICRXOC); |
6362 | |||
6363 | if (hw->mac_type == e1000_ich8lan) return; | ||
6364 | |||
5734 | temp = E1000_READ_REG(hw, ICRXPTC); | 6365 | temp = E1000_READ_REG(hw, ICRXPTC); |
5735 | temp = E1000_READ_REG(hw, ICRXATC); | 6366 | temp = E1000_READ_REG(hw, ICRXATC); |
5736 | temp = E1000_READ_REG(hw, ICTXPTC); | 6367 | temp = E1000_READ_REG(hw, ICTXPTC); |
@@ -5911,6 +6542,7 @@ e1000_get_bus_info(struct e1000_hw *hw) | |||
5911 | hw->bus_width = e1000_bus_width_pciex_1; | 6542 | hw->bus_width = e1000_bus_width_pciex_1; |
5912 | break; | 6543 | break; |
5913 | case e1000_82571: | 6544 | case e1000_82571: |
6545 | case e1000_ich8lan: | ||
5914 | case e1000_80003es2lan: | 6546 | case e1000_80003es2lan: |
5915 | hw->bus_type = e1000_bus_type_pci_express; | 6547 | hw->bus_type = e1000_bus_type_pci_express; |
5916 | hw->bus_speed = e1000_bus_speed_2500; | 6548 | hw->bus_speed = e1000_bus_speed_2500; |
@@ -5948,8 +6580,6 @@ e1000_get_bus_info(struct e1000_hw *hw) | |||
5948 | break; | 6580 | break; |
5949 | } | 6581 | } |
5950 | } | 6582 | } |
5951 | |||
5952 | #if 0 | ||
5953 | /****************************************************************************** | 6583 | /****************************************************************************** |
5954 | * Reads a value from one of the devices registers using port I/O (as opposed | 6584 | * Reads a value from one of the devices registers using port I/O (as opposed |
5955 | * memory mapped I/O). Only 82544 and newer devices support port I/O. | 6585 | * memory mapped I/O). Only 82544 and newer devices support port I/O. |
@@ -5967,7 +6597,6 @@ e1000_read_reg_io(struct e1000_hw *hw, | |||
5967 | e1000_io_write(hw, io_addr, offset); | 6597 | e1000_io_write(hw, io_addr, offset); |
5968 | return e1000_io_read(hw, io_data); | 6598 | return e1000_io_read(hw, io_data); |
5969 | } | 6599 | } |
5970 | #endif /* 0 */ | ||
5971 | 6600 | ||
5972 | /****************************************************************************** | 6601 | /****************************************************************************** |
5973 | * Writes a value to one of the devices registers using port I/O (as opposed to | 6602 | * Writes a value to one of the devices registers using port I/O (as opposed to |
@@ -6012,8 +6641,6 @@ e1000_get_cable_length(struct e1000_hw *hw, | |||
6012 | { | 6641 | { |
6013 | int32_t ret_val; | 6642 | int32_t ret_val; |
6014 | uint16_t agc_value = 0; | 6643 | uint16_t agc_value = 0; |
6015 | uint16_t cur_agc, min_agc = IGP01E1000_AGC_LENGTH_TABLE_SIZE; | ||
6016 | uint16_t max_agc = 0; | ||
6017 | uint16_t i, phy_data; | 6644 | uint16_t i, phy_data; |
6018 | uint16_t cable_length; | 6645 | uint16_t cable_length; |
6019 | 6646 | ||
@@ -6086,6 +6713,8 @@ e1000_get_cable_length(struct e1000_hw *hw, | |||
6086 | break; | 6713 | break; |
6087 | } | 6714 | } |
6088 | } else if(hw->phy_type == e1000_phy_igp) { /* For IGP PHY */ | 6715 | } else if(hw->phy_type == e1000_phy_igp) { /* For IGP PHY */ |
6716 | uint16_t cur_agc_value; | ||
6717 | uint16_t min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE; | ||
6089 | uint16_t agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = | 6718 | uint16_t agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = |
6090 | {IGP01E1000_PHY_AGC_A, | 6719 | {IGP01E1000_PHY_AGC_A, |
6091 | IGP01E1000_PHY_AGC_B, | 6720 | IGP01E1000_PHY_AGC_B, |
@@ -6098,23 +6727,23 @@ e1000_get_cable_length(struct e1000_hw *hw, | |||
6098 | if(ret_val) | 6727 | if(ret_val) |
6099 | return ret_val; | 6728 | return ret_val; |
6100 | 6729 | ||
6101 | cur_agc = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT; | 6730 | cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT; |
6102 | 6731 | ||
6103 | /* Array bound check. */ | 6732 | /* Value bound check. */ |
6104 | if((cur_agc >= IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) || | 6733 | if ((cur_agc_value >= IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) || |
6105 | (cur_agc == 0)) | 6734 | (cur_agc_value == 0)) |
6106 | return -E1000_ERR_PHY; | 6735 | return -E1000_ERR_PHY; |
6107 | 6736 | ||
6108 | agc_value += cur_agc; | 6737 | agc_value += cur_agc_value; |
6109 | 6738 | ||
6110 | /* Update minimal AGC value. */ | 6739 | /* Update minimal AGC value. */ |
6111 | if(min_agc > cur_agc) | 6740 | if (min_agc_value > cur_agc_value) |
6112 | min_agc = cur_agc; | 6741 | min_agc_value = cur_agc_value; |
6113 | } | 6742 | } |
6114 | 6743 | ||
6115 | /* Remove the minimal AGC result for length < 50m */ | 6744 | /* Remove the minimal AGC result for length < 50m */ |
6116 | if(agc_value < IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) { | 6745 | if (agc_value < IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) { |
6117 | agc_value -= min_agc; | 6746 | agc_value -= min_agc_value; |
6118 | 6747 | ||
6119 | /* Get the average length of the remaining 3 channels */ | 6748 | /* Get the average length of the remaining 3 channels */ |
6120 | agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1); | 6749 | agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1); |
@@ -6130,7 +6759,10 @@ e1000_get_cable_length(struct e1000_hw *hw, | |||
6130 | IGP01E1000_AGC_RANGE) : 0; | 6759 | IGP01E1000_AGC_RANGE) : 0; |
6131 | *max_length = e1000_igp_cable_length_table[agc_value] + | 6760 | *max_length = e1000_igp_cable_length_table[agc_value] + |
6132 | IGP01E1000_AGC_RANGE; | 6761 | IGP01E1000_AGC_RANGE; |
6133 | } else if (hw->phy_type == e1000_phy_igp_2) { | 6762 | } else if (hw->phy_type == e1000_phy_igp_2 || |
6763 | hw->phy_type == e1000_phy_igp_3) { | ||
6764 | uint16_t cur_agc_index, max_agc_index = 0; | ||
6765 | uint16_t min_agc_index = IGP02E1000_AGC_LENGTH_TABLE_SIZE - 1; | ||
6134 | uint16_t agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = | 6766 | uint16_t agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = |
6135 | {IGP02E1000_PHY_AGC_A, | 6767 | {IGP02E1000_PHY_AGC_A, |
6136 | IGP02E1000_PHY_AGC_B, | 6768 | IGP02E1000_PHY_AGC_B, |
@@ -6145,19 +6777,27 @@ e1000_get_cable_length(struct e1000_hw *hw, | |||
6145 | /* Getting bits 15:9, which represent the combination of coarse and | 6777 | /* Getting bits 15:9, which represent the combination of coarse and |
6146 | * fine gain values. The result is a number that can be put into | 6778 | * fine gain values. The result is a number that can be put into |
6147 | * the lookup table to obtain the approximate cable length. */ | 6779 | * the lookup table to obtain the approximate cable length. */ |
6148 | cur_agc = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & | 6780 | cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & |
6149 | IGP02E1000_AGC_LENGTH_MASK; | 6781 | IGP02E1000_AGC_LENGTH_MASK; |
6150 | 6782 | ||
6151 | /* Remove min & max AGC values from calculation. */ | 6783 | /* Array index bound check. */ |
6152 | if (e1000_igp_2_cable_length_table[min_agc] > e1000_igp_2_cable_length_table[cur_agc]) | 6784 | if ((cur_agc_index >= IGP02E1000_AGC_LENGTH_TABLE_SIZE) || |
6153 | min_agc = cur_agc; | 6785 | (cur_agc_index == 0)) |
6154 | if (e1000_igp_2_cable_length_table[max_agc] < e1000_igp_2_cable_length_table[cur_agc]) | 6786 | return -E1000_ERR_PHY; |
6155 | max_agc = cur_agc; | ||
6156 | 6787 | ||
6157 | agc_value += e1000_igp_2_cable_length_table[cur_agc]; | 6788 | /* Remove min & max AGC values from calculation. */ |
6789 | if (e1000_igp_2_cable_length_table[min_agc_index] > | ||
6790 | e1000_igp_2_cable_length_table[cur_agc_index]) | ||
6791 | min_agc_index = cur_agc_index; | ||
6792 | if (e1000_igp_2_cable_length_table[max_agc_index] < | ||
6793 | e1000_igp_2_cable_length_table[cur_agc_index]) | ||
6794 | max_agc_index = cur_agc_index; | ||
6795 | |||
6796 | agc_value += e1000_igp_2_cable_length_table[cur_agc_index]; | ||
6158 | } | 6797 | } |
6159 | 6798 | ||
6160 | agc_value -= (e1000_igp_2_cable_length_table[min_agc] + e1000_igp_2_cable_length_table[max_agc]); | 6799 | agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] + |
6800 | e1000_igp_2_cable_length_table[max_agc_index]); | ||
6161 | agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); | 6801 | agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); |
6162 | 6802 | ||
6163 | /* Calculate cable length with the error range of +/- 10 meters. */ | 6803 | /* Calculate cable length with the error range of +/- 10 meters. */ |
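For reference, the trimmed-mean calculation the IGP02 branch performs above can be shown in isolation: each channel's AGC reading is converted to a length through the lookup table, the shortest and longest channels are dropped, the remaining two are averaged, and +/- IGP02E1000_AGC_RANGE (10 m) gives the reported window. The sketch below is a standalone userspace illustration with made-up per-channel lengths, not driver code.

#include <stdint.h>
#include <stdio.h>

#define CHANNELS  4
#define AGC_RANGE 10   /* +/- error in meters, mirroring IGP02E1000_AGC_RANGE */

/* len[] holds per-channel lengths already looked up from the AGC table. */
static void trimmed_cable_length(const uint16_t len[CHANNELS],
                                 uint16_t *min_len, uint16_t *max_len)
{
    uint32_t sum = 0, lo = len[0], hi = len[0];
    int i;

    for (i = 0; i < CHANNELS; i++) {
        sum += len[i];
        if (len[i] < lo)
            lo = len[i];
        if (len[i] > hi)
            hi = len[i];
    }

    /* Drop the extreme channels and average the remaining two. */
    sum -= lo + hi;
    sum /= CHANNELS - 2;

    *min_len = (sum > AGC_RANGE) ? (uint16_t)(sum - AGC_RANGE) : 0;
    *max_len = (uint16_t)(sum + AGC_RANGE);
}

int main(void)
{
    uint16_t len[CHANNELS] = { 42, 55, 48, 51 };   /* hypothetical lookups */
    uint16_t lo, hi;

    trimmed_cable_length(len, &lo, &hi);
    printf("estimated cable length: %u-%u m\n", lo, hi);   /* 39-59 m */
    return 0;
}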
@@ -6203,7 +6843,8 @@ e1000_check_polarity(struct e1000_hw *hw, | |||
6203 | return ret_val; | 6843 | return ret_val; |
6204 | *polarity = (phy_data & M88E1000_PSSR_REV_POLARITY) >> | 6844 | *polarity = (phy_data & M88E1000_PSSR_REV_POLARITY) >> |
6205 | M88E1000_PSSR_REV_POLARITY_SHIFT; | 6845 | M88E1000_PSSR_REV_POLARITY_SHIFT; |
6206 | } else if(hw->phy_type == e1000_phy_igp || | 6846 | } else if (hw->phy_type == e1000_phy_igp || |
6847 | hw->phy_type == e1000_phy_igp_3 || | ||
6207 | hw->phy_type == e1000_phy_igp_2) { | 6848 | hw->phy_type == e1000_phy_igp_2) { |
6208 | /* Read the Status register to check the speed */ | 6849 | /* Read the Status register to check the speed */ |
6209 | ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, | 6850 | ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, |
@@ -6229,6 +6870,13 @@ e1000_check_polarity(struct e1000_hw *hw, | |||
6229 | * 100 Mbps this bit is always 0) */ | 6870 | * 100 Mbps this bit is always 0) */ |
6230 | *polarity = phy_data & IGP01E1000_PSSR_POLARITY_REVERSED; | 6871 | *polarity = phy_data & IGP01E1000_PSSR_POLARITY_REVERSED; |
6231 | } | 6872 | } |
6873 | } else if (hw->phy_type == e1000_phy_ife) { | ||
6874 | ret_val = e1000_read_phy_reg(hw, IFE_PHY_EXTENDED_STATUS_CONTROL, | ||
6875 | &phy_data); | ||
6876 | if (ret_val) | ||
6877 | return ret_val; | ||
6878 | *polarity = (phy_data & IFE_PESC_POLARITY_REVERSED) >> | ||
6879 | IFE_PESC_POLARITY_REVERSED_SHIFT; | ||
6232 | } | 6880 | } |
6233 | return E1000_SUCCESS; | 6881 | return E1000_SUCCESS; |
6234 | } | 6882 | } |
@@ -6256,7 +6904,8 @@ e1000_check_downshift(struct e1000_hw *hw) | |||
6256 | 6904 | ||
6257 | DEBUGFUNC("e1000_check_downshift"); | 6905 | DEBUGFUNC("e1000_check_downshift"); |
6258 | 6906 | ||
6259 | if(hw->phy_type == e1000_phy_igp || | 6907 | if (hw->phy_type == e1000_phy_igp || |
6908 | hw->phy_type == e1000_phy_igp_3 || | ||
6260 | hw->phy_type == e1000_phy_igp_2) { | 6909 | hw->phy_type == e1000_phy_igp_2) { |
6261 | ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH, | 6910 | ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH, |
6262 | &phy_data); | 6911 | &phy_data); |
@@ -6273,6 +6922,9 @@ e1000_check_downshift(struct e1000_hw *hw) | |||
6273 | 6922 | ||
6274 | hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >> | 6923 | hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >> |
6275 | M88E1000_PSSR_DOWNSHIFT_SHIFT; | 6924 | M88E1000_PSSR_DOWNSHIFT_SHIFT; |
6925 | } else if (hw->phy_type == e1000_phy_ife) { | ||
6926 | /* e1000_phy_ife supports 10/100 speed only */ | ||
6927 | hw->speed_downgraded = FALSE; | ||
6276 | } | 6928 | } |
6277 | 6929 | ||
6278 | return E1000_SUCCESS; | 6930 | return E1000_SUCCESS; |
@@ -6317,7 +6969,9 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw, | |||
6317 | 6969 | ||
6318 | if(speed == SPEED_1000) { | 6970 | if(speed == SPEED_1000) { |
6319 | 6971 | ||
6320 | e1000_get_cable_length(hw, &min_length, &max_length); | 6972 | ret_val = e1000_get_cable_length(hw, &min_length, &max_length); |
6973 | if (ret_val) | ||
6974 | return ret_val; | ||
6321 | 6975 | ||
6322 | if((hw->dsp_config_state == e1000_dsp_config_enabled) && | 6976 | if((hw->dsp_config_state == e1000_dsp_config_enabled) && |
6323 | min_length >= e1000_igp_cable_length_50) { | 6977 | min_length >= e1000_igp_cable_length_50) { |
@@ -6525,20 +7179,27 @@ static int32_t | |||
6525 | e1000_set_d3_lplu_state(struct e1000_hw *hw, | 7179 | e1000_set_d3_lplu_state(struct e1000_hw *hw, |
6526 | boolean_t active) | 7180 | boolean_t active) |
6527 | { | 7181 | { |
7182 | uint32_t phy_ctrl = 0; | ||
6528 | int32_t ret_val; | 7183 | int32_t ret_val; |
6529 | uint16_t phy_data; | 7184 | uint16_t phy_data; |
6530 | DEBUGFUNC("e1000_set_d3_lplu_state"); | 7185 | DEBUGFUNC("e1000_set_d3_lplu_state"); |
6531 | 7186 | ||
6532 | if(hw->phy_type != e1000_phy_igp && hw->phy_type != e1000_phy_igp_2) | 7187 | if (hw->phy_type != e1000_phy_igp && hw->phy_type != e1000_phy_igp_2 |
7188 | && hw->phy_type != e1000_phy_igp_3) | ||
6533 | return E1000_SUCCESS; | 7189 | return E1000_SUCCESS; |
6534 | 7190 | ||
6535 | /* During driver activity LPLU should not be used or it will attain link | 7191 | /* During driver activity LPLU should not be used or it will attain link |
6536 | * from the lowest speeds starting from 10Mbps. The capability is used for | 7192 | * from the lowest speeds starting from 10Mbps. The capability is used for |
6537 | * Dx transitions and states */ | 7193 | * Dx transitions and states */ |
6538 | if(hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) { | 7194 | if (hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) { |
6539 | ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data); | 7195 | ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data); |
6540 | if(ret_val) | 7196 | if (ret_val) |
6541 | return ret_val; | 7197 | return ret_val; |
7198 | } else if (hw->mac_type == e1000_ich8lan) { | ||
7199 | /* MAC writes into PHY register based on the state transition | ||
7200 | * and starts auto-negotiation. The SW driver can overwrite the settings | ||
7201 | * in CSR PHY power control E1000_PHY_CTRL register. */ | ||
7202 | phy_ctrl = E1000_READ_REG(hw, PHY_CTRL); | ||
6542 | } else { | 7203 | } else { |
6543 | ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); | 7204 | ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); |
6544 | if(ret_val) | 7205 | if(ret_val) |
@@ -6553,11 +7214,16 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw, | |||
6553 | if(ret_val) | 7214 | if(ret_val) |
6554 | return ret_val; | 7215 | return ret_val; |
6555 | } else { | 7216 | } else { |
7217 | if (hw->mac_type == e1000_ich8lan) { | ||
7218 | phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU; | ||
7219 | E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl); | ||
7220 | } else { | ||
6556 | phy_data &= ~IGP02E1000_PM_D3_LPLU; | 7221 | phy_data &= ~IGP02E1000_PM_D3_LPLU; |
6557 | ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, | 7222 | ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, |
6558 | phy_data); | 7223 | phy_data); |
6559 | if (ret_val) | 7224 | if (ret_val) |
6560 | return ret_val; | 7225 | return ret_val; |
7226 | } | ||
6561 | } | 7227 | } |
6562 | 7228 | ||
6563 | /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during | 7229 | /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during |
@@ -6593,17 +7259,22 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw, | |||
6593 | (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) { | 7259 | (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) { |
6594 | 7260 | ||
6595 | if(hw->mac_type == e1000_82541_rev_2 || | 7261 | if(hw->mac_type == e1000_82541_rev_2 || |
6596 | hw->mac_type == e1000_82547_rev_2) { | 7262 | hw->mac_type == e1000_82547_rev_2) { |
6597 | phy_data |= IGP01E1000_GMII_FLEX_SPD; | 7263 | phy_data |= IGP01E1000_GMII_FLEX_SPD; |
6598 | ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data); | 7264 | ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data); |
6599 | if(ret_val) | 7265 | if(ret_val) |
6600 | return ret_val; | 7266 | return ret_val; |
6601 | } else { | 7267 | } else { |
7268 | if (hw->mac_type == e1000_ich8lan) { | ||
7269 | phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU; | ||
7270 | E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl); | ||
7271 | } else { | ||
6602 | phy_data |= IGP02E1000_PM_D3_LPLU; | 7272 | phy_data |= IGP02E1000_PM_D3_LPLU; |
6603 | ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, | 7273 | ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, |
6604 | phy_data); | 7274 | phy_data); |
6605 | if (ret_val) | 7275 | if (ret_val) |
6606 | return ret_val; | 7276 | return ret_val; |
7277 | } | ||
6607 | } | 7278 | } |
6608 | 7279 | ||
6609 | /* When LPLU is enabled we should disable SmartSpeed */ | 7280 | /* When LPLU is enabled we should disable SmartSpeed */ |
@@ -6638,6 +7309,7 @@ static int32_t | |||
6638 | e1000_set_d0_lplu_state(struct e1000_hw *hw, | 7309 | e1000_set_d0_lplu_state(struct e1000_hw *hw, |
6639 | boolean_t active) | 7310 | boolean_t active) |
6640 | { | 7311 | { |
7312 | uint32_t phy_ctrl = 0; | ||
6641 | int32_t ret_val; | 7313 | int32_t ret_val; |
6642 | uint16_t phy_data; | 7314 | uint16_t phy_data; |
6643 | DEBUGFUNC("e1000_set_d0_lplu_state"); | 7315 | DEBUGFUNC("e1000_set_d0_lplu_state"); |
@@ -6645,15 +7317,24 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw, | |||
6645 | if(hw->mac_type <= e1000_82547_rev_2) | 7317 | if(hw->mac_type <= e1000_82547_rev_2) |
6646 | return E1000_SUCCESS; | 7318 | return E1000_SUCCESS; |
6647 | 7319 | ||
7320 | if (hw->mac_type == e1000_ich8lan) { | ||
7321 | phy_ctrl = E1000_READ_REG(hw, PHY_CTRL); | ||
7322 | } else { | ||
6648 | ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); | 7323 | ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); |
6649 | if(ret_val) | 7324 | if(ret_val) |
6650 | return ret_val; | 7325 | return ret_val; |
7326 | } | ||
6651 | 7327 | ||
6652 | if (!active) { | 7328 | if (!active) { |
7329 | if (hw->mac_type == e1000_ich8lan) { | ||
7330 | phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU; | ||
7331 | E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl); | ||
7332 | } else { | ||
6653 | phy_data &= ~IGP02E1000_PM_D0_LPLU; | 7333 | phy_data &= ~IGP02E1000_PM_D0_LPLU; |
6654 | ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); | 7334 | ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); |
6655 | if (ret_val) | 7335 | if (ret_val) |
6656 | return ret_val; | 7336 | return ret_val; |
7337 | } | ||
6657 | 7338 | ||
6658 | /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during | 7339 | /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during |
6659 | * Dx states where the power conservation is most important. During | 7340 | * Dx states where the power conservation is most important. During |
@@ -6686,10 +7367,15 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw, | |||
6686 | 7367 | ||
6687 | } else { | 7368 | } else { |
6688 | 7369 | ||
7370 | if (hw->mac_type == e1000_ich8lan) { | ||
7371 | phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU; | ||
7372 | E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl); | ||
7373 | } else { | ||
6689 | phy_data |= IGP02E1000_PM_D0_LPLU; | 7374 | phy_data |= IGP02E1000_PM_D0_LPLU; |
6690 | ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); | 7375 | ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); |
6691 | if (ret_val) | 7376 | if (ret_val) |
6692 | return ret_val; | 7377 | return ret_val; |
7378 | } | ||
6693 | 7379 | ||
6694 | /* When LPLU is enabled we should disable SmartSpeed */ | 7380 | /* When LPLU is enabled we should disable SmartSpeed */ |
6695 | ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data); | 7381 | ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data); |
@@ -6928,8 +7614,10 @@ e1000_mng_write_cmd_header(struct e1000_hw * hw, | |||
6928 | 7614 | ||
6929 | length >>= 2; | 7615 | length >>= 2; |
6930 | /* The device driver writes the relevant command block into the ram area. */ | 7616 | /* The device driver writes the relevant command block into the ram area. */ |
6931 | for (i = 0; i < length; i++) | 7617 | for (i = 0; i < length; i++) { |
6932 | E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((uint32_t *) hdr + i)); | 7618 | E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((uint32_t *) hdr + i)); |
7619 | E1000_WRITE_FLUSH(hw); | ||
7620 | } | ||
6933 | 7621 | ||
6934 | return E1000_SUCCESS; | 7622 | return E1000_SUCCESS; |
6935 | } | 7623 | } |
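The hunk above adds a posted-write flush after every dword of the command header copy. The byte length is first turned into a dword count with length >>= 2, then the header is copied one 32-bit word at a time into the HOST_IF RAM window. A rough userspace model of that copy (with a plain array standing in for the register window and the flush reduced to a comment) could look like this; the array size and header contents are made up:

#include <stdint.h>

static volatile uint32_t host_if[0x200];   /* stand-in for the HOST_IF window */

static void write_cmd_header(const void *hdr, uint32_t length_bytes)
{
    const uint32_t *src = hdr;
    uint32_t i, length = length_bytes >> 2;   /* bytes -> dwords */

    for (i = 0; i < length; i++) {
        host_if[i] = src[i];
        /* The driver issues E1000_WRITE_FLUSH(hw) here so each posted
         * dword write reaches the hardware before the next one. */
    }
}

int main(void)
{
    uint32_t hdr[4] = { 0x10, 0x0, 0x0, 0x0 };   /* made-up header dwords */

    write_cmd_header(hdr, sizeof(hdr));
    return 0;
}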
@@ -6961,15 +7649,18 @@ e1000_mng_write_commit( | |||
6961 | * returns - TRUE when the mode is IAMT or FALSE. | 7649 | * returns - TRUE when the mode is IAMT or FALSE. |
6962 | ****************************************************************************/ | 7650 | ****************************************************************************/ |
6963 | boolean_t | 7651 | boolean_t |
6964 | e1000_check_mng_mode( | 7652 | e1000_check_mng_mode(struct e1000_hw *hw) |
6965 | struct e1000_hw *hw) | ||
6966 | { | 7653 | { |
6967 | uint32_t fwsm; | 7654 | uint32_t fwsm; |
6968 | 7655 | ||
6969 | fwsm = E1000_READ_REG(hw, FWSM); | 7656 | fwsm = E1000_READ_REG(hw, FWSM); |
6970 | 7657 | ||
6971 | if((fwsm & E1000_FWSM_MODE_MASK) == | 7658 | if (hw->mac_type == e1000_ich8lan) { |
6972 | (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)) | 7659 | if ((fwsm & E1000_FWSM_MODE_MASK) == |
7660 | (E1000_MNG_ICH_IAMT_MODE << E1000_FWSM_MODE_SHIFT)) | ||
7661 | return TRUE; | ||
7662 | } else if ((fwsm & E1000_FWSM_MODE_MASK) == | ||
7663 | (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)) | ||
6973 | return TRUE; | 7664 | return TRUE; |
6974 | 7665 | ||
6975 | return FALSE; | 7666 | return FALSE; |
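The management-mode test above is a plain field compare: mask out the FWSM mode bits and check them against the expected mode, which for ICH8 parts is E1000_MNG_ICH_IAMT_MODE rather than E1000_MNG_IAMT_MODE. A minimal sketch of that pattern follows; the mask and shift values here are illustrative, not the driver's defines.

#include <stdint.h>
#include <stdio.h>

#define FWSM_MODE_SHIFT    1
#define FWSM_MODE_MASK     (0x7u << FWSM_MODE_SHIFT)
#define MNG_IAMT_MODE      0x3u   /* non-ICH8 parts */
#define MNG_ICH_IAMT_MODE  0x2u   /* ICH8 parts */

static int in_iamt_mode(uint32_t fwsm, int is_ich8)
{
    uint32_t expected = (is_ich8 ? MNG_ICH_IAMT_MODE : MNG_IAMT_MODE)
                        << FWSM_MODE_SHIFT;

    return (fwsm & FWSM_MODE_MASK) == expected;
}

int main(void)
{
    uint32_t fwsm = MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT;   /* sample value */

    printf("%d %d\n", in_iamt_mode(fwsm, 1), in_iamt_mode(fwsm, 0));   /* 1 0 */
    return 0;
}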
@@ -7209,7 +7900,6 @@ e1000_set_pci_express_master_disable(struct e1000_hw *hw) | |||
7209 | E1000_WRITE_REG(hw, CTRL, ctrl); | 7900 | E1000_WRITE_REG(hw, CTRL, ctrl); |
7210 | } | 7901 | } |
7211 | 7902 | ||
7212 | #if 0 | ||
7213 | /*************************************************************************** | 7903 | /*************************************************************************** |
7214 | * | 7904 | * |
7215 | * Enables PCI-Express master access. | 7905 | * Enables PCI-Express master access. |
@@ -7233,7 +7923,6 @@ e1000_enable_pciex_master(struct e1000_hw *hw) | |||
7233 | ctrl &= ~E1000_CTRL_GIO_MASTER_DISABLE; | 7923 | ctrl &= ~E1000_CTRL_GIO_MASTER_DISABLE; |
7234 | E1000_WRITE_REG(hw, CTRL, ctrl); | 7924 | E1000_WRITE_REG(hw, CTRL, ctrl); |
7235 | } | 7925 | } |
7236 | #endif /* 0 */ | ||
7237 | 7926 | ||
7238 | /******************************************************************************* | 7927 | /******************************************************************************* |
7239 | * | 7928 | * |
@@ -7299,8 +7988,10 @@ e1000_get_auto_rd_done(struct e1000_hw *hw) | |||
7299 | case e1000_82572: | 7988 | case e1000_82572: |
7300 | case e1000_82573: | 7989 | case e1000_82573: |
7301 | case e1000_80003es2lan: | 7990 | case e1000_80003es2lan: |
7302 | while(timeout) { | 7991 | case e1000_ich8lan: |
7303 | if (E1000_READ_REG(hw, EECD) & E1000_EECD_AUTO_RD) break; | 7992 | while (timeout) { |
7993 | if (E1000_READ_REG(hw, EECD) & E1000_EECD_AUTO_RD) | ||
7994 | break; | ||
7304 | else msec_delay(1); | 7995 | else msec_delay(1); |
7305 | timeout--; | 7996 | timeout--; |
7306 | } | 7997 | } |
@@ -7340,7 +8031,7 @@ e1000_get_phy_cfg_done(struct e1000_hw *hw) | |||
7340 | 8031 | ||
7341 | switch (hw->mac_type) { | 8032 | switch (hw->mac_type) { |
7342 | default: | 8033 | default: |
7343 | msec_delay(10); | 8034 | msec_delay_irq(10); |
7344 | break; | 8035 | break; |
7345 | case e1000_80003es2lan: | 8036 | case e1000_80003es2lan: |
7346 | /* Separate *_CFG_DONE_* bit for each port */ | 8037 | /* Separate *_CFG_DONE_* bit for each port */ |
@@ -7523,6 +8214,13 @@ int32_t | |||
7523 | e1000_check_phy_reset_block(struct e1000_hw *hw) | 8214 | e1000_check_phy_reset_block(struct e1000_hw *hw) |
7524 | { | 8215 | { |
7525 | uint32_t manc = 0; | 8216 | uint32_t manc = 0; |
8217 | uint32_t fwsm = 0; | ||
8218 | |||
8219 | if (hw->mac_type == e1000_ich8lan) { | ||
8220 | fwsm = E1000_READ_REG(hw, FWSM); | ||
8221 | return (fwsm & E1000_FWSM_RSPCIPHY) ? E1000_SUCCESS | ||
8222 | : E1000_BLK_PHY_RESET; | ||
8223 | } | ||
7526 | 8224 | ||
7527 | if (hw->mac_type > e1000_82547_rev_2) | 8225 | if (hw->mac_type > e1000_82547_rev_2) |
7528 | manc = E1000_READ_REG(hw, MANC); | 8226 | manc = E1000_READ_REG(hw, MANC); |
@@ -7549,6 +8247,8 @@ e1000_arc_subsystem_valid(struct e1000_hw *hw) | |||
7549 | if((fwsm & E1000_FWSM_MODE_MASK) != 0) | 8247 | if((fwsm & E1000_FWSM_MODE_MASK) != 0) |
7550 | return TRUE; | 8248 | return TRUE; |
7551 | break; | 8249 | break; |
8250 | case e1000_ich8lan: | ||
8251 | return TRUE; | ||
7552 | default: | 8252 | default: |
7553 | break; | 8253 | break; |
7554 | } | 8254 | } |
@@ -7556,4 +8256,846 @@ e1000_arc_subsystem_valid(struct e1000_hw *hw) | |||
7556 | } | 8256 | } |
7557 | 8257 | ||
7558 | 8258 | ||
8259 | /****************************************************************************** | ||
8260 | * Configure PCI-Ex no-snoop | ||
8261 | * | ||
8262 | * hw - Struct containing variables accessed by shared code. | ||
8263 | * no_snoop - Bitmap of no-snoop events. | ||
8264 | * | ||
8265 | * returns: E1000_SUCCESS | ||
8266 | * | ||
8267 | *****************************************************************************/ | ||
8268 | int32_t | ||
8269 | e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop) | ||
8270 | { | ||
8271 | uint32_t gcr_reg = 0; | ||
8272 | |||
8273 | DEBUGFUNC("e1000_set_pci_ex_no_snoop"); | ||
8274 | |||
8275 | if (hw->bus_type == e1000_bus_type_unknown) | ||
8276 | e1000_get_bus_info(hw); | ||
8277 | |||
8278 | if (hw->bus_type != e1000_bus_type_pci_express) | ||
8279 | return E1000_SUCCESS; | ||
8280 | |||
8281 | if (no_snoop) { | ||
8282 | gcr_reg = E1000_READ_REG(hw, GCR); | ||
8283 | gcr_reg &= ~(PCI_EX_NO_SNOOP_ALL); | ||
8284 | gcr_reg |= no_snoop; | ||
8285 | E1000_WRITE_REG(hw, GCR, gcr_reg); | ||
8286 | } | ||
8287 | if (hw->mac_type == e1000_ich8lan) { | ||
8288 | uint32_t ctrl_ext; | ||
8289 | |||
8290 | E1000_WRITE_REG(hw, GCR, PCI_EX_82566_SNOOP_ALL); | ||
8291 | |||
8292 | ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); | ||
8293 | ctrl_ext |= E1000_CTRL_EXT_RO_DIS; | ||
8294 | E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); | ||
8295 | } | ||
8296 | |||
8297 | return E1000_SUCCESS; | ||
8298 | } | ||
8299 | |||
8300 | /*************************************************************************** | ||
8301 | * | ||
8302 | * Get software semaphore FLAG bit (SWFLAG). | ||
8303 | * SWFLAG is used to synchronize the access to all shared resources between | ||
8304 | * SW, FW and HW. | ||
8305 | * | ||
8306 | * hw: Struct containing variables accessed by shared code | ||
8307 | * | ||
8308 | ***************************************************************************/ | ||
8309 | int32_t | ||
8310 | e1000_get_software_flag(struct e1000_hw *hw) | ||
8311 | { | ||
8312 | int32_t timeout = PHY_CFG_TIMEOUT; | ||
8313 | uint32_t extcnf_ctrl; | ||
8314 | |||
8315 | DEBUGFUNC("e1000_get_software_flag"); | ||
8316 | |||
8317 | if (hw->mac_type == e1000_ich8lan) { | ||
8318 | while (timeout) { | ||
8319 | extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL); | ||
8320 | extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; | ||
8321 | E1000_WRITE_REG(hw, EXTCNF_CTRL, extcnf_ctrl); | ||
8322 | |||
8323 | extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL); | ||
8324 | if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) | ||
8325 | break; | ||
8326 | msec_delay_irq(1); | ||
8327 | timeout--; | ||
8328 | } | ||
8329 | |||
8330 | if (!timeout) { | ||
8331 | DEBUGOUT("FW or HW locks the resource too long.\n"); | ||
8332 | return -E1000_ERR_CONFIG; | ||
8333 | } | ||
8334 | } | ||
8335 | |||
8336 | return E1000_SUCCESS; | ||
8337 | } | ||
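e1000_get_software_flag() above follows a common hardware-semaphore idiom: set the flag bit, read it back, and treat a latched bit as ownership, retrying with a short delay until a bounded timeout expires. The standalone sketch below models that idiom with a fake register and a fake firmware owner; the bit value, timeout and release behaviour are invented for illustration.

#include <stdint.h>
#include <stdio.h>

#define SWFLAG      0x20u   /* illustrative bit, in the role of E1000_EXTCNF_CTRL_SWFLAG */
#define CFG_TIMEOUT 100     /* bounded retry count, in the role of PHY_CFG_TIMEOUT */

static uint32_t extcnf_ctrl;    /* fake EXTCNF_CTRL register */
static int fw_owns_flash = 1;   /* pretend firmware holds the resource at first */

static uint32_t reg_read(void)
{
    /* Model the hardware: the SW flag only latches once FW/HW let go. */
    return fw_owns_flash ? (extcnf_ctrl & ~SWFLAG) : extcnf_ctrl;
}

static void reg_write(uint32_t val) { extcnf_ctrl = val; }

static int acquire_swflag(void)
{
    int timeout = CFG_TIMEOUT;

    while (timeout--) {
        reg_write(reg_read() | SWFLAG);   /* request ownership */
        if (reg_read() & SWFLAG)          /* read back: did it stick? */
            return 0;
        fw_owns_flash = 0;                /* simulate the firmware releasing it */
    }
    return -1;                            /* FW or HW held it too long */
}

int main(void)
{
    printf("swflag acquire: %s\n", acquire_swflag() ? "failed" : "ok");
    return 0;
}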
8338 | |||
8339 | /*************************************************************************** | ||
8340 | * | ||
8341 | * Release software semaphore FLAG bit (SWFLAG). | ||
8342 | * SWFLAG is used to synchronize the access to all shared resources between | ||
8343 | * SW, FW and HW. | ||
8344 | * | ||
8345 | * hw: Struct containing variables accessed by shared code | ||
8346 | * | ||
8347 | ***************************************************************************/ | ||
8348 | void | ||
8349 | e1000_release_software_flag(struct e1000_hw *hw) | ||
8350 | { | ||
8351 | uint32_t extcnf_ctrl; | ||
8352 | |||
8353 | DEBUGFUNC("e1000_release_software_flag"); | ||
8354 | |||
8355 | if (hw->mac_type == e1000_ich8lan) { | ||
8356 | extcnf_ctrl= E1000_READ_REG(hw, EXTCNF_CTRL); | ||
8357 | extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; | ||
8358 | E1000_WRITE_REG(hw, EXTCNF_CTRL, extcnf_ctrl); | ||
8359 | } | ||
8360 | |||
8361 | return; | ||
8362 | } | ||
8363 | |||
8364 | /*************************************************************************** | ||
8365 | * | ||
8366 | * Disable dynamic power down mode in ife PHY. | ||
8367 | * It can be used to work around the band-gap problem. | ||
8368 | * | ||
8369 | * hw: Struct containing variables accessed by shared code | ||
8370 | * | ||
8371 | ***************************************************************************/ | ||
8372 | int32_t | ||
8373 | e1000_ife_disable_dynamic_power_down(struct e1000_hw *hw) | ||
8374 | { | ||
8375 | uint16_t phy_data; | ||
8376 | int32_t ret_val = E1000_SUCCESS; | ||
8377 | |||
8378 | DEBUGFUNC("e1000_ife_disable_dynamic_power_down"); | ||
8379 | |||
8380 | if (hw->phy_type == e1000_phy_ife) { | ||
8381 | ret_val = e1000_read_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, &phy_data); | ||
8382 | if (ret_val) | ||
8383 | return ret_val; | ||
8384 | |||
8385 | phy_data |= IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN; | ||
8386 | ret_val = e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, phy_data); | ||
8387 | } | ||
8388 | |||
8389 | return ret_val; | ||
8390 | } | ||
8391 | |||
8392 | /*************************************************************************** | ||
8393 | * | ||
8394 | * Enable dynamic power down mode in ife PHY. | ||
8395 | * It can be used to work around the band-gap problem. | ||
8396 | * | ||
8397 | * hw: Struct containing variables accessed by shared code | ||
8398 | * | ||
8399 | ***************************************************************************/ | ||
8400 | int32_t | ||
8401 | e1000_ife_enable_dynamic_power_down(struct e1000_hw *hw) | ||
8402 | { | ||
8403 | uint16_t phy_data; | ||
8404 | int32_t ret_val = E1000_SUCCESS; | ||
8405 | |||
8406 | DEBUGFUNC("e1000_ife_enable_dynamic_power_down"); | ||
8407 | |||
8408 | if (hw->phy_type == e1000_phy_ife) { | ||
8409 | ret_val = e1000_read_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, &phy_data); | ||
8410 | if (ret_val) | ||
8411 | return ret_val; | ||
8412 | |||
8413 | phy_data &= ~IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN; | ||
8414 | ret_val = e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, phy_data); | ||
8415 | } | ||
8416 | |||
8417 | return ret_val; | ||
8418 | } | ||
8419 | |||
8420 | /****************************************************************************** | ||
8421 | * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access | ||
8422 | * register. | ||
8423 | * | ||
8424 | * hw - Struct containing variables accessed by shared code | ||
8425 | * offset - offset of word in the EEPROM to read | ||
8426 | * data - word read from the EEPROM | ||
8427 | * words - number of words to read | ||
8428 | *****************************************************************************/ | ||
8429 | int32_t | ||
8430 | e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words, | ||
8431 | uint16_t *data) | ||
8432 | { | ||
8433 | int32_t error = E1000_SUCCESS; | ||
8434 | uint32_t flash_bank = 0; | ||
8435 | uint32_t act_offset = 0; | ||
8436 | uint32_t bank_offset = 0; | ||
8437 | uint16_t word = 0; | ||
8438 | uint16_t i = 0; | ||
8439 | |||
8440 | /* We need to know which is the valid flash bank. In the event | ||
8441 | * that we didn't allocate eeprom_shadow_ram, we may not be | ||
8442 | * managing flash_bank. So it cannot be trusted and needs | ||
8443 | * to be updated with each read. | ||
8444 | */ | ||
8445 | /* Value of bit 22 corresponds to the flash bank we're on. */ | ||
8446 | flash_bank = (E1000_READ_REG(hw, EECD) & E1000_EECD_SEC1VAL) ? 1 : 0; | ||
8447 | |||
8448 | /* Adjust offset appropriately if we're on bank 1 - adjust for word size */ | ||
8449 | bank_offset = flash_bank * (hw->flash_bank_size * 2); | ||
8450 | |||
8451 | error = e1000_get_software_flag(hw); | ||
8452 | if (error != E1000_SUCCESS) | ||
8453 | return error; | ||
8454 | |||
8455 | for (i = 0; i < words; i++) { | ||
8456 | if (hw->eeprom_shadow_ram != NULL && | ||
8457 | hw->eeprom_shadow_ram[offset+i].modified == TRUE) { | ||
8458 | data[i] = hw->eeprom_shadow_ram[offset+i].eeprom_word; | ||
8459 | } else { | ||
8460 | /* The NVM part needs a byte offset, hence * 2 */ | ||
8461 | act_offset = bank_offset + ((offset + i) * 2); | ||
8462 | error = e1000_read_ich8_word(hw, act_offset, &word); | ||
8463 | if (error != E1000_SUCCESS) | ||
8464 | break; | ||
8465 | data[i] = word; | ||
8466 | } | ||
8467 | } | ||
8468 | |||
8469 | e1000_release_software_flag(hw); | ||
8470 | |||
8471 | return error; | ||
8472 | } | ||
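The addressing in e1000_read_eeprom_ich8() is worth spelling out: flash_bank_size is kept in words while the flash itself is byte-addressed, so bank 1 begins at flash_bank_size * 2 bytes and word N of a request maps to bank_offset + (offset + N) * 2. A small worked example with hypothetical sizes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t flash_bank      = 1;       /* from the EECD SEC1VAL bit */
    uint32_t flash_bank_size = 0x800;   /* hypothetical size, in words */
    uint16_t offset          = 0x10;    /* requested word offset */
    uint16_t i               = 3;       /* fourth word of the request */

    uint32_t bank_offset = flash_bank * (flash_bank_size * 2);   /* 0x1000 */
    uint32_t act_offset  = bank_offset + ((offset + i) * 2);     /* 0x1026 */

    printf("byte address in flash: 0x%x\n", (unsigned)act_offset);
    return 0;
}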
8473 | |||
8474 | /****************************************************************************** | ||
8475 | * Writes a 16 bit word or words to the EEPROM using the ICH8's flash access | ||
8476 | * register. Actually, writes are written to the shadow ram cache in the hw | ||
8477 | * structure hw->e1000_shadow_ram. e1000_commit_shadow_ram flushes this to | ||
8478 | * the NVM, which occurs when the NVM checksum is updated. | ||
8479 | * | ||
8480 | * hw - Struct containing variables accessed by shared code | ||
8481 | * offset - offset of word in the EEPROM to write | ||
8482 | * words - number of words to write | ||
8483 | * data - words to write to the EEPROM | ||
8484 | *****************************************************************************/ | ||
8485 | int32_t | ||
8486 | e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words, | ||
8487 | uint16_t *data) | ||
8488 | { | ||
8489 | uint32_t i = 0; | ||
8490 | int32_t error = E1000_SUCCESS; | ||
8491 | |||
8492 | error = e1000_get_software_flag(hw); | ||
8493 | if (error != E1000_SUCCESS) | ||
8494 | return error; | ||
8495 | |||
8496 | /* A driver can write to the NVM only if it has eeprom_shadow_ram | ||
8497 | * allocated. Subsequent reads to the modified words are read from | ||
8498 | * this cached structure as well. Writes will only go into this | ||
8499 | * cached structure until it is followed by a call to | ||
8500 | * e1000_update_eeprom_checksum() where it will commit the changes | ||
8501 | * and clear the "modified" field. | ||
8502 | */ | ||
8503 | if (hw->eeprom_shadow_ram != NULL) { | ||
8504 | for (i = 0; i < words; i++) { | ||
8505 | if ((offset + i) < E1000_SHADOW_RAM_WORDS) { | ||
8506 | hw->eeprom_shadow_ram[offset+i].modified = TRUE; | ||
8507 | hw->eeprom_shadow_ram[offset+i].eeprom_word = data[i]; | ||
8508 | } else { | ||
8509 | error = -E1000_ERR_EEPROM; | ||
8510 | break; | ||
8511 | } | ||
8512 | } | ||
8513 | } else { | ||
8514 | /* Drivers have the option to not allocate eeprom_shadow_ram as long | ||
8515 | * as they don't perform any NVM writes. An attempt to do so | ||
8516 | * will result in this error. | ||
8517 | */ | ||
8518 | error = -E1000_ERR_EEPROM; | ||
8519 | } | ||
8520 | |||
8521 | e1000_release_software_flag(hw); | ||
8522 | |||
8523 | return error; | ||
8524 | } | ||
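Since writes never touch the flash directly here, the essential data structure is the shadow-RAM cache declared in e1000_hw.h (struct e1000_shadow_ram): each write marks the word modified and stores the new value, and a later checksum update commits the dirty words. A self-contained sketch of that bookkeeping, with a hypothetical word count and a local struct standing in for the driver's:

#include <stdint.h>

#define SHADOW_RAM_WORDS 2048   /* hypothetical, in the role of E1000_SHADOW_RAM_WORDS */

struct shadow_word {
    uint16_t eeprom_word;
    int      modified;
};

/* Cache the write; return 0 on success, -1 if out of range or no cache. */
static int shadow_write(struct shadow_word *cache, uint16_t offset,
                        uint16_t words, const uint16_t *data)
{
    uint16_t i;

    if (cache == NULL)
        return -1;

    for (i = 0; i < words; i++) {
        if (offset + i >= SHADOW_RAM_WORDS)
            return -1;
        cache[offset + i].modified    = 1;
        cache[offset + i].eeprom_word = data[i];
    }
    return 0;
}

int main(void)
{
    static struct shadow_word cache[SHADOW_RAM_WORDS];
    uint16_t val = 0xBEEF;

    return shadow_write(cache, 0x40, 1, &val);
}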
8525 | |||
8526 | /****************************************************************************** | ||
8527 | * This function does initial flash setup so that a new read/write/erase cycle | ||
8528 | * can be started. | ||
8529 | * | ||
8530 | * hw - The pointer to the hw structure | ||
8531 | ****************************************************************************/ | ||
8532 | int32_t | ||
8533 | e1000_ich8_cycle_init(struct e1000_hw *hw) | ||
8534 | { | ||
8535 | union ich8_hws_flash_status hsfsts; | ||
8536 | int32_t error = E1000_ERR_EEPROM; | ||
8537 | int32_t i = 0; | ||
8538 | |||
8539 | DEBUGFUNC("e1000_ich8_cycle_init"); | ||
8540 | |||
8541 | hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS); | ||
8542 | |||
8543 | /* Maybe check the Flash Des Valid bit in Hw status */ | ||
8544 | if (hsfsts.hsf_status.fldesvalid == 0) { | ||
8545 | DEBUGOUT("Flash descriptor invalid. SW Sequencing must be used."); | ||
8546 | return error; | ||
8547 | } | ||
8548 | |||
8549 | /* Clear FCERR in Hw status by writing 1 */ | ||
8550 | /* Clear DAEL in Hw status by writing a 1 */ | ||
8551 | hsfsts.hsf_status.flcerr = 1; | ||
8552 | hsfsts.hsf_status.dael = 1; | ||
8553 | |||
8554 | E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFSTS, hsfsts.regval); | ||
8555 | |||
8556 | /* Either we should have a hardware SPI cycle in progress bit to check | ||
8557 | * against, in order to start a new cycle or FDONE bit should be changed | ||
8558 | * in the hardware so that it is 1 after hardware reset, which can then be | ||
8559 | * used as an indication whether a cycle is in progress or has been | ||
8560 | * completed. We should also have some software semaphore mechanism to | ||
8561 | * guard FDONE or the cycle-in-progress bit so that accesses to those | ||
8562 | * bits by two threads can be serialized, or a way so that two threads | ||
8563 | * don't start the cycle at the same time */ | ||
8564 | |||
8565 | if (hsfsts.hsf_status.flcinprog == 0) { | ||
8566 | /* There is no cycle running at present, so we can start a cycle */ | ||
8567 | /* Begin by setting Flash Cycle Done. */ | ||
8568 | hsfsts.hsf_status.flcdone = 1; | ||
8569 | E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFSTS, hsfsts.regval); | ||
8570 | error = E1000_SUCCESS; | ||
8571 | } else { | ||
8572 | /* otherwise poll for some time so the current cycle has a chance | ||
8573 | * to end before giving up. */ | ||
8574 | for (i = 0; i < ICH8_FLASH_COMMAND_TIMEOUT; i++) { | ||
8575 | hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS); | ||
8576 | if (hsfsts.hsf_status.flcinprog == 0) { | ||
8577 | error = E1000_SUCCESS; | ||
8578 | break; | ||
8579 | } | ||
8580 | udelay(1); | ||
8581 | } | ||
8582 | if (error == E1000_SUCCESS) { | ||
8583 | /* Successfully waited for the previous cycle to finish, | ||
8584 | * now set the Flash Cycle Done. */ | ||
8585 | hsfsts.hsf_status.flcdone = 1; | ||
8586 | E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFSTS, hsfsts.regval); | ||
8587 | } else { | ||
8588 | DEBUGOUT("Flash controller busy, cannot get access"); | ||
8589 | } | ||
8590 | } | ||
8591 | return error; | ||
8592 | } | ||
8593 | |||
8594 | /****************************************************************************** | ||
8595 | * This function starts a flash cycle and waits for its completion | ||
8596 | * | ||
8597 | * hw - The pointer to the hw structure | ||
8598 | ****************************************************************************/ | ||
8599 | int32_t | ||
8600 | e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout) | ||
8601 | { | ||
8602 | union ich8_hws_flash_ctrl hsflctl; | ||
8603 | union ich8_hws_flash_status hsfsts; | ||
8604 | int32_t error = E1000_ERR_EEPROM; | ||
8605 | uint32_t i = 0; | ||
8606 | |||
8607 | /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ | ||
8608 | hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL); | ||
8609 | hsflctl.hsf_ctrl.flcgo = 1; | ||
8610 | E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval); | ||
8611 | |||
8612 | /* wait till FDONE bit is set to 1 */ | ||
8613 | do { | ||
8614 | hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS); | ||
8615 | if (hsfsts.hsf_status.flcdone == 1) | ||
8616 | break; | ||
8617 | udelay(1); | ||
8618 | i++; | ||
8619 | } while (i < timeout); | ||
8620 | if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0) { | ||
8621 | error = E1000_SUCCESS; | ||
8622 | } | ||
8623 | return error; | ||
8624 | } | ||
8625 | |||
8626 | /****************************************************************************** | ||
8627 | * Reads a byte or word from the NVM using the ICH8 flash access registers. | ||
8628 | * | ||
8629 | * hw - The pointer to the hw structure | ||
8630 | * index - The index of the byte or word to read. | ||
8631 | * size - Size of data to read, 1=byte 2=word | ||
8632 | * data - Pointer to the word to store the value read. | ||
8633 | *****************************************************************************/ | ||
8634 | int32_t | ||
8635 | e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index, | ||
8636 | uint32_t size, uint16_t* data) | ||
8637 | { | ||
8638 | union ich8_hws_flash_status hsfsts; | ||
8639 | union ich8_hws_flash_ctrl hsflctl; | ||
8640 | uint32_t flash_linear_address; | ||
8641 | uint32_t flash_data = 0; | ||
8642 | int32_t error = -E1000_ERR_EEPROM; | ||
8643 | int32_t count = 0; | ||
8644 | |||
8645 | DEBUGFUNC("e1000_read_ich8_data"); | ||
8646 | |||
8647 | if (size < 1 || size > 2 || data == 0x0 || | ||
8648 | index > ICH8_FLASH_LINEAR_ADDR_MASK) | ||
8649 | return error; | ||
8650 | |||
8651 | flash_linear_address = (ICH8_FLASH_LINEAR_ADDR_MASK & index) + | ||
8652 | hw->flash_base_addr; | ||
8653 | |||
8654 | do { | ||
8655 | udelay(1); | ||
8656 | /* Steps */ | ||
8657 | error = e1000_ich8_cycle_init(hw); | ||
8658 | if (error != E1000_SUCCESS) | ||
8659 | break; | ||
8660 | |||
8661 | hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL); | ||
8662 | /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ | ||
8663 | hsflctl.hsf_ctrl.fldbcount = size - 1; | ||
8664 | hsflctl.hsf_ctrl.flcycle = ICH8_CYCLE_READ; | ||
8665 | E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval); | ||
8666 | |||
8667 | /* Write the last 24 bits of index into Flash Linear address field in | ||
8668 | * Flash Address */ | ||
8669 | /* TODO: TBD maybe check the index against the size of flash */ | ||
8670 | |||
8671 | E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FADDR, flash_linear_address); | ||
8672 | |||
8673 | error = e1000_ich8_flash_cycle(hw, ICH8_FLASH_COMMAND_TIMEOUT); | ||
8674 | |||
8675 | /* If FCERR is set to 1, clear it and try the whole sequence a few | ||
8676 | * more times; otherwise read in (shift in) the Flash Data0 register, | ||
8677 | * least significant byte first */ | ||
8678 | if (error == E1000_SUCCESS) { | ||
8679 | flash_data = E1000_READ_ICH8_REG(hw, ICH8_FLASH_FDATA0); | ||
8680 | if (size == 1) { | ||
8681 | *data = (uint8_t)(flash_data & 0x000000FF); | ||
8682 | } else if (size == 2) { | ||
8683 | *data = (uint16_t)(flash_data & 0x0000FFFF); | ||
8684 | } | ||
8685 | break; | ||
8686 | } else { | ||
8687 | /* If we've gotten here, then things are probably completely hosed, | ||
8688 | * but if the error condition is detected, it won't hurt to give | ||
8689 | * it another try...ICH8_FLASH_CYCLE_REPEAT_COUNT times. | ||
8690 | */ | ||
8691 | hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS); | ||
8692 | if (hsfsts.hsf_status.flcerr == 1) { | ||
8693 | /* Repeat for some time before giving up. */ | ||
8694 | continue; | ||
8695 | } else if (hsfsts.hsf_status.flcdone == 0) { | ||
8696 | DEBUGOUT("Timeout error - flash cycle did not complete."); | ||
8697 | break; | ||
8698 | } | ||
8699 | } | ||
8700 | } while (count++ < ICH8_FLASH_CYCLE_REPEAT_COUNT); | ||
8701 | |||
8702 | return error; | ||
8703 | } | ||
8704 | |||
8705 | /****************************************************************************** | ||
8706 | * Writes one or two bytes to the NVM using the ICH8 flash access registers. | ||
8707 | * | ||
8708 | * hw - The pointer to the hw structure | ||
8709 | * index - The index of the byte/word to write. | ||
8710 | * size - Size of data to write, 1=byte 2=word | ||
8711 | * data - The byte(s) to write to the NVM. | ||
8712 | *****************************************************************************/ | ||
8713 | int32_t | ||
8714 | e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size, | ||
8715 | uint16_t data) | ||
8716 | { | ||
8717 | union ich8_hws_flash_status hsfsts; | ||
8718 | union ich8_hws_flash_ctrl hsflctl; | ||
8719 | uint32_t flash_linear_address; | ||
8720 | uint32_t flash_data = 0; | ||
8721 | int32_t error = -E1000_ERR_EEPROM; | ||
8722 | int32_t count = 0; | ||
8723 | |||
8724 | DEBUGFUNC("e1000_write_ich8_data"); | ||
8725 | |||
8726 | if (size < 1 || size > 2 || data > size * 0xff || | ||
8727 | index > ICH8_FLASH_LINEAR_ADDR_MASK) | ||
8728 | return error; | ||
8729 | |||
8730 | flash_linear_address = (ICH8_FLASH_LINEAR_ADDR_MASK & index) + | ||
8731 | hw->flash_base_addr; | ||
8732 | |||
8733 | do { | ||
8734 | udelay(1); | ||
8735 | /* Steps */ | ||
8736 | error = e1000_ich8_cycle_init(hw); | ||
8737 | if (error != E1000_SUCCESS) | ||
8738 | break; | ||
8739 | |||
8740 | hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL); | ||
8741 | /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ | ||
8742 | hsflctl.hsf_ctrl.fldbcount = size - 1; | ||
8743 | hsflctl.hsf_ctrl.flcycle = ICH8_CYCLE_WRITE; | ||
8744 | E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval); | ||
8745 | |||
8746 | /* Write the last 24 bits of index into Flash Linear address field in | ||
8747 | * Flash Address */ | ||
8748 | E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FADDR, flash_linear_address); | ||
8749 | |||
8750 | if (size == 1) | ||
8751 | flash_data = (uint32_t)data & 0x00FF; | ||
8752 | else | ||
8753 | flash_data = (uint32_t)data; | ||
8754 | |||
8755 | E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FDATA0, flash_data); | ||
8756 | |||
8757 | /* Check if FCERR is set to 1; if so, clear it and try the whole | ||
8758 | * sequence a few more times, else we are done */ | ||
8759 | error = e1000_ich8_flash_cycle(hw, ICH8_FLASH_COMMAND_TIMEOUT); | ||
8760 | if (error == E1000_SUCCESS) { | ||
8761 | break; | ||
8762 | } else { | ||
8763 | /* If we're here, then things are most likely completely hosed, | ||
8764 | * but if the error condition is detected, it won't hurt to give | ||
8765 | * it another try...ICH8_FLASH_CYCLE_REPEAT_COUNT times. | ||
8766 | */ | ||
8767 | hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS); | ||
8768 | if (hsfsts.hsf_status.flcerr == 1) { | ||
8769 | /* Repeat for some time before giving up. */ | ||
8770 | continue; | ||
8771 | } else if (hsfsts.hsf_status.flcdone == 0) { | ||
8772 | DEBUGOUT("Timeout error - flash cycle did not complete."); | ||
8773 | break; | ||
8774 | } | ||
8775 | } | ||
8776 | } while (count++ < ICH8_FLASH_CYCLE_REPEAT_COUNT); | ||
8777 | |||
8778 | return error; | ||
8779 | } | ||
8780 | |||
8781 | /****************************************************************************** | ||
8782 | * Reads a single byte from the NVM using the ICH8 flash access registers. | ||
8783 | * | ||
8784 | * hw - pointer to e1000_hw structure | ||
8785 | * index - The index of the byte to read. | ||
8786 | * data - Pointer to a byte to store the value read. | ||
8787 | *****************************************************************************/ | ||
8788 | int32_t | ||
8789 | e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t* data) | ||
8790 | { | ||
8791 | int32_t status = E1000_SUCCESS; | ||
8792 | uint16_t word = 0; | ||
8793 | |||
8794 | status = e1000_read_ich8_data(hw, index, 1, &word); | ||
8795 | if (status == E1000_SUCCESS) { | ||
8796 | *data = (uint8_t)word; | ||
8797 | } | ||
8798 | |||
8799 | return status; | ||
8800 | } | ||
8801 | |||
8802 | /****************************************************************************** | ||
8803 | * Writes a single byte to the NVM using the ICH8 flash access registers. | ||
8804 | * Performs verification by reading back the value and then going through | ||
8805 | * a retry algorithm before giving up. | ||
8806 | * | ||
8807 | * hw - pointer to e1000_hw structure | ||
8808 | * index - The index of the byte to write. | ||
8809 | * byte - The byte to write to the NVM. | ||
8810 | *****************************************************************************/ | ||
8811 | int32_t | ||
8812 | e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte) | ||
8813 | { | ||
8814 | int32_t error = E1000_SUCCESS; | ||
8815 | int32_t program_retries; | ||
8816 | uint8_t temp_byte; | ||
8817 | |||
8818 | e1000_write_ich8_byte(hw, index, byte); | ||
8819 | udelay(100); | ||
8820 | |||
8821 | for (program_retries = 0; program_retries < 100; program_retries++) { | ||
8822 | e1000_read_ich8_byte(hw, index, &temp_byte); | ||
8823 | if (temp_byte == byte) | ||
8824 | break; | ||
8825 | udelay(10); | ||
8826 | e1000_write_ich8_byte(hw, index, byte); | ||
8827 | udelay(100); | ||
8828 | } | ||
8829 | if (program_retries == 100) | ||
8830 | error = E1000_ERR_EEPROM; | ||
8831 | |||
8832 | return error; | ||
8833 | } | ||
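e1000_verify_write_ich8_byte() above is a program-then-verify loop: write the byte, read it back, and reprogram up to 100 times before reporting an EEPROM error. The sketch below captures just that retry shape against a fake byte-wide NVM; the backing array and accessors are invented, only the retry count of 100 mirrors the code above.

#include <stdint.h>
#include <stdio.h>

static uint8_t fake_nvm[4096];   /* stand-in for the byte-addressed flash */

static void    nvm_write(uint32_t index, uint8_t byte) { fake_nvm[index] = byte; }
static uint8_t nvm_read(uint32_t index)                { return fake_nvm[index]; }

static int verify_write_byte(uint32_t index, uint8_t byte)
{
    int retries;

    nvm_write(index, byte);
    for (retries = 0; retries < 100; retries++) {
        if (nvm_read(index) == byte)
            return 0;            /* programmed and verified */
        nvm_write(index, byte);  /* reprogram and check again */
    }
    return -1;                   /* still wrong after 100 attempts */
}

int main(void)
{
    printf("verify-write: %s\n", verify_write_byte(0x10, 0xA5) ? "failed" : "ok");
    return 0;
}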
8834 | |||
8835 | /****************************************************************************** | ||
8836 | * Writes a single byte to the NVM using the ICH8 flash access registers. | ||
8837 | * | ||
8838 | * hw - pointer to e1000_hw structure | ||
8839 | * index - The index of the byte to write. | ||
8840 | * data - The byte to write to the NVM. | ||
8841 | *****************************************************************************/ | ||
8842 | int32_t | ||
8843 | e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t data) | ||
8844 | { | ||
8845 | int32_t status = E1000_SUCCESS; | ||
8846 | uint16_t word = (uint16_t)data; | ||
8847 | |||
8848 | status = e1000_write_ich8_data(hw, index, 1, word); | ||
8849 | |||
8850 | return status; | ||
8851 | } | ||
8852 | |||
8853 | /****************************************************************************** | ||
8854 | * Reads a word from the NVM using the ICH8 flash access registers. | ||
8855 | * | ||
8856 | * hw - pointer to e1000_hw structure | ||
8857 | * index - The starting byte index of the word to read. | ||
8858 | * data - Pointer to a word to store the value read. | ||
8859 | *****************************************************************************/ | ||
8860 | int32_t | ||
8861 | e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t *data) | ||
8862 | { | ||
8863 | int32_t status = E1000_SUCCESS; | ||
8864 | status = e1000_read_ich8_data(hw, index, 2, data); | ||
8865 | return status; | ||
8866 | } | ||
8867 | |||
8868 | /****************************************************************************** | ||
8869 | * Writes a word to the NVM using the ICH8 flash access registers. | ||
8870 | * | ||
8871 | * hw - pointer to e1000_hw structure | ||
8872 | * index - The starting byte index of the word to write. | ||
8873 | * data - The word to write to the NVM. | ||
8874 | *****************************************************************************/ | ||
8875 | int32_t | ||
8876 | e1000_write_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t data) | ||
8877 | { | ||
8878 | int32_t status = E1000_SUCCESS; | ||
8879 | status = e1000_write_ich8_data(hw, index, 2, data); | ||
8880 | return status; | ||
8881 | } | ||
8882 | |||
8883 | /****************************************************************************** | ||
8884 | * Erases the bank specified. Each bank is a 4k block. Segments are 0 based. | ||
8885 | * segment N is 4096 * N + flash_reg_addr. | ||
8886 | * | ||
8887 | * hw - pointer to e1000_hw structure | ||
8888 | * segment - 0 for first segment, 1 for second segment, etc. | ||
8889 | *****************************************************************************/ | ||
8890 | int32_t | ||
8891 | e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t segment) | ||
8892 | { | ||
8893 | union ich8_hws_flash_status hsfsts; | ||
8894 | union ich8_hws_flash_ctrl hsflctl; | ||
8895 | uint32_t flash_linear_address; | ||
8896 | int32_t count = 0; | ||
8897 | int32_t error = E1000_ERR_EEPROM; | ||
8898 | int32_t iteration, seg_size; | ||
8899 | int32_t sector_size; | ||
8900 | int32_t j = 0; | ||
8901 | int32_t error_flag = 0; | ||
8902 | |||
8903 | hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS); | ||
8904 | |||
8905 | /* Determine HW Sector size: Read BERASE bits of Hw flash Status register */ | ||
8906 | /* 00: The Hw sector is 256 bytes, hence we need to erase 16 | ||
8907 | * consecutive sectors. The start index for the nth Hw sector can be | ||
8908 | * calculated as = segment * 4096 + n * 256 | ||
8909 | * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector. | ||
8910 | * The start index for the nth Hw sector can be calculated | ||
8911 | * as = segment * 4096 | ||
8912 | * 10: Error condition | ||
8913 | * 11: The Hw sector size is much bigger than the size asked to | ||
8914 | * erase...error condition */ | ||
8915 | if (hsfsts.hsf_status.berasesz == 0x0) { | ||
8916 | /* Hw sector size 256 */ | ||
8917 | sector_size = seg_size = ICH8_FLASH_SEG_SIZE_256; | ||
8918 | iteration = ICH8_FLASH_SECTOR_SIZE / ICH8_FLASH_SEG_SIZE_256; | ||
8919 | } else if (hsfsts.hsf_status.berasesz == 0x1) { | ||
8920 | sector_size = seg_size = ICH8_FLASH_SEG_SIZE_4K; | ||
8921 | iteration = 1; | ||
8922 | } else if (hsfsts.hsf_status.berasesz == 0x3) { | ||
8923 | sector_size = seg_size = ICH8_FLASH_SEG_SIZE_64K; | ||
8924 | iteration = 1; | ||
8925 | } else { | ||
8926 | return error; | ||
8927 | } | ||
8928 | |||
8929 | for (j = 0; j < iteration ; j++) { | ||
8930 | do { | ||
8931 | count++; | ||
8932 | /* Steps */ | ||
8933 | error = e1000_ich8_cycle_init(hw); | ||
8934 | if (error != E1000_SUCCESS) { | ||
8935 | error_flag = 1; | ||
8936 | break; | ||
8937 | } | ||
8938 | |||
8939 | /* Write a value 11 (block Erase) in Flash Cycle field in Hw flash | ||
8940 | * Control */ | ||
8941 | hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL); | ||
8942 | hsflctl.hsf_ctrl.flcycle = ICH8_CYCLE_ERASE; | ||
8943 | E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval); | ||
8944 | |||
8945 | /* Write the last 24 bits of an index within the block into Flash | ||
8946 | * Linear address field in Flash Address. This probably needs to | ||
8947 | * be calculated here based on the on-chip segment size and the | ||
8948 | * software segment size assumed (4K) */ | ||
8949 | /* TBD */ | ||
8950 | flash_linear_address = segment * sector_size + j * seg_size; | ||
8951 | flash_linear_address &= ICH8_FLASH_LINEAR_ADDR_MASK; | ||
8952 | flash_linear_address += hw->flash_base_addr; | ||
8953 | |||
8954 | E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FADDR, flash_linear_address); | ||
8955 | |||
8956 | error = e1000_ich8_flash_cycle(hw, 1000000); | ||
8957 | /* Check if FCERR is set to 1. If so, clear it and try the whole | ||
8958 | * sequence a few more times, else we are done */ | ||
8959 | if (error == E1000_SUCCESS) { | ||
8960 | break; | ||
8961 | } else { | ||
8962 | hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS); | ||
8963 | if (hsfsts.hsf_status.flcerr == 1) { | ||
8964 | /* repeat for some time before giving up */ | ||
8965 | continue; | ||
8966 | } else if (hsfsts.hsf_status.flcdone == 0) { | ||
8967 | error_flag = 1; | ||
8968 | break; | ||
8969 | } | ||
8970 | } | ||
8971 | } while ((count < ICH8_FLASH_CYCLE_REPEAT_COUNT) && !error_flag); | ||
8972 | if (error_flag == 1) | ||
8973 | break; | ||
8974 | } | ||
8975 | if (error_flag != 1) | ||
8976 | error = E1000_SUCCESS; | ||
8977 | return error; | ||
8978 | } | ||
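To make the erase addressing concrete: per the block comment at the top of this function, with 256-byte hardware sectors (berasesz == 0) a 4 KB software segment takes 16 erase cycles, and cycle n of segment s starts at s * 4096 + n * 256 before hw->flash_base_addr is added; with 4 KB sectors a single cycle at s * 4096 suffices. A standalone illustration of the 256-byte case, using a hypothetical segment index:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t seg_size   = 4096;                  /* software segment size */
    uint32_t hw_sector  = 256;                   /* berasesz == 0 case */
    uint32_t iterations = seg_size / hw_sector;  /* 16 erase cycles */
    uint32_t segment    = 2;                     /* hypothetical segment */
    uint32_t n;

    for (n = 0; n < iterations; n++)
        printf("cycle %2u erases flash offset 0x%05x\n",
               (unsigned)n, (unsigned)(segment * seg_size + n * hw_sector));
    return 0;
}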
8979 | |||
8980 | /****************************************************************************** | ||
8981 | * | ||
8982 | * Reverse duplex setting without breaking the link. | ||
8983 | * | ||
8984 | * hw: Struct containing variables accessed by shared code | ||
8985 | * | ||
8986 | *****************************************************************************/ | ||
8987 | int32_t | ||
8988 | e1000_duplex_reversal(struct e1000_hw *hw) | ||
8989 | { | ||
8990 | int32_t ret_val; | ||
8991 | uint16_t phy_data; | ||
8992 | |||
8993 | if (hw->phy_type != e1000_phy_igp_3) | ||
8994 | return E1000_SUCCESS; | ||
8995 | |||
8996 | ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); | ||
8997 | if (ret_val) | ||
8998 | return ret_val; | ||
8999 | |||
9000 | phy_data ^= MII_CR_FULL_DUPLEX; | ||
9001 | |||
9002 | ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data); | ||
9003 | if (ret_val) | ||
9004 | return ret_val; | ||
9005 | |||
9006 | ret_val = e1000_read_phy_reg(hw, IGP3E1000_PHY_MISC_CTRL, &phy_data); | ||
9007 | if (ret_val) | ||
9008 | return ret_val; | ||
9009 | |||
9010 | phy_data |= IGP3_PHY_MISC_DUPLEX_MANUAL_SET; | ||
9011 | ret_val = e1000_write_phy_reg(hw, IGP3E1000_PHY_MISC_CTRL, phy_data); | ||
9012 | |||
9013 | return ret_val; | ||
9014 | } | ||
9015 | |||
9016 | int32_t | ||
9017 | e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, | ||
9018 | uint32_t cnf_base_addr, uint32_t cnf_size) | ||
9019 | { | ||
9020 | uint32_t ret_val = E1000_SUCCESS; | ||
9021 | uint16_t word_addr, reg_data, reg_addr; | ||
9022 | uint16_t i; | ||
9023 | |||
9024 | /* cnf_base_addr is in DWORD */ | ||
9025 | word_addr = (uint16_t)(cnf_base_addr << 1); | ||
9026 | |||
9027 | /* cnf_size is returned in size of dwords */ | ||
9028 | for (i = 0; i < cnf_size; i++) { | ||
9029 | ret_val = e1000_read_eeprom(hw, (word_addr + i*2), 1, ®_data); | ||
9030 | if (ret_val) | ||
9031 | return ret_val; | ||
9032 | |||
9033 | ret_val = e1000_read_eeprom(hw, (word_addr + i*2 + 1), 1, ®_addr); | ||
9034 | if (ret_val) | ||
9035 | return ret_val; | ||
9036 | |||
9037 | ret_val = e1000_get_software_flag(hw); | ||
9038 | if (ret_val != E1000_SUCCESS) | ||
9039 | return ret_val; | ||
9040 | |||
9041 | ret_val = e1000_write_phy_reg_ex(hw, (uint32_t)reg_addr, reg_data); | ||
9042 | |||
9043 | e1000_release_software_flag(hw); | ||
9044 | } | ||
9045 | |||
9046 | return ret_val; | ||
9047 | } | ||
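One detail of e1000_init_lcd_from_nvm_config_region() that is easy to miss: cnf_base_addr arrives in dwords while the EEPROM is read in 16-bit words, so the base is doubled, and each of the cnf_size entries is a pair of words, data at word_addr + i*2 and the target PHY register address at word_addr + i*2 + 1. A tiny worked example with hypothetical values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t cnf_base_addr = 0x20;   /* pointer from EXTCNF_CTRL, in dwords */
    uint32_t cnf_size      = 3;      /* number of (data, address) entries */
    uint16_t word_addr     = (uint16_t)(cnf_base_addr << 1);   /* 0x40 */
    uint32_t i;

    for (i = 0; i < cnf_size; i++)
        printf("entry %u: data word @ 0x%04x, reg-addr word @ 0x%04x\n",
               (unsigned)i,
               (unsigned)(word_addr + i * 2),
               (unsigned)(word_addr + i * 2 + 1));
    return 0;
}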
9048 | |||
9049 | |||
9050 | int32_t | ||
9051 | e1000_init_lcd_from_nvm(struct e1000_hw *hw) | ||
9052 | { | ||
9053 | uint32_t reg_data, cnf_base_addr, cnf_size, ret_val, loop; | ||
9054 | |||
9055 | if (hw->phy_type != e1000_phy_igp_3) | ||
9056 | return E1000_SUCCESS; | ||
9057 | |||
9058 | /* Check if SW needs to configure the PHY */ | ||
9059 | reg_data = E1000_READ_REG(hw, FEXTNVM); | ||
9060 | if (!(reg_data & FEXTNVM_SW_CONFIG)) | ||
9061 | return E1000_SUCCESS; | ||
9062 | |||
9063 | /* Wait for basic configuration to complete before proceeding */ | ||
9064 | loop = 0; | ||
9065 | do { | ||
9066 | reg_data = E1000_READ_REG(hw, STATUS) & E1000_STATUS_LAN_INIT_DONE; | ||
9067 | udelay(100); | ||
9068 | loop++; | ||
9069 | } while ((!reg_data) && (loop < 50)); | ||
9070 | |||
9071 | /* Clear the Init Done bit for the next init event */ | ||
9072 | reg_data = E1000_READ_REG(hw, STATUS); | ||
9073 | reg_data &= ~E1000_STATUS_LAN_INIT_DONE; | ||
9074 | E1000_WRITE_REG(hw, STATUS, reg_data); | ||
9075 | |||
9076 | /* Make sure HW does not configure LCD from PHY extended configuration | ||
9077 | before SW configuration */ | ||
9078 | reg_data = E1000_READ_REG(hw, EXTCNF_CTRL); | ||
9079 | if ((reg_data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) == 0x0000) { | ||
9080 | reg_data = E1000_READ_REG(hw, EXTCNF_SIZE); | ||
9081 | cnf_size = reg_data & E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH; | ||
9082 | cnf_size >>= 16; | ||
9083 | if (cnf_size) { | ||
9084 | reg_data = E1000_READ_REG(hw, EXTCNF_CTRL); | ||
9085 | cnf_base_addr = reg_data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER; | ||
9086 | /* cnf_base_addr is in DWORD */ | ||
9087 | cnf_base_addr >>= 16; | ||
9088 | |||
9089 | /* Configure LCD from extended configuration region. */ | ||
9090 | ret_val = e1000_init_lcd_from_nvm_config_region(hw, cnf_base_addr, | ||
9091 | cnf_size); | ||
9092 | if (ret_val) | ||
9093 | return ret_val; | ||
9094 | } | ||
9095 | } | ||
9096 | |||
9097 | return E1000_SUCCESS; | ||
9098 | } | ||
9099 | |||
9100 | |||
7559 | 9101 | ||
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h index 467c9ed944f8..f9341e3276b3 100644 --- a/drivers/net/e1000/e1000_hw.h +++ b/drivers/net/e1000/e1000_hw.h | |||
@@ -62,6 +62,7 @@ typedef enum { | |||
62 | e1000_82572, | 62 | e1000_82572, |
63 | e1000_82573, | 63 | e1000_82573, |
64 | e1000_80003es2lan, | 64 | e1000_80003es2lan, |
65 | e1000_ich8lan, | ||
65 | e1000_num_macs | 66 | e1000_num_macs |
66 | } e1000_mac_type; | 67 | } e1000_mac_type; |
67 | 68 | ||
@@ -70,6 +71,7 @@ typedef enum { | |||
70 | e1000_eeprom_spi, | 71 | e1000_eeprom_spi, |
71 | e1000_eeprom_microwire, | 72 | e1000_eeprom_microwire, |
72 | e1000_eeprom_flash, | 73 | e1000_eeprom_flash, |
74 | e1000_eeprom_ich8, | ||
73 | e1000_eeprom_none, /* No NVM support */ | 75 | e1000_eeprom_none, /* No NVM support */ |
74 | e1000_num_eeprom_types | 76 | e1000_num_eeprom_types |
75 | } e1000_eeprom_type; | 77 | } e1000_eeprom_type; |
@@ -98,6 +100,11 @@ typedef enum { | |||
98 | e1000_fc_default = 0xFF | 100 | e1000_fc_default = 0xFF |
99 | } e1000_fc_type; | 101 | } e1000_fc_type; |
100 | 102 | ||
103 | struct e1000_shadow_ram { | ||
104 | uint16_t eeprom_word; | ||
105 | boolean_t modified; | ||
106 | }; | ||
107 | |||
101 | /* PCI bus types */ | 108 | /* PCI bus types */ |
102 | typedef enum { | 109 | typedef enum { |
103 | e1000_bus_type_unknown = 0, | 110 | e1000_bus_type_unknown = 0, |
@@ -218,6 +225,8 @@ typedef enum { | |||
218 | e1000_phy_igp, | 225 | e1000_phy_igp, |
219 | e1000_phy_igp_2, | 226 | e1000_phy_igp_2, |
220 | e1000_phy_gg82563, | 227 | e1000_phy_gg82563, |
228 | e1000_phy_igp_3, | ||
229 | e1000_phy_ife, | ||
221 | e1000_phy_undefined = 0xFF | 230 | e1000_phy_undefined = 0xFF |
222 | } e1000_phy_type; | 231 | } e1000_phy_type; |
223 | 232 | ||
@@ -313,6 +322,10 @@ int32_t e1000_read_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *phy | |||
313 | int32_t e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data); | 322 | int32_t e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data); |
314 | int32_t e1000_phy_hw_reset(struct e1000_hw *hw); | 323 | int32_t e1000_phy_hw_reset(struct e1000_hw *hw); |
315 | int32_t e1000_phy_reset(struct e1000_hw *hw); | 324 | int32_t e1000_phy_reset(struct e1000_hw *hw); |
325 | void e1000_phy_powerdown_workaround(struct e1000_hw *hw); | ||
326 | int32_t e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw); | ||
327 | int32_t e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, uint32_t cnf_base_addr, uint32_t cnf_size); | ||
328 | int32_t e1000_init_lcd_from_nvm(struct e1000_hw *hw); | ||
316 | int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); | 329 | int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); |
317 | int32_t e1000_validate_mdi_setting(struct e1000_hw *hw); | 330 | int32_t e1000_validate_mdi_setting(struct e1000_hw *hw); |
318 | int32_t e1000_read_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *data); | 331 | int32_t e1000_read_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *data); |
@@ -331,6 +344,7 @@ uint32_t e1000_enable_mng_pass_thru(struct e1000_hw *hw); | |||
331 | #define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */ | 344 | #define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */ |
332 | #define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */ | 345 | #define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */ |
333 | #define E1000_MNG_IAMT_MODE 0x3 | 346 | #define E1000_MNG_IAMT_MODE 0x3 |
347 | #define E1000_MNG_ICH_IAMT_MODE 0x2 | ||
334 | #define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */ | 348 | #define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */ |
335 | 349 | ||
336 | #define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */ | 350 | #define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */ |
@@ -388,6 +402,8 @@ int32_t e1000_read_part_num(struct e1000_hw *hw, uint32_t * part_num); | |||
388 | int32_t e1000_read_mac_addr(struct e1000_hw * hw); | 402 | int32_t e1000_read_mac_addr(struct e1000_hw * hw); |
389 | int32_t e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask); | 403 | int32_t e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask); |
390 | void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask); | 404 | void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask); |
405 | void e1000_release_software_flag(struct e1000_hw *hw); | ||
406 | int32_t e1000_get_software_flag(struct e1000_hw *hw); | ||
391 | 407 | ||
392 | /* Filters (multicast, vlan, receive) */ | 408 | /* Filters (multicast, vlan, receive) */ |
393 | void e1000_mc_addr_list_update(struct e1000_hw *hw, uint8_t * mc_addr_list, uint32_t mc_addr_count, uint32_t pad, uint32_t rar_used_count); | 409 | void e1000_mc_addr_list_update(struct e1000_hw *hw, uint8_t * mc_addr_list, uint32_t mc_addr_count, uint32_t pad, uint32_t rar_used_count); |
@@ -401,6 +417,7 @@ int32_t e1000_setup_led(struct e1000_hw *hw); | |||
401 | int32_t e1000_cleanup_led(struct e1000_hw *hw); | 417 | int32_t e1000_cleanup_led(struct e1000_hw *hw); |
402 | int32_t e1000_led_on(struct e1000_hw *hw); | 418 | int32_t e1000_led_on(struct e1000_hw *hw); |
403 | int32_t e1000_led_off(struct e1000_hw *hw); | 419 | int32_t e1000_led_off(struct e1000_hw *hw); |
420 | int32_t e1000_blink_led_start(struct e1000_hw *hw); | ||
404 | 421 | ||
405 | /* Adaptive IFS Functions */ | 422 | /* Adaptive IFS Functions */ |
406 | 423 | ||
@@ -422,6 +439,29 @@ int32_t e1000_disable_pciex_master(struct e1000_hw *hw); | |||
422 | int32_t e1000_get_software_semaphore(struct e1000_hw *hw); | 439 | int32_t e1000_get_software_semaphore(struct e1000_hw *hw); |
423 | void e1000_release_software_semaphore(struct e1000_hw *hw); | 440 | void e1000_release_software_semaphore(struct e1000_hw *hw); |
424 | int32_t e1000_check_phy_reset_block(struct e1000_hw *hw); | 441 | int32_t e1000_check_phy_reset_block(struct e1000_hw *hw); |
442 | int32_t e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop); | ||
443 | |||
444 | int32_t e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, | ||
445 | uint8_t *data); | ||
446 | int32_t e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, | ||
447 | uint8_t byte); | ||
448 | int32_t e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, | ||
449 | uint8_t byte); | ||
450 | int32_t e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, | ||
451 | uint16_t *data); | ||
452 | int32_t e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index, | ||
453 | uint32_t size, uint16_t *data); | ||
454 | int32_t e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, | ||
455 | uint16_t words, uint16_t *data); | ||
456 | int32_t e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, | ||
457 | uint16_t words, uint16_t *data); | ||
458 | int32_t e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t segment); | ||
459 | |||
460 | |||
461 | #define E1000_READ_REG_IO(a, reg) \ | ||
462 | e1000_read_reg_io((a), E1000_##reg) | ||
463 | #define E1000_WRITE_REG_IO(a, reg, val) \ | ||
464 | e1000_write_reg_io((a), E1000_##reg, val) | ||
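These token-pasting wrappers expand the register name at compile time; for instance (a worked expansion of the macros above, using the E1000_STATUS offset defined later in this header):

/* E1000_WRITE_REG_IO(hw, STATUS, 0)
 *   expands to e1000_write_reg_io(hw, E1000_STATUS, 0)
 */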
425 | 465 | ||
426 | /* PCI Device IDs */ | 466 | /* PCI Device IDs */ |
427 | #define E1000_DEV_ID_82542 0x1000 | 467 | #define E1000_DEV_ID_82542 0x1000 |
@@ -446,6 +486,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw); | |||
446 | #define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D | 486 | #define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D |
447 | #define E1000_DEV_ID_82541EI 0x1013 | 487 | #define E1000_DEV_ID_82541EI 0x1013 |
448 | #define E1000_DEV_ID_82541EI_MOBILE 0x1018 | 488 | #define E1000_DEV_ID_82541EI_MOBILE 0x1018 |
489 | #define E1000_DEV_ID_82541ER_LOM 0x1014 | ||
449 | #define E1000_DEV_ID_82541ER 0x1078 | 490 | #define E1000_DEV_ID_82541ER 0x1078 |
450 | #define E1000_DEV_ID_82547GI 0x1075 | 491 | #define E1000_DEV_ID_82547GI 0x1075 |
451 | #define E1000_DEV_ID_82541GI 0x1076 | 492 | #define E1000_DEV_ID_82541GI 0x1076 |
@@ -457,18 +498,28 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw); | |||
457 | #define E1000_DEV_ID_82546GB_PCIE 0x108A | 498 | #define E1000_DEV_ID_82546GB_PCIE 0x108A |
458 | #define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099 | 499 | #define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099 |
459 | #define E1000_DEV_ID_82547EI 0x1019 | 500 | #define E1000_DEV_ID_82547EI 0x1019 |
501 | #define E1000_DEV_ID_82547EI_MOBILE 0x101A | ||
460 | #define E1000_DEV_ID_82571EB_COPPER 0x105E | 502 | #define E1000_DEV_ID_82571EB_COPPER 0x105E |
461 | #define E1000_DEV_ID_82571EB_FIBER 0x105F | 503 | #define E1000_DEV_ID_82571EB_FIBER 0x105F |
462 | #define E1000_DEV_ID_82571EB_SERDES 0x1060 | 504 | #define E1000_DEV_ID_82571EB_SERDES 0x1060 |
463 | #define E1000_DEV_ID_82572EI_COPPER 0x107D | 505 | #define E1000_DEV_ID_82572EI_COPPER 0x107D |
464 | #define E1000_DEV_ID_82572EI_FIBER 0x107E | 506 | #define E1000_DEV_ID_82572EI_FIBER 0x107E |
465 | #define E1000_DEV_ID_82572EI_SERDES 0x107F | 507 | #define E1000_DEV_ID_82572EI_SERDES 0x107F |
508 | #define E1000_DEV_ID_82572EI 0x10B9 | ||
466 | #define E1000_DEV_ID_82573E 0x108B | 509 | #define E1000_DEV_ID_82573E 0x108B |
467 | #define E1000_DEV_ID_82573E_IAMT 0x108C | 510 | #define E1000_DEV_ID_82573E_IAMT 0x108C |
468 | #define E1000_DEV_ID_82573L 0x109A | 511 | #define E1000_DEV_ID_82573L 0x109A |
469 | #define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 | 512 | #define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 |
470 | #define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096 | 513 | #define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096 |
471 | #define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098 | 514 | #define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098 |
515 | #define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA | ||
516 | #define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB | ||
517 | |||
518 | #define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049 | ||
519 | #define E1000_DEV_ID_ICH8_IGP_AMT 0x104A | ||
520 | #define E1000_DEV_ID_ICH8_IGP_C 0x104B | ||
521 | #define E1000_DEV_ID_ICH8_IFE 0x104C | ||
522 | #define E1000_DEV_ID_ICH8_IGP_M 0x104D | ||
472 | 523 | ||
473 | 524 | ||
474 | #define NODE_ADDRESS_SIZE 6 | 525 | #define NODE_ADDRESS_SIZE 6 |
@@ -539,6 +590,14 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw); | |||
539 | E1000_IMS_RXSEQ | \ | 590 | E1000_IMS_RXSEQ | \ |
540 | E1000_IMS_LSC) | 591 | E1000_IMS_LSC) |
541 | 592 | ||
593 | /* Additional interrupts need to be handled for e1000_ich8lan: | ||
594 | DSW = The FW changed the status of the DISSW bit in FWSM | ||
595 | PHYINT = The LAN connected device generates an interrupt | ||
596 | EPRST = Manageability reset event */ | ||
597 | #define IMS_ICH8LAN_ENABLE_MASK (\ | ||
598 | E1000_IMS_DSW | \ | ||
599 | E1000_IMS_PHYINT | \ | ||
600 | E1000_IMS_EPRST) | ||
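As a rough illustration (not part of this patch), an interrupt-enable path would OR this mask into the usual IMS_ENABLE_MASK before writing the IMS register on ICH8 parts; the helper below is a hypothetical sketch using only symbols defined in this header.

/* Hypothetical sketch: unmask the common causes plus the ICH8-only ones. */
static void example_irq_enable(struct e1000_hw *hw)
{
	uint32_t mask = IMS_ENABLE_MASK;

	if (hw->mac_type == e1000_ich8lan)
		mask |= IMS_ICH8LAN_ENABLE_MASK;	/* DSW, PHYINT, EPRST */

	E1000_WRITE_REG(hw, IMS, mask);
}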
542 | 601 | ||
543 | /* Number of high/low register pairs in the RAR. The RAR (Receive Address | 602 | /* Number of high/low register pairs in the RAR. The RAR (Receive Address |
544 | * Registers) holds the directed and multicast addresses that we monitor. We | 603 | * Registers) holds the directed and multicast addresses that we monitor. We |
@@ -546,6 +605,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw); | |||
546 | * E1000_RAR_ENTRIES - 1 multicast addresses. | 605 | * E1000_RAR_ENTRIES - 1 multicast addresses. |
547 | */ | 606 | */ |
548 | #define E1000_RAR_ENTRIES 15 | 607 | #define E1000_RAR_ENTRIES 15 |
608 | #define E1000_RAR_ENTRIES_ICH8LAN 7 | ||
549 | 609 | ||
550 | #define MIN_NUMBER_OF_DESCRIPTORS 8 | 610 | #define MIN_NUMBER_OF_DESCRIPTORS 8 |
551 | #define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8 | 611 | #define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8 |
@@ -767,6 +827,9 @@ struct e1000_data_desc { | |||
767 | #define E1000_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */ | 827 | #define E1000_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */ |
768 | #define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ | 828 | #define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ |
769 | 829 | ||
830 | #define E1000_NUM_UNICAST_ICH8LAN 7 | ||
831 | #define E1000_MC_TBL_SIZE_ICH8LAN 32 | ||
832 | |||
770 | 833 | ||
771 | /* Receive Address Register */ | 834 | /* Receive Address Register */ |
772 | struct e1000_rar { | 835 | struct e1000_rar { |
@@ -776,6 +839,7 @@ struct e1000_rar { | |||
776 | 839 | ||
777 | /* Number of entries in the Multicast Table Array (MTA). */ | 840 | /* Number of entries in the Multicast Table Array (MTA). */ |
778 | #define E1000_NUM_MTA_REGISTERS 128 | 841 | #define E1000_NUM_MTA_REGISTERS 128 |
842 | #define E1000_NUM_MTA_REGISTERS_ICH8LAN 32 | ||
779 | 843 | ||
780 | /* IPv4 Address Table Entry */ | 844 | /* IPv4 Address Table Entry */ |
781 | struct e1000_ipv4_at_entry { | 845 | struct e1000_ipv4_at_entry { |
@@ -786,6 +850,7 @@ struct e1000_ipv4_at_entry { | |||
786 | /* Four wakeup IP addresses are supported */ | 850 | /* Four wakeup IP addresses are supported */ |
787 | #define E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 4 | 851 | #define E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 4 |
788 | #define E1000_IP4AT_SIZE E1000_WAKEUP_IP_ADDRESS_COUNT_MAX | 852 | #define E1000_IP4AT_SIZE E1000_WAKEUP_IP_ADDRESS_COUNT_MAX |
853 | #define E1000_IP4AT_SIZE_ICH8LAN 3 | ||
789 | #define E1000_IP6AT_SIZE 1 | 854 | #define E1000_IP6AT_SIZE 1 |
790 | 855 | ||
791 | /* IPv6 Address Table Entry */ | 856 | /* IPv6 Address Table Entry */ |
@@ -844,6 +909,7 @@ struct e1000_ffvt_entry { | |||
844 | #define E1000_FLA 0x0001C /* Flash Access - RW */ | 909 | #define E1000_FLA 0x0001C /* Flash Access - RW */ |
845 | #define E1000_MDIC 0x00020 /* MDI Control - RW */ | 910 | #define E1000_MDIC 0x00020 /* MDI Control - RW */ |
846 | #define E1000_SCTL 0x00024 /* SerDes Control - RW */ | 911 | #define E1000_SCTL 0x00024 /* SerDes Control - RW */ |
912 | #define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */ | ||
847 | #define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ | 913 | #define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ |
848 | #define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ | 914 | #define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ |
849 | #define E1000_FCT 0x00030 /* Flow Control Type - RW */ | 915 | #define E1000_FCT 0x00030 /* Flow Control Type - RW */ |
@@ -872,6 +938,8 @@ struct e1000_ffvt_entry { | |||
872 | #define E1000_LEDCTL 0x00E00 /* LED Control - RW */ | 938 | #define E1000_LEDCTL 0x00E00 /* LED Control - RW */ |
873 | #define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ | 939 | #define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ |
874 | #define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ | 940 | #define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ |
941 | #define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ | ||
942 | #define FEXTNVM_SW_CONFIG 0x0001 | ||
875 | #define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ | 943 | #define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ |
876 | #define E1000_PBS 0x01008 /* Packet Buffer Size */ | 944 | #define E1000_PBS 0x01008 /* Packet Buffer Size */ |
877 | #define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ | 945 | #define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ |
@@ -899,11 +967,13 @@ struct e1000_ffvt_entry { | |||
899 | #define E1000_RDH0 E1000_RDH /* RX Desc Head (0) - RW */ | 967 | #define E1000_RDH0 E1000_RDH /* RX Desc Head (0) - RW */ |
900 | #define E1000_RDT0 E1000_RDT /* RX Desc Tail (0) - RW */ | 968 | #define E1000_RDT0 E1000_RDT /* RX Desc Tail (0) - RW */ |
901 | #define E1000_RDTR0 E1000_RDTR /* RX Delay Timer (0) - RW */ | 969 | #define E1000_RDTR0 E1000_RDTR /* RX Delay Timer (0) - RW */ |
902 | #define E1000_RXDCTL 0x02828 /* RX Descriptor Control - RW */ | 970 | #define E1000_RXDCTL 0x02828 /* RX Descriptor Control queue 0 - RW */ |
971 | #define E1000_RXDCTL1 0x02928 /* RX Descriptor Control queue 1 - RW */ | ||
903 | #define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */ | 972 | #define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */ |
904 | #define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */ | 973 | #define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */ |
905 | #define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ | 974 | #define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ |
906 | #define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */ | 975 | #define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */ |
976 | #define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */ | ||
907 | #define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ | 977 | #define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ |
908 | #define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ | 978 | #define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ |
909 | #define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */ | 979 | #define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */ |
@@ -1050,6 +1120,7 @@ struct e1000_ffvt_entry { | |||
1050 | #define E1000_82542_FLA E1000_FLA | 1120 | #define E1000_82542_FLA E1000_FLA |
1051 | #define E1000_82542_MDIC E1000_MDIC | 1121 | #define E1000_82542_MDIC E1000_MDIC |
1052 | #define E1000_82542_SCTL E1000_SCTL | 1122 | #define E1000_82542_SCTL E1000_SCTL |
1123 | #define E1000_82542_FEXTNVM E1000_FEXTNVM | ||
1053 | #define E1000_82542_FCAL E1000_FCAL | 1124 | #define E1000_82542_FCAL E1000_FCAL |
1054 | #define E1000_82542_FCAH E1000_FCAH | 1125 | #define E1000_82542_FCAH E1000_FCAH |
1055 | #define E1000_82542_FCT E1000_FCT | 1126 | #define E1000_82542_FCT E1000_FCT |
@@ -1073,6 +1144,19 @@ struct e1000_ffvt_entry { | |||
1073 | #define E1000_82542_RDLEN0 E1000_82542_RDLEN | 1144 | #define E1000_82542_RDLEN0 E1000_82542_RDLEN |
1074 | #define E1000_82542_RDH0 E1000_82542_RDH | 1145 | #define E1000_82542_RDH0 E1000_82542_RDH |
1075 | #define E1000_82542_RDT0 E1000_82542_RDT | 1146 | #define E1000_82542_RDT0 E1000_82542_RDT |
1147 | #define E1000_82542_SRRCTL(_n) (0x280C + ((_n) << 8)) /* Split and Replication | ||
1148 | * RX Control - RW */ | ||
1149 | #define E1000_82542_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8)) | ||
1150 | #define E1000_82542_RDBAH3 0x02B04 /* RX Desc Base High Queue 3 - RW */ | ||
1151 | #define E1000_82542_RDBAL3 0x02B00 /* RX Desc Low Queue 3 - RW */ | ||
1152 | #define E1000_82542_RDLEN3 0x02B08 /* RX Desc Length Queue 3 - RW */ | ||
1153 | #define E1000_82542_RDH3 0x02B10 /* RX Desc Head Queue 3 - RW */ | ||
1154 | #define E1000_82542_RDT3 0x02B18 /* RX Desc Tail Queue 3 - RW */ | ||
1155 | #define E1000_82542_RDBAL2 0x02A00 /* RX Desc Base Low Queue 2 - RW */ | ||
1156 | #define E1000_82542_RDBAH2 0x02A04 /* RX Desc Base High Queue 2 - RW */ | ||
1157 | #define E1000_82542_RDLEN2 0x02A08 /* RX Desc Length Queue 2 - RW */ | ||
1158 | #define E1000_82542_RDH2 0x02A10 /* RX Desc Head Queue 2 - RW */ | ||
1159 | #define E1000_82542_RDT2 0x02A18 /* RX Desc Tail Queue 2 - RW */ | ||
1076 | #define E1000_82542_RDTR1 0x00130 | 1160 | #define E1000_82542_RDTR1 0x00130 |
1077 | #define E1000_82542_RDBAL1 0x00138 | 1161 | #define E1000_82542_RDBAL1 0x00138 |
1078 | #define E1000_82542_RDBAH1 0x0013C | 1162 | #define E1000_82542_RDBAH1 0x0013C |
@@ -1110,11 +1194,14 @@ struct e1000_ffvt_entry { | |||
1110 | #define E1000_82542_FLOP E1000_FLOP | 1194 | #define E1000_82542_FLOP E1000_FLOP |
1111 | #define E1000_82542_EXTCNF_CTRL E1000_EXTCNF_CTRL | 1195 | #define E1000_82542_EXTCNF_CTRL E1000_EXTCNF_CTRL |
1112 | #define E1000_82542_EXTCNF_SIZE E1000_EXTCNF_SIZE | 1196 | #define E1000_82542_EXTCNF_SIZE E1000_EXTCNF_SIZE |
1197 | #define E1000_82542_PHY_CTRL E1000_PHY_CTRL | ||
1113 | #define E1000_82542_ERT E1000_ERT | 1198 | #define E1000_82542_ERT E1000_ERT |
1114 | #define E1000_82542_RXDCTL E1000_RXDCTL | 1199 | #define E1000_82542_RXDCTL E1000_RXDCTL |
1200 | #define E1000_82542_RXDCTL1 E1000_RXDCTL1 | ||
1115 | #define E1000_82542_RADV E1000_RADV | 1201 | #define E1000_82542_RADV E1000_RADV |
1116 | #define E1000_82542_RSRPD E1000_RSRPD | 1202 | #define E1000_82542_RSRPD E1000_RSRPD |
1117 | #define E1000_82542_TXDMAC E1000_TXDMAC | 1203 | #define E1000_82542_TXDMAC E1000_TXDMAC |
1204 | #define E1000_82542_KABGTXD E1000_KABGTXD | ||
1118 | #define E1000_82542_TDFHS E1000_TDFHS | 1205 | #define E1000_82542_TDFHS E1000_TDFHS |
1119 | #define E1000_82542_TDFTS E1000_TDFTS | 1206 | #define E1000_82542_TDFTS E1000_TDFTS |
1120 | #define E1000_82542_TDFPC E1000_TDFPC | 1207 | #define E1000_82542_TDFPC E1000_TDFPC |
@@ -1310,13 +1397,16 @@ struct e1000_hw_stats { | |||
1310 | 1397 | ||
1311 | /* Structure containing variables used by the shared code (e1000_hw.c) */ | 1398 | /* Structure containing variables used by the shared code (e1000_hw.c) */ |
1312 | struct e1000_hw { | 1399 | struct e1000_hw { |
1313 | uint8_t __iomem *hw_addr; | 1400 | uint8_t *hw_addr; |
1314 | uint8_t *flash_address; | 1401 | uint8_t *flash_address; |
1315 | e1000_mac_type mac_type; | 1402 | e1000_mac_type mac_type; |
1316 | e1000_phy_type phy_type; | 1403 | e1000_phy_type phy_type; |
1317 | uint32_t phy_init_script; | 1404 | uint32_t phy_init_script; |
1318 | e1000_media_type media_type; | 1405 | e1000_media_type media_type; |
1319 | void *back; | 1406 | void *back; |
1407 | struct e1000_shadow_ram *eeprom_shadow_ram; | ||
1408 | uint32_t flash_bank_size; | ||
1409 | uint32_t flash_base_addr; | ||
1320 | e1000_fc_type fc; | 1410 | e1000_fc_type fc; |
1321 | e1000_bus_speed bus_speed; | 1411 | e1000_bus_speed bus_speed; |
1322 | e1000_bus_width bus_width; | 1412 | e1000_bus_width bus_width; |
@@ -1328,6 +1418,7 @@ struct e1000_hw { | |||
1328 | uint32_t asf_firmware_present; | 1418 | uint32_t asf_firmware_present; |
1329 | uint32_t eeprom_semaphore_present; | 1419 | uint32_t eeprom_semaphore_present; |
1330 | uint32_t swfw_sync_present; | 1420 | uint32_t swfw_sync_present; |
1421 | uint32_t swfwhw_semaphore_present; | ||
1331 | unsigned long io_base; | 1422 | unsigned long io_base; |
1332 | uint32_t phy_id; | 1423 | uint32_t phy_id; |
1333 | uint32_t phy_revision; | 1424 | uint32_t phy_revision; |
@@ -1387,6 +1478,7 @@ struct e1000_hw { | |||
1387 | boolean_t in_ifs_mode; | 1478 | boolean_t in_ifs_mode; |
1388 | boolean_t mng_reg_access_disabled; | 1479 | boolean_t mng_reg_access_disabled; |
1389 | boolean_t leave_av_bit_off; | 1480 | boolean_t leave_av_bit_off; |
1481 | boolean_t kmrn_lock_loss_workaround_disabled; | ||
1390 | }; | 1482 | }; |
1391 | 1483 | ||
1392 | 1484 | ||
@@ -1435,6 +1527,7 @@ struct e1000_hw { | |||
1435 | #define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */ | 1527 | #define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */ |
1436 | #define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ | 1528 | #define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ |
1437 | #define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ | 1529 | #define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ |
1530 | #define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to manageability engine */ | ||
1438 | 1531 | ||
1439 | /* Device Status */ | 1532 | /* Device Status */ |
1440 | #define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ | 1533 | #define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ |
@@ -1449,6 +1542,8 @@ struct e1000_hw { | |||
1449 | #define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ | 1542 | #define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ |
1450 | #define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ | 1543 | #define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ |
1451 | #define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ | 1544 | #define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ |
1545 | #define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion | ||
1546 | by EEPROM/Flash */ | ||
1452 | #define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */ | 1547 | #define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */ |
1453 | #define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */ | 1548 | #define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */ |
1454 | #define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */ | 1549 | #define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */ |
@@ -1506,6 +1601,10 @@ struct e1000_hw { | |||
1506 | #define E1000_STM_OPCODE 0xDB00 | 1601 | #define E1000_STM_OPCODE 0xDB00 |
1507 | #define E1000_HICR_FW_RESET 0xC0 | 1602 | #define E1000_HICR_FW_RESET 0xC0 |
1508 | 1603 | ||
1604 | #define E1000_SHADOW_RAM_WORDS 2048 | ||
1605 | #define E1000_ICH8_NVM_SIG_WORD 0x13 | ||
1606 | #define E1000_ICH8_NVM_SIG_MASK 0xC0 | ||
1607 | |||
1509 | /* EEPROM Read */ | 1608 | /* EEPROM Read */ |
1510 | #define E1000_EERD_START 0x00000001 /* Start Read */ | 1609 | #define E1000_EERD_START 0x00000001 /* Start Read */ |
1511 | #define E1000_EERD_DONE 0x00000010 /* Read Done */ | 1610 | #define E1000_EERD_DONE 0x00000010 /* Read Done */ |
@@ -1551,7 +1650,6 @@ struct e1000_hw { | |||
1551 | #define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 | 1650 | #define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 |
1552 | #define E1000_CTRL_EXT_WR_WMARK_384 0x02000000 | 1651 | #define E1000_CTRL_EXT_WR_WMARK_384 0x02000000 |
1553 | #define E1000_CTRL_EXT_WR_WMARK_448 0x03000000 | 1652 | #define E1000_CTRL_EXT_WR_WMARK_448 0x03000000 |
1554 | #define E1000_CTRL_EXT_CANC 0x04000000 /* Interrupt delay cancellation */ | ||
1555 | #define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ | 1653 | #define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ |
1556 | #define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ | 1654 | #define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ |
1557 | #define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ | 1655 | #define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ |
@@ -1591,12 +1689,31 @@ struct e1000_hw { | |||
1591 | #define E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS 0x00000800 | 1689 | #define E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS 0x00000800 |
1592 | 1690 | ||
1593 | /* In-Band Control */ | 1691 | /* In-Band Control */ |
1692 | #define E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT 0x00000500 | ||
1594 | #define E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING 0x00000010 | 1693 | #define E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING 0x00000010 |
1595 | 1694 | ||
1596 | /* Half-Duplex Control */ | 1695 | /* Half-Duplex Control */ |
1597 | #define E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT 0x00000004 | 1696 | #define E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT 0x00000004 |
1598 | #define E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT 0x00000000 | 1697 | #define E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT 0x00000000 |
1599 | 1698 | ||
1699 | #define E1000_KUMCTRLSTA_OFFSET_K0S_CTRL 0x0000001E | ||
1700 | |||
1701 | #define E1000_KUMCTRLSTA_DIAG_FELPBK 0x2000 | ||
1702 | #define E1000_KUMCTRLSTA_DIAG_NELPBK 0x1000 | ||
1703 | |||
1704 | #define E1000_KUMCTRLSTA_K0S_100_EN 0x2000 | ||
1705 | #define E1000_KUMCTRLSTA_K0S_GBE_EN 0x1000 | ||
1706 | #define E1000_KUMCTRLSTA_K0S_ENTRY_LATENCY_MASK 0x0003 | ||
1707 | |||
1708 | #define E1000_KABGTXD_BGSQLBIAS 0x00050000 | ||
1709 | |||
1710 | #define E1000_PHY_CTRL_SPD_EN 0x00000001 | ||
1711 | #define E1000_PHY_CTRL_D0A_LPLU 0x00000002 | ||
1712 | #define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 | ||
1713 | #define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 | ||
1714 | #define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 | ||
1715 | #define E1000_PHY_CTRL_B2B_EN 0x00000080 | ||
1716 | |||
1600 | /* LED Control */ | 1717 | /* LED Control */ |
1601 | #define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F | 1718 | #define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F |
1602 | #define E1000_LEDCTL_LED0_MODE_SHIFT 0 | 1719 | #define E1000_LEDCTL_LED0_MODE_SHIFT 0 |
@@ -1666,6 +1783,9 @@ struct e1000_hw { | |||
1666 | #define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */ | 1783 | #define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */ |
1667 | #define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */ | 1784 | #define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */ |
1668 | #define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */ | 1785 | #define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */ |
1786 | #define E1000_ICR_DSW 0x00000020 /* FW changed the status of DISSW bit in the FWSM */ | ||
1787 | #define E1000_ICR_PHYINT 0x00001000 /* LAN connected device generates an interrupt */ | ||
1788 | #define E1000_ICR_EPRST 0x00100000 /* ME hardware reset occurs */ | ||
1669 | 1789 | ||
1670 | /* Interrupt Cause Set */ | 1790 | /* Interrupt Cause Set */ |
1671 | #define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ | 1791 | #define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ |
@@ -1692,6 +1812,9 @@ struct e1000_hw { | |||
1692 | #define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ | 1812 | #define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ |
1693 | #define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ | 1813 | #define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ |
1694 | #define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ | 1814 | #define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ |
1815 | #define E1000_ICS_DSW E1000_ICR_DSW | ||
1816 | #define E1000_ICS_PHYINT E1000_ICR_PHYINT | ||
1817 | #define E1000_ICS_EPRST E1000_ICR_EPRST | ||
1695 | 1818 | ||
1696 | /* Interrupt Mask Set */ | 1819 | /* Interrupt Mask Set */ |
1697 | #define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ | 1820 | #define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ |
@@ -1718,6 +1841,9 @@ struct e1000_hw { | |||
1718 | #define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ | 1841 | #define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ |
1719 | #define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ | 1842 | #define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ |
1720 | #define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ | 1843 | #define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ |
1844 | #define E1000_IMS_DSW E1000_ICR_DSW | ||
1845 | #define E1000_IMS_PHYINT E1000_ICR_PHYINT | ||
1846 | #define E1000_IMS_EPRST E1000_ICR_EPRST | ||
1721 | 1847 | ||
1722 | /* Interrupt Mask Clear */ | 1848 | /* Interrupt Mask Clear */ |
1723 | #define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */ | 1849 | #define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */ |
@@ -1744,6 +1870,9 @@ struct e1000_hw { | |||
1744 | #define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ | 1870 | #define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ |
1745 | #define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ | 1871 | #define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ |
1746 | #define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ | 1872 | #define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ |
1873 | #define E1000_IMC_DSW E1000_ICR_DSW | ||
1874 | #define E1000_IMC_PHYINT E1000_ICR_PHYINT | ||
1875 | #define E1000_IMC_EPRST E1000_ICR_EPRST | ||
1747 | 1876 | ||
1748 | /* Receive Control */ | 1877 | /* Receive Control */ |
1749 | #define E1000_RCTL_RST 0x00000001 /* Software reset */ | 1878 | #define E1000_RCTL_RST 0x00000001 /* Software reset */ |
@@ -1918,9 +2047,10 @@ struct e1000_hw { | |||
1918 | #define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 | 2047 | #define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 |
1919 | #define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 | 2048 | #define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 |
1920 | #define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 | 2049 | #define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 |
1921 | #define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00040000 | 2050 | #define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 |
1922 | #define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000 | 2051 | #define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000 |
1923 | #define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 | 2052 | #define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 |
2053 | #define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 | ||
1924 | 2054 | ||
1925 | /* Definitions for power management and wakeup registers */ | 2055 | /* Definitions for power management and wakeup registers */ |
1926 | /* Wake Up Control */ | 2056 | /* Wake Up Control */ |
@@ -2010,6 +2140,15 @@ struct e1000_hw { | |||
2010 | #define E1000_FWSM_MODE_SHIFT 1 | 2140 | #define E1000_FWSM_MODE_SHIFT 1 |
2011 | #define E1000_FWSM_FW_VALID 0x00008000 /* FW established a valid mode */ | 2141 | #define E1000_FWSM_FW_VALID 0x00008000 /* FW established a valid mode */ |
2012 | 2142 | ||
2143 | #define E1000_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI reset */ | ||
2144 | #define E1000_FWSM_DISSW 0x10000000 /* FW disable SW Write Access */ | ||
2145 | #define E1000_FWSM_SKUSEL_MASK 0x60000000 /* LAN SKU select */ | ||
2146 | #define E1000_FWSM_SKUEL_SHIFT 29 | ||
2147 | #define E1000_FWSM_SKUSEL_EMB 0x0 /* Embedded SKU */ | ||
2148 | #define E1000_FWSM_SKUSEL_CONS 0x1 /* Consumer SKU */ | ||
2149 | #define E1000_FWSM_SKUSEL_PERF_100 0x2 /* Perf & Corp 10/100 SKU */ | ||
2150 | #define E1000_FWSM_SKUSEL_PERF_GBE 0x3 /* Perf & Corp GbE SKU */ | ||
2151 | |||
2013 | /* FFLT Debug Register */ | 2152 | /* FFLT Debug Register */ |
2014 | #define E1000_FFLT_DBG_INVC 0x00100000 /* Invalid /C/ code handling */ | 2153 | #define E1000_FFLT_DBG_INVC 0x00100000 /* Invalid /C/ code handling */ |
2015 | 2154 | ||
@@ -2082,6 +2221,8 @@ struct e1000_host_command_info { | |||
2082 | E1000_GCR_TXDSCW_NO_SNOOP | \ | 2221 | E1000_GCR_TXDSCW_NO_SNOOP | \ |
2083 | E1000_GCR_TXDSCR_NO_SNOOP) | 2222 | E1000_GCR_TXDSCR_NO_SNOOP) |
2084 | 2223 | ||
2224 | #define PCI_EX_82566_SNOOP_ALL PCI_EX_NO_SNOOP_ALL | ||
2225 | |||
2085 | #define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 | 2226 | #define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 |
2086 | /* Function Active and Power State to MNG */ | 2227 | /* Function Active and Power State to MNG */ |
2087 | #define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003 | 2228 | #define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003 |
@@ -2140,8 +2281,10 @@ struct e1000_host_command_info { | |||
2140 | #define EEPROM_PHY_CLASS_WORD 0x0007 | 2281 | #define EEPROM_PHY_CLASS_WORD 0x0007 |
2141 | #define EEPROM_INIT_CONTROL1_REG 0x000A | 2282 | #define EEPROM_INIT_CONTROL1_REG 0x000A |
2142 | #define EEPROM_INIT_CONTROL2_REG 0x000F | 2283 | #define EEPROM_INIT_CONTROL2_REG 0x000F |
2284 | #define EEPROM_SWDEF_PINS_CTRL_PORT_1 0x0010 | ||
2143 | #define EEPROM_INIT_CONTROL3_PORT_B 0x0014 | 2285 | #define EEPROM_INIT_CONTROL3_PORT_B 0x0014 |
2144 | #define EEPROM_INIT_3GIO_3 0x001A | 2286 | #define EEPROM_INIT_3GIO_3 0x001A |
2287 | #define EEPROM_SWDEF_PINS_CTRL_PORT_0 0x0020 | ||
2145 | #define EEPROM_INIT_CONTROL3_PORT_A 0x0024 | 2288 | #define EEPROM_INIT_CONTROL3_PORT_A 0x0024 |
2146 | #define EEPROM_CFG 0x0012 | 2289 | #define EEPROM_CFG 0x0012 |
2147 | #define EEPROM_FLASH_VERSION 0x0032 | 2290 | #define EEPROM_FLASH_VERSION 0x0032 |
@@ -2153,10 +2296,16 @@ struct e1000_host_command_info { | |||
2153 | /* Word definitions for ID LED Settings */ | 2296 | /* Word definitions for ID LED Settings */ |
2154 | #define ID_LED_RESERVED_0000 0x0000 | 2297 | #define ID_LED_RESERVED_0000 0x0000 |
2155 | #define ID_LED_RESERVED_FFFF 0xFFFF | 2298 | #define ID_LED_RESERVED_FFFF 0xFFFF |
2299 | #define ID_LED_RESERVED_82573 0xF746 | ||
2300 | #define ID_LED_DEFAULT_82573 0x1811 | ||
2156 | #define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ | 2301 | #define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ |
2157 | (ID_LED_OFF1_OFF2 << 8) | \ | 2302 | (ID_LED_OFF1_OFF2 << 8) | \ |
2158 | (ID_LED_DEF1_DEF2 << 4) | \ | 2303 | (ID_LED_DEF1_DEF2 << 4) | \ |
2159 | (ID_LED_DEF1_DEF2)) | 2304 | (ID_LED_DEF1_DEF2)) |
2305 | #define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \ | ||
2306 | (ID_LED_DEF1_OFF2 << 8) | \ | ||
2307 | (ID_LED_DEF1_ON2 << 4) | \ | ||
2308 | (ID_LED_DEF1_DEF2)) | ||
2160 | #define ID_LED_DEF1_DEF2 0x1 | 2309 | #define ID_LED_DEF1_DEF2 0x1 |
2161 | #define ID_LED_DEF1_ON2 0x2 | 2310 | #define ID_LED_DEF1_ON2 0x2 |
2162 | #define ID_LED_DEF1_OFF2 0x3 | 2311 | #define ID_LED_DEF1_OFF2 0x3 |
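For reference, the ICH8LAN default above packs one LED-mode nibble per LED into a single word; a worked expansion using the values defined here:

/* ID_LED_DEFAULT_ICH8LAN
 *   == (ID_LED_DEF1_DEF2 << 12) | (ID_LED_DEF1_OFF2 << 8) |
 *      (ID_LED_DEF1_ON2 << 4) | ID_LED_DEF1_DEF2
 *   == (0x1 << 12) | (0x3 << 8) | (0x2 << 4) | 0x1
 *   == 0x1321
 */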
@@ -2191,6 +2340,11 @@ struct e1000_host_command_info { | |||
2191 | #define EEPROM_WORD0F_ASM_DIR 0x2000 | 2340 | #define EEPROM_WORD0F_ASM_DIR 0x2000 |
2192 | #define EEPROM_WORD0F_ANE 0x0800 | 2341 | #define EEPROM_WORD0F_ANE 0x0800 |
2193 | #define EEPROM_WORD0F_SWPDIO_EXT 0x00F0 | 2342 | #define EEPROM_WORD0F_SWPDIO_EXT 0x00F0 |
2343 | #define EEPROM_WORD0F_LPLU 0x0001 | ||
2344 | |||
2345 | /* Mask bits for fields in Word 0x10/0x20 of the EEPROM */ | ||
2346 | #define EEPROM_WORD1020_GIGA_DISABLE 0x0010 | ||
2347 | #define EEPROM_WORD1020_GIGA_DISABLE_NON_D0A 0x0008 | ||
2194 | 2348 | ||
2195 | /* Mask bits for fields in Word 0x1a of the EEPROM */ | 2349 | /* Mask bits for fields in Word 0x1a of the EEPROM */ |
2196 | #define EEPROM_WORD1A_ASPM_MASK 0x000C | 2350 | #define EEPROM_WORD1A_ASPM_MASK 0x000C |
@@ -2265,23 +2419,29 @@ struct e1000_host_command_info { | |||
2265 | #define E1000_EXTCNF_CTRL_D_UD_OWNER 0x00000010 | 2419 | #define E1000_EXTCNF_CTRL_D_UD_OWNER 0x00000010 |
2266 | #define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 | 2420 | #define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 |
2267 | #define E1000_EXTCNF_CTRL_MDIO_HW_OWNERSHIP 0x00000040 | 2421 | #define E1000_EXTCNF_CTRL_MDIO_HW_OWNERSHIP 0x00000040 |
2268 | #define E1000_EXTCNF_CTRL_EXT_CNF_POINTER 0x1FFF0000 | 2422 | #define E1000_EXTCNF_CTRL_EXT_CNF_POINTER 0x0FFF0000 |
2269 | 2423 | ||
2270 | #define E1000_EXTCNF_SIZE_EXT_PHY_LENGTH 0x000000FF | 2424 | #define E1000_EXTCNF_SIZE_EXT_PHY_LENGTH 0x000000FF |
2271 | #define E1000_EXTCNF_SIZE_EXT_DOCK_LENGTH 0x0000FF00 | 2425 | #define E1000_EXTCNF_SIZE_EXT_DOCK_LENGTH 0x0000FF00 |
2272 | #define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH 0x00FF0000 | 2426 | #define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH 0x00FF0000 |
2427 | #define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 | ||
2428 | #define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 | ||
2273 | 2429 | ||
2274 | /* PBA constants */ | 2430 | /* PBA constants */ |
2431 | #define E1000_PBA_8K 0x0008 /* 8KB, default Rx allocation */ | ||
2275 | #define E1000_PBA_12K 0x000C /* 12KB, default Rx allocation */ | 2432 | #define E1000_PBA_12K 0x000C /* 12KB, default Rx allocation */ |
2276 | #define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */ | 2433 | #define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */ |
2277 | #define E1000_PBA_22K 0x0016 | 2434 | #define E1000_PBA_22K 0x0016 |
2278 | #define E1000_PBA_24K 0x0018 | 2435 | #define E1000_PBA_24K 0x0018 |
2279 | #define E1000_PBA_30K 0x001E | 2436 | #define E1000_PBA_30K 0x001E |
2280 | #define E1000_PBA_32K 0x0020 | 2437 | #define E1000_PBA_32K 0x0020 |
2438 | #define E1000_PBA_34K 0x0022 | ||
2281 | #define E1000_PBA_38K 0x0026 | 2439 | #define E1000_PBA_38K 0x0026 |
2282 | #define E1000_PBA_40K 0x0028 | 2440 | #define E1000_PBA_40K 0x0028 |
2283 | #define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */ | 2441 | #define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */ |
2284 | 2442 | ||
2443 | #define E1000_PBS_16K E1000_PBA_16K | ||
2444 | |||
2285 | /* Flow Control Constants */ | 2445 | /* Flow Control Constants */ |
2286 | #define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 | 2446 | #define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 |
2287 | #define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 | 2447 | #define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 |
@@ -2336,7 +2496,7 @@ struct e1000_host_command_info { | |||
2336 | /* Number of milliseconds we wait for Eeprom auto read bit done after MAC reset */ | 2496 | /* Number of milliseconds we wait for Eeprom auto read bit done after MAC reset */ |
2337 | #define AUTO_READ_DONE_TIMEOUT 10 | 2497 | #define AUTO_READ_DONE_TIMEOUT 10 |
2338 | /* Number of milliseconds we wait for PHY configuration done after MAC reset */ | 2498 | /* Number of milliseconds we wait for PHY configuration done after MAC reset */ |
2339 | #define PHY_CFG_TIMEOUT 40 | 2499 | #define PHY_CFG_TIMEOUT 100 |
2340 | 2500 | ||
2341 | #define E1000_TX_BUFFER_SIZE ((uint32_t)1514) | 2501 | #define E1000_TX_BUFFER_SIZE ((uint32_t)1514) |
2342 | 2502 | ||
@@ -2764,6 +2924,17 @@ struct e1000_host_command_info { | |||
2764 | #define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ | 2924 | #define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ |
2765 | #define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */ | 2925 | #define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */ |
2766 | 2926 | ||
2927 | /* M88EC018 Rev 2 specific DownShift settings */ | ||
2928 | #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 | ||
2929 | #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X 0x0000 | ||
2930 | #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X 0x0200 | ||
2931 | #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X 0x0400 | ||
2932 | #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X 0x0600 | ||
2933 | #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 | ||
2934 | #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X 0x0A00 | ||
2935 | #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X 0x0C00 | ||
2936 | #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X 0x0E00 | ||
2937 | |||
2767 | /* IGP01E1000 Specific Port Config Register - R/W */ | 2938 | /* IGP01E1000 Specific Port Config Register - R/W */ |
2768 | #define IGP01E1000_PSCFR_AUTO_MDIX_PAR_DETECT 0x0010 | 2939 | #define IGP01E1000_PSCFR_AUTO_MDIX_PAR_DETECT 0x0010 |
2769 | #define IGP01E1000_PSCFR_PRE_EN 0x0020 | 2940 | #define IGP01E1000_PSCFR_PRE_EN 0x0020 |
@@ -2990,6 +3161,221 @@ struct e1000_host_command_info { | |||
2990 | #define L1LXT971A_PHY_ID 0x001378E0 | 3161 | #define L1LXT971A_PHY_ID 0x001378E0 |
2991 | #define GG82563_E_PHY_ID 0x01410CA0 | 3162 | #define GG82563_E_PHY_ID 0x01410CA0 |
2992 | 3163 | ||
3164 | |||
3165 | /* Bits... | ||
3166 | * 15-5: page | ||
3167 | * 4-0: register offset | ||
3168 | */ | ||
3169 | #define PHY_PAGE_SHIFT 5 | ||
3170 | #define PHY_REG(page, reg) \ | ||
3171 | (((page) << PHY_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) | ||
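For example, page 769, offset 17 (used for IGP3_PHY_PORT_CTRL below) flattens to a single address; a worked expansion, assuming MAX_PHY_REG_ADDRESS is the usual 0x1F five-bit mask defined earlier in this header:

/* PHY_REG(769, 17)
 *   == (769 << PHY_PAGE_SHIFT) | (17 & MAX_PHY_REG_ADDRESS)
 *   == 0x6020 | 0x11
 *   == 0x6031
 */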
3172 | |||
3173 | #define IGP3_PHY_PORT_CTRL \ | ||
3174 | PHY_REG(769, 17) /* Port General Configuration */ | ||
3175 | #define IGP3_PHY_RATE_ADAPT_CTRL \ | ||
3176 | PHY_REG(769, 25) /* Rate Adapter Control Register */ | ||
3177 | |||
3178 | #define IGP3_KMRN_FIFO_CTRL_STATS \ | ||
3179 | PHY_REG(770, 16) /* KMRN FIFO's control/status register */ | ||
3180 | #define IGP3_KMRN_POWER_MNG_CTRL \ | ||
3181 | PHY_REG(770, 17) /* KMRN Power Management Control Register */ | ||
3182 | #define IGP3_KMRN_INBAND_CTRL \ | ||
3183 | PHY_REG(770, 18) /* KMRN Inband Control Register */ | ||
3184 | #define IGP3_KMRN_DIAG \ | ||
3185 | PHY_REG(770, 19) /* KMRN Diagnostic register */ | ||
3186 | #define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 /* RX PCS is not synced */ | ||
3187 | #define IGP3_KMRN_ACK_TIMEOUT \ | ||
3188 | PHY_REG(770, 20) /* KMRN Acknowledge Timeouts register */ | ||
3189 | |||
3190 | #define IGP3_VR_CTRL \ | ||
3191 | PHY_REG(776, 18) /* Voltage regulator control register */ | ||
3192 | #define IGP3_VR_CTRL_MODE_SHUT 0x0200 /* Enter powerdown, shutdown VRs */ | ||
3193 | |||
3194 | #define IGP3_CAPABILITY \ | ||
3195 | PHY_REG(776, 19) /* IGP3 Capability Register */ | ||
3196 | |||
3197 | /* Capabilities for SKU Control */ | ||
3198 | #define IGP3_CAP_INITIATE_TEAM 0x0001 /* Able to initiate a team */ | ||
3199 | #define IGP3_CAP_WFM 0x0002 /* Support WoL and PXE */ | ||
3200 | #define IGP3_CAP_ASF 0x0004 /* Support ASF */ | ||
3201 | #define IGP3_CAP_LPLU 0x0008 /* Support Low Power Link Up */ | ||
3202 | #define IGP3_CAP_DC_AUTO_SPEED 0x0010 /* Support AC/DC Auto Link Speed */ | ||
3203 | #define IGP3_CAP_SPD 0x0020 /* Support Smart Power Down */ | ||
3204 | #define IGP3_CAP_MULT_QUEUE 0x0040 /* Support 2 tx & 2 rx queues */ | ||
3205 | #define IGP3_CAP_RSS 0x0080 /* Support RSS */ | ||
3206 | #define IGP3_CAP_8021PQ 0x0100 /* Support 802.1Q & 802.1p */ | ||
3207 | #define IGP3_CAP_AMT_CB 0x0200 /* Support active manageability and circuit breaker */ | ||
3208 | |||
3209 | #define IGP3_PPC_JORDAN_EN 0x0001 | ||
3210 | #define IGP3_PPC_JORDAN_GIGA_SPEED 0x0002 | ||
3211 | |||
3212 | #define IGP3_KMRN_PMC_EE_IDLE_LINK_DIS 0x0001 | ||
3213 | #define IGP3_KMRN_PMC_K0S_ENTRY_LATENCY_MASK 0x001E | ||
3214 | #define IGP3_KMRN_PMC_K0S_MODE1_EN_GIGA 0x0020 | ||
3215 | #define IGP3_KMRN_PMC_K0S_MODE1_EN_100 0x0040 | ||
3216 | |||
3217 | #define IGP3E1000_PHY_MISC_CTRL 0x1B /* Misc. Ctrl register */ | ||
3218 | #define IGP3_PHY_MISC_DUPLEX_MANUAL_SET 0x1000 /* Duplex Manual Set */ | ||
3219 | |||
3220 | #define IGP3_KMRN_EXT_CTRL PHY_REG(770, 18) | ||
3221 | #define IGP3_KMRN_EC_DIS_INBAND 0x0080 | ||
3222 | |||
3223 | #define IGP03E1000_E_PHY_ID 0x02A80390 | ||
3224 | #define IFE_E_PHY_ID 0x02A80330 /* 10/100 PHY */ | ||
3225 | #define IFE_PLUS_E_PHY_ID 0x02A80320 | ||
3226 | #define IFE_C_E_PHY_ID 0x02A80310 | ||
3227 | |||
3228 | #define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 /* 100BaseTx Extended Status, Control and Address */ | ||
3229 | #define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY special control register */ | ||
3230 | #define IFE_PHY_RCV_FALSE_CARRIER 0x13 /* 100BaseTx Receive False Carrier Counter */ | ||
3231 | #define IFE_PHY_RCV_DISCONNECT 0x14 /* 100BaseTx Receive Disconnect Counter */ | ||
3232 | #define IFE_PHY_RCV_ERROT_FRAME 0x15 /* 100BaseTx Receive Error Frame Counter */ | ||
3233 | #define IFE_PHY_RCV_SYMBOL_ERR 0x16 /* Receive Symbol Error Counter */ | ||
3234 | #define IFE_PHY_PREM_EOF_ERR 0x17 /* 100BaseTx Receive Premature End Of Frame Error Counter */ | ||
3235 | #define IFE_PHY_RCV_EOF_ERR 0x18 /* 10BaseT Receive End Of Frame Error Counter */ | ||
3236 | #define IFE_PHY_TX_JABBER_DETECT 0x19 /* 10BaseT Transmit Jabber Detect Counter */ | ||
3237 | #define IFE_PHY_EQUALIZER 0x1A /* PHY Equalizer Control and Status */ | ||
3238 | #define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY special control and LED configuration */ | ||
3239 | #define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control register */ | ||
3240 | #define IFE_PHY_HWI_CONTROL 0x1D /* Hardware Integrity Control (HWI) */ | ||
3241 | |||
3242 | #define IFE_PESC_REDUCED_POWER_DOWN_DISABLE 0x2000 /* Default 1 = Disable auto reduced power down */ | ||
3243 | #define IFE_PESC_100BTX_POWER_DOWN 0x0400 /* Indicates the power state of 100BASE-TX */ | ||
3244 | #define IFE_PESC_10BTX_POWER_DOWN 0x0200 /* Indicates the power state of 10BASE-T */ | ||
3245 | #define IFE_PESC_POLARITY_REVERSED 0x0100 /* Indicates 10BASE-T polarity */ | ||
3246 | #define IFE_PESC_PHY_ADDR_MASK 0x007C /* Bit 6:2 for sampled PHY address */ | ||
3247 | #define IFE_PESC_SPEED 0x0002 /* Auto-negotiation speed result 1=100Mbs, 0=10Mbs */ | ||
3248 | #define IFE_PESC_DUPLEX 0x0001 /* Auto-negotiation duplex result 1=Full, 0=Half */ | ||
3249 | #define IFE_PESC_POLARITY_REVERSED_SHIFT 8 | ||
3250 | |||
3251 | #define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100 /* 1 = Dynamic Power Down disabled */ | ||
3252 | #define IFE_PSC_FORCE_POLARITY 0x0020 /* 1=Reversed Polarity, 0=Normal */ | ||
3253 | #define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 /* 1=Auto Polarity Disabled, 0=Enabled */ | ||
3254 | #define IFE_PSC_JABBER_FUNC_DISABLE 0x0001 /* 1=Jabber Disabled, 0=Normal Jabber Operation */ | ||
3255 | #define IFE_PSC_FORCE_POLARITY_SHIFT 5 | ||
3256 | #define IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT 4 | ||
3257 | |||
3258 | #define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable MDI/MDI-X feature, default 0=disabled */ | ||
3259 | #define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */ | ||
3260 | #define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ | ||
3261 | #define IFE_PMC_AUTO_MDIX_COMPLETE 0x0010 /* Resolution algorithm is completed */ | ||
3262 | #define IFE_PMC_MDIX_MODE_SHIFT 6 | ||
3263 | #define IFE_PHC_MDIX_RESET_ALL_MASK 0x0000 /* Disable auto MDI-X */ | ||
3264 | |||
3265 | #define IFE_PHC_HWI_ENABLE 0x8000 /* Enable the HWI feature */ | ||
3266 | #define IFE_PHC_ABILITY_CHECK 0x4000 /* 1= Test Passed, 0=failed */ | ||
3267 | #define IFE_PHC_TEST_EXEC 0x2000 /* PHY launch test pulses on the wire */ | ||
3268 | #define IFE_PHC_HIGHZ 0x0200 /* 1 = Open Circuit */ | ||
3269 | #define IFE_PHC_LOWZ 0x0400 /* 1 = Short Circuit */ | ||
3270 | #define IFE_PHC_LOW_HIGH_Z_MASK 0x0600 /* Mask for indication type of problem on the line */ | ||
3271 | #define IFE_PHC_DISTANCE_MASK 0x01FF /* Mask for distance to the cable problem, in 80cm granularity */ | ||
3272 | #define IFE_PHC_RESET_ALL_MASK 0x0000 /* Disable HWI */ | ||
3273 | #define IFE_PSCL_PROBE_MODE 0x0020 /* LED Probe mode */ | ||
3274 | #define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */ | ||
3275 | #define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ | ||
3276 | |||
3277 | #define ICH8_FLASH_COMMAND_TIMEOUT 500 /* 500 ms, should be adjusted */ | ||
3278 | #define ICH8_FLASH_CYCLE_REPEAT_COUNT 10 /* 10 cycles, should be adjusted */ | ||
3279 | #define ICH8_FLASH_SEG_SIZE_256 256 | ||
3280 | #define ICH8_FLASH_SEG_SIZE_4K 4096 | ||
3281 | #define ICH8_FLASH_SEG_SIZE_64K 65536 | ||
3282 | |||
3283 | #define ICH8_CYCLE_READ 0x0 | ||
3284 | #define ICH8_CYCLE_RESERVED 0x1 | ||
3285 | #define ICH8_CYCLE_WRITE 0x2 | ||
3286 | #define ICH8_CYCLE_ERASE 0x3 | ||
3287 | |||
3288 | #define ICH8_FLASH_GFPREG 0x0000 | ||
3289 | #define ICH8_FLASH_HSFSTS 0x0004 | ||
3290 | #define ICH8_FLASH_HSFCTL 0x0006 | ||
3291 | #define ICH8_FLASH_FADDR 0x0008 | ||
3292 | #define ICH8_FLASH_FDATA0 0x0010 | ||
3293 | #define ICH8_FLASH_FRACC 0x0050 | ||
3294 | #define ICH8_FLASH_FREG0 0x0054 | ||
3295 | #define ICH8_FLASH_FREG1 0x0058 | ||
3296 | #define ICH8_FLASH_FREG2 0x005C | ||
3297 | #define ICH8_FLASH_FREG3 0x0060 | ||
3298 | #define ICH8_FLASH_FPR0 0x0074 | ||
3299 | #define ICH8_FLASH_FPR1 0x0078 | ||
3300 | #define ICH8_FLASH_SSFSTS 0x0090 | ||
3301 | #define ICH8_FLASH_SSFCTL 0x0092 | ||
3302 | #define ICH8_FLASH_PREOP 0x0094 | ||
3303 | #define ICH8_FLASH_OPTYPE 0x0096 | ||
3304 | #define ICH8_FLASH_OPMENU 0x0098 | ||
3305 | |||
3306 | #define ICH8_FLASH_REG_MAPSIZE 0x00A0 | ||
3307 | #define ICH8_FLASH_SECTOR_SIZE 4096 | ||
3308 | #define ICH8_GFPREG_BASE_MASK 0x1FFF | ||
3309 | #define ICH8_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF | ||
3310 | |||
3311 | /* ICH8 GbE Flash Hardware Sequencing Flash Status Register bit breakdown */ | ||
3312 | /* Offset 04h HSFSTS */ | ||
3313 | union ich8_hws_flash_status { | ||
3314 | struct ich8_hsfsts { | ||
3315 | #ifdef E1000_BIG_ENDIAN | ||
3316 | uint16_t reserved2 :6; | ||
3317 | uint16_t fldesvalid :1; | ||
3318 | uint16_t flockdn :1; | ||
3319 | uint16_t flcdone :1; | ||
3320 | uint16_t flcerr :1; | ||
3321 | uint16_t dael :1; | ||
3322 | uint16_t berasesz :2; | ||
3323 | uint16_t flcinprog :1; | ||
3324 | uint16_t reserved1 :2; | ||
3325 | #else | ||
3326 | uint16_t flcdone :1; /* bit 0 Flash Cycle Done */ | ||
3327 | uint16_t flcerr :1; /* bit 1 Flash Cycle Error */ | ||
3328 | uint16_t dael :1; /* bit 2 Direct Access error Log */ | ||
3329 | uint16_t berasesz :2; /* bit 4:3 Block/Sector Erase Size */ | ||
3330 | uint16_t flcinprog :1; /* bit 5 flash SPI cycle in Progress */ | ||
3331 | uint16_t reserved1 :2; /* bit 7:6 Reserved */ | ||
3332 | uint16_t reserved2 :6; /* bit 13:8 Reserved */ | ||
3333 | uint16_t fldesvalid :1; /* bit 14 Flash Descriptor Valid */ | ||
3334 | uint16_t flockdn :1; /* bit 15 Flash Configuration Lock-Down */ | ||
3335 | #endif | ||
3336 | } hsf_status; | ||
3337 | uint16_t regval; | ||
3338 | }; | ||
3339 | |||
3340 | /* ICH8 GbE Flash Hardware Sequencing Flash control Register bit breakdown */ | ||
3341 | /* Offset 06h FLCTL */ | ||
3342 | union ich8_hws_flash_ctrl { | ||
3343 | struct ich8_hsflctl { | ||
3344 | #ifdef E1000_BIG_ENDIAN | ||
3345 | uint16_t fldbcount :2; | ||
3346 | uint16_t flockdn :6; | ||
3347 | uint16_t flcgo :1; | ||
3348 | uint16_t flcycle :2; | ||
3349 | uint16_t reserved :5; | ||
3350 | #else | ||
3351 | uint16_t flcgo :1; /* 0 Flash Cycle Go */ | ||
3352 | uint16_t flcycle :2; /* 2:1 Flash Cycle */ | ||
3353 | uint16_t reserved :5; /* 7:3 Reserved */ | ||
3354 | uint16_t fldbcount :2; /* 9:8 Flash Data Byte Count */ | ||
3355 | uint16_t flockdn :6; /* 15:10 Reserved */ | ||
3356 | #endif | ||
3357 | } hsf_ctrl; | ||
3358 | uint16_t regval; | ||
3359 | }; | ||
3360 | |||
3361 | /* ICH8 Flash Region Access Permissions */ | ||
3362 | union ich8_hws_flash_regacc { | ||
3363 | struct ich8_flracc { | ||
3364 | #ifdef E1000_BIG_ENDIAN | ||
3365 | uint32_t gmwag :8; | ||
3366 | uint32_t gmrag :8; | ||
3367 | uint32_t grwa :8; | ||
3368 | uint32_t grra :8; | ||
3369 | #else | ||
3370 | uint32_t grra :8; /* 0:7 GbE region Read Access */ | ||
3371 | uint32_t grwa :8; /* 8:15 GbE region Write Access */ | ||
3372 | uint32_t gmrag :8; /* 23:16 GbE Master Read Access Grant */ | ||
3373 | uint32_t gmwag :8; /* 31:24 GbE Master Write Access Grant */ | ||
3374 | #endif | ||
3375 | } hsf_flregacc; | ||
3376 | uint16_t regval; | ||
3377 | }; | ||
3378 | |||
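The unions above let the flash code read a raw register word once and then test individual fields by name; a minimal sketch of that pattern (read_flash_reg16() is a hypothetical stand-in for the driver's real ICH8 flash register accessor):

/* Minimal sketch: check the hardware-sequencing status word.
 * read_flash_reg16() is hypothetical; only the union usage is the point. */
static int example_flash_cycle_done(struct e1000_hw *hw)
{
	union ich8_hws_flash_status hsfsts;

	hsfsts.regval = read_flash_reg16(hw, ICH8_FLASH_HSFSTS);

	if (hsfsts.hsf_status.flcerr)
		return -1;	/* previous flash cycle failed */

	return hsfsts.hsf_status.flcdone;	/* 1 once the cycle completes */
}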
2993 | /* Miscellaneous PHY bit definitions. */ | 3379 | /* Miscellaneous PHY bit definitions. */ |
2994 | #define PHY_PREAMBLE 0xFFFFFFFF | 3380 | #define PHY_PREAMBLE 0xFFFFFFFF |
2995 | #define PHY_SOF 0x01 | 3381 | #define PHY_SOF 0x01 |
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index f77624f5f17b..da62db897426 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c | |||
@@ -36,7 +36,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; | |||
36 | #else | 36 | #else |
37 | #define DRIVERNAPI "-NAPI" | 37 | #define DRIVERNAPI "-NAPI" |
38 | #endif | 38 | #endif |
39 | #define DRV_VERSION "7.0.38-k4"DRIVERNAPI | 39 | #define DRV_VERSION "7.1.9-k4"DRIVERNAPI |
40 | char e1000_driver_version[] = DRV_VERSION; | 40 | char e1000_driver_version[] = DRV_VERSION; |
41 | static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; | 41 | static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; |
42 | 42 | ||
@@ -73,6 +73,11 @@ static struct pci_device_id e1000_pci_tbl[] = { | |||
73 | INTEL_E1000_ETHERNET_DEVICE(0x1026), | 73 | INTEL_E1000_ETHERNET_DEVICE(0x1026), |
74 | INTEL_E1000_ETHERNET_DEVICE(0x1027), | 74 | INTEL_E1000_ETHERNET_DEVICE(0x1027), |
75 | INTEL_E1000_ETHERNET_DEVICE(0x1028), | 75 | INTEL_E1000_ETHERNET_DEVICE(0x1028), |
76 | INTEL_E1000_ETHERNET_DEVICE(0x1049), | ||
77 | INTEL_E1000_ETHERNET_DEVICE(0x104A), | ||
78 | INTEL_E1000_ETHERNET_DEVICE(0x104B), | ||
79 | INTEL_E1000_ETHERNET_DEVICE(0x104C), | ||
80 | INTEL_E1000_ETHERNET_DEVICE(0x104D), | ||
76 | INTEL_E1000_ETHERNET_DEVICE(0x105E), | 81 | INTEL_E1000_ETHERNET_DEVICE(0x105E), |
77 | INTEL_E1000_ETHERNET_DEVICE(0x105F), | 82 | INTEL_E1000_ETHERNET_DEVICE(0x105F), |
78 | INTEL_E1000_ETHERNET_DEVICE(0x1060), | 83 | INTEL_E1000_ETHERNET_DEVICE(0x1060), |
@@ -96,6 +101,8 @@ static struct pci_device_id e1000_pci_tbl[] = { | |||
96 | INTEL_E1000_ETHERNET_DEVICE(0x109A), | 101 | INTEL_E1000_ETHERNET_DEVICE(0x109A), |
97 | INTEL_E1000_ETHERNET_DEVICE(0x10B5), | 102 | INTEL_E1000_ETHERNET_DEVICE(0x10B5), |
98 | INTEL_E1000_ETHERNET_DEVICE(0x10B9), | 103 | INTEL_E1000_ETHERNET_DEVICE(0x10B9), |
104 | INTEL_E1000_ETHERNET_DEVICE(0x10BA), | ||
105 | INTEL_E1000_ETHERNET_DEVICE(0x10BB), | ||
99 | /* required last entry */ | 106 | /* required last entry */ |
100 | {0,} | 107 | {0,} |
101 | }; | 108 | }; |
@@ -133,7 +140,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter, | |||
133 | static void e1000_set_multi(struct net_device *netdev); | 140 | static void e1000_set_multi(struct net_device *netdev); |
134 | static void e1000_update_phy_info(unsigned long data); | 141 | static void e1000_update_phy_info(unsigned long data); |
135 | static void e1000_watchdog(unsigned long data); | 142 | static void e1000_watchdog(unsigned long data); |
136 | static void e1000_watchdog_task(struct e1000_adapter *adapter); | ||
137 | static void e1000_82547_tx_fifo_stall(unsigned long data); | 143 | static void e1000_82547_tx_fifo_stall(unsigned long data); |
138 | static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev); | 144 | static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev); |
139 | static struct net_device_stats * e1000_get_stats(struct net_device *netdev); | 145 | static struct net_device_stats * e1000_get_stats(struct net_device *netdev); |
@@ -178,8 +184,8 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid); | |||
178 | static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid); | 184 | static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid); |
179 | static void e1000_restore_vlan(struct e1000_adapter *adapter); | 185 | static void e1000_restore_vlan(struct e1000_adapter *adapter); |
180 | 186 | ||
181 | #ifdef CONFIG_PM | ||
182 | static int e1000_suspend(struct pci_dev *pdev, pm_message_t state); | 187 | static int e1000_suspend(struct pci_dev *pdev, pm_message_t state); |
188 | #ifdef CONFIG_PM | ||
183 | static int e1000_resume(struct pci_dev *pdev); | 189 | static int e1000_resume(struct pci_dev *pdev); |
184 | #endif | 190 | #endif |
185 | static void e1000_shutdown(struct pci_dev *pdev); | 191 | static void e1000_shutdown(struct pci_dev *pdev); |
@@ -206,8 +212,8 @@ static struct pci_driver e1000_driver = { | |||
206 | .probe = e1000_probe, | 212 | .probe = e1000_probe, |
207 | .remove = __devexit_p(e1000_remove), | 213 | .remove = __devexit_p(e1000_remove), |
208 | /* Power Management Hooks */ | 214 | /* Power Management Hooks */ |
209 | #ifdef CONFIG_PM | ||
210 | .suspend = e1000_suspend, | 215 | .suspend = e1000_suspend, |
216 | #ifdef CONFIG_PM | ||
211 | .resume = e1000_resume, | 217 | .resume = e1000_resume, |
212 | #endif | 218 | #endif |
213 | .shutdown = e1000_shutdown, | 219 | .shutdown = e1000_shutdown, |
@@ -261,6 +267,44 @@ e1000_exit_module(void) | |||
261 | 267 | ||
262 | module_exit(e1000_exit_module); | 268 | module_exit(e1000_exit_module); |
263 | 269 | ||
270 | static int e1000_request_irq(struct e1000_adapter *adapter) | ||
271 | { | ||
272 | struct net_device *netdev = adapter->netdev; | ||
273 | int flags, err = 0; | ||
274 | |||
275 | flags = IRQF_SHARED; | ||
276 | #ifdef CONFIG_PCI_MSI | ||
277 | if (adapter->hw.mac_type > e1000_82547_rev_2) { | ||
278 | adapter->have_msi = TRUE; | ||
279 | if ((err = pci_enable_msi(adapter->pdev))) { | ||
280 | DPRINTK(PROBE, ERR, | ||
281 | "Unable to allocate MSI interrupt Error: %d\n", err); | ||
282 | adapter->have_msi = FALSE; | ||
283 | } | ||
284 | } | ||
285 | if (adapter->have_msi) | ||
286 | flags &= ~IRQF_SHARED; | ||
287 | #endif | ||
288 | if ((err = request_irq(adapter->pdev->irq, &e1000_intr, flags, | ||
289 | netdev->name, netdev))) | ||
290 | DPRINTK(PROBE, ERR, | ||
291 | "Unable to allocate interrupt Error: %d\n", err); | ||
292 | |||
293 | return err; | ||
294 | } | ||
295 | |||
296 | static void e1000_free_irq(struct e1000_adapter *adapter) | ||
297 | { | ||
298 | struct net_device *netdev = adapter->netdev; | ||
299 | |||
300 | free_irq(adapter->pdev->irq, netdev); | ||
301 | |||
302 | #ifdef CONFIG_PCI_MSI | ||
303 | if (adapter->have_msi) | ||
304 | pci_disable_msi(adapter->pdev); | ||
305 | #endif | ||
306 | } | ||
307 | |||
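The new e1000_request_irq()/e1000_free_irq() pair above centralises the MSI-or-legacy decision that previously lived in e1000_up()/e1000_down(). A minimal sketch of that pattern, assuming the 2.6.17-era request_irq() prototype (with pt_regs) that this driver still uses; it is an illustration, not the driver's code:

#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>

static int example_request_irq(struct pci_dev *pdev, struct net_device *netdev,
			       irqreturn_t (*handler)(int, void *, struct pt_regs *))
{
	unsigned long flags = IRQF_SHARED;
	int have_msi = 0;
	int err;

	if (!pci_enable_msi(pdev)) {
		have_msi = 1;
		flags = 0;		/* an MSI vector is never shared */
	}

	err = request_irq(pdev->irq, handler, flags, netdev->name, netdev);
	if (err && have_msi)
		pci_disable_msi(pdev);	/* unwind the MSI enable on failure */
	return err;
}

IRQF_SHARED stays set in the fallback case because a legacy INTx line may be shared with other devices.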
264 | /** | 308 | /** |
265 | * e1000_irq_disable - Mask off interrupt generation on the NIC | 309 | * e1000_irq_disable - Mask off interrupt generation on the NIC |
266 | * @adapter: board private structure | 310 | * @adapter: board private structure |
@@ -329,6 +373,7 @@ e1000_release_hw_control(struct e1000_adapter *adapter) | |||
329 | { | 373 | { |
330 | uint32_t ctrl_ext; | 374 | uint32_t ctrl_ext; |
331 | uint32_t swsm; | 375 | uint32_t swsm; |
376 | uint32_t extcnf; | ||
332 | 377 | ||
333 | /* Let firmware take over control of h/w */ | 378 | /* Let firmware take over control of h/w */ |
334 | switch (adapter->hw.mac_type) { | 379 | switch (adapter->hw.mac_type) { |
@@ -343,6 +388,11 @@ e1000_release_hw_control(struct e1000_adapter *adapter) | |||
343 | swsm = E1000_READ_REG(&adapter->hw, SWSM); | 388 | swsm = E1000_READ_REG(&adapter->hw, SWSM); |
344 | E1000_WRITE_REG(&adapter->hw, SWSM, | 389 | E1000_WRITE_REG(&adapter->hw, SWSM, |
345 | swsm & ~E1000_SWSM_DRV_LOAD); | 390 | swsm & ~E1000_SWSM_DRV_LOAD); |
391 | case e1000_ich8lan: | ||
392 | extcnf = E1000_READ_REG(&adapter->hw, CTRL_EXT); | ||
393 | E1000_WRITE_REG(&adapter->hw, CTRL_EXT, | ||
394 | extcnf & ~E1000_CTRL_EXT_DRV_LOAD); | ||
395 | break; | ||
346 | default: | 396 | default: |
347 | break; | 397 | break; |
348 | } | 398 | } |
@@ -364,6 +414,7 @@ e1000_get_hw_control(struct e1000_adapter *adapter) | |||
364 | { | 414 | { |
365 | uint32_t ctrl_ext; | 415 | uint32_t ctrl_ext; |
366 | uint32_t swsm; | 416 | uint32_t swsm; |
417 | uint32_t extcnf; | ||
367 | /* Let firmware know the driver has taken over */ | 418 | /* Let firmware know the driver has taken over */ |
368 | switch (adapter->hw.mac_type) { | 419 | switch (adapter->hw.mac_type) { |
369 | case e1000_82571: | 420 | case e1000_82571: |
@@ -378,6 +429,11 @@ e1000_get_hw_control(struct e1000_adapter *adapter) | |||
378 | E1000_WRITE_REG(&adapter->hw, SWSM, | 429 | E1000_WRITE_REG(&adapter->hw, SWSM, |
379 | swsm | E1000_SWSM_DRV_LOAD); | 430 | swsm | E1000_SWSM_DRV_LOAD); |
380 | break; | 431 | break; |
432 | case e1000_ich8lan: | ||
433 | extcnf = E1000_READ_REG(&adapter->hw, EXTCNF_CTRL); | ||
434 | E1000_WRITE_REG(&adapter->hw, EXTCNF_CTRL, | ||
435 | extcnf | E1000_EXTCNF_CTRL_SWFLAG); | ||
436 | break; | ||
381 | default: | 437 | default: |
382 | break; | 438 | break; |
383 | } | 439 | } |
@@ -387,18 +443,10 @@ int | |||
387 | e1000_up(struct e1000_adapter *adapter) | 443 | e1000_up(struct e1000_adapter *adapter) |
388 | { | 444 | { |
389 | struct net_device *netdev = adapter->netdev; | 445 | struct net_device *netdev = adapter->netdev; |
390 | int i, err; | 446 | int i; |
391 | 447 | ||
392 | /* hardware has been reset, we need to reload some things */ | 448 | /* hardware has been reset, we need to reload some things */ |
393 | 449 | ||
394 | /* Reset the PHY if it was previously powered down */ | ||
395 | if (adapter->hw.media_type == e1000_media_type_copper) { | ||
396 | uint16_t mii_reg; | ||
397 | e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); | ||
398 | if (mii_reg & MII_CR_POWER_DOWN) | ||
399 | e1000_phy_hw_reset(&adapter->hw); | ||
400 | } | ||
401 | |||
402 | e1000_set_multi(netdev); | 450 | e1000_set_multi(netdev); |
403 | 451 | ||
404 | e1000_restore_vlan(adapter); | 452 | e1000_restore_vlan(adapter); |
@@ -415,24 +463,6 @@ e1000_up(struct e1000_adapter *adapter) | |||
415 | E1000_DESC_UNUSED(ring)); | 463 | E1000_DESC_UNUSED(ring)); |
416 | } | 464 | } |
417 | 465 | ||
418 | #ifdef CONFIG_PCI_MSI | ||
419 | if (adapter->hw.mac_type > e1000_82547_rev_2) { | ||
420 | adapter->have_msi = TRUE; | ||
421 | if ((err = pci_enable_msi(adapter->pdev))) { | ||
422 | DPRINTK(PROBE, ERR, | ||
423 | "Unable to allocate MSI interrupt Error: %d\n", err); | ||
424 | adapter->have_msi = FALSE; | ||
425 | } | ||
426 | } | ||
427 | #endif | ||
428 | if ((err = request_irq(adapter->pdev->irq, &e1000_intr, | ||
429 | IRQF_SHARED | IRQF_SAMPLE_RANDOM, | ||
430 | netdev->name, netdev))) { | ||
431 | DPRINTK(PROBE, ERR, | ||
432 | "Unable to allocate interrupt Error: %d\n", err); | ||
433 | return err; | ||
434 | } | ||
435 | |||
436 | adapter->tx_queue_len = netdev->tx_queue_len; | 466 | adapter->tx_queue_len = netdev->tx_queue_len; |
437 | 467 | ||
438 | mod_timer(&adapter->watchdog_timer, jiffies); | 468 | mod_timer(&adapter->watchdog_timer, jiffies); |
@@ -445,21 +475,60 @@ e1000_up(struct e1000_adapter *adapter) | |||
445 | return 0; | 475 | return 0; |
446 | } | 476 | } |
447 | 477 | ||
478 | /** | ||
479 | * e1000_power_up_phy - restore link in case the phy was powered down | ||
480 | * @adapter: address of board private structure | ||
481 | * | ||
482 | * The phy may be powered down to save power and turn off link when the | ||
483 | * driver is unloaded and wake on lan is not enabled (among others) | ||
484 | * *** this routine MUST be followed by a call to e1000_reset *** | ||
485 | * | ||
486 | **/ | ||
487 | |||
488 | static void e1000_power_up_phy(struct e1000_adapter *adapter) | ||
489 | { | ||
490 | uint16_t mii_reg = 0; | ||
491 | |||
492 | /* Just clear the power down bit to wake the phy back up */ | ||
493 | if (adapter->hw.media_type == e1000_media_type_copper) { | ||
494 | /* according to the manual, the phy will retain its | ||
495 | * settings across a power-down/up cycle */ | ||
496 | e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); | ||
497 | mii_reg &= ~MII_CR_POWER_DOWN; | ||
498 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg); | ||
499 | } | ||
500 | } | ||
501 | |||
502 | static void e1000_power_down_phy(struct e1000_adapter *adapter) | ||
503 | { | ||
504 | boolean_t mng_mode_enabled = (adapter->hw.mac_type >= e1000_82571) && | ||
505 | e1000_check_mng_mode(&adapter->hw); | ||
506 | /* Power down the PHY so no link is implied when interface is down | ||
507 | * The PHY cannot be powered down if any of the following is TRUE | ||
508 | * (a) WoL is enabled | ||
509 | * (b) AMT is active | ||
510 | * (c) SoL/IDER session is active */ | ||
511 | if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 && | ||
512 | adapter->hw.mac_type != e1000_ich8lan && | ||
513 | adapter->hw.media_type == e1000_media_type_copper && | ||
514 | !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN) && | ||
515 | !mng_mode_enabled && | ||
516 | !e1000_check_phy_reset_block(&adapter->hw)) { | ||
517 | uint16_t mii_reg = 0; | ||
518 | e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); | ||
519 | mii_reg |= MII_CR_POWER_DOWN; | ||
520 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg); | ||
521 | mdelay(1); | ||
522 | } | ||
523 | } | ||
524 | |||
448 | void | 525 | void |
449 | e1000_down(struct e1000_adapter *adapter) | 526 | e1000_down(struct e1000_adapter *adapter) |
450 | { | 527 | { |
451 | struct net_device *netdev = adapter->netdev; | 528 | struct net_device *netdev = adapter->netdev; |
452 | boolean_t mng_mode_enabled = (adapter->hw.mac_type >= e1000_82571) && | ||
453 | e1000_check_mng_mode(&adapter->hw); | ||
454 | 529 | ||
455 | e1000_irq_disable(adapter); | 530 | e1000_irq_disable(adapter); |
456 | 531 | ||
457 | free_irq(adapter->pdev->irq, netdev); | ||
458 | #ifdef CONFIG_PCI_MSI | ||
459 | if (adapter->hw.mac_type > e1000_82547_rev_2 && | ||
460 | adapter->have_msi == TRUE) | ||
461 | pci_disable_msi(adapter->pdev); | ||
462 | #endif | ||
463 | del_timer_sync(&adapter->tx_fifo_stall_timer); | 532 | del_timer_sync(&adapter->tx_fifo_stall_timer); |
464 | del_timer_sync(&adapter->watchdog_timer); | 533 | del_timer_sync(&adapter->watchdog_timer); |
465 | del_timer_sync(&adapter->phy_info_timer); | 534 | del_timer_sync(&adapter->phy_info_timer); |
@@ -476,23 +545,17 @@ e1000_down(struct e1000_adapter *adapter) | |||
476 | e1000_reset(adapter); | 545 | e1000_reset(adapter); |
477 | e1000_clean_all_tx_rings(adapter); | 546 | e1000_clean_all_tx_rings(adapter); |
478 | e1000_clean_all_rx_rings(adapter); | 547 | e1000_clean_all_rx_rings(adapter); |
548 | } | ||
479 | 549 | ||
480 | /* Power down the PHY so no link is implied when interface is down * | 550 | void |
481 | * The PHY cannot be powered down if any of the following is TRUE * | 551 | e1000_reinit_locked(struct e1000_adapter *adapter) |
482 | * (a) WoL is enabled | 552 | { |
483 | * (b) AMT is active | 553 | WARN_ON(in_interrupt()); |
484 | * (c) SoL/IDER session is active */ | 554 | while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) |
485 | if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 && | 555 | msleep(1); |
486 | adapter->hw.media_type == e1000_media_type_copper && | 556 | e1000_down(adapter); |
487 | !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN) && | 557 | e1000_up(adapter); |
488 | !mng_mode_enabled && | 558 | clear_bit(__E1000_RESETTING, &adapter->flags); |
489 | !e1000_check_phy_reset_block(&adapter->hw)) { | ||
490 | uint16_t mii_reg; | ||
491 | e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); | ||
492 | mii_reg |= MII_CR_POWER_DOWN; | ||
493 | e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg); | ||
494 | mdelay(1); | ||
495 | } | ||
496 | } | 559 | } |
497 | 560 | ||
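e1000_reinit_locked() above serialises resets behind the new __E1000_RESETTING flag instead of letting callers run e1000_down()/e1000_up() back to back unprotected. A stand-alone user-space analogue of that flag-guarded down/up sequence, using C11 atomics in place of the kernel's test_and_set_bit() (illustrative only):

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag resetting = ATOMIC_FLAG_INIT;

static void reinit_locked(void)
{
	while (atomic_flag_test_and_set(&resetting))
		;			/* the driver sleeps here: msleep(1) */
	puts("down");			/* e1000_down() */
	puts("up");			/* e1000_up() */
	atomic_flag_clear(&resetting);
}

int main(void)
{
	reinit_locked();
	return 0;
}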
498 | void | 561 | void |
@@ -518,6 +581,9 @@ e1000_reset(struct e1000_adapter *adapter) | |||
518 | case e1000_82573: | 581 | case e1000_82573: |
519 | pba = E1000_PBA_12K; | 582 | pba = E1000_PBA_12K; |
520 | break; | 583 | break; |
584 | case e1000_ich8lan: | ||
585 | pba = E1000_PBA_8K; | ||
586 | break; | ||
521 | default: | 587 | default: |
522 | pba = E1000_PBA_48K; | 588 | pba = E1000_PBA_48K; |
523 | break; | 589 | break; |
@@ -542,6 +608,12 @@ e1000_reset(struct e1000_adapter *adapter) | |||
542 | /* Set the FC high water mark to 90% of the FIFO size. | 608 | /* Set the FC high water mark to 90% of the FIFO size. |
543 | * Required to clear last 3 LSB */ | 609 | * Required to clear last 3 LSB */ |
544 | fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8; | 610 | fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8; |
611 | /* We can't use 90% on small FIFOs because the remainder | ||
612 | * would be less than 1 full frame. In this case, we size | ||
613 | * it to allow at least a full frame above the high water | ||
614 | * mark. */ | ||
615 | if (pba < E1000_PBA_16K) | ||
616 | fc_high_water_mark = (pba * 1024) - 1600; | ||
545 | 617 | ||
546 | adapter->hw.fc_high_water = fc_high_water_mark; | 618 | adapter->hw.fc_high_water = fc_high_water_mark; |
547 | adapter->hw.fc_low_water = fc_high_water_mark - 8; | 619 | adapter->hw.fc_low_water = fc_high_water_mark - 8; |
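pba above is the packet buffer size in kilobytes (E1000_PBA_48K, E1000_PBA_8K, ...), so (pba * 9216) / 10 is 90% of the FIFO in bytes and the & 0xFFF8 clears the last three bits as the comment requires. A worked check of the new small-FIFO override, reusing the patch's 1600-byte full-frame allowance:

#include <stdio.h>

static unsigned fc_high_water(unsigned pba_kb)
{
	unsigned mark = ((pba_kb * 9216) / 10) & 0xFFF8;	/* 90%, 8-byte aligned */

	if (pba_kb < 16)					/* e.g. ICH8's 8 KB FIFO */
		mark = pba_kb * 1024 - 1600;			/* keep a frame of headroom */
	return mark;
}

int main(void)
{
	printf("48 KB FIFO: %u\n", fc_high_water(48));	/* 44232 bytes */
	printf(" 8 KB FIFO: %u\n", fc_high_water(8));	/*  6592 bytes */
	return 0;
}

At 8 KB the unmodified formula would give 7368 bytes, leaving only 824 bytes above the high-water mark, which is less than one maximum-sized frame; hence the override.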
@@ -564,6 +636,23 @@ e1000_reset(struct e1000_adapter *adapter) | |||
564 | 636 | ||
565 | e1000_reset_adaptive(&adapter->hw); | 637 | e1000_reset_adaptive(&adapter->hw); |
566 | e1000_phy_get_info(&adapter->hw, &adapter->phy_info); | 638 | e1000_phy_get_info(&adapter->hw, &adapter->phy_info); |
639 | |||
640 | if (!adapter->smart_power_down && | ||
641 | (adapter->hw.mac_type == e1000_82571 || | ||
642 | adapter->hw.mac_type == e1000_82572)) { | ||
643 | uint16_t phy_data = 0; | ||
644 | /* speed up time to link by disabling smart power down, ignore | ||
645 | * the return value of this function because there is nothing | ||
646 | * different we would do if it failed */ | ||
647 | e1000_read_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT, | ||
648 | &phy_data); | ||
649 | phy_data &= ~IGP02E1000_PM_SPD; | ||
650 | e1000_write_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT, | ||
651 | phy_data); | ||
652 | } | ||
653 | |||
654 | if (adapter->hw.mac_type < e1000_ich8lan) | ||
655 | /* FIXME: this code is duplicate and wrong for PCI Express */ | ||
567 | if (adapter->en_mng_pt) { | 656 | if (adapter->en_mng_pt) { |
568 | manc = E1000_READ_REG(&adapter->hw, MANC); | 657 | manc = E1000_READ_REG(&adapter->hw, MANC); |
569 | manc |= (E1000_MANC_ARP_EN | E1000_MANC_EN_MNG2HOST); | 658 | manc |= (E1000_MANC_ARP_EN | E1000_MANC_EN_MNG2HOST); |
@@ -590,6 +679,7 @@ e1000_probe(struct pci_dev *pdev, | |||
590 | struct net_device *netdev; | 679 | struct net_device *netdev; |
591 | struct e1000_adapter *adapter; | 680 | struct e1000_adapter *adapter; |
592 | unsigned long mmio_start, mmio_len; | 681 | unsigned long mmio_start, mmio_len; |
682 | unsigned long flash_start, flash_len; | ||
593 | 683 | ||
594 | static int cards_found = 0; | 684 | static int cards_found = 0; |
595 | static int e1000_ksp3_port_a = 0; /* global ksp3 port a indication */ | 685 | static int e1000_ksp3_port_a = 0; /* global ksp3 port a indication */ |
@@ -599,10 +689,12 @@ e1000_probe(struct pci_dev *pdev, | |||
599 | if ((err = pci_enable_device(pdev))) | 689 | if ((err = pci_enable_device(pdev))) |
600 | return err; | 690 | return err; |
601 | 691 | ||
602 | if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) { | 692 | if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) && |
693 | !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) { | ||
603 | pci_using_dac = 1; | 694 | pci_using_dac = 1; |
604 | } else { | 695 | } else { |
605 | if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) { | 696 | if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) && |
697 | (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) { | ||
606 | E1000_ERR("No usable DMA configuration, aborting\n"); | 698 | E1000_ERR("No usable DMA configuration, aborting\n"); |
607 | return err; | 699 | return err; |
608 | } | 700 | } |
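The probe change above sets the consistent (coherent) DMA mask alongside the streaming mask and only claims 64-bit addressing when both calls succeed. A hedged sketch of that fallback using the same 2.6-era PCI DMA calls; not a drop-in replacement for the probe code:

#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int example_set_dma_masks(struct pci_dev *pdev, int *using_dac)
{
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
		*using_dac = 1;			/* 64-bit buffers are usable */
		return 0;
	}
	*using_dac = 0;
	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) ||
	    pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))
		return -EIO;			/* no usable DMA configuration */
	return 0;
}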
@@ -682,6 +774,19 @@ e1000_probe(struct pci_dev *pdev, | |||
682 | if ((err = e1000_sw_init(adapter))) | 774 | if ((err = e1000_sw_init(adapter))) |
683 | goto err_sw_init; | 775 | goto err_sw_init; |
684 | 776 | ||
777 | /* Flash BAR mapping must happen after e1000_sw_init | ||
778 | * because it depends on mac_type */ | ||
779 | if ((adapter->hw.mac_type == e1000_ich8lan) && | ||
780 | (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { | ||
781 | flash_start = pci_resource_start(pdev, 1); | ||
782 | flash_len = pci_resource_len(pdev, 1); | ||
783 | adapter->hw.flash_address = ioremap(flash_start, flash_len); | ||
784 | if (!adapter->hw.flash_address) { | ||
785 | err = -EIO; | ||
786 | goto err_flashmap; | ||
787 | } | ||
788 | } | ||
789 | |||
685 | if ((err = e1000_check_phy_reset_block(&adapter->hw))) | 790 | if ((err = e1000_check_phy_reset_block(&adapter->hw))) |
686 | DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n"); | 791 | DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n"); |
687 | 792 | ||
@@ -700,6 +805,8 @@ e1000_probe(struct pci_dev *pdev, | |||
700 | NETIF_F_HW_VLAN_TX | | 805 | NETIF_F_HW_VLAN_TX | |
701 | NETIF_F_HW_VLAN_RX | | 806 | NETIF_F_HW_VLAN_RX | |
702 | NETIF_F_HW_VLAN_FILTER; | 807 | NETIF_F_HW_VLAN_FILTER; |
808 | if (adapter->hw.mac_type == e1000_ich8lan) | ||
809 | netdev->features &= ~NETIF_F_HW_VLAN_FILTER; | ||
703 | } | 810 | } |
704 | 811 | ||
705 | #ifdef NETIF_F_TSO | 812 | #ifdef NETIF_F_TSO |
@@ -715,11 +822,17 @@ e1000_probe(struct pci_dev *pdev, | |||
715 | if (pci_using_dac) | 822 | if (pci_using_dac) |
716 | netdev->features |= NETIF_F_HIGHDMA; | 823 | netdev->features |= NETIF_F_HIGHDMA; |
717 | 824 | ||
718 | /* hard_start_xmit is safe against parallel locking */ | ||
719 | netdev->features |= NETIF_F_LLTX; | 825 | netdev->features |= NETIF_F_LLTX; |
720 | 826 | ||
721 | adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw); | 827 | adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw); |
722 | 828 | ||
829 | /* initialize eeprom parameters */ | ||
830 | |||
831 | if (e1000_init_eeprom_params(&adapter->hw)) { | ||
832 | E1000_ERR("EEPROM initialization failed\n"); | ||
833 | return -EIO; | ||
834 | } | ||
835 | |||
723 | /* before reading the EEPROM, reset the controller to | 836 | /* before reading the EEPROM, reset the controller to |
724 | * put the device in a known good starting state */ | 837 | * put the device in a known good starting state */ |
725 | 838 | ||
@@ -758,9 +871,6 @@ e1000_probe(struct pci_dev *pdev, | |||
758 | adapter->watchdog_timer.function = &e1000_watchdog; | 871 | adapter->watchdog_timer.function = &e1000_watchdog; |
759 | adapter->watchdog_timer.data = (unsigned long) adapter; | 872 | adapter->watchdog_timer.data = (unsigned long) adapter; |
760 | 873 | ||
761 | INIT_WORK(&adapter->watchdog_task, | ||
762 | (void (*)(void *))e1000_watchdog_task, adapter); | ||
763 | |||
764 | init_timer(&adapter->phy_info_timer); | 874 | init_timer(&adapter->phy_info_timer); |
765 | adapter->phy_info_timer.function = &e1000_update_phy_info; | 875 | adapter->phy_info_timer.function = &e1000_update_phy_info; |
766 | adapter->phy_info_timer.data = (unsigned long) adapter; | 876 | adapter->phy_info_timer.data = (unsigned long) adapter; |
@@ -790,6 +900,11 @@ e1000_probe(struct pci_dev *pdev, | |||
790 | EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data); | 900 | EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data); |
791 | eeprom_apme_mask = E1000_EEPROM_82544_APM; | 901 | eeprom_apme_mask = E1000_EEPROM_82544_APM; |
792 | break; | 902 | break; |
903 | case e1000_ich8lan: | ||
904 | e1000_read_eeprom(&adapter->hw, | ||
905 | EEPROM_INIT_CONTROL1_REG, 1, &eeprom_data); | ||
906 | eeprom_apme_mask = E1000_EEPROM_ICH8_APME; | ||
907 | break; | ||
793 | case e1000_82546: | 908 | case e1000_82546: |
794 | case e1000_82546_rev_3: | 909 | case e1000_82546_rev_3: |
795 | case e1000_82571: | 910 | case e1000_82571: |
@@ -849,6 +964,9 @@ e1000_probe(struct pci_dev *pdev, | |||
849 | return 0; | 964 | return 0; |
850 | 965 | ||
851 | err_register: | 966 | err_register: |
967 | if (adapter->hw.flash_address) | ||
968 | iounmap(adapter->hw.flash_address); | ||
969 | err_flashmap: | ||
852 | err_sw_init: | 970 | err_sw_init: |
853 | err_eeprom: | 971 | err_eeprom: |
854 | iounmap(adapter->hw.hw_addr); | 972 | iounmap(adapter->hw.hw_addr); |
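The new err_flashmap label above slots into probe()'s usual unwind ladder: each label undoes only what was set up before the failing step, in reverse order, and the flash mapping is released only if it exists. A small stand-alone illustration of that idiom, with malloc/free standing in for ioremap/iounmap:

#include <stdlib.h>

static int example_probe(void)
{
	void *hw_addr, *flash_addr;

	hw_addr = malloc(64);			/* stands in for ioremap of BAR 0 */
	if (!hw_addr)
		goto err_ioremap;

	flash_addr = malloc(64);		/* stands in for the ICH8 flash BAR */
	if (!flash_addr)
		goto err_flashmap;

	/* further setup would go here; failures jump to the labels below */

	free(flash_addr);			/* in the driver these stay mapped until remove() */
	free(hw_addr);
	return 0;

err_flashmap:
	free(hw_addr);
err_ioremap:
	return -1;
}

int main(void)
{
	return example_probe() ? 1 : 0;
}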
@@ -882,6 +1000,7 @@ e1000_remove(struct pci_dev *pdev) | |||
882 | flush_scheduled_work(); | 1000 | flush_scheduled_work(); |
883 | 1001 | ||
884 | if (adapter->hw.mac_type >= e1000_82540 && | 1002 | if (adapter->hw.mac_type >= e1000_82540 && |
1003 | adapter->hw.mac_type != e1000_ich8lan && | ||
885 | adapter->hw.media_type == e1000_media_type_copper) { | 1004 | adapter->hw.media_type == e1000_media_type_copper) { |
886 | manc = E1000_READ_REG(&adapter->hw, MANC); | 1005 | manc = E1000_READ_REG(&adapter->hw, MANC); |
887 | if (manc & E1000_MANC_SMBUS_EN) { | 1006 | if (manc & E1000_MANC_SMBUS_EN) { |
@@ -910,6 +1029,8 @@ e1000_remove(struct pci_dev *pdev) | |||
910 | #endif | 1029 | #endif |
911 | 1030 | ||
912 | iounmap(adapter->hw.hw_addr); | 1031 | iounmap(adapter->hw.hw_addr); |
1032 | if (adapter->hw.flash_address) | ||
1033 | iounmap(adapter->hw.flash_address); | ||
913 | pci_release_regions(pdev); | 1034 | pci_release_regions(pdev); |
914 | 1035 | ||
915 | free_netdev(netdev); | 1036 | free_netdev(netdev); |
@@ -947,7 +1068,7 @@ e1000_sw_init(struct e1000_adapter *adapter) | |||
947 | 1068 | ||
948 | pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); | 1069 | pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); |
949 | 1070 | ||
950 | adapter->rx_buffer_len = MAXIMUM_ETHERNET_FRAME_SIZE; | 1071 | adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; |
951 | adapter->rx_ps_bsize0 = E1000_RXBUFFER_128; | 1072 | adapter->rx_ps_bsize0 = E1000_RXBUFFER_128; |
952 | hw->max_frame_size = netdev->mtu + | 1073 | hw->max_frame_size = netdev->mtu + |
953 | ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; | 1074 | ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; |
@@ -960,13 +1081,6 @@ e1000_sw_init(struct e1000_adapter *adapter) | |||
960 | return -EIO; | 1081 | return -EIO; |
961 | } | 1082 | } |
962 | 1083 | ||
963 | /* initialize eeprom parameters */ | ||
964 | |||
965 | if (e1000_init_eeprom_params(hw)) { | ||
966 | E1000_ERR("EEPROM initialization failed\n"); | ||
967 | return -EIO; | ||
968 | } | ||
969 | |||
970 | switch (hw->mac_type) { | 1084 | switch (hw->mac_type) { |
971 | default: | 1085 | default: |
972 | break; | 1086 | break; |
@@ -1078,6 +1192,10 @@ e1000_open(struct net_device *netdev) | |||
1078 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1192 | struct e1000_adapter *adapter = netdev_priv(netdev); |
1079 | int err; | 1193 | int err; |
1080 | 1194 | ||
1195 | /* disallow open during test */ | ||
1196 | if (test_bit(__E1000_DRIVER_TESTING, &adapter->flags)) | ||
1197 | return -EBUSY; | ||
1198 | |||
1081 | /* allocate transmit descriptors */ | 1199 | /* allocate transmit descriptors */ |
1082 | 1200 | ||
1083 | if ((err = e1000_setup_all_tx_resources(adapter))) | 1201 | if ((err = e1000_setup_all_tx_resources(adapter))) |
@@ -1088,6 +1206,12 @@ e1000_open(struct net_device *netdev) | |||
1088 | if ((err = e1000_setup_all_rx_resources(adapter))) | 1206 | if ((err = e1000_setup_all_rx_resources(adapter))) |
1089 | goto err_setup_rx; | 1207 | goto err_setup_rx; |
1090 | 1208 | ||
1209 | err = e1000_request_irq(adapter); | ||
1210 | if (err) | ||
1211 | goto err_up; | ||
1212 | |||
1213 | e1000_power_up_phy(adapter); | ||
1214 | |||
1091 | if ((err = e1000_up(adapter))) | 1215 | if ((err = e1000_up(adapter))) |
1092 | goto err_up; | 1216 | goto err_up; |
1093 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; | 1217 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; |
@@ -1131,7 +1255,10 @@ e1000_close(struct net_device *netdev) | |||
1131 | { | 1255 | { |
1132 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1256 | struct e1000_adapter *adapter = netdev_priv(netdev); |
1133 | 1257 | ||
1258 | WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); | ||
1134 | e1000_down(adapter); | 1259 | e1000_down(adapter); |
1260 | e1000_power_down_phy(adapter); | ||
1261 | e1000_free_irq(adapter); | ||
1135 | 1262 | ||
1136 | e1000_free_all_tx_resources(adapter); | 1263 | e1000_free_all_tx_resources(adapter); |
1137 | e1000_free_all_rx_resources(adapter); | 1264 | e1000_free_all_rx_resources(adapter); |
@@ -1189,8 +1316,7 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter, | |||
1189 | int size; | 1316 | int size; |
1190 | 1317 | ||
1191 | size = sizeof(struct e1000_buffer) * txdr->count; | 1318 | size = sizeof(struct e1000_buffer) * txdr->count; |
1192 | 1319 | txdr->buffer_info = vmalloc(size); | |
1193 | txdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus)); | ||
1194 | if (!txdr->buffer_info) { | 1320 | if (!txdr->buffer_info) { |
1195 | DPRINTK(PROBE, ERR, | 1321 | DPRINTK(PROBE, ERR, |
1196 | "Unable to allocate memory for the transmit descriptor ring\n"); | 1322 | "Unable to allocate memory for the transmit descriptor ring\n"); |
@@ -1302,11 +1428,11 @@ e1000_configure_tx(struct e1000_adapter *adapter) | |||
1302 | tdba = adapter->tx_ring[0].dma; | 1428 | tdba = adapter->tx_ring[0].dma; |
1303 | tdlen = adapter->tx_ring[0].count * | 1429 | tdlen = adapter->tx_ring[0].count * |
1304 | sizeof(struct e1000_tx_desc); | 1430 | sizeof(struct e1000_tx_desc); |
1305 | E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL)); | ||
1306 | E1000_WRITE_REG(hw, TDBAH, (tdba >> 32)); | ||
1307 | E1000_WRITE_REG(hw, TDLEN, tdlen); | 1431 | E1000_WRITE_REG(hw, TDLEN, tdlen); |
1308 | E1000_WRITE_REG(hw, TDH, 0); | 1432 | E1000_WRITE_REG(hw, TDBAH, (tdba >> 32)); |
1433 | E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL)); | ||
1309 | E1000_WRITE_REG(hw, TDT, 0); | 1434 | E1000_WRITE_REG(hw, TDT, 0); |
1435 | E1000_WRITE_REG(hw, TDH, 0); | ||
1310 | adapter->tx_ring[0].tdh = E1000_TDH; | 1436 | adapter->tx_ring[0].tdh = E1000_TDH; |
1311 | adapter->tx_ring[0].tdt = E1000_TDT; | 1437 | adapter->tx_ring[0].tdt = E1000_TDT; |
1312 | break; | 1438 | break; |
@@ -1418,7 +1544,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter, | |||
1418 | int size, desc_len; | 1544 | int size, desc_len; |
1419 | 1545 | ||
1420 | size = sizeof(struct e1000_buffer) * rxdr->count; | 1546 | size = sizeof(struct e1000_buffer) * rxdr->count; |
1421 | rxdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus)); | 1547 | rxdr->buffer_info = vmalloc(size); |
1422 | if (!rxdr->buffer_info) { | 1548 | if (!rxdr->buffer_info) { |
1423 | DPRINTK(PROBE, ERR, | 1549 | DPRINTK(PROBE, ERR, |
1424 | "Unable to allocate memory for the receive descriptor ring\n"); | 1550 | "Unable to allocate memory for the receive descriptor ring\n"); |
@@ -1560,9 +1686,6 @@ e1000_setup_rctl(struct e1000_adapter *adapter) | |||
1560 | E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | | 1686 | E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | |
1561 | (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT); | 1687 | (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT); |
1562 | 1688 | ||
1563 | if (adapter->hw.mac_type > e1000_82543) | ||
1564 | rctl |= E1000_RCTL_SECRC; | ||
1565 | |||
1566 | if (adapter->hw.tbi_compatibility_on == 1) | 1689 | if (adapter->hw.tbi_compatibility_on == 1) |
1567 | rctl |= E1000_RCTL_SBP; | 1690 | rctl |= E1000_RCTL_SBP; |
1568 | else | 1691 | else |
@@ -1628,7 +1751,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter) | |||
1628 | rfctl |= E1000_RFCTL_IPV6_DIS; | 1751 | rfctl |= E1000_RFCTL_IPV6_DIS; |
1629 | E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl); | 1752 | E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl); |
1630 | 1753 | ||
1631 | rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC; | 1754 | rctl |= E1000_RCTL_DTYP_PS; |
1632 | 1755 | ||
1633 | psrctl |= adapter->rx_ps_bsize0 >> | 1756 | psrctl |= adapter->rx_ps_bsize0 >> |
1634 | E1000_PSRCTL_BSIZE0_SHIFT; | 1757 | E1000_PSRCTL_BSIZE0_SHIFT; |
@@ -1712,11 +1835,11 @@ e1000_configure_rx(struct e1000_adapter *adapter) | |||
1712 | case 1: | 1835 | case 1: |
1713 | default: | 1836 | default: |
1714 | rdba = adapter->rx_ring[0].dma; | 1837 | rdba = adapter->rx_ring[0].dma; |
1715 | E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL)); | ||
1716 | E1000_WRITE_REG(hw, RDBAH, (rdba >> 32)); | ||
1717 | E1000_WRITE_REG(hw, RDLEN, rdlen); | 1838 | E1000_WRITE_REG(hw, RDLEN, rdlen); |
1718 | E1000_WRITE_REG(hw, RDH, 0); | 1839 | E1000_WRITE_REG(hw, RDBAH, (rdba >> 32)); |
1840 | E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL)); | ||
1719 | E1000_WRITE_REG(hw, RDT, 0); | 1841 | E1000_WRITE_REG(hw, RDT, 0); |
1842 | E1000_WRITE_REG(hw, RDH, 0); | ||
1720 | adapter->rx_ring[0].rdh = E1000_RDH; | 1843 | adapter->rx_ring[0].rdh = E1000_RDH; |
1721 | adapter->rx_ring[0].rdt = E1000_RDT; | 1844 | adapter->rx_ring[0].rdt = E1000_RDT; |
1722 | break; | 1845 | break; |
@@ -1741,9 +1864,6 @@ e1000_configure_rx(struct e1000_adapter *adapter) | |||
1741 | E1000_WRITE_REG(hw, RXCSUM, rxcsum); | 1864 | E1000_WRITE_REG(hw, RXCSUM, rxcsum); |
1742 | } | 1865 | } |
1743 | 1866 | ||
1744 | if (hw->mac_type == e1000_82573) | ||
1745 | E1000_WRITE_REG(hw, ERT, 0x0100); | ||
1746 | |||
1747 | /* Enable Receives */ | 1867 | /* Enable Receives */ |
1748 | E1000_WRITE_REG(hw, RCTL, rctl); | 1868 | E1000_WRITE_REG(hw, RCTL, rctl); |
1749 | } | 1869 | } |
@@ -2083,6 +2203,12 @@ e1000_set_multi(struct net_device *netdev) | |||
2083 | uint32_t rctl; | 2203 | uint32_t rctl; |
2084 | uint32_t hash_value; | 2204 | uint32_t hash_value; |
2085 | int i, rar_entries = E1000_RAR_ENTRIES; | 2205 | int i, rar_entries = E1000_RAR_ENTRIES; |
2206 | int mta_reg_count = (hw->mac_type == e1000_ich8lan) ? | ||
2207 | E1000_NUM_MTA_REGISTERS_ICH8LAN : | ||
2208 | E1000_NUM_MTA_REGISTERS; | ||
2209 | |||
2210 | if (adapter->hw.mac_type == e1000_ich8lan) | ||
2211 | rar_entries = E1000_RAR_ENTRIES_ICH8LAN; | ||
2086 | 2212 | ||
2087 | /* reserve RAR[14] for LAA over-write work-around */ | 2213 | /* reserve RAR[14] for LAA over-write work-around */ |
2088 | if (adapter->hw.mac_type == e1000_82571) | 2214 | if (adapter->hw.mac_type == e1000_82571) |
@@ -2121,14 +2247,18 @@ e1000_set_multi(struct net_device *netdev) | |||
2121 | mc_ptr = mc_ptr->next; | 2247 | mc_ptr = mc_ptr->next; |
2122 | } else { | 2248 | } else { |
2123 | E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0); | 2249 | E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0); |
2250 | E1000_WRITE_FLUSH(hw); | ||
2124 | E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0); | 2251 | E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0); |
2252 | E1000_WRITE_FLUSH(hw); | ||
2125 | } | 2253 | } |
2126 | } | 2254 | } |
2127 | 2255 | ||
2128 | /* clear the old settings from the multicast hash table */ | 2256 | /* clear the old settings from the multicast hash table */ |
2129 | 2257 | ||
2130 | for (i = 0; i < E1000_NUM_MTA_REGISTERS; i++) | 2258 | for (i = 0; i < mta_reg_count; i++) { |
2131 | E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); | 2259 | E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); |
2260 | E1000_WRITE_FLUSH(hw); | ||
2261 | } | ||
2132 | 2262 | ||
2133 | /* load any remaining addresses into the hash table */ | 2263 | /* load any remaining addresses into the hash table */ |
2134 | 2264 | ||
@@ -2201,19 +2331,19 @@ static void | |||
2201 | e1000_watchdog(unsigned long data) | 2331 | e1000_watchdog(unsigned long data) |
2202 | { | 2332 | { |
2203 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; | 2333 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; |
2204 | |||
2205 | /* Do the rest outside of interrupt context */ | ||
2206 | schedule_work(&adapter->watchdog_task); | ||
2207 | } | ||
2208 | |||
2209 | static void | ||
2210 | e1000_watchdog_task(struct e1000_adapter *adapter) | ||
2211 | { | ||
2212 | struct net_device *netdev = adapter->netdev; | 2334 | struct net_device *netdev = adapter->netdev; |
2213 | struct e1000_tx_ring *txdr = adapter->tx_ring; | 2335 | struct e1000_tx_ring *txdr = adapter->tx_ring; |
2214 | uint32_t link, tctl; | 2336 | uint32_t link, tctl; |
2215 | 2337 | int32_t ret_val; | |
2216 | e1000_check_for_link(&adapter->hw); | 2338 | |
2339 | ret_val = e1000_check_for_link(&adapter->hw); | ||
2340 | if ((ret_val == E1000_ERR_PHY) && | ||
2341 | (adapter->hw.phy_type == e1000_phy_igp_3) && | ||
2342 | (E1000_READ_REG(&adapter->hw, CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { | ||
2343 | /* See e1000_kumeran_lock_loss_workaround() */ | ||
2344 | DPRINTK(LINK, INFO, | ||
2345 | "Gigabit has been disabled, downgrading speed\n"); | ||
2346 | } | ||
2217 | if (adapter->hw.mac_type == e1000_82573) { | 2347 | if (adapter->hw.mac_type == e1000_82573) { |
2218 | e1000_enable_tx_pkt_filtering(&adapter->hw); | 2348 | e1000_enable_tx_pkt_filtering(&adapter->hw); |
2219 | if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id) | 2349 | if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id) |
@@ -2394,7 +2524,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
2394 | uint8_t ipcss, ipcso, tucss, tucso, hdr_len; | 2524 | uint8_t ipcss, ipcso, tucss, tucso, hdr_len; |
2395 | int err; | 2525 | int err; |
2396 | 2526 | ||
2397 | if (skb_shinfo(skb)->gso_size) { | 2527 | if (skb_is_gso(skb)) { |
2398 | if (skb_header_cloned(skb)) { | 2528 | if (skb_header_cloned(skb)) { |
2399 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); | 2529 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); |
2400 | if (err) | 2530 | if (err) |
@@ -2519,7 +2649,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, | |||
2519 | * tso gets written back prematurely before the data is fully | 2649 | * tso gets written back prematurely before the data is fully |
2520 | * DMA'd to the controller */ | 2650 | * DMA'd to the controller */ |
2521 | if (!skb->data_len && tx_ring->last_tx_tso && | 2651 | if (!skb->data_len && tx_ring->last_tx_tso && |
2522 | !skb_shinfo(skb)->gso_size) { | 2652 | !skb_is_gso(skb)) { |
2523 | tx_ring->last_tx_tso = 0; | 2653 | tx_ring->last_tx_tso = 0; |
2524 | size -= 4; | 2654 | size -= 4; |
2525 | } | 2655 | } |
@@ -2779,9 +2909,10 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
2779 | case e1000_82571: | 2909 | case e1000_82571: |
2780 | case e1000_82572: | 2910 | case e1000_82572: |
2781 | case e1000_82573: | 2911 | case e1000_82573: |
2912 | case e1000_ich8lan: | ||
2782 | pull_size = min((unsigned int)4, skb->data_len); | 2913 | pull_size = min((unsigned int)4, skb->data_len); |
2783 | if (!__pskb_pull_tail(skb, pull_size)) { | 2914 | if (!__pskb_pull_tail(skb, pull_size)) { |
2784 | printk(KERN_ERR | 2915 | DPRINTK(DRV, ERR, |
2785 | "__pskb_pull_tail failed.\n"); | 2916 | "__pskb_pull_tail failed.\n"); |
2786 | dev_kfree_skb_any(skb); | 2917 | dev_kfree_skb_any(skb); |
2787 | return NETDEV_TX_OK; | 2918 | return NETDEV_TX_OK; |
@@ -2806,8 +2937,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
2806 | 2937 | ||
2807 | #ifdef NETIF_F_TSO | 2938 | #ifdef NETIF_F_TSO |
2808 | /* Controller Erratum workaround */ | 2939 | /* Controller Erratum workaround */ |
2809 | if (!skb->data_len && tx_ring->last_tx_tso && | 2940 | if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) |
2810 | !skb_shinfo(skb)->gso_size) | ||
2811 | count++; | 2941 | count++; |
2812 | #endif | 2942 | #endif |
2813 | 2943 | ||
@@ -2919,8 +3049,7 @@ e1000_reset_task(struct net_device *netdev) | |||
2919 | { | 3049 | { |
2920 | struct e1000_adapter *adapter = netdev_priv(netdev); | 3050 | struct e1000_adapter *adapter = netdev_priv(netdev); |
2921 | 3051 | ||
2922 | e1000_down(adapter); | 3052 | e1000_reinit_locked(adapter); |
2923 | e1000_up(adapter); | ||
2924 | } | 3053 | } |
2925 | 3054 | ||
2926 | /** | 3055 | /** |
@@ -2964,6 +3093,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
2964 | /* Adapter-specific max frame size limits. */ | 3093 | /* Adapter-specific max frame size limits. */ |
2965 | switch (adapter->hw.mac_type) { | 3094 | switch (adapter->hw.mac_type) { |
2966 | case e1000_undefined ... e1000_82542_rev2_1: | 3095 | case e1000_undefined ... e1000_82542_rev2_1: |
3096 | case e1000_ich8lan: | ||
2967 | if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) { | 3097 | if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) { |
2968 | DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n"); | 3098 | DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n"); |
2969 | return -EINVAL; | 3099 | return -EINVAL; |
@@ -3018,7 +3148,6 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
3018 | adapter->rx_buffer_len = E1000_RXBUFFER_16384; | 3148 | adapter->rx_buffer_len = E1000_RXBUFFER_16384; |
3019 | 3149 | ||
3020 | /* adjust allocation if LPE protects us, and we aren't using SBP */ | 3150 | /* adjust allocation if LPE protects us, and we aren't using SBP */ |
3021 | #define MAXIMUM_ETHERNET_VLAN_SIZE 1522 | ||
3022 | if (!adapter->hw.tbi_compatibility_on && | 3151 | if (!adapter->hw.tbi_compatibility_on && |
3023 | ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) || | 3152 | ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) || |
3024 | (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) | 3153 | (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) |
@@ -3026,10 +3155,8 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
3026 | 3155 | ||
3027 | netdev->mtu = new_mtu; | 3156 | netdev->mtu = new_mtu; |
3028 | 3157 | ||
3029 | if (netif_running(netdev)) { | 3158 | if (netif_running(netdev)) |
3030 | e1000_down(adapter); | 3159 | e1000_reinit_locked(adapter); |
3031 | e1000_up(adapter); | ||
3032 | } | ||
3033 | 3160 | ||
3034 | adapter->hw.max_frame_size = max_frame; | 3161 | adapter->hw.max_frame_size = max_frame; |
3035 | 3162 | ||
@@ -3074,12 +3201,15 @@ e1000_update_stats(struct e1000_adapter *adapter) | |||
3074 | adapter->stats.bprc += E1000_READ_REG(hw, BPRC); | 3201 | adapter->stats.bprc += E1000_READ_REG(hw, BPRC); |
3075 | adapter->stats.mprc += E1000_READ_REG(hw, MPRC); | 3202 | adapter->stats.mprc += E1000_READ_REG(hw, MPRC); |
3076 | adapter->stats.roc += E1000_READ_REG(hw, ROC); | 3203 | adapter->stats.roc += E1000_READ_REG(hw, ROC); |
3204 | |||
3205 | if (adapter->hw.mac_type != e1000_ich8lan) { | ||
3077 | adapter->stats.prc64 += E1000_READ_REG(hw, PRC64); | 3206 | adapter->stats.prc64 += E1000_READ_REG(hw, PRC64); |
3078 | adapter->stats.prc127 += E1000_READ_REG(hw, PRC127); | 3207 | adapter->stats.prc127 += E1000_READ_REG(hw, PRC127); |
3079 | adapter->stats.prc255 += E1000_READ_REG(hw, PRC255); | 3208 | adapter->stats.prc255 += E1000_READ_REG(hw, PRC255); |
3080 | adapter->stats.prc511 += E1000_READ_REG(hw, PRC511); | 3209 | adapter->stats.prc511 += E1000_READ_REG(hw, PRC511); |
3081 | adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023); | 3210 | adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023); |
3082 | adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522); | 3211 | adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522); |
3212 | } | ||
3083 | 3213 | ||
3084 | adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS); | 3214 | adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS); |
3085 | adapter->stats.mpc += E1000_READ_REG(hw, MPC); | 3215 | adapter->stats.mpc += E1000_READ_REG(hw, MPC); |
@@ -3107,12 +3237,16 @@ e1000_update_stats(struct e1000_adapter *adapter) | |||
3107 | adapter->stats.totl += E1000_READ_REG(hw, TOTL); | 3237 | adapter->stats.totl += E1000_READ_REG(hw, TOTL); |
3108 | adapter->stats.toth += E1000_READ_REG(hw, TOTH); | 3238 | adapter->stats.toth += E1000_READ_REG(hw, TOTH); |
3109 | adapter->stats.tpr += E1000_READ_REG(hw, TPR); | 3239 | adapter->stats.tpr += E1000_READ_REG(hw, TPR); |
3240 | |||
3241 | if (adapter->hw.mac_type != e1000_ich8lan) { | ||
3110 | adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64); | 3242 | adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64); |
3111 | adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127); | 3243 | adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127); |
3112 | adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255); | 3244 | adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255); |
3113 | adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511); | 3245 | adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511); |
3114 | adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023); | 3246 | adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023); |
3115 | adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522); | 3247 | adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522); |
3248 | } | ||
3249 | |||
3116 | adapter->stats.mptc += E1000_READ_REG(hw, MPTC); | 3250 | adapter->stats.mptc += E1000_READ_REG(hw, MPTC); |
3117 | adapter->stats.bptc += E1000_READ_REG(hw, BPTC); | 3251 | adapter->stats.bptc += E1000_READ_REG(hw, BPTC); |
3118 | 3252 | ||
@@ -3134,6 +3268,8 @@ e1000_update_stats(struct e1000_adapter *adapter) | |||
3134 | if (hw->mac_type > e1000_82547_rev_2) { | 3268 | if (hw->mac_type > e1000_82547_rev_2) { |
3135 | adapter->stats.iac += E1000_READ_REG(hw, IAC); | 3269 | adapter->stats.iac += E1000_READ_REG(hw, IAC); |
3136 | adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC); | 3270 | adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC); |
3271 | |||
3272 | if (adapter->hw.mac_type != e1000_ich8lan) { | ||
3137 | adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC); | 3273 | adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC); |
3138 | adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC); | 3274 | adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC); |
3139 | adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC); | 3275 | adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC); |
@@ -3141,6 +3277,7 @@ e1000_update_stats(struct e1000_adapter *adapter) | |||
3141 | adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC); | 3277 | adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC); |
3142 | adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC); | 3278 | adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC); |
3143 | adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC); | 3279 | adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC); |
3280 | } | ||
3144 | } | 3281 | } |
3145 | 3282 | ||
3146 | /* Fill out the OS statistics structure */ | 3283 | /* Fill out the OS statistics structure */ |
@@ -3249,8 +3386,8 @@ e1000_intr(int irq, void *data, struct pt_regs *regs) | |||
3249 | E1000_WRITE_REG(hw, IMC, ~0); | 3386 | E1000_WRITE_REG(hw, IMC, ~0); |
3250 | E1000_WRITE_FLUSH(hw); | 3387 | E1000_WRITE_FLUSH(hw); |
3251 | } | 3388 | } |
3252 | if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0]))) | 3389 | if (likely(netif_rx_schedule_prep(netdev))) |
3253 | __netif_rx_schedule(&adapter->polling_netdev[0]); | 3390 | __netif_rx_schedule(netdev); |
3254 | else | 3391 | else |
3255 | e1000_irq_enable(adapter); | 3392 | e1000_irq_enable(adapter); |
3256 | #else | 3393 | #else |
@@ -3293,34 +3430,26 @@ e1000_clean(struct net_device *poll_dev, int *budget) | |||
3293 | { | 3430 | { |
3294 | struct e1000_adapter *adapter; | 3431 | struct e1000_adapter *adapter; |
3295 | int work_to_do = min(*budget, poll_dev->quota); | 3432 | int work_to_do = min(*budget, poll_dev->quota); |
3296 | int tx_cleaned = 0, i = 0, work_done = 0; | 3433 | int tx_cleaned = 0, work_done = 0; |
3297 | 3434 | ||
3298 | /* Must NOT use netdev_priv macro here. */ | 3435 | /* Must NOT use netdev_priv macro here. */ |
3299 | adapter = poll_dev->priv; | 3436 | adapter = poll_dev->priv; |
3300 | 3437 | ||
3301 | /* Keep link state information with original netdev */ | 3438 | /* Keep link state information with original netdev */ |
3302 | if (!netif_carrier_ok(adapter->netdev)) | 3439 | if (!netif_carrier_ok(poll_dev)) |
3303 | goto quit_polling; | 3440 | goto quit_polling; |
3304 | 3441 | ||
3305 | while (poll_dev != &adapter->polling_netdev[i]) { | 3442 | /* e1000_clean is called per-cpu. This lock protects |
3306 | i++; | 3443 | * tx_ring[0] from being cleaned by multiple cpus |
3307 | BUG_ON(i == adapter->num_rx_queues); | 3444 | * simultaneously. A failure obtaining the lock means |
3445 | * tx_ring[0] is currently being cleaned anyway. */ | ||
3446 | if (spin_trylock(&adapter->tx_queue_lock)) { | ||
3447 | tx_cleaned = e1000_clean_tx_irq(adapter, | ||
3448 | &adapter->tx_ring[0]); | ||
3449 | spin_unlock(&adapter->tx_queue_lock); | ||
3308 | } | 3450 | } |
3309 | 3451 | ||
3310 | if (likely(adapter->num_tx_queues == 1)) { | 3452 | adapter->clean_rx(adapter, &adapter->rx_ring[0], |
3311 | /* e1000_clean is called per-cpu. This lock protects | ||
3312 | * tx_ring[0] from being cleaned by multiple cpus | ||
3313 | * simultaneously. A failure obtaining the lock means | ||
3314 | * tx_ring[0] is currently being cleaned anyway. */ | ||
3315 | if (spin_trylock(&adapter->tx_queue_lock)) { | ||
3316 | tx_cleaned = e1000_clean_tx_irq(adapter, | ||
3317 | &adapter->tx_ring[0]); | ||
3318 | spin_unlock(&adapter->tx_queue_lock); | ||
3319 | } | ||
3320 | } else | ||
3321 | tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]); | ||
3322 | |||
3323 | adapter->clean_rx(adapter, &adapter->rx_ring[i], | ||
3324 | &work_done, work_to_do); | 3453 | &work_done, work_to_do); |
3325 | 3454 | ||
3326 | *budget -= work_done; | 3455 | *budget -= work_done; |
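The trylock above embodies the comment in this hunk: if another CPU already holds tx_queue_lock it is already cleaning tx_ring[0], so the poll routine skips the Tx work rather than spinning for it. A user-space analogue with pthread_mutex_trylock() standing in for spin_trylock() (illustration only):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tx_queue_lock = PTHREAD_MUTEX_INITIALIZER;

static int clean_tx_if_idle(void)
{
	if (pthread_mutex_trylock(&tx_queue_lock) != 0)
		return 0;			/* another CPU is cleaning; skip */
	puts("cleaning tx_ring[0]");		/* e1000_clean_tx_irq() */
	pthread_mutex_unlock(&tx_queue_lock);
	return 1;
}

int main(void)
{
	return clean_tx_if_idle() ? 0 : 1;
}

Blocking here would stall the NAPI poll for no benefit; whoever holds the lock is already doing the work.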
@@ -3328,7 +3457,7 @@ e1000_clean(struct net_device *poll_dev, int *budget) | |||
3328 | 3457 | ||
3329 | /* If no Tx and not enough Rx work done, exit the polling mode */ | 3458 | /* If no Tx and not enough Rx work done, exit the polling mode */ |
3330 | if ((!tx_cleaned && (work_done == 0)) || | 3459 | if ((!tx_cleaned && (work_done == 0)) || |
3331 | !netif_running(adapter->netdev)) { | 3460 | !netif_running(poll_dev)) { |
3332 | quit_polling: | 3461 | quit_polling: |
3333 | netif_rx_complete(poll_dev); | 3462 | netif_rx_complete(poll_dev); |
3334 | e1000_irq_enable(adapter); | 3463 | e1000_irq_enable(adapter); |
@@ -3543,11 +3672,15 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
3543 | 3672 | ||
3544 | length = le16_to_cpu(rx_desc->length); | 3673 | length = le16_to_cpu(rx_desc->length); |
3545 | 3674 | ||
3675 | /* adjust length to remove Ethernet CRC */ | ||
3676 | length -= 4; | ||
3677 | |||
3546 | if (unlikely(!(status & E1000_RXD_STAT_EOP))) { | 3678 | if (unlikely(!(status & E1000_RXD_STAT_EOP))) { |
3547 | /* All receives must fit into a single buffer */ | 3679 | /* All receives must fit into a single buffer */ |
3548 | E1000_DBG("%s: Receive packet consumed multiple" | 3680 | E1000_DBG("%s: Receive packet consumed multiple" |
3549 | " buffers\n", netdev->name); | 3681 | " buffers\n", netdev->name); |
3550 | dev_kfree_skb_irq(skb); | 3682 | /* recycle */ |
3683 | buffer_info-> skb = skb; | ||
3551 | goto next_desc; | 3684 | goto next_desc; |
3552 | } | 3685 | } |
3553 | 3686 | ||
@@ -3675,7 +3808,6 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
3675 | buffer_info = &rx_ring->buffer_info[i]; | 3808 | buffer_info = &rx_ring->buffer_info[i]; |
3676 | 3809 | ||
3677 | while (staterr & E1000_RXD_STAT_DD) { | 3810 | while (staterr & E1000_RXD_STAT_DD) { |
3678 | buffer_info = &rx_ring->buffer_info[i]; | ||
3679 | ps_page = &rx_ring->ps_page[i]; | 3811 | ps_page = &rx_ring->ps_page[i]; |
3680 | ps_page_dma = &rx_ring->ps_page_dma[i]; | 3812 | ps_page_dma = &rx_ring->ps_page_dma[i]; |
3681 | #ifdef CONFIG_E1000_NAPI | 3813 | #ifdef CONFIG_E1000_NAPI |
@@ -3747,8 +3879,9 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
3747 | pci_dma_sync_single_for_device(pdev, | 3879 | pci_dma_sync_single_for_device(pdev, |
3748 | ps_page_dma->ps_page_dma[0], | 3880 | ps_page_dma->ps_page_dma[0], |
3749 | PAGE_SIZE, PCI_DMA_FROMDEVICE); | 3881 | PAGE_SIZE, PCI_DMA_FROMDEVICE); |
3882 | /* remove the CRC */ | ||
3883 | l1 -= 4; | ||
3750 | skb_put(skb, l1); | 3884 | skb_put(skb, l1); |
3751 | length += l1; | ||
3752 | goto copydone; | 3885 | goto copydone; |
3753 | } /* if */ | 3886 | } /* if */ |
3754 | } | 3887 | } |
@@ -3767,6 +3900,10 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
3767 | skb->truesize += length; | 3900 | skb->truesize += length; |
3768 | } | 3901 | } |
3769 | 3902 | ||
3903 | /* strip the ethernet crc, problem is we're using pages now so | ||
3904 | * this whole operation can get a little cpu intensive */ | ||
3905 | pskb_trim(skb, skb->len - 4); | ||
3906 | |||
3770 | copydone: | 3907 | copydone: |
3771 | e1000_rx_checksum(adapter, staterr, | 3908 | e1000_rx_checksum(adapter, staterr, |
3772 | le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb); | 3909 | le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb); |
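Together with the E1000_RCTL_SECRC removals earlier in this patch, these hunks move Ethernet CRC stripping from hardware into the driver: the 4-byte FCS now arrives in the receive buffer and is trimmed with length -= 4, l1 -= 4, or pskb_trim() on the page-based path. The arithmetic, pinned down in a trivial stand-alone check:

#include <stdio.h>

#define ETHERNET_FCS_SIZE 4

int main(void)
{
	unsigned wire_len = 1522;	/* max VLAN-tagged frame incl. FCS */

	printf("delivered to the stack: %u bytes\n",
	       wire_len - ETHERNET_FCS_SIZE);	/* 1518 */
	return 0;
}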
@@ -4180,10 +4317,9 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |||
4180 | return retval; | 4317 | return retval; |
4181 | } | 4318 | } |
4182 | } | 4319 | } |
4183 | if (netif_running(adapter->netdev)) { | 4320 | if (netif_running(adapter->netdev)) |
4184 | e1000_down(adapter); | 4321 | e1000_reinit_locked(adapter); |
4185 | e1000_up(adapter); | 4322 | else |
4186 | } else | ||
4187 | e1000_reset(adapter); | 4323 | e1000_reset(adapter); |
4188 | break; | 4324 | break; |
4189 | case M88E1000_PHY_SPEC_CTRL: | 4325 | case M88E1000_PHY_SPEC_CTRL: |
@@ -4200,10 +4336,9 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |||
4200 | case PHY_CTRL: | 4336 | case PHY_CTRL: |
4201 | if (mii_reg & MII_CR_POWER_DOWN) | 4337 | if (mii_reg & MII_CR_POWER_DOWN) |
4202 | break; | 4338 | break; |
4203 | if (netif_running(adapter->netdev)) { | 4339 | if (netif_running(adapter->netdev)) |
4204 | e1000_down(adapter); | 4340 | e1000_reinit_locked(adapter); |
4205 | e1000_up(adapter); | 4341 | else |
4206 | } else | ||
4207 | e1000_reset(adapter); | 4342 | e1000_reset(adapter); |
4208 | break; | 4343 | break; |
4209 | } | 4344 | } |
@@ -4277,18 +4412,21 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) | |||
4277 | ctrl |= E1000_CTRL_VME; | 4412 | ctrl |= E1000_CTRL_VME; |
4278 | E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); | 4413 | E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); |
4279 | 4414 | ||
4415 | if (adapter->hw.mac_type != e1000_ich8lan) { | ||
4280 | /* enable VLAN receive filtering */ | 4416 | /* enable VLAN receive filtering */ |
4281 | rctl = E1000_READ_REG(&adapter->hw, RCTL); | 4417 | rctl = E1000_READ_REG(&adapter->hw, RCTL); |
4282 | rctl |= E1000_RCTL_VFE; | 4418 | rctl |= E1000_RCTL_VFE; |
4283 | rctl &= ~E1000_RCTL_CFIEN; | 4419 | rctl &= ~E1000_RCTL_CFIEN; |
4284 | E1000_WRITE_REG(&adapter->hw, RCTL, rctl); | 4420 | E1000_WRITE_REG(&adapter->hw, RCTL, rctl); |
4285 | e1000_update_mng_vlan(adapter); | 4421 | e1000_update_mng_vlan(adapter); |
4422 | } | ||
4286 | } else { | 4423 | } else { |
4287 | /* disable VLAN tag insert/strip */ | 4424 | /* disable VLAN tag insert/strip */ |
4288 | ctrl = E1000_READ_REG(&adapter->hw, CTRL); | 4425 | ctrl = E1000_READ_REG(&adapter->hw, CTRL); |
4289 | ctrl &= ~E1000_CTRL_VME; | 4426 | ctrl &= ~E1000_CTRL_VME; |
4290 | E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); | 4427 | E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); |
4291 | 4428 | ||
4429 | if (adapter->hw.mac_type != e1000_ich8lan) { | ||
4292 | /* disable VLAN filtering */ | 4430 | /* disable VLAN filtering */ |
4293 | rctl = E1000_READ_REG(&adapter->hw, RCTL); | 4431 | rctl = E1000_READ_REG(&adapter->hw, RCTL); |
4294 | rctl &= ~E1000_RCTL_VFE; | 4432 | rctl &= ~E1000_RCTL_VFE; |
@@ -4297,6 +4435,7 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) | |||
4297 | e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); | 4435 | e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); |
4298 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; | 4436 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; |
4299 | } | 4437 | } |
4438 | } | ||
4300 | } | 4439 | } |
4301 | 4440 | ||
4302 | e1000_irq_enable(adapter); | 4441 | e1000_irq_enable(adapter); |
@@ -4458,12 +4597,16 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
4458 | struct e1000_adapter *adapter = netdev_priv(netdev); | 4597 | struct e1000_adapter *adapter = netdev_priv(netdev); |
4459 | uint32_t ctrl, ctrl_ext, rctl, manc, status; | 4598 | uint32_t ctrl, ctrl_ext, rctl, manc, status; |
4460 | uint32_t wufc = adapter->wol; | 4599 | uint32_t wufc = adapter->wol; |
4600 | #ifdef CONFIG_PM | ||
4461 | int retval = 0; | 4601 | int retval = 0; |
4602 | #endif | ||
4462 | 4603 | ||
4463 | netif_device_detach(netdev); | 4604 | netif_device_detach(netdev); |
4464 | 4605 | ||
4465 | if (netif_running(netdev)) | 4606 | if (netif_running(netdev)) { |
4607 | WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); | ||
4466 | e1000_down(adapter); | 4608 | e1000_down(adapter); |
4609 | } | ||
4467 | 4610 | ||
4468 | #ifdef CONFIG_PM | 4611 | #ifdef CONFIG_PM |
4469 | /* Implement our own version of pci_save_state(pdev) because pci- | 4612 | /* Implement our own version of pci_save_state(pdev) because pci- |
@@ -4521,7 +4664,9 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
4521 | pci_enable_wake(pdev, PCI_D3cold, 0); | 4664 | pci_enable_wake(pdev, PCI_D3cold, 0); |
4522 | } | 4665 | } |
4523 | 4666 | ||
4667 | /* FIXME: this code is incorrect for PCI Express */ | ||
4524 | if (adapter->hw.mac_type >= e1000_82540 && | 4668 | if (adapter->hw.mac_type >= e1000_82540 && |
4669 | adapter->hw.mac_type != e1000_ich8lan && | ||
4525 | adapter->hw.media_type == e1000_media_type_copper) { | 4670 | adapter->hw.media_type == e1000_media_type_copper) { |
4526 | manc = E1000_READ_REG(&adapter->hw, MANC); | 4671 | manc = E1000_READ_REG(&adapter->hw, MANC); |
4527 | if (manc & E1000_MANC_SMBUS_EN) { | 4672 | if (manc & E1000_MANC_SMBUS_EN) { |
@@ -4532,6 +4677,9 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
4532 | } | 4677 | } |
4533 | } | 4678 | } |
4534 | 4679 | ||
4680 | if (adapter->hw.phy_type == e1000_phy_igp_3) | ||
4681 | e1000_phy_powerdown_workaround(&adapter->hw); | ||
4682 | |||
4535 | /* Release control of h/w to f/w. If f/w is AMT enabled, this | 4683 | /* Release control of h/w to f/w. If f/w is AMT enabled, this |
4536 | * would have already happened in close and is redundant. */ | 4684 | * would have already happened in close and is redundant. */ |
4537 | e1000_release_hw_control(adapter); | 4685 | e1000_release_hw_control(adapter); |
@@ -4567,7 +4715,9 @@ e1000_resume(struct pci_dev *pdev) | |||
4567 | 4715 | ||
4568 | netif_device_attach(netdev); | 4716 | netif_device_attach(netdev); |
4569 | 4717 | ||
4718 | /* FIXME: this code is incorrect for PCI Express */ | ||
4570 | if (adapter->hw.mac_type >= e1000_82540 && | 4719 | if (adapter->hw.mac_type >= e1000_82540 && |
4720 | adapter->hw.mac_type != e1000_ich8lan && | ||
4571 | adapter->hw.media_type == e1000_media_type_copper) { | 4721 | adapter->hw.media_type == e1000_media_type_copper) { |
4572 | manc = E1000_READ_REG(&adapter->hw, MANC); | 4722 | manc = E1000_READ_REG(&adapter->hw, MANC); |
4573 | manc &= ~(E1000_MANC_ARP_EN); | 4723 | manc &= ~(E1000_MANC_ARP_EN); |
@@ -4601,6 +4751,7 @@ static void | |||
4601 | e1000_netpoll(struct net_device *netdev) | 4751 | e1000_netpoll(struct net_device *netdev) |
4602 | { | 4752 | { |
4603 | struct e1000_adapter *adapter = netdev_priv(netdev); | 4753 | struct e1000_adapter *adapter = netdev_priv(netdev); |
4754 | |||
4604 | disable_irq(adapter->pdev->irq); | 4755 | disable_irq(adapter->pdev->irq); |
4605 | e1000_intr(adapter->pdev->irq, netdev, NULL); | 4756 | e1000_intr(adapter->pdev->irq, netdev, NULL); |
4606 | e1000_clean_tx_irq(adapter, adapter->tx_ring); | 4757 | e1000_clean_tx_irq(adapter, adapter->tx_ring); |
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h index 048d052be29d..2d3e8b06cab0 100644 --- a/drivers/net/e1000/e1000_osdep.h +++ b/drivers/net/e1000/e1000_osdep.h | |||
@@ -127,4 +127,17 @@ typedef enum { | |||
127 | 127 | ||
128 | #define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, STATUS) | 128 | #define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, STATUS) |
129 | 129 | ||
130 | #define E1000_WRITE_ICH8_REG(a, reg, value) ( \ | ||
131 | writel((value), ((a)->flash_address + reg))) | ||
132 | |||
133 | #define E1000_READ_ICH8_REG(a, reg) ( \ | ||
134 | readl((a)->flash_address + reg)) | ||
135 | |||
136 | #define E1000_WRITE_ICH8_REG16(a, reg, value) ( \ | ||
137 | writew((value), ((a)->flash_address + reg))) | ||
138 | |||
139 | #define E1000_READ_ICH8_REG16(a, reg) ( \ | ||
140 | readw((a)->flash_address + reg)) | ||
141 | |||
142 | |||
130 | #endif /* _E1000_OSDEP_H_ */ | 143 | #endif /* _E1000_OSDEP_H_ */ |
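These accessors reach the ICH8 GbE flash/NVM registers through the separate flash BAR that e1000_main.c now ioremaps into hw->flash_address, rather than through the usual MMIO window behind hw_addr. A hypothetical call site; the ICH8_FLASH_HSFSTS offset name is only a stand-in for whatever e1000_hw.h actually defines, and the bit written is purely illustrative:

	u16 hsfsts;

	hsfsts = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);	/* readw(hw->flash_address + reg) */
	hsfsts |= 0x0001;					/* illustrative bit only */
	E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFSTS, hsfsts);

Here hw is assumed to be a struct e1000_hw * whose flash_address was set up by the ioremap added in e1000_probe().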
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c index e55f8969a0fb..0ef413172c68 100644 --- a/drivers/net/e1000/e1000_param.c +++ b/drivers/net/e1000/e1000_param.c | |||
@@ -45,6 +45,16 @@ | |||
45 | */ | 45 | */ |
46 | 46 | ||
47 | #define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET } | 47 | #define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET } |
48 | /* Module Parameters are always initialized to -1, so that the driver | ||
49 | * can tell the difference between no user specified value or the | ||
50 | * user asking for the default value. | ||
51 | * The true default values are loaded in when e1000_check_options is called. | ||
52 | * | ||
53 | * This is a GCC extension to ANSI C. | ||
54 | * See the item "Labeled Elements in Initializers" in the section | ||
55 | * "Extensions to the C Language Family" of the GCC documentation. | ||
56 | */ | ||
57 | |||
48 | #define E1000_PARAM(X, desc) \ | 58 | #define E1000_PARAM(X, desc) \ |
49 | static int __devinitdata X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \ | 59 | static int __devinitdata X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \ |
50 | static int num_##X = 0; \ | 60 | static int num_##X = 0; \ |
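The [0 ... E1000_MAX_NIC] form is the GCC range variant of designated initializers that the new comment points at. A stand-alone example of the idiom (MAX_NIC here is an arbitrary small value, not the driver's E1000_MAX_NIC):

#include <stdio.h>

#define MAX_NIC		4
#define OPTION_UNSET	(-1)

static int TxDescriptors[MAX_NIC + 1] = { [0 ... MAX_NIC] = OPTION_UNSET };

int main(void)
{
	for (int i = 0; i <= MAX_NIC; i++)
		printf("TxDescriptors[%d] = %d\n", i, TxDescriptors[i]);
	return 0;
}

Together with the bd = E1000_MAX_NIC clamp added below, the spare last slot stays at OPTION_UNSET, which is what lets e1000_validate_option() substitute the documented default and lets the num_TxDescriptors/num_RxDescriptors guards be dropped.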
@@ -183,6 +193,24 @@ E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay"); | |||
183 | 193 | ||
184 | E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); | 194 | E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); |
185 | 195 | ||
196 | /* Enable Smart Power Down of the PHY | ||
197 | * | ||
198 | * Valid Range: 0, 1 | ||
199 | * | ||
200 | * Default Value: 0 (disabled) | ||
201 | */ | ||
202 | |||
203 | E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); | ||
204 | |||
205 | /* Enable Kumeran Lock Loss workaround | ||
206 | * | ||
207 | * Valid Range: 0, 1 | ||
208 | * | ||
209 | * Default Value: 1 (enabled) | ||
210 | */ | ||
211 | |||
212 | E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround"); | ||
213 | |||
186 | #define AUTONEG_ADV_DEFAULT 0x2F | 214 | #define AUTONEG_ADV_DEFAULT 0x2F |
187 | #define AUTONEG_ADV_MASK 0x2F | 215 | #define AUTONEG_ADV_MASK 0x2F |
188 | #define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL | 216 | #define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL |
@@ -296,6 +324,7 @@ e1000_check_options(struct e1000_adapter *adapter) | |||
296 | DPRINTK(PROBE, NOTICE, | 324 | DPRINTK(PROBE, NOTICE, |
297 | "Warning: no configuration for board #%i\n", bd); | 325 | "Warning: no configuration for board #%i\n", bd); |
298 | DPRINTK(PROBE, NOTICE, "Using defaults for all values\n"); | 326 | DPRINTK(PROBE, NOTICE, "Using defaults for all values\n"); |
327 | bd = E1000_MAX_NIC; | ||
299 | } | 328 | } |
300 | 329 | ||
301 | { /* Transmit Descriptor Count */ | 330 | { /* Transmit Descriptor Count */ |
@@ -313,14 +342,9 @@ e1000_check_options(struct e1000_adapter *adapter) | |||
313 | opt.arg.r.max = mac_type < e1000_82544 ? | 342 | opt.arg.r.max = mac_type < e1000_82544 ? |
314 | E1000_MAX_TXD : E1000_MAX_82544_TXD; | 343 | E1000_MAX_TXD : E1000_MAX_82544_TXD; |
315 | 344 | ||
316 | if (num_TxDescriptors > bd) { | 345 | tx_ring->count = TxDescriptors[bd]; |
317 | tx_ring->count = TxDescriptors[bd]; | 346 | e1000_validate_option(&tx_ring->count, &opt, adapter); |
318 | e1000_validate_option(&tx_ring->count, &opt, adapter); | 347 | E1000_ROUNDUP(tx_ring->count, REQ_TX_DESCRIPTOR_MULTIPLE); |
319 | E1000_ROUNDUP(tx_ring->count, | ||
320 | REQ_TX_DESCRIPTOR_MULTIPLE); | ||
321 | } else { | ||
322 | tx_ring->count = opt.def; | ||
323 | } | ||
324 | for (i = 0; i < adapter->num_tx_queues; i++) | 348 | for (i = 0; i < adapter->num_tx_queues; i++) |
325 | tx_ring[i].count = tx_ring->count; | 349 | tx_ring[i].count = tx_ring->count; |
326 | } | 350 | } |
@@ -339,14 +363,9 @@ e1000_check_options(struct e1000_adapter *adapter) | |||
339 | opt.arg.r.max = mac_type < e1000_82544 ? E1000_MAX_RXD : | 363 | opt.arg.r.max = mac_type < e1000_82544 ? E1000_MAX_RXD : |
340 | E1000_MAX_82544_RXD; | 364 | E1000_MAX_82544_RXD; |
341 | 365 | ||
342 | if (num_RxDescriptors > bd) { | 366 | rx_ring->count = RxDescriptors[bd]; |
343 | rx_ring->count = RxDescriptors[bd]; | 367 | e1000_validate_option(&rx_ring->count, &opt, adapter); |
344 | e1000_validate_option(&rx_ring->count, &opt, adapter); | 368 | E1000_ROUNDUP(rx_ring->count, REQ_RX_DESCRIPTOR_MULTIPLE); |
345 | E1000_ROUNDUP(rx_ring->count, | ||
346 | REQ_RX_DESCRIPTOR_MULTIPLE); | ||
347 | } else { | ||
348 | rx_ring->count = opt.def; | ||
349 | } | ||
350 | for (i = 0; i < adapter->num_rx_queues; i++) | 369 | for (i = 0; i < adapter->num_rx_queues; i++) |
351 | rx_ring[i].count = rx_ring->count; | 370 | rx_ring[i].count = rx_ring->count; |
352 | } | 371 | } |
@@ -358,13 +377,9 @@ e1000_check_options(struct e1000_adapter *adapter) | |||
358 | .def = OPTION_ENABLED | 377 | .def = OPTION_ENABLED |
359 | }; | 378 | }; |
360 | 379 | ||
361 | if (num_XsumRX > bd) { | 380 | int rx_csum = XsumRX[bd]; |
362 | int rx_csum = XsumRX[bd]; | 381 | e1000_validate_option(&rx_csum, &opt, adapter); |
363 | e1000_validate_option(&rx_csum, &opt, adapter); | 382 | adapter->rx_csum = rx_csum; |
364 | adapter->rx_csum = rx_csum; | ||
365 | } else { | ||
366 | adapter->rx_csum = opt.def; | ||
367 | } | ||
368 | } | 383 | } |
369 | { /* Flow Control */ | 384 | { /* Flow Control */ |
370 | 385 | ||
@@ -384,13 +399,9 @@ e1000_check_options(struct e1000_adapter *adapter) | |||
384 | .p = fc_list }} | 399 | .p = fc_list }} |
385 | }; | 400 | }; |
386 | 401 | ||
387 | if (num_FlowControl > bd) { | 402 | int fc = FlowControl[bd]; |
388 | int fc = FlowControl[bd]; | 403 | e1000_validate_option(&fc, &opt, adapter); |
389 | e1000_validate_option(&fc, &opt, adapter); | 404 | adapter->hw.fc = adapter->hw.original_fc = fc; |
390 | adapter->hw.fc = adapter->hw.original_fc = fc; | ||
391 | } else { | ||
392 | adapter->hw.fc = adapter->hw.original_fc = opt.def; | ||
393 | } | ||
394 | } | 405 | } |
395 | { /* Transmit Interrupt Delay */ | 406 | { /* Transmit Interrupt Delay */ |
396 | struct e1000_option opt = { | 407 | struct e1000_option opt = { |
@@ -402,13 +413,8 @@ e1000_check_options(struct e1000_adapter *adapter) | |||
402 | .max = MAX_TXDELAY }} | 413 | .max = MAX_TXDELAY }} |
403 | }; | 414 | }; |
404 | 415 | ||
405 | if (num_TxIntDelay > bd) { | 416 | adapter->tx_int_delay = TxIntDelay[bd]; |
406 | adapter->tx_int_delay = TxIntDelay[bd]; | 417 | e1000_validate_option(&adapter->tx_int_delay, &opt, adapter); |
407 | e1000_validate_option(&adapter->tx_int_delay, &opt, | ||
408 | adapter); | ||
409 | } else { | ||
410 | adapter->tx_int_delay = opt.def; | ||
411 | } | ||
412 | } | 418 | } |
413 | { /* Transmit Absolute Interrupt Delay */ | 419 | { /* Transmit Absolute Interrupt Delay */ |
414 | struct e1000_option opt = { | 420 | struct e1000_option opt = { |
@@ -420,13 +426,9 @@ e1000_check_options(struct e1000_adapter *adapter) | |||
420 | .max = MAX_TXABSDELAY }} | 426 | .max = MAX_TXABSDELAY }} |
421 | }; | 427 | }; |
422 | 428 | ||
423 | if (num_TxAbsIntDelay > bd) { | 429 | adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; |
424 | adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; | 430 | e1000_validate_option(&adapter->tx_abs_int_delay, &opt, |
425 | e1000_validate_option(&adapter->tx_abs_int_delay, &opt, | 431 | adapter); |
426 | adapter); | ||
427 | } else { | ||
428 | adapter->tx_abs_int_delay = opt.def; | ||
429 | } | ||
430 | } | 432 | } |
431 | { /* Receive Interrupt Delay */ | 433 | { /* Receive Interrupt Delay */ |
432 | struct e1000_option opt = { | 434 | struct e1000_option opt = { |
@@ -438,13 +440,8 @@ e1000_check_options(struct e1000_adapter *adapter) | |||
438 | .max = MAX_RXDELAY }} | 440 | .max = MAX_RXDELAY }} |
439 | }; | 441 | }; |
440 | 442 | ||
441 | if (num_RxIntDelay > bd) { | 443 | adapter->rx_int_delay = RxIntDelay[bd]; |
442 | adapter->rx_int_delay = RxIntDelay[bd]; | 444 | e1000_validate_option(&adapter->rx_int_delay, &opt, adapter); |
443 | e1000_validate_option(&adapter->rx_int_delay, &opt, | ||
444 | adapter); | ||
445 | } else { | ||
446 | adapter->rx_int_delay = opt.def; | ||
447 | } | ||
448 | } | 445 | } |
449 | { /* Receive Absolute Interrupt Delay */ | 446 | { /* Receive Absolute Interrupt Delay */ |
450 | struct e1000_option opt = { | 447 | struct e1000_option opt = { |
@@ -456,13 +453,9 @@ e1000_check_options(struct e1000_adapter *adapter) | |||
456 | .max = MAX_RXABSDELAY }} | 453 | .max = MAX_RXABSDELAY }} |
457 | }; | 454 | }; |
458 | 455 | ||
459 | if (num_RxAbsIntDelay > bd) { | 456 | adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; |
460 | adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; | 457 | e1000_validate_option(&adapter->rx_abs_int_delay, &opt, |
461 | e1000_validate_option(&adapter->rx_abs_int_delay, &opt, | 458 | adapter); |
462 | adapter); | ||
463 | } else { | ||
464 | adapter->rx_abs_int_delay = opt.def; | ||
465 | } | ||
466 | } | 459 | } |
467 | { /* Interrupt Throttling Rate */ | 460 | { /* Interrupt Throttling Rate */ |
468 | struct e1000_option opt = { | 461 | struct e1000_option opt = { |
@@ -474,26 +467,44 @@ e1000_check_options(struct e1000_adapter *adapter) | |||
474 | .max = MAX_ITR }} | 467 | .max = MAX_ITR }} |
475 | }; | 468 | }; |
476 | 469 | ||
477 | if (num_InterruptThrottleRate > bd) { | 470 | adapter->itr = InterruptThrottleRate[bd]; |
478 | adapter->itr = InterruptThrottleRate[bd]; | 471 | switch (adapter->itr) { |
479 | switch (adapter->itr) { | 472 | case 0: |
480 | case 0: | 473 | DPRINTK(PROBE, INFO, "%s turned off\n", opt.name); |
481 | DPRINTK(PROBE, INFO, "%s turned off\n", | 474 | break; |
482 | opt.name); | 475 | case 1: |
483 | break; | 476 | DPRINTK(PROBE, INFO, "%s set to dynamic mode\n", |
484 | case 1: | 477 | opt.name); |
485 | DPRINTK(PROBE, INFO, "%s set to dynamic mode\n", | 478 | break; |
486 | opt.name); | 479 | default: |
487 | break; | 480 | e1000_validate_option(&adapter->itr, &opt, adapter); |
488 | default: | 481 | break; |
489 | e1000_validate_option(&adapter->itr, &opt, | ||
490 | adapter); | ||
491 | break; | ||
492 | } | ||
493 | } else { | ||
494 | adapter->itr = opt.def; | ||
495 | } | 482 | } |
496 | } | 483 | } |
484 | { /* Smart Power Down */ | ||
485 | struct e1000_option opt = { | ||
486 | .type = enable_option, | ||
487 | .name = "PHY Smart Power Down", | ||
488 | .err = "defaulting to Disabled", | ||
489 | .def = OPTION_DISABLED | ||
490 | }; | ||
491 | |||
492 | int spd = SmartPowerDownEnable[bd]; | ||
493 | e1000_validate_option(&spd, &opt, adapter); | ||
494 | adapter->smart_power_down = spd; | ||
495 | } | ||
496 | { /* Kumeran Lock Loss Workaround */ | ||
497 | struct e1000_option opt = { | ||
498 | .type = enable_option, | ||
499 | .name = "Kumeran Lock Loss Workaround", | ||
500 | .err = "defaulting to Enabled", | ||
501 | .def = OPTION_ENABLED | ||
502 | }; | ||
503 | |||
504 | int kmrn_lock_loss = KumeranLockLoss[bd]; | ||
505 | e1000_validate_option(&kmrn_lock_loss, &opt, adapter); | ||
506 | adapter->hw.kmrn_lock_loss_workaround_disabled = !kmrn_lock_loss; | ||
507 | } | ||
497 | 508 | ||
498 | switch (adapter->hw.media_type) { | 509 | switch (adapter->hw.media_type) { |
499 | case e1000_media_type_fiber: | 510 | case e1000_media_type_fiber: |
@@ -519,17 +530,18 @@ static void __devinit | |||
519 | e1000_check_fiber_options(struct e1000_adapter *adapter) | 530 | e1000_check_fiber_options(struct e1000_adapter *adapter) |
520 | { | 531 | { |
521 | int bd = adapter->bd_number; | 532 | int bd = adapter->bd_number; |
522 | if (num_Speed > bd) { | 533 | bd = bd > E1000_MAX_NIC ? E1000_MAX_NIC : bd; |
534 | if ((Speed[bd] != OPTION_UNSET)) { | ||
523 | DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, " | 535 | DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, " |
524 | "parameter ignored\n"); | 536 | "parameter ignored\n"); |
525 | } | 537 | } |
526 | 538 | ||
527 | if (num_Duplex > bd) { | 539 | if ((Duplex[bd] != OPTION_UNSET)) { |
528 | DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, " | 540 | DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, " |
529 | "parameter ignored\n"); | 541 | "parameter ignored\n"); |
530 | } | 542 | } |
531 | 543 | ||
532 | if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) { | 544 | if ((AutoNeg[bd] != OPTION_UNSET) && (AutoNeg[bd] != 0x20)) { |
533 | DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is " | 545 | DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is " |
534 | "not valid for fiber adapters, " | 546 | "not valid for fiber adapters, " |
535 | "parameter ignored\n"); | 547 | "parameter ignored\n"); |
@@ -548,6 +560,7 @@ e1000_check_copper_options(struct e1000_adapter *adapter) | |||
548 | { | 560 | { |
549 | int speed, dplx, an; | 561 | int speed, dplx, an; |
550 | int bd = adapter->bd_number; | 562 | int bd = adapter->bd_number; |
563 | bd = bd > E1000_MAX_NIC ? E1000_MAX_NIC : bd; | ||
551 | 564 | ||
552 | { /* Speed */ | 565 | { /* Speed */ |
553 | struct e1000_opt_list speed_list[] = {{ 0, "" }, | 566 | struct e1000_opt_list speed_list[] = {{ 0, "" }, |
@@ -564,12 +577,8 @@ e1000_check_copper_options(struct e1000_adapter *adapter) | |||
564 | .p = speed_list }} | 577 | .p = speed_list }} |
565 | }; | 578 | }; |
566 | 579 | ||
567 | if (num_Speed > bd) { | 580 | speed = Speed[bd]; |
568 | speed = Speed[bd]; | 581 | e1000_validate_option(&speed, &opt, adapter); |
569 | e1000_validate_option(&speed, &opt, adapter); | ||
570 | } else { | ||
571 | speed = opt.def; | ||
572 | } | ||
573 | } | 582 | } |
574 | { /* Duplex */ | 583 | { /* Duplex */ |
575 | struct e1000_opt_list dplx_list[] = {{ 0, "" }, | 584 | struct e1000_opt_list dplx_list[] = {{ 0, "" }, |
@@ -591,15 +600,11 @@ e1000_check_copper_options(struct e1000_adapter *adapter) | |||
591 | "Speed/Duplex/AutoNeg parameter ignored.\n"); | 600 | "Speed/Duplex/AutoNeg parameter ignored.\n"); |
592 | return; | 601 | return; |
593 | } | 602 | } |
594 | if (num_Duplex > bd) { | 603 | dplx = Duplex[bd]; |
595 | dplx = Duplex[bd]; | 604 | e1000_validate_option(&dplx, &opt, adapter); |
596 | e1000_validate_option(&dplx, &opt, adapter); | ||
597 | } else { | ||
598 | dplx = opt.def; | ||
599 | } | ||
600 | } | 605 | } |
601 | 606 | ||
602 | if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) { | 607 | if (AutoNeg[bd] != OPTION_UNSET && (speed != 0 || dplx != 0)) { |
603 | DPRINTK(PROBE, INFO, | 608 | DPRINTK(PROBE, INFO, |
604 | "AutoNeg specified along with Speed or Duplex, " | 609 | "AutoNeg specified along with Speed or Duplex, " |
605 | "parameter ignored\n"); | 610 | "parameter ignored\n"); |
@@ -648,19 +653,15 @@ e1000_check_copper_options(struct e1000_adapter *adapter) | |||
648 | .p = an_list }} | 653 | .p = an_list }} |
649 | }; | 654 | }; |
650 | 655 | ||
651 | if (num_AutoNeg > bd) { | 656 | an = AutoNeg[bd]; |
652 | an = AutoNeg[bd]; | 657 | e1000_validate_option(&an, &opt, adapter); |
653 | e1000_validate_option(&an, &opt, adapter); | ||
654 | } else { | ||
655 | an = opt.def; | ||
656 | } | ||
657 | adapter->hw.autoneg_advertised = an; | 658 | adapter->hw.autoneg_advertised = an; |
658 | } | 659 | } |
659 | 660 | ||
660 | switch (speed + dplx) { | 661 | switch (speed + dplx) { |
661 | case 0: | 662 | case 0: |
662 | adapter->hw.autoneg = adapter->fc_autoneg = 1; | 663 | adapter->hw.autoneg = adapter->fc_autoneg = 1; |
663 | if ((num_Speed > bd) && (speed != 0 || dplx != 0)) | 664 | if (Speed[bd] != OPTION_UNSET || Duplex[bd] != OPTION_UNSET) |
664 | DPRINTK(PROBE, INFO, | 665 | DPRINTK(PROBE, INFO, |
665 | "Speed and duplex autonegotiation enabled\n"); | 666 | "Speed and duplex autonegotiation enabled\n"); |
666 | break; | 667 | break; |
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index 037d870712ff..11b8f1b43dd5 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c | |||
@@ -240,10 +240,12 @@ enum { | |||
240 | #define NVREG_RNDSEED_FORCE2 0x2d00 | 240 | #define NVREG_RNDSEED_FORCE2 0x2d00 |
241 | #define NVREG_RNDSEED_FORCE3 0x7400 | 241 | #define NVREG_RNDSEED_FORCE3 0x7400 |
242 | 242 | ||
243 | NvRegUnknownSetupReg1 = 0xA0, | 243 | NvRegTxDeferral = 0xA0, |
244 | #define NVREG_UNKSETUP1_VAL 0x16070f | 244 | #define NVREG_TX_DEFERRAL_DEFAULT 0x15050f |
245 | NvRegUnknownSetupReg2 = 0xA4, | 245 | #define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f |
246 | #define NVREG_UNKSETUP2_VAL 0x16 | 246 | #define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f |
247 | NvRegRxDeferral = 0xA4, | ||
248 | #define NVREG_RX_DEFERRAL_DEFAULT 0x16 | ||
247 | NvRegMacAddrA = 0xA8, | 249 | NvRegMacAddrA = 0xA8, |
248 | NvRegMacAddrB = 0xAC, | 250 | NvRegMacAddrB = 0xAC, |
249 | NvRegMulticastAddrA = 0xB0, | 251 | NvRegMulticastAddrA = 0xB0, |
@@ -269,8 +271,10 @@ enum { | |||
269 | #define NVREG_LINKSPEED_MASK (0xFFF) | 271 | #define NVREG_LINKSPEED_MASK (0xFFF) |
270 | NvRegUnknownSetupReg5 = 0x130, | 272 | NvRegUnknownSetupReg5 = 0x130, |
271 | #define NVREG_UNKSETUP5_BIT31 (1<<31) | 273 | #define NVREG_UNKSETUP5_BIT31 (1<<31) |
272 | NvRegUnknownSetupReg3 = 0x13c, | 274 | NvRegTxWatermark = 0x13c, |
273 | #define NVREG_UNKSETUP3_VAL1 0x200010 | 275 | #define NVREG_TX_WM_DESC1_DEFAULT 0x0200010 |
276 | #define NVREG_TX_WM_DESC2_3_DEFAULT 0x1e08000 | ||
277 | #define NVREG_TX_WM_DESC2_3_1000 0xfe08000 | ||
274 | NvRegTxRxControl = 0x144, | 278 | NvRegTxRxControl = 0x144, |
275 | #define NVREG_TXRXCTL_KICK 0x0001 | 279 | #define NVREG_TXRXCTL_KICK 0x0001 |
276 | #define NVREG_TXRXCTL_BIT1 0x0002 | 280 | #define NVREG_TXRXCTL_BIT1 0x0002 |
@@ -658,7 +662,7 @@ static const struct register_test nv_registers_test[] = { | |||
658 | { NvRegMisc1, 0x03c }, | 662 | { NvRegMisc1, 0x03c }, |
659 | { NvRegOffloadConfig, 0x03ff }, | 663 | { NvRegOffloadConfig, 0x03ff }, |
660 | { NvRegMulticastAddrA, 0xffffffff }, | 664 | { NvRegMulticastAddrA, 0xffffffff }, |
661 | { NvRegUnknownSetupReg3, 0x0ff }, | 665 | { NvRegTxWatermark, 0x0ff }, |
662 | { NvRegWakeUpFlags, 0x07777 }, | 666 | { NvRegWakeUpFlags, 0x07777 }, |
663 | { 0,0 } | 667 | { 0,0 } |
664 | }; | 668 | }; |
@@ -1495,7 +1499,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1495 | np->tx_skbuff[nr] = skb; | 1499 | np->tx_skbuff[nr] = skb; |
1496 | 1500 | ||
1497 | #ifdef NETIF_F_TSO | 1501 | #ifdef NETIF_F_TSO |
1498 | if (skb_shinfo(skb)->gso_size) | 1502 | if (skb_is_gso(skb)) |
1499 | tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); | 1503 | tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); |
1500 | else | 1504 | else |
1501 | #endif | 1505 | #endif |
@@ -2127,7 +2131,7 @@ static int nv_update_linkspeed(struct net_device *dev) | |||
2127 | int newdup = np->duplex; | 2131 | int newdup = np->duplex; |
2128 | int mii_status; | 2132 | int mii_status; |
2129 | int retval = 0; | 2133 | int retval = 0; |
2130 | u32 control_1000, status_1000, phyreg, pause_flags; | 2134 | u32 control_1000, status_1000, phyreg, pause_flags, txreg; |
2131 | 2135 | ||
2132 | /* BMSR_LSTATUS is latched, read it twice: | 2136 | /* BMSR_LSTATUS is latched, read it twice: |
2133 | * we want the current value. | 2137 | * we want the current value. |
@@ -2245,6 +2249,26 @@ set_speed: | |||
2245 | phyreg |= PHY_1000; | 2249 | phyreg |= PHY_1000; |
2246 | writel(phyreg, base + NvRegPhyInterface); | 2250 | writel(phyreg, base + NvRegPhyInterface); |
2247 | 2251 | ||
2252 | if (phyreg & PHY_RGMII) { | ||
2253 | if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) | ||
2254 | txreg = NVREG_TX_DEFERRAL_RGMII_1000; | ||
2255 | else | ||
2256 | txreg = NVREG_TX_DEFERRAL_RGMII_10_100; | ||
2257 | } else { | ||
2258 | txreg = NVREG_TX_DEFERRAL_DEFAULT; | ||
2259 | } | ||
2260 | writel(txreg, base + NvRegTxDeferral); | ||
2261 | |||
2262 | if (np->desc_ver == DESC_VER_1) { | ||
2263 | txreg = NVREG_TX_WM_DESC1_DEFAULT; | ||
2264 | } else { | ||
2265 | if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) | ||
2266 | txreg = NVREG_TX_WM_DESC2_3_1000; | ||
2267 | else | ||
2268 | txreg = NVREG_TX_WM_DESC2_3_DEFAULT; | ||
2269 | } | ||
2270 | writel(txreg, base + NvRegTxWatermark); | ||
2271 | |||
2248 | writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD), | 2272 | writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD), |
2249 | base + NvRegMisc1); | 2273 | base + NvRegMisc1); |
2250 | pci_push(base); | 2274 | pci_push(base); |
@@ -3910,7 +3934,10 @@ static int nv_open(struct net_device *dev) | |||
3910 | 3934 | ||
3911 | /* 5) continue setup */ | 3935 | /* 5) continue setup */ |
3912 | writel(np->linkspeed, base + NvRegLinkSpeed); | 3936 | writel(np->linkspeed, base + NvRegLinkSpeed); |
3913 | writel(NVREG_UNKSETUP3_VAL1, base + NvRegUnknownSetupReg3); | 3937 | if (np->desc_ver == DESC_VER_1) |
3938 | writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark); | ||
3939 | else | ||
3940 | writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark); | ||
3914 | writel(np->txrxctl_bits, base + NvRegTxRxControl); | 3941 | writel(np->txrxctl_bits, base + NvRegTxRxControl); |
3915 | writel(np->vlanctl_bits, base + NvRegVlanControl); | 3942 | writel(np->vlanctl_bits, base + NvRegVlanControl); |
3916 | pci_push(base); | 3943 | pci_push(base); |
@@ -3932,8 +3959,8 @@ static int nv_open(struct net_device *dev) | |||
3932 | writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus); | 3959 | writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus); |
3933 | get_random_bytes(&i, sizeof(i)); | 3960 | get_random_bytes(&i, sizeof(i)); |
3934 | writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed); | 3961 | writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed); |
3935 | writel(NVREG_UNKSETUP1_VAL, base + NvRegUnknownSetupReg1); | 3962 | writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral); |
3936 | writel(NVREG_UNKSETUP2_VAL, base + NvRegUnknownSetupReg2); | 3963 | writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral); |
3937 | if (poll_interval == -1) { | 3964 | if (poll_interval == -1) { |
3938 | if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) | 3965 | if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) |
3939 | writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval); | 3966 | writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval); |
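Several hunks in this merge (forcedeth above, and ixgb, loopback, myri10ge and sky2 further down) replace open-coded skb_shinfo(skb)->gso_size tests with skb_is_gso(). The helper is a thin readability wrapper in include/linux/skbuff.h; the sketch below paraphrases it and may not match the in-tree definition exactly.

    /* Paraphrase of the skb_is_gso() helper these hunks switch to; a non-zero
     * gso_size marks the skb as GSO/TSO, so the tests behave as before. */
    static inline int skb_is_gso(const struct sk_buff *skb)
    {
            return skb_shinfo(skb)->gso_size;
    }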
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index 0641f54fc638..889f338132fa 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c | |||
@@ -122,6 +122,12 @@ struct bpqdev { | |||
122 | 122 | ||
123 | static LIST_HEAD(bpq_devices); | 123 | static LIST_HEAD(bpq_devices); |
124 | 124 | ||
125 | /* | ||
126 | * bpqether network devices are paired with ethernet devices below them, so | ||
127 | * form a special "super class" of normal ethernet devices; split their locks | ||
128 | * off into a separate class since they always nest. | ||
129 | */ | ||
130 | static struct lock_class_key bpq_netdev_xmit_lock_key; | ||
125 | 131 | ||
126 | /* ------------------------------------------------------------------------ */ | 132 | /* ------------------------------------------------------------------------ */ |
127 | 133 | ||
@@ -528,6 +534,7 @@ static int bpq_new_device(struct net_device *edev) | |||
528 | err = register_netdevice(ndev); | 534 | err = register_netdevice(ndev); |
529 | if (err) | 535 | if (err) |
530 | goto error; | 536 | goto error; |
537 | lockdep_set_class(&ndev->_xmit_lock, &bpq_netdev_xmit_lock_key); | ||
531 | 538 | ||
532 | /* List protected by RTNL */ | 539 | /* List protected by RTNL */ |
533 | list_add_rcu(&bpq->bpq_list, &bpq_devices); | 540 | list_add_rcu(&bpq->bpq_list, &bpq_devices); |
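The bpqether comment above describes a common lockdep pattern for stacked network devices: the virtual device transmits through the ethernet device underneath it, so the two _xmit_locks legitimately nest, and giving the upper device's lock its own class keeps lockdep from reporting that nesting as recursion. Below is a minimal sketch of the pattern under that assumption; apart from struct lock_class_key, lockdep_set_class() and the net_device _xmit_lock field, the names are illustrative.

    #include <linux/netdevice.h>
    #include <linux/lockdep.h>

    /* One class shared by all instances of the stacked device type. */
    static struct lock_class_key stacked_xmit_lock_key;

    static void stacked_dev_set_xmit_lockdep_class(struct net_device *dev)
    {
            /* Reclassify the per-device TX lock after register_netdevice(). */
            lockdep_set_class(&dev->_xmit_lock, &stacked_xmit_lock_key);
    }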
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index 3a42afab5036..43e3f33ed5e2 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c | |||
@@ -271,6 +271,7 @@ static int __init ifb_init_module(void) | |||
271 | for (i = 0; i < numifbs && !err; i++) | 271 | for (i = 0; i < numifbs && !err; i++) |
272 | err = ifb_init_one(i); | 272 | err = ifb_init_one(i); |
273 | if (err) { | 273 | if (err) { |
274 | i--; | ||
274 | while (--i >= 0) | 275 | while (--i >= 0) |
275 | ifb_free_one(i); | 276 | ifb_free_one(i); |
276 | } | 277 | } |
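The one-line ifb change above fixes an off-by-one in the error unwind: when ifb_init_one(k) fails, the for loop still executes i++, so i equals k + 1 at the cleanup test, and without the extra decrement the while loop would free index k, which was never initialised. A standalone sketch of the corrected pattern follows; init_one()/free_one() are placeholders for ifb_init_one()/ifb_free_one().

    static int init_one(int i);     /* placeholder */
    static void free_one(int i);    /* placeholder */

    static int init_all(int n)
    {
            int i, err = 0;

            for (i = 0; i < n && !err; i++)
                    err = init_one(i);  /* if index k fails, loop exits with i == k + 1 */
            if (err) {
                    i--;                /* step back to k, which must not be freed */
                    while (--i >= 0)    /* frees k - 1 down to 0 */
                            free_one(i);
            }
            return err;
    }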
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c index bf1fca5a3fa0..e3c8cd5eca67 100644 --- a/drivers/net/irda/ali-ircc.c +++ b/drivers/net/irda/ali-ircc.c | |||
@@ -146,7 +146,7 @@ static int __init ali_ircc_init(void) | |||
146 | { | 146 | { |
147 | ali_chip_t *chip; | 147 | ali_chip_t *chip; |
148 | chipio_t info; | 148 | chipio_t info; |
149 | int ret = -ENODEV; | 149 | int ret; |
150 | int cfg, cfg_base; | 150 | int cfg, cfg_base; |
151 | int reg, revision; | 151 | int reg, revision; |
152 | int i = 0; | 152 | int i = 0; |
@@ -160,6 +160,7 @@ static int __init ali_ircc_init(void) | |||
160 | return ret; | 160 | return ret; |
161 | } | 161 | } |
162 | 162 | ||
163 | ret = -ENODEV; | ||
163 | 164 | ||
164 | /* Probe for all the ALi chipsets we know about */ | 165 | /* Probe for all the ALi chipsets we know about */ |
165 | for (chip= chips; chip->name; chip++, i++) | 166 | for (chip= chips; chip->name; chip++, i++) |
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c index a4674044bd6f..2eff45bedc7c 100644 --- a/drivers/net/irda/smsc-ircc2.c +++ b/drivers/net/irda/smsc-ircc2.c | |||
@@ -2353,7 +2353,7 @@ static int __init smsc_superio_lpc(unsigned short cfg_base) | |||
2353 | #ifdef CONFIG_PCI | 2353 | #ifdef CONFIG_PCI |
2354 | #define PCIID_VENDOR_INTEL 0x8086 | 2354 | #define PCIID_VENDOR_INTEL 0x8086 |
2355 | #define PCIID_VENDOR_ALI 0x10b9 | 2355 | #define PCIID_VENDOR_ALI 0x10b9 |
2356 | static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __devinitdata = { | 2356 | static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __initdata = { |
2357 | { | 2357 | { |
2358 | .vendor = PCIID_VENDOR_INTEL, /* Intel 82801DBM LPC bridge */ | 2358 | .vendor = PCIID_VENDOR_INTEL, /* Intel 82801DBM LPC bridge */ |
2359 | .device = 0x24cc, | 2359 | .device = 0x24cc, |
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index b91e082483f6..7bbd447289b5 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c | |||
@@ -1173,7 +1173,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb) | |||
1173 | uint16_t ipcse, tucse, mss; | 1173 | uint16_t ipcse, tucse, mss; |
1174 | int err; | 1174 | int err; |
1175 | 1175 | ||
1176 | if(likely(skb_shinfo(skb)->gso_size)) { | 1176 | if (likely(skb_is_gso(skb))) { |
1177 | if (skb_header_cloned(skb)) { | 1177 | if (skb_header_cloned(skb)) { |
1178 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); | 1178 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); |
1179 | if (err) | 1179 | if (err) |
@@ -1281,7 +1281,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb, | |||
1281 | 1281 | ||
1282 | while(len) { | 1282 | while(len) { |
1283 | buffer_info = &tx_ring->buffer_info[i]; | 1283 | buffer_info = &tx_ring->buffer_info[i]; |
1284 | size = min(len, IXGB_MAX_JUMBO_FRAME_SIZE); | 1284 | size = min(len, IXGB_MAX_DATA_PER_TXD); |
1285 | buffer_info->length = size; | 1285 | buffer_info->length = size; |
1286 | buffer_info->dma = | 1286 | buffer_info->dma = |
1287 | pci_map_single(adapter->pdev, | 1287 | pci_map_single(adapter->pdev, |
@@ -1306,7 +1306,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb, | |||
1306 | 1306 | ||
1307 | while(len) { | 1307 | while(len) { |
1308 | buffer_info = &tx_ring->buffer_info[i]; | 1308 | buffer_info = &tx_ring->buffer_info[i]; |
1309 | size = min(len, IXGB_MAX_JUMBO_FRAME_SIZE); | 1309 | size = min(len, IXGB_MAX_DATA_PER_TXD); |
1310 | buffer_info->length = size; | 1310 | buffer_info->length = size; |
1311 | buffer_info->dma = | 1311 | buffer_info->dma = |
1312 | pci_map_page(adapter->pdev, | 1312 | pci_map_page(adapter->pdev, |
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 43fef7de8cb9..997cbce9af6e 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c | |||
@@ -139,7 +139,7 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev) | |||
139 | #endif | 139 | #endif |
140 | 140 | ||
141 | #ifdef LOOPBACK_TSO | 141 | #ifdef LOOPBACK_TSO |
142 | if (skb_shinfo(skb)->gso_size) { | 142 | if (skb_is_gso(skb)) { |
143 | BUG_ON(skb->protocol != htons(ETH_P_IP)); | 143 | BUG_ON(skb->protocol != htons(ETH_P_IP)); |
144 | BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP); | 144 | BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP); |
145 | 145 | ||
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index f4c8fd373b9b..c3e52c806b13 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c | |||
@@ -620,7 +620,7 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp) | |||
620 | return -ENXIO; | 620 | return -ENXIO; |
621 | } | 621 | } |
622 | dev_info(&mgp->pdev->dev, "handoff confirmed\n"); | 622 | dev_info(&mgp->pdev->dev, "handoff confirmed\n"); |
623 | myri10ge_dummy_rdma(mgp, mgp->tx.boundary != 4096); | 623 | myri10ge_dummy_rdma(mgp, 1); |
624 | 624 | ||
625 | return 0; | 625 | return 0; |
626 | } | 626 | } |
@@ -2116,7 +2116,7 @@ abort_linearize: | |||
2116 | } | 2116 | } |
2117 | idx = (idx + 1) & tx->mask; | 2117 | idx = (idx + 1) & tx->mask; |
2118 | } while (idx != last_idx); | 2118 | } while (idx != last_idx); |
2119 | if (skb_shinfo(skb)->gso_size) { | 2119 | if (skb_is_gso(skb)) { |
2120 | printk(KERN_ERR | 2120 | printk(KERN_ERR |
2121 | "myri10ge: %s: TSO but wanted to linearize?!?!?\n", | 2121 | "myri10ge: %s: TSO but wanted to linearize?!?!?\n", |
2122 | mgp->dev->name); | 2122 | mgp->dev->name); |
@@ -2412,14 +2412,20 @@ static int myri10ge_resume(struct pci_dev *pdev) | |||
2412 | return -EIO; | 2412 | return -EIO; |
2413 | } | 2413 | } |
2414 | myri10ge_restore_state(mgp); | 2414 | myri10ge_restore_state(mgp); |
2415 | pci_enable_device(pdev); | 2415 | |
2416 | status = pci_enable_device(pdev); | ||
2417 | if (status < 0) { | ||
2418 | dev_err(&pdev->dev, "failed to enable device\n"); | ||
2419 | return -EIO; | ||
2420 | } | ||
2421 | |||
2416 | pci_set_master(pdev); | 2422 | pci_set_master(pdev); |
2417 | 2423 | ||
2418 | status = request_irq(pdev->irq, myri10ge_intr, IRQF_SHARED, | 2424 | status = request_irq(pdev->irq, myri10ge_intr, IRQF_SHARED, |
2419 | netdev->name, mgp); | 2425 | netdev->name, mgp); |
2420 | if (status != 0) { | 2426 | if (status != 0) { |
2421 | dev_err(&pdev->dev, "failed to allocate IRQ\n"); | 2427 | dev_err(&pdev->dev, "failed to allocate IRQ\n"); |
2422 | goto abort_with_msi; | 2428 | goto abort_with_enabled; |
2423 | } | 2429 | } |
2424 | 2430 | ||
2425 | myri10ge_reset(mgp); | 2431 | myri10ge_reset(mgp); |
@@ -2438,7 +2444,8 @@ static int myri10ge_resume(struct pci_dev *pdev) | |||
2438 | 2444 | ||
2439 | return 0; | 2445 | return 0; |
2440 | 2446 | ||
2441 | abort_with_msi: | 2447 | abort_with_enabled: |
2448 | pci_disable_device(pdev); | ||
2442 | return -EIO; | 2449 | return -EIO; |
2443 | 2450 | ||
2444 | } | 2451 | } |
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index c6b77acb35ef..e1fe3a0a7b0b 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c | |||
@@ -1976,7 +1976,6 @@ static int start_nic(struct s2io_nic *nic) | |||
1976 | XENA_dev_config_t __iomem *bar0 = nic->bar0; | 1976 | XENA_dev_config_t __iomem *bar0 = nic->bar0; |
1977 | struct net_device *dev = nic->dev; | 1977 | struct net_device *dev = nic->dev; |
1978 | register u64 val64 = 0; | 1978 | register u64 val64 = 0; |
1979 | u16 interruptible; | ||
1980 | u16 subid, i; | 1979 | u16 subid, i; |
1981 | mac_info_t *mac_control; | 1980 | mac_info_t *mac_control; |
1982 | struct config_param *config; | 1981 | struct config_param *config; |
@@ -2047,16 +2046,6 @@ static int start_nic(struct s2io_nic *nic) | |||
2047 | return FAILURE; | 2046 | return FAILURE; |
2048 | } | 2047 | } |
2049 | 2048 | ||
2050 | /* Enable select interrupts */ | ||
2051 | if (nic->intr_type != INTA) | ||
2052 | en_dis_able_nic_intrs(nic, ENA_ALL_INTRS, DISABLE_INTRS); | ||
2053 | else { | ||
2054 | interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR; | ||
2055 | interruptible |= TX_PIC_INTR | RX_PIC_INTR; | ||
2056 | interruptible |= TX_MAC_INTR | RX_MAC_INTR; | ||
2057 | en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS); | ||
2058 | } | ||
2059 | |||
2060 | /* | 2049 | /* |
2061 | * With some switches, link might be already up at this point. | 2050 | * With some switches, link might be already up at this point. |
2062 | * Because of this weird behavior, when we enable laser, | 2051 | * Because of this weird behavior, when we enable laser, |
@@ -3749,101 +3738,19 @@ static int s2io_open(struct net_device *dev) | |||
3749 | if (err) { | 3738 | if (err) { |
3750 | DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n", | 3739 | DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n", |
3751 | dev->name); | 3740 | dev->name); |
3752 | if (err == -ENODEV) | 3741 | goto hw_init_failed; |
3753 | goto hw_init_failed; | ||
3754 | else | ||
3755 | goto hw_enable_failed; | ||
3756 | } | ||
3757 | |||
3758 | /* Store the values of the MSIX table in the nic_t structure */ | ||
3759 | store_xmsi_data(sp); | ||
3760 | |||
3761 | /* After proper initialization of H/W, register ISR */ | ||
3762 | if (sp->intr_type == MSI) { | ||
3763 | err = request_irq((int) sp->pdev->irq, s2io_msi_handle, | ||
3764 | IRQF_SHARED, sp->name, dev); | ||
3765 | if (err) { | ||
3766 | DBG_PRINT(ERR_DBG, "%s: MSI registration \ | ||
3767 | failed\n", dev->name); | ||
3768 | goto isr_registration_failed; | ||
3769 | } | ||
3770 | } | ||
3771 | if (sp->intr_type == MSI_X) { | ||
3772 | int i; | ||
3773 | |||
3774 | for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) { | ||
3775 | if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) { | ||
3776 | sprintf(sp->desc1, "%s:MSI-X-%d-TX", | ||
3777 | dev->name, i); | ||
3778 | err = request_irq(sp->entries[i].vector, | ||
3779 | s2io_msix_fifo_handle, 0, sp->desc1, | ||
3780 | sp->s2io_entries[i].arg); | ||
3781 | DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc1, | ||
3782 | (unsigned long long)sp->msix_info[i].addr); | ||
3783 | } else { | ||
3784 | sprintf(sp->desc2, "%s:MSI-X-%d-RX", | ||
3785 | dev->name, i); | ||
3786 | err = request_irq(sp->entries[i].vector, | ||
3787 | s2io_msix_ring_handle, 0, sp->desc2, | ||
3788 | sp->s2io_entries[i].arg); | ||
3789 | DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc2, | ||
3790 | (unsigned long long)sp->msix_info[i].addr); | ||
3791 | } | ||
3792 | if (err) { | ||
3793 | DBG_PRINT(ERR_DBG, "%s: MSI-X-%d registration \ | ||
3794 | failed\n", dev->name, i); | ||
3795 | DBG_PRINT(ERR_DBG, "Returned: %d\n", err); | ||
3796 | goto isr_registration_failed; | ||
3797 | } | ||
3798 | sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS; | ||
3799 | } | ||
3800 | } | ||
3801 | if (sp->intr_type == INTA) { | ||
3802 | err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED, | ||
3803 | sp->name, dev); | ||
3804 | if (err) { | ||
3805 | DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n", | ||
3806 | dev->name); | ||
3807 | goto isr_registration_failed; | ||
3808 | } | ||
3809 | } | 3742 | } |
3810 | 3743 | ||
3811 | if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) { | 3744 | if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) { |
3812 | DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n"); | 3745 | DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n"); |
3746 | s2io_card_down(sp); | ||
3813 | err = -ENODEV; | 3747 | err = -ENODEV; |
3814 | goto setting_mac_address_failed; | 3748 | goto hw_init_failed; |
3815 | } | 3749 | } |
3816 | 3750 | ||
3817 | netif_start_queue(dev); | 3751 | netif_start_queue(dev); |
3818 | return 0; | 3752 | return 0; |
3819 | 3753 | ||
3820 | setting_mac_address_failed: | ||
3821 | if (sp->intr_type != MSI_X) | ||
3822 | free_irq(sp->pdev->irq, dev); | ||
3823 | isr_registration_failed: | ||
3824 | del_timer_sync(&sp->alarm_timer); | ||
3825 | if (sp->intr_type == MSI_X) { | ||
3826 | int i; | ||
3827 | u16 msi_control; /* Temp variable */ | ||
3828 | |||
3829 | for (i=1; (sp->s2io_entries[i].in_use == | ||
3830 | MSIX_REGISTERED_SUCCESS); i++) { | ||
3831 | int vector = sp->entries[i].vector; | ||
3832 | void *arg = sp->s2io_entries[i].arg; | ||
3833 | |||
3834 | free_irq(vector, arg); | ||
3835 | } | ||
3836 | pci_disable_msix(sp->pdev); | ||
3837 | |||
3838 | /* Temp */ | ||
3839 | pci_read_config_word(sp->pdev, 0x42, &msi_control); | ||
3840 | msi_control &= 0xFFFE; /* Disable MSI */ | ||
3841 | pci_write_config_word(sp->pdev, 0x42, msi_control); | ||
3842 | } | ||
3843 | else if (sp->intr_type == MSI) | ||
3844 | pci_disable_msi(sp->pdev); | ||
3845 | hw_enable_failed: | ||
3846 | s2io_reset(sp); | ||
3847 | hw_init_failed: | 3754 | hw_init_failed: |
3848 | if (sp->intr_type == MSI_X) { | 3755 | if (sp->intr_type == MSI_X) { |
3849 | if (sp->entries) | 3756 | if (sp->entries) |
@@ -3874,7 +3781,7 @@ static int s2io_close(struct net_device *dev) | |||
3874 | flush_scheduled_work(); | 3781 | flush_scheduled_work(); |
3875 | netif_stop_queue(dev); | 3782 | netif_stop_queue(dev); |
3876 | /* Reset card, kill tasklet and free Tx and Rx buffers. */ | 3783 | /* Reset card, kill tasklet and free Tx and Rx buffers. */ |
3877 | s2io_card_down(sp, 1); | 3784 | s2io_card_down(sp); |
3878 | 3785 | ||
3879 | sp->device_close_flag = TRUE; /* Device is shut down. */ | 3786 | sp->device_close_flag = TRUE; /* Device is shut down. */ |
3880 | return 0; | 3787 | return 0; |
@@ -5919,7 +5826,7 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu) | |||
5919 | 5826 | ||
5920 | dev->mtu = new_mtu; | 5827 | dev->mtu = new_mtu; |
5921 | if (netif_running(dev)) { | 5828 | if (netif_running(dev)) { |
5922 | s2io_card_down(sp, 0); | 5829 | s2io_card_down(sp); |
5923 | netif_stop_queue(dev); | 5830 | netif_stop_queue(dev); |
5924 | if (s2io_card_up(sp)) { | 5831 | if (s2io_card_up(sp)) { |
5925 | DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", | 5832 | DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", |
@@ -6216,43 +6123,106 @@ static int rxd_owner_bit_reset(nic_t *sp) | |||
6216 | 6123 | ||
6217 | } | 6124 | } |
6218 | 6125 | ||
6219 | static void s2io_card_down(nic_t * sp, int flag) | 6126 | static int s2io_add_isr(nic_t * sp) |
6220 | { | 6127 | { |
6221 | int cnt = 0; | 6128 | int ret = 0; |
6222 | XENA_dev_config_t __iomem *bar0 = sp->bar0; | ||
6223 | unsigned long flags; | ||
6224 | register u64 val64 = 0; | ||
6225 | struct net_device *dev = sp->dev; | 6129 | struct net_device *dev = sp->dev; |
6130 | int err = 0; | ||
6226 | 6131 | ||
6227 | del_timer_sync(&sp->alarm_timer); | 6132 | if (sp->intr_type == MSI) |
6228 | /* If s2io_set_link task is executing, wait till it completes. */ | 6133 | ret = s2io_enable_msi(sp); |
6229 | while (test_and_set_bit(0, &(sp->link_state))) { | 6134 | else if (sp->intr_type == MSI_X) |
6230 | msleep(50); | 6135 | ret = s2io_enable_msi_x(sp); |
6136 | if (ret) { | ||
6137 | DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name); | ||
6138 | sp->intr_type = INTA; | ||
6231 | } | 6139 | } |
6232 | atomic_set(&sp->card_state, CARD_DOWN); | ||
6233 | 6140 | ||
6234 | /* disable Tx and Rx traffic on the NIC */ | 6141 | /* Store the values of the MSIX table in the nic_t structure */ |
6235 | stop_nic(sp); | 6142 | store_xmsi_data(sp); |
6236 | if (flag) { | ||
6237 | if (sp->intr_type == MSI_X) { | ||
6238 | int i; | ||
6239 | u16 msi_control; | ||
6240 | 6143 | ||
6241 | for (i=1; (sp->s2io_entries[i].in_use == | 6144 | /* After proper initialization of H/W, register ISR */ |
6242 | MSIX_REGISTERED_SUCCESS); i++) { | 6145 | if (sp->intr_type == MSI) { |
6243 | int vector = sp->entries[i].vector; | 6146 | err = request_irq((int) sp->pdev->irq, s2io_msi_handle, |
6244 | void *arg = sp->s2io_entries[i].arg; | 6147 | IRQF_SHARED, sp->name, dev); |
6148 | if (err) { | ||
6149 | pci_disable_msi(sp->pdev); | ||
6150 | DBG_PRINT(ERR_DBG, "%s: MSI registration failed\n", | ||
6151 | dev->name); | ||
6152 | return -1; | ||
6153 | } | ||
6154 | } | ||
6155 | if (sp->intr_type == MSI_X) { | ||
6156 | int i; | ||
6245 | 6157 | ||
6246 | free_irq(vector, arg); | 6158 | for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) { |
6159 | if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) { | ||
6160 | sprintf(sp->desc[i], "%s:MSI-X-%d-TX", | ||
6161 | dev->name, i); | ||
6162 | err = request_irq(sp->entries[i].vector, | ||
6163 | s2io_msix_fifo_handle, 0, sp->desc[i], | ||
6164 | sp->s2io_entries[i].arg); | ||
6165 | DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc[i], | ||
6166 | (unsigned long long)sp->msix_info[i].addr); | ||
6167 | } else { | ||
6168 | sprintf(sp->desc[i], "%s:MSI-X-%d-RX", | ||
6169 | dev->name, i); | ||
6170 | err = request_irq(sp->entries[i].vector, | ||
6171 | s2io_msix_ring_handle, 0, sp->desc[i], | ||
6172 | sp->s2io_entries[i].arg); | ||
6173 | DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc[i], | ||
6174 | (unsigned long long)sp->msix_info[i].addr); | ||
6247 | } | 6175 | } |
6248 | pci_read_config_word(sp->pdev, 0x42, &msi_control); | 6176 | if (err) { |
6249 | msi_control &= 0xFFFE; /* Disable MSI */ | 6177 | DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration " |
6250 | pci_write_config_word(sp->pdev, 0x42, msi_control); | 6178 | "failed\n", dev->name, i); |
6251 | pci_disable_msix(sp->pdev); | 6179 | DBG_PRINT(ERR_DBG, "Returned: %d\n", err); |
6252 | } else { | 6180 | return -1; |
6253 | free_irq(sp->pdev->irq, dev); | 6181 | } |
6254 | if (sp->intr_type == MSI) | 6182 | sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS; |
6255 | pci_disable_msi(sp->pdev); | 6183 | } |
6184 | } | ||
6185 | if (sp->intr_type == INTA) { | ||
6186 | err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED, | ||
6187 | sp->name, dev); | ||
6188 | if (err) { | ||
6189 | DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n", | ||
6190 | dev->name); | ||
6191 | return -1; | ||
6192 | } | ||
6193 | } | ||
6194 | return 0; | ||
6195 | } | ||
6196 | static void s2io_rem_isr(nic_t * sp) | ||
6197 | { | ||
6198 | int cnt = 0; | ||
6199 | struct net_device *dev = sp->dev; | ||
6200 | |||
6201 | if (sp->intr_type == MSI_X) { | ||
6202 | int i; | ||
6203 | u16 msi_control; | ||
6204 | |||
6205 | for (i=1; (sp->s2io_entries[i].in_use == | ||
6206 | MSIX_REGISTERED_SUCCESS); i++) { | ||
6207 | int vector = sp->entries[i].vector; | ||
6208 | void *arg = sp->s2io_entries[i].arg; | ||
6209 | |||
6210 | free_irq(vector, arg); | ||
6211 | } | ||
6212 | pci_read_config_word(sp->pdev, 0x42, &msi_control); | ||
6213 | msi_control &= 0xFFFE; /* Disable MSI */ | ||
6214 | pci_write_config_word(sp->pdev, 0x42, msi_control); | ||
6215 | |||
6216 | pci_disable_msix(sp->pdev); | ||
6217 | } else { | ||
6218 | free_irq(sp->pdev->irq, dev); | ||
6219 | if (sp->intr_type == MSI) { | ||
6220 | u16 val; | ||
6221 | |||
6222 | pci_disable_msi(sp->pdev); | ||
6223 | pci_read_config_word(sp->pdev, 0x4c, &val); | ||
6224 | val ^= 0x1; | ||
6225 | pci_write_config_word(sp->pdev, 0x4c, val); | ||
6256 | } | 6226 | } |
6257 | } | 6227 | } |
6258 | /* Waiting till all Interrupt handlers are complete */ | 6228 | /* Waiting till all Interrupt handlers are complete */ |
@@ -6263,6 +6233,26 @@ static void s2io_card_down(nic_t * sp, int flag) | |||
6263 | break; | 6233 | break; |
6264 | cnt++; | 6234 | cnt++; |
6265 | } while(cnt < 5); | 6235 | } while(cnt < 5); |
6236 | } | ||
6237 | |||
6238 | static void s2io_card_down(nic_t * sp) | ||
6239 | { | ||
6240 | int cnt = 0; | ||
6241 | XENA_dev_config_t __iomem *bar0 = sp->bar0; | ||
6242 | unsigned long flags; | ||
6243 | register u64 val64 = 0; | ||
6244 | |||
6245 | del_timer_sync(&sp->alarm_timer); | ||
6246 | /* If s2io_set_link task is executing, wait till it completes. */ | ||
6247 | while (test_and_set_bit(0, &(sp->link_state))) { | ||
6248 | msleep(50); | ||
6249 | } | ||
6250 | atomic_set(&sp->card_state, CARD_DOWN); | ||
6251 | |||
6252 | /* disable Tx and Rx traffic on the NIC */ | ||
6253 | stop_nic(sp); | ||
6254 | |||
6255 | s2io_rem_isr(sp); | ||
6266 | 6256 | ||
6267 | /* Kill tasklet. */ | 6257 | /* Kill tasklet. */ |
6268 | tasklet_kill(&sp->task); | 6258 | tasklet_kill(&sp->task); |
@@ -6314,23 +6304,16 @@ static int s2io_card_up(nic_t * sp) | |||
6314 | mac_info_t *mac_control; | 6304 | mac_info_t *mac_control; |
6315 | struct config_param *config; | 6305 | struct config_param *config; |
6316 | struct net_device *dev = (struct net_device *) sp->dev; | 6306 | struct net_device *dev = (struct net_device *) sp->dev; |
6307 | u16 interruptible; | ||
6317 | 6308 | ||
6318 | /* Initialize the H/W I/O registers */ | 6309 | /* Initialize the H/W I/O registers */ |
6319 | if (init_nic(sp) != 0) { | 6310 | if (init_nic(sp) != 0) { |
6320 | DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n", | 6311 | DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n", |
6321 | dev->name); | 6312 | dev->name); |
6313 | s2io_reset(sp); | ||
6322 | return -ENODEV; | 6314 | return -ENODEV; |
6323 | } | 6315 | } |
6324 | 6316 | ||
6325 | if (sp->intr_type == MSI) | ||
6326 | ret = s2io_enable_msi(sp); | ||
6327 | else if (sp->intr_type == MSI_X) | ||
6328 | ret = s2io_enable_msi_x(sp); | ||
6329 | if (ret) { | ||
6330 | DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name); | ||
6331 | sp->intr_type = INTA; | ||
6332 | } | ||
6333 | |||
6334 | /* | 6317 | /* |
6335 | * Initializing the Rx buffers. For now we are considering only 1 | 6318 | * Initializing the Rx buffers. For now we are considering only 1 |
6336 | * Rx ring and initializing buffers into 30 Rx blocks | 6319 | * Rx ring and initializing buffers into 30 Rx blocks |
@@ -6361,21 +6344,39 @@ static int s2io_card_up(nic_t * sp) | |||
6361 | sp->lro_max_aggr_per_sess = lro_max_pkts; | 6344 | sp->lro_max_aggr_per_sess = lro_max_pkts; |
6362 | } | 6345 | } |
6363 | 6346 | ||
6364 | /* Enable tasklet for the device */ | ||
6365 | tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev); | ||
6366 | |||
6367 | /* Enable Rx Traffic and interrupts on the NIC */ | 6347 | /* Enable Rx Traffic and interrupts on the NIC */ |
6368 | if (start_nic(sp)) { | 6348 | if (start_nic(sp)) { |
6369 | DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name); | 6349 | DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name); |
6370 | tasklet_kill(&sp->task); | ||
6371 | s2io_reset(sp); | 6350 | s2io_reset(sp); |
6372 | free_irq(dev->irq, dev); | 6351 | free_rx_buffers(sp); |
6352 | return -ENODEV; | ||
6353 | } | ||
6354 | |||
6355 | /* Add interrupt service routine */ | ||
6356 | if (s2io_add_isr(sp) != 0) { | ||
6357 | if (sp->intr_type == MSI_X) | ||
6358 | s2io_rem_isr(sp); | ||
6359 | s2io_reset(sp); | ||
6373 | free_rx_buffers(sp); | 6360 | free_rx_buffers(sp); |
6374 | return -ENODEV; | 6361 | return -ENODEV; |
6375 | } | 6362 | } |
6376 | 6363 | ||
6377 | S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2)); | 6364 | S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2)); |
6378 | 6365 | ||
6366 | /* Enable tasklet for the device */ | ||
6367 | tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev); | ||
6368 | |||
6369 | /* Enable select interrupts */ | ||
6370 | if (sp->intr_type != INTA) | ||
6371 | en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS); | ||
6372 | else { | ||
6373 | interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR; | ||
6374 | interruptible |= TX_PIC_INTR | RX_PIC_INTR; | ||
6375 | interruptible |= TX_MAC_INTR | RX_MAC_INTR; | ||
6376 | en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS); | ||
6377 | } | ||
6378 | |||
6379 | |||
6379 | atomic_set(&sp->card_state, CARD_UP); | 6380 | atomic_set(&sp->card_state, CARD_UP); |
6380 | return 0; | 6381 | return 0; |
6381 | } | 6382 | } |
@@ -6395,7 +6396,7 @@ static void s2io_restart_nic(unsigned long data) | |||
6395 | struct net_device *dev = (struct net_device *) data; | 6396 | struct net_device *dev = (struct net_device *) data; |
6396 | nic_t *sp = dev->priv; | 6397 | nic_t *sp = dev->priv; |
6397 | 6398 | ||
6398 | s2io_card_down(sp, 0); | 6399 | s2io_card_down(sp); |
6399 | if (s2io_card_up(sp)) { | 6400 | if (s2io_card_up(sp)) { |
6400 | DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", | 6401 | DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", |
6401 | dev->name); | 6402 | dev->name); |
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h index c43f52179708..217097bc22f1 100644 --- a/drivers/net/s2io.h +++ b/drivers/net/s2io.h | |||
@@ -829,8 +829,7 @@ struct s2io_nic { | |||
829 | #define MSIX_FLG 0xA5 | 829 | #define MSIX_FLG 0xA5 |
830 | struct msix_entry *entries; | 830 | struct msix_entry *entries; |
831 | struct s2io_msix_entry *s2io_entries; | 831 | struct s2io_msix_entry *s2io_entries; |
832 | char desc1[35]; | 832 | char desc[MAX_REQUESTED_MSI_X][25]; |
833 | char desc2[35]; | ||
834 | 833 | ||
835 | int avail_msix_vectors; /* No. of MSI-X vectors granted by system */ | 834 | int avail_msix_vectors; /* No. of MSI-X vectors granted by system */ |
836 | 835 | ||
@@ -1002,7 +1001,7 @@ static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag); | |||
1002 | static struct ethtool_ops netdev_ethtool_ops; | 1001 | static struct ethtool_ops netdev_ethtool_ops; |
1003 | static void s2io_set_link(unsigned long data); | 1002 | static void s2io_set_link(unsigned long data); |
1004 | static int s2io_set_swapper(nic_t * sp); | 1003 | static int s2io_set_swapper(nic_t * sp); |
1005 | static void s2io_card_down(nic_t *nic, int flag); | 1004 | static void s2io_card_down(nic_t *nic); |
1006 | static int s2io_card_up(nic_t *nic); | 1005 | static int s2io_card_up(nic_t *nic); |
1007 | static int get_xena_rev_id(struct pci_dev *pdev); | 1006 | static int get_xena_rev_id(struct pci_dev *pdev); |
1008 | static void restore_xmsi_data(nic_t *nic); | 1007 | static void restore_xmsi_data(nic_t *nic); |
diff --git a/drivers/net/sk98lin/h/xmac_ii.h b/drivers/net/sk98lin/h/xmac_ii.h index 2b19f8ad0318..7f8e6d0084c7 100644 --- a/drivers/net/sk98lin/h/xmac_ii.h +++ b/drivers/net/sk98lin/h/xmac_ii.h | |||
@@ -1473,7 +1473,7 @@ extern "C" { | |||
1473 | #define GM_TXCR_FORCE_JAM (1<<15) /* Bit 15: Force Jam / Flow-Control */ | 1473 | #define GM_TXCR_FORCE_JAM (1<<15) /* Bit 15: Force Jam / Flow-Control */ |
1474 | #define GM_TXCR_CRC_DIS (1<<14) /* Bit 14: Disable insertion of CRC */ | 1474 | #define GM_TXCR_CRC_DIS (1<<14) /* Bit 14: Disable insertion of CRC */ |
1475 | #define GM_TXCR_PAD_DIS (1<<13) /* Bit 13: Disable padding of packets */ | 1475 | #define GM_TXCR_PAD_DIS (1<<13) /* Bit 13: Disable padding of packets */ |
1476 | #define GM_TXCR_COL_THR_MSK (1<<10) /* Bit 12..10: Collision Threshold */ | 1476 | #define GM_TXCR_COL_THR_MSK (7<<10) /* Bit 12..10: Collision Threshold */ |
1477 | 1477 | ||
1478 | #define TX_COL_THR(x) (SHIFT10(x) & GM_TXCR_COL_THR_MSK) | 1478 | #define TX_COL_THR(x) (SHIFT10(x) & GM_TXCR_COL_THR_MSK) |
1479 | 1479 | ||
diff --git a/drivers/net/skge.c b/drivers/net/skge.c index 82200bfaa8ed..7de9a07b2ac2 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c | |||
@@ -516,10 +516,7 @@ static int skge_set_pauseparam(struct net_device *dev, | |||
516 | /* Chip internal frequency for clock calculations */ | 516 | /* Chip internal frequency for clock calculations */ |
517 | static inline u32 hwkhz(const struct skge_hw *hw) | 517 | static inline u32 hwkhz(const struct skge_hw *hw) |
518 | { | 518 | { |
519 | if (hw->chip_id == CHIP_ID_GENESIS) | 519 | return (hw->chip_id == CHIP_ID_GENESIS) ? 53125 : 78125; |
520 | return 53215; /* or: 53.125 MHz */ | ||
521 | else | ||
522 | return 78215; /* or: 78.125 MHz */ | ||
523 | } | 520 | } |
524 | 521 | ||
525 | /* Chip HZ to microseconds */ | 522 | /* Chip HZ to microseconds */ |
diff --git a/drivers/net/skge.h b/drivers/net/skge.h index ed19ff47ce11..593387b3c0dd 100644 --- a/drivers/net/skge.h +++ b/drivers/net/skge.h | |||
@@ -1734,11 +1734,11 @@ enum { | |||
1734 | GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */ | 1734 | GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */ |
1735 | GM_TXCR_CRC_DIS = 1<<14, /* Bit 14: Disable insertion of CRC */ | 1735 | GM_TXCR_CRC_DIS = 1<<14, /* Bit 14: Disable insertion of CRC */ |
1736 | GM_TXCR_PAD_DIS = 1<<13, /* Bit 13: Disable padding of packets */ | 1736 | GM_TXCR_PAD_DIS = 1<<13, /* Bit 13: Disable padding of packets */ |
1737 | GM_TXCR_COL_THR_MSK = 1<<10, /* Bit 12..10: Collision Threshold */ | 1737 | GM_TXCR_COL_THR_MSK = 7<<10, /* Bit 12..10: Collision Threshold */ |
1738 | }; | 1738 | }; |
1739 | 1739 | ||
1740 | #define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK) | 1740 | #define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK) |
1741 | #define TX_COL_DEF 0x04 | 1741 | #define TX_COL_DEF 0x04 /* late collision after 64 bytes */
1742 | 1742 | ||
1743 | /* GM_RX_CTRL 16 bit r/w Receive Control Register */ | 1743 | /* GM_RX_CTRL 16 bit r/w Receive Control Register */ |
1744 | enum { | 1744 | enum { |
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 418f169a6a31..de91609ca112 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c | |||
@@ -50,7 +50,7 @@ | |||
50 | #include "sky2.h" | 50 | #include "sky2.h" |
51 | 51 | ||
52 | #define DRV_NAME "sky2" | 52 | #define DRV_NAME "sky2" |
53 | #define DRV_VERSION "1.4" | 53 | #define DRV_VERSION "1.5" |
54 | #define PFX DRV_NAME " " | 54 | #define PFX DRV_NAME " " |
55 | 55 | ||
56 | /* | 56 | /* |
@@ -65,6 +65,7 @@ | |||
65 | #define RX_MAX_PENDING (RX_LE_SIZE/2 - 2) | 65 | #define RX_MAX_PENDING (RX_LE_SIZE/2 - 2) |
66 | #define RX_DEF_PENDING RX_MAX_PENDING | 66 | #define RX_DEF_PENDING RX_MAX_PENDING |
67 | #define RX_SKB_ALIGN 8 | 67 | #define RX_SKB_ALIGN 8 |
68 | #define RX_BUF_WRITE 16 | ||
68 | 69 | ||
69 | #define TX_RING_SIZE 512 | 70 | #define TX_RING_SIZE 512 |
70 | #define TX_DEF_PENDING (TX_RING_SIZE - 1) | 71 | #define TX_DEF_PENDING (TX_RING_SIZE - 1) |
@@ -234,7 +235,6 @@ static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state) | |||
234 | } | 235 | } |
235 | 236 | ||
236 | if (hw->chip_id == CHIP_ID_YUKON_EC_U) { | 237 | if (hw->chip_id == CHIP_ID_YUKON_EC_U) { |
237 | sky2_write16(hw, B0_CTST, Y2_HW_WOL_ON); | ||
238 | sky2_pci_write32(hw, PCI_DEV_REG3, 0); | 238 | sky2_pci_write32(hw, PCI_DEV_REG3, 0); |
239 | reg1 = sky2_pci_read32(hw, PCI_DEV_REG4); | 239 | reg1 = sky2_pci_read32(hw, PCI_DEV_REG4); |
240 | reg1 &= P_ASPM_CONTROL_MSK; | 240 | reg1 &= P_ASPM_CONTROL_MSK; |
@@ -243,6 +243,7 @@ static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state) | |||
243 | } | 243 | } |
244 | 244 | ||
245 | sky2_pci_write32(hw, PCI_DEV_REG1, reg1); | 245 | sky2_pci_write32(hw, PCI_DEV_REG1, reg1); |
246 | udelay(100); | ||
246 | 247 | ||
247 | break; | 248 | break; |
248 | 249 | ||
@@ -255,6 +256,7 @@ static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state) | |||
255 | else | 256 | else |
256 | reg1 |= (PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD); | 257 | reg1 |= (PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD); |
257 | sky2_pci_write32(hw, PCI_DEV_REG1, reg1); | 258 | sky2_pci_write32(hw, PCI_DEV_REG1, reg1); |
259 | udelay(100); | ||
258 | 260 | ||
259 | if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) | 261 | if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) |
260 | sky2_write8(hw, B2_Y2_CLK_GATE, 0); | 262 | sky2_write8(hw, B2_Y2_CLK_GATE, 0); |
@@ -1159,7 +1161,7 @@ static unsigned tx_le_req(const struct sk_buff *skb) | |||
1159 | count = sizeof(dma_addr_t) / sizeof(u32); | 1161 | count = sizeof(dma_addr_t) / sizeof(u32); |
1160 | count += skb_shinfo(skb)->nr_frags * count; | 1162 | count += skb_shinfo(skb)->nr_frags * count; |
1161 | 1163 | ||
1162 | if (skb_shinfo(skb)->gso_size) | 1164 | if (skb_is_gso(skb)) |
1163 | ++count; | 1165 | ++count; |
1164 | 1166 | ||
1165 | if (skb->ip_summed == CHECKSUM_HW) | 1167 | if (skb->ip_summed == CHECKSUM_HW) |
@@ -1389,7 +1391,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done) | |||
1389 | } | 1391 | } |
1390 | 1392 | ||
1391 | sky2->tx_cons = put; | 1393 | sky2->tx_cons = put; |
1392 | if (tx_avail(sky2) > MAX_SKB_TX_LE) | 1394 | if (tx_avail(sky2) > MAX_SKB_TX_LE + 4) |
1393 | netif_wake_queue(dev); | 1395 | netif_wake_queue(dev); |
1394 | } | 1396 | } |
1395 | 1397 | ||
@@ -1888,9 +1890,6 @@ resubmit: | |||
1888 | re->skb->ip_summed = CHECKSUM_NONE; | 1890 | re->skb->ip_summed = CHECKSUM_NONE; |
1889 | sky2_rx_add(sky2, re->mapaddr); | 1891 | sky2_rx_add(sky2, re->mapaddr); |
1890 | 1892 | ||
1891 | /* Tell receiver about new buffers. */ | ||
1892 | sky2_put_idx(sky2->hw, rxqaddr[sky2->port], sky2->rx_put); | ||
1893 | |||
1894 | return skb; | 1893 | return skb; |
1895 | 1894 | ||
1896 | oversize: | 1895 | oversize: |
@@ -1937,7 +1936,9 @@ static inline int sky2_more_work(const struct sky2_hw *hw) | |||
1937 | /* Process status response ring */ | 1936 | /* Process status response ring */ |
1938 | static int sky2_status_intr(struct sky2_hw *hw, int to_do) | 1937 | static int sky2_status_intr(struct sky2_hw *hw, int to_do) |
1939 | { | 1938 | { |
1939 | struct sky2_port *sky2; | ||
1940 | int work_done = 0; | 1940 | int work_done = 0; |
1941 | unsigned buf_write[2] = { 0, 0 }; | ||
1941 | u16 hwidx = sky2_read16(hw, STAT_PUT_IDX); | 1942 | u16 hwidx = sky2_read16(hw, STAT_PUT_IDX); |
1942 | 1943 | ||
1943 | rmb(); | 1944 | rmb(); |
@@ -1945,7 +1946,6 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do) | |||
1945 | while (hw->st_idx != hwidx) { | 1946 | while (hw->st_idx != hwidx) { |
1946 | struct sky2_status_le *le = hw->st_le + hw->st_idx; | 1947 | struct sky2_status_le *le = hw->st_le + hw->st_idx; |
1947 | struct net_device *dev; | 1948 | struct net_device *dev; |
1948 | struct sky2_port *sky2; | ||
1949 | struct sk_buff *skb; | 1949 | struct sk_buff *skb; |
1950 | u32 status; | 1950 | u32 status; |
1951 | u16 length; | 1951 | u16 length; |
@@ -1978,6 +1978,14 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do) | |||
1978 | #endif | 1978 | #endif |
1979 | netif_receive_skb(skb); | 1979 | netif_receive_skb(skb); |
1980 | 1980 | ||
1981 | /* Update receiver after 16 frames */ | ||
1982 | if (++buf_write[le->link] == RX_BUF_WRITE) { | ||
1983 | sky2_put_idx(hw, rxqaddr[le->link], | ||
1984 | sky2->rx_put); | ||
1985 | buf_write[le->link] = 0; | ||
1986 | } | ||
1987 | |||
1988 | /* Stop after net poll weight */ | ||
1981 | if (++work_done >= to_do) | 1989 | if (++work_done >= to_do) |
1982 | goto exit_loop; | 1990 | goto exit_loop; |
1983 | break; | 1991 | break; |
@@ -2016,6 +2024,16 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do) | |||
2016 | } | 2024 | } |
2017 | 2025 | ||
2018 | exit_loop: | 2026 | exit_loop: |
2027 | if (buf_write[0]) { | ||
2028 | sky2 = netdev_priv(hw->dev[0]); | ||
2029 | sky2_put_idx(hw, Q_R1, sky2->rx_put); | ||
2030 | } | ||
2031 | |||
2032 | if (buf_write[1]) { | ||
2033 | sky2 = netdev_priv(hw->dev[1]); | ||
2034 | sky2_put_idx(hw, Q_R2, sky2->rx_put); | ||
2035 | } | ||
2036 | |||
2019 | return work_done; | 2037 | return work_done; |
2020 | } | 2038 | } |
2021 | 2039 | ||
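The two hunks above move the receiver doorbell out of the per-packet path: sky2_rx_submit no longer calls sky2_put_idx for every buffer, and sky2_status_intr instead counts received frames per port, writing the RX put index only every RX_BUF_WRITE (16) frames and flushing any remainder at exit_loop. A minimal userspace sketch of that batching pattern follows; doorbell() and the counters are illustrative stand-ins, not the driver's API.

#include <stdio.h>

#define RX_BUF_WRITE 16         /* flush threshold, mirrors the driver constant */
#define NPORTS       2

/* stand-in for the hardware put-index register write */
static void doorbell(int port, unsigned idx)
{
        printf("port %d: put index -> %u\n", port, idx);
}

int main(void)
{
        unsigned buf_write[NPORTS] = { 0, 0 };
        unsigned rx_put[NPORTS] = { 0, 0 };
        int frames[] = { 0, 0, 1, 0, 1, 0, 0 };         /* port of each received frame */
        unsigned i, n = sizeof(frames) / sizeof(frames[0]);

        for (i = 0; i < n; i++) {
                int port = frames[i];

                rx_put[port]++;                         /* buffer resubmitted to the ring */
                if (++buf_write[port] == RX_BUF_WRITE) {
                        doorbell(port, rx_put[port]);   /* periodic flush */
                        buf_write[port] = 0;
                }
        }

        /* final flush for whatever did not reach the threshold */
        for (i = 0; i < NPORTS; i++)
                if (buf_write[i])
                        doorbell((int)i, rx_put[i]);
        return 0;
}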
@@ -2186,9 +2204,6 @@ static int sky2_poll(struct net_device *dev0, int *budget) | |||
2186 | int work_done = 0; | 2204 | int work_done = 0; |
2187 | u32 status = sky2_read32(hw, B0_Y2_SP_EISR); | 2205 | u32 status = sky2_read32(hw, B0_Y2_SP_EISR); |
2188 | 2206 | ||
2189 | if (!~status) | ||
2190 | goto out; | ||
2191 | |||
2192 | if (status & Y2_IS_HW_ERR) | 2207 | if (status & Y2_IS_HW_ERR) |
2193 | sky2_hw_intr(hw); | 2208 | sky2_hw_intr(hw); |
2194 | 2209 | ||
@@ -2225,7 +2240,7 @@ static int sky2_poll(struct net_device *dev0, int *budget) | |||
2225 | 2240 | ||
2226 | if (sky2_more_work(hw)) | 2241 | if (sky2_more_work(hw)) |
2227 | return 1; | 2242 | return 1; |
2228 | out: | 2243 | |
2229 | netif_rx_complete(dev0); | 2244 | netif_rx_complete(dev0); |
2230 | 2245 | ||
2231 | sky2_read32(hw, B0_Y2_SP_LISR); | 2246 | sky2_read32(hw, B0_Y2_SP_LISR); |
@@ -2286,7 +2301,7 @@ static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk) | |||
2286 | } | 2301 | } |
2287 | 2302 | ||
2288 | 2303 | ||
2289 | static int __devinit sky2_reset(struct sky2_hw *hw) | 2304 | static int sky2_reset(struct sky2_hw *hw) |
2290 | { | 2305 | { |
2291 | u16 status; | 2306 | u16 status; |
2292 | u8 t8, pmd_type; | 2307 | u8 t8, pmd_type; |
@@ -3437,17 +3452,14 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state) | |||
3437 | return -EINVAL; | 3452 | return -EINVAL; |
3438 | 3453 | ||
3439 | del_timer_sync(&hw->idle_timer); | 3454 | del_timer_sync(&hw->idle_timer); |
3455 | netif_poll_disable(hw->dev[0]); | ||
3440 | 3456 | ||
3441 | for (i = 0; i < hw->ports; i++) { | 3457 | for (i = 0; i < hw->ports; i++) { |
3442 | struct net_device *dev = hw->dev[i]; | 3458 | struct net_device *dev = hw->dev[i]; |
3443 | 3459 | ||
3444 | if (dev) { | 3460 | if (netif_running(dev)) { |
3445 | if (!netif_running(dev)) | ||
3446 | continue; | ||
3447 | |||
3448 | sky2_down(dev); | 3461 | sky2_down(dev); |
3449 | netif_device_detach(dev); | 3462 | netif_device_detach(dev); |
3450 | netif_poll_disable(dev); | ||
3451 | } | 3463 | } |
3452 | } | 3464 | } |
3453 | 3465 | ||
@@ -3474,9 +3486,8 @@ static int sky2_resume(struct pci_dev *pdev) | |||
3474 | 3486 | ||
3475 | for (i = 0; i < hw->ports; i++) { | 3487 | for (i = 0; i < hw->ports; i++) { |
3476 | struct net_device *dev = hw->dev[i]; | 3488 | struct net_device *dev = hw->dev[i]; |
3477 | if (dev && netif_running(dev)) { | 3489 | if (netif_running(dev)) { |
3478 | netif_device_attach(dev); | 3490 | netif_device_attach(dev); |
3479 | netif_poll_enable(dev); | ||
3480 | 3491 | ||
3481 | err = sky2_up(dev); | 3492 | err = sky2_up(dev); |
3482 | if (err) { | 3493 | if (err) { |
@@ -3488,6 +3499,7 @@ static int sky2_resume(struct pci_dev *pdev) | |||
3488 | } | 3499 | } |
3489 | } | 3500 | } |
3490 | 3501 | ||
3502 | netif_poll_enable(hw->dev[0]); | ||
3491 | sky2_idle_start(hw); | 3503 | sky2_idle_start(hw); |
3492 | out: | 3504 | out: |
3493 | return err; | 3505 | return err; |
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h index 8a0bc5525f0a..2db8d19b22d1 100644 --- a/drivers/net/sky2.h +++ b/drivers/net/sky2.h | |||
@@ -1480,7 +1480,7 @@ enum { | |||
1480 | GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */ | 1480 | GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */ |
1481 | GM_TXCR_CRC_DIS = 1<<14, /* Bit 14: Disable insertion of CRC */ | 1481 | GM_TXCR_CRC_DIS = 1<<14, /* Bit 14: Disable insertion of CRC */ |
1482 | GM_TXCR_PAD_DIS = 1<<13, /* Bit 13: Disable padding of packets */ | 1482 | GM_TXCR_PAD_DIS = 1<<13, /* Bit 13: Disable padding of packets */ |
1483 | GM_TXCR_COL_THR_MSK = 1<<10, /* Bit 12..10: Collision Threshold */ | 1483 | GM_TXCR_COL_THR_MSK = 7<<10, /* Bit 12..10: Collision Threshold */ |
1484 | }; | 1484 | }; |
1485 | 1485 | ||
1486 | #define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK) | 1486 | #define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK) |
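The sky2.h hunk widens GM_TXCR_COL_THR_MSK from 1<<10 to 7<<10: the collision threshold occupies bits 12..10, so the mask must cover all three bits or TX_COL_THR(x) silently discards the upper two. A standalone check of the difference (plain C with the values only, not the driver header):

#include <stdio.h>

#define OLD_MSK (1 << 10)                       /* broken: covers bit 10 only */
#define NEW_MSK (7 << 10)                       /* correct: covers bits 12..10 */
#define TX_COL_THR(x, msk) (((x) << 10) & (msk))

int main(void)
{
        unsigned x = 4;                         /* a 3-bit threshold value */

        printf("old mask: 0x%04x\n", TX_COL_THR(x, OLD_MSK));  /* 0x0000 - value lost */
        printf("new mask: 0x%04x\n", TX_COL_THR(x, NEW_MSK));  /* 0x1000 - bit 12 kept */
        return 0;
}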
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h index b4028049ed76..4ec4b4d23ae5 100644 --- a/drivers/net/smc91x.h +++ b/drivers/net/smc91x.h | |||
@@ -354,6 +354,24 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r, | |||
354 | 354 | ||
355 | #define SMC_IRQ_FLAGS (0) | 355 | #define SMC_IRQ_FLAGS (0) |
356 | 356 | ||
357 | #elif defined(CONFIG_ARCH_VERSATILE) | ||
358 | |||
359 | #define SMC_CAN_USE_8BIT 1 | ||
360 | #define SMC_CAN_USE_16BIT 1 | ||
361 | #define SMC_CAN_USE_32BIT 1 | ||
362 | #define SMC_NOWAIT 1 | ||
363 | |||
364 | #define SMC_inb(a, r) readb((a) + (r)) | ||
365 | #define SMC_inw(a, r) readw((a) + (r)) | ||
366 | #define SMC_inl(a, r) readl((a) + (r)) | ||
367 | #define SMC_outb(v, a, r) writeb(v, (a) + (r)) | ||
368 | #define SMC_outw(v, a, r) writew(v, (a) + (r)) | ||
369 | #define SMC_outl(v, a, r) writel(v, (a) + (r)) | ||
370 | #define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) | ||
371 | #define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) | ||
372 | |||
373 | #define SMC_IRQ_FLAGS (0) | ||
374 | |||
357 | #else | 375 | #else |
358 | 376 | ||
359 | #define SMC_CAN_USE_8BIT 1 | 377 | #define SMC_CAN_USE_8BIT 1 |
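The new CONFIG_ARCH_VERSATILE block in smc91x.h maps the SMC accessors directly onto readb/readw/readl and the matching writes at base + offset, plus string variants for bulk transfers. The shape of that accessor layer can be sketched with an in-memory register file standing in for the MMIO window; readl/writel here are local stubs, not the kernel's implementations.

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[16];                       /* fake 64-byte MMIO window */

/* local stand-ins for the kernel accessors of the same name */
static uint32_t readl(volatile void *a)              { return *(volatile uint32_t *)a; }
static void     writel(uint32_t v, volatile void *a) { *(volatile uint32_t *)a = v; }

/* accessor macros shaped like the CONFIG_ARCH_VERSATILE block above */
#define SMC_inl(a, r)     readl((uint8_t *)(a) + (r))
#define SMC_outl(v, a, r) writel(v, (uint8_t *)(a) + (r))

int main(void)
{
        void *base = regs;

        SMC_outl(0xdeadbeefu, base, 8);         /* write the register at byte offset 8 */
        printf("0x%08x\n", (unsigned)SMC_inl(base, 8));         /* reads back 0xdeadbeef */
        return 0;
}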
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c index b30290d53f79..ec1a8e2d458e 100644 --- a/drivers/net/spider_net.c +++ b/drivers/net/spider_net.c | |||
@@ -84,7 +84,7 @@ MODULE_DEVICE_TABLE(pci, spider_net_pci_tbl); | |||
84 | * | 84 | * |
85 | * returns the content of the specified SMMIO register. | 85 | * returns the content of the specified SMMIO register. |
86 | */ | 86 | */ |
87 | static u32 | 87 | static inline u32 |
88 | spider_net_read_reg(struct spider_net_card *card, u32 reg) | 88 | spider_net_read_reg(struct spider_net_card *card, u32 reg) |
89 | { | 89 | { |
90 | u32 value; | 90 | u32 value; |
@@ -101,7 +101,7 @@ spider_net_read_reg(struct spider_net_card *card, u32 reg) | |||
101 | * @reg: register to write to | 101 | * @reg: register to write to |
102 | * @value: value to write into the specified SMMIO register | 102 | * @value: value to write into the specified SMMIO register |
103 | */ | 103 | */ |
104 | static void | 104 | static inline void |
105 | spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value) | 105 | spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value) |
106 | { | 106 | { |
107 | value = cpu_to_le32(value); | 107 | value = cpu_to_le32(value); |
@@ -259,39 +259,10 @@ spider_net_get_mac_address(struct net_device *netdev) | |||
259 | * | 259 | * |
260 | * returns the status as in the dmac_cmd_status field of the descriptor | 260 | * returns the status as in the dmac_cmd_status field of the descriptor |
261 | */ | 261 | */ |
262 | static enum spider_net_descr_status | 262 | static inline int |
263 | spider_net_get_descr_status(struct spider_net_descr *descr) | 263 | spider_net_get_descr_status(struct spider_net_descr *descr) |
264 | { | 264 | { |
265 | u32 cmd_status; | 265 | return descr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK; |
266 | |||
267 | cmd_status = descr->dmac_cmd_status; | ||
268 | cmd_status >>= SPIDER_NET_DESCR_IND_PROC_SHIFT; | ||
269 | /* no need to mask out any bits, as cmd_status is 32 bits wide only | ||
270 | * (and unsigned) */ | ||
271 | return cmd_status; | ||
272 | } | ||
273 | |||
274 | /** | ||
275 | * spider_net_set_descr_status -- sets the status of a descriptor | ||
276 | * @descr: descriptor to change | ||
277 | * @status: status to set in the descriptor | ||
278 | * | ||
279 | * changes the status to the specified value. Doesn't change other bits | ||
280 | * in the status | ||
281 | */ | ||
282 | static void | ||
283 | spider_net_set_descr_status(struct spider_net_descr *descr, | ||
284 | enum spider_net_descr_status status) | ||
285 | { | ||
286 | u32 cmd_status; | ||
287 | /* read the status */ | ||
288 | cmd_status = descr->dmac_cmd_status; | ||
289 | /* clean the upper 4 bits */ | ||
290 | cmd_status &= SPIDER_NET_DESCR_IND_PROC_MASKO; | ||
291 | /* add the status to it */ | ||
292 | cmd_status |= ((u32)status)<<SPIDER_NET_DESCR_IND_PROC_SHIFT; | ||
293 | /* and write it back */ | ||
294 | descr->dmac_cmd_status = cmd_status; | ||
295 | } | 266 | } |
296 | 267 | ||
297 | /** | 268 | /** |
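The spider_net rewrite drops the shifted spider_net_descr_status enum and the set_descr_status helper: the state now lives in the top four bits of dmac_cmd_status and is read by masking with SPIDER_NET_DESCR_IND_PROC_MASK, so constants such as SPIDER_NET_DESCR_CARDOWNED can be compared and OR-ed without any shifting. A compact illustration of the masked comparison, using values copied from the spider_net.h hunk further down (the struct is a reduced stand-in):

#include <stdint.h>
#include <stdio.h>

#define DESCR_IND_PROC_MASK  0xF0000000u
#define DESCR_CARDOWNED      0xA0000000u
#define DESCR_NOT_IN_USE     0xF0000000u
#define DMAC_NOINTR_COMPLETE 0x00800000u

struct descr { uint32_t dmac_cmd_status; };

static uint32_t get_status(const struct descr *d)
{
        /* new style: no shift, just mask the upper four bits */
        return d->dmac_cmd_status & DESCR_IND_PROC_MASK;
}

int main(void)
{
        struct descr d;

        /* hand the descriptor to the card with the no-interrupt flag set */
        d.dmac_cmd_status = DESCR_CARDOWNED | DMAC_NOINTR_COMPLETE;
        printf("card owned: %s\n", get_status(&d) == DESCR_CARDOWNED ? "yes" : "no");

        d.dmac_cmd_status = DESCR_NOT_IN_USE;
        printf("not in use: %s\n", get_status(&d) == DESCR_NOT_IN_USE ? "yes" : "no");
        return 0;
}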
@@ -328,24 +299,23 @@ spider_net_free_chain(struct spider_net_card *card, | |||
328 | static int | 299 | static int |
329 | spider_net_init_chain(struct spider_net_card *card, | 300 | spider_net_init_chain(struct spider_net_card *card, |
330 | struct spider_net_descr_chain *chain, | 301 | struct spider_net_descr_chain *chain, |
331 | struct spider_net_descr *start_descr, int no) | 302 | struct spider_net_descr *start_descr, |
303 | int direction, int no) | ||
332 | { | 304 | { |
333 | int i; | 305 | int i; |
334 | struct spider_net_descr *descr; | 306 | struct spider_net_descr *descr; |
335 | dma_addr_t buf; | 307 | dma_addr_t buf; |
336 | 308 | ||
337 | atomic_set(&card->rx_chain_refill,0); | ||
338 | |||
339 | descr = start_descr; | 309 | descr = start_descr; |
340 | memset(descr, 0, sizeof(*descr) * no); | 310 | memset(descr, 0, sizeof(*descr) * no); |
341 | 311 | ||
342 | /* set up the hardware pointers in each descriptor */ | 312 | /* set up the hardware pointers in each descriptor */ |
343 | for (i=0; i<no; i++, descr++) { | 313 | for (i=0; i<no; i++, descr++) { |
344 | spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); | 314 | descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; |
345 | 315 | ||
346 | buf = pci_map_single(card->pdev, descr, | 316 | buf = pci_map_single(card->pdev, descr, |
347 | SPIDER_NET_DESCR_SIZE, | 317 | SPIDER_NET_DESCR_SIZE, |
348 | PCI_DMA_BIDIRECTIONAL); | 318 | direction); |
349 | 319 | ||
350 | if (buf == DMA_ERROR_CODE) | 320 | if (buf == DMA_ERROR_CODE) |
351 | goto iommu_error; | 321 | goto iommu_error; |
@@ -360,10 +330,11 @@ spider_net_init_chain(struct spider_net_card *card, | |||
360 | start_descr->prev = descr-1; | 330 | start_descr->prev = descr-1; |
361 | 331 | ||
362 | descr = start_descr; | 332 | descr = start_descr; |
363 | for (i=0; i < no; i++, descr++) { | 333 | if (direction == PCI_DMA_FROMDEVICE) |
364 | descr->next_descr_addr = descr->next->bus_addr; | 334 | for (i=0; i < no; i++, descr++) |
365 | } | 335 | descr->next_descr_addr = descr->next->bus_addr; |
366 | 336 | ||
337 | spin_lock_init(&chain->lock); | ||
367 | chain->head = start_descr; | 338 | chain->head = start_descr; |
368 | chain->tail = start_descr; | 339 | chain->tail = start_descr; |
369 | 340 | ||
@@ -375,7 +346,7 @@ iommu_error: | |||
375 | if (descr->bus_addr) | 346 | if (descr->bus_addr) |
376 | pci_unmap_single(card->pdev, descr->bus_addr, | 347 | pci_unmap_single(card->pdev, descr->bus_addr, |
377 | SPIDER_NET_DESCR_SIZE, | 348 | SPIDER_NET_DESCR_SIZE, |
378 | PCI_DMA_BIDIRECTIONAL); | 349 | direction); |
379 | return -ENOMEM; | 350 | return -ENOMEM; |
380 | } | 351 | } |
381 | 352 | ||
@@ -396,7 +367,7 @@ spider_net_free_rx_chain_contents(struct spider_net_card *card) | |||
396 | dev_kfree_skb(descr->skb); | 367 | dev_kfree_skb(descr->skb); |
397 | pci_unmap_single(card->pdev, descr->buf_addr, | 368 | pci_unmap_single(card->pdev, descr->buf_addr, |
398 | SPIDER_NET_MAX_FRAME, | 369 | SPIDER_NET_MAX_FRAME, |
399 | PCI_DMA_BIDIRECTIONAL); | 370 | PCI_DMA_FROMDEVICE); |
400 | } | 371 | } |
401 | descr = descr->next; | 372 | descr = descr->next; |
402 | } | 373 | } |
@@ -446,15 +417,16 @@ spider_net_prepare_rx_descr(struct spider_net_card *card, | |||
446 | skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset); | 417 | skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset); |
447 | /* io-mmu-map the skb */ | 418 | /* io-mmu-map the skb */ |
448 | buf = pci_map_single(card->pdev, descr->skb->data, | 419 | buf = pci_map_single(card->pdev, descr->skb->data, |
449 | SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL); | 420 | SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); |
450 | descr->buf_addr = buf; | 421 | descr->buf_addr = buf; |
451 | if (buf == DMA_ERROR_CODE) { | 422 | if (buf == DMA_ERROR_CODE) { |
452 | dev_kfree_skb_any(descr->skb); | 423 | dev_kfree_skb_any(descr->skb); |
453 | if (netif_msg_rx_err(card) && net_ratelimit()) | 424 | if (netif_msg_rx_err(card) && net_ratelimit()) |
454 | pr_err("Could not iommu-map rx buffer\n"); | 425 | pr_err("Could not iommu-map rx buffer\n"); |
455 | spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); | 426 | descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; |
456 | } else { | 427 | } else { |
457 | descr->dmac_cmd_status = SPIDER_NET_DMAC_RX_CARDOWNED; | 428 | descr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED | |
429 | SPIDER_NET_DMAC_NOINTR_COMPLETE; | ||
458 | } | 430 | } |
459 | 431 | ||
460 | return error; | 432 | return error; |
@@ -468,7 +440,7 @@ spider_net_prepare_rx_descr(struct spider_net_card *card, | |||
468 | * chip by writing to the appropriate register. DMA is enabled in | 440 | * chip by writing to the appropriate register. DMA is enabled in |
469 | * spider_net_enable_rxdmac. | 441 | * spider_net_enable_rxdmac. |
470 | */ | 442 | */ |
471 | static void | 443 | static inline void |
472 | spider_net_enable_rxchtails(struct spider_net_card *card) | 444 | spider_net_enable_rxchtails(struct spider_net_card *card) |
473 | { | 445 | { |
474 | /* assume chain is aligned correctly */ | 446 | /* assume chain is aligned correctly */ |
@@ -483,7 +455,7 @@ spider_net_enable_rxchtails(struct spider_net_card *card) | |||
483 | * spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN | 455 | * spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN |
484 | * in the GDADMACCNTR register | 456 | * in the GDADMACCNTR register |
485 | */ | 457 | */ |
486 | static void | 458 | static inline void |
487 | spider_net_enable_rxdmac(struct spider_net_card *card) | 459 | spider_net_enable_rxdmac(struct spider_net_card *card) |
488 | { | 460 | { |
489 | wmb(); | 461 | wmb(); |
@@ -500,23 +472,24 @@ spider_net_enable_rxdmac(struct spider_net_card *card) | |||
500 | static void | 472 | static void |
501 | spider_net_refill_rx_chain(struct spider_net_card *card) | 473 | spider_net_refill_rx_chain(struct spider_net_card *card) |
502 | { | 474 | { |
503 | struct spider_net_descr_chain *chain; | 475 | struct spider_net_descr_chain *chain = &card->rx_chain; |
504 | 476 | unsigned long flags; | |
505 | chain = &card->rx_chain; | ||
506 | 477 | ||
507 | /* one context doing the refill (and a second context seeing that | 478 | /* one context doing the refill (and a second context seeing that |
508 | * and omitting it) is ok. If called by NAPI, we'll be called again | 479 | * and omitting it) is ok. If called by NAPI, we'll be called again |
509 | * as spider_net_decode_one_descr is called several times. If some | 480 | * as spider_net_decode_one_descr is called several times. If some |
510 | * interrupt calls us, the NAPI is about to clean up anyway. */ | 481 | * interrupt calls us, the NAPI is about to clean up anyway. */ |
511 | if (atomic_inc_return(&card->rx_chain_refill) == 1) | 482 | if (!spin_trylock_irqsave(&chain->lock, flags)) |
512 | while (spider_net_get_descr_status(chain->head) == | 483 | return; |
513 | SPIDER_NET_DESCR_NOT_IN_USE) { | 484 | |
514 | if (spider_net_prepare_rx_descr(card, chain->head)) | 485 | while (spider_net_get_descr_status(chain->head) == |
515 | break; | 486 | SPIDER_NET_DESCR_NOT_IN_USE) { |
516 | chain->head = chain->head->next; | 487 | if (spider_net_prepare_rx_descr(card, chain->head)) |
517 | } | 488 | break; |
489 | chain->head = chain->head->next; | ||
490 | } | ||
518 | 491 | ||
519 | atomic_dec(&card->rx_chain_refill); | 492 | spin_unlock_irqrestore(&chain->lock, flags); |
520 | } | 493 | } |
521 | 494 | ||
522 | /** | 495 | /** |
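spider_net_refill_rx_chain previously used an atomic counter as a crude one-refiller gate; the new code takes chain->lock with spin_trylock_irqsave and simply returns if another context already holds it. The same try-the-lock-or-skip idiom in portable userspace C, with a pthread mutex in place of the spinlock and a counter in place of the descriptor loop (a sketch only; build with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t chain_lock = PTHREAD_MUTEX_INITIALIZER;
static int refills;

/* at most one caller does the refill; a concurrent caller just returns */
static void refill(void)
{
        if (pthread_mutex_trylock(&chain_lock) != 0)
                return;                         /* someone else is refilling */

        refills++;                              /* stands in for the descriptor loop */

        pthread_mutex_unlock(&chain_lock);
}

int main(void)
{
        pthread_mutex_lock(&chain_lock);        /* pretend another context holds the lock */
        refill();                               /* skipped */
        pthread_mutex_unlock(&chain_lock);

        refill();                               /* runs */
        printf("refill ran %d time(s)\n", refills);     /* prints 1 */
        return 0;
}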
@@ -554,111 +527,6 @@ error: | |||
554 | } | 527 | } |
555 | 528 | ||
556 | /** | 529 | /** |
557 | * spider_net_release_tx_descr - processes a used tx descriptor | ||
558 | * @card: card structure | ||
559 | * @descr: descriptor to release | ||
560 | * | ||
561 | * releases a used tx descriptor (unmapping, freeing of skb) | ||
562 | */ | ||
563 | static void | ||
564 | spider_net_release_tx_descr(struct spider_net_card *card, | ||
565 | struct spider_net_descr *descr) | ||
566 | { | ||
567 | struct sk_buff *skb; | ||
568 | |||
569 | /* unmap the skb */ | ||
570 | skb = descr->skb; | ||
571 | pci_unmap_single(card->pdev, descr->buf_addr, skb->len, | ||
572 | PCI_DMA_BIDIRECTIONAL); | ||
573 | |||
574 | dev_kfree_skb_any(skb); | ||
575 | |||
576 | /* set status to not used */ | ||
577 | spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); | ||
578 | } | ||
579 | |||
580 | /** | ||
581 | * spider_net_release_tx_chain - processes sent tx descriptors | ||
582 | * @card: adapter structure | ||
583 | * @brutal: if set, don't care about whether descriptor seems to be in use | ||
584 | * | ||
585 | * returns 0 if the tx ring is empty, otherwise 1. | ||
586 | * | ||
587 | * spider_net_release_tx_chain releases the tx descriptors that spider has | ||
588 | * finished with (if non-brutal) or simply release tx descriptors (if brutal). | ||
589 | * If some other context is calling this function, we return 1 so that we're | ||
590 | * scheduled again (if we were scheduled) and will not lose initiative. | ||
591 | */ | ||
592 | static int | ||
593 | spider_net_release_tx_chain(struct spider_net_card *card, int brutal) | ||
594 | { | ||
595 | struct spider_net_descr_chain *tx_chain = &card->tx_chain; | ||
596 | enum spider_net_descr_status status; | ||
597 | |||
598 | if (atomic_inc_return(&card->tx_chain_release) != 1) { | ||
599 | atomic_dec(&card->tx_chain_release); | ||
600 | return 1; | ||
601 | } | ||
602 | |||
603 | for (;;) { | ||
604 | status = spider_net_get_descr_status(tx_chain->tail); | ||
605 | switch (status) { | ||
606 | case SPIDER_NET_DESCR_CARDOWNED: | ||
607 | if (!brutal) | ||
608 | goto out; | ||
609 | /* fallthrough, if we release the descriptors | ||
610 | * brutally (then we don't care about | ||
611 | * SPIDER_NET_DESCR_CARDOWNED) */ | ||
612 | case SPIDER_NET_DESCR_RESPONSE_ERROR: | ||
613 | case SPIDER_NET_DESCR_PROTECTION_ERROR: | ||
614 | case SPIDER_NET_DESCR_FORCE_END: | ||
615 | if (netif_msg_tx_err(card)) | ||
616 | pr_err("%s: forcing end of tx descriptor " | ||
617 | "with status x%02x\n", | ||
618 | card->netdev->name, status); | ||
619 | card->netdev_stats.tx_dropped++; | ||
620 | break; | ||
621 | |||
622 | case SPIDER_NET_DESCR_COMPLETE: | ||
623 | card->netdev_stats.tx_packets++; | ||
624 | card->netdev_stats.tx_bytes += | ||
625 | tx_chain->tail->skb->len; | ||
626 | break; | ||
627 | |||
628 | default: /* any other value (== SPIDER_NET_DESCR_NOT_IN_USE) */ | ||
629 | goto out; | ||
630 | } | ||
631 | spider_net_release_tx_descr(card, tx_chain->tail); | ||
632 | tx_chain->tail = tx_chain->tail->next; | ||
633 | } | ||
634 | out: | ||
635 | atomic_dec(&card->tx_chain_release); | ||
636 | |||
637 | netif_wake_queue(card->netdev); | ||
638 | |||
639 | if (status == SPIDER_NET_DESCR_CARDOWNED) | ||
640 | return 1; | ||
641 | return 0; | ||
642 | } | ||
643 | |||
644 | /** | ||
645 | * spider_net_cleanup_tx_ring - cleans up the TX ring | ||
646 | * @card: card structure | ||
647 | * | ||
648 | * spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use | ||
649 | * interrupts to cleanup our TX ring) and returns sent packets to the stack | ||
650 | * by freeing them | ||
651 | */ | ||
652 | static void | ||
653 | spider_net_cleanup_tx_ring(struct spider_net_card *card) | ||
654 | { | ||
655 | if ( (spider_net_release_tx_chain(card, 0)) && | ||
656 | (card->netdev->flags & IFF_UP) ) { | ||
657 | mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER); | ||
658 | } | ||
659 | } | ||
660 | |||
661 | /** | ||
662 | * spider_net_get_multicast_hash - generates hash for multicast filter table | 530 | * spider_net_get_multicast_hash - generates hash for multicast filter table |
663 | * @addr: multicast address | 531 | * @addr: multicast address |
664 | * | 532 | * |
@@ -761,97 +629,6 @@ spider_net_disable_rxdmac(struct spider_net_card *card) | |||
761 | } | 629 | } |
762 | 630 | ||
763 | /** | 631 | /** |
764 | * spider_net_stop - called upon ifconfig down | ||
765 | * @netdev: interface device structure | ||
766 | * | ||
767 | * always returns 0 | ||
768 | */ | ||
769 | int | ||
770 | spider_net_stop(struct net_device *netdev) | ||
771 | { | ||
772 | struct spider_net_card *card = netdev_priv(netdev); | ||
773 | |||
774 | tasklet_kill(&card->rxram_full_tl); | ||
775 | netif_poll_disable(netdev); | ||
776 | netif_carrier_off(netdev); | ||
777 | netif_stop_queue(netdev); | ||
778 | del_timer_sync(&card->tx_timer); | ||
779 | |||
780 | /* disable/mask all interrupts */ | ||
781 | spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0); | ||
782 | spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0); | ||
783 | spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0); | ||
784 | |||
785 | /* free_irq(netdev->irq, netdev);*/ | ||
786 | free_irq(to_pci_dev(netdev->class_dev.dev)->irq, netdev); | ||
787 | |||
788 | spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, | ||
789 | SPIDER_NET_DMA_TX_FEND_VALUE); | ||
790 | |||
791 | /* turn off DMA, force end */ | ||
792 | spider_net_disable_rxdmac(card); | ||
793 | |||
794 | /* release chains */ | ||
795 | spider_net_release_tx_chain(card, 1); | ||
796 | |||
797 | spider_net_free_chain(card, &card->tx_chain); | ||
798 | spider_net_free_chain(card, &card->rx_chain); | ||
799 | |||
800 | return 0; | ||
801 | } | ||
802 | |||
803 | /** | ||
804 | * spider_net_get_next_tx_descr - returns the next available tx descriptor | ||
805 | * @card: device structure to get descriptor from | ||
806 | * | ||
807 | * returns the address of the next descriptor, or NULL if not available. | ||
808 | */ | ||
809 | static struct spider_net_descr * | ||
810 | spider_net_get_next_tx_descr(struct spider_net_card *card) | ||
811 | { | ||
812 | /* check, if head points to not-in-use descr */ | ||
813 | if ( spider_net_get_descr_status(card->tx_chain.head) == | ||
814 | SPIDER_NET_DESCR_NOT_IN_USE ) { | ||
815 | return card->tx_chain.head; | ||
816 | } else { | ||
817 | return NULL; | ||
818 | } | ||
819 | } | ||
820 | |||
821 | /** | ||
822 | * spider_net_set_txdescr_cmdstat - sets the tx descriptor command field | ||
823 | * @descr: descriptor structure to fill out | ||
824 | * @skb: packet to consider | ||
825 | * | ||
826 | * fills out the command and status field of the descriptor structure, | ||
827 | * depending on hardware checksum settings. | ||
828 | */ | ||
829 | static void | ||
830 | spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr, | ||
831 | struct sk_buff *skb) | ||
832 | { | ||
833 | /* make sure the other fields in the descriptor are written */ | ||
834 | wmb(); | ||
835 | |||
836 | if (skb->ip_summed != CHECKSUM_HW) { | ||
837 | descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS; | ||
838 | return; | ||
839 | } | ||
840 | |||
841 | /* is packet ip? | ||
842 | * if yes: tcp? udp? */ | ||
843 | if (skb->protocol == htons(ETH_P_IP)) { | ||
844 | if (skb->nh.iph->protocol == IPPROTO_TCP) | ||
845 | descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_TCPCS; | ||
846 | else if (skb->nh.iph->protocol == IPPROTO_UDP) | ||
847 | descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_UDPCS; | ||
848 | else /* the stack should checksum non-tcp and non-udp | ||
849 | packets on its own: NETIF_F_IP_CSUM */ | ||
850 | descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS; | ||
851 | } | ||
852 | } | ||
853 | |||
854 | /** | ||
855 | * spider_net_prepare_tx_descr - fill tx descriptor with skb data | 632 | * spider_net_prepare_tx_descr - fill tx descriptor with skb data |
856 | * @card: card structure | 633 | * @card: card structure |
857 | * @descr: descriptor structure to fill out | 634 | * @descr: descriptor structure to fill out |
@@ -864,13 +641,12 @@ spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr, | |||
864 | */ | 641 | */ |
865 | static int | 642 | static int |
866 | spider_net_prepare_tx_descr(struct spider_net_card *card, | 643 | spider_net_prepare_tx_descr(struct spider_net_card *card, |
867 | struct spider_net_descr *descr, | ||
868 | struct sk_buff *skb) | 644 | struct sk_buff *skb) |
869 | { | 645 | { |
646 | struct spider_net_descr *descr = card->tx_chain.head; | ||
870 | dma_addr_t buf; | 647 | dma_addr_t buf; |
871 | 648 | ||
872 | buf = pci_map_single(card->pdev, skb->data, | 649 | buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); |
873 | skb->len, PCI_DMA_BIDIRECTIONAL); | ||
874 | if (buf == DMA_ERROR_CODE) { | 650 | if (buf == DMA_ERROR_CODE) { |
875 | if (netif_msg_tx_err(card) && net_ratelimit()) | 651 | if (netif_msg_tx_err(card) && net_ratelimit()) |
876 | pr_err("could not iommu-map packet (%p, %i). " | 652 | pr_err("could not iommu-map packet (%p, %i). " |
@@ -880,10 +656,101 @@ spider_net_prepare_tx_descr(struct spider_net_card *card, | |||
880 | 656 | ||
881 | descr->buf_addr = buf; | 657 | descr->buf_addr = buf; |
882 | descr->buf_size = skb->len; | 658 | descr->buf_size = skb->len; |
659 | descr->next_descr_addr = 0; | ||
883 | descr->skb = skb; | 660 | descr->skb = skb; |
884 | descr->data_status = 0; | 661 | descr->data_status = 0; |
885 | 662 | ||
886 | spider_net_set_txdescr_cmdstat(descr,skb); | 663 | descr->dmac_cmd_status = |
664 | SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS; | ||
665 | if (skb->protocol == htons(ETH_P_IP)) | ||
666 | switch (skb->nh.iph->protocol) { | ||
667 | case IPPROTO_TCP: | ||
668 | descr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP; | ||
669 | break; | ||
670 | case IPPROTO_UDP: | ||
671 | descr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP; | ||
672 | break; | ||
673 | } | ||
674 | |||
675 | descr->prev->next_descr_addr = descr->bus_addr; | ||
676 | |||
677 | return 0; | ||
678 | } | ||
679 | |||
680 | /** | ||
681 | * spider_net_release_tx_descr - processes a used tx descriptor | ||
682 | * @card: card structure | ||
683 | * @descr: descriptor to release | ||
684 | * | ||
685 | * releases a used tx descriptor (unmapping, freeing of skb) | ||
686 | */ | ||
687 | static inline void | ||
688 | spider_net_release_tx_descr(struct spider_net_card *card) | ||
689 | { | ||
690 | struct spider_net_descr *descr = card->tx_chain.tail; | ||
691 | struct sk_buff *skb; | ||
692 | |||
693 | card->tx_chain.tail = card->tx_chain.tail->next; | ||
694 | descr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE; | ||
695 | |||
696 | /* unmap the skb */ | ||
697 | skb = descr->skb; | ||
698 | pci_unmap_single(card->pdev, descr->buf_addr, skb->len, | ||
699 | PCI_DMA_TODEVICE); | ||
700 | dev_kfree_skb_any(skb); | ||
701 | } | ||
702 | |||
703 | /** | ||
704 | * spider_net_release_tx_chain - processes sent tx descriptors | ||
705 | * @card: adapter structure | ||
706 | * @brutal: if set, don't care about whether descriptor seems to be in use | ||
707 | * | ||
708 | * returns 0 if the tx ring is empty, otherwise 1. | ||
709 | * | ||
710 | * spider_net_release_tx_chain releases the tx descriptors that spider has | ||
711 | * finished with (if non-brutal) or simply release tx descriptors (if brutal). | ||
712 | * If some other context is calling this function, we return 1 so that we're | ||
713 | * scheduled again (if we were scheduled) and will not lose initiative. | ||
714 | */ | ||
715 | static int | ||
716 | spider_net_release_tx_chain(struct spider_net_card *card, int brutal) | ||
717 | { | ||
718 | struct spider_net_descr_chain *chain = &card->tx_chain; | ||
719 | int status; | ||
720 | |||
721 | spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR); | ||
722 | |||
723 | while (chain->tail != chain->head) { | ||
724 | status = spider_net_get_descr_status(chain->tail); | ||
725 | switch (status) { | ||
726 | case SPIDER_NET_DESCR_COMPLETE: | ||
727 | card->netdev_stats.tx_packets++; | ||
728 | card->netdev_stats.tx_bytes += chain->tail->skb->len; | ||
729 | break; | ||
730 | |||
731 | case SPIDER_NET_DESCR_CARDOWNED: | ||
732 | if (!brutal) | ||
733 | return 1; | ||
734 | /* fallthrough, if we release the descriptors | ||
735 | * brutally (then we don't care about | ||
736 | * SPIDER_NET_DESCR_CARDOWNED) */ | ||
737 | |||
738 | case SPIDER_NET_DESCR_RESPONSE_ERROR: | ||
739 | case SPIDER_NET_DESCR_PROTECTION_ERROR: | ||
740 | case SPIDER_NET_DESCR_FORCE_END: | ||
741 | if (netif_msg_tx_err(card)) | ||
742 | pr_err("%s: forcing end of tx descriptor " | ||
743 | "with status x%02x\n", | ||
744 | card->netdev->name, status); | ||
745 | card->netdev_stats.tx_errors++; | ||
746 | break; | ||
747 | |||
748 | default: | ||
749 | card->netdev_stats.tx_dropped++; | ||
750 | return 1; | ||
751 | } | ||
752 | spider_net_release_tx_descr(card); | ||
753 | } | ||
887 | 754 | ||
888 | return 0; | 755 | return 0; |
889 | } | 756 | } |
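The relocated spider_net_release_tx_chain walks from chain->tail towards chain->head, retiring completed descriptors, stopping (return 1) at the first one the card still owns unless asked to release brutally, and counting errors rather than dropping them silently. A reduced model of that consume-from-tail loop over a circular descriptor array; statuses and layout are simplified stand-ins, not the driver's.

#include <stdio.h>

enum { COMPLETE, CARDOWNED, FORCE_END };

#define RING 8

static int status[RING];                       /* per-descriptor state */
static int tail, head;                         /* consume at tail, produce at head */
static int tx_packets, tx_errors;

/* returns 0 when everything up to head was released, 1 otherwise */
static int release_tx_chain(int brutal)
{
        while (tail != head) {
                switch (status[tail]) {
                case COMPLETE:
                        tx_packets++;
                        break;
                case CARDOWNED:
                        if (!brutal)
                                return 1;       /* hardware still owns it */
                        /* fall through: brutal release ignores ownership */
                case FORCE_END:
                        tx_errors++;
                        break;
                }
                tail = (tail + 1) % RING;       /* descriptor returns to the pool */
        }
        return 0;
}

int main(void)
{
        int r;

        status[0] = COMPLETE;
        status[1] = COMPLETE;
        status[2] = CARDOWNED;
        head = 3;

        r = release_tx_chain(0);
        printf("non-brutal: %d (packets=%d)\n", r, tx_packets);
        r = release_tx_chain(1);
        printf("brutal:     %d (errors=%d)\n", r, tx_errors);
        return 0;
}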
@@ -896,18 +763,32 @@ spider_net_prepare_tx_descr(struct spider_net_card *card, | |||
896 | * spider_net_kick_tx_dma writes the current tx chain head as start address | 763 | * spider_net_kick_tx_dma writes the current tx chain head as start address |
897 | * of the tx descriptor chain and enables the transmission DMA engine | 764 | * of the tx descriptor chain and enables the transmission DMA engine |
898 | */ | 765 | */ |
899 | static void | 766 | static inline void |
900 | spider_net_kick_tx_dma(struct spider_net_card *card, | 767 | spider_net_kick_tx_dma(struct spider_net_card *card) |
901 | struct spider_net_descr *descr) | ||
902 | { | 768 | { |
903 | /* this is the only descriptor in the output chain. | 769 | struct spider_net_descr *descr; |
904 | * Enable TX DMA */ | ||
905 | 770 | ||
906 | spider_net_write_reg(card, SPIDER_NET_GDTDCHA, | 771 | if (spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR) & |
907 | descr->bus_addr); | 772 | SPIDER_NET_TX_DMA_EN) |
773 | goto out; | ||
908 | 774 | ||
909 | spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, | 775 | descr = card->tx_chain.tail; |
910 | SPIDER_NET_DMA_TX_VALUE); | 776 | for (;;) { |
777 | if (spider_net_get_descr_status(descr) == | ||
778 | SPIDER_NET_DESCR_CARDOWNED) { | ||
779 | spider_net_write_reg(card, SPIDER_NET_GDTDCHA, | ||
780 | descr->bus_addr); | ||
781 | spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, | ||
782 | SPIDER_NET_DMA_TX_VALUE); | ||
783 | break; | ||
784 | } | ||
785 | if (descr == card->tx_chain.head) | ||
786 | break; | ||
787 | descr = descr->next; | ||
788 | } | ||
789 | |||
790 | out: | ||
791 | mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER); | ||
911 | } | 792 | } |
912 | 793 | ||
913 | /** | 794 | /** |
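spider_net_kick_tx_dma no longer takes a descriptor argument: it returns early if the TX DMA engine is already running (TX_DMA_EN set in GDTDMACCNTR), otherwise scans from the chain tail for the first card-owned descriptor, points the hardware at it, and re-arms the cleanup timer either way. A simplified restart-if-idle scan in the same spirit, with plain variables standing in for the registers:

#include <stdio.h>

#define RING 4

enum { NOT_IN_USE, CARDOWNED };

static int status[RING];
static int tail = 0, head = 2;
static int dma_running;                        /* stands in for the TX_DMA_EN bit */
static int dma_start = -1;                     /* stands in for the chain-address register */

static void kick_tx_dma(void)
{
        int i;

        if (dma_running)
                return;                        /* engine already active */

        for (i = tail; ; i = (i + 1) % RING) {
                if (status[i] == CARDOWNED) {
                        dma_start = i;         /* program the start descriptor */
                        dma_running = 1;
                        break;
                }
                if (i == head)
                        break;                 /* nothing queued */
        }
}

int main(void)
{
        status[1] = CARDOWNED;                 /* one queued packet */
        kick_tx_dma();
        printf("running=%d start=%d\n", dma_running, dma_start);
        return 0;
}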
@@ -915,47 +796,69 @@ spider_net_kick_tx_dma(struct spider_net_card *card, | |||
915 | * @skb: packet to send out | 796 | * @skb: packet to send out |
916 | * @netdev: interface device structure | 797 | * @netdev: interface device structure |
917 | * | 798 | * |
918 | * returns 0 on success, <0 on failure | 799 | * returns 0 on success, !0 on failure |
919 | */ | 800 | */ |
920 | static int | 801 | static int |
921 | spider_net_xmit(struct sk_buff *skb, struct net_device *netdev) | 802 | spider_net_xmit(struct sk_buff *skb, struct net_device *netdev) |
922 | { | 803 | { |
923 | struct spider_net_card *card = netdev_priv(netdev); | 804 | struct spider_net_card *card = netdev_priv(netdev); |
924 | struct spider_net_descr *descr; | 805 | struct spider_net_descr_chain *chain = &card->tx_chain; |
806 | struct spider_net_descr *descr = chain->head; | ||
807 | unsigned long flags; | ||
925 | int result; | 808 | int result; |
926 | 809 | ||
810 | spin_lock_irqsave(&chain->lock, flags); | ||
811 | |||
927 | spider_net_release_tx_chain(card, 0); | 812 | spider_net_release_tx_chain(card, 0); |
928 | 813 | ||
929 | descr = spider_net_get_next_tx_descr(card); | 814 | if (chain->head->next == chain->tail->prev) { |
815 | card->netdev_stats.tx_dropped++; | ||
816 | result = NETDEV_TX_LOCKED; | ||
817 | goto out; | ||
818 | } | ||
930 | 819 | ||
931 | if (!descr) | 820 | if (spider_net_get_descr_status(descr) != SPIDER_NET_DESCR_NOT_IN_USE) { |
932 | goto error; | 821 | result = NETDEV_TX_LOCKED; |
822 | goto out; | ||
823 | } | ||
933 | 824 | ||
934 | result = spider_net_prepare_tx_descr(card, descr, skb); | 825 | if (spider_net_prepare_tx_descr(card, skb) != 0) { |
935 | if (result) | 826 | card->netdev_stats.tx_dropped++; |
936 | goto error; | 827 | result = NETDEV_TX_BUSY; |
828 | goto out; | ||
829 | } | ||
830 | |||
831 | result = NETDEV_TX_OK; | ||
937 | 832 | ||
833 | spider_net_kick_tx_dma(card); | ||
938 | card->tx_chain.head = card->tx_chain.head->next; | 834 | card->tx_chain.head = card->tx_chain.head->next; |
939 | 835 | ||
940 | if (spider_net_get_descr_status(descr->prev) != | 836 | out: |
941 | SPIDER_NET_DESCR_CARDOWNED) { | 837 | spin_unlock_irqrestore(&chain->lock, flags); |
942 | /* make sure the current descriptor is in memory. Then | 838 | netif_wake_queue(netdev); |
943 | * kicking it on again makes sense, if the previous is not | 839 | return result; |
944 | * card-owned anymore. Check the previous descriptor twice | 840 | } |
945 | * to omit an mb() in heavy traffic cases */ | ||
946 | mb(); | ||
947 | if (spider_net_get_descr_status(descr->prev) != | ||
948 | SPIDER_NET_DESCR_CARDOWNED) | ||
949 | spider_net_kick_tx_dma(card, descr); | ||
950 | } | ||
951 | 841 | ||
952 | mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER); | 842 | /** |
843 | * spider_net_cleanup_tx_ring - cleans up the TX ring | ||
844 | * @card: card structure | ||
845 | * | ||
846 | * spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use | ||
847 | * interrupts to cleanup our TX ring) and returns sent packets to the stack | ||
848 | * by freeing them | ||
849 | */ | ||
850 | static void | ||
851 | spider_net_cleanup_tx_ring(struct spider_net_card *card) | ||
852 | { | ||
853 | unsigned long flags; | ||
953 | 854 | ||
954 | return NETDEV_TX_OK; | 855 | spin_lock_irqsave(&card->tx_chain.lock, flags); |
955 | 856 | ||
956 | error: | 857 | if ((spider_net_release_tx_chain(card, 0) != 0) && |
957 | card->netdev_stats.tx_dropped++; | 858 | (card->netdev->flags & IFF_UP)) |
958 | return NETDEV_TX_BUSY; | 859 | spider_net_kick_tx_dma(card); |
860 | |||
861 | spin_unlock_irqrestore(&card->tx_chain.lock, flags); | ||
959 | } | 862 | } |
960 | 863 | ||
961 | /** | 864 | /** |
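Because NETIF_F_LLTX is set further down, spider_net_xmit now does its own locking and signals the outcome through the standard return codes: NETDEV_TX_LOCKED when the ring has no room or the head descriptor is still in use, NETDEV_TX_BUSY when the DMA mapping fails, NETDEV_TX_OK otherwise. A skeleton of that return-code discipline; the flags and helpers below are placeholders, only the constants mirror the kernel's values.

#include <stdio.h>

/* local copies of the return codes used by the hunk above */
#define NETDEV_TX_OK     0
#define NETDEV_TX_BUSY   1
#define NETDEV_TX_LOCKED (-1)

static int ring_full;                          /* placeholder: head would collide with tail */
static int map_fails;                          /* placeholder: DMA mapping failure */

static int xmit(void)
{
        /* the chain lock would be taken here and completed descriptors reclaimed */

        if (ring_full)
                return NETDEV_TX_LOCKED;       /* caller requeues the packet */

        if (map_fails)
                return NETDEV_TX_BUSY;         /* descriptor not consumed */

        /* descriptor filled, DMA kicked, head advanced */
        return NETDEV_TX_OK;
}

int main(void)
{
        printf("ok:     %d\n", xmit());
        ring_full = 1;
        printf("locked: %d\n", xmit());
        ring_full = 0;
        map_fails = 1;
        printf("busy:   %d\n", xmit());
        return 0;
}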
@@ -1002,7 +905,7 @@ spider_net_pass_skb_up(struct spider_net_descr *descr, | |||
1002 | 905 | ||
1003 | /* unmap descriptor */ | 906 | /* unmap descriptor */ |
1004 | pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_FRAME, | 907 | pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_FRAME, |
1005 | PCI_DMA_BIDIRECTIONAL); | 908 | PCI_DMA_FROMDEVICE); |
1006 | 909 | ||
1007 | /* the cases we'll throw away the packet immediately */ | 910 | /* the cases we'll throw away the packet immediately */ |
1008 | if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) { | 911 | if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) { |
@@ -1067,14 +970,11 @@ spider_net_pass_skb_up(struct spider_net_descr *descr, | |||
1067 | static int | 970 | static int |
1068 | spider_net_decode_one_descr(struct spider_net_card *card, int napi) | 971 | spider_net_decode_one_descr(struct spider_net_card *card, int napi) |
1069 | { | 972 | { |
1070 | enum spider_net_descr_status status; | 973 | struct spider_net_descr_chain *chain = &card->rx_chain; |
1071 | struct spider_net_descr *descr; | 974 | struct spider_net_descr *descr = chain->tail; |
1072 | struct spider_net_descr_chain *chain; | 975 | int status; |
1073 | int result; | 976 | int result; |
1074 | 977 | ||
1075 | chain = &card->rx_chain; | ||
1076 | descr = chain->tail; | ||
1077 | |||
1078 | status = spider_net_get_descr_status(descr); | 978 | status = spider_net_get_descr_status(descr); |
1079 | 979 | ||
1080 | if (status == SPIDER_NET_DESCR_CARDOWNED) { | 980 | if (status == SPIDER_NET_DESCR_CARDOWNED) { |
@@ -1103,7 +1003,7 @@ spider_net_decode_one_descr(struct spider_net_card *card, int napi) | |||
1103 | card->netdev->name, status); | 1003 | card->netdev->name, status); |
1104 | card->netdev_stats.rx_dropped++; | 1004 | card->netdev_stats.rx_dropped++; |
1105 | pci_unmap_single(card->pdev, descr->buf_addr, | 1005 | pci_unmap_single(card->pdev, descr->buf_addr, |
1106 | SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL); | 1006 | SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); |
1107 | dev_kfree_skb_irq(descr->skb); | 1007 | dev_kfree_skb_irq(descr->skb); |
1108 | goto refill; | 1008 | goto refill; |
1109 | } | 1009 | } |
@@ -1119,7 +1019,7 @@ spider_net_decode_one_descr(struct spider_net_card *card, int napi) | |||
1119 | /* ok, we've got a packet in descr */ | 1019 | /* ok, we've got a packet in descr */ |
1120 | result = spider_net_pass_skb_up(descr, card, napi); | 1020 | result = spider_net_pass_skb_up(descr, card, napi); |
1121 | refill: | 1021 | refill: |
1122 | spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); | 1022 | descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; |
1123 | /* change the descriptor state: */ | 1023 | /* change the descriptor state: */ |
1124 | if (!napi) | 1024 | if (!napi) |
1125 | spider_net_refill_rx_chain(card); | 1025 | spider_net_refill_rx_chain(card); |
@@ -1291,21 +1191,6 @@ spider_net_set_mac(struct net_device *netdev, void *p) | |||
1291 | } | 1191 | } |
1292 | 1192 | ||
1293 | /** | 1193 | /** |
1294 | * spider_net_enable_txdmac - enables a TX DMA controller | ||
1295 | * @card: card structure | ||
1296 | * | ||
1297 | * spider_net_enable_txdmac enables the TX DMA controller by setting the | ||
1298 | * descriptor chain tail address | ||
1299 | */ | ||
1300 | static void | ||
1301 | spider_net_enable_txdmac(struct spider_net_card *card) | ||
1302 | { | ||
1303 | /* assume chain is aligned correctly */ | ||
1304 | spider_net_write_reg(card, SPIDER_NET_GDTDCHA, | ||
1305 | card->tx_chain.tail->bus_addr); | ||
1306 | } | ||
1307 | |||
1308 | /** | ||
1309 | * spider_net_handle_rxram_full - cleans up RX ring upon RX RAM full interrupt | 1194 | * spider_net_handle_rxram_full - cleans up RX ring upon RX RAM full interrupt |
1310 | * @card: card structure | 1195 | * @card: card structure |
1311 | * | 1196 | * |
@@ -1653,7 +1538,6 @@ spider_net_enable_card(struct spider_net_card *card) | |||
1653 | { SPIDER_NET_GMRWOLCTRL, 0 }, | 1538 | { SPIDER_NET_GMRWOLCTRL, 0 }, |
1654 | { SPIDER_NET_GTESTMD, 0x10000000 }, | 1539 | { SPIDER_NET_GTESTMD, 0x10000000 }, |
1655 | { SPIDER_NET_GTTQMSK, 0x00400040 }, | 1540 | { SPIDER_NET_GTTQMSK, 0x00400040 }, |
1656 | { SPIDER_NET_GTESTMD, 0 }, | ||
1657 | 1541 | ||
1658 | { SPIDER_NET_GMACINTEN, 0 }, | 1542 | { SPIDER_NET_GMACINTEN, 0 }, |
1659 | 1543 | ||
@@ -1692,9 +1576,6 @@ spider_net_enable_card(struct spider_net_card *card) | |||
1692 | 1576 | ||
1693 | spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE); | 1577 | spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE); |
1694 | 1578 | ||
1695 | /* set chain tail address for TX chain */ | ||
1696 | spider_net_enable_txdmac(card); | ||
1697 | |||
1698 | spider_net_write_reg(card, SPIDER_NET_GMACLENLMT, | 1579 | spider_net_write_reg(card, SPIDER_NET_GMACLENLMT, |
1699 | SPIDER_NET_LENLMT_VALUE); | 1580 | SPIDER_NET_LENLMT_VALUE); |
1700 | spider_net_write_reg(card, SPIDER_NET_GMACMODE, | 1581 | spider_net_write_reg(card, SPIDER_NET_GMACMODE, |
@@ -1709,6 +1590,9 @@ spider_net_enable_card(struct spider_net_card *card) | |||
1709 | SPIDER_NET_INT1_MASK_VALUE); | 1590 | SPIDER_NET_INT1_MASK_VALUE); |
1710 | spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, | 1591 | spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, |
1711 | SPIDER_NET_INT2_MASK_VALUE); | 1592 | SPIDER_NET_INT2_MASK_VALUE); |
1593 | |||
1594 | spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, | ||
1595 | SPIDER_NET_GDTDCEIDIS); | ||
1712 | } | 1596 | } |
1713 | 1597 | ||
1714 | /** | 1598 | /** |
@@ -1728,10 +1612,12 @@ spider_net_open(struct net_device *netdev) | |||
1728 | 1612 | ||
1729 | result = -ENOMEM; | 1613 | result = -ENOMEM; |
1730 | if (spider_net_init_chain(card, &card->tx_chain, | 1614 | if (spider_net_init_chain(card, &card->tx_chain, |
1731 | card->descr, tx_descriptors)) | 1615 | card->descr, |
1616 | PCI_DMA_TODEVICE, tx_descriptors)) | ||
1732 | goto alloc_tx_failed; | 1617 | goto alloc_tx_failed; |
1733 | if (spider_net_init_chain(card, &card->rx_chain, | 1618 | if (spider_net_init_chain(card, &card->rx_chain, |
1734 | card->descr + tx_descriptors, rx_descriptors)) | 1619 | card->descr + tx_descriptors, |
1620 | PCI_DMA_FROMDEVICE, rx_descriptors)) | ||
1735 | goto alloc_rx_failed; | 1621 | goto alloc_rx_failed; |
1736 | 1622 | ||
1737 | /* allocate rx skbs */ | 1623 | /* allocate rx skbs */ |
@@ -1938,7 +1824,7 @@ spider_net_workaround_rxramfull(struct spider_net_card *card) | |||
1938 | /* empty sequencer data */ | 1824 | /* empty sequencer data */ |
1939 | for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS; | 1825 | for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS; |
1940 | sequencer++) { | 1826 | sequencer++) { |
1941 | spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + | 1827 | spider_net_write_reg(card, SPIDER_NET_GSnPRGADR + |
1942 | sequencer * 8, 0x0); | 1828 | sequencer * 8, 0x0); |
1943 | for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) { | 1829 | for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) { |
1944 | spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + | 1830 | spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + |
@@ -1955,6 +1841,49 @@ spider_net_workaround_rxramfull(struct spider_net_card *card) | |||
1955 | } | 1841 | } |
1956 | 1842 | ||
1957 | /** | 1843 | /** |
1844 | * spider_net_stop - called upon ifconfig down | ||
1845 | * @netdev: interface device structure | ||
1846 | * | ||
1847 | * always returns 0 | ||
1848 | */ | ||
1849 | int | ||
1850 | spider_net_stop(struct net_device *netdev) | ||
1851 | { | ||
1852 | struct spider_net_card *card = netdev_priv(netdev); | ||
1853 | |||
1854 | tasklet_kill(&card->rxram_full_tl); | ||
1855 | netif_poll_disable(netdev); | ||
1856 | netif_carrier_off(netdev); | ||
1857 | netif_stop_queue(netdev); | ||
1858 | del_timer_sync(&card->tx_timer); | ||
1859 | |||
1860 | /* disable/mask all interrupts */ | ||
1861 | spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0); | ||
1862 | spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0); | ||
1863 | spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0); | ||
1864 | |||
1865 | /* free_irq(netdev->irq, netdev);*/ | ||
1866 | free_irq(to_pci_dev(netdev->class_dev.dev)->irq, netdev); | ||
1867 | |||
1868 | spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, | ||
1869 | SPIDER_NET_DMA_TX_FEND_VALUE); | ||
1870 | |||
1871 | /* turn off DMA, force end */ | ||
1872 | spider_net_disable_rxdmac(card); | ||
1873 | |||
1874 | /* release chains */ | ||
1875 | if (spin_trylock(&card->tx_chain.lock)) { | ||
1876 | spider_net_release_tx_chain(card, 1); | ||
1877 | spin_unlock(&card->tx_chain.lock); | ||
1878 | } | ||
1879 | |||
1880 | spider_net_free_chain(card, &card->tx_chain); | ||
1881 | spider_net_free_chain(card, &card->rx_chain); | ||
1882 | |||
1883 | return 0; | ||
1884 | } | ||
1885 | |||
1886 | /** | ||
1958 | * spider_net_tx_timeout_task - task scheduled by the watchdog timeout | 1887 | * spider_net_tx_timeout_task - task scheduled by the watchdog timeout |
1959 | * function (to be called not under interrupt status) | 1888 | * function (to be called not under interrupt status) |
1960 | * @data: data, is interface device structure | 1889 | * @data: data, is interface device structure |
@@ -1982,7 +1911,7 @@ spider_net_tx_timeout_task(void *data) | |||
1982 | goto out; | 1911 | goto out; |
1983 | 1912 | ||
1984 | spider_net_open(netdev); | 1913 | spider_net_open(netdev); |
1985 | spider_net_kick_tx_dma(card, card->tx_chain.head); | 1914 | spider_net_kick_tx_dma(card); |
1986 | netif_device_attach(netdev); | 1915 | netif_device_attach(netdev); |
1987 | 1916 | ||
1988 | out: | 1917 | out: |
@@ -2065,7 +1994,6 @@ spider_net_setup_netdev(struct spider_net_card *card) | |||
2065 | 1994 | ||
2066 | pci_set_drvdata(card->pdev, netdev); | 1995 | pci_set_drvdata(card->pdev, netdev); |
2067 | 1996 | ||
2068 | atomic_set(&card->tx_chain_release,0); | ||
2069 | card->rxram_full_tl.data = (unsigned long) card; | 1997 | card->rxram_full_tl.data = (unsigned long) card; |
2070 | card->rxram_full_tl.func = | 1998 | card->rxram_full_tl.func = |
2071 | (void (*)(unsigned long)) spider_net_handle_rxram_full; | 1999 | (void (*)(unsigned long)) spider_net_handle_rxram_full; |
@@ -2079,7 +2007,7 @@ spider_net_setup_netdev(struct spider_net_card *card) | |||
2079 | 2007 | ||
2080 | spider_net_setup_netdev_ops(netdev); | 2008 | spider_net_setup_netdev_ops(netdev); |
2081 | 2009 | ||
2082 | netdev->features = NETIF_F_HW_CSUM; | 2010 | netdev->features = NETIF_F_HW_CSUM | NETIF_F_LLTX; |
2083 | /* some time: NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | | 2011 | /* some time: NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | |
2084 | * NETIF_F_HW_VLAN_FILTER */ | 2012 | * NETIF_F_HW_VLAN_FILTER */ |
2085 | 2013 | ||
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h index 3b8d951cf73c..f6dcf180ae3d 100644 --- a/drivers/net/spider_net.h +++ b/drivers/net/spider_net.h | |||
@@ -208,7 +208,10 @@ extern char spider_net_driver_name[]; | |||
208 | #define SPIDER_NET_DMA_RX_VALUE 0x80000000 | 208 | #define SPIDER_NET_DMA_RX_VALUE 0x80000000 |
209 | #define SPIDER_NET_DMA_RX_FEND_VALUE 0x00030003 | 209 | #define SPIDER_NET_DMA_RX_FEND_VALUE 0x00030003 |
210 | /* to set TX_DMA_EN */ | 210 | /* to set TX_DMA_EN */ |
211 | #define SPIDER_NET_DMA_TX_VALUE 0x80000000 | 211 | #define SPIDER_NET_TX_DMA_EN 0x80000000 |
212 | #define SPIDER_NET_GDTDCEIDIS 0x00000002 | ||
213 | #define SPIDER_NET_DMA_TX_VALUE SPIDER_NET_TX_DMA_EN | \ | ||
214 | SPIDER_NET_GDTDCEIDIS | ||
212 | #define SPIDER_NET_DMA_TX_FEND_VALUE 0x00030003 | 215 | #define SPIDER_NET_DMA_TX_FEND_VALUE 0x00030003 |
213 | 216 | ||
214 | /* SPIDER_NET_UA_DESCR_VALUE is OR'ed with the unicast address */ | 217 | /* SPIDER_NET_UA_DESCR_VALUE is OR'ed with the unicast address */ |
@@ -329,55 +332,23 @@ enum spider_net_int2_status { | |||
329 | (~SPIDER_NET_TXINT) & \ | 332 | (~SPIDER_NET_TXINT) & \ |
330 | (~SPIDER_NET_RXINT) ) | 333 | (~SPIDER_NET_RXINT) ) |
331 | 334 | ||
332 | #define SPIDER_NET_GPREXEC 0x80000000 | 335 | #define SPIDER_NET_GPREXEC 0x80000000 |
333 | #define SPIDER_NET_GPRDAT_MASK 0x0000ffff | 336 | #define SPIDER_NET_GPRDAT_MASK 0x0000ffff |
334 | 337 | ||
335 | /* descriptor bits | 338 | #define SPIDER_NET_DMAC_NOINTR_COMPLETE 0x00800000 |
336 | * | 339 | #define SPIDER_NET_DMAC_NOCS 0x00040000 |
337 | * 1010 descriptor ready | 340 | #define SPIDER_NET_DMAC_TCP 0x00020000 |
338 | * 0 descr in middle of chain | 341 | #define SPIDER_NET_DMAC_UDP 0x00030000 |
339 | * 000 fixed to 0 | 342 | #define SPIDER_NET_TXDCEST 0x08000000 |
340 | * | 343 | |
341 | * 0 no interrupt on completion | 344 | #define SPIDER_NET_DESCR_IND_PROC_MASK 0xF0000000 |
342 | * 000 fixed to 0 | 345 | #define SPIDER_NET_DESCR_COMPLETE 0x00000000 /* used in rx and tx */ |
343 | * 1 no ipsec processing | 346 | #define SPIDER_NET_DESCR_RESPONSE_ERROR 0x10000000 /* used in rx and tx */ |
344 | * 1 last descriptor for this frame | 347 | #define SPIDER_NET_DESCR_PROTECTION_ERROR 0x20000000 /* used in rx and tx */ |
345 | * 00 no checksum | 348 | #define SPIDER_NET_DESCR_FRAME_END 0x40000000 /* used in rx */ |
346 | * 10 tcp checksum | 349 | #define SPIDER_NET_DESCR_FORCE_END 0x50000000 /* used in rx and tx */ |
347 | * 11 udp checksum | 350 | #define SPIDER_NET_DESCR_CARDOWNED 0xA0000000 /* used in rx and tx */ |
348 | * | 351 | #define SPIDER_NET_DESCR_NOT_IN_USE 0xF0000000 |
349 | * 00 fixed to 0 | ||
350 | * 0 fixed to 0 | ||
351 | * 0 no interrupt on response errors | ||
352 | * 0 no interrupt on invalid descr | ||
353 | * 0 no interrupt on dma process termination | ||
354 | * 0 no interrupt on descr chain end | ||
355 | * 0 no interrupt on descr complete | ||
356 | * | ||
357 | * 000 fixed to 0 | ||
358 | * 0 response error interrupt status | ||
359 | * 0 invalid descr status | ||
360 | * 0 dma termination status | ||
361 | * 0 descr chain end status | ||
362 | * 0 descr complete status */ | ||
363 | #define SPIDER_NET_DMAC_CMDSTAT_NOCS 0xa00c0000 | ||
364 | #define SPIDER_NET_DMAC_CMDSTAT_TCPCS 0xa00e0000 | ||
365 | #define SPIDER_NET_DMAC_CMDSTAT_UDPCS 0xa00f0000 | ||
366 | #define SPIDER_NET_DESCR_IND_PROC_SHIFT 28 | ||
367 | #define SPIDER_NET_DESCR_IND_PROC_MASKO 0x0fffffff | ||
368 | |||
369 | /* descr ready, descr is in middle of chain, get interrupt on completion */ | ||
370 | #define SPIDER_NET_DMAC_RX_CARDOWNED 0xa0800000 | ||
371 | |||
372 | enum spider_net_descr_status { | ||
373 | SPIDER_NET_DESCR_COMPLETE = 0x00, /* used in rx and tx */ | ||
374 | SPIDER_NET_DESCR_RESPONSE_ERROR = 0x01, /* used in rx and tx */ | ||
375 | SPIDER_NET_DESCR_PROTECTION_ERROR = 0x02, /* used in rx and tx */ | ||
376 | SPIDER_NET_DESCR_FRAME_END = 0x04, /* used in rx */ | ||
377 | SPIDER_NET_DESCR_FORCE_END = 0x05, /* used in rx and tx */ | ||
378 | SPIDER_NET_DESCR_CARDOWNED = 0x0a, /* used in rx and tx */ | ||
379 | SPIDER_NET_DESCR_NOT_IN_USE /* any other value */ | ||
380 | }; | ||
381 | 352 | ||
382 | struct spider_net_descr { | 353 | struct spider_net_descr { |
383 | /* as defined by the hardware */ | 354 | /* as defined by the hardware */ |
@@ -398,7 +369,7 @@ struct spider_net_descr { | |||
398 | } __attribute__((aligned(32))); | 369 | } __attribute__((aligned(32))); |
399 | 370 | ||
400 | struct spider_net_descr_chain { | 371 | struct spider_net_descr_chain { |
401 | /* we walk from tail to head */ | 372 | spinlock_t lock; |
402 | struct spider_net_descr *head; | 373 | struct spider_net_descr *head; |
403 | struct spider_net_descr *tail; | 374 | struct spider_net_descr *tail; |
404 | }; | 375 | }; |
@@ -453,8 +424,6 @@ struct spider_net_card { | |||
453 | 424 | ||
454 | struct spider_net_descr_chain tx_chain; | 425 | struct spider_net_descr_chain tx_chain; |
455 | struct spider_net_descr_chain rx_chain; | 426 | struct spider_net_descr_chain rx_chain; |
456 | atomic_t rx_chain_refill; | ||
457 | atomic_t tx_chain_release; | ||
458 | 427 | ||
459 | struct net_device_stats netdev_stats; | 428 | struct net_device_stats netdev_stats; |
460 | 429 | ||
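One detail worth noting in the spider_net.h hunk above: SPIDER_NET_DMA_TX_VALUE is now composed as SPIDER_NET_TX_DMA_EN | SPIDER_NET_GDTDCEIDIS without wrapping parentheses. That is harmless where the driver writes the whole value to a register, but it is the classic macro hazard if the name ever appears inside a larger expression. A tiny demonstration with generic names (not the driver's):

#include <stdio.h>

#define BIT_A 0x80000000u
#define BIT_B 0x00000002u

#define VAL_UNSAFE BIT_A | BIT_B               /* expands without parentheses */
#define VAL_SAFE  (BIT_A | BIT_B)

int main(void)
{
        /* & binds tighter than |, so the unparenthesized form is not masked as a whole */
        printf("unsafe: 0x%08x\n", VAL_UNSAFE & 0xffffu);      /* 0x80000002 */
        printf("safe:   0x%08x\n", VAL_SAFE & 0xffffu);        /* 0x00000002 */
        return 0;
}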
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c index 8673fd4c08c7..c6f5bc3c042f 100644 --- a/drivers/net/sunhme.c +++ b/drivers/net/sunhme.c | |||
@@ -3255,12 +3255,7 @@ static void __devexit happy_meal_pci_remove(struct pci_dev *pdev) | |||
3255 | } | 3255 | } |
3256 | 3256 | ||
3257 | static struct pci_device_id happymeal_pci_ids[] = { | 3257 | static struct pci_device_id happymeal_pci_ids[] = { |
3258 | { | 3258 | { PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) }, |
3259 | .vendor = PCI_VENDOR_ID_SUN, | ||
3260 | .device = PCI_DEVICE_ID_SUN_HAPPYMEAL, | ||
3261 | .subvendor = PCI_ANY_ID, | ||
3262 | .subdevice = PCI_ANY_ID, | ||
3263 | }, | ||
3264 | { } /* Terminating entry */ | 3259 | { } /* Terminating entry */ |
3265 | }; | 3260 | }; |
3266 | 3261 | ||
@@ -3275,7 +3270,7 @@ static struct pci_driver hme_pci_driver = { | |||
3275 | 3270 | ||
3276 | static int __init happy_meal_pci_init(void) | 3271 | static int __init happy_meal_pci_init(void) |
3277 | { | 3272 | { |
3278 | return pci_module_init(&hme_pci_driver); | 3273 | return pci_register_driver(&hme_pci_driver); |
3279 | } | 3274 | } |
3280 | 3275 | ||
3281 | static void happy_meal_pci_exit(void) | 3276 | static void happy_meal_pci_exit(void) |
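The sunhme change replaces the spelled-out PCI ID entry with PCI_DEVICE(), which fills in vendor and device and defaults subvendor/subdevice to PCI_ANY_ID, and swaps the deprecated pci_module_init for pci_register_driver. The effect of such a designated-initializer macro can be shown with a self-contained mock; the struct, macro, and ID values below are illustrative, not the kernel's definitions.

#include <stdio.h>

#define ANY_ID (~0u)

struct id_entry {
        unsigned vendor, device, subvendor, subdevice;
};

/* same idea as the kernel's PCI_DEVICE(): name two fields, default the rest */
#define DEVICE_ENTRY(vend, dev) \
        .vendor = (vend), .device = (dev), .subvendor = ANY_ID, .subdevice = ANY_ID

static const struct id_entry ids[] = {
        { DEVICE_ENTRY(0x108e, 0x1001) },      /* example vendor/device pair */
        { 0 }                                  /* terminating entry */
};

int main(void)
{
        printf("vendor=0x%04x device=0x%04x subvendor=0x%x\n",
               ids[0].vendor, ids[0].device, ids[0].subvendor);
        return 0;
}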
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c index 1ef9fd39a79a..0e3fdf7c6dd3 100644 --- a/drivers/net/sunlance.c +++ b/drivers/net/sunlance.c | |||
@@ -1537,7 +1537,7 @@ static int __init sparc_lance_init(void) | |||
1537 | { | 1537 | { |
1538 | if ((idprom->id_machtype == (SM_SUN4|SM_4_330)) || | 1538 | if ((idprom->id_machtype == (SM_SUN4|SM_4_330)) || |
1539 | (idprom->id_machtype == (SM_SUN4|SM_4_470))) { | 1539 | (idprom->id_machtype == (SM_SUN4|SM_4_470))) { |
1540 | memset(&sun4_sdev, 0, sizeof(sdev)); | 1540 | memset(&sun4_sdev, 0, sizeof(struct sbus_dev)); |
1541 | sun4_sdev.reg_addrs[0].phys_addr = sun4_eth_physaddr; | 1541 | sun4_sdev.reg_addrs[0].phys_addr = sun4_eth_physaddr; |
1542 | sun4_sdev.irqs[0] = 6; | 1542 | sun4_sdev.irqs[0] = 6; |
1543 | return sparc_lance_probe_one(&sun4_sdev, NULL, NULL); | 1543 | return sparc_lance_probe_one(&sun4_sdev, NULL, NULL); |
@@ -1547,16 +1547,16 @@ static int __init sparc_lance_init(void) | |||
1547 | 1547 | ||
1548 | static int __exit sunlance_sun4_remove(void) | 1548 | static int __exit sunlance_sun4_remove(void) |
1549 | { | 1549 | { |
1550 | struct lance_private *lp = dev_get_drvdata(&sun4_sdev->dev); | 1550 | struct lance_private *lp = dev_get_drvdata(&sun4_sdev.ofdev.dev); |
1551 | struct net_device *net_dev = lp->dev; | 1551 | struct net_device *net_dev = lp->dev; |
1552 | 1552 | ||
1553 | unregister_netdevice(net_dev); | 1553 | unregister_netdevice(net_dev); |
1554 | 1554 | ||
1555 | lance_free_hwresources(root_lance_dev); | 1555 | lance_free_hwresources(lp); |
1556 | 1556 | ||
1557 | free_netdev(net_dev); | 1557 | free_netdev(net_dev); |
1558 | 1558 | ||
1559 | dev_set_drvdata(&sun4_sdev->dev, NULL); | 1559 | dev_set_drvdata(&sun4_sdev.ofdev.dev, NULL); |
1560 | 1560 | ||
1561 | return 0; | 1561 | return 0; |
1562 | } | 1562 | } |
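Both sunlance fixes are cleanup-path correctness issues: memset(&sun4_sdev, 0, sizeof(sdev)) did not name the object being cleared (if sdev is a pointer, only pointer-size bytes get zeroed), and the sun4 remove path now reaches the embedded device through sun4_sdev.ofdev.dev and frees the lance_private it actually looked up. The sizeof pitfall is easy to reproduce in a standalone program; the struct below is a made-up stand-in.

#include <stdio.h>
#include <string.h>

struct sbus_dev_like {                         /* made-up stand-in, not the real struct */
        char reg_addrs[32];
        int irqs[4];
};

int main(void)
{
        struct sbus_dev_like dev;
        struct sbus_dev_like *sdev = &dev;

        memset(&dev, 0xff, sizeof(dev));       /* dirty the object first */

        /* old pattern: sizeof(sdev) is the size of a pointer, not of the struct */
        memset(&dev, 0, sizeof(sdev));
        printf("sizeof(ptr) memset:    last byte = 0x%02x\n",
               ((unsigned char *)&dev)[sizeof(dev) - 1]);      /* still 0xff */

        /* fixed pattern: name the type (or better, the object) being cleared */
        memset(&dev, 0, sizeof(struct sbus_dev_like));
        printf("sizeof(struct) memset: last byte = 0x%02x\n",
               ((unsigned char *)&dev)[sizeof(dev) - 1]);      /* now 0x00 */
        return 0;
}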
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index f645921aff8b..1b8138f641e3 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -68,8 +68,8 @@ | |||
68 | 68 | ||
69 | #define DRV_MODULE_NAME "tg3" | 69 | #define DRV_MODULE_NAME "tg3" |
70 | #define PFX DRV_MODULE_NAME ": " | 70 | #define PFX DRV_MODULE_NAME ": " |
71 | #define DRV_MODULE_VERSION "3.62" | 71 | #define DRV_MODULE_VERSION "3.63" |
72 | #define DRV_MODULE_RELDATE "June 30, 2006" | 72 | #define DRV_MODULE_RELDATE "July 25, 2006" |
73 | 73 | ||
74 | #define TG3_DEF_MAC_MODE 0 | 74 | #define TG3_DEF_MAC_MODE 0 |
75 | #define TG3_DEF_RX_MODE 0 | 75 | #define TG3_DEF_RX_MODE 0 |
@@ -3590,6 +3590,28 @@ static irqreturn_t tg3_test_isr(int irq, void *dev_id, | |||
3590 | static int tg3_init_hw(struct tg3 *, int); | 3590 | static int tg3_init_hw(struct tg3 *, int); |
3591 | static int tg3_halt(struct tg3 *, int, int); | 3591 | static int tg3_halt(struct tg3 *, int, int); |
3592 | 3592 | ||
3593 | /* Restart hardware after configuration changes, self-test, etc. | ||
3594 | * Invoked with tp->lock held. | ||
3595 | */ | ||
3596 | static int tg3_restart_hw(struct tg3 *tp, int reset_phy) | ||
3597 | { | ||
3598 | int err; | ||
3599 | |||
3600 | err = tg3_init_hw(tp, reset_phy); | ||
3601 | if (err) { | ||
3602 | printk(KERN_ERR PFX "%s: Failed to re-initialize device, " | ||
3603 | "aborting.\n", tp->dev->name); | ||
3604 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | ||
3605 | tg3_full_unlock(tp); | ||
3606 | del_timer_sync(&tp->timer); | ||
3607 | tp->irq_sync = 0; | ||
3608 | netif_poll_enable(tp->dev); | ||
3609 | dev_close(tp->dev); | ||
3610 | tg3_full_lock(tp, 0); | ||
3611 | } | ||
3612 | return err; | ||
3613 | } | ||
3614 | |||
3593 | #ifdef CONFIG_NET_POLL_CONTROLLER | 3615 | #ifdef CONFIG_NET_POLL_CONTROLLER |
3594 | static void tg3_poll_controller(struct net_device *dev) | 3616 | static void tg3_poll_controller(struct net_device *dev) |
3595 | { | 3617 | { |
@@ -3630,13 +3652,15 @@ static void tg3_reset_task(void *_data) | |||
3630 | } | 3652 | } |
3631 | 3653 | ||
3632 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); | 3654 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); |
3633 | tg3_init_hw(tp, 1); | 3655 | if (tg3_init_hw(tp, 1)) |
3656 | goto out; | ||
3634 | 3657 | ||
3635 | tg3_netif_start(tp); | 3658 | tg3_netif_start(tp); |
3636 | 3659 | ||
3637 | if (restart_timer) | 3660 | if (restart_timer) |
3638 | mod_timer(&tp->timer, jiffies + 1); | 3661 | mod_timer(&tp->timer, jiffies + 1); |
3639 | 3662 | ||
3663 | out: | ||
3640 | tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK; | 3664 | tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK; |
3641 | 3665 | ||
3642 | tg3_full_unlock(tp); | 3666 | tg3_full_unlock(tp); |
@@ -4124,6 +4148,7 @@ static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp, | |||
4124 | static int tg3_change_mtu(struct net_device *dev, int new_mtu) | 4148 | static int tg3_change_mtu(struct net_device *dev, int new_mtu) |
4125 | { | 4149 | { |
4126 | struct tg3 *tp = netdev_priv(dev); | 4150 | struct tg3 *tp = netdev_priv(dev); |
4151 | int err; | ||
4127 | 4152 | ||
4128 | if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp)) | 4153 | if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp)) |
4129 | return -EINVAL; | 4154 | return -EINVAL; |
@@ -4144,13 +4169,14 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu) | |||
4144 | 4169 | ||
4145 | tg3_set_mtu(dev, tp, new_mtu); | 4170 | tg3_set_mtu(dev, tp, new_mtu); |
4146 | 4171 | ||
4147 | tg3_init_hw(tp, 0); | 4172 | err = tg3_restart_hw(tp, 0); |
4148 | 4173 | ||
4149 | tg3_netif_start(tp); | 4174 | if (!err) |
4175 | tg3_netif_start(tp); | ||
4150 | 4176 | ||
4151 | tg3_full_unlock(tp); | 4177 | tg3_full_unlock(tp); |
4152 | 4178 | ||
4153 | return 0; | 4179 | return err; |
4154 | } | 4180 | } |
4155 | 4181 | ||
4156 | /* Free up pending packets in all rx/tx rings. | 4182 | /* Free up pending packets in all rx/tx rings. |
@@ -4232,7 +4258,7 @@ static void tg3_free_rings(struct tg3 *tp) | |||
4232 | * end up in the driver. tp->{tx,}lock are held and thus | 4258 | * end up in the driver. tp->{tx,}lock are held and thus |
4233 | * we may not sleep. | 4259 | * we may not sleep. |
4234 | */ | 4260 | */ |
4235 | static void tg3_init_rings(struct tg3 *tp) | 4261 | static int tg3_init_rings(struct tg3 *tp) |
4236 | { | 4262 | { |
4237 | u32 i; | 4263 | u32 i; |
4238 | 4264 | ||
@@ -4281,18 +4307,38 @@ static void tg3_init_rings(struct tg3 *tp) | |||
4281 | 4307 | ||
4282 | /* Now allocate fresh SKBs for each rx ring. */ | 4308 | /* Now allocate fresh SKBs for each rx ring. */ |
4283 | for (i = 0; i < tp->rx_pending; i++) { | 4309 | for (i = 0; i < tp->rx_pending; i++) { |
4284 | if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, | 4310 | if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) { |
4285 | -1, i) < 0) | 4311 | printk(KERN_WARNING PFX |
4312 | "%s: Using a smaller RX standard ring, " | ||
4313 | "only %d out of %d buffers were allocated " | ||
4314 | "successfully.\n", | ||
4315 | tp->dev->name, i, tp->rx_pending); | ||
4316 | if (i == 0) | ||
4317 | return -ENOMEM; | ||
4318 | tp->rx_pending = i; | ||
4286 | break; | 4319 | break; |
4320 | } | ||
4287 | } | 4321 | } |
4288 | 4322 | ||
4289 | if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { | 4323 | if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { |
4290 | for (i = 0; i < tp->rx_jumbo_pending; i++) { | 4324 | for (i = 0; i < tp->rx_jumbo_pending; i++) { |
4291 | if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO, | 4325 | if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO, |
4292 | -1, i) < 0) | 4326 | -1, i) < 0) { |
4327 | printk(KERN_WARNING PFX | ||
4328 | "%s: Using a smaller RX jumbo ring, " | ||
4329 | "only %d out of %d buffers were " | ||
4330 | "allocated successfully.\n", | ||
4331 | tp->dev->name, i, tp->rx_jumbo_pending); | ||
4332 | if (i == 0) { | ||
4333 | tg3_free_rings(tp); | ||
4334 | return -ENOMEM; | ||
4335 | } | ||
4336 | tp->rx_jumbo_pending = i; | ||
4293 | break; | 4337 | break; |
4338 | } | ||
4294 | } | 4339 | } |
4295 | } | 4340 | } |
4341 | return 0; | ||
4296 | } | 4342 | } |
4297 | 4343 | ||
4298 | /* | 4344 | /* |
@@ -5815,6 +5861,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p) | |||
5815 | { | 5861 | { |
5816 | struct tg3 *tp = netdev_priv(dev); | 5862 | struct tg3 *tp = netdev_priv(dev); |
5817 | struct sockaddr *addr = p; | 5863 | struct sockaddr *addr = p; |
5864 | int err = 0; | ||
5818 | 5865 | ||
5819 | if (!is_valid_ether_addr(addr->sa_data)) | 5866 | if (!is_valid_ether_addr(addr->sa_data)) |
5820 | return -EINVAL; | 5867 | return -EINVAL; |
@@ -5832,9 +5879,9 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p) | |||
5832 | tg3_full_lock(tp, 1); | 5879 | tg3_full_lock(tp, 1); |
5833 | 5880 | ||
5834 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 5881 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); |
5835 | tg3_init_hw(tp, 0); | 5882 | err = tg3_restart_hw(tp, 0); |
5836 | 5883 | if (!err) | |
5837 | tg3_netif_start(tp); | 5884 | tg3_netif_start(tp); |
5838 | tg3_full_unlock(tp); | 5885 | tg3_full_unlock(tp); |
5839 | } else { | 5886 | } else { |
5840 | spin_lock_bh(&tp->lock); | 5887 | spin_lock_bh(&tp->lock); |
@@ -5842,7 +5889,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p) | |||
5842 | spin_unlock_bh(&tp->lock); | 5889 | spin_unlock_bh(&tp->lock); |
5843 | } | 5890 | } |
5844 | 5891 | ||
5845 | return 0; | 5892 | return err; |
5846 | } | 5893 | } |
5847 | 5894 | ||
5848 | /* tp->lock is held. */ | 5895 | /* tp->lock is held. */ |
@@ -5942,7 +5989,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
5942 | * can only do this after the hardware has been | 5989 | * can only do this after the hardware has been |
5943 | * successfully reset. | 5990 | * successfully reset. |
5944 | */ | 5991 | */ |
5945 | tg3_init_rings(tp); | 5992 | err = tg3_init_rings(tp); |
5993 | if (err) | ||
5994 | return err; | ||
5946 | 5995 | ||
5947 | /* This value is determined during the probe time DMA | 5996 | /* This value is determined during the probe time DMA |
5948 | * engine test, tg3_test_dma. | 5997 | * engine test, tg3_test_dma. |
@@ -7956,7 +8005,7 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam * | |||
7956 | static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) | 8005 | static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) |
7957 | { | 8006 | { |
7958 | struct tg3 *tp = netdev_priv(dev); | 8007 | struct tg3 *tp = netdev_priv(dev); |
7959 | int irq_sync = 0; | 8008 | int irq_sync = 0, err = 0; |
7960 | 8009 | ||
7961 | if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) || | 8010 | if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) || |
7962 | (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) || | 8011 | (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) || |
@@ -7980,13 +8029,14 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e | |||
7980 | 8029 | ||
7981 | if (netif_running(dev)) { | 8030 | if (netif_running(dev)) { |
7982 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 8031 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); |
7983 | tg3_init_hw(tp, 1); | 8032 | err = tg3_restart_hw(tp, 1); |
7984 | tg3_netif_start(tp); | 8033 | if (!err) |
8034 | tg3_netif_start(tp); | ||
7985 | } | 8035 | } |
7986 | 8036 | ||
7987 | tg3_full_unlock(tp); | 8037 | tg3_full_unlock(tp); |
7988 | 8038 | ||
7989 | return 0; | 8039 | return err; |
7990 | } | 8040 | } |
7991 | 8041 | ||
7992 | static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) | 8042 | static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) |
@@ -8001,7 +8051,7 @@ static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam | |||
8001 | static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) | 8051 | static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) |
8002 | { | 8052 | { |
8003 | struct tg3 *tp = netdev_priv(dev); | 8053 | struct tg3 *tp = netdev_priv(dev); |
8004 | int irq_sync = 0; | 8054 | int irq_sync = 0, err = 0; |
8005 | 8055 | ||
8006 | if (netif_running(dev)) { | 8056 | if (netif_running(dev)) { |
8007 | tg3_netif_stop(tp); | 8057 | tg3_netif_stop(tp); |
@@ -8025,13 +8075,14 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam | |||
8025 | 8075 | ||
8026 | if (netif_running(dev)) { | 8076 | if (netif_running(dev)) { |
8027 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 8077 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); |
8028 | tg3_init_hw(tp, 1); | 8078 | err = tg3_restart_hw(tp, 1); |
8029 | tg3_netif_start(tp); | 8079 | if (!err) |
8080 | tg3_netif_start(tp); | ||
8030 | } | 8081 | } |
8031 | 8082 | ||
8032 | tg3_full_unlock(tp); | 8083 | tg3_full_unlock(tp); |
8033 | 8084 | ||
8034 | return 0; | 8085 | return err; |
8035 | } | 8086 | } |
8036 | 8087 | ||
8037 | static u32 tg3_get_rx_csum(struct net_device *dev) | 8088 | static u32 tg3_get_rx_csum(struct net_device *dev) |
@@ -8666,7 +8717,9 @@ static int tg3_test_loopback(struct tg3 *tp) | |||
8666 | if (!netif_running(tp->dev)) | 8717 | if (!netif_running(tp->dev)) |
8667 | return TG3_LOOPBACK_FAILED; | 8718 | return TG3_LOOPBACK_FAILED; |
8668 | 8719 | ||
8669 | tg3_reset_hw(tp, 1); | 8720 | err = tg3_reset_hw(tp, 1); |
8721 | if (err) | ||
8722 | return TG3_LOOPBACK_FAILED; | ||
8670 | 8723 | ||
8671 | if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK)) | 8724 | if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK)) |
8672 | err |= TG3_MAC_LOOPBACK_FAILED; | 8725 | err |= TG3_MAC_LOOPBACK_FAILED; |
@@ -8740,8 +8793,8 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, | |||
8740 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 8793 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); |
8741 | if (netif_running(dev)) { | 8794 | if (netif_running(dev)) { |
8742 | tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; | 8795 | tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; |
8743 | tg3_init_hw(tp, 1); | 8796 | if (!tg3_restart_hw(tp, 1)) |
8744 | tg3_netif_start(tp); | 8797 | tg3_netif_start(tp); |
8745 | } | 8798 | } |
8746 | 8799 | ||
8747 | tg3_full_unlock(tp); | 8800 | tg3_full_unlock(tp); |
@@ -10078,6 +10131,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
10078 | static struct pci_device_id write_reorder_chipsets[] = { | 10131 | static struct pci_device_id write_reorder_chipsets[] = { |
10079 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, | 10132 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, |
10080 | PCI_DEVICE_ID_AMD_FE_GATE_700C) }, | 10133 | PCI_DEVICE_ID_AMD_FE_GATE_700C) }, |
10134 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, | ||
10135 | PCI_DEVICE_ID_AMD_8131_BRIDGE) }, | ||
10081 | { PCI_DEVICE(PCI_VENDOR_ID_VIA, | 10136 | { PCI_DEVICE(PCI_VENDOR_ID_VIA, |
10082 | PCI_DEVICE_ID_VIA_8385_0) }, | 10137 | PCI_DEVICE_ID_VIA_8385_0) }, |
10083 | { }, | 10138 | { }, |
@@ -11697,7 +11752,8 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state) | |||
11697 | tg3_full_lock(tp, 0); | 11752 | tg3_full_lock(tp, 0); |
11698 | 11753 | ||
11699 | tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; | 11754 | tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; |
11700 | tg3_init_hw(tp, 1); | 11755 | if (tg3_restart_hw(tp, 1)) |
11756 | goto out; | ||
11701 | 11757 | ||
11702 | tp->timer.expires = jiffies + tp->timer_offset; | 11758 | tp->timer.expires = jiffies + tp->timer_offset; |
11703 | add_timer(&tp->timer); | 11759 | add_timer(&tp->timer); |
@@ -11705,6 +11761,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state) | |||
11705 | netif_device_attach(dev); | 11761 | netif_device_attach(dev); |
11706 | tg3_netif_start(tp); | 11762 | tg3_netif_start(tp); |
11707 | 11763 | ||
11764 | out: | ||
11708 | tg3_full_unlock(tp); | 11765 | tg3_full_unlock(tp); |
11709 | } | 11766 | } |
11710 | 11767 | ||
@@ -11731,16 +11788,19 @@ static int tg3_resume(struct pci_dev *pdev) | |||
11731 | tg3_full_lock(tp, 0); | 11788 | tg3_full_lock(tp, 0); |
11732 | 11789 | ||
11733 | tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; | 11790 | tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; |
11734 | tg3_init_hw(tp, 1); | 11791 | err = tg3_restart_hw(tp, 1); |
11792 | if (err) | ||
11793 | goto out; | ||
11735 | 11794 | ||
11736 | tp->timer.expires = jiffies + tp->timer_offset; | 11795 | tp->timer.expires = jiffies + tp->timer_offset; |
11737 | add_timer(&tp->timer); | 11796 | add_timer(&tp->timer); |
11738 | 11797 | ||
11739 | tg3_netif_start(tp); | 11798 | tg3_netif_start(tp); |
11740 | 11799 | ||
11800 | out: | ||
11741 | tg3_full_unlock(tp); | 11801 | tg3_full_unlock(tp); |
11742 | 11802 | ||
11743 | return 0; | 11803 | return err; |
11744 | } | 11804 | } |
11745 | 11805 | ||
11746 | static struct pci_driver tg3_driver = { | 11806 | static struct pci_driver tg3_driver = { |
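Every tg3 call site touched above follows the same shape: halt the chip, try tg3_restart_hw(), and only restart the data path (and report success) when re-initialization worked, otherwise hand the error back to the caller. A minimal sketch of that call-site pattern, using the names from the diff and leaving out the tg3_full_lock()/irq bookkeeping:

    /* Hedged sketch: error-propagating reinit, as applied in the hunks above
     * to tg3_change_mtu(), tg3_set_mac_addr(), tg3_set_ringparam(),
     * tg3_set_pauseparam(), suspend and resume.
     */
    static int example_reconfigure(struct tg3 *tp)
    {
            int err;

            tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);   /* stop the hardware */
            err = tg3_restart_hw(tp, 1);            /* cleans up after itself on failure */
            if (!err)
                    tg3_netif_start(tp);            /* resume TX/RX only on success */

            return err;                             /* e.g. -ENOMEM from the ring refill */
    }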
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c index 063816f2b11e..4103c37172f9 100644 --- a/drivers/net/typhoon.c +++ b/drivers/net/typhoon.c | |||
@@ -805,7 +805,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev) | |||
805 | * If problems develop with TSO, check this first. | 805 | * If problems develop with TSO, check this first. |
806 | */ | 806 | */ |
807 | numDesc = skb_shinfo(skb)->nr_frags + 1; | 807 | numDesc = skb_shinfo(skb)->nr_frags + 1; |
808 | if(skb_tso_size(skb)) | 808 | if (skb_is_gso(skb)) |
809 | numDesc++; | 809 | numDesc++; |
810 | 810 | ||
811 | /* When checking for free space in the ring, we need to also | 811 | /* When checking for free space in the ring, we need to also |
@@ -845,7 +845,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev) | |||
845 | TYPHOON_TX_PF_VLAN_TAG_SHIFT); | 845 | TYPHOON_TX_PF_VLAN_TAG_SHIFT); |
846 | } | 846 | } |
847 | 847 | ||
848 | if(skb_tso_size(skb)) { | 848 | if (skb_is_gso(skb)) { |
849 | first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT; | 849 | first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT; |
850 | first_txd->numDesc++; | 850 | first_txd->numDesc++; |
851 | 851 | ||
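The typhoon change swaps the old TSO-only skb_tso_size() test for the generic skb_is_gso() helper, which checks skb_shinfo(skb)->gso_size and therefore covers all segmentation-offload frames. A minimal sketch of how a TX path sizes its descriptor budget with that helper; the function name is illustrative, only skb_is_gso() and skb_shinfo() are real kernel interfaces:

    #include <linux/skbuff.h>

    /* Hedged sketch: one descriptor per fragment plus the header, and one
     * extra when the frame is GSO/TSO, mirroring typhoon_start_tx() above.
     */
    static unsigned int example_tx_desc_count(struct sk_buff *skb)
    {
            unsigned int numDesc = skb_shinfo(skb)->nr_frags + 1;

            if (skb_is_gso(skb))            /* gso_size != 0 */
                    numDesc++;

            return numDesc;
    }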
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index f5b0078eb4ad..aa9cd92f46b2 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c | |||
@@ -2742,7 +2742,7 @@ static u32 check_connection_type(struct mac_regs __iomem * regs) | |||
2742 | 2742 | ||
2743 | if (PHYSR0 & PHYSR0_SPDG) | 2743 | if (PHYSR0 & PHYSR0_SPDG) |
2744 | status |= VELOCITY_SPEED_1000; | 2744 | status |= VELOCITY_SPEED_1000; |
2745 | if (PHYSR0 & PHYSR0_SPD10) | 2745 | else if (PHYSR0 & PHYSR0_SPD10) |
2746 | status |= VELOCITY_SPEED_10; | 2746 | status |= VELOCITY_SPEED_10; |
2747 | else | 2747 | else |
2748 | status |= VELOCITY_SPEED_100; | 2748 | status |= VELOCITY_SPEED_100; |
@@ -2851,8 +2851,17 @@ static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd | |||
2851 | u32 status; | 2851 | u32 status; |
2852 | status = check_connection_type(vptr->mac_regs); | 2852 | status = check_connection_type(vptr->mac_regs); |
2853 | 2853 | ||
2854 | cmd->supported = SUPPORTED_TP | SUPPORTED_Autoneg | SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full; | 2854 | cmd->supported = SUPPORTED_TP | |
2855 | if (status & VELOCITY_SPEED_100) | 2855 | SUPPORTED_Autoneg | |
2856 | SUPPORTED_10baseT_Half | | ||
2857 | SUPPORTED_10baseT_Full | | ||
2858 | SUPPORTED_100baseT_Half | | ||
2859 | SUPPORTED_100baseT_Full | | ||
2860 | SUPPORTED_1000baseT_Half | | ||
2861 | SUPPORTED_1000baseT_Full; | ||
2862 | if (status & VELOCITY_SPEED_1000) | ||
2863 | cmd->speed = SPEED_1000; | ||
2864 | else if (status & VELOCITY_SPEED_100) | ||
2856 | cmd->speed = SPEED_100; | 2865 | cmd->speed = SPEED_100; |
2857 | else | 2866 | else |
2858 | cmd->speed = SPEED_10; | 2867 | cmd->speed = SPEED_10; |
@@ -2896,7 +2905,7 @@ static u32 velocity_get_link(struct net_device *dev) | |||
2896 | { | 2905 | { |
2897 | struct velocity_info *vptr = netdev_priv(dev); | 2906 | struct velocity_info *vptr = netdev_priv(dev); |
2898 | struct mac_regs __iomem * regs = vptr->mac_regs; | 2907 | struct mac_regs __iomem * regs = vptr->mac_regs; |
2899 | return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, ®s->PHYSR0) ? 0 : 1; | 2908 | return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, ®s->PHYSR0) ? 1 : 0; |
2900 | } | 2909 | } |
2901 | 2910 | ||
2902 | static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | 2911 | static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) |
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c index c92ac9fde083..435e91ec4620 100644 --- a/drivers/net/wan/c101.c +++ b/drivers/net/wan/c101.c | |||
@@ -116,27 +116,33 @@ static inline void openwin(card_t *card, u8 page) | |||
116 | #include "hd6457x.c" | 116 | #include "hd6457x.c" |
117 | 117 | ||
118 | 118 | ||
119 | static inline void set_carrier(port_t *port) | ||
120 | { | ||
121 | if (!(sca_in(MSCI1_OFFSET + ST3, port) & ST3_DCD)) | ||
122 | netif_carrier_on(port_to_dev(port)); | ||
123 | else | ||
124 | netif_carrier_off(port_to_dev(port)); | ||
125 | } | ||
126 | |||
127 | |||
119 | static void sca_msci_intr(port_t *port) | 128 | static void sca_msci_intr(port_t *port) |
120 | { | 129 | { |
121 | struct net_device *dev = port_to_dev(port); | 130 | u8 stat = sca_in(MSCI1_OFFSET + ST1, port); /* read MSCI ST1 status */ |
122 | card_t* card = port_to_card(port); | ||
123 | u8 stat = sca_in(MSCI1_OFFSET + ST1, card); /* read MSCI ST1 status */ | ||
124 | 131 | ||
125 | /* Reset MSCI TX underrun status bit */ | 132 | /* Reset MSCI TX underrun status bit */ |
126 | sca_out(stat & ST1_UDRN, MSCI0_OFFSET + ST1, card); | 133 | sca_out(stat & ST1_UDRN, MSCI0_OFFSET + ST1, port); |
127 | 134 | ||
128 | if (stat & ST1_UDRN) { | 135 | if (stat & ST1_UDRN) { |
129 | struct net_device_stats *stats = hdlc_stats(dev); | 136 | struct net_device_stats *stats = hdlc_stats(port_to_dev(port)); |
130 | stats->tx_errors++; /* TX Underrun error detected */ | 137 | stats->tx_errors++; /* TX Underrun error detected */ |
131 | stats->tx_fifo_errors++; | 138 | stats->tx_fifo_errors++; |
132 | } | 139 | } |
133 | 140 | ||
134 | /* Reset MSCI CDCD status bit - uses ch#2 DCD input */ | 141 | /* Reset MSCI CDCD status bit - uses ch#2 DCD input */ |
135 | sca_out(stat & ST1_CDCD, MSCI1_OFFSET + ST1, card); | 142 | sca_out(stat & ST1_CDCD, MSCI1_OFFSET + ST1, port); |
136 | 143 | ||
137 | if (stat & ST1_CDCD) | 144 | if (stat & ST1_CDCD) |
138 | hdlc_set_carrier(!(sca_in(MSCI1_OFFSET + ST3, card) & ST3_DCD), | 145 | set_carrier(port); |
139 | dev); | ||
140 | } | 146 | } |
141 | 147 | ||
142 | 148 | ||
@@ -190,8 +196,7 @@ static int c101_open(struct net_device *dev) | |||
190 | sca_out(IE1_UDRN, MSCI0_OFFSET + IE1, port); | 196 | sca_out(IE1_UDRN, MSCI0_OFFSET + IE1, port); |
191 | sca_out(IE0_TXINT, MSCI0_OFFSET + IE0, port); | 197 | sca_out(IE0_TXINT, MSCI0_OFFSET + IE0, port); |
192 | 198 | ||
193 | hdlc_set_carrier(!(sca_in(MSCI1_OFFSET + ST3, port) & ST3_DCD), dev); | 199 | set_carrier(port); |
194 | printk(KERN_DEBUG "0x%X\n", sca_in(MSCI1_OFFSET + ST3, port)); | ||
195 | 200 | ||
196 | /* enable MSCI1 CDCD interrupt */ | 201 | /* enable MSCI1 CDCD interrupt */ |
197 | sca_out(IE1_CDCD, MSCI1_OFFSET + IE1, port); | 202 | sca_out(IE1_CDCD, MSCI1_OFFSET + IE1, port); |
@@ -378,7 +383,7 @@ static int __init c101_run(unsigned long irq, unsigned long winbase) | |||
378 | } | 383 | } |
379 | 384 | ||
380 | sca_init_sync_port(card); /* Set up C101 memory */ | 385 | sca_init_sync_port(card); /* Set up C101 memory */ |
381 | hdlc_set_carrier(!(sca_in(MSCI1_OFFSET + ST3, card) & ST3_DCD), dev); | 386 | set_carrier(card); |
382 | 387 | ||
383 | printk(KERN_INFO "%s: Moxa C101 on IRQ%u," | 388 | printk(KERN_INFO "%s: Moxa C101 on IRQ%u," |
384 | " using %u TX + %u RX packets rings\n", | 389 | " using %u TX + %u RX packets rings\n", |
@@ -443,4 +448,5 @@ module_exit(c101_cleanup); | |||
443 | MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); | 448 | MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); |
444 | MODULE_DESCRIPTION("Moxa C101 serial port driver"); | 449 | MODULE_DESCRIPTION("Moxa C101 serial port driver"); |
445 | MODULE_LICENSE("GPL v2"); | 450 | MODULE_LICENSE("GPL v2"); |
446 | module_param(hw, charp, 0444); /* hw=irq,ram:irq,... */ | 451 | module_param(hw, charp, 0444); |
452 | MODULE_PARM_DESC(hw, "irq,ram:irq,..."); | ||
diff --git a/drivers/net/wan/hd6457x.c b/drivers/net/wan/hd6457x.c index d3743321a977..dce2bb317b82 100644 --- a/drivers/net/wan/hd6457x.c +++ b/drivers/net/wan/hd6457x.c | |||
@@ -168,6 +168,23 @@ static inline u32 buffer_offset(port_t *port, u16 desc, int transmit) | |||
168 | } | 168 | } |
169 | 169 | ||
170 | 170 | ||
171 | static inline void sca_set_carrier(port_t *port) | ||
172 | { | ||
173 | if (!(sca_in(get_msci(port) + ST3, port_to_card(port)) & ST3_DCD)) { | ||
174 | #ifdef DEBUG_LINK | ||
175 | printk(KERN_DEBUG "%s: sca_set_carrier on\n", | ||
176 | port_to_dev(port)->name); | ||
177 | #endif | ||
178 | netif_carrier_on(port_to_dev(port)); | ||
179 | } else { | ||
180 | #ifdef DEBUG_LINK | ||
181 | printk(KERN_DEBUG "%s: sca_set_carrier off\n", | ||
182 | port_to_dev(port)->name); | ||
183 | #endif | ||
184 | netif_carrier_off(port_to_dev(port)); | ||
185 | } | ||
186 | } | ||
187 | |||
171 | 188 | ||
172 | static void sca_init_sync_port(port_t *port) | 189 | static void sca_init_sync_port(port_t *port) |
173 | { | 190 | { |
@@ -237,9 +254,7 @@ static void sca_init_sync_port(port_t *port) | |||
237 | sca_out(DIR_BOFE, DIR_TX(phy_node(port)), card); | 254 | sca_out(DIR_BOFE, DIR_TX(phy_node(port)), card); |
238 | } | 255 | } |
239 | } | 256 | } |
240 | 257 | sca_set_carrier(port); | |
241 | hdlc_set_carrier(!(sca_in(get_msci(port) + ST3, card) & ST3_DCD), | ||
242 | port_to_dev(port)); | ||
243 | } | 258 | } |
244 | 259 | ||
245 | 260 | ||
@@ -262,8 +277,7 @@ static inline void sca_msci_intr(port_t *port) | |||
262 | } | 277 | } |
263 | 278 | ||
264 | if (stat & ST1_CDCD) | 279 | if (stat & ST1_CDCD) |
265 | hdlc_set_carrier(!(sca_in(msci + ST3, card) & ST3_DCD), | 280 | sca_set_carrier(port); |
266 | port_to_dev(port)); | ||
267 | } | 281 | } |
268 | #endif | 282 | #endif |
269 | 283 | ||
@@ -566,7 +580,7 @@ static void sca_open(struct net_device *dev) | |||
566 | - all DMA interrupts | 580 | - all DMA interrupts |
567 | */ | 581 | */ |
568 | 582 | ||
569 | hdlc_set_carrier(!(sca_in(msci + ST3, card) & ST3_DCD), dev); | 583 | sca_set_carrier(port); |
570 | 584 | ||
571 | #ifdef __HD64570_H | 585 | #ifdef __HD64570_H |
572 | /* MSCI TX INT and RX INT A IRQ enable */ | 586 | /* MSCI TX INT and RX INT A IRQ enable */ |
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c index 1fd04662c4fc..f289daba0c7b 100644 --- a/drivers/net/wan/hdlc_cisco.c +++ b/drivers/net/wan/hdlc_cisco.c | |||
@@ -192,9 +192,7 @@ static int cisco_rx(struct sk_buff *skb) | |||
192 | "uptime %ud%uh%um%us)\n", | 192 | "uptime %ud%uh%um%us)\n", |
193 | dev->name, days, hrs, | 193 | dev->name, days, hrs, |
194 | min, sec); | 194 | min, sec); |
195 | #if 0 | 195 | netif_dormant_off(dev); |
196 | netif_carrier_on(dev); | ||
197 | #endif | ||
198 | hdlc->state.cisco.up = 1; | 196 | hdlc->state.cisco.up = 1; |
199 | } | 197 | } |
200 | } | 198 | } |
@@ -227,9 +225,7 @@ static void cisco_timer(unsigned long arg) | |||
227 | hdlc->state.cisco.settings.timeout * HZ)) { | 225 | hdlc->state.cisco.settings.timeout * HZ)) { |
228 | hdlc->state.cisco.up = 0; | 226 | hdlc->state.cisco.up = 0; |
229 | printk(KERN_INFO "%s: Link down\n", dev->name); | 227 | printk(KERN_INFO "%s: Link down\n", dev->name); |
230 | #if 0 | 228 | netif_dormant_on(dev); |
231 | netif_carrier_off(dev); | ||
232 | #endif | ||
233 | } | 229 | } |
234 | 230 | ||
235 | cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, | 231 | cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, |
@@ -265,10 +261,7 @@ static void cisco_stop(struct net_device *dev) | |||
265 | { | 261 | { |
266 | hdlc_device *hdlc = dev_to_hdlc(dev); | 262 | hdlc_device *hdlc = dev_to_hdlc(dev); |
267 | del_timer_sync(&hdlc->state.cisco.timer); | 263 | del_timer_sync(&hdlc->state.cisco.timer); |
268 | #if 0 | 264 | netif_dormant_on(dev); |
269 | if (netif_carrier_ok(dev)) | ||
270 | netif_carrier_off(dev); | ||
271 | #endif | ||
272 | hdlc->state.cisco.up = 0; | 265 | hdlc->state.cisco.up = 0; |
273 | hdlc->state.cisco.request_sent = 0; | 266 | hdlc->state.cisco.request_sent = 0; |
274 | } | 267 | } |
@@ -328,6 +321,7 @@ int hdlc_cisco_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
328 | dev->type = ARPHRD_CISCO; | 321 | dev->type = ARPHRD_CISCO; |
329 | dev->flags = IFF_POINTOPOINT | IFF_NOARP; | 322 | dev->flags = IFF_POINTOPOINT | IFF_NOARP; |
330 | dev->addr_len = 0; | 323 | dev->addr_len = 0; |
324 | netif_dormant_on(dev); | ||
331 | return 0; | 325 | return 0; |
332 | } | 326 | } |
333 | 327 | ||
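The cisco hunks (and the frame relay ones below) replace the commented-out netif_carrier_*() calls with the dormant flag: the hardware driver owns carrier state, while the protocol marks the interface dormant until its keepalives establish the link, so the operational state only reports UP when both agree. A minimal sketch of that split, assuming the protocol-level "up" decision is already available:

    #include <linux/netdevice.h>

    /* Hedged sketch: protocol-level link signalling via the dormant flag.
     * netif_carrier_on()/off() stays in the hardware driver; the protocol
     * only toggles dormancy, and the stack reports IF_OPER_UP when the
     * carrier is on and the device is not dormant.
     */
    static void example_proto_link_change(struct net_device *dev, int proto_up)
    {
            if (proto_up)
                    netif_dormant_off(dev);
            else
                    netif_dormant_on(dev);
    }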
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index 523afe17564e..7bb737bbdeb9 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c | |||
@@ -301,7 +301,7 @@ static int pvc_open(struct net_device *dev) | |||
301 | if (pvc->open_count++ == 0) { | 301 | if (pvc->open_count++ == 0) { |
302 | hdlc_device *hdlc = dev_to_hdlc(pvc->master); | 302 | hdlc_device *hdlc = dev_to_hdlc(pvc->master); |
303 | if (hdlc->state.fr.settings.lmi == LMI_NONE) | 303 | if (hdlc->state.fr.settings.lmi == LMI_NONE) |
304 | pvc->state.active = hdlc->carrier; | 304 | pvc->state.active = netif_carrier_ok(pvc->master); |
305 | 305 | ||
306 | pvc_carrier(pvc->state.active, pvc); | 306 | pvc_carrier(pvc->state.active, pvc); |
307 | hdlc->state.fr.dce_changed = 1; | 307 | hdlc->state.fr.dce_changed = 1; |
@@ -545,11 +545,7 @@ static void fr_set_link_state(int reliable, struct net_device *dev) | |||
545 | 545 | ||
546 | hdlc->state.fr.reliable = reliable; | 546 | hdlc->state.fr.reliable = reliable; |
547 | if (reliable) { | 547 | if (reliable) { |
548 | #if 0 | 548 | netif_dormant_off(dev); |
549 | if (!netif_carrier_ok(dev)) | ||
550 | netif_carrier_on(dev); | ||
551 | #endif | ||
552 | |||
553 | hdlc->state.fr.n391cnt = 0; /* Request full status */ | 549 | hdlc->state.fr.n391cnt = 0; /* Request full status */ |
554 | hdlc->state.fr.dce_changed = 1; | 550 | hdlc->state.fr.dce_changed = 1; |
555 | 551 | ||
@@ -562,11 +558,7 @@ static void fr_set_link_state(int reliable, struct net_device *dev) | |||
562 | } | 558 | } |
563 | } | 559 | } |
564 | } else { | 560 | } else { |
565 | #if 0 | 561 | netif_dormant_on(dev); |
566 | if (netif_carrier_ok(dev)) | ||
567 | netif_carrier_off(dev); | ||
568 | #endif | ||
569 | |||
570 | while (pvc) { /* Deactivate all PVCs */ | 562 | while (pvc) { /* Deactivate all PVCs */ |
571 | pvc_carrier(0, pvc); | 563 | pvc_carrier(0, pvc); |
572 | pvc->state.exist = pvc->state.active = 0; | 564 | pvc->state.exist = pvc->state.active = 0; |
diff --git a/drivers/net/wan/hdlc_generic.c b/drivers/net/wan/hdlc_generic.c index b7da55140fbd..04ca1f7b6424 100644 --- a/drivers/net/wan/hdlc_generic.c +++ b/drivers/net/wan/hdlc_generic.c | |||
@@ -34,10 +34,11 @@ | |||
34 | #include <linux/inetdevice.h> | 34 | #include <linux/inetdevice.h> |
35 | #include <linux/lapb.h> | 35 | #include <linux/lapb.h> |
36 | #include <linux/rtnetlink.h> | 36 | #include <linux/rtnetlink.h> |
37 | #include <linux/notifier.h> | ||
37 | #include <linux/hdlc.h> | 38 | #include <linux/hdlc.h> |
38 | 39 | ||
39 | 40 | ||
40 | static const char* version = "HDLC support module revision 1.18"; | 41 | static const char* version = "HDLC support module revision 1.19"; |
41 | 42 | ||
42 | #undef DEBUG_LINK | 43 | #undef DEBUG_LINK |
43 | 44 | ||
@@ -73,57 +74,51 @@ static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev, | |||
73 | 74 | ||
74 | 75 | ||
75 | 76 | ||
76 | static void __hdlc_set_carrier_on(struct net_device *dev) | 77 | static inline void hdlc_proto_start(struct net_device *dev) |
77 | { | 78 | { |
78 | hdlc_device *hdlc = dev_to_hdlc(dev); | 79 | hdlc_device *hdlc = dev_to_hdlc(dev); |
79 | if (hdlc->proto.start) | 80 | if (hdlc->proto.start) |
80 | return hdlc->proto.start(dev); | 81 | return hdlc->proto.start(dev); |
81 | #if 0 | ||
82 | #ifdef DEBUG_LINK | ||
83 | if (netif_carrier_ok(dev)) | ||
84 | printk(KERN_ERR "hdlc_set_carrier_on(): already on\n"); | ||
85 | #endif | ||
86 | netif_carrier_on(dev); | ||
87 | #endif | ||
88 | } | 82 | } |
89 | 83 | ||
90 | 84 | ||
91 | 85 | ||
92 | static void __hdlc_set_carrier_off(struct net_device *dev) | 86 | static inline void hdlc_proto_stop(struct net_device *dev) |
93 | { | 87 | { |
94 | hdlc_device *hdlc = dev_to_hdlc(dev); | 88 | hdlc_device *hdlc = dev_to_hdlc(dev); |
95 | if (hdlc->proto.stop) | 89 | if (hdlc->proto.stop) |
96 | return hdlc->proto.stop(dev); | 90 | return hdlc->proto.stop(dev); |
97 | |||
98 | #if 0 | ||
99 | #ifdef DEBUG_LINK | ||
100 | if (!netif_carrier_ok(dev)) | ||
101 | printk(KERN_ERR "hdlc_set_carrier_off(): already off\n"); | ||
102 | #endif | ||
103 | netif_carrier_off(dev); | ||
104 | #endif | ||
105 | } | 91 | } |
106 | 92 | ||
107 | 93 | ||
108 | 94 | ||
109 | void hdlc_set_carrier(int on, struct net_device *dev) | 95 | static int hdlc_device_event(struct notifier_block *this, unsigned long event, |
96 | void *ptr) | ||
110 | { | 97 | { |
111 | hdlc_device *hdlc = dev_to_hdlc(dev); | 98 | struct net_device *dev = ptr; |
99 | hdlc_device *hdlc; | ||
112 | unsigned long flags; | 100 | unsigned long flags; |
113 | on = on ? 1 : 0; | 101 | int on; |
102 | |||
103 | if (dev->get_stats != hdlc_get_stats) | ||
104 | return NOTIFY_DONE; /* not an HDLC device */ | ||
105 | |||
106 | if (event != NETDEV_CHANGE) | ||
107 | return NOTIFY_DONE; /* Only interested in carrier changes */ | ||
108 | |||
109 | on = netif_carrier_ok(dev); | ||
114 | 110 | ||
115 | #ifdef DEBUG_LINK | 111 | #ifdef DEBUG_LINK |
116 | printk(KERN_DEBUG "hdlc_set_carrier %i\n", on); | 112 | printk(KERN_DEBUG "%s: hdlc_device_event NETDEV_CHANGE, carrier %i\n", |
113 | dev->name, on); | ||
117 | #endif | 114 | #endif |
118 | 115 | ||
116 | hdlc = dev_to_hdlc(dev); | ||
119 | spin_lock_irqsave(&hdlc->state_lock, flags); | 117 | spin_lock_irqsave(&hdlc->state_lock, flags); |
120 | 118 | ||
121 | if (hdlc->carrier == on) | 119 | if (hdlc->carrier == on) |
122 | goto carrier_exit; /* no change in DCD line level */ | 120 | goto carrier_exit; /* no change in DCD line level */ |
123 | 121 | ||
124 | #ifdef DEBUG_LINK | ||
125 | printk(KERN_INFO "%s: carrier %s\n", dev->name, on ? "ON" : "off"); | ||
126 | #endif | ||
127 | hdlc->carrier = on; | 122 | hdlc->carrier = on; |
128 | 123 | ||
129 | if (!hdlc->open) | 124 | if (!hdlc->open) |
@@ -131,14 +126,15 @@ void hdlc_set_carrier(int on, struct net_device *dev) | |||
131 | 126 | ||
132 | if (hdlc->carrier) { | 127 | if (hdlc->carrier) { |
133 | printk(KERN_INFO "%s: Carrier detected\n", dev->name); | 128 | printk(KERN_INFO "%s: Carrier detected\n", dev->name); |
134 | __hdlc_set_carrier_on(dev); | 129 | hdlc_proto_start(dev); |
135 | } else { | 130 | } else { |
136 | printk(KERN_INFO "%s: Carrier lost\n", dev->name); | 131 | printk(KERN_INFO "%s: Carrier lost\n", dev->name); |
137 | __hdlc_set_carrier_off(dev); | 132 | hdlc_proto_stop(dev); |
138 | } | 133 | } |
139 | 134 | ||
140 | carrier_exit: | 135 | carrier_exit: |
141 | spin_unlock_irqrestore(&hdlc->state_lock, flags); | 136 | spin_unlock_irqrestore(&hdlc->state_lock, flags); |
137 | return NOTIFY_DONE; | ||
142 | } | 138 | } |
143 | 139 | ||
144 | 140 | ||
@@ -165,7 +161,7 @@ int hdlc_open(struct net_device *dev) | |||
165 | 161 | ||
166 | if (hdlc->carrier) { | 162 | if (hdlc->carrier) { |
167 | printk(KERN_INFO "%s: Carrier detected\n", dev->name); | 163 | printk(KERN_INFO "%s: Carrier detected\n", dev->name); |
168 | __hdlc_set_carrier_on(dev); | 164 | hdlc_proto_start(dev); |
169 | } else | 165 | } else |
170 | printk(KERN_INFO "%s: No carrier\n", dev->name); | 166 | printk(KERN_INFO "%s: No carrier\n", dev->name); |
171 | 167 | ||
@@ -190,7 +186,7 @@ void hdlc_close(struct net_device *dev) | |||
190 | 186 | ||
191 | hdlc->open = 0; | 187 | hdlc->open = 0; |
192 | if (hdlc->carrier) | 188 | if (hdlc->carrier) |
193 | __hdlc_set_carrier_off(dev); | 189 | hdlc_proto_stop(dev); |
194 | 190 | ||
195 | spin_unlock_irq(&hdlc->state_lock); | 191 | spin_unlock_irq(&hdlc->state_lock); |
196 | 192 | ||
@@ -303,7 +299,6 @@ MODULE_LICENSE("GPL v2"); | |||
303 | 299 | ||
304 | EXPORT_SYMBOL(hdlc_open); | 300 | EXPORT_SYMBOL(hdlc_open); |
305 | EXPORT_SYMBOL(hdlc_close); | 301 | EXPORT_SYMBOL(hdlc_close); |
306 | EXPORT_SYMBOL(hdlc_set_carrier); | ||
307 | EXPORT_SYMBOL(hdlc_ioctl); | 302 | EXPORT_SYMBOL(hdlc_ioctl); |
308 | EXPORT_SYMBOL(hdlc_setup); | 303 | EXPORT_SYMBOL(hdlc_setup); |
309 | EXPORT_SYMBOL(alloc_hdlcdev); | 304 | EXPORT_SYMBOL(alloc_hdlcdev); |
@@ -315,9 +310,18 @@ static struct packet_type hdlc_packet_type = { | |||
315 | }; | 310 | }; |
316 | 311 | ||
317 | 312 | ||
313 | static struct notifier_block hdlc_notifier = { | ||
314 | .notifier_call = hdlc_device_event, | ||
315 | }; | ||
316 | |||
317 | |||
318 | static int __init hdlc_module_init(void) | 318 | static int __init hdlc_module_init(void) |
319 | { | 319 | { |
320 | int result; | ||
321 | |||
320 | printk(KERN_INFO "%s\n", version); | 322 | printk(KERN_INFO "%s\n", version); |
323 | if ((result = register_netdevice_notifier(&hdlc_notifier)) != 0) | ||
324 | return result; | ||
321 | dev_add_pack(&hdlc_packet_type); | 325 | dev_add_pack(&hdlc_packet_type); |
322 | return 0; | 326 | return 0; |
323 | } | 327 | } |
@@ -327,6 +331,7 @@ static int __init hdlc_module_init(void) | |||
327 | static void __exit hdlc_module_exit(void) | 331 | static void __exit hdlc_module_exit(void) |
328 | { | 332 | { |
329 | dev_remove_pack(&hdlc_packet_type); | 333 | dev_remove_pack(&hdlc_packet_type); |
334 | unregister_netdevice_notifier(&hdlc_notifier); | ||
330 | } | 335 | } |
331 | 336 | ||
332 | 337 | ||
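With hdlc_set_carrier() gone, hdlc_generic learns about carrier transitions the same way any other subsystem can: a netdevice notifier that filters for NETDEV_CHANGE and ignores devices it does not manage. A minimal sketch of that plumbing, with example_* names standing in for the hdlc-specific handler in the diff:

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/module.h>
    #include <linux/netdevice.h>
    #include <linux/notifier.h>

    /* Hedged sketch: subscribe to netdevice events and react only to
     * carrier changes (NETDEV_CHANGE).
     */
    static int example_device_event(struct notifier_block *this,
                                    unsigned long event, void *ptr)
    {
            struct net_device *dev = ptr;   /* notifier payload in this era */

            if (event != NETDEV_CHANGE)
                    return NOTIFY_DONE;     /* only carrier changes matter here */

            /* ... verify dev is ours, then act on the new carrier state ... */
            printk(KERN_DEBUG "%s: carrier %s\n", dev->name,
                   netif_carrier_ok(dev) ? "on" : "off");
            return NOTIFY_DONE;
    }

    static struct notifier_block example_notifier = {
            .notifier_call = example_device_event,
    };

    static int __init example_init(void)
    {
            return register_netdevice_notifier(&example_notifier);
    }

    static void __exit example_exit(void)
    {
            unregister_netdevice_notifier(&example_notifier);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");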
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index b81263eaede0..fbaab5bf71eb 100644 --- a/drivers/net/wan/hdlc_ppp.c +++ b/drivers/net/wan/hdlc_ppp.c | |||
@@ -107,6 +107,7 @@ int hdlc_ppp_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
107 | dev->hard_header = NULL; | 107 | dev->hard_header = NULL; |
108 | dev->type = ARPHRD_PPP; | 108 | dev->type = ARPHRD_PPP; |
109 | dev->addr_len = 0; | 109 | dev->addr_len = 0; |
110 | netif_dormant_off(dev); | ||
110 | return 0; | 111 | return 0; |
111 | } | 112 | } |
112 | 113 | ||
diff --git a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c index 9456d31cb1c1..f15aa6ba77f1 100644 --- a/drivers/net/wan/hdlc_raw.c +++ b/drivers/net/wan/hdlc_raw.c | |||
@@ -82,6 +82,7 @@ int hdlc_raw_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
82 | dev->type = ARPHRD_RAWHDLC; | 82 | dev->type = ARPHRD_RAWHDLC; |
83 | dev->flags = IFF_POINTOPOINT | IFF_NOARP; | 83 | dev->flags = IFF_POINTOPOINT | IFF_NOARP; |
84 | dev->addr_len = 0; | 84 | dev->addr_len = 0; |
85 | netif_dormant_off(dev); | ||
85 | return 0; | 86 | return 0; |
86 | } | 87 | } |
87 | 88 | ||
diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c index b1285cc8fee6..d1884987f94e 100644 --- a/drivers/net/wan/hdlc_raw_eth.c +++ b/drivers/net/wan/hdlc_raw_eth.c | |||
@@ -100,6 +100,7 @@ int hdlc_raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
100 | dev->tx_queue_len = old_qlen; | 100 | dev->tx_queue_len = old_qlen; |
101 | memcpy(dev->dev_addr, "\x00\x01", 2); | 101 | memcpy(dev->dev_addr, "\x00\x01", 2); |
102 | get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2); | 102 | get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2); |
103 | netif_dormant_off(dev); | ||
103 | return 0; | 104 | return 0; |
104 | } | 105 | } |
105 | 106 | ||
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c index 07e5eef1fe0f..a867fb411f89 100644 --- a/drivers/net/wan/hdlc_x25.c +++ b/drivers/net/wan/hdlc_x25.c | |||
@@ -212,6 +212,7 @@ int hdlc_x25_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
212 | dev->hard_header = NULL; | 212 | dev->hard_header = NULL; |
213 | dev->type = ARPHRD_X25; | 213 | dev->type = ARPHRD_X25; |
214 | dev->addr_len = 0; | 214 | dev->addr_len = 0; |
215 | netif_dormant_off(dev); | ||
215 | return 0; | 216 | return 0; |
216 | } | 217 | } |
217 | 218 | ||
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c index e013b817cab8..dcf46add3adf 100644 --- a/drivers/net/wan/n2.c +++ b/drivers/net/wan/n2.c | |||
@@ -564,4 +564,5 @@ module_exit(n2_cleanup); | |||
564 | MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); | 564 | MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); |
565 | MODULE_DESCRIPTION("RISCom/N2 serial port driver"); | 565 | MODULE_DESCRIPTION("RISCom/N2 serial port driver"); |
566 | MODULE_LICENSE("GPL v2"); | 566 | MODULE_LICENSE("GPL v2"); |
567 | module_param(hw, charp, 0444); /* hw=io,irq,ram,ports:io,irq,... */ | 567 | module_param(hw, charp, 0444); |
568 | MODULE_PARM_DESC(hw, "io,irq,ram,ports:io,irq,..."); | ||
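The c101 and n2 changes above move the parameter syntax out of a source comment and into MODULE_PARM_DESC, so it shows up in modinfo output instead of being visible only to readers of the code. The pattern in isolation, as a small sketch:

    #include <linux/module.h>
    #include <linux/moduleparam.h>

    /* Hedged sketch: a documented, read-only module parameter. */
    static char *hw;                        /* configuration string */
    module_param(hw, charp, 0444);          /* world-readable in sysfs, not writable */
    MODULE_PARM_DESC(hw, "io,irq,ram,ports:io,irq,...");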
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c index d564224cdca9..b2031dfc4bb1 100644 --- a/drivers/net/wan/wanxl.c +++ b/drivers/net/wan/wanxl.c | |||
@@ -149,7 +149,10 @@ static inline void wanxl_cable_intr(port_t *port) | |||
149 | printk(KERN_INFO "%s: %s%s module, %s cable%s%s\n", | 149 | printk(KERN_INFO "%s: %s%s module, %s cable%s%s\n", |
150 | port->dev->name, pm, dte, cable, dsr, dcd); | 150 | port->dev->name, pm, dte, cable, dsr, dcd); |
151 | 151 | ||
152 | hdlc_set_carrier(value & STATUS_CABLE_DCD, port->dev); | 152 | if (value & STATUS_CABLE_DCD) |
153 | netif_carrier_on(port->dev); | ||
154 | else | ||
155 | netif_carrier_off(port->dev); | ||
153 | } | 156 | } |
154 | 157 | ||
155 | 158 | ||
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index fa9d2c4edc93..2e8ac995d56f 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig | |||
@@ -447,6 +447,7 @@ config AIRO_CS | |||
447 | tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards" | 447 | tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards" |
448 | depends on NET_RADIO && PCMCIA && (BROKEN || !M32R) | 448 | depends on NET_RADIO && PCMCIA && (BROKEN || !M32R) |
449 | select CRYPTO | 449 | select CRYPTO |
450 | select CRYPTO_AES | ||
450 | ---help--- | 451 | ---help--- |
451 | This is the standard Linux driver to support Cisco/Aironet PCMCIA | 452 | This is the standard Linux driver to support Cisco/Aironet PCMCIA |
452 | 802.11 wireless cards. This driver is the same as the Aironet | 453 | 802.11 wireless cards. This driver is the same as the Aironet |
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c index e1c5a939bca4..df317c1e12a8 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c | |||
@@ -1547,7 +1547,7 @@ static void handle_irq_noise(struct bcm43xx_private *bcm) | |||
1547 | goto generate_new; | 1547 | goto generate_new; |
1548 | 1548 | ||
1549 | /* Get the noise samples. */ | 1549 | /* Get the noise samples. */ |
1550 | assert(bcm->noisecalc.nr_samples <= 8); | 1550 | assert(bcm->noisecalc.nr_samples < 8); |
1551 | i = bcm->noisecalc.nr_samples; | 1551 | i = bcm->noisecalc.nr_samples; |
1552 | noise[0] = limit_value(noise[0], 0, ARRAY_SIZE(radio->nrssi_lt) - 1); | 1552 | noise[0] = limit_value(noise[0], 0, ARRAY_SIZE(radio->nrssi_lt) - 1); |
1553 | noise[1] = limit_value(noise[1], 0, ARRAY_SIZE(radio->nrssi_lt) - 1); | 1553 | noise[1] = limit_value(noise[1], 0, ARRAY_SIZE(radio->nrssi_lt) - 1); |
@@ -3701,7 +3701,7 @@ static void bcm43xx_ieee80211_set_security(struct net_device *net_dev, | |||
3701 | } | 3701 | } |
3702 | if (sec->flags & SEC_AUTH_MODE) { | 3702 | if (sec->flags & SEC_AUTH_MODE) { |
3703 | secinfo->auth_mode = sec->auth_mode; | 3703 | secinfo->auth_mode = sec->auth_mode; |
3704 | dprintk(", .auth_mode = %d\n", sec->auth_mode); | 3704 | dprintk(", .auth_mode = %d", sec->auth_mode); |
3705 | } | 3705 | } |
3706 | dprintk("\n"); | 3706 | dprintk("\n"); |
3707 | if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED && | 3707 | if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED && |
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c index d6ed5781b93a..317ace7f9aae 100644 --- a/drivers/net/wireless/orinoco.c +++ b/drivers/net/wireless/orinoco.c | |||
@@ -2875,7 +2875,7 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev, | |||
2875 | if (orinoco_lock(priv, &flags) != 0) | 2875 | if (orinoco_lock(priv, &flags) != 0) |
2876 | return -EBUSY; | 2876 | return -EBUSY; |
2877 | 2877 | ||
2878 | if (erq->pointer) { | 2878 | if (erq->length > 0) { |
2879 | if ((index < 0) || (index >= ORINOCO_MAX_KEYS)) | 2879 | if ((index < 0) || (index >= ORINOCO_MAX_KEYS)) |
2880 | index = priv->tx_key; | 2880 | index = priv->tx_key; |
2881 | 2881 | ||
@@ -2918,7 +2918,7 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev, | |||
2918 | if (erq->flags & IW_ENCODE_RESTRICTED) | 2918 | if (erq->flags & IW_ENCODE_RESTRICTED) |
2919 | restricted = 1; | 2919 | restricted = 1; |
2920 | 2920 | ||
2921 | if (erq->pointer) { | 2921 | if (erq->pointer && erq->length > 0) { |
2922 | priv->keys[index].len = cpu_to_le16(xlen); | 2922 | priv->keys[index].len = cpu_to_le16(xlen); |
2923 | memset(priv->keys[index].data, 0, | 2923 | memset(priv->keys[index].data, 0, |
2924 | sizeof(priv->keys[index].data)); | 2924 | sizeof(priv->keys[index].data)); |
diff --git a/drivers/net/wireless/spectrum_cs.c b/drivers/net/wireless/spectrum_cs.c index 15465278c789..7f78b7801fb3 100644 --- a/drivers/net/wireless/spectrum_cs.c +++ b/drivers/net/wireless/spectrum_cs.c | |||
@@ -34,8 +34,6 @@ | |||
34 | 34 | ||
35 | #include "orinoco.h" | 35 | #include "orinoco.h" |
36 | 36 | ||
37 | static unsigned char *primsym; | ||
38 | static unsigned char *secsym; | ||
39 | static const char primary_fw_name[] = "symbol_sp24t_prim_fw"; | 37 | static const char primary_fw_name[] = "symbol_sp24t_prim_fw"; |
40 | static const char secondary_fw_name[] = "symbol_sp24t_sec_fw"; | 38 | static const char secondary_fw_name[] = "symbol_sp24t_sec_fw"; |
41 | 39 | ||
@@ -440,7 +438,7 @@ spectrum_load_blocks(hermes_t *hw, const struct dblock *first_block) | |||
440 | */ | 438 | */ |
441 | static int | 439 | static int |
442 | spectrum_dl_image(hermes_t *hw, struct pcmcia_device *link, | 440 | spectrum_dl_image(hermes_t *hw, struct pcmcia_device *link, |
443 | const unsigned char *image) | 441 | const unsigned char *image, int secondary) |
444 | { | 442 | { |
445 | int ret; | 443 | int ret; |
446 | const unsigned char *ptr; | 444 | const unsigned char *ptr; |
@@ -455,7 +453,7 @@ spectrum_dl_image(hermes_t *hw, struct pcmcia_device *link, | |||
455 | first_block = (const struct dblock *) ptr; | 453 | first_block = (const struct dblock *) ptr; |
456 | 454 | ||
457 | /* Read the PDA */ | 455 | /* Read the PDA */ |
458 | if (image != primsym) { | 456 | if (secondary) { |
459 | ret = spectrum_read_pda(hw, pda, sizeof(pda)); | 457 | ret = spectrum_read_pda(hw, pda, sizeof(pda)); |
460 | if (ret) | 458 | if (ret) |
461 | return ret; | 459 | return ret; |
@@ -472,7 +470,7 @@ spectrum_dl_image(hermes_t *hw, struct pcmcia_device *link, | |||
472 | return ret; | 470 | return ret; |
473 | 471 | ||
474 | /* Write the PDA to the adapter */ | 472 | /* Write the PDA to the adapter */ |
475 | if (image != primsym) { | 473 | if (secondary) { |
476 | ret = spectrum_apply_pda(hw, first_block, pda); | 474 | ret = spectrum_apply_pda(hw, first_block, pda); |
477 | if (ret) | 475 | if (ret) |
478 | return ret; | 476 | return ret; |
@@ -487,7 +485,7 @@ spectrum_dl_image(hermes_t *hw, struct pcmcia_device *link, | |||
487 | ret = hermes_init(hw); | 485 | ret = hermes_init(hw); |
488 | 486 | ||
489 | /* hermes_reset() should return 0 with the secondary firmware */ | 487 | /* hermes_reset() should return 0 with the secondary firmware */ |
490 | if (image != primsym && ret != 0) | 488 | if (secondary && ret != 0) |
491 | return -ENODEV; | 489 | return -ENODEV; |
492 | 490 | ||
493 | /* And this should work with any firmware */ | 491 | /* And this should work with any firmware */ |
@@ -509,33 +507,30 @@ spectrum_dl_firmware(hermes_t *hw, struct pcmcia_device *link) | |||
509 | const struct firmware *fw_entry; | 507 | const struct firmware *fw_entry; |
510 | 508 | ||
511 | if (request_firmware(&fw_entry, primary_fw_name, | 509 | if (request_firmware(&fw_entry, primary_fw_name, |
512 | &handle_to_dev(link)) == 0) { | 510 | &handle_to_dev(link)) != 0) { |
513 | primsym = fw_entry->data; | ||
514 | } else { | ||
515 | printk(KERN_ERR PFX "Cannot find firmware: %s\n", | 511 | printk(KERN_ERR PFX "Cannot find firmware: %s\n", |
516 | primary_fw_name); | 512 | primary_fw_name); |
517 | return -ENOENT; | 513 | return -ENOENT; |
518 | } | 514 | } |
519 | 515 | ||
520 | if (request_firmware(&fw_entry, secondary_fw_name, | ||
521 | &handle_to_dev(link)) == 0) { | ||
522 | secsym = fw_entry->data; | ||
523 | } else { | ||
524 | printk(KERN_ERR PFX "Cannot find firmware: %s\n", | ||
525 | secondary_fw_name); | ||
526 | return -ENOENT; | ||
527 | } | ||
528 | |||
529 | /* Load primary firmware */ | 516 | /* Load primary firmware */ |
530 | ret = spectrum_dl_image(hw, link, primsym); | 517 | ret = spectrum_dl_image(hw, link, fw_entry->data, 0); |
518 | release_firmware(fw_entry); | ||
531 | if (ret) { | 519 | if (ret) { |
532 | printk(KERN_ERR PFX "Primary firmware download failed\n"); | 520 | printk(KERN_ERR PFX "Primary firmware download failed\n"); |
533 | return ret; | 521 | return ret; |
534 | } | 522 | } |
535 | 523 | ||
536 | /* Load secondary firmware */ | 524 | if (request_firmware(&fw_entry, secondary_fw_name, |
537 | ret = spectrum_dl_image(hw, link, secsym); | 525 | &handle_to_dev(link)) != 0) { |
526 | printk(KERN_ERR PFX "Cannot find firmware: %s\n", | ||
527 | secondary_fw_name); | ||
528 | return -ENOENT; | ||
529 | } | ||
538 | 530 | ||
531 | /* Load secondary firmware */ | ||
532 | ret = spectrum_dl_image(hw, link, fw_entry->data, 1); | ||
533 | release_firmware(fw_entry); | ||
539 | if (ret) { | 534 | if (ret) { |
540 | printk(KERN_ERR PFX "Secondary firmware download failed\n"); | 535 | printk(KERN_ERR PFX "Secondary firmware download failed\n"); |
541 | } | 536 | } |
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c index 662ecc8a33ff..c52e9bcf8d02 100644 --- a/drivers/net/wireless/zd1201.c +++ b/drivers/net/wireless/zd1201.c | |||
@@ -1820,6 +1820,8 @@ static int zd1201_probe(struct usb_interface *interface, | |||
1820 | zd->dev->name); | 1820 | zd->dev->name); |
1821 | 1821 | ||
1822 | usb_set_intfdata(interface, zd); | 1822 | usb_set_intfdata(interface, zd); |
1823 | zd1201_enable(zd); /* zd1201 likes to startup enabled, */ | ||
1824 | zd1201_disable(zd); /* interfering with all the wifis in range */ | ||
1823 | return 0; | 1825 | return 0; |
1824 | 1826 | ||
1825 | err_net: | 1827 | err_net: |
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c index ce1cb2c6aa8d..72f90525bf68 100644 --- a/drivers/net/wireless/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zd1211rw/zd_usb.c | |||
@@ -375,10 +375,8 @@ static void int_urb_complete(struct urb *urb, struct pt_regs *pt_regs) | |||
375 | case -ENODEV: | 375 | case -ENODEV: |
376 | case -ENOENT: | 376 | case -ENOENT: |
377 | case -ECONNRESET: | 377 | case -ECONNRESET: |
378 | goto kfree; | ||
379 | case -EPIPE: | 378 | case -EPIPE: |
380 | usb_clear_halt(urb->dev, EP_INT_IN); | 379 | goto kfree; |
381 | /* FALL-THROUGH */ | ||
382 | default: | 380 | default: |
383 | goto resubmit; | 381 | goto resubmit; |
384 | } | 382 | } |
@@ -580,10 +578,8 @@ static void rx_urb_complete(struct urb *urb, struct pt_regs *pt_regs) | |||
580 | case -ENODEV: | 578 | case -ENODEV: |
581 | case -ENOENT: | 579 | case -ENOENT: |
582 | case -ECONNRESET: | 580 | case -ECONNRESET: |
583 | return; | ||
584 | case -EPIPE: | 581 | case -EPIPE: |
585 | usb_clear_halt(urb->dev, EP_DATA_IN); | 582 | return; |
586 | /* FALL-THROUGH */ | ||
587 | default: | 583 | default: |
588 | dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status); | 584 | dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status); |
589 | goto resubmit; | 585 | goto resubmit; |
@@ -749,11 +745,9 @@ static void tx_urb_complete(struct urb *urb, struct pt_regs *pt_regs) | |||
749 | case -ENODEV: | 745 | case -ENODEV: |
750 | case -ENOENT: | 746 | case -ENOENT: |
751 | case -ECONNRESET: | 747 | case -ECONNRESET: |
748 | case -EPIPE: | ||
752 | dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status); | 749 | dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status); |
753 | break; | 750 | break; |
754 | case -EPIPE: | ||
755 | usb_clear_halt(urb->dev, EP_DATA_OUT); | ||
756 | /* FALL-THROUGH */ | ||
757 | default: | 751 | default: |
758 | dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status); | 752 | dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status); |
759 | goto resubmit; | 753 | goto resubmit; |
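The zd1211rw hunks stop calling usb_clear_halt() from the completion handlers and instead treat -EPIPE like the other fatal statuses: completion handlers run in atomic context, usb_clear_halt() can sleep, and a stalled endpoint is not worth retrying from there. A minimal sketch of the resulting error policy for an RX-style completion handler, using this era's callback signature with illustrative names:

    #include <linux/errno.h>
    #include <linux/usb.h>

    /* Hedged sketch: fatal statuses end the URB, transient errors and
     * successful completions lead to a resubmit.  No usb_clear_halt()
     * here -- it sleeps, and completion handlers must not.
     */
    static void example_rx_complete(struct urb *urb, struct pt_regs *regs)
    {
            switch (urb->status) {
            case 0:
                    /* ... hand urb->transfer_buffer to the driver ... */
                    break;
            case -ENODEV:
            case -ENOENT:
            case -ECONNRESET:
            case -EPIPE:
                    return;                 /* device gone or endpoint stalled */
            default:
                    dev_dbg(&urb->dev->dev, "urb %p error %d\n",
                            urb, urb->status);
                    break;                  /* transient: try again */
            }

            usb_submit_urb(urb, GFP_ATOMIC);
    }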