author     David S. Miller <davem@davemloft.net>   2008-03-28 22:48:26 -0400
committer  David S. Miller <davem@davemloft.net>   2008-03-28 22:48:26 -0400
commit     17eed249539a7b756ca65a5cb0940abc48ef553b
tree       ea8745536e7ee5aaf0d3b03e9d356979daf6bfb0
parent     4ad96d39a2d74c1b2e400b602da2594f5098fc26
parent     318a94d68979cbe9cc98a3050b4b7be2f08513c8
Merge branch 'upstream-net26' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6
-rw-r--r--  drivers/net/3c509.c             | 729
-rw-r--r--  drivers/net/arcnet/arcnet.c     |   5
-rw-r--r--  drivers/net/e1000e/82571.c      | 127
-rw-r--r--  drivers/net/e1000e/Makefile     |   2
-rw-r--r--  drivers/net/e1000e/defines.h    | 109
-rw-r--r--  drivers/net/e1000e/e1000.h      |  29
-rw-r--r--  drivers/net/e1000e/es2lan.c     | 105
-rw-r--r--  drivers/net/e1000e/ethtool.c    | 161
-rw-r--r--  drivers/net/e1000e/hw.h         | 171
-rw-r--r--  drivers/net/e1000e/ich8lan.c    | 279
-rw-r--r--  drivers/net/e1000e/lib.c        | 286
-rw-r--r--  drivers/net/e1000e/netdev.c     | 592
-rw-r--r--  drivers/net/e1000e/param.c      |  33
-rw-r--r--  drivers/net/e1000e/phy.c        | 164
-rw-r--r--  drivers/net/ehea/ehea.h         |   6
-rw-r--r--  drivers/net/ehea/ehea_main.c    |  90
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c  |   6
-rw-r--r--  drivers/net/tokenring/3c359.c   |  21
-rw-r--r--  drivers/net/yellowfin.c         |   4
-rw-r--r--  include/linux/arcdevice.h       |   4
20 files changed, 1678 insertions, 1245 deletions
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c index 8fafac987e0b..54dac0696d91 100644 --- a/drivers/net/3c509.c +++ b/drivers/net/3c509.c | |||
@@ -54,25 +54,24 @@ | |||
54 | v1.19a 28Oct2002 Davud Ruggiero <jdr@farfalle.com> | 54 | v1.19a 28Oct2002 Davud Ruggiero <jdr@farfalle.com> |
55 | - Increase *read_eeprom udelay to workaround oops with 2 cards. | 55 | - Increase *read_eeprom udelay to workaround oops with 2 cards. |
56 | v1.19b 08Nov2002 Marc Zyngier <maz@wild-wind.fr.eu.org> | 56 | v1.19b 08Nov2002 Marc Zyngier <maz@wild-wind.fr.eu.org> |
57 | - Introduce driver model for EISA cards. | 57 | - Introduce driver model for EISA cards. |
58 | v1.20 04Feb2008 Ondrej Zary <linux@rainbow-software.org> | ||
59 | - convert to isa_driver and pnp_driver and some cleanups | ||
58 | */ | 60 | */ |
59 | 61 | ||
60 | #define DRV_NAME "3c509" | 62 | #define DRV_NAME "3c509" |
61 | #define DRV_VERSION "1.19b" | 63 | #define DRV_VERSION "1.20" |
62 | #define DRV_RELDATE "08Nov2002" | 64 | #define DRV_RELDATE "04Feb2008" |
63 | 65 | ||
64 | /* A few values that may be tweaked. */ | 66 | /* A few values that may be tweaked. */ |
65 | 67 | ||
66 | /* Time in jiffies before concluding the transmitter is hung. */ | 68 | /* Time in jiffies before concluding the transmitter is hung. */ |
67 | #define TX_TIMEOUT (400*HZ/1000) | 69 | #define TX_TIMEOUT (400*HZ/1000) |
68 | /* Maximum events (Rx packets, etc.) to handle at each interrupt. */ | ||
69 | static int max_interrupt_work = 10; | ||
70 | 70 | ||
71 | #include <linux/module.h> | 71 | #include <linux/module.h> |
72 | #ifdef CONFIG_MCA | ||
73 | #include <linux/mca.h> | 72 | #include <linux/mca.h> |
74 | #endif | 73 | #include <linux/isa.h> |
75 | #include <linux/isapnp.h> | 74 | #include <linux/pnp.h> |
76 | #include <linux/string.h> | 75 | #include <linux/string.h> |
77 | #include <linux/interrupt.h> | 76 | #include <linux/interrupt.h> |
78 | #include <linux/errno.h> | 77 | #include <linux/errno.h> |
@@ -97,10 +96,6 @@ static int max_interrupt_work = 10; | |||
97 | 96 | ||
98 | static char version[] __initdata = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n"; | 97 | static char version[] __initdata = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n"; |
99 | 98 | ||
100 | #if defined(CONFIG_PM) && (defined(CONFIG_MCA) || defined(CONFIG_EISA)) | ||
101 | #define EL3_SUSPEND | ||
102 | #endif | ||
103 | |||
104 | #ifdef EL3_DEBUG | 99 | #ifdef EL3_DEBUG |
105 | static int el3_debug = EL3_DEBUG; | 100 | static int el3_debug = EL3_DEBUG; |
106 | #else | 101 | #else |
@@ -111,6 +106,7 @@ static int el3_debug = 2; | |||
111 | * a global variable so that the mca/eisa probe routines can increment | 106 | * a global variable so that the mca/eisa probe routines can increment |
112 | * it */ | 107 | * it */ |
113 | static int el3_cards = 0; | 108 | static int el3_cards = 0; |
109 | #define EL3_MAX_CARDS 8 | ||
114 | 110 | ||
115 | /* To minimize the size of the driver source I only define operating | 111 | /* To minimize the size of the driver source I only define operating |
116 | constants if they are used several times. You'll need the manual | 112 | constants if they are used several times. You'll need the manual |
@@ -119,7 +115,7 @@ static int el3_cards = 0; | |||
119 | #define EL3_DATA 0x00 | 115 | #define EL3_DATA 0x00 |
120 | #define EL3_CMD 0x0e | 116 | #define EL3_CMD 0x0e |
121 | #define EL3_STATUS 0x0e | 117 | #define EL3_STATUS 0x0e |
122 | #define EEPROM_READ 0x80 | 118 | #define EEPROM_READ 0x80 |
123 | 119 | ||
124 | #define EL3_IO_EXTENT 16 | 120 | #define EL3_IO_EXTENT 16 |
125 | 121 | ||
@@ -168,23 +164,31 @@ enum RxFilter { | |||
168 | */ | 164 | */ |
169 | #define SKB_QUEUE_SIZE 64 | 165 | #define SKB_QUEUE_SIZE 64 |
170 | 166 | ||
167 | enum el3_cardtype { EL3_ISA, EL3_PNP, EL3_MCA, EL3_EISA }; | ||
168 | |||
171 | struct el3_private { | 169 | struct el3_private { |
172 | struct net_device_stats stats; | 170 | struct net_device_stats stats; |
173 | struct net_device *next_dev; | ||
174 | spinlock_t lock; | 171 | spinlock_t lock; |
175 | /* skb send-queue */ | 172 | /* skb send-queue */ |
176 | int head, size; | 173 | int head, size; |
177 | struct sk_buff *queue[SKB_QUEUE_SIZE]; | 174 | struct sk_buff *queue[SKB_QUEUE_SIZE]; |
178 | enum { | 175 | enum el3_cardtype type; |
179 | EL3_MCA, | ||
180 | EL3_PNP, | ||
181 | EL3_EISA, | ||
182 | } type; /* type of device */ | ||
183 | struct device *dev; | ||
184 | }; | 176 | }; |
185 | static int id_port __initdata = 0x110; /* Start with 0x110 to avoid new sound cards.*/ | 177 | static int id_port; |
186 | static struct net_device *el3_root_dev; | 178 | static int current_tag; |
179 | static struct net_device *el3_devs[EL3_MAX_CARDS]; | ||
180 | |||
181 | /* Parameters that may be passed into the module. */ | ||
182 | static int debug = -1; | ||
183 | static int irq[] = {-1, -1, -1, -1, -1, -1, -1, -1}; | ||
184 | /* Maximum events (Rx packets, etc.) to handle at each interrupt. */ | ||
185 | static int max_interrupt_work = 10; | ||
186 | #ifdef CONFIG_PNP | ||
187 | static int nopnp; | ||
188 | #endif | ||
187 | 189 | ||
190 | static int __init el3_common_init(struct net_device *dev); | ||
191 | static void el3_common_remove(struct net_device *dev); | ||
188 | static ushort id_read_eeprom(int index); | 192 | static ushort id_read_eeprom(int index); |
189 | static ushort read_eeprom(int ioaddr, int index); | 193 | static ushort read_eeprom(int ioaddr, int index); |
190 | static int el3_open(struct net_device *dev); | 194 | static int el3_open(struct net_device *dev); |
@@ -199,7 +203,7 @@ static void el3_tx_timeout (struct net_device *dev); | |||
199 | static void el3_down(struct net_device *dev); | 203 | static void el3_down(struct net_device *dev); |
200 | static void el3_up(struct net_device *dev); | 204 | static void el3_up(struct net_device *dev); |
201 | static const struct ethtool_ops ethtool_ops; | 205 | static const struct ethtool_ops ethtool_ops; |
202 | #ifdef EL3_SUSPEND | 206 | #ifdef CONFIG_PM |
203 | static int el3_suspend(struct device *, pm_message_t); | 207 | static int el3_suspend(struct device *, pm_message_t); |
204 | static int el3_resume(struct device *); | 208 | static int el3_resume(struct device *); |
205 | #else | 209 | #else |
@@ -209,13 +213,272 @@ static int el3_resume(struct device *); | |||
209 | 213 | ||
210 | 214 | ||
211 | /* generic device remove for all device types */ | 215 | /* generic device remove for all device types */ |
212 | #if defined(CONFIG_EISA) || defined(CONFIG_MCA) | ||
213 | static int el3_device_remove (struct device *device); | 216 | static int el3_device_remove (struct device *device); |
214 | #endif | ||
215 | #ifdef CONFIG_NET_POLL_CONTROLLER | 217 | #ifdef CONFIG_NET_POLL_CONTROLLER |
216 | static void el3_poll_controller(struct net_device *dev); | 218 | static void el3_poll_controller(struct net_device *dev); |
217 | #endif | 219 | #endif |
218 | 220 | ||
221 | /* Return 0 on success, 1 on error, 2 when found already detected PnP card */ | ||
222 | static int el3_isa_id_sequence(__be16 *phys_addr) | ||
223 | { | ||
224 | short lrs_state = 0xff; | ||
225 | int i; | ||
226 | |||
227 | /* ISA boards are detected by sending the ID sequence to the | ||
228 | ID_PORT. We find cards past the first by setting the 'current_tag' | ||
229 | on cards as they are found. Cards with their tag set will not | ||
230 | respond to subsequent ID sequences. */ | ||
231 | |||
232 | outb(0x00, id_port); | ||
233 | outb(0x00, id_port); | ||
234 | for (i = 0; i < 255; i++) { | ||
235 | outb(lrs_state, id_port); | ||
236 | lrs_state <<= 1; | ||
237 | lrs_state = lrs_state & 0x100 ? lrs_state ^ 0xcf : lrs_state; | ||
238 | } | ||
239 | /* For the first probe, clear all board's tag registers. */ | ||
240 | if (current_tag == 0) | ||
241 | outb(0xd0, id_port); | ||
242 | else /* Otherwise kill off already-found boards. */ | ||
243 | outb(0xd8, id_port); | ||
244 | if (id_read_eeprom(7) != 0x6d50) | ||
245 | return 1; | ||
246 | /* Read in EEPROM data, which does contention-select. | ||
247 | Only the lowest address board will stay "on-line". | ||
248 | 3Com got the byte order backwards. */ | ||
249 | for (i = 0; i < 3; i++) | ||
250 | phys_addr[i] = htons(id_read_eeprom(i)); | ||
251 | #ifdef CONFIG_PNP | ||
252 | if (!nopnp) { | ||
253 | /* The ISA PnP 3c509 cards respond to the ID sequence too. | ||
254 | This check is needed in order not to register them twice. */ | ||
255 | for (i = 0; i < el3_cards; i++) { | ||
256 | struct el3_private *lp = netdev_priv(el3_devs[i]); | ||
257 | if (lp->type == EL3_PNP | ||
258 | && !memcmp(phys_addr, el3_devs[i]->dev_addr, | ||
259 | ETH_ALEN)) { | ||
260 | if (el3_debug > 3) | ||
261 | printk(KERN_DEBUG "3c509 with address %02x %02x %02x %02x %02x %02x was found by ISAPnP\n", | ||
262 | phys_addr[0] & 0xff, phys_addr[0] >> 8, | ||
263 | phys_addr[1] & 0xff, phys_addr[1] >> 8, | ||
264 | phys_addr[2] & 0xff, phys_addr[2] >> 8); | ||
265 | /* Set the adaptor tag so that the next card can be found. */ | ||
266 | outb(0xd0 + ++current_tag, id_port); | ||
267 | return 2; | ||
268 | } | ||
269 | } | ||
270 | } | ||
271 | #endif /* CONFIG_PNP */ | ||
272 | return 0; | ||
273 | |||
274 | } | ||
275 | |||
276 | static void __devinit el3_dev_fill(struct net_device *dev, __be16 *phys_addr, | ||
277 | int ioaddr, int irq, int if_port, | ||
278 | enum el3_cardtype type) | ||
279 | { | ||
280 | struct el3_private *lp = netdev_priv(dev); | ||
281 | |||
282 | memcpy(dev->dev_addr, phys_addr, ETH_ALEN); | ||
283 | dev->base_addr = ioaddr; | ||
284 | dev->irq = irq; | ||
285 | dev->if_port = if_port; | ||
286 | lp->type = type; | ||
287 | } | ||
288 | |||
289 | static int __devinit el3_isa_match(struct device *pdev, | ||
290 | unsigned int ndev) | ||
291 | { | ||
292 | struct net_device *dev; | ||
293 | int ioaddr, isa_irq, if_port, err; | ||
294 | unsigned int iobase; | ||
295 | __be16 phys_addr[3]; | ||
296 | |||
297 | while ((err = el3_isa_id_sequence(phys_addr)) == 2) | ||
298 | ; /* Skip to next card when PnP card found */ | ||
299 | if (err == 1) | ||
300 | return 0; | ||
301 | |||
302 | iobase = id_read_eeprom(8); | ||
303 | if_port = iobase >> 14; | ||
304 | ioaddr = 0x200 + ((iobase & 0x1f) << 4); | ||
305 | if (irq[el3_cards] > 1 && irq[el3_cards] < 16) | ||
306 | isa_irq = irq[el3_cards]; | ||
307 | else | ||
308 | isa_irq = id_read_eeprom(9) >> 12; | ||
309 | |||
310 | dev = alloc_etherdev(sizeof(struct el3_private)); | ||
311 | if (!dev) | ||
312 | return -ENOMEM; | ||
313 | |||
314 | netdev_boot_setup_check(dev); | ||
315 | |||
316 | if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509-isa")) { | ||
317 | free_netdev(dev); | ||
318 | return 0; | ||
319 | } | ||
320 | |||
321 | /* Set the adaptor tag so that the next card can be found. */ | ||
322 | outb(0xd0 + ++current_tag, id_port); | ||
323 | |||
324 | /* Activate the adaptor at the EEPROM location. */ | ||
325 | outb((ioaddr >> 4) | 0xe0, id_port); | ||
326 | |||
327 | EL3WINDOW(0); | ||
328 | if (inw(ioaddr) != 0x6d50) { | ||
329 | free_netdev(dev); | ||
330 | return 0; | ||
331 | } | ||
332 | |||
333 | /* Free the interrupt so that some other card can use it. */ | ||
334 | outw(0x0f00, ioaddr + WN0_IRQ); | ||
335 | |||
336 | el3_dev_fill(dev, phys_addr, ioaddr, isa_irq, if_port, EL3_ISA); | ||
337 | dev_set_drvdata(pdev, dev); | ||
338 | if (el3_common_init(dev)) { | ||
339 | free_netdev(dev); | ||
340 | return 0; | ||
341 | } | ||
342 | |||
343 | el3_devs[el3_cards++] = dev; | ||
344 | return 1; | ||
345 | } | ||
346 | |||
347 | static int __devexit el3_isa_remove(struct device *pdev, | ||
348 | unsigned int ndev) | ||
349 | { | ||
350 | el3_device_remove(pdev); | ||
351 | dev_set_drvdata(pdev, NULL); | ||
352 | return 0; | ||
353 | } | ||
354 | |||
355 | #ifdef CONFIG_PM | ||
356 | static int el3_isa_suspend(struct device *dev, unsigned int n, | ||
357 | pm_message_t state) | ||
358 | { | ||
359 | current_tag = 0; | ||
360 | return el3_suspend(dev, state); | ||
361 | } | ||
362 | |||
363 | static int el3_isa_resume(struct device *dev, unsigned int n) | ||
364 | { | ||
365 | struct net_device *ndev = dev_get_drvdata(dev); | ||
366 | int ioaddr = ndev->base_addr, err; | ||
367 | __be16 phys_addr[3]; | ||
368 | |||
369 | while ((err = el3_isa_id_sequence(phys_addr)) == 2) | ||
370 | ; /* Skip to next card when PnP card found */ | ||
371 | if (err == 1) | ||
372 | return 0; | ||
373 | /* Set the adaptor tag so that the next card can be found. */ | ||
374 | outb(0xd0 + ++current_tag, id_port); | ||
375 | /* Enable the card */ | ||
376 | outb((ioaddr >> 4) | 0xe0, id_port); | ||
377 | EL3WINDOW(0); | ||
378 | if (inw(ioaddr) != 0x6d50) | ||
379 | return 1; | ||
380 | /* Free the interrupt so that some other card can use it. */ | ||
381 | outw(0x0f00, ioaddr + WN0_IRQ); | ||
382 | return el3_resume(dev); | ||
383 | } | ||
384 | #endif | ||
385 | |||
386 | static struct isa_driver el3_isa_driver = { | ||
387 | .match = el3_isa_match, | ||
388 | .remove = __devexit_p(el3_isa_remove), | ||
389 | #ifdef CONFIG_PM | ||
390 | .suspend = el3_isa_suspend, | ||
391 | .resume = el3_isa_resume, | ||
392 | #endif | ||
393 | .driver = { | ||
394 | .name = "3c509" | ||
395 | }, | ||
396 | }; | ||
397 | static int isa_registered; | ||
398 | |||
399 | #ifdef CONFIG_PNP | ||
400 | static struct pnp_device_id el3_pnp_ids[] = { | ||
401 | { .id = "TCM5090" }, /* 3Com Etherlink III (TP) */ | ||
402 | { .id = "TCM5091" }, /* 3Com Etherlink III */ | ||
403 | { .id = "TCM5094" }, /* 3Com Etherlink III (combo) */ | ||
404 | { .id = "TCM5095" }, /* 3Com Etherlink III (TPO) */ | ||
405 | { .id = "TCM5098" }, /* 3Com Etherlink III (TPC) */ | ||
406 | { .id = "PNP80f7" }, /* 3Com Etherlink III compatible */ | ||
407 | { .id = "PNP80f8" }, /* 3Com Etherlink III compatible */ | ||
408 | { .id = "" } | ||
409 | }; | ||
410 | MODULE_DEVICE_TABLE(pnp, el3_pnp_ids); | ||
411 | |||
412 | static int __devinit el3_pnp_probe(struct pnp_dev *pdev, | ||
413 | const struct pnp_device_id *id) | ||
414 | { | ||
415 | short i; | ||
416 | int ioaddr, irq, if_port; | ||
417 | u16 phys_addr[3]; | ||
418 | struct net_device *dev = NULL; | ||
419 | int err; | ||
420 | |||
421 | ioaddr = pnp_port_start(pdev, 0); | ||
422 | if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509-pnp")) | ||
423 | return -EBUSY; | ||
424 | irq = pnp_irq(pdev, 0); | ||
425 | EL3WINDOW(0); | ||
426 | for (i = 0; i < 3; i++) | ||
427 | phys_addr[i] = htons(read_eeprom(ioaddr, i)); | ||
428 | if_port = read_eeprom(ioaddr, 8) >> 14; | ||
429 | dev = alloc_etherdev(sizeof(struct el3_private)); | ||
430 | if (!dev) { | ||
431 | release_region(ioaddr, EL3_IO_EXTENT); | ||
432 | return -ENOMEM; | ||
433 | } | ||
434 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
435 | netdev_boot_setup_check(dev); | ||
436 | |||
437 | el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_PNP); | ||
438 | pnp_set_drvdata(pdev, dev); | ||
439 | err = el3_common_init(dev); | ||
440 | |||
441 | if (err) { | ||
442 | pnp_set_drvdata(pdev, NULL); | ||
443 | free_netdev(dev); | ||
444 | return err; | ||
445 | } | ||
446 | |||
447 | el3_devs[el3_cards++] = dev; | ||
448 | return 0; | ||
449 | } | ||
450 | |||
451 | static void __devexit el3_pnp_remove(struct pnp_dev *pdev) | ||
452 | { | ||
453 | el3_common_remove(pnp_get_drvdata(pdev)); | ||
454 | pnp_set_drvdata(pdev, NULL); | ||
455 | } | ||
456 | |||
457 | #ifdef CONFIG_PM | ||
458 | static int el3_pnp_suspend(struct pnp_dev *pdev, pm_message_t state) | ||
459 | { | ||
460 | return el3_suspend(&pdev->dev, state); | ||
461 | } | ||
462 | |||
463 | static int el3_pnp_resume(struct pnp_dev *pdev) | ||
464 | { | ||
465 | return el3_resume(&pdev->dev); | ||
466 | } | ||
467 | #endif | ||
468 | |||
469 | static struct pnp_driver el3_pnp_driver = { | ||
470 | .name = "3c509", | ||
471 | .id_table = el3_pnp_ids, | ||
472 | .probe = el3_pnp_probe, | ||
473 | .remove = __devexit_p(el3_pnp_remove), | ||
474 | #ifdef CONFIG_PM | ||
475 | .suspend = el3_pnp_suspend, | ||
476 | .resume = el3_pnp_resume, | ||
477 | #endif | ||
478 | }; | ||
479 | static int pnp_registered; | ||
480 | #endif /* CONFIG_PNP */ | ||
481 | |||
219 | #ifdef CONFIG_EISA | 482 | #ifdef CONFIG_EISA |
220 | static struct eisa_device_id el3_eisa_ids[] = { | 483 | static struct eisa_device_id el3_eisa_ids[] = { |
221 | { "TCM5092" }, | 484 | { "TCM5092" }, |
@@ -230,13 +493,14 @@ static int el3_eisa_probe (struct device *device); | |||
230 | static struct eisa_driver el3_eisa_driver = { | 493 | static struct eisa_driver el3_eisa_driver = { |
231 | .id_table = el3_eisa_ids, | 494 | .id_table = el3_eisa_ids, |
232 | .driver = { | 495 | .driver = { |
233 | .name = "3c509", | 496 | .name = "3c579", |
234 | .probe = el3_eisa_probe, | 497 | .probe = el3_eisa_probe, |
235 | .remove = __devexit_p (el3_device_remove), | 498 | .remove = __devexit_p (el3_device_remove), |
236 | .suspend = el3_suspend, | 499 | .suspend = el3_suspend, |
237 | .resume = el3_resume, | 500 | .resume = el3_resume, |
238 | } | 501 | } |
239 | }; | 502 | }; |
503 | static int eisa_registered; | ||
240 | #endif | 504 | #endif |
241 | 505 | ||
242 | #ifdef CONFIG_MCA | 506 | #ifdef CONFIG_MCA |
@@ -271,45 +535,9 @@ static struct mca_driver el3_mca_driver = { | |||
271 | .resume = el3_resume, | 535 | .resume = el3_resume, |
272 | }, | 536 | }, |
273 | }; | 537 | }; |
538 | static int mca_registered; | ||
274 | #endif /* CONFIG_MCA */ | 539 | #endif /* CONFIG_MCA */ |
275 | 540 | ||
276 | #if defined(__ISAPNP__) | ||
277 | static struct isapnp_device_id el3_isapnp_adapters[] __initdata = { | ||
278 | { ISAPNP_ANY_ID, ISAPNP_ANY_ID, | ||
279 | ISAPNP_VENDOR('T', 'C', 'M'), ISAPNP_FUNCTION(0x5090), | ||
280 | (long) "3Com Etherlink III (TP)" }, | ||
281 | { ISAPNP_ANY_ID, ISAPNP_ANY_ID, | ||
282 | ISAPNP_VENDOR('T', 'C', 'M'), ISAPNP_FUNCTION(0x5091), | ||
283 | (long) "3Com Etherlink III" }, | ||
284 | { ISAPNP_ANY_ID, ISAPNP_ANY_ID, | ||
285 | ISAPNP_VENDOR('T', 'C', 'M'), ISAPNP_FUNCTION(0x5094), | ||
286 | (long) "3Com Etherlink III (combo)" }, | ||
287 | { ISAPNP_ANY_ID, ISAPNP_ANY_ID, | ||
288 | ISAPNP_VENDOR('T', 'C', 'M'), ISAPNP_FUNCTION(0x5095), | ||
289 | (long) "3Com Etherlink III (TPO)" }, | ||
290 | { ISAPNP_ANY_ID, ISAPNP_ANY_ID, | ||
291 | ISAPNP_VENDOR('T', 'C', 'M'), ISAPNP_FUNCTION(0x5098), | ||
292 | (long) "3Com Etherlink III (TPC)" }, | ||
293 | { ISAPNP_ANY_ID, ISAPNP_ANY_ID, | ||
294 | ISAPNP_VENDOR('P', 'N', 'P'), ISAPNP_FUNCTION(0x80f7), | ||
295 | (long) "3Com Etherlink III compatible" }, | ||
296 | { ISAPNP_ANY_ID, ISAPNP_ANY_ID, | ||
297 | ISAPNP_VENDOR('P', 'N', 'P'), ISAPNP_FUNCTION(0x80f8), | ||
298 | (long) "3Com Etherlink III compatible" }, | ||
299 | { } /* terminate list */ | ||
300 | }; | ||
301 | |||
302 | static __be16 el3_isapnp_phys_addr[8][3]; | ||
303 | static int nopnp; | ||
304 | #endif /* __ISAPNP__ */ | ||
305 | |||
306 | /* With the driver model introduction for EISA devices, both init | ||
307 | * and cleanup have been split : | ||
308 | * - EISA devices probe/remove starts in el3_eisa_probe/el3_device_remove | ||
309 | * - MCA/ISA still use el3_probe | ||
310 | * | ||
311 | * Both call el3_common_init/el3_common_remove. */ | ||
312 | |||
313 | static int __init el3_common_init(struct net_device *dev) | 541 | static int __init el3_common_init(struct net_device *dev) |
314 | { | 542 | { |
315 | struct el3_private *lp = netdev_priv(dev); | 543 | struct el3_private *lp = netdev_priv(dev); |
@@ -360,231 +588,11 @@ static int __init el3_common_init(struct net_device *dev) | |||
360 | 588 | ||
361 | static void el3_common_remove (struct net_device *dev) | 589 | static void el3_common_remove (struct net_device *dev) |
362 | { | 590 | { |
363 | struct el3_private *lp = netdev_priv(dev); | ||
364 | |||
365 | (void) lp; /* Keep gcc quiet... */ | ||
366 | #if defined(__ISAPNP__) | ||
367 | if (lp->type == EL3_PNP) | ||
368 | pnp_device_detach(to_pnp_dev(lp->dev)); | ||
369 | #endif | ||
370 | |||
371 | unregister_netdev (dev); | 591 | unregister_netdev (dev); |
372 | release_region(dev->base_addr, EL3_IO_EXTENT); | 592 | release_region(dev->base_addr, EL3_IO_EXTENT); |
373 | free_netdev (dev); | 593 | free_netdev (dev); |
374 | } | 594 | } |
375 | 595 | ||
376 | static int __init el3_probe(int card_idx) | ||
377 | { | ||
378 | struct net_device *dev; | ||
379 | struct el3_private *lp; | ||
380 | short lrs_state = 0xff, i; | ||
381 | int ioaddr, irq, if_port; | ||
382 | __be16 phys_addr[3]; | ||
383 | static int current_tag; | ||
384 | int err = -ENODEV; | ||
385 | #if defined(__ISAPNP__) | ||
386 | static int pnp_cards; | ||
387 | struct pnp_dev *idev = NULL; | ||
388 | int pnp_found = 0; | ||
389 | |||
390 | if (nopnp == 1) | ||
391 | goto no_pnp; | ||
392 | |||
393 | for (i=0; el3_isapnp_adapters[i].vendor != 0; i++) { | ||
394 | int j; | ||
395 | while ((idev = pnp_find_dev(NULL, | ||
396 | el3_isapnp_adapters[i].vendor, | ||
397 | el3_isapnp_adapters[i].function, | ||
398 | idev))) { | ||
399 | if (pnp_device_attach(idev) < 0) | ||
400 | continue; | ||
401 | if (pnp_activate_dev(idev) < 0) { | ||
402 | __again: | ||
403 | pnp_device_detach(idev); | ||
404 | continue; | ||
405 | } | ||
406 | if (!pnp_port_valid(idev, 0) || !pnp_irq_valid(idev, 0)) | ||
407 | goto __again; | ||
408 | ioaddr = pnp_port_start(idev, 0); | ||
409 | if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509 PnP")) { | ||
410 | pnp_device_detach(idev); | ||
411 | return -EBUSY; | ||
412 | } | ||
413 | irq = pnp_irq(idev, 0); | ||
414 | if (el3_debug > 3) | ||
415 | printk ("ISAPnP reports %s at i/o 0x%x, irq %d\n", | ||
416 | (char*) el3_isapnp_adapters[i].driver_data, ioaddr, irq); | ||
417 | EL3WINDOW(0); | ||
418 | for (j = 0; j < 3; j++) | ||
419 | el3_isapnp_phys_addr[pnp_cards][j] = | ||
420 | phys_addr[j] = | ||
421 | htons(read_eeprom(ioaddr, j)); | ||
422 | if_port = read_eeprom(ioaddr, 8) >> 14; | ||
423 | dev = alloc_etherdev(sizeof (struct el3_private)); | ||
424 | if (!dev) { | ||
425 | release_region(ioaddr, EL3_IO_EXTENT); | ||
426 | pnp_device_detach(idev); | ||
427 | return -ENOMEM; | ||
428 | } | ||
429 | |||
430 | SET_NETDEV_DEV(dev, &idev->dev); | ||
431 | pnp_cards++; | ||
432 | |||
433 | netdev_boot_setup_check(dev); | ||
434 | pnp_found = 1; | ||
435 | goto found; | ||
436 | } | ||
437 | } | ||
438 | no_pnp: | ||
439 | #endif /* __ISAPNP__ */ | ||
440 | |||
441 | /* Select an open I/O location at 0x1*0 to do contention select. */ | ||
442 | for ( ; id_port < 0x200; id_port += 0x10) { | ||
443 | if (!request_region(id_port, 1, "3c509")) | ||
444 | continue; | ||
445 | outb(0x00, id_port); | ||
446 | outb(0xff, id_port); | ||
447 | if (inb(id_port) & 0x01){ | ||
448 | release_region(id_port, 1); | ||
449 | break; | ||
450 | } else | ||
451 | release_region(id_port, 1); | ||
452 | } | ||
453 | if (id_port >= 0x200) { | ||
454 | /* Rare -- do we really need a warning? */ | ||
455 | printk(" WARNING: No I/O port available for 3c509 activation.\n"); | ||
456 | return -ENODEV; | ||
457 | } | ||
458 | |||
459 | /* Next check for all ISA bus boards by sending the ID sequence to the | ||
460 | ID_PORT. We find cards past the first by setting the 'current_tag' | ||
461 | on cards as they are found. Cards with their tag set will not | ||
462 | respond to subsequent ID sequences. */ | ||
463 | |||
464 | outb(0x00, id_port); | ||
465 | outb(0x00, id_port); | ||
466 | for(i = 0; i < 255; i++) { | ||
467 | outb(lrs_state, id_port); | ||
468 | lrs_state <<= 1; | ||
469 | lrs_state = lrs_state & 0x100 ? lrs_state ^ 0xcf : lrs_state; | ||
470 | } | ||
471 | |||
472 | /* For the first probe, clear all board's tag registers. */ | ||
473 | if (current_tag == 0) | ||
474 | outb(0xd0, id_port); | ||
475 | else /* Otherwise kill off already-found boards. */ | ||
476 | outb(0xd8, id_port); | ||
477 | |||
478 | if (id_read_eeprom(7) != 0x6d50) { | ||
479 | return -ENODEV; | ||
480 | } | ||
481 | |||
482 | /* Read in EEPROM data, which does contention-select. | ||
483 | Only the lowest address board will stay "on-line". | ||
484 | 3Com got the byte order backwards. */ | ||
485 | for (i = 0; i < 3; i++) { | ||
486 | phys_addr[i] = htons(id_read_eeprom(i)); | ||
487 | } | ||
488 | |||
489 | #if defined(__ISAPNP__) | ||
490 | if (nopnp == 0) { | ||
491 | /* The ISA PnP 3c509 cards respond to the ID sequence. | ||
492 | This check is needed in order not to register them twice. */ | ||
493 | for (i = 0; i < pnp_cards; i++) { | ||
494 | if (phys_addr[0] == el3_isapnp_phys_addr[i][0] && | ||
495 | phys_addr[1] == el3_isapnp_phys_addr[i][1] && | ||
496 | phys_addr[2] == el3_isapnp_phys_addr[i][2]) | ||
497 | { | ||
498 | if (el3_debug > 3) | ||
499 | printk("3c509 with address %02x %02x %02x %02x %02x %02x was found by ISAPnP\n", | ||
500 | phys_addr[0] & 0xff, phys_addr[0] >> 8, | ||
501 | phys_addr[1] & 0xff, phys_addr[1] >> 8, | ||
502 | phys_addr[2] & 0xff, phys_addr[2] >> 8); | ||
503 | /* Set the adaptor tag so that the next card can be found. */ | ||
504 | outb(0xd0 + ++current_tag, id_port); | ||
505 | goto no_pnp; | ||
506 | } | ||
507 | } | ||
508 | } | ||
509 | #endif /* __ISAPNP__ */ | ||
510 | |||
511 | { | ||
512 | unsigned int iobase = id_read_eeprom(8); | ||
513 | if_port = iobase >> 14; | ||
514 | ioaddr = 0x200 + ((iobase & 0x1f) << 4); | ||
515 | } | ||
516 | irq = id_read_eeprom(9) >> 12; | ||
517 | |||
518 | dev = alloc_etherdev(sizeof (struct el3_private)); | ||
519 | if (!dev) | ||
520 | return -ENOMEM; | ||
521 | |||
522 | netdev_boot_setup_check(dev); | ||
523 | |||
524 | /* Set passed-in IRQ or I/O Addr. */ | ||
525 | if (dev->irq > 1 && dev->irq < 16) | ||
526 | irq = dev->irq; | ||
527 | |||
528 | if (dev->base_addr) { | ||
529 | if (dev->mem_end == 0x3c509 /* Magic key */ | ||
530 | && dev->base_addr >= 0x200 && dev->base_addr <= 0x3e0) | ||
531 | ioaddr = dev->base_addr & 0x3f0; | ||
532 | else if (dev->base_addr != ioaddr) | ||
533 | goto out; | ||
534 | } | ||
535 | |||
536 | if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509")) { | ||
537 | err = -EBUSY; | ||
538 | goto out; | ||
539 | } | ||
540 | |||
541 | /* Set the adaptor tag so that the next card can be found. */ | ||
542 | outb(0xd0 + ++current_tag, id_port); | ||
543 | |||
544 | /* Activate the adaptor at the EEPROM location. */ | ||
545 | outb((ioaddr >> 4) | 0xe0, id_port); | ||
546 | |||
547 | EL3WINDOW(0); | ||
548 | if (inw(ioaddr) != 0x6d50) | ||
549 | goto out1; | ||
550 | |||
551 | /* Free the interrupt so that some other card can use it. */ | ||
552 | outw(0x0f00, ioaddr + WN0_IRQ); | ||
553 | |||
554 | #if defined(__ISAPNP__) | ||
555 | found: /* PNP jumps here... */ | ||
556 | #endif /* __ISAPNP__ */ | ||
557 | |||
558 | memcpy(dev->dev_addr, phys_addr, sizeof(phys_addr)); | ||
559 | dev->base_addr = ioaddr; | ||
560 | dev->irq = irq; | ||
561 | dev->if_port = if_port; | ||
562 | lp = netdev_priv(dev); | ||
563 | #if defined(__ISAPNP__) | ||
564 | lp->dev = &idev->dev; | ||
565 | if (pnp_found) | ||
566 | lp->type = EL3_PNP; | ||
567 | #endif | ||
568 | err = el3_common_init(dev); | ||
569 | |||
570 | if (err) | ||
571 | goto out1; | ||
572 | |||
573 | el3_cards++; | ||
574 | lp->next_dev = el3_root_dev; | ||
575 | el3_root_dev = dev; | ||
576 | return 0; | ||
577 | |||
578 | out1: | ||
579 | #if defined(__ISAPNP__) | ||
580 | if (idev) | ||
581 | pnp_device_detach(idev); | ||
582 | #endif | ||
583 | out: | ||
584 | free_netdev(dev); | ||
585 | return err; | ||
586 | } | ||
587 | |||
588 | #ifdef CONFIG_MCA | 596 | #ifdef CONFIG_MCA |
589 | static int __init el3_mca_probe(struct device *device) | 597 | static int __init el3_mca_probe(struct device *device) |
590 | { | 598 | { |
@@ -596,7 +604,6 @@ static int __init el3_mca_probe(struct device *device) | |||
596 | * redone for multi-card detection by ZP Gu (zpg@castle.net) | 604 | * redone for multi-card detection by ZP Gu (zpg@castle.net) |
597 | * now works as a module */ | 605 | * now works as a module */ |
598 | 606 | ||
599 | struct el3_private *lp; | ||
600 | short i; | 607 | short i; |
601 | int ioaddr, irq, if_port; | 608 | int ioaddr, irq, if_port; |
602 | u16 phys_addr[3]; | 609 | u16 phys_addr[3]; |
@@ -613,7 +620,7 @@ static int __init el3_mca_probe(struct device *device) | |||
613 | irq = pos5 & 0x0f; | 620 | irq = pos5 & 0x0f; |
614 | 621 | ||
615 | 622 | ||
616 | printk("3c529: found %s at slot %d\n", | 623 | printk(KERN_INFO "3c529: found %s at slot %d\n", |
617 | el3_mca_adapter_names[mdev->index], slot + 1); | 624 | el3_mca_adapter_names[mdev->index], slot + 1); |
618 | 625 | ||
619 | /* claim the slot */ | 626 | /* claim the slot */ |
@@ -626,7 +633,7 @@ static int __init el3_mca_probe(struct device *device) | |||
626 | irq = mca_device_transform_irq(mdev, irq); | 633 | irq = mca_device_transform_irq(mdev, irq); |
627 | ioaddr = mca_device_transform_ioport(mdev, ioaddr); | 634 | ioaddr = mca_device_transform_ioport(mdev, ioaddr); |
628 | if (el3_debug > 2) { | 635 | if (el3_debug > 2) { |
629 | printk("3c529: irq %d ioaddr 0x%x ifport %d\n", irq, ioaddr, if_port); | 636 | printk(KERN_DEBUG "3c529: irq %d ioaddr 0x%x ifport %d\n", irq, ioaddr, if_port); |
630 | } | 637 | } |
631 | EL3WINDOW(0); | 638 | EL3WINDOW(0); |
632 | for (i = 0; i < 3; i++) { | 639 | for (i = 0; i < 3; i++) { |
@@ -641,13 +648,7 @@ static int __init el3_mca_probe(struct device *device) | |||
641 | 648 | ||
642 | netdev_boot_setup_check(dev); | 649 | netdev_boot_setup_check(dev); |
643 | 650 | ||
644 | memcpy(dev->dev_addr, phys_addr, sizeof(phys_addr)); | 651 | el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_MCA); |
645 | dev->base_addr = ioaddr; | ||
646 | dev->irq = irq; | ||
647 | dev->if_port = if_port; | ||
648 | lp = netdev_priv(dev); | ||
649 | lp->dev = device; | ||
650 | lp->type = EL3_MCA; | ||
651 | device->driver_data = dev; | 652 | device->driver_data = dev; |
652 | err = el3_common_init(dev); | 653 | err = el3_common_init(dev); |
653 | 654 | ||
@@ -657,7 +658,7 @@ static int __init el3_mca_probe(struct device *device) | |||
657 | return -ENOMEM; | 658 | return -ENOMEM; |
658 | } | 659 | } |
659 | 660 | ||
660 | el3_cards++; | 661 | el3_devs[el3_cards++] = dev; |
661 | return 0; | 662 | return 0; |
662 | } | 663 | } |
663 | 664 | ||
@@ -666,7 +667,6 @@ static int __init el3_mca_probe(struct device *device) | |||
666 | #ifdef CONFIG_EISA | 667 | #ifdef CONFIG_EISA |
667 | static int __init el3_eisa_probe (struct device *device) | 668 | static int __init el3_eisa_probe (struct device *device) |
668 | { | 669 | { |
669 | struct el3_private *lp; | ||
670 | short i; | 670 | short i; |
671 | int ioaddr, irq, if_port; | 671 | int ioaddr, irq, if_port; |
672 | u16 phys_addr[3]; | 672 | u16 phys_addr[3]; |
@@ -678,7 +678,7 @@ static int __init el3_eisa_probe (struct device *device) | |||
678 | edev = to_eisa_device (device); | 678 | edev = to_eisa_device (device); |
679 | ioaddr = edev->base_addr; | 679 | ioaddr = edev->base_addr; |
680 | 680 | ||
681 | if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509")) | 681 | if (!request_region(ioaddr, EL3_IO_EXTENT, "3c579-eisa")) |
682 | return -EBUSY; | 682 | return -EBUSY; |
683 | 683 | ||
684 | /* Change the register set to the configuration window 0. */ | 684 | /* Change the register set to the configuration window 0. */ |
@@ -700,13 +700,7 @@ static int __init el3_eisa_probe (struct device *device) | |||
700 | 700 | ||
701 | netdev_boot_setup_check(dev); | 701 | netdev_boot_setup_check(dev); |
702 | 702 | ||
703 | memcpy(dev->dev_addr, phys_addr, sizeof(phys_addr)); | 703 | el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_EISA); |
704 | dev->base_addr = ioaddr; | ||
705 | dev->irq = irq; | ||
706 | dev->if_port = if_port; | ||
707 | lp = netdev_priv(dev); | ||
708 | lp->dev = device; | ||
709 | lp->type = EL3_EISA; | ||
710 | eisa_set_drvdata (edev, dev); | 704 | eisa_set_drvdata (edev, dev); |
711 | err = el3_common_init(dev); | 705 | err = el3_common_init(dev); |
712 | 706 | ||
@@ -716,12 +710,11 @@ static int __init el3_eisa_probe (struct device *device) | |||
716 | return err; | 710 | return err; |
717 | } | 711 | } |
718 | 712 | ||
719 | el3_cards++; | 713 | el3_devs[el3_cards++] = dev; |
720 | return 0; | 714 | return 0; |
721 | } | 715 | } |
722 | #endif | 716 | #endif |
723 | 717 | ||
724 | #if defined(CONFIG_EISA) || defined(CONFIG_MCA) | ||
725 | /* This remove works for all device types. | 718 | /* This remove works for all device types. |
726 | * | 719 | * |
727 | * The net dev must be stored in the driver_data field */ | 720 | * The net dev must be stored in the driver_data field */ |
@@ -734,7 +727,6 @@ static int __devexit el3_device_remove (struct device *device) | |||
734 | el3_common_remove (dev); | 727 | el3_common_remove (dev); |
735 | return 0; | 728 | return 0; |
736 | } | 729 | } |
737 | #endif | ||
738 | 730 | ||
739 | /* Read a word from the EEPROM using the regular EEPROM access register. | 731 | /* Read a word from the EEPROM using the regular EEPROM access register. |
740 | Assume that we are in register window zero. | 732 | Assume that we are in register window zero. |
@@ -749,7 +741,7 @@ static ushort read_eeprom(int ioaddr, int index) | |||
749 | } | 741 | } |
750 | 742 | ||
751 | /* Read a word from the EEPROM when in the ISA ID probe state. */ | 743 | /* Read a word from the EEPROM when in the ISA ID probe state. */ |
752 | static ushort __init id_read_eeprom(int index) | 744 | static ushort id_read_eeprom(int index) |
753 | { | 745 | { |
754 | int bit, word = 0; | 746 | int bit, word = 0; |
755 | 747 | ||
@@ -765,7 +757,7 @@ static ushort __init id_read_eeprom(int index) | |||
765 | word = (word << 1) + (inb(id_port) & 0x01); | 757 | word = (word << 1) + (inb(id_port) & 0x01); |
766 | 758 | ||
767 | if (el3_debug > 3) | 759 | if (el3_debug > 3) |
768 | printk(" 3c509 EEPROM word %d %#4.4x.\n", index, word); | 760 | printk(KERN_DEBUG " 3c509 EEPROM word %d %#4.4x.\n", index, word); |
769 | 761 | ||
770 | return word; | 762 | return word; |
771 | } | 763 | } |
@@ -787,13 +779,13 @@ el3_open(struct net_device *dev) | |||
787 | 779 | ||
788 | EL3WINDOW(0); | 780 | EL3WINDOW(0); |
789 | if (el3_debug > 3) | 781 | if (el3_debug > 3) |
790 | printk("%s: Opening, IRQ %d status@%x %4.4x.\n", dev->name, | 782 | printk(KERN_DEBUG "%s: Opening, IRQ %d status@%x %4.4x.\n", dev->name, |
791 | dev->irq, ioaddr + EL3_STATUS, inw(ioaddr + EL3_STATUS)); | 783 | dev->irq, ioaddr + EL3_STATUS, inw(ioaddr + EL3_STATUS)); |
792 | 784 | ||
793 | el3_up(dev); | 785 | el3_up(dev); |
794 | 786 | ||
795 | if (el3_debug > 3) | 787 | if (el3_debug > 3) |
796 | printk("%s: Opened 3c509 IRQ %d status %4.4x.\n", | 788 | printk(KERN_DEBUG "%s: Opened 3c509 IRQ %d status %4.4x.\n", |
797 | dev->name, dev->irq, inw(ioaddr + EL3_STATUS)); | 789 | dev->name, dev->irq, inw(ioaddr + EL3_STATUS)); |
798 | 790 | ||
799 | return 0; | 791 | return 0; |
@@ -806,7 +798,7 @@ el3_tx_timeout (struct net_device *dev) | |||
806 | int ioaddr = dev->base_addr; | 798 | int ioaddr = dev->base_addr; |
807 | 799 | ||
808 | /* Transmitter timeout, serious problems. */ | 800 | /* Transmitter timeout, serious problems. */ |
809 | printk("%s: transmit timed out, Tx_status %2.2x status %4.4x " | 801 | printk(KERN_WARNING "%s: transmit timed out, Tx_status %2.2x status %4.4x " |
810 | "Tx FIFO room %d.\n", | 802 | "Tx FIFO room %d.\n", |
811 | dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS), | 803 | dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS), |
812 | inw(ioaddr + TX_FREE)); | 804 | inw(ioaddr + TX_FREE)); |
@@ -831,7 +823,7 @@ el3_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
831 | lp->stats.tx_bytes += skb->len; | 823 | lp->stats.tx_bytes += skb->len; |
832 | 824 | ||
833 | if (el3_debug > 4) { | 825 | if (el3_debug > 4) { |
834 | printk("%s: el3_start_xmit(length = %u) called, status %4.4x.\n", | 826 | printk(KERN_DEBUG "%s: el3_start_xmit(length = %u) called, status %4.4x.\n", |
835 | dev->name, skb->len, inw(ioaddr + EL3_STATUS)); | 827 | dev->name, skb->len, inw(ioaddr + EL3_STATUS)); |
836 | } | 828 | } |
837 | #if 0 | 829 | #if 0 |
@@ -840,7 +832,7 @@ el3_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
840 | ushort status = inw(ioaddr + EL3_STATUS); | 832 | ushort status = inw(ioaddr + EL3_STATUS); |
841 | if (status & 0x0001 /* IRQ line active, missed one. */ | 833 | if (status & 0x0001 /* IRQ line active, missed one. */ |
842 | && inw(ioaddr + EL3_STATUS) & 1) { /* Make sure. */ | 834 | && inw(ioaddr + EL3_STATUS) & 1) { /* Make sure. */ |
843 | printk("%s: Missed interrupt, status then %04x now %04x" | 835 | printk(KERN_DEBUG "%s: Missed interrupt, status then %04x now %04x" |
844 | " Tx %2.2x Rx %4.4x.\n", dev->name, status, | 836 | " Tx %2.2x Rx %4.4x.\n", dev->name, status, |
845 | inw(ioaddr + EL3_STATUS), inb(ioaddr + TX_STATUS), | 837 | inw(ioaddr + EL3_STATUS), inb(ioaddr + TX_STATUS), |
846 | inw(ioaddr + RX_STATUS)); | 838 | inw(ioaddr + RX_STATUS)); |
@@ -914,7 +906,7 @@ el3_interrupt(int irq, void *dev_id) | |||
914 | 906 | ||
915 | if (el3_debug > 4) { | 907 | if (el3_debug > 4) { |
916 | status = inw(ioaddr + EL3_STATUS); | 908 | status = inw(ioaddr + EL3_STATUS); |
917 | printk("%s: interrupt, status %4.4x.\n", dev->name, status); | 909 | printk(KERN_DEBUG "%s: interrupt, status %4.4x.\n", dev->name, status); |
918 | } | 910 | } |
919 | 911 | ||
920 | while ((status = inw(ioaddr + EL3_STATUS)) & | 912 | while ((status = inw(ioaddr + EL3_STATUS)) & |
@@ -925,7 +917,7 @@ el3_interrupt(int irq, void *dev_id) | |||
925 | 917 | ||
926 | if (status & TxAvailable) { | 918 | if (status & TxAvailable) { |
927 | if (el3_debug > 5) | 919 | if (el3_debug > 5) |
928 | printk(" TX room bit was handled.\n"); | 920 | printk(KERN_DEBUG " TX room bit was handled.\n"); |
929 | /* There's room in the FIFO for a full-sized packet. */ | 921 | /* There's room in the FIFO for a full-sized packet. */ |
930 | outw(AckIntr | TxAvailable, ioaddr + EL3_CMD); | 922 | outw(AckIntr | TxAvailable, ioaddr + EL3_CMD); |
931 | netif_wake_queue (dev); | 923 | netif_wake_queue (dev); |
@@ -964,7 +956,7 @@ el3_interrupt(int irq, void *dev_id) | |||
964 | } | 956 | } |
965 | 957 | ||
966 | if (--i < 0) { | 958 | if (--i < 0) { |
967 | printk("%s: Infinite loop in interrupt, status %4.4x.\n", | 959 | printk(KERN_ERR "%s: Infinite loop in interrupt, status %4.4x.\n", |
968 | dev->name, status); | 960 | dev->name, status); |
969 | /* Clear all interrupts. */ | 961 | /* Clear all interrupts. */ |
970 | outw(AckIntr | 0xFF, ioaddr + EL3_CMD); | 962 | outw(AckIntr | 0xFF, ioaddr + EL3_CMD); |
@@ -975,7 +967,7 @@ el3_interrupt(int irq, void *dev_id) | |||
975 | } | 967 | } |
976 | 968 | ||
977 | if (el3_debug > 4) { | 969 | if (el3_debug > 4) { |
978 | printk("%s: exiting interrupt, status %4.4x.\n", dev->name, | 970 | printk(KERN_DEBUG "%s: exiting interrupt, status %4.4x.\n", dev->name, |
979 | inw(ioaddr + EL3_STATUS)); | 971 | inw(ioaddr + EL3_STATUS)); |
980 | } | 972 | } |
981 | spin_unlock(&lp->lock); | 973 | spin_unlock(&lp->lock); |
@@ -1450,7 +1442,7 @@ el3_up(struct net_device *dev) | |||
1450 | } | 1442 | } |
1451 | 1443 | ||
1452 | /* Power Management support functions */ | 1444 | /* Power Management support functions */ |
1453 | #ifdef EL3_SUSPEND | 1445 | #ifdef CONFIG_PM |
1454 | 1446 | ||
1455 | static int | 1447 | static int |
1456 | el3_suspend(struct device *pdev, pm_message_t state) | 1448 | el3_suspend(struct device *pdev, pm_message_t state) |
@@ -1500,79 +1492,102 @@ el3_resume(struct device *pdev) | |||
1500 | return 0; | 1492 | return 0; |
1501 | } | 1493 | } |
1502 | 1494 | ||
1503 | #endif /* EL3_SUSPEND */ | 1495 | #endif /* CONFIG_PM */ |
1504 | |||
1505 | /* Parameters that may be passed into the module. */ | ||
1506 | static int debug = -1; | ||
1507 | static int irq[] = {-1, -1, -1, -1, -1, -1, -1, -1}; | ||
1508 | static int xcvr[] = {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}; | ||
1509 | 1496 | ||
1510 | module_param(debug,int, 0); | 1497 | module_param(debug,int, 0); |
1511 | module_param_array(irq, int, NULL, 0); | 1498 | module_param_array(irq, int, NULL, 0); |
1512 | module_param_array(xcvr, int, NULL, 0); | ||
1513 | module_param(max_interrupt_work, int, 0); | 1499 | module_param(max_interrupt_work, int, 0); |
1514 | MODULE_PARM_DESC(debug, "debug level (0-6)"); | 1500 | MODULE_PARM_DESC(debug, "debug level (0-6)"); |
1515 | MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)"); | 1501 | MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)"); |
1516 | MODULE_PARM_DESC(xcvr,"transceiver(s) (0=internal, 1=external)"); | ||
1517 | MODULE_PARM_DESC(max_interrupt_work, "maximum events handled per interrupt"); | 1502 | MODULE_PARM_DESC(max_interrupt_work, "maximum events handled per interrupt"); |
1518 | #if defined(__ISAPNP__) | 1503 | #ifdef CONFIG_PNP |
1519 | module_param(nopnp, int, 0); | 1504 | module_param(nopnp, int, 0); |
1520 | MODULE_PARM_DESC(nopnp, "disable ISA PnP support (0-1)"); | 1505 | MODULE_PARM_DESC(nopnp, "disable ISA PnP support (0-1)"); |
1521 | MODULE_DEVICE_TABLE(isapnp, el3_isapnp_adapters); | 1506 | #endif /* CONFIG_PNP */ |
1522 | #endif /* __ISAPNP__ */ | 1507 | MODULE_DESCRIPTION("3Com Etherlink III (3c509, 3c509B, 3c529, 3c579) ethernet driver"); |
1523 | MODULE_DESCRIPTION("3Com Etherlink III (3c509, 3c509B) ISA/PnP ethernet driver"); | ||
1524 | MODULE_LICENSE("GPL"); | 1508 | MODULE_LICENSE("GPL"); |
1525 | 1509 | ||
1526 | static int __init el3_init_module(void) | 1510 | static int __init el3_init_module(void) |
1527 | { | 1511 | { |
1528 | int ret = 0; | 1512 | int ret = 0; |
1529 | el3_cards = 0; | ||
1530 | 1513 | ||
1531 | if (debug >= 0) | 1514 | if (debug >= 0) |
1532 | el3_debug = debug; | 1515 | el3_debug = debug; |
1533 | 1516 | ||
1534 | el3_root_dev = NULL; | 1517 | #ifdef CONFIG_PNP |
1535 | while (el3_probe(el3_cards) == 0) { | 1518 | if (!nopnp) { |
1536 | if (irq[el3_cards] > 1) | 1519 | ret = pnp_register_driver(&el3_pnp_driver); |
1537 | el3_root_dev->irq = irq[el3_cards]; | 1520 | if (!ret) |
1538 | if (xcvr[el3_cards] >= 0) | 1521 | pnp_registered = 1; |
1539 | el3_root_dev->if_port = xcvr[el3_cards]; | 1522 | } |
1540 | el3_cards++; | 1523 | #endif |
1524 | /* Select an open I/O location at 0x1*0 to do ISA contention select. */ | ||
1525 | /* Start with 0x110 to avoid some sound cards.*/ | ||
1526 | for (id_port = 0x110 ; id_port < 0x200; id_port += 0x10) { | ||
1527 | if (!request_region(id_port, 1, "3c509-control")) | ||
1528 | continue; | ||
1529 | outb(0x00, id_port); | ||
1530 | outb(0xff, id_port); | ||
1531 | if (inb(id_port) & 0x01) | ||
1532 | break; | ||
1533 | else | ||
1534 | release_region(id_port, 1); | ||
1535 | } | ||
1536 | if (id_port >= 0x200) { | ||
1537 | id_port = 0; | ||
1538 | printk(KERN_ERR "No I/O port available for 3c509 activation.\n"); | ||
1539 | } else { | ||
1540 | ret = isa_register_driver(&el3_isa_driver, EL3_MAX_CARDS); | ||
1541 | if (!ret) | ||
1542 | isa_registered = 1; | ||
1541 | } | 1543 | } |
1542 | |||
1543 | #ifdef CONFIG_EISA | 1544 | #ifdef CONFIG_EISA |
1544 | ret = eisa_driver_register(&el3_eisa_driver); | 1545 | ret = eisa_driver_register(&el3_eisa_driver); |
1546 | if (!ret) | ||
1547 | eisa_registered = 1; | ||
1545 | #endif | 1548 | #endif |
1546 | #ifdef CONFIG_MCA | 1549 | #ifdef CONFIG_MCA |
1547 | { | 1550 | ret = mca_register_driver(&el3_mca_driver); |
1548 | int err = mca_register_driver(&el3_mca_driver); | 1551 | if (!ret) |
1549 | if (ret == 0) | 1552 | mca_registered = 1; |
1550 | ret = err; | 1553 | #endif |
1551 | } | 1554 | |
1555 | #ifdef CONFIG_PNP | ||
1556 | if (pnp_registered) | ||
1557 | ret = 0; | ||
1558 | #endif | ||
1559 | if (isa_registered) | ||
1560 | ret = 0; | ||
1561 | #ifdef CONFIG_EISA | ||
1562 | if (eisa_registered) | ||
1563 | ret = 0; | ||
1564 | #endif | ||
1565 | #ifdef CONFIG_MCA | ||
1566 | if (mca_registered) | ||
1567 | ret = 0; | ||
1552 | #endif | 1568 | #endif |
1553 | return ret; | 1569 | return ret; |
1554 | } | 1570 | } |
1555 | 1571 | ||
1556 | static void __exit el3_cleanup_module(void) | 1572 | static void __exit el3_cleanup_module(void) |
1557 | { | 1573 | { |
1558 | struct net_device *next_dev; | 1574 | #ifdef CONFIG_PNP |
1559 | 1575 | if (pnp_registered) | |
1560 | while (el3_root_dev) { | 1576 | pnp_unregister_driver(&el3_pnp_driver); |
1561 | struct el3_private *lp = netdev_priv(el3_root_dev); | 1577 | #endif |
1562 | 1578 | if (isa_registered) | |
1563 | next_dev = lp->next_dev; | 1579 | isa_unregister_driver(&el3_isa_driver); |
1564 | el3_common_remove (el3_root_dev); | 1580 | if (id_port) |
1565 | el3_root_dev = next_dev; | 1581 | release_region(id_port, 1); |
1566 | } | ||
1567 | |||
1568 | #ifdef CONFIG_EISA | 1582 | #ifdef CONFIG_EISA |
1569 | eisa_driver_unregister (&el3_eisa_driver); | 1583 | if (eisa_registered) |
1584 | eisa_driver_unregister(&el3_eisa_driver); | ||
1570 | #endif | 1585 | #endif |
1571 | #ifdef CONFIG_MCA | 1586 | #ifdef CONFIG_MCA |
1572 | mca_unregister_driver(&el3_mca_driver); | 1587 | if (mca_registered) |
1588 | mca_unregister_driver(&el3_mca_driver); | ||
1573 | #endif | 1589 | #endif |
1574 | } | 1590 | } |
1575 | 1591 | ||
1576 | module_init (el3_init_module); | 1592 | module_init (el3_init_module); |
1577 | module_exit (el3_cleanup_module); | 1593 | module_exit (el3_cleanup_module); |
1578 | |||
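The ISA probe path added above (el3_isa_id_sequence()/el3_isa_match()) detects legacy 3c509 boards by writing a 255-byte pseudo-random ID sequence to the shared id_port; boards not yet tagged respond to the EEPROM reads that follow, and per the driver comment only the lowest-address board stays "on-line" after contention select. The generator is an LFSR-style recurrence seeded with 0xff and XORed with 0xcf whenever bit 8 becomes set. A standalone user-space sketch of that sequence generator (hypothetical demo code, not part of the driver):

```c
#include <stdint.h>
#include <stdio.h>

/* Reproduce the 3c509 ID sequence: the driver writes lrs_state to id_port,
 * shifts it left, and XORs with 0xcf whenever bit 8 becomes set
 * (see el3_isa_id_sequence() in the diff above). */
static void el3_id_sequence(uint8_t out[255])
{
	unsigned int lrs_state = 0xff;
	int i;

	for (i = 0; i < 255; i++) {
		out[i] = lrs_state & 0xff;	/* byte the driver would outb() */
		lrs_state <<= 1;
		if (lrs_state & 0x100)
			lrs_state ^= 0xcf;
	}
}

int main(void)
{
	uint8_t seq[255];

	el3_id_sequence(seq);
	printf("first ID bytes: %02x %02x %02x %02x\n",
	       seq[0], seq[1], seq[2], seq[3]);
	return 0;
}
```

Only the low byte ever reaches the port, and bits above bit 8 never feed back into the recurrence, so the emitted bytes match the driver's short-typed arithmetic.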
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c index c59c8067de99..bdc4c0bb56d9 100644 --- a/drivers/net/arcnet/arcnet.c +++ b/drivers/net/arcnet/arcnet.c | |||
@@ -940,7 +940,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id) | |||
940 | 940 | ||
941 | /* is the RECON info empty or old? */ | 941 | /* is the RECON info empty or old? */ |
942 | if (!lp->first_recon || !lp->last_recon || | 942 | if (!lp->first_recon || !lp->last_recon || |
943 | jiffies - lp->last_recon > HZ * 10) { | 943 | time_after(jiffies, lp->last_recon + HZ * 10)) { |
944 | if (lp->network_down) | 944 | if (lp->network_down) |
945 | BUGMSG(D_NORMAL, "reconfiguration detected: cabling restored?\n"); | 945 | BUGMSG(D_NORMAL, "reconfiguration detected: cabling restored?\n"); |
946 | lp->first_recon = lp->last_recon = jiffies; | 946 | lp->first_recon = lp->last_recon = jiffies; |
@@ -974,7 +974,8 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id) | |||
974 | lp->num_recons = 1; | 974 | lp->num_recons = 1; |
975 | } | 975 | } |
976 | } | 976 | } |
977 | } else if (lp->network_down && jiffies - lp->last_recon > HZ * 10) { | 977 | } else if (lp->network_down && |
978 | time_after(jiffies, lp->last_recon + HZ * 10)) { | ||
978 | if (lp->network_down) | 979 | if (lp->network_down) |
979 | BUGMSG(D_NORMAL, "cabling restored?\n"); | 980 | BUGMSG(D_NORMAL, "cabling restored?\n"); |
980 | lp->first_recon = lp->last_recon = 0; | 981 | lp->first_recon = lp->last_recon = 0; |
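The arcnet hunks above replace open-coded jiffies arithmetic (jiffies - lp->last_recon > HZ * 10) with time_after(), which does the comparison in signed arithmetic so it remains correct when the jiffies counter wraps around. A minimal sketch of the same pattern (hypothetical helper, not part of arcnet.c):

```c
#include <linux/jiffies.h>
#include <linux/types.h>

/* True once more than 10 seconds of jiffies have elapsed since 'stamp'.
 * time_after() is wraparound-safe, unlike a raw subtract-and-compare. */
static inline bool recon_info_stale(unsigned long stamp)
{
	return time_after(jiffies, stamp + 10 * HZ);
}
```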
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c index 7fe20310eb5f..f7e1619b974e 100644 --- a/drivers/net/e1000e/82571.c +++ b/drivers/net/e1000e/82571.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2007 Intel Corporation. | 4 | Copyright(c) 1999 - 2008 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -29,6 +29,9 @@ | |||
29 | /* | 29 | /* |
30 | * 82571EB Gigabit Ethernet Controller | 30 | * 82571EB Gigabit Ethernet Controller |
31 | * 82571EB Gigabit Ethernet Controller (Fiber) | 31 | * 82571EB Gigabit Ethernet Controller (Fiber) |
32 | * 82571EB Dual Port Gigabit Mezzanine Adapter | ||
33 | * 82571EB Quad Port Gigabit Mezzanine Adapter | ||
34 | * 82571PT Gigabit PT Quad Port Server ExpressModule | ||
32 | * 82572EI Gigabit Ethernet Controller (Copper) | 35 | * 82572EI Gigabit Ethernet Controller (Copper) |
33 | * 82572EI Gigabit Ethernet Controller (Fiber) | 36 | * 82572EI Gigabit Ethernet Controller (Fiber) |
34 | * 82572EI Gigabit Ethernet Controller | 37 | * 82572EI Gigabit Ethernet Controller |
@@ -72,7 +75,7 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw) | |||
72 | struct e1000_phy_info *phy = &hw->phy; | 75 | struct e1000_phy_info *phy = &hw->phy; |
73 | s32 ret_val; | 76 | s32 ret_val; |
74 | 77 | ||
75 | if (hw->media_type != e1000_media_type_copper) { | 78 | if (hw->phy.media_type != e1000_media_type_copper) { |
76 | phy->type = e1000_phy_none; | 79 | phy->type = e1000_phy_none; |
77 | return 0; | 80 | return 0; |
78 | } | 81 | } |
@@ -150,7 +153,8 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw) | |||
150 | if (((eecd >> 15) & 0x3) == 0x3) { | 153 | if (((eecd >> 15) & 0x3) == 0x3) { |
151 | nvm->type = e1000_nvm_flash_hw; | 154 | nvm->type = e1000_nvm_flash_hw; |
152 | nvm->word_size = 2048; | 155 | nvm->word_size = 2048; |
153 | /* Autonomous Flash update bit must be cleared due | 156 | /* |
157 | * Autonomous Flash update bit must be cleared due | ||
154 | * to Flash update issue. | 158 | * to Flash update issue. |
155 | */ | 159 | */ |
156 | eecd &= ~E1000_EECD_AUPDEN; | 160 | eecd &= ~E1000_EECD_AUPDEN; |
@@ -159,10 +163,11 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw) | |||
159 | } | 163 | } |
160 | /* Fall Through */ | 164 | /* Fall Through */ |
161 | default: | 165 | default: |
162 | nvm->type = e1000_nvm_eeprom_spi; | 166 | nvm->type = e1000_nvm_eeprom_spi; |
163 | size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> | 167 | size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> |
164 | E1000_EECD_SIZE_EX_SHIFT); | 168 | E1000_EECD_SIZE_EX_SHIFT); |
165 | /* Added to a constant, "size" becomes the left-shift value | 169 | /* |
170 | * Added to a constant, "size" becomes the left-shift value | ||
166 | * for setting word_size. | 171 | * for setting word_size. |
167 | */ | 172 | */ |
168 | size += NVM_WORD_SIZE_BASE_SHIFT; | 173 | size += NVM_WORD_SIZE_BASE_SHIFT; |
@@ -190,16 +195,16 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter) | |||
190 | case E1000_DEV_ID_82571EB_FIBER: | 195 | case E1000_DEV_ID_82571EB_FIBER: |
191 | case E1000_DEV_ID_82572EI_FIBER: | 196 | case E1000_DEV_ID_82572EI_FIBER: |
192 | case E1000_DEV_ID_82571EB_QUAD_FIBER: | 197 | case E1000_DEV_ID_82571EB_QUAD_FIBER: |
193 | hw->media_type = e1000_media_type_fiber; | 198 | hw->phy.media_type = e1000_media_type_fiber; |
194 | break; | 199 | break; |
195 | case E1000_DEV_ID_82571EB_SERDES: | 200 | case E1000_DEV_ID_82571EB_SERDES: |
196 | case E1000_DEV_ID_82572EI_SERDES: | 201 | case E1000_DEV_ID_82572EI_SERDES: |
197 | case E1000_DEV_ID_82571EB_SERDES_DUAL: | 202 | case E1000_DEV_ID_82571EB_SERDES_DUAL: |
198 | case E1000_DEV_ID_82571EB_SERDES_QUAD: | 203 | case E1000_DEV_ID_82571EB_SERDES_QUAD: |
199 | hw->media_type = e1000_media_type_internal_serdes; | 204 | hw->phy.media_type = e1000_media_type_internal_serdes; |
200 | break; | 205 | break; |
201 | default: | 206 | default: |
202 | hw->media_type = e1000_media_type_copper; | 207 | hw->phy.media_type = e1000_media_type_copper; |
203 | break; | 208 | break; |
204 | } | 209 | } |
205 | 210 | ||
@@ -208,25 +213,28 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter) | |||
208 | /* Set rar entry count */ | 213 | /* Set rar entry count */ |
209 | mac->rar_entry_count = E1000_RAR_ENTRIES; | 214 | mac->rar_entry_count = E1000_RAR_ENTRIES; |
210 | /* Set if manageability features are enabled. */ | 215 | /* Set if manageability features are enabled. */ |
211 | mac->arc_subsystem_valid = | 216 | mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK) ? 1 : 0; |
212 | (er32(FWSM) & E1000_FWSM_MODE_MASK) ? 1 : 0; | ||
213 | 217 | ||
214 | /* check for link */ | 218 | /* check for link */ |
215 | switch (hw->media_type) { | 219 | switch (hw->phy.media_type) { |
216 | case e1000_media_type_copper: | 220 | case e1000_media_type_copper: |
217 | func->setup_physical_interface = e1000_setup_copper_link_82571; | 221 | func->setup_physical_interface = e1000_setup_copper_link_82571; |
218 | func->check_for_link = e1000e_check_for_copper_link; | 222 | func->check_for_link = e1000e_check_for_copper_link; |
219 | func->get_link_up_info = e1000e_get_speed_and_duplex_copper; | 223 | func->get_link_up_info = e1000e_get_speed_and_duplex_copper; |
220 | break; | 224 | break; |
221 | case e1000_media_type_fiber: | 225 | case e1000_media_type_fiber: |
222 | func->setup_physical_interface = e1000_setup_fiber_serdes_link_82571; | 226 | func->setup_physical_interface = |
227 | e1000_setup_fiber_serdes_link_82571; | ||
223 | func->check_for_link = e1000e_check_for_fiber_link; | 228 | func->check_for_link = e1000e_check_for_fiber_link; |
224 | func->get_link_up_info = e1000e_get_speed_and_duplex_fiber_serdes; | 229 | func->get_link_up_info = |
230 | e1000e_get_speed_and_duplex_fiber_serdes; | ||
225 | break; | 231 | break; |
226 | case e1000_media_type_internal_serdes: | 232 | case e1000_media_type_internal_serdes: |
227 | func->setup_physical_interface = e1000_setup_fiber_serdes_link_82571; | 233 | func->setup_physical_interface = |
234 | e1000_setup_fiber_serdes_link_82571; | ||
228 | func->check_for_link = e1000e_check_for_serdes_link; | 235 | func->check_for_link = e1000e_check_for_serdes_link; |
229 | func->get_link_up_info = e1000e_get_speed_and_duplex_fiber_serdes; | 236 | func->get_link_up_info = |
237 | e1000e_get_speed_and_duplex_fiber_serdes; | ||
230 | break; | 238 | break; |
231 | default: | 239 | default: |
232 | return -E1000_ERR_CONFIG; | 240 | return -E1000_ERR_CONFIG; |
@@ -322,10 +330,12 @@ static s32 e1000_get_phy_id_82571(struct e1000_hw *hw) | |||
322 | switch (hw->mac.type) { | 330 | switch (hw->mac.type) { |
323 | case e1000_82571: | 331 | case e1000_82571: |
324 | case e1000_82572: | 332 | case e1000_82572: |
325 | /* The 82571 firmware may still be configuring the PHY. | 333 | /* |
334 | * The 82571 firmware may still be configuring the PHY. | ||
326 | * In this case, we cannot access the PHY until the | 335 | * In this case, we cannot access the PHY until the |
327 | * configuration is done. So we explicitly set the | 336 | * configuration is done. So we explicitly set the |
328 | * PHY ID. */ | 337 | * PHY ID. |
338 | */ | ||
329 | phy->id = IGP01E1000_I_PHY_ID; | 339 | phy->id = IGP01E1000_I_PHY_ID; |
330 | break; | 340 | break; |
331 | case e1000_82573: | 341 | case e1000_82573: |
@@ -479,8 +489,10 @@ static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw) | |||
479 | if (ret_val) | 489 | if (ret_val) |
480 | return ret_val; | 490 | return ret_val; |
481 | 491 | ||
482 | /* If our nvm is an EEPROM, then we're done | 492 | /* |
483 | * otherwise, commit the checksum to the flash NVM. */ | 493 | * If our nvm is an EEPROM, then we're done |
494 | * otherwise, commit the checksum to the flash NVM. | ||
495 | */ | ||
484 | if (hw->nvm.type != e1000_nvm_flash_hw) | 496 | if (hw->nvm.type != e1000_nvm_flash_hw) |
485 | return ret_val; | 497 | return ret_val; |
486 | 498 | ||
@@ -496,7 +508,8 @@ static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw) | |||
496 | 508 | ||
497 | /* Reset the firmware if using STM opcode. */ | 509 | /* Reset the firmware if using STM opcode. */ |
498 | if ((er32(FLOP) & 0xFF00) == E1000_STM_OPCODE) { | 510 | if ((er32(FLOP) & 0xFF00) == E1000_STM_OPCODE) { |
499 | /* The enabling of and the actual reset must be done | 511 | /* |
512 | * The enabling of and the actual reset must be done | ||
500 | * in two write cycles. | 513 | * in two write cycles. |
501 | */ | 514 | */ |
502 | ew32(HICR, E1000_HICR_FW_RESET_ENABLE); | 515 | ew32(HICR, E1000_HICR_FW_RESET_ENABLE); |
@@ -557,8 +570,10 @@ static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, | |||
557 | u32 eewr = 0; | 570 | u32 eewr = 0; |
558 | s32 ret_val = 0; | 571 | s32 ret_val = 0; |
559 | 572 | ||
560 | /* A check for invalid values: offset too large, too many words, | 573 | /* |
561 | * and not enough words. */ | 574 | * A check for invalid values: offset too large, too many words, |
575 | * and not enough words. | ||
576 | */ | ||
562 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || | 577 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || |
563 | (words == 0)) { | 578 | (words == 0)) { |
564 | hw_dbg(hw, "nvm parameter(s) out of bounds\n"); | 579 | hw_dbg(hw, "nvm parameter(s) out of bounds\n"); |
@@ -645,30 +660,32 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active) | |||
645 | } else { | 660 | } else { |
646 | data &= ~IGP02E1000_PM_D0_LPLU; | 661 | data &= ~IGP02E1000_PM_D0_LPLU; |
647 | ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); | 662 | ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); |
648 | /* LPLU and SmartSpeed are mutually exclusive. LPLU is used | 663 | /* |
664 | * LPLU and SmartSpeed are mutually exclusive. LPLU is used | ||
649 | * during Dx states where the power conservation is most | 665 | * during Dx states where the power conservation is most |
650 | * important. During driver activity we should enable | 666 | * important. During driver activity we should enable |
651 | * SmartSpeed, so performance is maintained. */ | 667 | * SmartSpeed, so performance is maintained. |
668 | */ | ||
652 | if (phy->smart_speed == e1000_smart_speed_on) { | 669 | if (phy->smart_speed == e1000_smart_speed_on) { |
653 | ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, | 670 | ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, |
654 | &data); | 671 | &data); |
655 | if (ret_val) | 672 | if (ret_val) |
656 | return ret_val; | 673 | return ret_val; |
657 | 674 | ||
658 | data |= IGP01E1000_PSCFR_SMART_SPEED; | 675 | data |= IGP01E1000_PSCFR_SMART_SPEED; |
659 | ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, | 676 | ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, |
660 | data); | 677 | data); |
661 | if (ret_val) | 678 | if (ret_val) |
662 | return ret_val; | 679 | return ret_val; |
663 | } else if (phy->smart_speed == e1000_smart_speed_off) { | 680 | } else if (phy->smart_speed == e1000_smart_speed_off) { |
664 | ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, | 681 | ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, |
665 | &data); | 682 | &data); |
666 | if (ret_val) | 683 | if (ret_val) |
667 | return ret_val; | 684 | return ret_val; |
668 | 685 | ||
669 | data &= ~IGP01E1000_PSCFR_SMART_SPEED; | 686 | data &= ~IGP01E1000_PSCFR_SMART_SPEED; |
670 | ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, | 687 | ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, |
671 | data); | 688 | data); |
672 | if (ret_val) | 689 | if (ret_val) |
673 | return ret_val; | 690 | return ret_val; |
674 | } | 691 | } |
@@ -693,7 +710,8 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) | |||
693 | s32 ret_val; | 710 | s32 ret_val; |
694 | u16 i = 0; | 711 | u16 i = 0; |
695 | 712 | ||
696 | /* Prevent the PCI-E bus from sticking if there is no TLP connection | 713 | /* |
714 | * Prevent the PCI-E bus from sticking if there is no TLP connection | ||
697 | * on the last TLP read/write transaction when MAC is reset. | 715 | * on the last TLP read/write transaction when MAC is reset. |
698 | */ | 716 | */ |
699 | ret_val = e1000e_disable_pcie_master(hw); | 717 | ret_val = e1000e_disable_pcie_master(hw); |
@@ -709,8 +727,10 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) | |||
709 | 727 | ||
710 | msleep(10); | 728 | msleep(10); |
711 | 729 | ||
712 | /* Must acquire the MDIO ownership before MAC reset. | 730 | /* |
713 | * Ownership defaults to firmware after a reset. */ | 731 | * Must acquire the MDIO ownership before MAC reset. |
732 | * Ownership defaults to firmware after a reset. | ||
733 | */ | ||
714 | if (hw->mac.type == e1000_82573) { | 734 | if (hw->mac.type == e1000_82573) { |
715 | extcnf_ctrl = er32(EXTCNF_CTRL); | 735 | extcnf_ctrl = er32(EXTCNF_CTRL); |
716 | extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; | 736 | extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; |
@@ -747,7 +767,8 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) | |||
747 | /* We don't want to continue accessing MAC registers. */ | 767 | /* We don't want to continue accessing MAC registers. */ |
748 | return ret_val; | 768 | return ret_val; |
749 | 769 | ||
750 | /* Phy configuration from NVM just starts after EECD_AUTO_RD is set. | 770 | /* |
771 | * Phy configuration from NVM just starts after EECD_AUTO_RD is set. | ||
751 | * Need to wait for Phy configuration completion before accessing | 772 | * Need to wait for Phy configuration completion before accessing |
752 | * NVM and Phy. | 773 | * NVM and Phy. |
753 | */ | 774 | */ |
@@ -793,7 +814,8 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw) | |||
793 | e1000e_clear_vfta(hw); | 814 | e1000e_clear_vfta(hw); |
794 | 815 | ||
795 | /* Setup the receive address. */ | 816 | /* Setup the receive address. */ |
796 | /* If, however, a locally administered address was assigned to the | 817 | /* |
818 | * If, however, a locally administered address was assigned to the | ||
797 | * 82571, we must reserve a RAR for it to work around an issue where | 819 | * 82571, we must reserve a RAR for it to work around an issue where |
798 | * resetting one port will reload the MAC on the other port. | 820 | * resetting one port will reload the MAC on the other port. |
799 | */ | 821 | */ |
@@ -830,7 +852,8 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw) | |||
830 | ew32(GCR, reg_data); | 852 | ew32(GCR, reg_data); |
831 | } | 853 | } |
832 | 854 | ||
833 | /* Clear all of the statistics registers (clear on read). It is | 855 | /* |
856 | * Clear all of the statistics registers (clear on read). It is | ||
834 | * important that we do this after we have tried to establish link | 857 | * important that we do this after we have tried to establish link |
835 | * because the symbol error count will increment wildly if there | 858 | * because the symbol error count will increment wildly if there |
836 | * is no link. | 859 | * is no link. |
@@ -922,7 +945,8 @@ void e1000e_clear_vfta(struct e1000_hw *hw) | |||
922 | 945 | ||
923 | if (hw->mac.type == e1000_82573) { | 946 | if (hw->mac.type == e1000_82573) { |
924 | if (hw->mng_cookie.vlan_id != 0) { | 947 | if (hw->mng_cookie.vlan_id != 0) { |
925 | /* The VFTA is a 4096b bit-field, each identifying | 948 | /* |
949 | * The VFTA is a 4096b bit-field, each identifying | ||
926 | * a single VLAN ID. The following operations | 950 | * a single VLAN ID. The following operations |
927 | * determine which 32b entry (i.e. offset) into the | 951 | * determine which 32b entry (i.e. offset) into the |
928 | * array we want to set the VLAN ID (i.e. bit) of | 952 | * array we want to set the VLAN ID (i.e. bit) of |
@@ -936,7 +960,8 @@ void e1000e_clear_vfta(struct e1000_hw *hw) | |||
936 | } | 960 | } |
937 | } | 961 | } |
938 | for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { | 962 | for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { |
939 | /* If the offset we want to clear is the same offset of the | 963 | /* |
964 | * If the offset we want to clear is the same offset of the | ||
940 | * manageability VLAN ID, then clear all bits except that of | 965 | * manageability VLAN ID, then clear all bits except that of |
941 | * the manageability unit. | 966 | * the manageability unit. |
942 | */ | 967 | */ |
@@ -947,7 +972,7 @@ void e1000e_clear_vfta(struct e1000_hw *hw) | |||
947 | } | 972 | } |
948 | 973 | ||
949 | /** | 974 | /** |
950 | * e1000_mc_addr_list_update_82571 - Update Multicast addresses | 975 | * e1000_update_mc_addr_list_82571 - Update Multicast addresses |
951 | * @hw: pointer to the HW structure | 976 | * @hw: pointer to the HW structure |
952 | * @mc_addr_list: array of multicast addresses to program | 977 | * @mc_addr_list: array of multicast addresses to program |
953 | * @mc_addr_count: number of multicast addresses to program | 978 | * @mc_addr_count: number of multicast addresses to program |
@@ -959,7 +984,7 @@ void e1000e_clear_vfta(struct e1000_hw *hw) | |||
959 | * The parameter rar_count will usually be hw->mac.rar_entry_count | 984 | * The parameter rar_count will usually be hw->mac.rar_entry_count |
960 | * unless there are workarounds that change this. | 985 | * unless there are workarounds that change this. |
961 | **/ | 986 | **/ |
962 | static void e1000_mc_addr_list_update_82571(struct e1000_hw *hw, | 987 | static void e1000_update_mc_addr_list_82571(struct e1000_hw *hw, |
963 | u8 *mc_addr_list, | 988 | u8 *mc_addr_list, |
964 | u32 mc_addr_count, | 989 | u32 mc_addr_count, |
965 | u32 rar_used_count, | 990 | u32 rar_used_count, |
@@ -968,8 +993,8 @@ static void e1000_mc_addr_list_update_82571(struct e1000_hw *hw, | |||
968 | if (e1000e_get_laa_state_82571(hw)) | 993 | if (e1000e_get_laa_state_82571(hw)) |
969 | rar_count--; | 994 | rar_count--; |
970 | 995 | ||
971 | e1000e_mc_addr_list_update_generic(hw, mc_addr_list, mc_addr_count, | 996 | e1000e_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count, |
972 | rar_used_count, rar_count); | 997 | rar_used_count, rar_count); |
973 | } | 998 | } |
974 | 999 | ||
975 | /** | 1000 | /** |
@@ -984,12 +1009,13 @@ static void e1000_mc_addr_list_update_82571(struct e1000_hw *hw, | |||
984 | **/ | 1009 | **/ |
985 | static s32 e1000_setup_link_82571(struct e1000_hw *hw) | 1010 | static s32 e1000_setup_link_82571(struct e1000_hw *hw) |
986 | { | 1011 | { |
987 | /* 82573 does not have a word in the NVM to determine | 1012 | /* |
1013 | * 82573 does not have a word in the NVM to determine | ||
988 | * the default flow control setting, so we explicitly | 1014 | * the default flow control setting, so we explicitly |
989 | * set it to full. | 1015 | * set it to full. |
990 | */ | 1016 | */ |
991 | if (hw->mac.type == e1000_82573) | 1017 | if (hw->mac.type == e1000_82573) |
992 | hw->mac.fc = e1000_fc_full; | 1018 | hw->fc.type = e1000_fc_full; |
993 | 1019 | ||
994 | return e1000e_setup_link(hw); | 1020 | return e1000e_setup_link(hw); |
995 | } | 1021 | } |
@@ -1050,14 +1076,14 @@ static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw) | |||
1050 | switch (hw->mac.type) { | 1076 | switch (hw->mac.type) { |
1051 | case e1000_82571: | 1077 | case e1000_82571: |
1052 | case e1000_82572: | 1078 | case e1000_82572: |
1053 | /* If SerDes loopback mode is entered, there is no form | 1079 | /* |
1080 | * If SerDes loopback mode is entered, there is no form | ||
1054 | * of reset to take the adapter out of that mode. So we | 1081 | * of reset to take the adapter out of that mode. So we |
1055 | * have to explicitly take the adapter out of loopback | 1082 | * have to explicitly take the adapter out of loopback |
1056 | * mode. This prevents drivers from twiddling their thumbs | 1083 | * mode. This prevents drivers from twiddling their thumbs |
1057 | * if another tool failed to take it out of loopback mode. | 1084 | * if another tool failed to take it out of loopback mode. |
1058 | */ | 1085 | */ |
1059 | ew32(SCTL, | 1086 | ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); |
1060 | E1000_SCTL_DISABLE_SERDES_LOOPBACK); | ||
1061 | break; | 1087 | break; |
1062 | default: | 1088 | default: |
1063 | break; | 1089 | break; |
@@ -1124,7 +1150,8 @@ void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state) | |||
1124 | 1150 | ||
1125 | /* If workaround is activated... */ | 1151 | /* If workaround is activated... */ |
1126 | if (state) | 1152 | if (state) |
1127 | /* Hold a copy of the LAA in RAR[14] This is done so that | 1153 | /* |
1154 | * Hold a copy of the LAA in RAR[14] This is done so that | ||
1128 | * between the time RAR[0] gets clobbered and the time it | 1155 | * between the time RAR[0] gets clobbered and the time it |
1129 | * gets fixed, the actual LAA is in one of the RARs and no | 1156 | * gets fixed, the actual LAA is in one of the RARs and no |
1130 | * incoming packets directed to this port are dropped. | 1157 | * incoming packets directed to this port are dropped. |
@@ -1152,7 +1179,8 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw) | |||
1152 | if (nvm->type != e1000_nvm_flash_hw) | 1179 | if (nvm->type != e1000_nvm_flash_hw) |
1153 | return 0; | 1180 | return 0; |
1154 | 1181 | ||
1155 | /* Check bit 4 of word 10h. If it is 0, firmware is done updating | 1182 | /* |
1183 | * Check bit 4 of word 10h. If it is 0, firmware is done updating | ||
1156 | * 10h-12h. Checksum may need to be fixed. | 1184 | * 10h-12h. Checksum may need to be fixed. |
1157 | */ | 1185 | */ |
1158 | ret_val = e1000_read_nvm(hw, 0x10, 1, &data); | 1186 | ret_val = e1000_read_nvm(hw, 0x10, 1, &data); |
@@ -1160,7 +1188,8 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw) | |||
1160 | return ret_val; | 1188 | return ret_val; |
1161 | 1189 | ||
1162 | if (!(data & 0x10)) { | 1190 | if (!(data & 0x10)) { |
1163 | /* Read 0x23 and check bit 15. This bit is a 1 | 1191 | /* |
1192 | * Read 0x23 and check bit 15. This bit is a 1 | ||
1164 | * when the checksum has already been fixed. If | 1193 | * when the checksum has already been fixed. If |
1165 | * the checksum is still wrong and this bit is a | 1194 | * the checksum is still wrong and this bit is a |
1166 | * 1, we need to return bad checksum. Otherwise, | 1195 | * 1, we need to return bad checksum. Otherwise, |
@@ -1240,7 +1269,7 @@ static struct e1000_mac_operations e82571_mac_ops = { | |||
1240 | /* .get_link_up_info: media type dependent */ | 1269 | /* .get_link_up_info: media type dependent */ |
1241 | .led_on = e1000e_led_on_generic, | 1270 | .led_on = e1000e_led_on_generic, |
1242 | .led_off = e1000e_led_off_generic, | 1271 | .led_off = e1000e_led_off_generic, |
1243 | .mc_addr_list_update = e1000_mc_addr_list_update_82571, | 1272 | .update_mc_addr_list = e1000_update_mc_addr_list_82571, |
1244 | .reset_hw = e1000_reset_hw_82571, | 1273 | .reset_hw = e1000_reset_hw_82571, |
1245 | .init_hw = e1000_init_hw_82571, | 1274 | .init_hw = e1000_init_hw_82571, |
1246 | .setup_link = e1000_setup_link_82571, | 1275 | .setup_link = e1000_setup_link_82571, |
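The 82571 hunks above keep the driver's own explanation of how a VLAN ID maps into the 4096-bit VLAN filter table that e1000e_clear_vfta() walks: 128 thirty-two-bit entries (E1000_VLAN_FILTER_TBL_SIZE in defines.h), with the upper bits of the VLAN ID selecting the entry and the low five bits selecting the bit within it. As a rough, stand-alone sketch of that arithmetic only — the helper name is made up, and the real driver writes the result into the VFTA register array rather than printing it:

```c
#include <stdio.h>
#include <stdint.h>

#define E1000_VLAN_FILTER_TBL_SIZE 128	/* VLAN Filter Table (4096 bits) */

/*
 * Hypothetical helper: locate the 32-bit VFTA entry ("offset") and the bit
 * inside it for a 12-bit VLAN ID, mirroring the arithmetic described in the
 * e1000e_clear_vfta() comment for the manageability VLAN.
 */
static void vfta_locate(uint16_t vlan_id, uint32_t *offset, uint32_t *mask)
{
	*offset = (vlan_id >> 5) & (E1000_VLAN_FILTER_TBL_SIZE - 1); /* entry 0..127 */
	*mask = 1u << (vlan_id & 0x1F);                              /* bit within that entry */
}

int main(void)
{
	uint32_t offset, mask;

	vfta_locate(100, &offset, &mask); /* e.g. a manageability VLAN ID of 100 */
	printf("VLAN 100 -> VFTA[%u], bit mask 0x%08x\n", offset, mask);
	return 0;
}
```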
diff --git a/drivers/net/e1000e/Makefile b/drivers/net/e1000e/Makefile index 650f866e7ac2..360c91369f35 100644 --- a/drivers/net/e1000e/Makefile +++ b/drivers/net/e1000e/Makefile | |||
@@ -1,7 +1,7 @@ | |||
1 | ################################################################################ | 1 | ################################################################################ |
2 | # | 2 | # |
3 | # Intel PRO/1000 Linux driver | 3 | # Intel PRO/1000 Linux driver |
4 | # Copyright(c) 1999 - 2007 Intel Corporation. | 4 | # Copyright(c) 1999 - 2008 Intel Corporation. |
5 | # | 5 | # |
6 | # This program is free software; you can redistribute it and/or modify it | 6 | # This program is free software; you can redistribute it and/or modify it |
7 | # under the terms and conditions of the GNU General Public License, | 7 | # under the terms and conditions of the GNU General Public License, |
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h index a4f511f549f7..572cfd44397a 100644 --- a/drivers/net/e1000e/defines.h +++ b/drivers/net/e1000e/defines.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2007 Intel Corporation. | 4 | Copyright(c) 1999 - 2008 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -120,10 +120,10 @@ | |||
120 | #define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ | 120 | #define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ |
121 | #define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ | 121 | #define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ |
122 | #define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ | 122 | #define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ |
123 | #define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address | 123 | /* Enable MAC address filtering */ |
124 | * filtering */ | 124 | #define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 |
125 | #define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host | 125 | /* Enable MNG packets to host memory */ |
126 | * memory */ | 126 | #define E1000_MANC_EN_MNG2HOST 0x00200000 |
127 | 127 | ||
128 | /* Receive Control */ | 128 | /* Receive Control */ |
129 | #define E1000_RCTL_EN 0x00000002 /* enable */ | 129 | #define E1000_RCTL_EN 0x00000002 /* enable */ |
@@ -135,25 +135,26 @@ | |||
135 | #define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ | 135 | #define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ |
136 | #define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ | 136 | #define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ |
137 | #define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ | 137 | #define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ |
138 | #define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ | 138 | #define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min threshold size */ |
139 | #define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ | 139 | #define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ |
140 | #define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ | 140 | #define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ |
141 | /* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ | 141 | /* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ |
142 | #define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */ | 142 | #define E1000_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */ |
143 | #define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */ | 143 | #define E1000_RCTL_SZ_1024 0x00010000 /* Rx buffer size 1024 */ |
144 | #define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ | 144 | #define E1000_RCTL_SZ_512 0x00020000 /* Rx buffer size 512 */ |
145 | #define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ | 145 | #define E1000_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */ |
146 | /* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ | 146 | /* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ |
147 | #define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */ | 147 | #define E1000_RCTL_SZ_16384 0x00010000 /* Rx buffer size 16384 */ |
148 | #define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */ | 148 | #define E1000_RCTL_SZ_8192 0x00020000 /* Rx buffer size 8192 */ |
149 | #define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */ | 149 | #define E1000_RCTL_SZ_4096 0x00030000 /* Rx buffer size 4096 */ |
150 | #define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ | 150 | #define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ |
151 | #define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ | 151 | #define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ |
152 | #define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ | 152 | #define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ |
153 | #define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ | 153 | #define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ |
154 | #define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ | 154 | #define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ |
155 | 155 | ||
156 | /* Use byte values for the following shift parameters | 156 | /* |
157 | * Use byte values for the following shift parameters | ||
157 | * Usage: | 158 | * Usage: |
158 | * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & | 159 | * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & |
159 | * E1000_PSRCTL_BSIZE0_MASK) | | 160 | * E1000_PSRCTL_BSIZE0_MASK) | |
@@ -206,7 +207,8 @@ | |||
206 | #define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ | 207 | #define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ |
207 | #define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ | 208 | #define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ |
208 | 209 | ||
209 | /* Bit definitions for the Management Data IO (MDIO) and Management Data | 210 | /* |
211 | * Bit definitions for the Management Data IO (MDIO) and Management Data | ||
210 | * Clock (MDC) pins in the Device Control Register. | 212 | * Clock (MDC) pins in the Device Control Register. |
211 | */ | 213 | */ |
212 | 214 | ||
@@ -279,7 +281,7 @@ | |||
279 | #define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ | 281 | #define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ |
280 | 282 | ||
281 | /* Transmit Control */ | 283 | /* Transmit Control */ |
282 | #define E1000_TCTL_EN 0x00000002 /* enable tx */ | 284 | #define E1000_TCTL_EN 0x00000002 /* enable Tx */ |
283 | #define E1000_TCTL_PSP 0x00000008 /* pad short packets */ | 285 | #define E1000_TCTL_PSP 0x00000008 /* pad short packets */ |
284 | #define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ | 286 | #define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ |
285 | #define E1000_TCTL_COLD 0x003ff000 /* collision distance */ | 287 | #define E1000_TCTL_COLD 0x003ff000 /* collision distance */ |
@@ -337,8 +339,8 @@ | |||
337 | #define E1000_KABGTXD_BGSQLBIAS 0x00050000 | 339 | #define E1000_KABGTXD_BGSQLBIAS 0x00050000 |
338 | 340 | ||
339 | /* PBA constants */ | 341 | /* PBA constants */ |
340 | #define E1000_PBA_8K 0x0008 /* 8KB, default Rx allocation */ | 342 | #define E1000_PBA_8K 0x0008 /* 8KB */ |
341 | #define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */ | 343 | #define E1000_PBA_16K 0x0010 /* 16KB */ |
342 | 344 | ||
343 | #define E1000_PBS_16K E1000_PBA_16K | 345 | #define E1000_PBS_16K E1000_PBA_16K |
344 | 346 | ||
@@ -356,12 +358,13 @@ | |||
356 | /* Interrupt Cause Read */ | 358 | /* Interrupt Cause Read */ |
357 | #define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ | 359 | #define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ |
358 | #define E1000_ICR_LSC 0x00000004 /* Link Status Change */ | 360 | #define E1000_ICR_LSC 0x00000004 /* Link Status Change */ |
359 | #define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ | 361 | #define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */ |
360 | #define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */ | 362 | #define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */ |
361 | #define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ | 363 | #define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ |
362 | #define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ | 364 | #define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ |
363 | 365 | ||
364 | /* This defines the bits that are set in the Interrupt Mask | 366 | /* |
367 | * This defines the bits that are set in the Interrupt Mask | ||
365 | * Set/Read Register. Each bit is documented below: | 368 | * Set/Read Register. Each bit is documented below: |
366 | * o RXT0 = Receiver Timer Interrupt (ring 0) | 369 | * o RXT0 = Receiver Timer Interrupt (ring 0) |
367 | * o TXDW = Transmit Descriptor Written Back | 370 | * o TXDW = Transmit Descriptor Written Back |
@@ -379,21 +382,22 @@ | |||
379 | /* Interrupt Mask Set */ | 382 | /* Interrupt Mask Set */ |
380 | #define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ | 383 | #define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ |
381 | #define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ | 384 | #define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ |
382 | #define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ | 385 | #define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ |
383 | #define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ | 386 | #define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ |
384 | #define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ | 387 | #define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */ |
385 | 388 | ||
386 | /* Interrupt Cause Set */ | 389 | /* Interrupt Cause Set */ |
387 | #define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ | 390 | #define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ |
388 | #define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ | 391 | #define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ |
392 | #define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ | ||
389 | 393 | ||
390 | /* Transmit Descriptor Control */ | 394 | /* Transmit Descriptor Control */ |
391 | #define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ | 395 | #define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ |
392 | #define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */ | 396 | #define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */ |
393 | #define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ | 397 | #define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ |
394 | #define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */ | 398 | #define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */ |
395 | #define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc. | 399 | /* Enable the counting of desc. still to be processed. */ |
396 | still to be processed. */ | 400 | #define E1000_TXDCTL_COUNT_DESC 0x00400000 |
397 | 401 | ||
398 | /* Flow Control Constants */ | 402 | /* Flow Control Constants */ |
399 | #define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 | 403 | #define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 |
@@ -404,7 +408,8 @@ | |||
404 | #define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ | 408 | #define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ |
405 | 409 | ||
406 | /* Receive Address */ | 410 | /* Receive Address */ |
407 | /* Number of high/low register pairs in the RAR. The RAR (Receive Address | 411 | /* |
412 | * Number of high/low register pairs in the RAR. The RAR (Receive Address | ||
408 | * Registers) holds the directed and multicast addresses that we monitor. | 413 | * Registers) holds the directed and multicast addresses that we monitor. |
409 | * Technically, we have 16 spots. However, we reserve one of these spots | 414 | * Technically, we have 16 spots. However, we reserve one of these spots |
410 | * (RAR[15]) for our directed address used by controllers with | 415 | * (RAR[15]) for our directed address used by controllers with |
@@ -533,8 +538,8 @@ | |||
533 | #define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ | 538 | #define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ |
534 | #define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ | 539 | #define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ |
535 | #define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */ | 540 | #define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */ |
536 | #define E1000_EECD_ADDR_BITS 0x00000400 /* NVM Addressing bits based on type | 541 | /* NVM Addressing bits based on type (0-small, 1-large) */ |
537 | * (0-small, 1-large) */ | 542 | #define E1000_EECD_ADDR_BITS 0x00000400 |
538 | #define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ | 543 | #define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ |
539 | #define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ | 544 | #define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ |
540 | #define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ | 545 | #define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ |
@@ -626,7 +631,8 @@ | |||
626 | #define MAX_PHY_MULTI_PAGE_REG 0xF | 631 | #define MAX_PHY_MULTI_PAGE_REG 0xF |
627 | 632 | ||
628 | /* Bit definitions for valid PHY IDs. */ | 633 | /* Bit definitions for valid PHY IDs. */ |
629 | /* I = Integrated | 634 | /* |
635 | * I = Integrated | ||
630 | * E = External | 636 | * E = External |
631 | */ | 637 | */ |
632 | #define M88E1000_E_PHY_ID 0x01410C50 | 638 | #define M88E1000_E_PHY_ID 0x01410C50 |
@@ -653,37 +659,37 @@ | |||
653 | #define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ | 659 | #define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ |
654 | /* Manual MDI configuration */ | 660 | /* Manual MDI configuration */ |
655 | #define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ | 661 | #define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ |
656 | #define M88E1000_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover, | 662 | /* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */ |
657 | * 100BASE-TX/10BASE-T: | 663 | #define M88E1000_PSCR_AUTO_X_1000T 0x0040 |
658 | * MDI Mode | 664 | /* Auto crossover enabled all speeds */ |
659 | */ | 665 | #define M88E1000_PSCR_AUTO_X_MODE 0x0060 |
660 | #define M88E1000_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled | 666 | /* |
661 | * all speeds. | 667 | * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold) |
662 | */ | 668 | * 0=Normal 10BASE-T Rx Threshold |
663 | /* 1=Enable Extended 10BASE-T distance | 669 | */ |
664 | * (Lower 10BASE-T RX Threshold) | 670 | #define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ |
665 | * 0=Normal 10BASE-T RX Threshold */ | ||
666 | /* 1=5-Bit interface in 100BASE-TX | ||
667 | * 0=MII interface in 100BASE-TX */ | ||
668 | #define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ | ||
669 | 671 | ||
670 | /* M88E1000 PHY Specific Status Register */ | 672 | /* M88E1000 PHY Specific Status Register */ |
671 | #define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ | 673 | #define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ |
672 | #define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ | 674 | #define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ |
673 | #define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ | 675 | #define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ |
674 | #define M88E1000_PSSR_CABLE_LENGTH 0x0380 /* 0=<50M;1=50-80M;2=80-110M; | 676 | /* 0=<50M; 1=50-80M; 2=80-110M; 3=110-140M; 4=>140M */ |
675 | * 3=110-140M;4=>140M */ | 677 | #define M88E1000_PSSR_CABLE_LENGTH 0x0380 |
676 | #define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ | 678 | #define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ |
677 | #define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ | 679 | #define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ |
678 | 680 | ||
679 | #define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 | 681 | #define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 |
680 | 682 | ||
681 | /* Number of times we will attempt to autonegotiate before downshifting if we | 683 | /* |
682 | * are the master */ | 684 | * Number of times we will attempt to autonegotiate before downshifting if we |
685 | * are the master | ||
686 | */ | ||
683 | #define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 | 687 | #define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 |
684 | #define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 | 688 | #define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 |
685 | /* Number of times we will attempt to autonegotiate before downshifting if we | 689 | /* |
686 | * are the slave */ | 690 | * Number of times we will attempt to autonegotiate before downshifting if we |
691 | * are the slave | ||
692 | */ | ||
687 | #define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 | 693 | #define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 |
688 | #define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 | 694 | #define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 |
689 | #define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ | 695 | #define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ |
@@ -692,7 +698,8 @@ | |||
692 | #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 | 698 | #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 |
693 | #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 | 699 | #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 |
694 | 700 | ||
695 | /* Bits... | 701 | /* |
702 | * Bits... | ||
696 | * 15-5: page | 703 | * 15-5: page |
697 | * 4-0: register offset | 704 | * 4-0: register offset |
698 | */ | 705 | */ |
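Most of the defines.h churn above is comment reflow, but the constants themselves show how some of the PHY status decoding works. For example, the new one-line comment on M88E1000_PSSR_CABLE_LENGTH spells out that bits 9:7 hold a 0–4 cable-length code. A minimal user-space sketch of that decode, using only the mask and shift visible in this hunk (the sample register value is invented):

```c
#include <stdio.h>
#include <stdint.h>

#define M88E1000_PSSR_CABLE_LENGTH       0x0380 /* 0=<50M; 1=50-80M; 2=80-110M; 3=110-140M; 4=>140M */
#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7

static const char *cable_range[] = {
	"< 50 m", "50-80 m", "80-110 m", "110-140 m", "> 140 m",
};

int main(void)
{
	uint16_t pssr = 0x0180;	/* invented PHY Specific Status value: code 3 */
	unsigned int code = (pssr & M88E1000_PSSR_CABLE_LENGTH) >>
			    M88E1000_PSSR_CABLE_LENGTH_SHIFT;

	if (code <= 4)
		printf("estimated cable length: %s\n", cable_range[code]);
	else
		printf("reserved cable length code %u\n", code);
	return 0;
}
```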
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h index 4bf0c6c045c0..b941a6b509c4 100644 --- a/drivers/net/e1000e/e1000.h +++ b/drivers/net/e1000e/e1000.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2007 Intel Corporation. | 4 | Copyright(c) 1999 - 2008 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -61,7 +61,7 @@ struct e1000_info; | |||
61 | ndev_printk(KERN_NOTICE , netdev, format, ## arg) | 61 | ndev_printk(KERN_NOTICE , netdev, format, ## arg) |
62 | 62 | ||
63 | 63 | ||
64 | /* TX/RX descriptor defines */ | 64 | /* Tx/Rx descriptor defines */ |
65 | #define E1000_DEFAULT_TXD 256 | 65 | #define E1000_DEFAULT_TXD 256 |
66 | #define E1000_MAX_TXD 4096 | 66 | #define E1000_MAX_TXD 4096 |
67 | #define E1000_MIN_TXD 80 | 67 | #define E1000_MIN_TXD 80 |
@@ -114,13 +114,13 @@ struct e1000_buffer { | |||
114 | dma_addr_t dma; | 114 | dma_addr_t dma; |
115 | struct sk_buff *skb; | 115 | struct sk_buff *skb; |
116 | union { | 116 | union { |
117 | /* TX */ | 117 | /* Tx */ |
118 | struct { | 118 | struct { |
119 | unsigned long time_stamp; | 119 | unsigned long time_stamp; |
120 | u16 length; | 120 | u16 length; |
121 | u16 next_to_watch; | 121 | u16 next_to_watch; |
122 | }; | 122 | }; |
123 | /* RX */ | 123 | /* Rx */ |
124 | /* arrays of page information for packet split */ | 124 | /* arrays of page information for packet split */ |
125 | struct e1000_ps_page *ps_pages; | 125 | struct e1000_ps_page *ps_pages; |
126 | }; | 126 | }; |
@@ -177,7 +177,7 @@ struct e1000_adapter { | |||
177 | u16 rx_itr; | 177 | u16 rx_itr; |
178 | 178 | ||
179 | /* | 179 | /* |
180 | * TX | 180 | * Tx |
181 | */ | 181 | */ |
182 | struct e1000_ring *tx_ring /* One per active queue */ | 182 | struct e1000_ring *tx_ring /* One per active queue */ |
183 | ____cacheline_aligned_in_smp; | 183 | ____cacheline_aligned_in_smp; |
@@ -199,7 +199,7 @@ struct e1000_adapter { | |||
199 | unsigned int total_rx_bytes; | 199 | unsigned int total_rx_bytes; |
200 | unsigned int total_rx_packets; | 200 | unsigned int total_rx_packets; |
201 | 201 | ||
202 | /* TX stats */ | 202 | /* Tx stats */ |
203 | u64 tpt_old; | 203 | u64 tpt_old; |
204 | u64 colc_old; | 204 | u64 colc_old; |
205 | u64 gotcl_old; | 205 | u64 gotcl_old; |
@@ -211,7 +211,7 @@ struct e1000_adapter { | |||
211 | u32 tx_dma_failed; | 211 | u32 tx_dma_failed; |
212 | 212 | ||
213 | /* | 213 | /* |
214 | * RX | 214 | * Rx |
215 | */ | 215 | */ |
216 | bool (*clean_rx) (struct e1000_adapter *adapter, | 216 | bool (*clean_rx) (struct e1000_adapter *adapter, |
217 | int *work_done, int work_to_do) | 217 | int *work_done, int work_to_do) |
@@ -223,7 +223,7 @@ struct e1000_adapter { | |||
223 | u32 rx_int_delay; | 223 | u32 rx_int_delay; |
224 | u32 rx_abs_int_delay; | 224 | u32 rx_abs_int_delay; |
225 | 225 | ||
226 | /* RX stats */ | 226 | /* Rx stats */ |
227 | u64 hw_csum_err; | 227 | u64 hw_csum_err; |
228 | u64 hw_csum_good; | 228 | u64 hw_csum_good; |
229 | u64 rx_hdr_split; | 229 | u64 rx_hdr_split; |
@@ -234,6 +234,8 @@ struct e1000_adapter { | |||
234 | 234 | ||
235 | unsigned int rx_ps_pages; | 235 | unsigned int rx_ps_pages; |
236 | u16 rx_ps_bsize0; | 236 | u16 rx_ps_bsize0; |
237 | u32 max_frame_size; | ||
238 | u32 min_frame_size; | ||
237 | 239 | ||
238 | /* OS defined structs */ | 240 | /* OS defined structs */ |
239 | struct net_device *netdev; | 241 | struct net_device *netdev; |
@@ -258,7 +260,7 @@ struct e1000_adapter { | |||
258 | u32 wol; | 260 | u32 wol; |
259 | u32 pba; | 261 | u32 pba; |
260 | 262 | ||
261 | u8 fc_autoneg; | 263 | bool fc_autoneg; |
262 | 264 | ||
263 | unsigned long led_status; | 265 | unsigned long led_status; |
264 | 266 | ||
@@ -305,6 +307,7 @@ struct e1000_info { | |||
305 | #define FLAG_MSI_ENABLED (1 << 27) | 307 | #define FLAG_MSI_ENABLED (1 << 27) |
306 | #define FLAG_RX_CSUM_ENABLED (1 << 28) | 308 | #define FLAG_RX_CSUM_ENABLED (1 << 28) |
307 | #define FLAG_TSO_FORCE (1 << 29) | 309 | #define FLAG_TSO_FORCE (1 << 29) |
310 | #define FLAG_RX_RESTART_NOW (1 << 30) | ||
308 | 311 | ||
309 | #define E1000_RX_DESC_PS(R, i) \ | 312 | #define E1000_RX_DESC_PS(R, i) \ |
310 | (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) | 313 | (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) |
@@ -387,9 +390,11 @@ extern s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw); | |||
387 | extern s32 e1000e_setup_link(struct e1000_hw *hw); | 390 | extern s32 e1000e_setup_link(struct e1000_hw *hw); |
388 | extern void e1000e_clear_vfta(struct e1000_hw *hw); | 391 | extern void e1000e_clear_vfta(struct e1000_hw *hw); |
389 | extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); | 392 | extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); |
390 | extern void e1000e_mc_addr_list_update_generic(struct e1000_hw *hw, | 393 | extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, |
391 | u8 *mc_addr_list, u32 mc_addr_count, | 394 | u8 *mc_addr_list, |
392 | u32 rar_used_count, u32 rar_count); | 395 | u32 mc_addr_count, |
396 | u32 rar_used_count, | ||
397 | u32 rar_count); | ||
393 | extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); | 398 | extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); |
394 | extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw); | 399 | extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw); |
395 | extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop); | 400 | extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop); |
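The e1000.h hunk above grows the adapter flag word with FLAG_RX_RESTART_NOW alongside the existing single-bit flags. Testing, setting and clearing such a flag word is plain bit arithmetic; the snippet below is a stand-alone illustration using the flag values shown in this hunk, not the driver's actual watchdog logic:

```c
#include <stdio.h>
#include <stdint.h>

#define FLAG_MSI_ENABLED	(1 << 27)
#define FLAG_RX_CSUM_ENABLED	(1 << 28)
#define FLAG_TSO_FORCE		(1 << 29)
#define FLAG_RX_RESTART_NOW	(1 << 30)

int main(void)
{
	/* Stand-in for adapter->flags in struct e1000_adapter. */
	uint32_t flags = FLAG_MSI_ENABLED | FLAG_RX_CSUM_ENABLED;

	flags |= FLAG_RX_RESTART_NOW;          /* request a receive-path restart */

	if (flags & FLAG_RX_RESTART_NOW) {
		printf("restarting Rx path\n");
		flags &= ~FLAG_RX_RESTART_NOW; /* one-shot flag: clear once handled */
	}

	printf("flags now 0x%08x\n", flags);
	return 0;
}
```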
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c index 88657adf965f..e3f4aeefeae2 100644 --- a/drivers/net/e1000e/es2lan.c +++ b/drivers/net/e1000e/es2lan.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2007 Intel Corporation. | 4 | Copyright(c) 1999 - 2008 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -92,7 +92,8 @@ | |||
92 | /* In-Band Control Register (Page 194, Register 18) */ | 92 | /* In-Band Control Register (Page 194, Register 18) */ |
93 | #define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */ | 93 | #define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */ |
94 | 94 | ||
95 | /* A table for the GG82563 cable length where the range is defined | 95 | /* |
96 | * A table for the GG82563 cable length where the range is defined | ||
96 | * with a lower bound at "index" and the upper bound at | 97 | * with a lower bound at "index" and the upper bound at |
97 | * "index + 5". | 98 | * "index + 5". |
98 | */ | 99 | */ |
@@ -118,7 +119,7 @@ static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw) | |||
118 | struct e1000_phy_info *phy = &hw->phy; | 119 | struct e1000_phy_info *phy = &hw->phy; |
119 | s32 ret_val; | 120 | s32 ret_val; |
120 | 121 | ||
121 | if (hw->media_type != e1000_media_type_copper) { | 122 | if (hw->phy.media_type != e1000_media_type_copper) { |
122 | phy->type = e1000_phy_none; | 123 | phy->type = e1000_phy_none; |
123 | return 0; | 124 | return 0; |
124 | } | 125 | } |
@@ -167,12 +168,13 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw) | |||
167 | break; | 168 | break; |
168 | } | 169 | } |
169 | 170 | ||
170 | nvm->type = e1000_nvm_eeprom_spi; | 171 | nvm->type = e1000_nvm_eeprom_spi; |
171 | 172 | ||
172 | size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> | 173 | size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> |
173 | E1000_EECD_SIZE_EX_SHIFT); | 174 | E1000_EECD_SIZE_EX_SHIFT); |
174 | 175 | ||
175 | /* Added to a constant, "size" becomes the left-shift value | 176 | /* |
177 | * Added to a constant, "size" becomes the left-shift value | ||
176 | * for setting word_size. | 178 | * for setting word_size. |
177 | */ | 179 | */ |
178 | size += NVM_WORD_SIZE_BASE_SHIFT; | 180 | size += NVM_WORD_SIZE_BASE_SHIFT; |
@@ -196,10 +198,10 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter) | |||
196 | /* Set media type */ | 198 | /* Set media type */ |
197 | switch (adapter->pdev->device) { | 199 | switch (adapter->pdev->device) { |
198 | case E1000_DEV_ID_80003ES2LAN_SERDES_DPT: | 200 | case E1000_DEV_ID_80003ES2LAN_SERDES_DPT: |
199 | hw->media_type = e1000_media_type_internal_serdes; | 201 | hw->phy.media_type = e1000_media_type_internal_serdes; |
200 | break; | 202 | break; |
201 | default: | 203 | default: |
202 | hw->media_type = e1000_media_type_copper; | 204 | hw->phy.media_type = e1000_media_type_copper; |
203 | break; | 205 | break; |
204 | } | 206 | } |
205 | 207 | ||
@@ -208,11 +210,10 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter) | |||
208 | /* Set rar entry count */ | 210 | /* Set rar entry count */ |
209 | mac->rar_entry_count = E1000_RAR_ENTRIES; | 211 | mac->rar_entry_count = E1000_RAR_ENTRIES; |
210 | /* Set if manageability features are enabled. */ | 212 | /* Set if manageability features are enabled. */ |
211 | mac->arc_subsystem_valid = | 213 | mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK) ? 1 : 0; |
212 | (er32(FWSM) & E1000_FWSM_MODE_MASK) ? 1 : 0; | ||
213 | 214 | ||
214 | /* check for link */ | 215 | /* check for link */ |
215 | switch (hw->media_type) { | 216 | switch (hw->phy.media_type) { |
216 | case e1000_media_type_copper: | 217 | case e1000_media_type_copper: |
217 | func->setup_physical_interface = e1000_setup_copper_link_80003es2lan; | 218 | func->setup_physical_interface = e1000_setup_copper_link_80003es2lan; |
218 | func->check_for_link = e1000e_check_for_copper_link; | 219 | func->check_for_link = e1000e_check_for_copper_link; |
@@ -344,8 +345,10 @@ static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask) | |||
344 | if (!(swfw_sync & (fwmask | swmask))) | 345 | if (!(swfw_sync & (fwmask | swmask))) |
345 | break; | 346 | break; |
346 | 347 | ||
347 | /* Firmware currently using resource (fwmask) | 348 | /* |
348 | * or other software thread using resource (swmask) */ | 349 | * Firmware currently using resource (fwmask) |
350 | * or other software thread using resource (swmask) | ||
351 | */ | ||
349 | e1000e_put_hw_semaphore(hw); | 352 | e1000e_put_hw_semaphore(hw); |
350 | mdelay(5); | 353 | mdelay(5); |
351 | i++; | 354 | i++; |
@@ -407,7 +410,8 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, | |||
407 | if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) | 410 | if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) |
408 | page_select = GG82563_PHY_PAGE_SELECT; | 411 | page_select = GG82563_PHY_PAGE_SELECT; |
409 | else | 412 | else |
410 | /* Use Alternative Page Select register to access | 413 | /* |
414 | * Use Alternative Page Select register to access | ||
411 | * registers 30 and 31 | 415 | * registers 30 and 31 |
412 | */ | 416 | */ |
413 | page_select = GG82563_PHY_PAGE_SELECT_ALT; | 417 | page_select = GG82563_PHY_PAGE_SELECT_ALT; |
@@ -417,7 +421,8 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, | |||
417 | if (ret_val) | 421 | if (ret_val) |
418 | return ret_val; | 422 | return ret_val; |
419 | 423 | ||
420 | /* The "ready" bit in the MDIC register may be incorrectly set | 424 | /* |
425 | * The "ready" bit in the MDIC register may be incorrectly set | ||
421 | * before the device has completed the "Page Select" MDI | 426 | * before the device has completed the "Page Select" MDI |
422 | * transaction. So we wait 200us after each MDI command... | 427 | * transaction. So we wait 200us after each MDI command... |
423 | */ | 428 | */ |
@@ -462,7 +467,8 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, | |||
462 | if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) | 467 | if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) |
463 | page_select = GG82563_PHY_PAGE_SELECT; | 468 | page_select = GG82563_PHY_PAGE_SELECT; |
464 | else | 469 | else |
465 | /* Use Alternative Page Select register to access | 470 | /* |
471 | * Use Alternative Page Select register to access | ||
466 | * registers 30 and 31 | 472 | * registers 30 and 31 |
467 | */ | 473 | */ |
468 | page_select = GG82563_PHY_PAGE_SELECT_ALT; | 474 | page_select = GG82563_PHY_PAGE_SELECT_ALT; |
@@ -473,7 +479,8 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, | |||
473 | return ret_val; | 479 | return ret_val; |
474 | 480 | ||
475 | 481 | ||
476 | /* The "ready" bit in the MDIC register may be incorrectly set | 482 | /* |
483 | * The "ready" bit in the MDIC register may be incorrectly set | ||
477 | * before the device has completed the "Page Select" MDI | 484 | * before the device has completed the "Page Select" MDI |
478 | * transaction. So we wait 200us after each MDI command... | 485 | * transaction. So we wait 200us after each MDI command... |
479 | */ | 486 | */ |
@@ -554,7 +561,8 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw) | |||
554 | u16 phy_data; | 561 | u16 phy_data; |
555 | bool link; | 562 | bool link; |
556 | 563 | ||
557 | /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI | 564 | /* |
565 | * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI | ||
558 | * forced whenever speed and duplex are forced. | 566 | * forced whenever speed and duplex are forced. |
559 | */ | 567 | */ |
560 | ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); | 568 | ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); |
@@ -583,7 +591,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw) | |||
583 | 591 | ||
584 | udelay(1); | 592 | udelay(1); |
585 | 593 | ||
586 | if (hw->phy.wait_for_link) { | 594 | if (hw->phy.autoneg_wait_to_complete) { |
587 | hw_dbg(hw, "Waiting for forced speed/duplex link " | 595 | hw_dbg(hw, "Waiting for forced speed/duplex link " |
588 | "on GG82563 phy.\n"); | 596 | "on GG82563 phy.\n"); |
589 | 597 | ||
@@ -593,7 +601,8 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw) | |||
593 | return ret_val; | 601 | return ret_val; |
594 | 602 | ||
595 | if (!link) { | 603 | if (!link) { |
596 | /* We didn't get link. | 604 | /* |
605 | * We didn't get link. | ||
597 | * Reset the DSP and cross our fingers. | 606 | * Reset the DSP and cross our fingers. |
598 | */ | 607 | */ |
599 | ret_val = e1000e_phy_reset_dsp(hw); | 608 | ret_val = e1000e_phy_reset_dsp(hw); |
@@ -612,7 +621,8 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw) | |||
612 | if (ret_val) | 621 | if (ret_val) |
613 | return ret_val; | 622 | return ret_val; |
614 | 623 | ||
615 | /* Resetting the phy means we need to verify the TX_CLK corresponds | 624 | /* |
625 | * Resetting the phy means we need to verify the TX_CLK corresponds | ||
616 | * to the link speed. 10Mbps -> 2.5MHz, else 25MHz. | 626 | * to the link speed. 10Mbps -> 2.5MHz, else 25MHz. |
617 | */ | 627 | */ |
618 | phy_data &= ~GG82563_MSCR_TX_CLK_MASK; | 628 | phy_data &= ~GG82563_MSCR_TX_CLK_MASK; |
@@ -621,7 +631,8 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw) | |||
621 | else | 631 | else |
622 | phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25; | 632 | phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25; |
623 | 633 | ||
624 | /* In addition, we must re-enable CRS on Tx for both half and full | 634 | /* |
635 | * In addition, we must re-enable CRS on Tx for both half and full | ||
625 | * duplex. | 636 | * duplex. |
626 | */ | 637 | */ |
627 | phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX; | 638 | phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX; |
@@ -671,7 +682,7 @@ static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed, | |||
671 | { | 682 | { |
672 | s32 ret_val; | 683 | s32 ret_val; |
673 | 684 | ||
674 | if (hw->media_type == e1000_media_type_copper) { | 685 | if (hw->phy.media_type == e1000_media_type_copper) { |
675 | ret_val = e1000e_get_speed_and_duplex_copper(hw, | 686 | ret_val = e1000e_get_speed_and_duplex_copper(hw, |
676 | speed, | 687 | speed, |
677 | duplex); | 688 | duplex); |
@@ -704,7 +715,8 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) | |||
704 | u32 icr; | 715 | u32 icr; |
705 | s32 ret_val; | 716 | s32 ret_val; |
706 | 717 | ||
707 | /* Prevent the PCI-E bus from sticking if there is no TLP connection | 718 | /* |
719 | * Prevent the PCI-E bus from sticking if there is no TLP connection | ||
708 | * on the last TLP read/write transaction when MAC is reset. | 720 | * on the last TLP read/write transaction when MAC is reset. |
709 | */ | 721 | */ |
710 | ret_val = e1000e_disable_pcie_master(hw); | 722 | ret_val = e1000e_disable_pcie_master(hw); |
@@ -808,7 +820,8 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) | |||
808 | reg_data &= ~0x00100000; | 820 | reg_data &= ~0x00100000; |
809 | E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data); | 821 | E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data); |
810 | 822 | ||
811 | /* Clear all of the statistics registers (clear on read). It is | 823 | /* |
824 | * Clear all of the statistics registers (clear on read). It is | ||
812 | * important that we do this after we have tried to establish link | 825 | * important that we do this after we have tried to establish link |
813 | * because the symbol error count will increment wildly if there | 826 | * because the symbol error count will increment wildly if there |
814 | * is no link. | 827 | * is no link. |
@@ -841,7 +854,7 @@ static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw) | |||
841 | /* Transmit Arbitration Control 0 */ | 854 | /* Transmit Arbitration Control 0 */ |
842 | reg = er32(TARC0); | 855 | reg = er32(TARC0); |
843 | reg &= ~(0xF << 27); /* 30:27 */ | 856 | reg &= ~(0xF << 27); /* 30:27 */ |
844 | if (hw->media_type != e1000_media_type_copper) | 857 | if (hw->phy.media_type != e1000_media_type_copper) |
845 | reg &= ~(1 << 20); | 858 | reg &= ~(1 << 20); |
846 | ew32(TARC0, reg); | 859 | ew32(TARC0, reg); |
847 | 860 | ||
@@ -881,7 +894,8 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw) | |||
881 | if (ret_val) | 894 | if (ret_val) |
882 | return ret_val; | 895 | return ret_val; |
883 | 896 | ||
884 | /* Options: | 897 | /* |
898 | * Options: | ||
885 | * MDI/MDI-X = 0 (default) | 899 | * MDI/MDI-X = 0 (default) |
886 | * 0 - Auto for all speeds | 900 | * 0 - Auto for all speeds |
887 | * 1 - MDI mode | 901 | * 1 - MDI mode |
@@ -907,7 +921,8 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw) | |||
907 | break; | 921 | break; |
908 | } | 922 | } |
909 | 923 | ||
910 | /* Options: | 924 | /* |
925 | * Options: | ||
911 | * disable_polarity_correction = 0 (default) | 926 | * disable_polarity_correction = 0 (default) |
912 | * Automatic Correction for Reversed Cable Polarity | 927 | * Automatic Correction for Reversed Cable Polarity |
913 | * 0 - Disabled | 928 | * 0 - Disabled |
@@ -928,10 +943,9 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw) | |||
928 | return ret_val; | 943 | return ret_val; |
929 | } | 944 | } |
930 | 945 | ||
931 | /* Bypass RX and TX FIFO's */ | 946 | /* Bypass Rx and Tx FIFO's */ |
932 | ret_val = e1000e_write_kmrn_reg(hw, | 947 | ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL, |
933 | E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL, | 948 | E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS | |
934 | E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS | | ||
935 | E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS); | 949 | E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS); |
936 | if (ret_val) | 950 | if (ret_val) |
937 | return ret_val; | 951 | return ret_val; |
@@ -953,7 +967,8 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw) | |||
953 | if (ret_val) | 967 | if (ret_val) |
954 | return ret_val; | 968 | return ret_val; |
955 | 969 | ||
956 | /* Do not init these registers when the HW is in IAMT mode, since the | 970 | /* |
971 | * Do not init these registers when the HW is in IAMT mode, since the | ||
957 | * firmware will have already initialized them. We only initialize | 972 | * firmware will have already initialized them. We only initialize |
958 | * them if the HW is not in IAMT mode. | 973 | * them if the HW is not in IAMT mode. |
959 | */ | 974 | */ |
@@ -974,7 +989,8 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw) | |||
974 | return ret_val; | 989 | return ret_val; |
975 | } | 990 | } |
976 | 991 | ||
977 | /* Workaround: Disable padding in Kumeran interface in the MAC | 992 | /* |
993 | * Workaround: Disable padding in Kumeran interface in the MAC | ||
978 | * and in the PHY to avoid CRC errors. | 994 | * and in the PHY to avoid CRC errors. |
979 | */ | 995 | */ |
980 | ret_val = e1e_rphy(hw, GG82563_PHY_INBAND_CTRL, &data); | 996 | ret_val = e1e_rphy(hw, GG82563_PHY_INBAND_CTRL, &data); |
@@ -1007,9 +1023,11 @@ static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw) | |||
1007 | ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); | 1023 | ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); |
1008 | ew32(CTRL, ctrl); | 1024 | ew32(CTRL, ctrl); |
1009 | 1025 | ||
1010 | /* Set the mac to wait the maximum time between each | 1026 | /* |
1027 | * Set the mac to wait the maximum time between each | ||
1011 | * iteration and increase the max iterations when | 1028 | * iteration and increase the max iterations when |
1012 | * polling the phy; this fixes erroneous timeouts at 10Mbps. */ | 1029 | * polling the phy; this fixes erroneous timeouts at 10Mbps. |
1030 | */ | ||
1013 | ret_val = e1000e_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF); | 1031 | ret_val = e1000e_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF); |
1014 | if (ret_val) | 1032 | if (ret_val) |
1015 | return ret_val; | 1033 | return ret_val; |
@@ -1026,9 +1044,8 @@ static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw) | |||
1026 | if (ret_val) | 1044 | if (ret_val) |
1027 | return ret_val; | 1045 | return ret_val; |
1028 | reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING; | 1046 | reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING; |
1029 | ret_val = e1000e_write_kmrn_reg(hw, | 1047 | ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, |
1030 | E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, | 1048 | reg_data); |
1031 | reg_data); | ||
1032 | if (ret_val) | 1049 | if (ret_val) |
1033 | return ret_val; | 1050 | return ret_val; |
1034 | 1051 | ||
@@ -1056,9 +1073,8 @@ static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex) | |||
1056 | u16 reg_data; | 1073 | u16 reg_data; |
1057 | 1074 | ||
1058 | reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT; | 1075 | reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT; |
1059 | ret_val = e1000e_write_kmrn_reg(hw, | 1076 | ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, |
1060 | E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, | 1077 | reg_data); |
1061 | reg_data); | ||
1062 | if (ret_val) | 1078 | if (ret_val) |
1063 | return ret_val; | 1079 | return ret_val; |
1064 | 1080 | ||
@@ -1096,9 +1112,8 @@ static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw) | |||
1096 | u32 tipg; | 1112 | u32 tipg; |
1097 | 1113 | ||
1098 | reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT; | 1114 | reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT; |
1099 | ret_val = e1000e_write_kmrn_reg(hw, | 1115 | ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, |
1100 | E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, | 1116 | reg_data); |
1101 | reg_data); | ||
1102 | if (ret_val) | 1117 | if (ret_val) |
1103 | return ret_val; | 1118 | return ret_val; |
1104 | 1119 | ||
@@ -1175,7 +1190,7 @@ static struct e1000_mac_operations es2_mac_ops = { | |||
1175 | .get_link_up_info = e1000_get_link_up_info_80003es2lan, | 1190 | .get_link_up_info = e1000_get_link_up_info_80003es2lan, |
1176 | .led_on = e1000e_led_on_generic, | 1191 | .led_on = e1000e_led_on_generic, |
1177 | .led_off = e1000e_led_off_generic, | 1192 | .led_off = e1000e_led_off_generic, |
1178 | .mc_addr_list_update = e1000e_mc_addr_list_update_generic, | 1193 | .update_mc_addr_list = e1000e_update_mc_addr_list_generic, |
1179 | .reset_hw = e1000_reset_hw_80003es2lan, | 1194 | .reset_hw = e1000_reset_hw_80003es2lan, |
1180 | .init_hw = e1000_init_hw_80003es2lan, | 1195 | .init_hw = e1000_init_hw_80003es2lan, |
1181 | .setup_link = e1000e_setup_link, | 1196 | .setup_link = e1000e_setup_link, |
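The es2lan hunks above compact the multi-line e1000e_write_kmrn_reg() call sites and rename the multicast hook from mc_addr_list_update to update_mc_addr_list. A minimal sketch of calling the renamed hook through the ops table follows; the wrapper and parameter names are illustrative assumptions, only the member name and the generic implementation it is wired to appear in the diff.

    /*
     * Sketch only: the hunk shows the rename and that es2lan wires the hook
     * to e1000e_update_mc_addr_list_generic(); the argument names here are
     * assumed for illustration.
     */
    static void example_update_mc_addr_list(struct e1000_hw *hw,
                                            u8 *mc_addr_list, u32 mc_addr_count,
                                            u32 rar_used_count, u32 rar_count)
    {
            if (hw->mac.ops.update_mc_addr_list)
                    hw->mac.ops.update_mc_addr_list(hw, mc_addr_list,
                                                    mc_addr_count,
                                                    rar_used_count, rar_count);
    }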
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c index f77a7427d3a0..4ae00567bba6 100644 --- a/drivers/net/e1000e/ethtool.c +++ b/drivers/net/e1000e/ethtool.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2007 Intel Corporation. | 4 | Copyright(c) 1999 - 2008 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -102,7 +102,7 @@ static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = { | |||
102 | "Interrupt test (offline)", "Loopback test (offline)", | 102 | "Interrupt test (offline)", "Loopback test (offline)", |
103 | "Link test (on/offline)" | 103 | "Link test (on/offline)" |
104 | }; | 104 | }; |
105 | #define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test) | 105 | #define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test) |
106 | 106 | ||
107 | static int e1000_get_settings(struct net_device *netdev, | 107 | static int e1000_get_settings(struct net_device *netdev, |
108 | struct ethtool_cmd *ecmd) | 108 | struct ethtool_cmd *ecmd) |
@@ -111,7 +111,7 @@ static int e1000_get_settings(struct net_device *netdev, | |||
111 | struct e1000_hw *hw = &adapter->hw; | 111 | struct e1000_hw *hw = &adapter->hw; |
112 | u32 status; | 112 | u32 status; |
113 | 113 | ||
114 | if (hw->media_type == e1000_media_type_copper) { | 114 | if (hw->phy.media_type == e1000_media_type_copper) { |
115 | 115 | ||
116 | ecmd->supported = (SUPPORTED_10baseT_Half | | 116 | ecmd->supported = (SUPPORTED_10baseT_Half | |
117 | SUPPORTED_10baseT_Full | | 117 | SUPPORTED_10baseT_Full | |
@@ -165,7 +165,7 @@ static int e1000_get_settings(struct net_device *netdev, | |||
165 | ecmd->duplex = -1; | 165 | ecmd->duplex = -1; |
166 | } | 166 | } |
167 | 167 | ||
168 | ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) || | 168 | ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) || |
169 | hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE; | 169 | hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE; |
170 | return 0; | 170 | return 0; |
171 | } | 171 | } |
@@ -187,7 +187,7 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) | |||
187 | mac->autoneg = 0; | 187 | mac->autoneg = 0; |
188 | 188 | ||
189 | /* Fiber NICs only allow 1000 Mbps Full duplex */ | 189 | /* Fiber NICs only allow 1000 Mbps Full duplex */ |
190 | if ((adapter->hw.media_type == e1000_media_type_fiber) && | 190 | if ((adapter->hw.phy.media_type == e1000_media_type_fiber) && |
191 | spddplx != (SPEED_1000 + DUPLEX_FULL)) { | 191 | spddplx != (SPEED_1000 + DUPLEX_FULL)) { |
192 | ndev_err(adapter->netdev, "Unsupported Speed/Duplex " | 192 | ndev_err(adapter->netdev, "Unsupported Speed/Duplex " |
193 | "configuration\n"); | 193 | "configuration\n"); |
@@ -226,8 +226,10 @@ static int e1000_set_settings(struct net_device *netdev, | |||
226 | struct e1000_adapter *adapter = netdev_priv(netdev); | 226 | struct e1000_adapter *adapter = netdev_priv(netdev); |
227 | struct e1000_hw *hw = &adapter->hw; | 227 | struct e1000_hw *hw = &adapter->hw; |
228 | 228 | ||
229 | /* When SoL/IDER sessions are active, autoneg/speed/duplex | 229 | /* |
230 | * cannot be changed */ | 230 | * When SoL/IDER sessions are active, autoneg/speed/duplex |
231 | * cannot be changed | ||
232 | */ | ||
231 | if (e1000_check_reset_block(hw)) { | 233 | if (e1000_check_reset_block(hw)) { |
232 | ndev_err(netdev, "Cannot change link " | 234 | ndev_err(netdev, "Cannot change link " |
233 | "characteristics when SoL/IDER is active.\n"); | 235 | "characteristics when SoL/IDER is active.\n"); |
@@ -239,7 +241,7 @@ static int e1000_set_settings(struct net_device *netdev, | |||
239 | 241 | ||
240 | if (ecmd->autoneg == AUTONEG_ENABLE) { | 242 | if (ecmd->autoneg == AUTONEG_ENABLE) { |
241 | hw->mac.autoneg = 1; | 243 | hw->mac.autoneg = 1; |
242 | if (hw->media_type == e1000_media_type_fiber) | 244 | if (hw->phy.media_type == e1000_media_type_fiber) |
243 | hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full | | 245 | hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full | |
244 | ADVERTISED_FIBRE | | 246 | ADVERTISED_FIBRE | |
245 | ADVERTISED_Autoneg; | 247 | ADVERTISED_Autoneg; |
@@ -248,6 +250,8 @@ static int e1000_set_settings(struct net_device *netdev, | |||
248 | ADVERTISED_TP | | 250 | ADVERTISED_TP | |
249 | ADVERTISED_Autoneg; | 251 | ADVERTISED_Autoneg; |
250 | ecmd->advertising = hw->phy.autoneg_advertised; | 252 | ecmd->advertising = hw->phy.autoneg_advertised; |
253 | if (adapter->fc_autoneg) | ||
254 | hw->fc.original_type = e1000_fc_default; | ||
251 | } else { | 255 | } else { |
252 | if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) { | 256 | if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) { |
253 | clear_bit(__E1000_RESETTING, &adapter->state); | 257 | clear_bit(__E1000_RESETTING, &adapter->state); |
@@ -277,11 +281,11 @@ static void e1000_get_pauseparam(struct net_device *netdev, | |||
277 | pause->autoneg = | 281 | pause->autoneg = |
278 | (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); | 282 | (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); |
279 | 283 | ||
280 | if (hw->mac.fc == e1000_fc_rx_pause) { | 284 | if (hw->fc.type == e1000_fc_rx_pause) { |
281 | pause->rx_pause = 1; | 285 | pause->rx_pause = 1; |
282 | } else if (hw->mac.fc == e1000_fc_tx_pause) { | 286 | } else if (hw->fc.type == e1000_fc_tx_pause) { |
283 | pause->tx_pause = 1; | 287 | pause->tx_pause = 1; |
284 | } else if (hw->mac.fc == e1000_fc_full) { | 288 | } else if (hw->fc.type == e1000_fc_full) { |
285 | pause->rx_pause = 1; | 289 | pause->rx_pause = 1; |
286 | pause->tx_pause = 1; | 290 | pause->tx_pause = 1; |
287 | } | 291 | } |
@@ -300,18 +304,18 @@ static int e1000_set_pauseparam(struct net_device *netdev, | |||
300 | msleep(1); | 304 | msleep(1); |
301 | 305 | ||
302 | if (pause->rx_pause && pause->tx_pause) | 306 | if (pause->rx_pause && pause->tx_pause) |
303 | hw->mac.fc = e1000_fc_full; | 307 | hw->fc.type = e1000_fc_full; |
304 | else if (pause->rx_pause && !pause->tx_pause) | 308 | else if (pause->rx_pause && !pause->tx_pause) |
305 | hw->mac.fc = e1000_fc_rx_pause; | 309 | hw->fc.type = e1000_fc_rx_pause; |
306 | else if (!pause->rx_pause && pause->tx_pause) | 310 | else if (!pause->rx_pause && pause->tx_pause) |
307 | hw->mac.fc = e1000_fc_tx_pause; | 311 | hw->fc.type = e1000_fc_tx_pause; |
308 | else if (!pause->rx_pause && !pause->tx_pause) | 312 | else if (!pause->rx_pause && !pause->tx_pause) |
309 | hw->mac.fc = e1000_fc_none; | 313 | hw->fc.type = e1000_fc_none; |
310 | 314 | ||
311 | hw->mac.original_fc = hw->mac.fc; | 315 | hw->fc.original_type = hw->fc.type; |
312 | 316 | ||
313 | if (adapter->fc_autoneg == AUTONEG_ENABLE) { | 317 | if (adapter->fc_autoneg == AUTONEG_ENABLE) { |
314 | hw->mac.fc = e1000_fc_default; | 318 | hw->fc.type = e1000_fc_default; |
315 | if (netif_running(adapter->netdev)) { | 319 | if (netif_running(adapter->netdev)) { |
316 | e1000e_down(adapter); | 320 | e1000e_down(adapter); |
317 | e1000e_up(adapter); | 321 | e1000e_up(adapter); |
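The pause-parameter hunks move the flow-control state from hw->mac.fc / hw->mac.original_fc to hw->fc.type / hw->fc.original_type. Below is a minimal sketch of the rx/tx-pause mapping performed above, pulled into a stand-alone helper purely for illustration; the helper name is hypothetical, the fields and enum values are the ones in the diff.

    static void example_map_pauseparam(struct e1000_hw *hw,
                                       struct ethtool_pauseparam *pause)
    {
            if (pause->rx_pause && pause->tx_pause)
                    hw->fc.type = e1000_fc_full;
            else if (pause->rx_pause)
                    hw->fc.type = e1000_fc_rx_pause;
            else if (pause->tx_pause)
                    hw->fc.type = e1000_fc_tx_pause;
            else
                    hw->fc.type = e1000_fc_none;

            /* remember the requested mode before autoneg may override it */
            hw->fc.original_type = hw->fc.type;
    }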
@@ -319,7 +323,7 @@ static int e1000_set_pauseparam(struct net_device *netdev, | |||
319 | e1000e_reset(adapter); | 323 | e1000e_reset(adapter); |
320 | } | 324 | } |
321 | } else { | 325 | } else { |
322 | retval = ((hw->media_type == e1000_media_type_fiber) ? | 326 | retval = ((hw->phy.media_type == e1000_media_type_fiber) ? |
323 | hw->mac.ops.setup_link(hw) : e1000e_force_mac_fc(hw)); | 327 | hw->mac.ops.setup_link(hw) : e1000e_force_mac_fc(hw)); |
324 | } | 328 | } |
325 | 329 | ||
@@ -558,8 +562,10 @@ static int e1000_set_eeprom(struct net_device *netdev, | |||
558 | ret_val = e1000_write_nvm(hw, first_word, | 562 | ret_val = e1000_write_nvm(hw, first_word, |
559 | last_word - first_word + 1, eeprom_buff); | 563 | last_word - first_word + 1, eeprom_buff); |
560 | 564 | ||
561 | /* Update the checksum over the first part of the EEPROM if needed | 565 | /* |
562 | * and flush shadow RAM for 82573 controllers */ | 566 | * Update the checksum over the first part of the EEPROM if needed |
567 | * and flush shadow RAM for 82573 controllers | ||
568 | */ | ||
563 | if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG) || | 569 | if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG) || |
564 | (hw->mac.type == e1000_82573))) | 570 | (hw->mac.type == e1000_82573))) |
565 | e1000e_update_nvm_checksum(hw); | 571 | e1000e_update_nvm_checksum(hw); |
@@ -578,8 +584,10 @@ static void e1000_get_drvinfo(struct net_device *netdev, | |||
578 | strncpy(drvinfo->driver, e1000e_driver_name, 32); | 584 | strncpy(drvinfo->driver, e1000e_driver_name, 32); |
579 | strncpy(drvinfo->version, e1000e_driver_version, 32); | 585 | strncpy(drvinfo->version, e1000e_driver_version, 32); |
580 | 586 | ||
581 | /* EEPROM image version # is reported as firmware version # for | 587 | /* |
582 | * PCI-E controllers */ | 588 | * EEPROM image version # is reported as firmware version # for |
589 | * PCI-E controllers | ||
590 | */ | ||
583 | e1000_read_nvm(&adapter->hw, 5, 1, &eeprom_data); | 591 | e1000_read_nvm(&adapter->hw, 5, 1, &eeprom_data); |
584 | sprintf(firmware_version, "%d.%d-%d", | 592 | sprintf(firmware_version, "%d.%d-%d", |
585 | (eeprom_data & 0xF000) >> 12, | 593 | (eeprom_data & 0xF000) >> 12, |
@@ -658,8 +666,10 @@ static int e1000_set_ringparam(struct net_device *netdev, | |||
658 | if (err) | 666 | if (err) |
659 | goto err_setup_tx; | 667 | goto err_setup_tx; |
660 | 668 | ||
661 | /* save the new, restore the old in order to free it, | 669 | /* |
662 | * then restore the new back again */ | 670 | * restore the old in order to free it, |
671 | * then add in the new | ||
672 | */ | ||
663 | adapter->rx_ring = rx_old; | 673 | adapter->rx_ring = rx_old; |
664 | adapter->tx_ring = tx_old; | 674 | adapter->tx_ring = tx_old; |
665 | e1000e_free_rx_resources(adapter); | 675 | e1000e_free_rx_resources(adapter); |
@@ -758,7 +768,8 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) | |||
758 | u32 i; | 768 | u32 i; |
759 | u32 toggle; | 769 | u32 toggle; |
760 | 770 | ||
761 | /* The status register is Read Only, so a write should fail. | 771 | /* |
772 | * The status register is Read Only, so a write should fail. | ||
762 | * Some bits that get toggled are ignored. | 773 | * Some bits that get toggled are ignored. |
763 | */ | 774 | */ |
764 | switch (mac->type) { | 775 | switch (mac->type) { |
@@ -908,7 +919,8 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) | |||
908 | mask = 1 << i; | 919 | mask = 1 << i; |
909 | 920 | ||
910 | if (!shared_int) { | 921 | if (!shared_int) { |
911 | /* Disable the interrupt to be reported in | 922 | /* |
923 | * Disable the interrupt to be reported in | ||
912 | * the cause register and then force the same | 924 | * the cause register and then force the same |
913 | * interrupt and see if one gets posted. If | 925 | * interrupt and see if one gets posted. If |
914 | * an interrupt was posted to the bus, the | 926 | * an interrupt was posted to the bus, the |
@@ -925,7 +937,8 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) | |||
925 | } | 937 | } |
926 | } | 938 | } |
927 | 939 | ||
928 | /* Enable the interrupt to be reported in | 940 | /* |
941 | * Enable the interrupt to be reported in | ||
929 | * the cause register and then force the same | 942 | * the cause register and then force the same |
930 | * interrupt and see if one gets posted. If | 943 | * interrupt and see if one gets posted. If |
931 | * an interrupt was not posted to the bus, the | 944 | * an interrupt was not posted to the bus, the |
@@ -942,7 +955,8 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) | |||
942 | } | 955 | } |
943 | 956 | ||
944 | if (!shared_int) { | 957 | if (!shared_int) { |
945 | /* Disable the other interrupts to be reported in | 958 | /* |
959 | * Disable the other interrupts to be reported in | ||
946 | * the cause register and then force the other | 960 | * the cause register and then force the other |
947 | * interrupts and see if any get posted. If | 961 | * interrupts and see if any get posted. If |
948 | * an interrupt was posted to the bus, the | 962 | * an interrupt was posted to the bus, the |
@@ -1175,21 +1189,21 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) | |||
1175 | u32 ctrl_reg = 0; | 1189 | u32 ctrl_reg = 0; |
1176 | u32 stat_reg = 0; | 1190 | u32 stat_reg = 0; |
1177 | 1191 | ||
1178 | adapter->hw.mac.autoneg = 0; | 1192 | hw->mac.autoneg = 0; |
1179 | 1193 | ||
1180 | if (adapter->hw.phy.type == e1000_phy_m88) { | 1194 | if (hw->phy.type == e1000_phy_m88) { |
1181 | /* Auto-MDI/MDIX Off */ | 1195 | /* Auto-MDI/MDIX Off */ |
1182 | e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); | 1196 | e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); |
1183 | /* reset to update Auto-MDI/MDIX */ | 1197 | /* reset to update Auto-MDI/MDIX */ |
1184 | e1e_wphy(hw, PHY_CONTROL, 0x9140); | 1198 | e1e_wphy(hw, PHY_CONTROL, 0x9140); |
1185 | /* autoneg off */ | 1199 | /* autoneg off */ |
1186 | e1e_wphy(hw, PHY_CONTROL, 0x8140); | 1200 | e1e_wphy(hw, PHY_CONTROL, 0x8140); |
1187 | } else if (adapter->hw.phy.type == e1000_phy_gg82563) | 1201 | } else if (hw->phy.type == e1000_phy_gg82563) |
1188 | e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC); | 1202 | e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC); |
1189 | 1203 | ||
1190 | ctrl_reg = er32(CTRL); | 1204 | ctrl_reg = er32(CTRL); |
1191 | 1205 | ||
1192 | if (adapter->hw.phy.type == e1000_phy_ife) { | 1206 | if (hw->phy.type == e1000_phy_ife) { |
1193 | /* force 100, set loopback */ | 1207 | /* force 100, set loopback */ |
1194 | e1e_wphy(hw, PHY_CONTROL, 0x6100); | 1208 | e1e_wphy(hw, PHY_CONTROL, 0x6100); |
1195 | 1209 | ||
@@ -1212,12 +1226,14 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) | |||
1212 | E1000_CTRL_FD); /* Force Duplex to FULL */ | 1226 | E1000_CTRL_FD); /* Force Duplex to FULL */ |
1213 | } | 1227 | } |
1214 | 1228 | ||
1215 | if (adapter->hw.media_type == e1000_media_type_copper && | 1229 | if (hw->phy.media_type == e1000_media_type_copper && |
1216 | adapter->hw.phy.type == e1000_phy_m88) { | 1230 | hw->phy.type == e1000_phy_m88) { |
1217 | ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ | 1231 | ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ |
1218 | } else { | 1232 | } else { |
1219 | /* Set the ILOS bit on the fiber Nic if half duplex link is | 1233 | /* |
1220 | * detected. */ | 1234 | * Set the ILOS bit on the fiber Nic if half duplex link is |
1235 | * detected. | ||
1236 | */ | ||
1221 | stat_reg = er32(STATUS); | 1237 | stat_reg = er32(STATUS); |
1222 | if ((stat_reg & E1000_STATUS_FD) == 0) | 1238 | if ((stat_reg & E1000_STATUS_FD) == 0) |
1223 | ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); | 1239 | ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); |
@@ -1225,10 +1241,11 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) | |||
1225 | 1241 | ||
1226 | ew32(CTRL, ctrl_reg); | 1242 | ew32(CTRL, ctrl_reg); |
1227 | 1243 | ||
1228 | /* Disable the receiver on the PHY so when a cable is plugged in, the | 1244 | /* |
1245 | * Disable the receiver on the PHY so when a cable is plugged in, the | ||
1229 | * PHY does not begin to autoneg when a cable is reconnected to the NIC. | 1246 | * PHY does not begin to autoneg when a cable is reconnected to the NIC. |
1230 | */ | 1247 | */ |
1231 | if (adapter->hw.phy.type == e1000_phy_m88) | 1248 | if (hw->phy.type == e1000_phy_m88) |
1232 | e1000_phy_disable_receiver(adapter); | 1249 | e1000_phy_disable_receiver(adapter); |
1233 | 1250 | ||
1234 | udelay(500); | 1251 | udelay(500); |
@@ -1244,8 +1261,10 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter) | |||
1244 | 1261 | ||
1245 | /* special requirements for 82571/82572 fiber adapters */ | 1262 | /* special requirements for 82571/82572 fiber adapters */ |
1246 | 1263 | ||
1247 | /* jump through hoops to make sure link is up because serdes | 1264 | /* |
1248 | * link is hardwired up */ | 1265 | * jump through hoops to make sure link is up because serdes |
1266 | * link is hardwired up | ||
1267 | */ | ||
1249 | ctrl |= E1000_CTRL_SLU; | 1268 | ctrl |= E1000_CTRL_SLU; |
1250 | ew32(CTRL, ctrl); | 1269 | ew32(CTRL, ctrl); |
1251 | 1270 | ||
@@ -1263,8 +1282,10 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter) | |||
1263 | ew32(CTRL, ctrl); | 1282 | ew32(CTRL, ctrl); |
1264 | } | 1283 | } |
1265 | 1284 | ||
1266 | /* special write to serdes control register to enable SerDes analog | 1285 | /* |
1267 | * loopback */ | 1286 | * special write to serdes control register to enable SerDes analog |
1287 | * loopback | ||
1288 | */ | ||
1268 | #define E1000_SERDES_LB_ON 0x410 | 1289 | #define E1000_SERDES_LB_ON 0x410 |
1269 | ew32(SCTL, E1000_SERDES_LB_ON); | 1290 | ew32(SCTL, E1000_SERDES_LB_ON); |
1270 | msleep(10); | 1291 | msleep(10); |
@@ -1279,8 +1300,10 @@ static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter) | |||
1279 | u32 ctrlext = er32(CTRL_EXT); | 1300 | u32 ctrlext = er32(CTRL_EXT); |
1280 | u32 ctrl = er32(CTRL); | 1301 | u32 ctrl = er32(CTRL); |
1281 | 1302 | ||
1282 | /* save CTRL_EXT to restore later, reuse an empty variable (unused | 1303 | /* |
1283 | on mac_type 80003es2lan) */ | 1304 | * save CTRL_EXT to restore later, reuse an empty variable (unused |
1305 | * on mac_type 80003es2lan) | ||
1306 | */ | ||
1284 | adapter->tx_fifo_head = ctrlext; | 1307 | adapter->tx_fifo_head = ctrlext; |
1285 | 1308 | ||
1286 | /* clear the serdes mode bits, putting the device into mac loopback */ | 1309 | /* clear the serdes mode bits, putting the device into mac loopback */ |
@@ -1312,8 +1335,8 @@ static int e1000_setup_loopback_test(struct e1000_adapter *adapter) | |||
1312 | struct e1000_hw *hw = &adapter->hw; | 1335 | struct e1000_hw *hw = &adapter->hw; |
1313 | u32 rctl; | 1336 | u32 rctl; |
1314 | 1337 | ||
1315 | if (hw->media_type == e1000_media_type_fiber || | 1338 | if (hw->phy.media_type == e1000_media_type_fiber || |
1316 | hw->media_type == e1000_media_type_internal_serdes) { | 1339 | hw->phy.media_type == e1000_media_type_internal_serdes) { |
1317 | switch (hw->mac.type) { | 1340 | switch (hw->mac.type) { |
1318 | case e1000_80003es2lan: | 1341 | case e1000_80003es2lan: |
1319 | return e1000_set_es2lan_mac_loopback(adapter); | 1342 | return e1000_set_es2lan_mac_loopback(adapter); |
@@ -1328,7 +1351,7 @@ static int e1000_setup_loopback_test(struct e1000_adapter *adapter) | |||
1328 | ew32(RCTL, rctl); | 1351 | ew32(RCTL, rctl); |
1329 | return 0; | 1352 | return 0; |
1330 | } | 1353 | } |
1331 | } else if (hw->media_type == e1000_media_type_copper) { | 1354 | } else if (hw->phy.media_type == e1000_media_type_copper) { |
1332 | return e1000_integrated_phy_loopback(adapter); | 1355 | return e1000_integrated_phy_loopback(adapter); |
1333 | } | 1356 | } |
1334 | 1357 | ||
@@ -1347,18 +1370,17 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter) | |||
1347 | 1370 | ||
1348 | switch (hw->mac.type) { | 1371 | switch (hw->mac.type) { |
1349 | case e1000_80003es2lan: | 1372 | case e1000_80003es2lan: |
1350 | if (hw->media_type == e1000_media_type_fiber || | 1373 | if (hw->phy.media_type == e1000_media_type_fiber || |
1351 | hw->media_type == e1000_media_type_internal_serdes) { | 1374 | hw->phy.media_type == e1000_media_type_internal_serdes) { |
1352 | /* restore CTRL_EXT, stealing space from tx_fifo_head */ | 1375 | /* restore CTRL_EXT, stealing space from tx_fifo_head */ |
1353 | ew32(CTRL_EXT, | 1376 | ew32(CTRL_EXT, adapter->tx_fifo_head); |
1354 | adapter->tx_fifo_head); | ||
1355 | adapter->tx_fifo_head = 0; | 1377 | adapter->tx_fifo_head = 0; |
1356 | } | 1378 | } |
1357 | /* fall through */ | 1379 | /* fall through */ |
1358 | case e1000_82571: | 1380 | case e1000_82571: |
1359 | case e1000_82572: | 1381 | case e1000_82572: |
1360 | if (hw->media_type == e1000_media_type_fiber || | 1382 | if (hw->phy.media_type == e1000_media_type_fiber || |
1361 | hw->media_type == e1000_media_type_internal_serdes) { | 1383 | hw->phy.media_type == e1000_media_type_internal_serdes) { |
1362 | #define E1000_SERDES_LB_OFF 0x400 | 1384 | #define E1000_SERDES_LB_OFF 0x400 |
1363 | ew32(SCTL, E1000_SERDES_LB_OFF); | 1385 | ew32(SCTL, E1000_SERDES_LB_OFF); |
1364 | msleep(10); | 1386 | msleep(10); |
@@ -1414,7 +1436,8 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter) | |||
1414 | 1436 | ||
1415 | ew32(RDT, rx_ring->count - 1); | 1437 | ew32(RDT, rx_ring->count - 1); |
1416 | 1438 | ||
1417 | /* Calculate the loop count based on the largest descriptor ring | 1439 | /* |
1440 | * Calculate the loop count based on the largest descriptor ring | ||
1418 | * The idea is to wrap the largest ring a number of times using 64 | 1441 | * The idea is to wrap the largest ring a number of times using 64 |
1419 | * send/receive pairs during each loop | 1442 | * send/receive pairs during each loop |
1420 | */ | 1443 | */ |
@@ -1454,7 +1477,8 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter) | |||
1454 | l++; | 1477 | l++; |
1455 | if (l == rx_ring->count) | 1478 | if (l == rx_ring->count) |
1456 | l = 0; | 1479 | l = 0; |
1457 | /* time + 20 msecs (200 msecs on 2.4) is more than | 1480 | /* |
1481 | * time + 20 msecs (200 msecs on 2.4) is more than | ||
1458 | * enough time to complete the receives, if it's | 1482 | * enough time to complete the receives, if it's |
1459 | * exceeded, break and error off | 1483 | * exceeded, break and error off |
1460 | */ | 1484 | */ |
@@ -1473,8 +1497,10 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter) | |||
1473 | 1497 | ||
1474 | static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) | 1498 | static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) |
1475 | { | 1499 | { |
1476 | /* PHY loopback cannot be performed if SoL/IDER | 1500 | /* |
1477 | * sessions are active */ | 1501 | * PHY loopback cannot be performed if SoL/IDER |
1502 | * sessions are active | ||
1503 | */ | ||
1478 | if (e1000_check_reset_block(&adapter->hw)) { | 1504 | if (e1000_check_reset_block(&adapter->hw)) { |
1479 | ndev_err(adapter->netdev, "Cannot do PHY loopback test " | 1505 | ndev_err(adapter->netdev, "Cannot do PHY loopback test " |
1480 | "when SoL/IDER is active.\n"); | 1506 | "when SoL/IDER is active.\n"); |
@@ -1504,12 +1530,14 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) | |||
1504 | struct e1000_hw *hw = &adapter->hw; | 1530 | struct e1000_hw *hw = &adapter->hw; |
1505 | 1531 | ||
1506 | *data = 0; | 1532 | *data = 0; |
1507 | if (hw->media_type == e1000_media_type_internal_serdes) { | 1533 | if (hw->phy.media_type == e1000_media_type_internal_serdes) { |
1508 | int i = 0; | 1534 | int i = 0; |
1509 | hw->mac.serdes_has_link = 0; | 1535 | hw->mac.serdes_has_link = 0; |
1510 | 1536 | ||
1511 | /* On some blade server designs, link establishment | 1537 | /* |
1512 | * could take as long as 2-3 minutes */ | 1538 | * On some blade server designs, link establishment |
1539 | * could take as long as 2-3 minutes | ||
1540 | */ | ||
1513 | do { | 1541 | do { |
1514 | hw->mac.ops.check_for_link(hw); | 1542 | hw->mac.ops.check_for_link(hw); |
1515 | if (hw->mac.serdes_has_link) | 1543 | if (hw->mac.serdes_has_link) |
@@ -1562,8 +1590,10 @@ static void e1000_diag_test(struct net_device *netdev, | |||
1562 | 1590 | ||
1563 | ndev_info(netdev, "offline testing starting\n"); | 1591 | ndev_info(netdev, "offline testing starting\n"); |
1564 | 1592 | ||
1565 | /* Link test performed before hardware reset so autoneg doesn't | 1593 | /* |
1566 | * interfere with test result */ | 1594 | * Link test performed before hardware reset so autoneg doesn't |
1595 | * interfere with test result | ||
1596 | */ | ||
1567 | if (e1000_link_test(adapter, &data[4])) | 1597 | if (e1000_link_test(adapter, &data[4])) |
1568 | eth_test->flags |= ETH_TEST_FL_FAILED; | 1598 | eth_test->flags |= ETH_TEST_FL_FAILED; |
1569 | 1599 | ||
@@ -1596,9 +1626,9 @@ static void e1000_diag_test(struct net_device *netdev, | |||
1596 | adapter->hw.mac.autoneg = autoneg; | 1626 | adapter->hw.mac.autoneg = autoneg; |
1597 | 1627 | ||
1598 | /* force this routine to wait until autoneg complete/timeout */ | 1628 | /* force this routine to wait until autoneg complete/timeout */ |
1599 | adapter->hw.phy.wait_for_link = 1; | 1629 | adapter->hw.phy.autoneg_wait_to_complete = 1; |
1600 | e1000e_reset(adapter); | 1630 | e1000e_reset(adapter); |
1601 | adapter->hw.phy.wait_for_link = 0; | 1631 | adapter->hw.phy.autoneg_wait_to_complete = 0; |
1602 | 1632 | ||
1603 | clear_bit(__E1000_TESTING, &adapter->state); | 1633 | clear_bit(__E1000_TESTING, &adapter->state); |
1604 | if (if_running) | 1634 | if (if_running) |
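The diagnostics hunk above also renames phy.wait_for_link to phy.autoneg_wait_to_complete. A sketch of the force-wait-around-reset pattern it uses, wrapped in a hypothetical helper; the flag and e1000e_reset() are the ones visible in the hunk.

    static void example_reset_wait_for_autoneg(struct e1000_adapter *adapter)
    {
            /* make the reset path block until autoneg completes or times out */
            adapter->hw.phy.autoneg_wait_to_complete = 1;
            e1000e_reset(adapter);
            adapter->hw.phy.autoneg_wait_to_complete = 0;
    }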
@@ -1768,8 +1798,7 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset, | |||
1768 | 1798 | ||
1769 | switch (stringset) { | 1799 | switch (stringset) { |
1770 | case ETH_SS_TEST: | 1800 | case ETH_SS_TEST: |
1771 | memcpy(data, *e1000_gstrings_test, | 1801 | memcpy(data, *e1000_gstrings_test, sizeof(e1000_gstrings_test)); |
1772 | sizeof(e1000_gstrings_test)); | ||
1773 | break; | 1802 | break; |
1774 | case ETH_SS_STATS: | 1803 | case ETH_SS_STATS: |
1775 | for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { | 1804 | for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { |
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h index 916025b30fc3..0b4145a73229 100644 --- a/drivers/net/e1000e/hw.h +++ b/drivers/net/e1000e/hw.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2007 Intel Corporation. | 4 | Copyright(c) 1999 - 2008 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -66,14 +66,14 @@ enum e1e_registers { | |||
66 | E1000_IMS = 0x000D0, /* Interrupt Mask Set - RW */ | 66 | E1000_IMS = 0x000D0, /* Interrupt Mask Set - RW */ |
67 | E1000_IMC = 0x000D8, /* Interrupt Mask Clear - WO */ | 67 | E1000_IMC = 0x000D8, /* Interrupt Mask Clear - WO */ |
68 | E1000_IAM = 0x000E0, /* Interrupt Acknowledge Auto Mask */ | 68 | E1000_IAM = 0x000E0, /* Interrupt Acknowledge Auto Mask */ |
69 | E1000_RCTL = 0x00100, /* RX Control - RW */ | 69 | E1000_RCTL = 0x00100, /* Rx Control - RW */ |
70 | E1000_FCTTV = 0x00170, /* Flow Control Transmit Timer Value - RW */ | 70 | E1000_FCTTV = 0x00170, /* Flow Control Transmit Timer Value - RW */ |
71 | E1000_TXCW = 0x00178, /* TX Configuration Word - RW */ | 71 | E1000_TXCW = 0x00178, /* Tx Configuration Word - RW */ |
72 | E1000_RXCW = 0x00180, /* RX Configuration Word - RO */ | 72 | E1000_RXCW = 0x00180, /* Rx Configuration Word - RO */ |
73 | E1000_TCTL = 0x00400, /* TX Control - RW */ | 73 | E1000_TCTL = 0x00400, /* Tx Control - RW */ |
74 | E1000_TCTL_EXT = 0x00404, /* Extended TX Control - RW */ | 74 | E1000_TCTL_EXT = 0x00404, /* Extended Tx Control - RW */ |
75 | E1000_TIPG = 0x00410, /* TX Inter-packet gap -RW */ | 75 | E1000_TIPG = 0x00410, /* Tx Inter-packet gap -RW */ |
76 | E1000_AIT = 0x00458, /* Adaptive Interframe Spacing Throttle - RW */ | 76 | E1000_AIT = 0x00458, /* Adaptive Interframe Spacing Throttle -RW */ |
77 | E1000_LEDCTL = 0x00E00, /* LED Control - RW */ | 77 | E1000_LEDCTL = 0x00E00, /* LED Control - RW */ |
78 | E1000_EXTCNF_CTRL = 0x00F00, /* Extended Configuration Control */ | 78 | E1000_EXTCNF_CTRL = 0x00F00, /* Extended Configuration Control */ |
79 | E1000_EXTCNF_SIZE = 0x00F08, /* Extended Configuration Size */ | 79 | E1000_EXTCNF_SIZE = 0x00F08, /* Extended Configuration Size */ |
@@ -87,12 +87,12 @@ enum e1e_registers { | |||
87 | E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */ | 87 | E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */ |
88 | E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */ | 88 | E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */ |
89 | E1000_PSRCTL = 0x02170, /* Packet Split Receive Control - RW */ | 89 | E1000_PSRCTL = 0x02170, /* Packet Split Receive Control - RW */ |
90 | E1000_RDBAL = 0x02800, /* RX Descriptor Base Address Low - RW */ | 90 | E1000_RDBAL = 0x02800, /* Rx Descriptor Base Address Low - RW */ |
91 | E1000_RDBAH = 0x02804, /* RX Descriptor Base Address High - RW */ | 91 | E1000_RDBAH = 0x02804, /* Rx Descriptor Base Address High - RW */ |
92 | E1000_RDLEN = 0x02808, /* RX Descriptor Length - RW */ | 92 | E1000_RDLEN = 0x02808, /* Rx Descriptor Length - RW */ |
93 | E1000_RDH = 0x02810, /* RX Descriptor Head - RW */ | 93 | E1000_RDH = 0x02810, /* Rx Descriptor Head - RW */ |
94 | E1000_RDT = 0x02818, /* RX Descriptor Tail - RW */ | 94 | E1000_RDT = 0x02818, /* Rx Descriptor Tail - RW */ |
95 | E1000_RDTR = 0x02820, /* RX Delay Timer - RW */ | 95 | E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */ |
96 | E1000_RADV = 0x0282C, /* RX Interrupt Absolute Delay Timer - RW */ | 96 | E1000_RADV = 0x0282C, /* RX Interrupt Absolute Delay Timer - RW */ |
97 | 97 | ||
98 | /* Convenience macros | 98 | /* Convenience macros |
@@ -105,17 +105,17 @@ enum e1e_registers { | |||
105 | */ | 105 | */ |
106 | #define E1000_RDBAL_REG(_n) (E1000_RDBAL + (_n << 8)) | 106 | #define E1000_RDBAL_REG(_n) (E1000_RDBAL + (_n << 8)) |
107 | E1000_KABGTXD = 0x03004, /* AFE Band Gap Transmit Ref Data */ | 107 | E1000_KABGTXD = 0x03004, /* AFE Band Gap Transmit Ref Data */ |
108 | E1000_TDBAL = 0x03800, /* TX Descriptor Base Address Low - RW */ | 108 | E1000_TDBAL = 0x03800, /* Tx Descriptor Base Address Low - RW */ |
109 | E1000_TDBAH = 0x03804, /* TX Descriptor Base Address High - RW */ | 109 | E1000_TDBAH = 0x03804, /* Tx Descriptor Base Address High - RW */ |
110 | E1000_TDLEN = 0x03808, /* TX Descriptor Length - RW */ | 110 | E1000_TDLEN = 0x03808, /* Tx Descriptor Length - RW */ |
111 | E1000_TDH = 0x03810, /* TX Descriptor Head - RW */ | 111 | E1000_TDH = 0x03810, /* Tx Descriptor Head - RW */ |
112 | E1000_TDT = 0x03818, /* TX Descriptor Tail - RW */ | 112 | E1000_TDT = 0x03818, /* Tx Descriptor Tail - RW */ |
113 | E1000_TIDV = 0x03820, /* TX Interrupt Delay Value - RW */ | 113 | E1000_TIDV = 0x03820, /* Tx Interrupt Delay Value - RW */ |
114 | E1000_TXDCTL = 0x03828, /* TX Descriptor Control - RW */ | 114 | E1000_TXDCTL = 0x03828, /* Tx Descriptor Control - RW */ |
115 | E1000_TADV = 0x0382C, /* TX Interrupt Absolute Delay Val - RW */ | 115 | E1000_TADV = 0x0382C, /* Tx Interrupt Absolute Delay Val - RW */ |
116 | E1000_TARC0 = 0x03840, /* TX Arbitration Count (0) */ | 116 | E1000_TARC0 = 0x03840, /* Tx Arbitration Count (0) */ |
117 | E1000_TXDCTL1 = 0x03928, /* TX Descriptor Control (1) - RW */ | 117 | E1000_TXDCTL1 = 0x03928, /* Tx Descriptor Control (1) - RW */ |
118 | E1000_TARC1 = 0x03940, /* TX Arbitration Count (1) */ | 118 | E1000_TARC1 = 0x03940, /* Tx Arbitration Count (1) */ |
119 | E1000_CRCERRS = 0x04000, /* CRC Error Count - R/clr */ | 119 | E1000_CRCERRS = 0x04000, /* CRC Error Count - R/clr */ |
120 | E1000_ALGNERRC = 0x04004, /* Alignment Error Count - R/clr */ | 120 | E1000_ALGNERRC = 0x04004, /* Alignment Error Count - R/clr */ |
121 | E1000_SYMERRS = 0x04008, /* Symbol Error Count - R/clr */ | 121 | E1000_SYMERRS = 0x04008, /* Symbol Error Count - R/clr */ |
@@ -127,53 +127,53 @@ enum e1e_registers { | |||
127 | E1000_LATECOL = 0x04020, /* Late Collision Count - R/clr */ | 127 | E1000_LATECOL = 0x04020, /* Late Collision Count - R/clr */ |
128 | E1000_COLC = 0x04028, /* Collision Count - R/clr */ | 128 | E1000_COLC = 0x04028, /* Collision Count - R/clr */ |
129 | E1000_DC = 0x04030, /* Defer Count - R/clr */ | 129 | E1000_DC = 0x04030, /* Defer Count - R/clr */ |
130 | E1000_TNCRS = 0x04034, /* TX-No CRS - R/clr */ | 130 | E1000_TNCRS = 0x04034, /* Tx-No CRS - R/clr */ |
131 | E1000_SEC = 0x04038, /* Sequence Error Count - R/clr */ | 131 | E1000_SEC = 0x04038, /* Sequence Error Count - R/clr */ |
132 | E1000_CEXTERR = 0x0403C, /* Carrier Extension Error Count - R/clr */ | 132 | E1000_CEXTERR = 0x0403C, /* Carrier Extension Error Count - R/clr */ |
133 | E1000_RLEC = 0x04040, /* Receive Length Error Count - R/clr */ | 133 | E1000_RLEC = 0x04040, /* Receive Length Error Count - R/clr */ |
134 | E1000_XONRXC = 0x04048, /* XON RX Count - R/clr */ | 134 | E1000_XONRXC = 0x04048, /* XON Rx Count - R/clr */ |
135 | E1000_XONTXC = 0x0404C, /* XON TX Count - R/clr */ | 135 | E1000_XONTXC = 0x0404C, /* XON Tx Count - R/clr */ |
136 | E1000_XOFFRXC = 0x04050, /* XOFF RX Count - R/clr */ | 136 | E1000_XOFFRXC = 0x04050, /* XOFF Rx Count - R/clr */ |
137 | E1000_XOFFTXC = 0x04054, /* XOFF TX Count - R/clr */ | 137 | E1000_XOFFTXC = 0x04054, /* XOFF Tx Count - R/clr */ |
138 | E1000_FCRUC = 0x04058, /* Flow Control RX Unsupported Count- R/clr */ | 138 | E1000_FCRUC = 0x04058, /* Flow Control Rx Unsupported Count- R/clr */ |
139 | E1000_PRC64 = 0x0405C, /* Packets RX (64 bytes) - R/clr */ | 139 | E1000_PRC64 = 0x0405C, /* Packets Rx (64 bytes) - R/clr */ |
140 | E1000_PRC127 = 0x04060, /* Packets RX (65-127 bytes) - R/clr */ | 140 | E1000_PRC127 = 0x04060, /* Packets Rx (65-127 bytes) - R/clr */ |
141 | E1000_PRC255 = 0x04064, /* Packets RX (128-255 bytes) - R/clr */ | 141 | E1000_PRC255 = 0x04064, /* Packets Rx (128-255 bytes) - R/clr */ |
142 | E1000_PRC511 = 0x04068, /* Packets RX (255-511 bytes) - R/clr */ | 142 | E1000_PRC511 = 0x04068, /* Packets Rx (255-511 bytes) - R/clr */ |
143 | E1000_PRC1023 = 0x0406C, /* Packets RX (512-1023 bytes) - R/clr */ | 143 | E1000_PRC1023 = 0x0406C, /* Packets Rx (512-1023 bytes) - R/clr */ |
144 | E1000_PRC1522 = 0x04070, /* Packets RX (1024-1522 bytes) - R/clr */ | 144 | E1000_PRC1522 = 0x04070, /* Packets Rx (1024-1522 bytes) - R/clr */ |
145 | E1000_GPRC = 0x04074, /* Good Packets RX Count - R/clr */ | 145 | E1000_GPRC = 0x04074, /* Good Packets Rx Count - R/clr */ |
146 | E1000_BPRC = 0x04078, /* Broadcast Packets RX Count - R/clr */ | 146 | E1000_BPRC = 0x04078, /* Broadcast Packets Rx Count - R/clr */ |
147 | E1000_MPRC = 0x0407C, /* Multicast Packets RX Count - R/clr */ | 147 | E1000_MPRC = 0x0407C, /* Multicast Packets Rx Count - R/clr */ |
148 | E1000_GPTC = 0x04080, /* Good Packets TX Count - R/clr */ | 148 | E1000_GPTC = 0x04080, /* Good Packets Tx Count - R/clr */ |
149 | E1000_GORCL = 0x04088, /* Good Octets RX Count Low - R/clr */ | 149 | E1000_GORCL = 0x04088, /* Good Octets Rx Count Low - R/clr */ |
150 | E1000_GORCH = 0x0408C, /* Good Octets RX Count High - R/clr */ | 150 | E1000_GORCH = 0x0408C, /* Good Octets Rx Count High - R/clr */ |
151 | E1000_GOTCL = 0x04090, /* Good Octets TX Count Low - R/clr */ | 151 | E1000_GOTCL = 0x04090, /* Good Octets Tx Count Low - R/clr */ |
152 | E1000_GOTCH = 0x04094, /* Good Octets TX Count High - R/clr */ | 152 | E1000_GOTCH = 0x04094, /* Good Octets Tx Count High - R/clr */ |
153 | E1000_RNBC = 0x040A0, /* RX No Buffers Count - R/clr */ | 153 | E1000_RNBC = 0x040A0, /* Rx No Buffers Count - R/clr */ |
154 | E1000_RUC = 0x040A4, /* RX Undersize Count - R/clr */ | 154 | E1000_RUC = 0x040A4, /* Rx Undersize Count - R/clr */ |
155 | E1000_RFC = 0x040A8, /* RX Fragment Count - R/clr */ | 155 | E1000_RFC = 0x040A8, /* Rx Fragment Count - R/clr */ |
156 | E1000_ROC = 0x040AC, /* RX Oversize Count - R/clr */ | 156 | E1000_ROC = 0x040AC, /* Rx Oversize Count - R/clr */ |
157 | E1000_RJC = 0x040B0, /* RX Jabber Count - R/clr */ | 157 | E1000_RJC = 0x040B0, /* Rx Jabber Count - R/clr */ |
158 | E1000_MGTPRC = 0x040B4, /* Management Packets RX Count - R/clr */ | 158 | E1000_MGTPRC = 0x040B4, /* Management Packets Rx Count - R/clr */ |
159 | E1000_MGTPDC = 0x040B8, /* Management Packets Dropped Count - R/clr */ | 159 | E1000_MGTPDC = 0x040B8, /* Management Packets Dropped Count - R/clr */ |
160 | E1000_MGTPTC = 0x040BC, /* Management Packets TX Count - R/clr */ | 160 | E1000_MGTPTC = 0x040BC, /* Management Packets Tx Count - R/clr */ |
161 | E1000_TORL = 0x040C0, /* Total Octets RX Low - R/clr */ | 161 | E1000_TORL = 0x040C0, /* Total Octets Rx Low - R/clr */ |
162 | E1000_TORH = 0x040C4, /* Total Octets RX High - R/clr */ | 162 | E1000_TORH = 0x040C4, /* Total Octets Rx High - R/clr */ |
163 | E1000_TOTL = 0x040C8, /* Total Octets TX Low - R/clr */ | 163 | E1000_TOTL = 0x040C8, /* Total Octets Tx Low - R/clr */ |
164 | E1000_TOTH = 0x040CC, /* Total Octets TX High - R/clr */ | 164 | E1000_TOTH = 0x040CC, /* Total Octets Tx High - R/clr */ |
165 | E1000_TPR = 0x040D0, /* Total Packets RX - R/clr */ | 165 | E1000_TPR = 0x040D0, /* Total Packets Rx - R/clr */ |
166 | E1000_TPT = 0x040D4, /* Total Packets TX - R/clr */ | 166 | E1000_TPT = 0x040D4, /* Total Packets Tx - R/clr */ |
167 | E1000_PTC64 = 0x040D8, /* Packets TX (64 bytes) - R/clr */ | 167 | E1000_PTC64 = 0x040D8, /* Packets Tx (64 bytes) - R/clr */ |
168 | E1000_PTC127 = 0x040DC, /* Packets TX (65-127 bytes) - R/clr */ | 168 | E1000_PTC127 = 0x040DC, /* Packets Tx (65-127 bytes) - R/clr */ |
169 | E1000_PTC255 = 0x040E0, /* Packets TX (128-255 bytes) - R/clr */ | 169 | E1000_PTC255 = 0x040E0, /* Packets Tx (128-255 bytes) - R/clr */ |
170 | E1000_PTC511 = 0x040E4, /* Packets TX (256-511 bytes) - R/clr */ | 170 | E1000_PTC511 = 0x040E4, /* Packets Tx (256-511 bytes) - R/clr */ |
171 | E1000_PTC1023 = 0x040E8, /* Packets TX (512-1023 bytes) - R/clr */ | 171 | E1000_PTC1023 = 0x040E8, /* Packets Tx (512-1023 bytes) - R/clr */ |
172 | E1000_PTC1522 = 0x040EC, /* Packets TX (1024-1522 Bytes) - R/clr */ | 172 | E1000_PTC1522 = 0x040EC, /* Packets Tx (1024-1522 Bytes) - R/clr */ |
173 | E1000_MPTC = 0x040F0, /* Multicast Packets TX Count - R/clr */ | 173 | E1000_MPTC = 0x040F0, /* Multicast Packets Tx Count - R/clr */ |
174 | E1000_BPTC = 0x040F4, /* Broadcast Packets TX Count - R/clr */ | 174 | E1000_BPTC = 0x040F4, /* Broadcast Packets Tx Count - R/clr */ |
175 | E1000_TSCTC = 0x040F8, /* TCP Segmentation Context TX - R/clr */ | 175 | E1000_TSCTC = 0x040F8, /* TCP Segmentation Context Tx - R/clr */ |
176 | E1000_TSCTFC = 0x040FC, /* TCP Segmentation Context TX Fail - R/clr */ | 176 | E1000_TSCTFC = 0x040FC, /* TCP Segmentation Context Tx Fail - R/clr */ |
177 | E1000_IAC = 0x04100, /* Interrupt Assertion Count */ | 177 | E1000_IAC = 0x04100, /* Interrupt Assertion Count */ |
178 | E1000_ICRXPTC = 0x04104, /* Irq Cause Rx Packet Timer Expire Count */ | 178 | E1000_ICRXPTC = 0x04104, /* Irq Cause Rx Packet Timer Expire Count */ |
179 | E1000_ICRXATC = 0x04108, /* Irq Cause Rx Abs Timer Expire Count */ | 179 | E1000_ICRXATC = 0x04108, /* Irq Cause Rx Abs Timer Expire Count */ |
@@ -183,7 +183,7 @@ enum e1e_registers { | |||
183 | E1000_ICTXQMTC = 0x0411C, /* Irq Cause Tx Queue MinThreshold Count */ | 183 | E1000_ICTXQMTC = 0x0411C, /* Irq Cause Tx Queue MinThreshold Count */ |
184 | E1000_ICRXDMTC = 0x04120, /* Irq Cause Rx Desc MinThreshold Count */ | 184 | E1000_ICRXDMTC = 0x04120, /* Irq Cause Rx Desc MinThreshold Count */ |
185 | E1000_ICRXOC = 0x04124, /* Irq Cause Receiver Overrun Count */ | 185 | E1000_ICRXOC = 0x04124, /* Irq Cause Receiver Overrun Count */ |
186 | E1000_RXCSUM = 0x05000, /* RX Checksum Control - RW */ | 186 | E1000_RXCSUM = 0x05000, /* Rx Checksum Control - RW */ |
187 | E1000_RFCTL = 0x05008, /* Receive Filter Control */ | 187 | E1000_RFCTL = 0x05008, /* Receive Filter Control */ |
188 | E1000_MTA = 0x05200, /* Multicast Table Array - RW Array */ | 188 | E1000_MTA = 0x05200, /* Multicast Table Array - RW Array */ |
189 | E1000_RA = 0x05400, /* Receive Address - RW Array */ | 189 | E1000_RA = 0x05400, /* Receive Address - RW Array */ |
@@ -250,8 +250,8 @@ enum e1e_registers { | |||
250 | #define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F | 250 | #define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F |
251 | 251 | ||
252 | #define E1000_HICR_EN 0x01 /* Enable bit - RO */ | 252 | #define E1000_HICR_EN 0x01 /* Enable bit - RO */ |
253 | #define E1000_HICR_C 0x02 /* Driver sets this bit when done | 253 | /* Driver sets this bit when done to put command in RAM */ |
254 | * to put command in RAM */ | 254 | #define E1000_HICR_C 0x02 |
255 | #define E1000_HICR_FW_RESET_ENABLE 0x40 | 255 | #define E1000_HICR_FW_RESET_ENABLE 0x40 |
256 | #define E1000_HICR_FW_RESET 0x80 | 256 | #define E1000_HICR_FW_RESET 0x80 |
257 | 257 | ||
@@ -400,7 +400,7 @@ enum e1000_rev_polarity{ | |||
400 | e1000_rev_polarity_undefined = 0xFF | 400 | e1000_rev_polarity_undefined = 0xFF |
401 | }; | 401 | }; |
402 | 402 | ||
403 | enum e1000_fc_mode { | 403 | enum e1000_fc_type { |
404 | e1000_fc_none = 0, | 404 | e1000_fc_none = 0, |
405 | e1000_fc_rx_pause, | 405 | e1000_fc_rx_pause, |
406 | e1000_fc_tx_pause, | 406 | e1000_fc_tx_pause, |
@@ -685,8 +685,7 @@ struct e1000_mac_operations { | |||
685 | s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); | 685 | s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); |
686 | s32 (*led_on)(struct e1000_hw *); | 686 | s32 (*led_on)(struct e1000_hw *); |
687 | s32 (*led_off)(struct e1000_hw *); | 687 | s32 (*led_off)(struct e1000_hw *); |
688 | void (*mc_addr_list_update)(struct e1000_hw *, u8 *, u32, u32, | 688 | void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32, u32, u32); |
689 | u32); | ||
690 | s32 (*reset_hw)(struct e1000_hw *); | 689 | s32 (*reset_hw)(struct e1000_hw *); |
691 | s32 (*init_hw)(struct e1000_hw *); | 690 | s32 (*init_hw)(struct e1000_hw *); |
692 | s32 (*setup_link)(struct e1000_hw *); | 691 | s32 (*setup_link)(struct e1000_hw *); |
@@ -728,16 +727,12 @@ struct e1000_mac_info { | |||
728 | u8 perm_addr[6]; | 727 | u8 perm_addr[6]; |
729 | 728 | ||
730 | enum e1000_mac_type type; | 729 | enum e1000_mac_type type; |
731 | enum e1000_fc_mode fc; | ||
732 | enum e1000_fc_mode original_fc; | ||
733 | 730 | ||
734 | u32 collision_delta; | 731 | u32 collision_delta; |
735 | u32 ledctl_default; | 732 | u32 ledctl_default; |
736 | u32 ledctl_mode1; | 733 | u32 ledctl_mode1; |
737 | u32 ledctl_mode2; | 734 | u32 ledctl_mode2; |
738 | u32 max_frame_size; | ||
739 | u32 mc_filter_type; | 735 | u32 mc_filter_type; |
740 | u32 min_frame_size; | ||
741 | u32 tx_packet_delta; | 736 | u32 tx_packet_delta; |
742 | u32 txcw; | 737 | u32 txcw; |
743 | 738 | ||
@@ -748,9 +743,6 @@ struct e1000_mac_info { | |||
748 | u16 ifs_step_size; | 743 | u16 ifs_step_size; |
749 | u16 mta_reg_count; | 744 | u16 mta_reg_count; |
750 | u16 rar_entry_count; | 745 | u16 rar_entry_count; |
751 | u16 fc_high_water; | ||
752 | u16 fc_low_water; | ||
753 | u16 fc_pause_time; | ||
754 | 746 | ||
755 | u8 forced_speed_duplex; | 747 | u8 forced_speed_duplex; |
756 | 748 | ||
@@ -780,6 +772,8 @@ struct e1000_phy_info { | |||
780 | u32 reset_delay_us; /* in usec */ | 772 | u32 reset_delay_us; /* in usec */ |
781 | u32 revision; | 773 | u32 revision; |
782 | 774 | ||
775 | enum e1000_media_type media_type; | ||
776 | |||
783 | u16 autoneg_advertised; | 777 | u16 autoneg_advertised; |
784 | u16 autoneg_mask; | 778 | u16 autoneg_mask; |
785 | u16 cable_length; | 779 | u16 cable_length; |
@@ -792,7 +786,7 @@ struct e1000_phy_info { | |||
792 | bool is_mdix; | 786 | bool is_mdix; |
793 | bool polarity_correction; | 787 | bool polarity_correction; |
794 | bool speed_downgraded; | 788 | bool speed_downgraded; |
795 | bool wait_for_link; | 789 | bool autoneg_wait_to_complete; |
796 | }; | 790 | }; |
797 | 791 | ||
798 | struct e1000_nvm_info { | 792 | struct e1000_nvm_info { |
@@ -817,6 +811,16 @@ struct e1000_bus_info { | |||
817 | u16 func; | 811 | u16 func; |
818 | }; | 812 | }; |
819 | 813 | ||
814 | struct e1000_fc_info { | ||
815 | u32 high_water; /* Flow control high-water mark */ | ||
816 | u32 low_water; /* Flow control low-water mark */ | ||
817 | u16 pause_time; /* Flow control pause timer */ | ||
818 | bool send_xon; /* Flow control send XON */ | ||
819 | bool strict_ieee; /* Strict IEEE mode */ | ||
820 | enum e1000_fc_type type; /* Type of flow control */ | ||
821 | enum e1000_fc_type original_type; | ||
822 | }; | ||
823 | |||
820 | struct e1000_dev_spec_82571 { | 824 | struct e1000_dev_spec_82571 { |
821 | bool laa_is_present; | 825 | bool laa_is_present; |
822 | bool alt_mac_addr_is_present; | 826 | bool alt_mac_addr_is_present; |
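The new e1000_fc_info structure gathers the flow-control state that previously sat in e1000_mac_info. A rough old-to-new mapping, based only on the fields removed and added in these hw.h hunks; the initializer values in the sketch are placeholders, not driver defaults.

    /*
     *   mac.fc             -> fc.type
     *   mac.original_fc    -> fc.original_type
     *   mac.fc_high_water  -> fc.high_water  (u16 widened to u32)
     *   mac.fc_low_water   -> fc.low_water   (u16 widened to u32)
     *   mac.fc_pause_time  -> fc.pause_time
     *   send_xon and strict_ieee also live in the new struct.
     */
    static void example_fc_init(struct e1000_hw *hw)
    {
            hw->fc.type = e1000_fc_default;
            hw->fc.original_type = e1000_fc_default;
            hw->fc.pause_time = 0xFFFF;     /* placeholder value */
            hw->fc.send_xon = true;         /* placeholder value */
    }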
@@ -841,6 +845,7 @@ struct e1000_hw { | |||
841 | u8 __iomem *flash_address; | 845 | u8 __iomem *flash_address; |
842 | 846 | ||
843 | struct e1000_mac_info mac; | 847 | struct e1000_mac_info mac; |
848 | struct e1000_fc_info fc; | ||
844 | struct e1000_phy_info phy; | 849 | struct e1000_phy_info phy; |
845 | struct e1000_nvm_info nvm; | 850 | struct e1000_nvm_info nvm; |
846 | struct e1000_bus_info bus; | 851 | struct e1000_bus_info bus; |
@@ -850,8 +855,6 @@ struct e1000_hw { | |||
850 | struct e1000_dev_spec_82571 e82571; | 855 | struct e1000_dev_spec_82571 e82571; |
851 | struct e1000_dev_spec_ich8lan ich8lan; | 856 | struct e1000_dev_spec_ich8lan ich8lan; |
852 | } dev_spec; | 857 | } dev_spec; |
853 | |||
854 | enum e1000_media_type media_type; | ||
855 | }; | 858 | }; |
856 | 859 | ||
857 | #ifdef DEBUG | 860 | #ifdef DEBUG |
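The final hw.h hunk drops media_type from struct e1000_hw itself; together with the earlier addition to e1000_phy_info, the accessor changes from hw->media_type to hw->phy.media_type throughout the driver. A one-line sketch of the new form:

    static bool example_is_copper(struct e1000_hw *hw)
    {
            /* was: hw->media_type == e1000_media_type_copper */
            return hw->phy.media_type == e1000_media_type_copper;
    }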
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c index 0ae39550768d..e358a773e67a 100644 --- a/drivers/net/e1000e/ich8lan.c +++ b/drivers/net/e1000e/ich8lan.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2007 Intel Corporation. | 4 | Copyright(c) 1999 - 2008 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -243,8 +243,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) | |||
243 | u32 sector_end_addr; | 243 | u32 sector_end_addr; |
244 | u16 i; | 244 | u16 i; |
245 | 245 | ||
246 | /* Can't read flash registers if the register set isn't mapped. | 246 | /* Can't read flash registers if the register set isn't mapped. */ |
247 | */ | ||
248 | if (!hw->flash_address) { | 247 | if (!hw->flash_address) { |
249 | hw_dbg(hw, "ERROR: Flash registers not mapped\n"); | 248 | hw_dbg(hw, "ERROR: Flash registers not mapped\n"); |
250 | return -E1000_ERR_CONFIG; | 249 | return -E1000_ERR_CONFIG; |
@@ -254,17 +253,21 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) | |||
254 | 253 | ||
255 | gfpreg = er32flash(ICH_FLASH_GFPREG); | 254 | gfpreg = er32flash(ICH_FLASH_GFPREG); |
256 | 255 | ||
257 | /* sector_X_addr is a "sector"-aligned address (4096 bytes) | 256 | /* |
257 | * sector_X_addr is a "sector"-aligned address (4096 bytes) | ||
258 | * Add 1 to sector_end_addr since this sector is included in | 258 | * Add 1 to sector_end_addr since this sector is included in |
259 | * the overall size. */ | 259 | * the overall size. |
260 | */ | ||
260 | sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK; | 261 | sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK; |
261 | sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1; | 262 | sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1; |
262 | 263 | ||
263 | /* flash_base_addr is byte-aligned */ | 264 | /* flash_base_addr is byte-aligned */ |
264 | nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT; | 265 | nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT; |
265 | 266 | ||
266 | /* find total size of the NVM, then cut in half since the total | 267 | /* |
267 | * size represents two separate NVM banks. */ | 268 | * find total size of the NVM, then cut in half since the total |
269 | * size represents two separate NVM banks. | ||
270 | */ | ||
268 | nvm->flash_bank_size = (sector_end_addr - sector_base_addr) | 271 | nvm->flash_bank_size = (sector_end_addr - sector_base_addr) |
269 | << FLASH_SECTOR_ADDR_SHIFT; | 272 | << FLASH_SECTOR_ADDR_SHIFT; |
270 | nvm->flash_bank_size /= 2; | 273 | nvm->flash_bank_size /= 2; |
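The GFPREG decode above splits the register into sector-aligned base and end fields, then halves the total span to get one NVM bank. A worked example with an invented register value, assuming FLASH_SECTOR_ADDR_SHIFT is 12 for the 4096-byte sectors mentioned in the comment:

    /*
     *   gfpreg           = 0x00070000   (base field 0, end field 7)
     *   sector_base_addr = 0
     *   sector_end_addr  = 7 + 1 = 8
     *   flash_base_addr  = 0 << 12          = 0
     *   total NVM span   = (8 - 0) << 12    = 32768 bytes
     *   flash_bank_size  = 32768 / 2        = 16384 bytes per bank
     */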
@@ -295,7 +298,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter) | |||
295 | struct e1000_mac_info *mac = &hw->mac; | 298 | struct e1000_mac_info *mac = &hw->mac; |
296 | 299 | ||
297 | /* Set media type function pointer */ | 300 | /* Set media type function pointer */ |
298 | hw->media_type = e1000_media_type_copper; | 301 | hw->phy.media_type = e1000_media_type_copper; |
299 | 302 | ||
300 | /* Set mta register count */ | 303 | /* Set mta register count */ |
301 | mac->mta_reg_count = 32; | 304 | mac->mta_reg_count = 32; |
@@ -450,7 +453,7 @@ static s32 e1000_phy_force_speed_duplex_ich8lan(struct e1000_hw *hw) | |||
450 | 453 | ||
451 | udelay(1); | 454 | udelay(1); |
452 | 455 | ||
453 | if (phy->wait_for_link) { | 456 | if (phy->autoneg_wait_to_complete) { |
454 | hw_dbg(hw, "Waiting for forced speed/duplex link on IFE phy.\n"); | 457 | hw_dbg(hw, "Waiting for forced speed/duplex link on IFE phy.\n"); |
455 | 458 | ||
456 | ret_val = e1000e_phy_has_link_generic(hw, | 459 | ret_val = e1000e_phy_has_link_generic(hw, |
@@ -496,7 +499,8 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw) | |||
496 | if (ret_val) | 499 | if (ret_val) |
497 | return ret_val; | 500 | return ret_val; |
498 | 501 | ||
499 | /* Initialize the PHY from the NVM on ICH platforms. This | 502 | /* |
503 | * Initialize the PHY from the NVM on ICH platforms. This | ||
500 | * is needed due to an issue where the NVM configuration is | 504 | * is needed due to an issue where the NVM configuration is |
501 | * not properly autoloaded after power transitions. | 505 | * not properly autoloaded after power transitions. |
502 | * Therefore, after each PHY reset, we will load the | 506 | * Therefore, after each PHY reset, we will load the |
@@ -523,7 +527,8 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw) | |||
523 | udelay(100); | 527 | udelay(100); |
524 | } while ((!data) && --loop); | 528 | } while ((!data) && --loop); |
525 | 529 | ||
526 | /* If basic configuration is incomplete before the above loop | 530 | /* |
531 | * If basic configuration is incomplete before the above loop | ||
527 | * count reaches 0, loading the configuration from NVM will | 532 | * count reaches 0, loading the configuration from NVM will |
528 | * leave the PHY in a bad state possibly resulting in no link. | 533 | * leave the PHY in a bad state possibly resulting in no link. |
529 | */ | 534 | */ |
@@ -536,8 +541,10 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw) | |||
536 | data &= ~E1000_STATUS_LAN_INIT_DONE; | 541 | data &= ~E1000_STATUS_LAN_INIT_DONE; |
537 | ew32(STATUS, data); | 542 | ew32(STATUS, data); |
538 | 543 | ||
539 | /* Make sure HW does not configure LCD from PHY | 544 | /* |
540 | * extended configuration before SW configuration */ | 545 | * Make sure HW does not configure LCD from PHY |
546 | * extended configuration before SW configuration | ||
547 | */ | ||
541 | data = er32(EXTCNF_CTRL); | 548 | data = er32(EXTCNF_CTRL); |
542 | if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) | 549 | if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) |
543 | return 0; | 550 | return 0; |
@@ -551,8 +558,7 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw) | |||
551 | cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; | 558 | cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; |
552 | cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; | 559 | cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; |
553 | 560 | ||
554 | /* Configure LCD from extended configuration | 561 | /* Configure LCD from extended configuration region. */ |
555 | * region. */ | ||
556 | 562 | ||
557 | /* cnf_base_addr is in DWORD */ | 563 | /* cnf_base_addr is in DWORD */ |
558 | word_addr = (u16)(cnf_base_addr << 1); | 564 | word_addr = (u16)(cnf_base_addr << 1); |
@@ -681,8 +687,8 @@ static s32 e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw) | |||
681 | s32 ret_val; | 687 | s32 ret_val; |
682 | u16 phy_data, offset, mask; | 688 | u16 phy_data, offset, mask; |
683 | 689 | ||
684 | /* Polarity is determined based on the reversal feature | 690 | /* |
685 | * being enabled. | 691 | * Polarity is determined based on the reversal feature being enabled. |
686 | */ | 692 | */ |
687 | if (phy->polarity_correction) { | 693 | if (phy->polarity_correction) { |
688 | offset = IFE_PHY_EXTENDED_STATUS_CONTROL; | 694 | offset = IFE_PHY_EXTENDED_STATUS_CONTROL; |
@@ -731,8 +737,10 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active) | |||
731 | phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU; | 737 | phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU; |
732 | ew32(PHY_CTRL, phy_ctrl); | 738 | ew32(PHY_CTRL, phy_ctrl); |
733 | 739 | ||
734 | /* Call gig speed drop workaround on LPLU before accessing | 740 | /* |
735 | * any PHY registers */ | 741 | * Call gig speed drop workaround on LPLU before accessing |
742 | * any PHY registers | ||
743 | */ | ||
736 | if ((hw->mac.type == e1000_ich8lan) && | 744 | if ((hw->mac.type == e1000_ich8lan) && |
737 | (hw->phy.type == e1000_phy_igp_3)) | 745 | (hw->phy.type == e1000_phy_igp_3)) |
738 | e1000e_gig_downshift_workaround_ich8lan(hw); | 746 | e1000e_gig_downshift_workaround_ich8lan(hw); |
@@ -747,30 +755,32 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active) | |||
747 | phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU; | 755 | phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU; |
748 | ew32(PHY_CTRL, phy_ctrl); | 756 | ew32(PHY_CTRL, phy_ctrl); |
749 | 757 | ||
750 | /* LPLU and SmartSpeed are mutually exclusive. LPLU is used | 758 | /* |
759 | * LPLU and SmartSpeed are mutually exclusive. LPLU is used | ||
751 | * during Dx states where the power conservation is most | 760 | * during Dx states where the power conservation is most |
752 | * important. During driver activity we should enable | 761 | * important. During driver activity we should enable |
753 | * SmartSpeed, so performance is maintained. */ | 762 | * SmartSpeed, so performance is maintained. |
763 | */ | ||
754 | if (phy->smart_speed == e1000_smart_speed_on) { | 764 | if (phy->smart_speed == e1000_smart_speed_on) { |
755 | ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, | 765 | ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, |
756 | &data); | 766 | &data); |
757 | if (ret_val) | 767 | if (ret_val) |
758 | return ret_val; | 768 | return ret_val; |
759 | 769 | ||
760 | data |= IGP01E1000_PSCFR_SMART_SPEED; | 770 | data |= IGP01E1000_PSCFR_SMART_SPEED; |
761 | ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, | 771 | ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, |
762 | data); | 772 | data); |
763 | if (ret_val) | 773 | if (ret_val) |
764 | return ret_val; | 774 | return ret_val; |
765 | } else if (phy->smart_speed == e1000_smart_speed_off) { | 775 | } else if (phy->smart_speed == e1000_smart_speed_off) { |
766 | ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, | 776 | ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, |
767 | &data); | 777 | &data); |
768 | if (ret_val) | 778 | if (ret_val) |
769 | return ret_val; | 779 | return ret_val; |
770 | 780 | ||
771 | data &= ~IGP01E1000_PSCFR_SMART_SPEED; | 781 | data &= ~IGP01E1000_PSCFR_SMART_SPEED; |
772 | ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, | 782 | ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, |
773 | data); | 783 | data); |
774 | if (ret_val) | 784 | if (ret_val) |
775 | return ret_val; | 785 | return ret_val; |
776 | } | 786 | } |
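The SmartSpeed bookkeeping in the two LPLU hunks above comes down to one rule: the two features never run at the same time. A minimal sketch of that rule as a pure helper, with the register bit value assumed for illustration (the driver uses IGP01E1000_PSCFR_SMART_SPEED from its own headers):

#include <stdbool.h>

enum smart_speed { SS_DEFAULT, SS_ON, SS_OFF };

#define SMART_SPEED_BIT 0x0080	/* assumed bit position, for illustration only */

static unsigned short adjust_port_config(unsigned short port_cfg,
					 bool lplu_active,
					 enum smart_speed policy)
{
	if (lplu_active)		/* LPLU on: SmartSpeed must be off */
		return port_cfg & ~SMART_SPEED_BIT;
	if (policy == SS_ON)		/* driver active: keep performance up */
		return port_cfg | SMART_SPEED_BIT;
	if (policy == SS_OFF)
		return port_cfg & ~SMART_SPEED_BIT;
	return port_cfg;		/* default policy: leave the register alone */
}

The read/modify/write of IGP01E1000_PHY_PORT_CONFIG in the hunks is exactly this transform applied through e1e_rphy()/e1e_wphy().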
@@ -804,34 +814,32 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active) | |||
804 | if (!active) { | 814 | if (!active) { |
805 | phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU; | 815 | phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU; |
806 | ew32(PHY_CTRL, phy_ctrl); | 816 | ew32(PHY_CTRL, phy_ctrl); |
807 | /* LPLU and SmartSpeed are mutually exclusive. LPLU is used | 817 | /* |
818 | * LPLU and SmartSpeed are mutually exclusive. LPLU is used | ||
808 | * during Dx states where the power conservation is most | 819 | * during Dx states where the power conservation is most |
809 | * important. During driver activity we should enable | 820 | * important. During driver activity we should enable |
810 | * SmartSpeed, so performance is maintained. */ | 821 | * SmartSpeed, so performance is maintained. |
822 | */ | ||
811 | if (phy->smart_speed == e1000_smart_speed_on) { | 823 | if (phy->smart_speed == e1000_smart_speed_on) { |
812 | ret_val = e1e_rphy(hw, | 824 | ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, |
813 | IGP01E1000_PHY_PORT_CONFIG, | 825 | &data); |
814 | &data); | ||
815 | if (ret_val) | 826 | if (ret_val) |
816 | return ret_val; | 827 | return ret_val; |
817 | 828 | ||
818 | data |= IGP01E1000_PSCFR_SMART_SPEED; | 829 | data |= IGP01E1000_PSCFR_SMART_SPEED; |
819 | ret_val = e1e_wphy(hw, | 830 | ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, |
820 | IGP01E1000_PHY_PORT_CONFIG, | 831 | data); |
821 | data); | ||
822 | if (ret_val) | 832 | if (ret_val) |
823 | return ret_val; | 833 | return ret_val; |
824 | } else if (phy->smart_speed == e1000_smart_speed_off) { | 834 | } else if (phy->smart_speed == e1000_smart_speed_off) { |
825 | ret_val = e1e_rphy(hw, | 835 | ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, |
826 | IGP01E1000_PHY_PORT_CONFIG, | 836 | &data); |
827 | &data); | ||
828 | if (ret_val) | 837 | if (ret_val) |
829 | return ret_val; | 838 | return ret_val; |
830 | 839 | ||
831 | data &= ~IGP01E1000_PSCFR_SMART_SPEED; | 840 | data &= ~IGP01E1000_PSCFR_SMART_SPEED; |
832 | ret_val = e1e_wphy(hw, | 841 | ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, |
833 | IGP01E1000_PHY_PORT_CONFIG, | 842 | data); |
834 | data); | ||
835 | if (ret_val) | 843 | if (ret_val) |
836 | return ret_val; | 844 | return ret_val; |
837 | } | 845 | } |
@@ -841,23 +849,21 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active) | |||
841 | phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU; | 849 | phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU; |
842 | ew32(PHY_CTRL, phy_ctrl); | 850 | ew32(PHY_CTRL, phy_ctrl); |
843 | 851 | ||
844 | /* Call gig speed drop workaround on LPLU before accessing | 852 | /* |
845 | * any PHY registers */ | 853 | * Call gig speed drop workaround on LPLU before accessing |
854 | * any PHY registers | ||
855 | */ | ||
846 | if ((hw->mac.type == e1000_ich8lan) && | 856 | if ((hw->mac.type == e1000_ich8lan) && |
847 | (hw->phy.type == e1000_phy_igp_3)) | 857 | (hw->phy.type == e1000_phy_igp_3)) |
848 | e1000e_gig_downshift_workaround_ich8lan(hw); | 858 | e1000e_gig_downshift_workaround_ich8lan(hw); |
849 | 859 | ||
850 | /* When LPLU is enabled, we should disable SmartSpeed */ | 860 | /* When LPLU is enabled, we should disable SmartSpeed */ |
851 | ret_val = e1e_rphy(hw, | 861 | ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); |
852 | IGP01E1000_PHY_PORT_CONFIG, | ||
853 | &data); | ||
854 | if (ret_val) | 862 | if (ret_val) |
855 | return ret_val; | 863 | return ret_val; |
856 | 864 | ||
857 | data &= ~IGP01E1000_PSCFR_SMART_SPEED; | 865 | data &= ~IGP01E1000_PSCFR_SMART_SPEED; |
858 | ret_val = e1e_wphy(hw, | 866 | ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); |
859 | IGP01E1000_PHY_PORT_CONFIG, | ||
860 | data); | ||
861 | } | 867 | } |
862 | 868 | ||
863 | return 0; | 869 | return 0; |
@@ -944,7 +950,8 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) | |||
944 | 950 | ||
945 | ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); | 951 | ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); |
946 | 952 | ||
947 | /* Either we should have a hardware SPI cycle in progress | 953 | /* |
954 | * Either we should have a hardware SPI cycle in progress | ||
948 | * bit to check against, in order to start a new cycle or | 955 | * bit to check against, in order to start a new cycle or |
949 | * FDONE bit should be changed in the hardware so that it | 956 | * FDONE bit should be changed in the hardware so that it |
950 | * is 1 after hardware reset, which can then be used as an | 957 | * is 1 after hardware reset, which can then be used as an |
@@ -953,15 +960,19 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) | |||
953 | */ | 960 | */ |
954 | 961 | ||
955 | if (hsfsts.hsf_status.flcinprog == 0) { | 962 | if (hsfsts.hsf_status.flcinprog == 0) { |
956 | /* There is no cycle running at present, | 963 | /* |
957 | * so we can start a cycle */ | 964 | * There is no cycle running at present, |
958 | /* Begin by setting Flash Cycle Done. */ | 965 | * so we can start a cycle |
966 | * Begin by setting Flash Cycle Done. | ||
967 | */ | ||
959 | hsfsts.hsf_status.flcdone = 1; | 968 | hsfsts.hsf_status.flcdone = 1; |
960 | ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); | 969 | ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); |
961 | ret_val = 0; | 970 | ret_val = 0; |
962 | } else { | 971 | } else { |
963 | /* otherwise poll for sometime so the current | 972 | /* |
964 | * cycle has a chance to end before giving up. */ | 973 | * otherwise poll for sometime so the current |
974 | * cycle has a chance to end before giving up. | ||
975 | */ | ||
965 | for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) { | 976 | for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) { |
966 | hsfsts.regval = __er16flash(hw, ICH_FLASH_HSFSTS); | 977 | hsfsts.regval = __er16flash(hw, ICH_FLASH_HSFSTS); |
967 | if (hsfsts.hsf_status.flcinprog == 0) { | 978 | if (hsfsts.hsf_status.flcinprog == 0) { |
@@ -971,8 +982,10 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) | |||
971 | udelay(1); | 982 | udelay(1); |
972 | } | 983 | } |
973 | if (ret_val == 0) { | 984 | if (ret_val == 0) { |
974 | /* Successful in waiting for previous cycle to timeout, | 985 | /* |
975 | * now set the Flash Cycle Done. */ | 986 | * Successful in waiting for previous cycle to timeout, |
987 | * now set the Flash Cycle Done. | ||
988 | */ | ||
976 | hsfsts.hsf_status.flcdone = 1; | 989 | hsfsts.hsf_status.flcdone = 1; |
977 | ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); | 990 | ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); |
978 | } else { | 991 | } else { |
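The flcinprog/flcdone handshake being reformatted above reads as the following stand-alone decision flow. This is a sketch with simplified names; the struct, the helper name and the timeout constant are placeholders, not the driver's types:

#include <stdbool.h>

#define FLASH_READ_TIMEOUT 1000		/* poll iterations (assumed) */

struct flash_status {
	bool flcinprog;			/* a flash cycle is in progress   */
	bool flcdone;			/* the last cycle has completed   */
};

/* returns 0 when it is safe to start a new flash cycle, -1 otherwise */
static int flash_cycle_init(struct flash_status *hsfsts)
{
	int i;

	if (!hsfsts->flcinprog) {
		/* no cycle running: mark the previous one done and go */
		hsfsts->flcdone = true;
		return 0;
	}

	/* a cycle is in flight: give it a chance to finish */
	for (i = 0; i < FLASH_READ_TIMEOUT; i++) {
		if (!hsfsts->flcinprog) {
			hsfsts->flcdone = true;	/* claim the interface */
			return 0;
		}
		/* the real driver does udelay(1) between reads here */
	}
	return -1;	/* still busy: the caller must not start a cycle */
}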
@@ -1077,10 +1090,12 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, | |||
1077 | ret_val = e1000_flash_cycle_ich8lan(hw, | 1090 | ret_val = e1000_flash_cycle_ich8lan(hw, |
1078 | ICH_FLASH_READ_COMMAND_TIMEOUT); | 1091 | ICH_FLASH_READ_COMMAND_TIMEOUT); |
1079 | 1092 | ||
1080 | /* Check if FCERR is set to 1, if set to 1, clear it | 1093 | /* |
1094 | * Check if FCERR is set to 1, if set to 1, clear it | ||
1081 | * and try the whole sequence a few more times, else | 1095 | * and try the whole sequence a few more times, else |
1082 | * read in (shift in) the Flash Data0, the order is | 1096 | * read in (shift in) the Flash Data0, the order is |
1083 | * least significant byte first msb to lsb */ | 1097 | * least significant byte first msb to lsb |
1098 | */ | ||
1084 | if (ret_val == 0) { | 1099 | if (ret_val == 0) { |
1085 | flash_data = er32flash(ICH_FLASH_FDATA0); | 1100 | flash_data = er32flash(ICH_FLASH_FDATA0); |
1086 | if (size == 1) { | 1101 | if (size == 1) { |
@@ -1090,7 +1105,8 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, | |||
1090 | } | 1105 | } |
1091 | break; | 1106 | break; |
1092 | } else { | 1107 | } else { |
1093 | /* If we've gotten here, then things are probably | 1108 | /* |
1109 | * If we've gotten here, then things are probably | ||
1094 | * completely hosed, but if the error condition is | 1110 | * completely hosed, but if the error condition is |
1095 | * detected, it won't hurt to give it another try... | 1111 | * detected, it won't hurt to give it another try... |
1096 | * ICH_FLASH_CYCLE_REPEAT_COUNT times. | 1112 | * ICH_FLASH_CYCLE_REPEAT_COUNT times. |
@@ -1168,18 +1184,20 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
1168 | 1184 | ||
1169 | ret_val = e1000e_update_nvm_checksum_generic(hw); | 1185 | ret_val = e1000e_update_nvm_checksum_generic(hw); |
1170 | if (ret_val) | 1186 | if (ret_val) |
1171 | return ret_val;; | 1187 | return ret_val; |
1172 | 1188 | ||
1173 | if (nvm->type != e1000_nvm_flash_sw) | 1189 | if (nvm->type != e1000_nvm_flash_sw) |
1174 | return ret_val;; | 1190 | return ret_val; |
1175 | 1191 | ||
1176 | ret_val = e1000_acquire_swflag_ich8lan(hw); | 1192 | ret_val = e1000_acquire_swflag_ich8lan(hw); |
1177 | if (ret_val) | 1193 | if (ret_val) |
1178 | return ret_val;; | 1194 | return ret_val; |
1179 | 1195 | ||
1180 | /* We're writing to the opposite bank so if we're on bank 1, | 1196 | /* |
1197 | * We're writing to the opposite bank so if we're on bank 1, | ||
1181 | * write to bank 0 etc. We also need to erase the segment that | 1198 | * write to bank 0 etc. We also need to erase the segment that |
1182 | * is going to be written */ | 1199 | * is going to be written |
1200 | */ | ||
1183 | if (!(er32(EECD) & E1000_EECD_SEC1VAL)) { | 1201 | if (!(er32(EECD) & E1000_EECD_SEC1VAL)) { |
1184 | new_bank_offset = nvm->flash_bank_size; | 1202 | new_bank_offset = nvm->flash_bank_size; |
1185 | old_bank_offset = 0; | 1203 | old_bank_offset = 0; |
@@ -1191,9 +1209,11 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
1191 | } | 1209 | } |
1192 | 1210 | ||
1193 | for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { | 1211 | for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { |
1194 | /* Determine whether to write the value stored | 1212 | /* |
1213 | * Determine whether to write the value stored | ||
1195 | * in the other NVM bank or a modified value stored | 1214 | * in the other NVM bank or a modified value stored |
1196 | * in the shadow RAM */ | 1215 | * in the shadow RAM |
1216 | */ | ||
1197 | if (dev_spec->shadow_ram[i].modified) { | 1217 | if (dev_spec->shadow_ram[i].modified) { |
1198 | data = dev_spec->shadow_ram[i].value; | 1218 | data = dev_spec->shadow_ram[i].value; |
1199 | } else { | 1219 | } else { |
@@ -1202,12 +1222,14 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
1202 | &data); | 1222 | &data); |
1203 | } | 1223 | } |
1204 | 1224 | ||
1205 | /* If the word is 0x13, then make sure the signature bits | 1225 | /* |
1226 | * If the word is 0x13, then make sure the signature bits | ||
1206 | * (15:14) are 11b until the commit has completed. | 1227 | * (15:14) are 11b until the commit has completed. |
1207 | * This will allow us to write 10b which indicates the | 1228 | * This will allow us to write 10b which indicates the |
1208 | * signature is valid. We want to do this after the write | 1229 | * signature is valid. We want to do this after the write |
1209 | * has completed so that we don't mark the segment valid | 1230 | * has completed so that we don't mark the segment valid |
1210 | * while the write is still in progress */ | 1231 | * while the write is still in progress |
1232 | */ | ||
1211 | if (i == E1000_ICH_NVM_SIG_WORD) | 1233 | if (i == E1000_ICH_NVM_SIG_WORD) |
1212 | data |= E1000_ICH_NVM_SIG_MASK; | 1234 | data |= E1000_ICH_NVM_SIG_MASK; |
1213 | 1235 | ||
@@ -1230,18 +1252,22 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
1230 | break; | 1252 | break; |
1231 | } | 1253 | } |
1232 | 1254 | ||
1233 | /* Don't bother writing the segment valid bits if sector | 1255 | /* |
1234 | * programming failed. */ | 1256 | * Don't bother writing the segment valid bits if sector |
1257 | * programming failed. | ||
1258 | */ | ||
1235 | if (ret_val) { | 1259 | if (ret_val) { |
1236 | hw_dbg(hw, "Flash commit failed.\n"); | 1260 | hw_dbg(hw, "Flash commit failed.\n"); |
1237 | e1000_release_swflag_ich8lan(hw); | 1261 | e1000_release_swflag_ich8lan(hw); |
1238 | return ret_val; | 1262 | return ret_val; |
1239 | } | 1263 | } |
1240 | 1264 | ||
1241 | /* Finally validate the new segment by setting bit 15:14 | 1265 | /* |
1266 | * Finally validate the new segment by setting bit 15:14 | ||
1242 | * to 10b in word 0x13 , this can be done without an | 1267 | * to 10b in word 0x13 , this can be done without an |
1243 | * erase as well since these bits are 11 to start with | 1268 | * erase as well since these bits are 11 to start with |
1244 | * and we need to change bit 14 to 0b */ | 1269 | * and we need to change bit 14 to 0b |
1270 | */ | ||
1245 | act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; | 1271 | act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; |
1246 | e1000_read_flash_word_ich8lan(hw, act_offset, &data); | 1272 | e1000_read_flash_word_ich8lan(hw, act_offset, &data); |
1247 | data &= 0xBFFF; | 1273 | data &= 0xBFFF; |
@@ -1253,10 +1279,12 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
1253 | return ret_val; | 1279 | return ret_val; |
1254 | } | 1280 | } |
1255 | 1281 | ||
1256 | /* And invalidate the previously valid segment by setting | 1282 | /* |
1283 | * And invalidate the previously valid segment by setting | ||
1257 | * its signature word (0x13) high_byte to 0b. This can be | 1284 | * its signature word (0x13) high_byte to 0b. This can be |
1258 | * done without an erase because flash erase sets all bits | 1285 | * done without an erase because flash erase sets all bits |
1259 | * to 1's. We can write 1's to 0's without an erase */ | 1286 | * to 1's. We can write 1's to 0's without an erase |
1287 | */ | ||
1260 | act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; | 1288 | act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; |
1261 | ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0); | 1289 | ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0); |
1262 | if (ret_val) { | 1290 | if (ret_val) { |
@@ -1272,7 +1300,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
1272 | 1300 | ||
1273 | e1000_release_swflag_ich8lan(hw); | 1301 | e1000_release_swflag_ich8lan(hw); |
1274 | 1302 | ||
1275 | /* Reload the EEPROM, or else modifications will not appear | 1303 | /* |
1304 | * Reload the EEPROM, or else modifications will not appear | ||
1276 | * until after the next adapter reset. | 1305 | * until after the next adapter reset. |
1277 | */ | 1306 | */ |
1278 | e1000e_reload_nvm(hw); | 1307 | e1000e_reload_nvm(hw); |
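The signature handling in the checksum-update hunks above (keep bits 15:14 of word 0x13 at 11b while writing, drop them to 10b once the commit succeeds, then zero the old bank's signature byte) reduces to two one-line transforms. A sketch; the 0xC000 mask is assumed from context and is consistent with the 0xBFFF clear visible in the hunk:

#include <stdint.h>

#define NVM_SIG_WORD	0x13
#define NVM_SIG_MASK	0xC000		/* assumed: bits 15:14 of word 0x13 */

/* While the commit is in progress the signature must stay 11b so the
 * half-written bank is not treated as valid. */
static uint16_t keep_new_bank_invalid(uint16_t data, unsigned word)
{
	return (word == NVM_SIG_WORD) ? (uint16_t)(data | NVM_SIG_MASK) : data;
}

/* 11b -> 10b marks the new bank valid; only bit 14 flips 1 -> 0, so the
 * write needs no erase cycle (erase sets bits to 1, writes clear them). */
static uint16_t mark_new_bank_valid(uint16_t data)
{
	return data & 0xBFFF;
}

The same property (1 -> 0 needs no erase) is what lets the old bank be invalidated afterwards by writing 0 to its signature high byte.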
@@ -1294,7 +1323,8 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
1294 | s32 ret_val; | 1323 | s32 ret_val; |
1295 | u16 data; | 1324 | u16 data; |
1296 | 1325 | ||
1297 | /* Read 0x19 and check bit 6. If this bit is 0, the checksum | 1326 | /* |
1327 | * Read 0x19 and check bit 6. If this bit is 0, the checksum | ||
1298 | * needs to be fixed. This bit is an indication that the NVM | 1328 | * needs to be fixed. This bit is an indication that the NVM |
1299 | * was prepared by OEM software and did not calculate the | 1329 | * was prepared by OEM software and did not calculate the |
1300 | * checksum...a likely scenario. | 1330 | * checksum...a likely scenario. |
@@ -1364,14 +1394,17 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, | |||
1364 | 1394 | ||
1365 | ew32flash(ICH_FLASH_FDATA0, flash_data); | 1395 | ew32flash(ICH_FLASH_FDATA0, flash_data); |
1366 | 1396 | ||
1367 | /* check if FCERR is set to 1 , if set to 1, clear it | 1397 | /* |
1368 | * and try the whole sequence a few more times else done */ | 1398 | * check if FCERR is set to 1 , if set to 1, clear it |
1399 | * and try the whole sequence a few more times else done | ||
1400 | */ | ||
1369 | ret_val = e1000_flash_cycle_ich8lan(hw, | 1401 | ret_val = e1000_flash_cycle_ich8lan(hw, |
1370 | ICH_FLASH_WRITE_COMMAND_TIMEOUT); | 1402 | ICH_FLASH_WRITE_COMMAND_TIMEOUT); |
1371 | if (!ret_val) | 1403 | if (!ret_val) |
1372 | break; | 1404 | break; |
1373 | 1405 | ||
1374 | /* If we're here, then things are most likely | 1406 | /* |
1407 | * If we're here, then things are most likely | ||
1375 | * completely hosed, but if the error condition | 1408 | * completely hosed, but if the error condition |
1376 | * is detected, it won't hurt to give it another | 1409 | * is detected, it won't hurt to give it another |
1377 | * try...ICH_FLASH_CYCLE_REPEAT_COUNT times. | 1410 | * try...ICH_FLASH_CYCLE_REPEAT_COUNT times. |
@@ -1462,9 +1495,10 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) | |||
1462 | 1495 | ||
1463 | hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); | 1496 | hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); |
1464 | 1497 | ||
1465 | /* Determine HW Sector size: Read BERASE bits of hw flash status | 1498 | /* |
1466 | * register */ | 1499 | * Determine HW Sector size: Read BERASE bits of hw flash status |
1467 | /* 00: The Hw sector is 256 bytes, hence we need to erase 16 | 1500 | * register |
1501 | * 00: The Hw sector is 256 bytes, hence we need to erase 16 | ||
1468 | * consecutive sectors. The start index for the nth Hw sector | 1502 | * consecutive sectors. The start index for the nth Hw sector |
1469 | * can be calculated as = bank * 4096 + n * 256 | 1503 | * can be calculated as = bank * 4096 + n * 256 |
1470 | * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector. | 1504 | * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector. |
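The BERASE decode spelled out in the comment above maps the two bit patterns shown in this hunk to a sector geometry. A sketch covering only those two encodings (the remaining patterns are elided here, as they are in the visible context):

static int berase_to_geometry(unsigned berase, unsigned *sector_size,
			      unsigned *sector_count)
{
	switch (berase) {
	case 0:			/* 00b: 256-byte hardware sectors */
		*sector_size  = 256;
		*sector_count = 16;	/* 16 * 256 covers one 4 KB bank segment */
		/* n-th sector of a bank starts at bank * 4096 + n * 256 */
		return 0;
	case 1:			/* 01b: 4 KB hardware sectors */
		*sector_size  = 4096;
		*sector_count = 1;
		return 0;
	default:
		return -1;	/* other encodings not covered in this sketch */
	}
}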
@@ -1511,13 +1545,16 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) | |||
1511 | if (ret_val) | 1545 | if (ret_val) |
1512 | return ret_val; | 1546 | return ret_val; |
1513 | 1547 | ||
1514 | /* Write a value 11 (block Erase) in Flash | 1548 | /* |
1515 | * Cycle field in hw flash control */ | 1549 | * Write a value 11 (block Erase) in Flash |
1550 | * Cycle field in hw flash control | ||
1551 | */ | ||
1516 | hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); | 1552 | hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); |
1517 | hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; | 1553 | hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; |
1518 | ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); | 1554 | ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); |
1519 | 1555 | ||
1520 | /* Write the last 24 bits of an index within the | 1556 | /* |
1557 | * Write the last 24 bits of an index within the | ||
1521 | * block into Flash Linear address field in Flash | 1558 | * block into Flash Linear address field in Flash |
1522 | * Address. | 1559 | * Address. |
1523 | */ | 1560 | */ |
@@ -1529,13 +1566,14 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) | |||
1529 | if (ret_val == 0) | 1566 | if (ret_val == 0) |
1530 | break; | 1567 | break; |
1531 | 1568 | ||
1532 | /* Check if FCERR is set to 1. If 1, | 1569 | /* |
1570 | * Check if FCERR is set to 1. If 1, | ||
1533 | * clear it and try the whole sequence | 1571 | * clear it and try the whole sequence |
1534 | * a few more times else Done */ | 1572 | * a few more times else Done |
1573 | */ | ||
1535 | hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); | 1574 | hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); |
1536 | if (hsfsts.hsf_status.flcerr == 1) | 1575 | if (hsfsts.hsf_status.flcerr == 1) |
1537 | /* repeat for some time before | 1576 | /* repeat for some time before giving up */ |
1538 | * giving up */ | ||
1539 | continue; | 1577 | continue; |
1540 | else if (hsfsts.hsf_status.flcdone == 0) | 1578 | else if (hsfsts.hsf_status.flcdone == 0) |
1541 | return ret_val; | 1579 | return ret_val; |
@@ -1585,7 +1623,8 @@ static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw) | |||
1585 | 1623 | ||
1586 | ret_val = e1000e_get_bus_info_pcie(hw); | 1624 | ret_val = e1000e_get_bus_info_pcie(hw); |
1587 | 1625 | ||
1588 | /* ICH devices are "PCI Express"-ish. They have | 1626 | /* |
1627 | * ICH devices are "PCI Express"-ish. They have | ||
1589 | * a configuration space, but do not contain | 1628 | * a configuration space, but do not contain |
1590 | * PCI Express Capability registers, so bus width | 1629 | * PCI Express Capability registers, so bus width |
1591 | * must be hardcoded. | 1630 | * must be hardcoded. |
@@ -1608,7 +1647,8 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) | |||
1608 | u32 ctrl, icr, kab; | 1647 | u32 ctrl, icr, kab; |
1609 | s32 ret_val; | 1648 | s32 ret_val; |
1610 | 1649 | ||
1611 | /* Prevent the PCI-E bus from sticking if there is no TLP connection | 1650 | /* |
1651 | * Prevent the PCI-E bus from sticking if there is no TLP connection | ||
1612 | * on the last TLP read/write transaction when MAC is reset. | 1652 | * on the last TLP read/write transaction when MAC is reset. |
1613 | */ | 1653 | */ |
1614 | ret_val = e1000e_disable_pcie_master(hw); | 1654 | ret_val = e1000e_disable_pcie_master(hw); |
@@ -1619,7 +1659,8 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) | |||
1619 | hw_dbg(hw, "Masking off all interrupts\n"); | 1659 | hw_dbg(hw, "Masking off all interrupts\n"); |
1620 | ew32(IMC, 0xffffffff); | 1660 | ew32(IMC, 0xffffffff); |
1621 | 1661 | ||
1622 | /* Disable the Transmit and Receive units. Then delay to allow | 1662 | /* |
1663 | * Disable the Transmit and Receive units. Then delay to allow | ||
1623 | * any pending transactions to complete before we hit the MAC | 1664 | * any pending transactions to complete before we hit the MAC |
1624 | * with the global reset. | 1665 | * with the global reset. |
1625 | */ | 1666 | */ |
@@ -1640,7 +1681,8 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) | |||
1640 | ctrl = er32(CTRL); | 1681 | ctrl = er32(CTRL); |
1641 | 1682 | ||
1642 | if (!e1000_check_reset_block(hw)) { | 1683 | if (!e1000_check_reset_block(hw)) { |
1643 | /* PHY HW reset requires MAC CORE reset at the same | 1684 | /* |
1685 | * PHY HW reset requires MAC CORE reset at the same | ||
1644 | * time to make sure the interface between MAC and the | 1686 | * time to make sure the interface between MAC and the |
1645 | * external PHY is reset. | 1687 | * external PHY is reset. |
1646 | */ | 1688 | */ |
@@ -1724,8 +1766,10 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) | |||
1724 | E1000_TXDCTL_MAX_TX_DESC_PREFETCH; | 1766 | E1000_TXDCTL_MAX_TX_DESC_PREFETCH; |
1725 | ew32(TXDCTL1, txdctl); | 1767 | ew32(TXDCTL1, txdctl); |
1726 | 1768 | ||
1727 | /* ICH8 has opposite polarity of no_snoop bits. | 1769 | /* |
1728 | * By default, we should use snoop behavior. */ | 1770 | * ICH8 has opposite polarity of no_snoop bits. |
1771 | * By default, we should use snoop behavior. | ||
1772 | */ | ||
1729 | if (mac->type == e1000_ich8lan) | 1773 | if (mac->type == e1000_ich8lan) |
1730 | snoop = PCIE_ICH8_SNOOP_ALL; | 1774 | snoop = PCIE_ICH8_SNOOP_ALL; |
1731 | else | 1775 | else |
@@ -1736,7 +1780,8 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) | |||
1736 | ctrl_ext |= E1000_CTRL_EXT_RO_DIS; | 1780 | ctrl_ext |= E1000_CTRL_EXT_RO_DIS; |
1737 | ew32(CTRL_EXT, ctrl_ext); | 1781 | ew32(CTRL_EXT, ctrl_ext); |
1738 | 1782 | ||
1739 | /* Clear all of the statistics registers (clear on read). It is | 1783 | /* |
1784 | * Clear all of the statistics registers (clear on read). It is | ||
1740 | * important that we do this after we have tried to establish link | 1785 | * important that we do this after we have tried to establish link |
1741 | * because the symbol error count will increment wildly if there | 1786 | * because the symbol error count will increment wildly if there |
1742 | * is no link. | 1787 | * is no link. |
@@ -1807,29 +1852,29 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) | |||
1807 | **/ | 1852 | **/ |
1808 | static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) | 1853 | static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) |
1809 | { | 1854 | { |
1810 | struct e1000_mac_info *mac = &hw->mac; | ||
1811 | s32 ret_val; | 1855 | s32 ret_val; |
1812 | 1856 | ||
1813 | if (e1000_check_reset_block(hw)) | 1857 | if (e1000_check_reset_block(hw)) |
1814 | return 0; | 1858 | return 0; |
1815 | 1859 | ||
1816 | /* ICH parts do not have a word in the NVM to determine | 1860 | /* |
1861 | * ICH parts do not have a word in the NVM to determine | ||
1817 | * the default flow control setting, so we explicitly | 1862 | * the default flow control setting, so we explicitly |
1818 | * set it to full. | 1863 | * set it to full. |
1819 | */ | 1864 | */ |
1820 | if (mac->fc == e1000_fc_default) | 1865 | if (hw->fc.type == e1000_fc_default) |
1821 | mac->fc = e1000_fc_full; | 1866 | hw->fc.type = e1000_fc_full; |
1822 | 1867 | ||
1823 | mac->original_fc = mac->fc; | 1868 | hw->fc.original_type = hw->fc.type; |
1824 | 1869 | ||
1825 | hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", mac->fc); | 1870 | hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", hw->fc.type); |
1826 | 1871 | ||
1827 | /* Continue to configure the copper link. */ | 1872 | /* Continue to configure the copper link. */ |
1828 | ret_val = e1000_setup_copper_link_ich8lan(hw); | 1873 | ret_val = e1000_setup_copper_link_ich8lan(hw); |
1829 | if (ret_val) | 1874 | if (ret_val) |
1830 | return ret_val; | 1875 | return ret_val; |
1831 | 1876 | ||
1832 | ew32(FCTTV, mac->fc_pause_time); | 1877 | ew32(FCTTV, hw->fc.pause_time); |
1833 | 1878 | ||
1834 | return e1000e_set_fc_watermarks(hw); | 1879 | return e1000e_set_fc_watermarks(hw); |
1835 | } | 1880 | } |
@@ -1853,9 +1898,11 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) | |||
1853 | ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); | 1898 | ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); |
1854 | ew32(CTRL, ctrl); | 1899 | ew32(CTRL, ctrl); |
1855 | 1900 | ||
1856 | /* Set the mac to wait the maximum time between each iteration | 1901 | /* |
1902 | * Set the mac to wait the maximum time between each iteration | ||
1857 | * and increase the max iterations when polling the phy; | 1903 | * and increase the max iterations when polling the phy; |
1858 | * this fixes erroneous timeouts at 10Mbps. */ | 1904 | * this fixes erroneous timeouts at 10Mbps. |
1905 | */ | ||
1859 | ret_val = e1000e_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF); | 1906 | ret_val = e1000e_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF); |
1860 | if (ret_val) | 1907 | if (ret_val) |
1861 | return ret_val; | 1908 | return ret_val; |
@@ -1882,7 +1929,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) | |||
1882 | * @speed: pointer to store current link speed | 1929 | * @speed: pointer to store current link speed |
1883 | * @duplex: pointer to store the current link duplex | 1930 | * @duplex: pointer to store the current link duplex |
1884 | * | 1931 | * |
1885 | * Calls the generic get_speed_and_duplex to retreive the current link | 1932 | * Calls the generic get_speed_and_duplex to retrieve the current link |
1886 | * information and then calls the Kumeran lock loss workaround for links at | 1933 | * information and then calls the Kumeran lock loss workaround for links at |
1887 | * gigabit speeds. | 1934 | * gigabit speeds. |
1888 | **/ | 1935 | **/ |
@@ -1930,9 +1977,11 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw) | |||
1930 | if (!dev_spec->kmrn_lock_loss_workaround_enabled) | 1977 | if (!dev_spec->kmrn_lock_loss_workaround_enabled) |
1931 | return 0; | 1978 | return 0; |
1932 | 1979 | ||
1933 | /* Make sure link is up before proceeding. If not just return. | 1980 | /* |
1981 | * Make sure link is up before proceeding. If not just return. | ||
1934 | * Attempting this while link is negotiating fouled up link | 1982 | * Attempting this while link is negotiating fouled up link |
1935 | * stability */ | 1983 | * stability |
1984 | */ | ||
1936 | ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); | 1985 | ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); |
1937 | if (!link) | 1986 | if (!link) |
1938 | return 0; | 1987 | return 0; |
@@ -1961,8 +2010,10 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw) | |||
1961 | E1000_PHY_CTRL_NOND0A_GBE_DISABLE); | 2010 | E1000_PHY_CTRL_NOND0A_GBE_DISABLE); |
1962 | ew32(PHY_CTRL, phy_ctrl); | 2011 | ew32(PHY_CTRL, phy_ctrl); |
1963 | 2012 | ||
1964 | /* Call gig speed drop workaround on Gig disable before accessing | 2013 | /* |
1965 | * any PHY registers */ | 2014 | * Call gig speed drop workaround on Gig disable before accessing |
2015 | * any PHY registers | ||
2016 | */ | ||
1966 | e1000e_gig_downshift_workaround_ich8lan(hw); | 2017 | e1000e_gig_downshift_workaround_ich8lan(hw); |
1967 | 2018 | ||
1968 | /* unable to acquire PCS lock */ | 2019 | /* unable to acquire PCS lock */ |
@@ -1970,7 +2021,7 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw) | |||
1970 | } | 2021 | } |
1971 | 2022 | ||
1972 | /** | 2023 | /** |
1973 | * e1000_set_kmrn_lock_loss_workaound_ich8lan - Set Kumeran workaround state | 2024 | * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state |
1974 | * @hw: pointer to the HW structure | 2025 | * @hw: pointer to the HW structure |
1975 | * @state: boolean value used to set the current Kumeran workaround state | 2026 | * @state: boolean value used to set the current Kumeran workaround state |
1976 | * | 2027 | * |
@@ -2017,8 +2068,10 @@ void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw) | |||
2017 | E1000_PHY_CTRL_NOND0A_GBE_DISABLE); | 2068 | E1000_PHY_CTRL_NOND0A_GBE_DISABLE); |
2018 | ew32(PHY_CTRL, reg); | 2069 | ew32(PHY_CTRL, reg); |
2019 | 2070 | ||
2020 | /* Call gig speed drop workaround on Gig disable before | 2071 | /* |
2021 | * accessing any PHY registers */ | 2072 | * Call gig speed drop workaround on Gig disable before |
2073 | * accessing any PHY registers | ||
2074 | */ | ||
2022 | if (hw->mac.type == e1000_ich8lan) | 2075 | if (hw->mac.type == e1000_ich8lan) |
2023 | e1000e_gig_downshift_workaround_ich8lan(hw); | 2076 | e1000e_gig_downshift_workaround_ich8lan(hw); |
2024 | 2077 | ||
@@ -2158,7 +2211,7 @@ static struct e1000_mac_operations ich8_mac_ops = { | |||
2158 | .get_link_up_info = e1000_get_link_up_info_ich8lan, | 2211 | .get_link_up_info = e1000_get_link_up_info_ich8lan, |
2159 | .led_on = e1000_led_on_ich8lan, | 2212 | .led_on = e1000_led_on_ich8lan, |
2160 | .led_off = e1000_led_off_ich8lan, | 2213 | .led_off = e1000_led_off_ich8lan, |
2161 | .mc_addr_list_update = e1000e_mc_addr_list_update_generic, | 2214 | .update_mc_addr_list = e1000e_update_mc_addr_list_generic, |
2162 | .reset_hw = e1000_reset_hw_ich8lan, | 2215 | .reset_hw = e1000_reset_hw_ich8lan, |
2163 | .init_hw = e1000_init_hw_ich8lan, | 2216 | .init_hw = e1000_init_hw_ich8lan, |
2164 | .setup_link = e1000_setup_link_ich8lan, | 2217 | .setup_link = e1000_setup_link_ich8lan, |
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c index 073934c7f73a..ea3ff6369c86 100644 --- a/drivers/net/e1000e/lib.c +++ b/drivers/net/e1000e/lib.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2007 Intel Corporation. | 4 | Copyright(c) 1999 - 2008 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -43,8 +43,8 @@ enum e1000_mng_mode { | |||
43 | 43 | ||
44 | #define E1000_FACTPS_MNGCG 0x20000000 | 44 | #define E1000_FACTPS_MNGCG 0x20000000 |
45 | 45 | ||
46 | #define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management | 46 | /* Intel(R) Active Management Technology signature */ |
47 | * Technology signature */ | 47 | #define E1000_IAMT_SIGNATURE 0x544D4149 |
48 | 48 | ||
49 | /** | 49 | /** |
50 | * e1000e_get_bus_info_pcie - Get PCIe bus information | 50 | * e1000e_get_bus_info_pcie - Get PCIe bus information |
@@ -142,7 +142,8 @@ void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) | |||
142 | { | 142 | { |
143 | u32 rar_low, rar_high; | 143 | u32 rar_low, rar_high; |
144 | 144 | ||
145 | /* HW expects these in little endian so we reverse the byte order | 145 | /* |
146 | * HW expects these in little endian so we reverse the byte order | ||
146 | * from network order (big endian) to little endian | 147 | * from network order (big endian) to little endian |
147 | */ | 148 | */ |
148 | rar_low = ((u32) addr[0] | | 149 | rar_low = ((u32) addr[0] | |
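The byte-order reversal described in that comment is just a repack of the six network-order address bytes into two little-endian register words. A self-contained sketch (the AV "address valid" bit set by the real e1000e_rar_set() is left out):

#include <stdint.h>

static void pack_rar(const uint8_t addr[6], uint32_t *rar_low,
		     uint32_t *rar_high)
{
	/* addr[] is in network order (big endian); the RAL/RAH registers
	 * want the bytes little endian, lowest byte first */
	*rar_low  = (uint32_t)addr[0] | ((uint32_t)addr[1] << 8) |
		    ((uint32_t)addr[2] << 16) | ((uint32_t)addr[3] << 24);
	*rar_high = (uint32_t)addr[4] | ((uint32_t)addr[5] << 8);
}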
@@ -171,7 +172,8 @@ static void e1000_mta_set(struct e1000_hw *hw, u32 hash_value) | |||
171 | { | 172 | { |
172 | u32 hash_bit, hash_reg, mta; | 173 | u32 hash_bit, hash_reg, mta; |
173 | 174 | ||
174 | /* The MTA is a register array of 32-bit registers. It is | 175 | /* |
176 | * The MTA is a register array of 32-bit registers. It is | ||
175 | * treated like an array of (32*mta_reg_count) bits. We want to | 177 | * treated like an array of (32*mta_reg_count) bits. We want to |
176 | * set bit BitArray[hash_value]. So we figure out what register | 178 | * set bit BitArray[hash_value]. So we figure out what register |
177 | * the bit is in, read it, OR in the new bit, then write | 179 | * the bit is in, read it, OR in the new bit, then write |
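The index arithmetic that comment describes, written out on its own: with 32 bits per MTA register, the low five bits of the hash pick the bit and the remaining bits pick the register. A sketch operating on a plain array instead of the device registers:

#include <stdint.h>

static void mta_set(uint32_t *mta, unsigned mta_reg_count,
		    uint32_t hash_value)
{
	uint32_t hash_reg = (hash_value >> 5) & (mta_reg_count - 1);
	uint32_t hash_bit = hash_value & 0x1F;

	/* read-modify-write of one 32-bit slot sets BitArray[hash_value]
	 * without disturbing the other bits in that register */
	mta[hash_reg] |= (1u << hash_bit);
}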
@@ -208,12 +210,15 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) | |||
208 | /* Register count multiplied by bits per register */ | 210 | /* Register count multiplied by bits per register */ |
209 | hash_mask = (hw->mac.mta_reg_count * 32) - 1; | 211 | hash_mask = (hw->mac.mta_reg_count * 32) - 1; |
210 | 212 | ||
211 | /* For a mc_filter_type of 0, bit_shift is the number of left-shifts | 213 | /* |
212 | * where 0xFF would still fall within the hash mask. */ | 214 | * For a mc_filter_type of 0, bit_shift is the number of left-shifts |
215 | * where 0xFF would still fall within the hash mask. | ||
216 | */ | ||
213 | while (hash_mask >> bit_shift != 0xFF) | 217 | while (hash_mask >> bit_shift != 0xFF) |
214 | bit_shift++; | 218 | bit_shift++; |
215 | 219 | ||
216 | /* The portion of the address that is used for the hash table | 220 | /* |
221 | * The portion of the address that is used for the hash table | ||
217 | * is determined by the mc_filter_type setting. | 222 | * is determined by the mc_filter_type setting. |
218 | * The algorithm is such that there is a total of 8 bits of shifting. | 223 | * The algorithm is such that there is a total of 8 bits of shifting. |
219 | * The bit_shift for a mc_filter_type of 0 represents the number of | 224 | * The bit_shift for a mc_filter_type of 0 represents the number of |
@@ -224,8 +229,8 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) | |||
224 | * cases are a variation of this algorithm...essentially raising the | 229 | * cases are a variation of this algorithm...essentially raising the |
225 | * number of bits to shift mc_addr[5] left, while still keeping the | 230 | * number of bits to shift mc_addr[5] left, while still keeping the |
226 | * 8-bit shifting total. | 231 | * 8-bit shifting total. |
227 | */ | 232 | * |
228 | /* For example, given the following Destination MAC Address and an | 233 | * For example, given the following Destination MAC Address and an |
229 | * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask), | 234 | * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask), |
230 | * we can see that the bit_shift for case 0 is 4. These are the hash | 235 | * we can see that the bit_shift for case 0 is 4. These are the hash |
231 | * values resulting from each mc_filter_type... | 236 | * values resulting from each mc_filter_type... |
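The bit_shift derivation in that comment can be followed more easily as a free-standing function. The sketch below handles only mc_filter_type 0 (the other types add to bit_shift before the final step, as the comment notes); with 128 MTA registers the mask is 0xFFF and the loop stops at bit_shift == 4, matching the example the comment refers to:

#include <stdint.h>

static uint32_t hash_mc_addr(const uint8_t *mc_addr, unsigned mta_reg_count)
{
	uint32_t hash_mask = mta_reg_count * 32 - 1;	/* 128 regs -> 0xFFF */
	unsigned bit_shift = 0;

	/* number of left-shifts for which 0xFF still fits inside the mask */
	while ((hash_mask >> bit_shift) != 0xFF)
		bit_shift++;

	/* upper bits of the hash come from address byte 5, the remaining
	 * low bits from the top of byte 4; 8 bits of shifting in total */
	return hash_mask & (((uint32_t)mc_addr[5] << bit_shift) |
			    (mc_addr[4] >> (8 - bit_shift)));
}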
@@ -260,7 +265,7 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) | |||
260 | } | 265 | } |
261 | 266 | ||
262 | /** | 267 | /** |
263 | * e1000e_mc_addr_list_update_generic - Update Multicast addresses | 268 | * e1000e_update_mc_addr_list_generic - Update Multicast addresses |
264 | * @hw: pointer to the HW structure | 269 | * @hw: pointer to the HW structure |
265 | * @mc_addr_list: array of multicast addresses to program | 270 | * @mc_addr_list: array of multicast addresses to program |
266 | * @mc_addr_count: number of multicast addresses to program | 271 | * @mc_addr_count: number of multicast addresses to program |
@@ -272,14 +277,15 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) | |||
272 | * The parameter rar_count will usually be hw->mac.rar_entry_count | 277 | * The parameter rar_count will usually be hw->mac.rar_entry_count |
273 | * unless there are workarounds that change this. | 278 | * unless there are workarounds that change this. |
274 | **/ | 279 | **/ |
275 | void e1000e_mc_addr_list_update_generic(struct e1000_hw *hw, | 280 | void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, |
276 | u8 *mc_addr_list, u32 mc_addr_count, | 281 | u8 *mc_addr_list, u32 mc_addr_count, |
277 | u32 rar_used_count, u32 rar_count) | 282 | u32 rar_used_count, u32 rar_count) |
278 | { | 283 | { |
279 | u32 hash_value; | 284 | u32 hash_value; |
280 | u32 i; | 285 | u32 i; |
281 | 286 | ||
282 | /* Load the first set of multicast addresses into the exact | 287 | /* |
288 | * Load the first set of multicast addresses into the exact | ||
283 | * filters (RAR). If there are not enough to fill the RAR | 289 | * filters (RAR). If there are not enough to fill the RAR |
284 | * array, clear the filters. | 290 | * array, clear the filters. |
285 | */ | 291 | */ |
@@ -375,7 +381,8 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) | |||
375 | s32 ret_val; | 381 | s32 ret_val; |
376 | bool link; | 382 | bool link; |
377 | 383 | ||
378 | /* We only want to go out to the PHY registers to see if Auto-Neg | 384 | /* |
385 | * We only want to go out to the PHY registers to see if Auto-Neg | ||
379 | * has completed and/or if our link status has changed. The | 386 | * has completed and/or if our link status has changed. The |
380 | * get_link_status flag is set upon receiving a Link Status | 387 | * get_link_status flag is set upon receiving a Link Status |
381 | * Change or Rx Sequence Error interrupt. | 388 | * Change or Rx Sequence Error interrupt. |
@@ -383,7 +390,8 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) | |||
383 | if (!mac->get_link_status) | 390 | if (!mac->get_link_status) |
384 | return 0; | 391 | return 0; |
385 | 392 | ||
386 | /* First we want to see if the MII Status Register reports | 393 | /* |
394 | * First we want to see if the MII Status Register reports | ||
387 | * link. If so, then we want to get the current speed/duplex | 395 | * link. If so, then we want to get the current speed/duplex |
388 | * of the PHY. | 396 | * of the PHY. |
389 | */ | 397 | */ |
@@ -396,11 +404,14 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) | |||
396 | 404 | ||
397 | mac->get_link_status = 0; | 405 | mac->get_link_status = 0; |
398 | 406 | ||
399 | /* Check if there was DownShift, must be checked | 407 | /* |
400 | * immediately after link-up */ | 408 | * Check if there was DownShift, must be checked |
409 | * immediately after link-up | ||
410 | */ | ||
401 | e1000e_check_downshift(hw); | 411 | e1000e_check_downshift(hw); |
402 | 412 | ||
403 | /* If we are forcing speed/duplex, then we simply return since | 413 | /* |
414 | * If we are forcing speed/duplex, then we simply return since | ||
404 | * we have already determined whether we have link or not. | 415 | * we have already determined whether we have link or not. |
405 | */ | 416 | */ |
406 | if (!mac->autoneg) { | 417 | if (!mac->autoneg) { |
@@ -408,13 +419,15 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) | |||
408 | return ret_val; | 419 | return ret_val; |
409 | } | 420 | } |
410 | 421 | ||
411 | /* Auto-Neg is enabled. Auto Speed Detection takes care | 422 | /* |
423 | * Auto-Neg is enabled. Auto Speed Detection takes care | ||
412 | * of MAC speed/duplex configuration. So we only need to | 424 | * of MAC speed/duplex configuration. So we only need to |
413 | * configure Collision Distance in the MAC. | 425 | * configure Collision Distance in the MAC. |
414 | */ | 426 | */ |
415 | e1000e_config_collision_dist(hw); | 427 | e1000e_config_collision_dist(hw); |
416 | 428 | ||
417 | /* Configure Flow Control now that Auto-Neg has completed. | 429 | /* |
430 | * Configure Flow Control now that Auto-Neg has completed. | ||
418 | * First, we need to restore the desired flow control | 431 | * First, we need to restore the desired flow control |
419 | * settings because we may have had to re-autoneg with a | 432 | * settings because we may have had to re-autoneg with a |
420 | * different link partner. | 433 | * different link partner. |
@@ -446,7 +459,8 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) | |||
446 | status = er32(STATUS); | 459 | status = er32(STATUS); |
447 | rxcw = er32(RXCW); | 460 | rxcw = er32(RXCW); |
448 | 461 | ||
449 | /* If we don't have link (auto-negotiation failed or link partner | 462 | /* |
463 | * If we don't have link (auto-negotiation failed or link partner | ||
450 | * cannot auto-negotiate), the cable is plugged in (we have signal), | 464 | * cannot auto-negotiate), the cable is plugged in (we have signal), |
451 | * and our link partner is not trying to auto-negotiate with us (we | 465 | * and our link partner is not trying to auto-negotiate with us (we |
452 | * are receiving idles or data), we need to force link up. We also | 466 | * are receiving idles or data), we need to force link up. We also |
@@ -477,7 +491,8 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) | |||
477 | return ret_val; | 491 | return ret_val; |
478 | } | 492 | } |
479 | } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { | 493 | } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { |
480 | /* If we are forcing link and we are receiving /C/ ordered | 494 | /* |
495 | * If we are forcing link and we are receiving /C/ ordered | ||
481 | * sets, re-enable auto-negotiation in the TXCW register | 496 | * sets, re-enable auto-negotiation in the TXCW register |
482 | * and disable forced link in the Device Control register | 497 | * and disable forced link in the Device Control register |
483 | * in an attempt to auto-negotiate with our link partner. | 498 | * in an attempt to auto-negotiate with our link partner. |
@@ -511,7 +526,8 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) | |||
511 | status = er32(STATUS); | 526 | status = er32(STATUS); |
512 | rxcw = er32(RXCW); | 527 | rxcw = er32(RXCW); |
513 | 528 | ||
514 | /* If we don't have link (auto-negotiation failed or link partner | 529 | /* |
530 | * If we don't have link (auto-negotiation failed or link partner | ||
515 | * cannot auto-negotiate), and our link partner is not trying to | 531 | * cannot auto-negotiate), and our link partner is not trying to |
516 | * auto-negotiate with us (we are receiving idles or data), | 532 | * auto-negotiate with us (we are receiving idles or data), |
517 | * we need to force link up. We also need to give auto-negotiation | 533 | * we need to force link up. We also need to give auto-negotiation |
@@ -540,7 +556,8 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) | |||
540 | return ret_val; | 556 | return ret_val; |
541 | } | 557 | } |
542 | } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { | 558 | } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { |
543 | /* If we are forcing link and we are receiving /C/ ordered | 559 | /* |
560 | * If we are forcing link and we are receiving /C/ ordered | ||
544 | * sets, re-enable auto-negotiation in the TXCW register | 561 | * sets, re-enable auto-negotiation in the TXCW register |
545 | * and disable forced link in the Device Control register | 562 | * and disable forced link in the Device Control register |
546 | * in an attempt to auto-negotiate with our link partner. | 563 | * in an attempt to auto-negotiate with our link partner. |
@@ -551,7 +568,8 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) | |||
551 | 568 | ||
552 | mac->serdes_has_link = 1; | 569 | mac->serdes_has_link = 1; |
553 | } else if (!(E1000_TXCW_ANE & er32(TXCW))) { | 570 | } else if (!(E1000_TXCW_ANE & er32(TXCW))) { |
554 | /* If we force link for non-auto-negotiation switch, check | 571 | /* |
572 | * If we force link for non-auto-negotiation switch, check | ||
555 | * link status based on MAC synchronization for internal | 573 | * link status based on MAC synchronization for internal |
556 | * serdes media type. | 574 | * serdes media type. |
557 | */ | 575 | */ |
@@ -585,11 +603,11 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) | |||
585 | **/ | 603 | **/ |
586 | static s32 e1000_set_default_fc_generic(struct e1000_hw *hw) | 604 | static s32 e1000_set_default_fc_generic(struct e1000_hw *hw) |
587 | { | 605 | { |
588 | struct e1000_mac_info *mac = &hw->mac; | ||
589 | s32 ret_val; | 606 | s32 ret_val; |
590 | u16 nvm_data; | 607 | u16 nvm_data; |
591 | 608 | ||
592 | /* Read and store word 0x0F of the EEPROM. This word contains bits | 609 | /* |
610 | * Read and store word 0x0F of the EEPROM. This word contains bits | ||
593 | * that determine the hardware's default PAUSE (flow control) mode, | 611 | * that determine the hardware's default PAUSE (flow control) mode, |
594 | * a bit that determines whether the HW defaults to enabling or | 612 | * a bit that determines whether the HW defaults to enabling or |
595 | * disabling auto-negotiation, and the direction of the | 613 | * disabling auto-negotiation, and the direction of the |
@@ -605,12 +623,12 @@ static s32 e1000_set_default_fc_generic(struct e1000_hw *hw) | |||
605 | } | 623 | } |
606 | 624 | ||
607 | if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) | 625 | if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) |
608 | mac->fc = e1000_fc_none; | 626 | hw->fc.type = e1000_fc_none; |
609 | else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == | 627 | else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == |
610 | NVM_WORD0F_ASM_DIR) | 628 | NVM_WORD0F_ASM_DIR) |
611 | mac->fc = e1000_fc_tx_pause; | 629 | hw->fc.type = e1000_fc_tx_pause; |
612 | else | 630 | else |
613 | mac->fc = e1000_fc_full; | 631 | hw->fc.type = e1000_fc_full; |
614 | 632 | ||
615 | return 0; | 633 | return 0; |
616 | } | 634 | } |
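The three-way decision in that hunk maps the PAUSE bits of EEPROM word 0x0F straight onto a flow-control type. A sketch of the same decision table; the mask values are assumed for illustration rather than taken from the driver headers:

enum fc_type { FC_NONE, FC_TX_PAUSE, FC_FULL };

#define WORD0F_PAUSE_MASK 0x3000	/* assumed values, for illustration */
#define WORD0F_ASM_DIR    0x2000

static enum fc_type default_fc_from_nvm(unsigned short nvm_word_0f)
{
	unsigned short pause = nvm_word_0f & WORD0F_PAUSE_MASK;

	if (pause == 0)
		return FC_NONE;		/* no PAUSE capability advertised */
	if (pause == WORD0F_ASM_DIR)
		return FC_TX_PAUSE;	/* asymmetric only: send, don't honor */
	return FC_FULL;			/* anything else: symmetric flow control */
}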
@@ -630,7 +648,8 @@ s32 e1000e_setup_link(struct e1000_hw *hw) | |||
630 | struct e1000_mac_info *mac = &hw->mac; | 648 | struct e1000_mac_info *mac = &hw->mac; |
631 | s32 ret_val; | 649 | s32 ret_val; |
632 | 650 | ||
633 | /* In the case of the phy reset being blocked, we already have a link. | 651 | /* |
652 | * In the case of the phy reset being blocked, we already have a link. | ||
634 | * We do not need to set it up again. | 653 | * We do not need to set it up again. |
635 | */ | 654 | */ |
636 | if (e1000_check_reset_block(hw)) | 655 | if (e1000_check_reset_block(hw)) |
@@ -640,26 +659,28 @@ s32 e1000e_setup_link(struct e1000_hw *hw) | |||
640 | * If flow control is set to default, set flow control based on | 659 | * If flow control is set to default, set flow control based on |
641 | * the EEPROM flow control settings. | 660 | * the EEPROM flow control settings. |
642 | */ | 661 | */ |
643 | if (mac->fc == e1000_fc_default) { | 662 | if (hw->fc.type == e1000_fc_default) { |
644 | ret_val = e1000_set_default_fc_generic(hw); | 663 | ret_val = e1000_set_default_fc_generic(hw); |
645 | if (ret_val) | 664 | if (ret_val) |
646 | return ret_val; | 665 | return ret_val; |
647 | } | 666 | } |
648 | 667 | ||
649 | /* We want to save off the original Flow Control configuration just | 668 | /* |
669 | * We want to save off the original Flow Control configuration just | ||
650 | * in case we get disconnected and then reconnected into a different | 670 | * in case we get disconnected and then reconnected into a different |
651 | * hub or switch with different Flow Control capabilities. | 671 | * hub or switch with different Flow Control capabilities. |
652 | */ | 672 | */ |
653 | mac->original_fc = mac->fc; | 673 | hw->fc.original_type = hw->fc.type; |
654 | 674 | ||
655 | hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", mac->fc); | 675 | hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", hw->fc.type); |
656 | 676 | ||
657 | /* Call the necessary media_type subroutine to configure the link. */ | 677 | /* Call the necessary media_type subroutine to configure the link. */ |
658 | ret_val = mac->ops.setup_physical_interface(hw); | 678 | ret_val = mac->ops.setup_physical_interface(hw); |
659 | if (ret_val) | 679 | if (ret_val) |
660 | return ret_val; | 680 | return ret_val; |
661 | 681 | ||
662 | /* Initialize the flow control address, type, and PAUSE timer | 682 | /* |
683 | * Initialize the flow control address, type, and PAUSE timer | ||
663 | * registers to their default values. This is done even if flow | 684 | * registers to their default values. This is done even if flow |
664 | * control is disabled, because it does not hurt anything to | 685 | * control is disabled, because it does not hurt anything to |
665 | * initialize these registers. | 686 | * initialize these registers. |
@@ -669,7 +690,7 @@ s32 e1000e_setup_link(struct e1000_hw *hw) | |||
669 | ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); | 690 | ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); |
670 | ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); | 691 | ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); |
671 | 692 | ||
672 | ew32(FCTTV, mac->fc_pause_time); | 693 | ew32(FCTTV, hw->fc.pause_time); |
673 | 694 | ||
674 | return e1000e_set_fc_watermarks(hw); | 695 | return e1000e_set_fc_watermarks(hw); |
675 | } | 696 | } |
@@ -686,7 +707,8 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw) | |||
686 | struct e1000_mac_info *mac = &hw->mac; | 707 | struct e1000_mac_info *mac = &hw->mac; |
687 | u32 txcw; | 708 | u32 txcw; |
688 | 709 | ||
689 | /* Check for a software override of the flow control settings, and | 710 | /* |
711 | * Check for a software override of the flow control settings, and | ||
690 | * setup the device accordingly. If auto-negotiation is enabled, then | 712 | * setup the device accordingly. If auto-negotiation is enabled, then |
691 | * software will have to set the "PAUSE" bits to the correct value in | 713 | * software will have to set the "PAUSE" bits to the correct value in |
692 | * the Transmit Config Word Register (TXCW) and re-start auto- | 714 | * the Transmit Config Word Register (TXCW) and re-start auto- |
@@ -700,31 +722,34 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw) | |||
700 | * but not send pause frames). | 722 | * but not send pause frames). |
701 | * 2: Tx flow control is enabled (we can send pause frames but we | 723 | * 2: Tx flow control is enabled (we can send pause frames but we |
702 | * do not support receiving pause frames). | 724 | * do not support receiving pause frames). |
703 | * 3: Both Rx and TX flow control (symmetric) are enabled. | 725 | * 3: Both Rx and Tx flow control (symmetric) are enabled. |
704 | */ | 726 | */ |
705 | switch (mac->fc) { | 727 | switch (hw->fc.type) { |
706 | case e1000_fc_none: | 728 | case e1000_fc_none: |
707 | /* Flow control completely disabled by a software over-ride. */ | 729 | /* Flow control completely disabled by a software over-ride. */ |
708 | txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); | 730 | txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); |
709 | break; | 731 | break; |
710 | case e1000_fc_rx_pause: | 732 | case e1000_fc_rx_pause: |
711 | /* RX Flow control is enabled and TX Flow control is disabled | 733 | /* |
734 | * Rx Flow control is enabled and Tx Flow control is disabled | ||
712 | * by a software over-ride. Since there really isn't a way to | 735 | * by a software over-ride. Since there really isn't a way to |
713 | * advertise that we are capable of RX Pause ONLY, we will | 736 | * advertise that we are capable of Rx Pause ONLY, we will |
714 | * advertise that we support both symmetric and asymmetric RX | 737 | * advertise that we support both symmetric and asymmetric Rx |
715 | * PAUSE. Later, we will disable the adapter's ability to send | 738 | * PAUSE. Later, we will disable the adapter's ability to send |
716 | * PAUSE frames. | 739 | * PAUSE frames. |
717 | */ | 740 | */ |
718 | txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); | 741 | txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); |
719 | break; | 742 | break; |
720 | case e1000_fc_tx_pause: | 743 | case e1000_fc_tx_pause: |
721 | /* TX Flow control is enabled, and RX Flow control is disabled, | 744 | /* |
745 | * Tx Flow control is enabled, and Rx Flow control is disabled, | ||
722 | * by a software over-ride. | 746 | * by a software over-ride. |
723 | */ | 747 | */ |
724 | txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); | 748 | txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); |
725 | break; | 749 | break; |
726 | case e1000_fc_full: | 750 | case e1000_fc_full: |
727 | /* Flow control (both RX and TX) is enabled by a software | 751 | /* |
752 | * Flow control (both Rx and Tx) is enabled by a software | ||
728 | * over-ride. | 753 | * over-ride. |
729 | */ | 754 | */ |
730 | txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); | 755 | txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); |
@@ -754,7 +779,8 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) | |||
754 | u32 i, status; | 779 | u32 i, status; |
755 | s32 ret_val; | 780 | s32 ret_val; |
756 | 781 | ||
757 | /* If we have a signal (the cable is plugged in, or assumed true for | 782 | /* |
783 | * If we have a signal (the cable is plugged in, or assumed true for | ||
758 | * serdes media) then poll for a "Link-Up" indication in the Device | 784 | * serdes media) then poll for a "Link-Up" indication in the Device |
759 | * Status Register. Time-out if a link isn't seen in 500 milliseconds | 785 | * Status Register. Time-out if a link isn't seen in 500 milliseconds |
760 | * seconds (Auto-negotiation should complete in less than 500 | 786 | * seconds (Auto-negotiation should complete in less than 500 |
@@ -769,7 +795,8 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) | |||
769 | if (i == FIBER_LINK_UP_LIMIT) { | 795 | if (i == FIBER_LINK_UP_LIMIT) { |
770 | hw_dbg(hw, "Never got a valid link from auto-neg!!!\n"); | 796 | hw_dbg(hw, "Never got a valid link from auto-neg!!!\n"); |
771 | mac->autoneg_failed = 1; | 797 | mac->autoneg_failed = 1; |
772 | /* AutoNeg failed to achieve a link, so we'll call | 798 | /* |
799 | * AutoNeg failed to achieve a link, so we'll call | ||
773 | * mac->check_for_link. This routine will force the | 800 | * mac->check_for_link. This routine will force the |
774 | * link up if we detect a signal. This will allow us to | 801 | * link up if we detect a signal. This will allow us to |
775 | * communicate with non-autonegotiating link partners. | 802 | * communicate with non-autonegotiating link partners. |
@@ -811,7 +838,8 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw) | |||
811 | if (ret_val) | 838 | if (ret_val) |
812 | return ret_val; | 839 | return ret_val; |
813 | 840 | ||
814 | /* Since auto-negotiation is enabled, take the link out of reset (the | 841 | /* |
842 | * Since auto-negotiation is enabled, take the link out of reset (the | ||
815 | * link will be in reset, because we previously reset the chip). This | 843 | * link will be in reset, because we previously reset the chip). This |
816 | * will restart auto-negotiation. If auto-negotiation is successful | 844 | * will restart auto-negotiation. If auto-negotiation is successful |
817 | * then the link-up status bit will be set and the flow control enable | 845 | * then the link-up status bit will be set and the flow control enable |
@@ -823,11 +851,12 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw) | |||
823 | e1e_flush(); | 851 | e1e_flush(); |
824 | msleep(1); | 852 | msleep(1); |
825 | 853 | ||
826 | /* For these adapters, the SW defineable pin 1 is set when the optics | 854 | /* |
855 | * For these adapters, the SW definable pin 1 is set when the optics | ||
827 | * detect a signal. If we have a signal, then poll for a "Link-Up" | 856 | * detect a signal. If we have a signal, then poll for a "Link-Up" |
828 | * indication. | 857 | * indication. |
829 | */ | 858 | */ |
830 | if (hw->media_type == e1000_media_type_internal_serdes || | 859 | if (hw->phy.media_type == e1000_media_type_internal_serdes || |
831 | (er32(CTRL) & E1000_CTRL_SWDPIN1)) { | 860 | (er32(CTRL) & E1000_CTRL_SWDPIN1)) { |
832 | ret_val = e1000_poll_fiber_serdes_link_generic(hw); | 861 | ret_val = e1000_poll_fiber_serdes_link_generic(hw); |
833 | } else { | 862 | } else { |
@@ -864,27 +893,28 @@ void e1000e_config_collision_dist(struct e1000_hw *hw) | |||
864 | * | 893 | * |
865 | * Sets the flow control high/low threshold (watermark) registers. If | 894 | * Sets the flow control high/low threshold (watermark) registers. If |
866 | * flow control XON frame transmission is enabled, then set XON frame | 895 | * flow control XON frame transmission is enabled, then set XON frame |
867 | * tansmission as well. | 896 | * transmission as well. |
868 | **/ | 897 | **/ |
869 | s32 e1000e_set_fc_watermarks(struct e1000_hw *hw) | 898 | s32 e1000e_set_fc_watermarks(struct e1000_hw *hw) |
870 | { | 899 | { |
871 | struct e1000_mac_info *mac = &hw->mac; | ||
872 | u32 fcrtl = 0, fcrth = 0; | 900 | u32 fcrtl = 0, fcrth = 0; |
873 | 901 | ||
874 | /* Set the flow control receive threshold registers. Normally, | 902 | /* |
903 | * Set the flow control receive threshold registers. Normally, | ||
875 | * these registers will be set to a default threshold that may be | 904 | * these registers will be set to a default threshold that may be |
876 | * adjusted later by the driver's runtime code. However, if the | 905 | * adjusted later by the driver's runtime code. However, if the |
877 | * ability to transmit pause frames is not enabled, then these | 906 | * ability to transmit pause frames is not enabled, then these |
878 | * registers will be set to 0. | 907 | * registers will be set to 0. |
879 | */ | 908 | */ |
880 | if (mac->fc & e1000_fc_tx_pause) { | 909 | if (hw->fc.type & e1000_fc_tx_pause) { |
881 | /* We need to set up the Receive Threshold high and low water | 910 | /* |
911 | * We need to set up the Receive Threshold high and low water | ||
882 | * marks as well as (optionally) enabling the transmission of | 912 | * marks as well as (optionally) enabling the transmission of |
883 | * XON frames. | 913 | * XON frames. |
884 | */ | 914 | */ |
885 | fcrtl = mac->fc_low_water; | 915 | fcrtl = hw->fc.low_water; |
886 | fcrtl |= E1000_FCRTL_XONE; | 916 | fcrtl |= E1000_FCRTL_XONE; |
887 | fcrth = mac->fc_high_water; | 917 | fcrth = hw->fc.high_water; |
888 | } | 918 | } |
889 | ew32(FCRTL, fcrtl); | 919 | ew32(FCRTL, fcrtl); |
890 | ew32(FCRTH, fcrth); | 920 | ew32(FCRTH, fcrth); |
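Apart from moving the fields from mac->fc_* to hw->fc.*, the logic above is unchanged: the FCRTL/FCRTH watermarks (plus the XON-enable bit) are programmed only when transmitting PAUSE frames is allowed, otherwise both registers are zeroed. A stripped-down sketch under those assumptions, with the register writes reduced to plain assignments and illustrative names:

#define FCRTL_XONE   (1u << 31)	/* XON frame transmission enable (illustrative) */
#define FC_TX_PAUSE  0x2	/* Tx pause allowed (also set in "full") */

struct fc_info {
	unsigned int type;	/* none/rx/tx/full, encoded as bit flags */
	unsigned int high_water;
	unsigned int low_water;
};

static void fc_watermark_regs(const struct fc_info *fc,
			      unsigned int *fcrtl, unsigned int *fcrth)
{
	*fcrtl = 0;
	*fcrth = 0;

	/* Thresholds only matter if we are allowed to send PAUSE frames. */
	if (fc->type & FC_TX_PAUSE) {
		*fcrtl = fc->low_water | FCRTL_XONE;
		*fcrth = fc->high_water;
	}
}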
@@ -904,18 +934,18 @@ s32 e1000e_set_fc_watermarks(struct e1000_hw *hw) | |||
904 | **/ | 934 | **/ |
905 | s32 e1000e_force_mac_fc(struct e1000_hw *hw) | 935 | s32 e1000e_force_mac_fc(struct e1000_hw *hw) |
906 | { | 936 | { |
907 | struct e1000_mac_info *mac = &hw->mac; | ||
908 | u32 ctrl; | 937 | u32 ctrl; |
909 | 938 | ||
910 | ctrl = er32(CTRL); | 939 | ctrl = er32(CTRL); |
911 | 940 | ||
912 | /* Because we didn't get link via the internal auto-negotiation | 941 | /* |
942 | * Because we didn't get link via the internal auto-negotiation | ||
913 | * mechanism (we either forced link or we got link via PHY | 943 | * mechanism (we either forced link or we got link via PHY |
914 | * auto-neg), we have to manually enable/disable transmit an | 944 | * auto-neg), we have to manually enable/disable transmit an |
915 | * receive flow control. | 945 | * receive flow control. |
916 | * | 946 | * |
917 | * The "Case" statement below enables/disable flow control | 947 | * The "Case" statement below enables/disable flow control |
918 | * according to the "mac->fc" parameter. | 948 | * according to the "hw->fc.type" parameter. |
919 | * | 949 | * |
920 | * The possible values of the "fc" parameter are: | 950 | * The possible values of the "fc" parameter are: |
921 | * 0: Flow control is completely disabled | 951 | * 0: Flow control is completely disabled |
@@ -923,12 +953,12 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw) | |||
923 | * frames but not send pause frames). | 953 | * frames but not send pause frames). |
924 | * 2: Tx flow control is enabled (we can send pause frames | 954 | * 2: Tx flow control is enabled (we can send pause frames |
925 | * frames but we do not receive pause frames). | 955 | * frames but we do not receive pause frames). |
926 | * 3: Both Rx and TX flow control (symmetric) is enabled. | 956 | * 3: Both Rx and Tx flow control (symmetric) is enabled. |
927 | * other: No other values should be possible at this point. | 957 | * other: No other values should be possible at this point. |
928 | */ | 958 | */ |
929 | hw_dbg(hw, "mac->fc = %u\n", mac->fc); | 959 | hw_dbg(hw, "hw->fc.type = %u\n", hw->fc.type); |
930 | 960 | ||
931 | switch (mac->fc) { | 961 | switch (hw->fc.type) { |
932 | case e1000_fc_none: | 962 | case e1000_fc_none: |
933 | ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE)); | 963 | ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE)); |
934 | break; | 964 | break; |
@@ -970,16 +1000,17 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
970 | u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; | 1000 | u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; |
971 | u16 speed, duplex; | 1001 | u16 speed, duplex; |
972 | 1002 | ||
973 | /* Check for the case where we have fiber media and auto-neg failed | 1003 | /* |
1004 | * Check for the case where we have fiber media and auto-neg failed | ||
974 | * so we had to force link. In this case, we need to force the | 1005 | * so we had to force link. In this case, we need to force the |
975 | * configuration of the MAC to match the "fc" parameter. | 1006 | * configuration of the MAC to match the "fc" parameter. |
976 | */ | 1007 | */ |
977 | if (mac->autoneg_failed) { | 1008 | if (mac->autoneg_failed) { |
978 | if (hw->media_type == e1000_media_type_fiber || | 1009 | if (hw->phy.media_type == e1000_media_type_fiber || |
979 | hw->media_type == e1000_media_type_internal_serdes) | 1010 | hw->phy.media_type == e1000_media_type_internal_serdes) |
980 | ret_val = e1000e_force_mac_fc(hw); | 1011 | ret_val = e1000e_force_mac_fc(hw); |
981 | } else { | 1012 | } else { |
982 | if (hw->media_type == e1000_media_type_copper) | 1013 | if (hw->phy.media_type == e1000_media_type_copper) |
983 | ret_val = e1000e_force_mac_fc(hw); | 1014 | ret_val = e1000e_force_mac_fc(hw); |
984 | } | 1015 | } |
985 | 1016 | ||
@@ -988,13 +1019,15 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
988 | return ret_val; | 1019 | return ret_val; |
989 | } | 1020 | } |
990 | 1021 | ||
991 | /* Check for the case where we have copper media and auto-neg is | 1022 | /* |
1023 | * Check for the case where we have copper media and auto-neg is | ||
992 | * enabled. In this case, we need to check and see if Auto-Neg | 1024 | * enabled. In this case, we need to check and see if Auto-Neg |
993 | * has completed, and if so, how the PHY and link partner has | 1025 | * has completed, and if so, how the PHY and link partner has |
994 | * flow control configured. | 1026 | * flow control configured. |
995 | */ | 1027 | */ |
996 | if ((hw->media_type == e1000_media_type_copper) && mac->autoneg) { | 1028 | if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) { |
997 | /* Read the MII Status Register and check to see if AutoNeg | 1029 | /* |
1030 | * Read the MII Status Register and check to see if AutoNeg | ||
998 | * has completed. We read this twice because this reg has | 1031 | * has completed. We read this twice because this reg has |
999 | * some "sticky" (latched) bits. | 1032 | * some "sticky" (latched) bits. |
1000 | */ | 1033 | */ |
@@ -1011,7 +1044,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1011 | return ret_val; | 1044 | return ret_val; |
1012 | } | 1045 | } |
1013 | 1046 | ||
1014 | /* The AutoNeg process has completed, so we now need to | 1047 | /* |
1048 | * The AutoNeg process has completed, so we now need to | ||
1015 | * read both the Auto Negotiation Advertisement | 1049 | * read both the Auto Negotiation Advertisement |
1016 | * Register (Address 4) and the Auto_Negotiation Base | 1050 | * Register (Address 4) and the Auto_Negotiation Base |
1017 | * Page Ability Register (Address 5) to determine how | 1051 | * Page Ability Register (Address 5) to determine how |
@@ -1024,7 +1058,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1024 | if (ret_val) | 1058 | if (ret_val) |
1025 | return ret_val; | 1059 | return ret_val; |
1026 | 1060 | ||
1027 | /* Two bits in the Auto Negotiation Advertisement Register | 1061 | /* |
1062 | * Two bits in the Auto Negotiation Advertisement Register | ||
1028 | * (Address 4) and two bits in the Auto Negotiation Base | 1063 | * (Address 4) and two bits in the Auto Negotiation Base |
1029 | * Page Ability Register (Address 5) determine flow control | 1064 | * Page Ability Register (Address 5) determine flow control |
1030 | * for both the PHY and the link partner. The following | 1065 | * for both the PHY and the link partner. The following |
@@ -1045,8 +1080,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1045 | * 1 | 1 | 0 | 0 | e1000_fc_none | 1080 | * 1 | 1 | 0 | 0 | e1000_fc_none |
1046 | * 1 | 1 | 0 | 1 | e1000_fc_rx_pause | 1081 | * 1 | 1 | 0 | 1 | e1000_fc_rx_pause |
1047 | * | 1082 | * |
1048 | */ | 1083 | * |
1049 | /* Are both PAUSE bits set to 1? If so, this implies | 1084 | * Are both PAUSE bits set to 1? If so, this implies |
1050 | * Symmetric Flow Control is enabled at both ends. The | 1085 | * Symmetric Flow Control is enabled at both ends. The |
1051 | * ASM_DIR bits are irrelevant per the spec. | 1086 | * ASM_DIR bits are irrelevant per the spec. |
1052 | * | 1087 | * |
@@ -1060,22 +1095,24 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1060 | */ | 1095 | */ |
1061 | if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && | 1096 | if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && |
1062 | (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { | 1097 | (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { |
1063 | /* Now we need to check if the user selected RX ONLY | 1098 | /* |
1099 | * Now we need to check if the user selected Rx ONLY | ||
1064 | * of pause frames. In this case, we had to advertise | 1100 | * of pause frames. In this case, we had to advertise |
1065 | * FULL flow control because we could not advertise RX | 1101 | * FULL flow control because we could not advertise Rx |
1066 | * ONLY. Hence, we must now check to see if we need to | 1102 | * ONLY. Hence, we must now check to see if we need to |
1067 | * turn OFF the TRANSMISSION of PAUSE frames. | 1103 | * turn OFF the TRANSMISSION of PAUSE frames. |
1068 | */ | 1104 | */ |
1069 | if (mac->original_fc == e1000_fc_full) { | 1105 | if (hw->fc.original_type == e1000_fc_full) { |
1070 | mac->fc = e1000_fc_full; | 1106 | hw->fc.type = e1000_fc_full; |
1071 | hw_dbg(hw, "Flow Control = FULL.\r\n"); | 1107 | hw_dbg(hw, "Flow Control = FULL.\r\n"); |
1072 | } else { | 1108 | } else { |
1073 | mac->fc = e1000_fc_rx_pause; | 1109 | hw->fc.type = e1000_fc_rx_pause; |
1074 | hw_dbg(hw, "Flow Control = " | 1110 | hw_dbg(hw, "Flow Control = " |
1075 | "RX PAUSE frames only.\r\n"); | 1111 | "RX PAUSE frames only.\r\n"); |
1076 | } | 1112 | } |
1077 | } | 1113 | } |
1078 | /* For receiving PAUSE frames ONLY. | 1114 | /* |
1115 | * For receiving PAUSE frames ONLY. | ||
1079 | * | 1116 | * |
1080 | * LOCAL DEVICE | LINK PARTNER | 1117 | * LOCAL DEVICE | LINK PARTNER |
1081 | * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result | 1118 | * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result |
@@ -1087,10 +1124,11 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1087 | (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && | 1124 | (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && |
1088 | (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && | 1125 | (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && |
1089 | (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { | 1126 | (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { |
1090 | mac->fc = e1000_fc_tx_pause; | 1127 | hw->fc.type = e1000_fc_tx_pause; |
1091 | hw_dbg(hw, "Flow Control = TX PAUSE frames only.\r\n"); | 1128 | hw_dbg(hw, "Flow Control = Tx PAUSE frames only.\r\n"); |
1092 | } | 1129 | } |
1093 | /* For transmitting PAUSE frames ONLY. | 1130 | /* |
1131 | * For transmitting PAUSE frames ONLY. | ||
1094 | * | 1132 | * |
1095 | * LOCAL DEVICE | LINK PARTNER | 1133 | * LOCAL DEVICE | LINK PARTNER |
1096 | * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result | 1134 | * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result |
@@ -1102,18 +1140,19 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1102 | (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && | 1140 | (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && |
1103 | !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && | 1141 | !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && |
1104 | (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { | 1142 | (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { |
1105 | mac->fc = e1000_fc_rx_pause; | 1143 | hw->fc.type = e1000_fc_rx_pause; |
1106 | hw_dbg(hw, "Flow Control = RX PAUSE frames only.\r\n"); | 1144 | hw_dbg(hw, "Flow Control = Rx PAUSE frames only.\r\n"); |
1107 | } else { | 1145 | } else { |
1108 | /* | 1146 | /* |
1109 | * Per the IEEE spec, at this point flow control | 1147 | * Per the IEEE spec, at this point flow control |
1110 | * should be disabled. | 1148 | * should be disabled. |
1111 | */ | 1149 | */ |
1112 | mac->fc = e1000_fc_none; | 1150 | hw->fc.type = e1000_fc_none; |
1113 | hw_dbg(hw, "Flow Control = NONE.\r\n"); | 1151 | hw_dbg(hw, "Flow Control = NONE.\r\n"); |
1114 | } | 1152 | } |
1115 | 1153 | ||
1116 | /* Now we need to do one last check... If we auto- | 1154 | /* |
1155 | * Now we need to do one last check... If we auto- | ||
1117 | * negotiated to HALF DUPLEX, flow control should not be | 1156 | * negotiated to HALF DUPLEX, flow control should not be |
1118 | * enabled per IEEE 802.3 spec. | 1157 | * enabled per IEEE 802.3 spec. |
1119 | */ | 1158 | */ |
@@ -1124,9 +1163,10 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1124 | } | 1163 | } |
1125 | 1164 | ||
1126 | if (duplex == HALF_DUPLEX) | 1165 | if (duplex == HALF_DUPLEX) |
1127 | mac->fc = e1000_fc_none; | 1166 | hw->fc.type = e1000_fc_none; |
1128 | 1167 | ||
1129 | /* Now we call a subroutine to actually force the MAC | 1168 | /* |
1169 | * Now we call a subroutine to actually force the MAC | ||
1130 | * controller to use the correct flow control settings. | 1170 | * controller to use the correct flow control settings. |
1131 | */ | 1171 | */ |
1132 | ret_val = e1000e_force_mac_fc(hw); | 1172 | ret_val = e1000e_force_mac_fc(hw); |
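The truth tables in the comments above encode the IEEE 802.3 Annex 28B pause resolution: the locally advertised PAUSE/ASM_DIR bits and the link partner's ability bits decide the final mode, with one special case where a full advertisement falls back to Rx-only pause if that is what was originally requested (and half duplex later forces flow control off entirely). A compact, driver-independent sketch of that decision; the decoded-bit structure and names are assumptions, not the driver's types:

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

struct pause_adv {
	int pause;	/* PAUSE bit from the NWAY advertisement/ability word */
	int asm_dir;	/* ASM_DIR bit from the same word */
};

static enum fc_mode resolve_fc(struct pause_adv local, struct pause_adv partner,
			       enum fc_mode requested)
{
	if (local.pause && partner.pause)
		/* symmetric pause on both ends; honour an Rx-only request */
		return requested == FC_FULL ? FC_FULL : FC_RX_PAUSE;

	if (!local.pause && local.asm_dir && partner.pause && partner.asm_dir)
		return FC_TX_PAUSE;	/* we may send PAUSE, partner only honours it */

	if (local.pause && local.asm_dir && !partner.pause && partner.asm_dir)
		return FC_RX_PAUSE;	/* partner may send PAUSE, we only honour it */

	return FC_NONE;			/* every other combination: no flow control */
}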
@@ -1393,13 +1433,15 @@ s32 e1000e_blink_led(struct e1000_hw *hw) | |||
1393 | u32 ledctl_blink = 0; | 1433 | u32 ledctl_blink = 0; |
1394 | u32 i; | 1434 | u32 i; |
1395 | 1435 | ||
1396 | if (hw->media_type == e1000_media_type_fiber) { | 1436 | if (hw->phy.media_type == e1000_media_type_fiber) { |
1397 | /* always blink LED0 for PCI-E fiber */ | 1437 | /* always blink LED0 for PCI-E fiber */ |
1398 | ledctl_blink = E1000_LEDCTL_LED0_BLINK | | 1438 | ledctl_blink = E1000_LEDCTL_LED0_BLINK | |
1399 | (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); | 1439 | (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); |
1400 | } else { | 1440 | } else { |
1401 | /* set the blink bit for each LED that's "on" (0x0E) | 1441 | /* |
1402 | * in ledctl_mode2 */ | 1442 | * set the blink bit for each LED that's "on" (0x0E) |
1443 | * in ledctl_mode2 | ||
1444 | */ | ||
1403 | ledctl_blink = hw->mac.ledctl_mode2; | 1445 | ledctl_blink = hw->mac.ledctl_mode2; |
1404 | for (i = 0; i < 4; i++) | 1446 | for (i = 0; i < 4; i++) |
1405 | if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) == | 1447 | if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) == |
@@ -1423,7 +1465,7 @@ s32 e1000e_led_on_generic(struct e1000_hw *hw) | |||
1423 | { | 1465 | { |
1424 | u32 ctrl; | 1466 | u32 ctrl; |
1425 | 1467 | ||
1426 | switch (hw->media_type) { | 1468 | switch (hw->phy.media_type) { |
1427 | case e1000_media_type_fiber: | 1469 | case e1000_media_type_fiber: |
1428 | ctrl = er32(CTRL); | 1470 | ctrl = er32(CTRL); |
1429 | ctrl &= ~E1000_CTRL_SWDPIN0; | 1471 | ctrl &= ~E1000_CTRL_SWDPIN0; |
@@ -1450,7 +1492,7 @@ s32 e1000e_led_off_generic(struct e1000_hw *hw) | |||
1450 | { | 1492 | { |
1451 | u32 ctrl; | 1493 | u32 ctrl; |
1452 | 1494 | ||
1453 | switch (hw->media_type) { | 1495 | switch (hw->phy.media_type) { |
1454 | case e1000_media_type_fiber: | 1496 | case e1000_media_type_fiber: |
1455 | ctrl = er32(CTRL); | 1497 | ctrl = er32(CTRL); |
1456 | ctrl |= E1000_CTRL_SWDPIN0; | 1498 | ctrl |= E1000_CTRL_SWDPIN0; |
@@ -1562,8 +1604,7 @@ void e1000e_update_adaptive(struct e1000_hw *hw) | |||
1562 | else | 1604 | else |
1563 | mac->current_ifs_val += | 1605 | mac->current_ifs_val += |
1564 | mac->ifs_step_size; | 1606 | mac->ifs_step_size; |
1565 | ew32(AIT, | 1607 | ew32(AIT, mac->current_ifs_val); |
1566 | mac->current_ifs_val); | ||
1567 | } | 1608 | } |
1568 | } | 1609 | } |
1569 | } else { | 1610 | } else { |
@@ -1826,10 +1867,12 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw) | |||
1826 | udelay(1); | 1867 | udelay(1); |
1827 | timeout = NVM_MAX_RETRY_SPI; | 1868 | timeout = NVM_MAX_RETRY_SPI; |
1828 | 1869 | ||
1829 | /* Read "Status Register" repeatedly until the LSB is cleared. | 1870 | /* |
1871 | * Read "Status Register" repeatedly until the LSB is cleared. | ||
1830 | * The EEPROM will signal that the command has been completed | 1872 | * The EEPROM will signal that the command has been completed |
1831 | * by clearing bit 0 of the internal status register. If it's | 1873 | * by clearing bit 0 of the internal status register. If it's |
1832 | * not cleared within 'timeout', then error out. */ | 1874 | * not cleared within 'timeout', then error out. |
1875 | */ | ||
1833 | while (timeout) { | 1876 | while (timeout) { |
1834 | e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, | 1877 | e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, |
1835 | hw->nvm.opcode_bits); | 1878 | hw->nvm.opcode_bits); |
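The polling loop being reformatted above sends the SPI read-status opcode and waits for bit 0 (write-in-progress) to clear before the retry budget runs out. The same pattern as a generic helper; the transfer callback merely stands in for the driver's EEC bit-banging routines and is not its real API:

#include <stdbool.h>
#include <stdint.h>

#define NVM_RDSR_OPCODE 0x05	/* standard SPI "read status register" opcode */
#define NVM_STATUS_BUSY 0x01	/* bit 0: a write/erase is still in progress */

/* spi_xfer() stands in for the driver's shift-out/shift-in helpers. */
static bool spi_eeprom_ready(uint8_t (*spi_xfer)(uint8_t opcode), int retries)
{
	while (retries--) {
		if (!(spi_xfer(NVM_RDSR_OPCODE) & NVM_STATUS_BUSY))
			return true;	/* previous command completed */
		/* a short delay between polls (udelay() in the driver) goes here */
	}
	return false;			/* timed out, caller errors out */
}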
@@ -1866,8 +1909,10 @@ s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) | |||
1866 | u32 i, eerd = 0; | 1909 | u32 i, eerd = 0; |
1867 | s32 ret_val = 0; | 1910 | s32 ret_val = 0; |
1868 | 1911 | ||
1869 | /* A check for invalid values: offset too large, too many words, | 1912 | /* |
1870 | * and not enough words. */ | 1913 | * A check for invalid values: offset too large, too many words, |
1914 | * too many words for the offset, and not enough words. | ||
1915 | */ | ||
1871 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || | 1916 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || |
1872 | (words == 0)) { | 1917 | (words == 0)) { |
1873 | hw_dbg(hw, "nvm parameter(s) out of bounds\n"); | 1918 | hw_dbg(hw, "nvm parameter(s) out of bounds\n"); |
@@ -1883,8 +1928,7 @@ s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) | |||
1883 | if (ret_val) | 1928 | if (ret_val) |
1884 | break; | 1929 | break; |
1885 | 1930 | ||
1886 | data[i] = (er32(EERD) >> | 1931 | data[i] = (er32(EERD) >> E1000_NVM_RW_REG_DATA); |
1887 | E1000_NVM_RW_REG_DATA); | ||
1888 | } | 1932 | } |
1889 | 1933 | ||
1890 | return ret_val; | 1934 | return ret_val; |
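Both this EERD read path and the SPI write path below reject the same three invalid requests before touching the hardware: an offset beyond the NVM, a word count that runs past its end, and a zero-length request. The guard expressed on its own, as a sketch rather than the driver's helper:

#include <stdbool.h>
#include <stdint.h>

/* True when [offset, offset + words) lies inside an NVM of word_size
 * 16-bit words and the request is non-empty. */
static bool nvm_request_valid(uint16_t offset, uint16_t words, uint16_t word_size)
{
	return offset < word_size &&
	       words != 0 &&
	       words <= (uint16_t)(word_size - offset);
}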
@@ -1908,8 +1952,10 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) | |||
1908 | s32 ret_val; | 1952 | s32 ret_val; |
1909 | u16 widx = 0; | 1953 | u16 widx = 0; |
1910 | 1954 | ||
1911 | /* A check for invalid values: offset too large, too many words, | 1955 | /* |
1912 | * and not enough words. */ | 1956 | * A check for invalid values: offset too large, too many words, |
1957 | * and not enough words. | ||
1958 | */ | ||
1913 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || | 1959 | if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || |
1914 | (words == 0)) { | 1960 | (words == 0)) { |
1915 | hw_dbg(hw, "nvm parameter(s) out of bounds\n"); | 1961 | hw_dbg(hw, "nvm parameter(s) out of bounds\n"); |
@@ -1939,8 +1985,10 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) | |||
1939 | 1985 | ||
1940 | e1000_standby_nvm(hw); | 1986 | e1000_standby_nvm(hw); |
1941 | 1987 | ||
1942 | /* Some SPI eeproms use the 8th address bit embedded in the | 1988 | /* |
1943 | * opcode */ | 1989 | * Some SPI eeproms use the 8th address bit embedded in the |
1990 | * opcode | ||
1991 | */ | ||
1944 | if ((nvm->address_bits == 8) && (offset >= 128)) | 1992 | if ((nvm->address_bits == 8) && (offset >= 128)) |
1945 | write_opcode |= NVM_A8_OPCODE_SPI; | 1993 | write_opcode |= NVM_A8_OPCODE_SPI; |
1946 | 1994 | ||
@@ -1985,9 +2033,9 @@ s32 e1000e_read_mac_addr(struct e1000_hw *hw) | |||
1985 | /* Check for an alternate MAC address. An alternate MAC | 2033 | /* Check for an alternate MAC address. An alternate MAC |
1986 | * address can be setup by pre-boot software and must be | 2034 | * address can be setup by pre-boot software and must be |
1987 | * treated like a permanent address and must override the | 2035 | * treated like a permanent address and must override the |
1988 | * actual permanent MAC address. */ | 2036 | * actual permanent MAC address.*/ |
1989 | ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, | 2037 | ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, |
1990 | &mac_addr_offset); | 2038 | &mac_addr_offset); |
1991 | if (ret_val) { | 2039 | if (ret_val) { |
1992 | hw_dbg(hw, "NVM Read Error\n"); | 2040 | hw_dbg(hw, "NVM Read Error\n"); |
1993 | return ret_val; | 2041 | return ret_val; |
@@ -2000,7 +2048,7 @@ s32 e1000e_read_mac_addr(struct e1000_hw *hw) | |||
2000 | mac_addr_offset += ETH_ALEN/sizeof(u16); | 2048 | mac_addr_offset += ETH_ALEN/sizeof(u16); |
2001 | 2049 | ||
2002 | /* make sure we have a valid mac address here | 2050 | /* make sure we have a valid mac address here |
2003 | * before using it */ | 2051 | * before using it */ |
2004 | ret_val = e1000_read_nvm(hw, mac_addr_offset, 1, | 2052 | ret_val = e1000_read_nvm(hw, mac_addr_offset, 1, |
2005 | &nvm_data); | 2053 | &nvm_data); |
2006 | if (ret_val) { | 2054 | if (ret_val) { |
@@ -2012,7 +2060,7 @@ s32 e1000e_read_mac_addr(struct e1000_hw *hw) | |||
2012 | } | 2060 | } |
2013 | 2061 | ||
2014 | if (mac_addr_offset) | 2062 | if (mac_addr_offset) |
2015 | hw->dev_spec.e82571.alt_mac_addr_is_present = 1; | 2063 | hw->dev_spec.e82571.alt_mac_addr_is_present = 1; |
2016 | } | 2064 | } |
2017 | 2065 | ||
2018 | for (i = 0; i < ETH_ALEN; i += 2) { | 2066 | for (i = 0; i < ETH_ALEN; i += 2) { |
@@ -2188,7 +2236,7 @@ bool e1000e_check_mng_mode(struct e1000_hw *hw) | |||
2188 | } | 2236 | } |
2189 | 2237 | ||
2190 | /** | 2238 | /** |
2191 | * e1000e_enable_tx_pkt_filtering - Enable packet filtering on TX | 2239 | * e1000e_enable_tx_pkt_filtering - Enable packet filtering on Tx |
2192 | * @hw: pointer to the HW structure | 2240 | * @hw: pointer to the HW structure |
2193 | * | 2241 | * |
2194 | * Enables packet filtering on transmit packets if manageability is enabled | 2242 | * Enables packet filtering on transmit packets if manageability is enabled |
@@ -2208,7 +2256,8 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw) | |||
2208 | return 0; | 2256 | return 0; |
2209 | } | 2257 | } |
2210 | 2258 | ||
2211 | /* If we can't read from the host interface for whatever | 2259 | /* |
2260 | * If we can't read from the host interface for whatever | ||
2212 | * reason, disable filtering. | 2261 | * reason, disable filtering. |
2213 | */ | 2262 | */ |
2214 | ret_val = e1000_mng_enable_host_if(hw); | 2263 | ret_val = e1000_mng_enable_host_if(hw); |
@@ -2226,7 +2275,8 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw) | |||
2226 | hdr->checksum = 0; | 2275 | hdr->checksum = 0; |
2227 | csum = e1000_calculate_checksum((u8 *)hdr, | 2276 | csum = e1000_calculate_checksum((u8 *)hdr, |
2228 | E1000_MNG_DHCP_COOKIE_LENGTH); | 2277 | E1000_MNG_DHCP_COOKIE_LENGTH); |
2229 | /* If either the checksums or signature don't match, then | 2278 | /* |
2279 | * If either the checksums or signature don't match, then | ||
2230 | * the cookie area isn't considered valid, in which case we | 2280 | * the cookie area isn't considered valid, in which case we |
2231 | * take the safe route of assuming Tx filtering is enabled. | 2281 | * take the safe route of assuming Tx filtering is enabled. |
2232 | */ | 2282 | */ |
@@ -2318,8 +2368,10 @@ static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, | |||
2318 | /* Calculate length in DWORDs */ | 2368 | /* Calculate length in DWORDs */ |
2319 | length >>= 2; | 2369 | length >>= 2; |
2320 | 2370 | ||
2321 | /* The device driver writes the relevant command block into the | 2371 | /* |
2322 | * ram area. */ | 2372 | * The device driver writes the relevant command block into the |
2373 | * ram area. | ||
2374 | */ | ||
2323 | for (i = 0; i < length; i++) { | 2375 | for (i = 0; i < length; i++) { |
2324 | for (j = 0; j < sizeof(u32); j++) { | 2376 | for (j = 0; j < sizeof(u32); j++) { |
2325 | *(tmp + j) = *bufptr++; | 2377 | *(tmp + j) = *bufptr++; |
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index f501dd5e7b16..d70bde03619e 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2007 Intel Corporation. | 4 | Copyright(c) 1999 - 2008 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -82,7 +82,7 @@ static int e1000_desc_unused(struct e1000_ring *ring) | |||
82 | } | 82 | } |
83 | 83 | ||
84 | /** | 84 | /** |
85 | * e1000_receive_skb - helper function to handle rx indications | 85 | * e1000_receive_skb - helper function to handle Rx indications |
86 | * @adapter: board private structure | 86 | * @adapter: board private structure |
87 | * @status: descriptor status field as written by hardware | 87 | * @status: descriptor status field as written by hardware |
88 | * @vlan: descriptor vlan field as written by hardware (no le/be conversion) | 88 | * @vlan: descriptor vlan field as written by hardware (no le/be conversion) |
@@ -138,8 +138,9 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, | |||
138 | /* TCP checksum is good */ | 138 | /* TCP checksum is good */ |
139 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 139 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
140 | } else { | 140 | } else { |
141 | /* IP fragment with UDP payload */ | 141 | /* |
142 | /* Hardware complements the payload checksum, so we undo it | 142 | * IP fragment with UDP payload |
143 | * Hardware complements the payload checksum, so we undo it | ||
143 | * and then put the value in host order for further stack use. | 144 | * and then put the value in host order for further stack use. |
144 | */ | 145 | */ |
145 | __sum16 sum = (__force __sum16)htons(csum); | 146 | __sum16 sum = (__force __sum16)htons(csum); |
@@ -182,7 +183,8 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, | |||
182 | break; | 183 | break; |
183 | } | 184 | } |
184 | 185 | ||
185 | /* Make buffer alignment 2 beyond a 16 byte boundary | 186 | /* |
187 | * Make buffer alignment 2 beyond a 16 byte boundary | ||
186 | * this will result in a 16 byte aligned IP header after | 188 | * this will result in a 16 byte aligned IP header after |
187 | * the 14 byte MAC header is removed | 189 | * the 14 byte MAC header is removed |
188 | */ | 190 | */ |
@@ -213,10 +215,12 @@ map_skb: | |||
213 | if (i-- == 0) | 215 | if (i-- == 0) |
214 | i = (rx_ring->count - 1); | 216 | i = (rx_ring->count - 1); |
215 | 217 | ||
216 | /* Force memory writes to complete before letting h/w | 218 | /* |
219 | * Force memory writes to complete before letting h/w | ||
217 | * know there are new descriptors to fetch. (Only | 220 | * know there are new descriptors to fetch. (Only |
218 | * applicable for weak-ordered memory model archs, | 221 | * applicable for weak-ordered memory model archs, |
219 | * such as IA-64). */ | 222 | * such as IA-64). |
223 | */ | ||
220 | wmb(); | 224 | wmb(); |
221 | writel(i, adapter->hw.hw_addr + rx_ring->tail); | 225 | writel(i, adapter->hw.hw_addr + rx_ring->tail); |
222 | } | 226 | } |
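The "alignment 2 beyond a 16 byte boundary" comments are the usual NET_IP_ALIGN trick: reserving 2 bytes ahead of the 14-byte Ethernet header leaves the IP header on a 16-byte boundary once the MAC header is pulled. A tiny check of the arithmetic, assuming NET_IP_ALIGN is 2 as on most architectures:

#include <assert.h>

#define NET_IP_ALIGN 2	/* typical value; a few architectures define it as 0 */
#define ETH_HLEN     14	/* destination MAC + source MAC + EtherType */

int main(void)
{
	/* pretend the buffer starts 16-byte aligned, like fresh skb->data */
	unsigned long data = 0x1000;

	data += NET_IP_ALIGN;	/* the skb_reserve(skb, NET_IP_ALIGN) step */
	data += ETH_HLEN;	/* stack pulls the 14-byte MAC header */

	assert(data % 16 == 0);	/* IP header is back on a 16-byte boundary */
	return 0;
}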
@@ -285,7 +289,8 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | |||
285 | break; | 289 | break; |
286 | } | 290 | } |
287 | 291 | ||
288 | /* Make buffer alignment 2 beyond a 16 byte boundary | 292 | /* |
293 | * Make buffer alignment 2 beyond a 16 byte boundary | ||
289 | * this will result in a 16 byte aligned IP header after | 294 | * this will result in a 16 byte aligned IP header after |
290 | * the 14 byte MAC header is removed | 295 | * the 14 byte MAC header is removed |
291 | */ | 296 | */ |
@@ -319,12 +324,15 @@ no_buffers: | |||
319 | if (!(i--)) | 324 | if (!(i--)) |
320 | i = (rx_ring->count - 1); | 325 | i = (rx_ring->count - 1); |
321 | 326 | ||
322 | /* Force memory writes to complete before letting h/w | 327 | /* |
328 | * Force memory writes to complete before letting h/w | ||
323 | * know there are new descriptors to fetch. (Only | 329 | * know there are new descriptors to fetch. (Only |
324 | * applicable for weak-ordered memory model archs, | 330 | * applicable for weak-ordered memory model archs, |
325 | * such as IA-64). */ | 331 | * such as IA-64). |
332 | */ | ||
326 | wmb(); | 333 | wmb(); |
327 | /* Hardware increments by 16 bytes, but packet split | 334 | /* |
335 | * Hardware increments by 16 bytes, but packet split | ||
328 | * descriptors are 32 bytes...so we increment tail | 336 | * descriptors are 32 bytes...so we increment tail |
329 | * twice as much. | 337 | * twice as much. |
330 | */ | 338 | */ |
@@ -409,9 +417,11 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, | |||
409 | total_rx_bytes += length; | 417 | total_rx_bytes += length; |
410 | total_rx_packets++; | 418 | total_rx_packets++; |
411 | 419 | ||
412 | /* code added for copybreak, this should improve | 420 | /* |
421 | * code added for copybreak, this should improve | ||
413 | * performance for small packets with large amounts | 422 | * performance for small packets with large amounts |
414 | * of reassembly being done in the stack */ | 423 | * of reassembly being done in the stack |
424 | */ | ||
415 | if (length < copybreak) { | 425 | if (length < copybreak) { |
416 | struct sk_buff *new_skb = | 426 | struct sk_buff *new_skb = |
417 | netdev_alloc_skb(netdev, length + NET_IP_ALIGN); | 427 | netdev_alloc_skb(netdev, length + NET_IP_ALIGN); |
@@ -581,14 +591,15 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) | |||
581 | } | 591 | } |
582 | 592 | ||
583 | if (adapter->detect_tx_hung) { | 593 | if (adapter->detect_tx_hung) { |
584 | /* Detect a transmit hang in hardware, this serializes the | 594 | /* |
585 | * check with the clearing of time_stamp and movement of i */ | 595 | * Detect a transmit hang in hardware, this serializes the |
596 | * check with the clearing of time_stamp and movement of i | ||
597 | */ | ||
586 | adapter->detect_tx_hung = 0; | 598 | adapter->detect_tx_hung = 0; |
587 | if (tx_ring->buffer_info[eop].dma && | 599 | if (tx_ring->buffer_info[eop].dma && |
588 | time_after(jiffies, tx_ring->buffer_info[eop].time_stamp | 600 | time_after(jiffies, tx_ring->buffer_info[eop].time_stamp |
589 | + (adapter->tx_timeout_factor * HZ)) | 601 | + (adapter->tx_timeout_factor * HZ)) |
590 | && !(er32(STATUS) & | 602 | && !(er32(STATUS) & E1000_STATUS_TXOFF)) { |
591 | E1000_STATUS_TXOFF)) { | ||
592 | e1000_print_tx_hang(adapter); | 603 | e1000_print_tx_hang(adapter); |
593 | netif_stop_queue(netdev); | 604 | netif_stop_queue(netdev); |
594 | } | 605 | } |
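The hang check above only declares a transmit hang when three things hold at once: the end-of-packet buffer is still DMA-mapped, its timestamp is older than tx_timeout_factor * HZ, and the hardware is not merely paused by flow control (TXOFF clear). The predicate restated on its own; time_after is re-declared here the way the kernel defines it, the rest of the names are illustrative:

#include <stdbool.h>

/* Simplified copy of the kernel's wrap-safe jiffies comparison. */
#define time_after(a, b) ((long)((b) - (a)) < 0)

struct eop_info {
	unsigned long dma;		/* non-zero while the descriptor is mapped */
	unsigned long time_stamp;	/* jiffies when the packet was queued */
};

static bool tx_looks_hung(const struct eop_info *eop, unsigned long now,
			  unsigned long timeout, bool paused_by_fc)
{
	return eop->dma &&
	       time_after(now, eop->time_stamp + timeout) &&
	       !paused_by_fc;	/* STATUS.TXOFF set means a flow-control pause */
}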
@@ -677,21 +688,28 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | |||
677 | skb_put(skb, length); | 688 | skb_put(skb, length); |
678 | 689 | ||
679 | { | 690 | { |
680 | /* this looks ugly, but it seems compiler issues make it | 691 | /* |
681 | more efficient than reusing j */ | 692 | * this looks ugly, but it seems compiler issues make it |
693 | * more efficient than reusing j | ||
694 | */ | ||
682 | int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]); | 695 | int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]); |
683 | 696 | ||
684 | /* page alloc/put takes too long and effects small packet | 697 | /* |
685 | * throughput, so unsplit small packets and save the alloc/put*/ | 698 | * page alloc/put takes too long and effects small packet |
699 | * throughput, so unsplit small packets and save the alloc/put | ||
700 | * only valid in softirq (napi) context to call kmap_* | ||
701 | */ | ||
686 | if (l1 && (l1 <= copybreak) && | 702 | if (l1 && (l1 <= copybreak) && |
687 | ((length + l1) <= adapter->rx_ps_bsize0)) { | 703 | ((length + l1) <= adapter->rx_ps_bsize0)) { |
688 | u8 *vaddr; | 704 | u8 *vaddr; |
689 | 705 | ||
690 | ps_page = &buffer_info->ps_pages[0]; | 706 | ps_page = &buffer_info->ps_pages[0]; |
691 | 707 | ||
692 | /* there is no documentation about how to call | 708 | /* |
709 | * there is no documentation about how to call | ||
693 | * kmap_atomic, so we can't hold the mapping | 710 | * kmap_atomic, so we can't hold the mapping |
694 | * very long */ | 711 | * very long |
712 | */ | ||
695 | pci_dma_sync_single_for_cpu(pdev, ps_page->dma, | 713 | pci_dma_sync_single_for_cpu(pdev, ps_page->dma, |
696 | PAGE_SIZE, PCI_DMA_FROMDEVICE); | 714 | PAGE_SIZE, PCI_DMA_FROMDEVICE); |
697 | vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ); | 715 | vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ); |
@@ -836,24 +854,31 @@ static irqreturn_t e1000_intr_msi(int irq, void *data) | |||
836 | struct e1000_hw *hw = &adapter->hw; | 854 | struct e1000_hw *hw = &adapter->hw; |
837 | u32 icr = er32(ICR); | 855 | u32 icr = er32(ICR); |
838 | 856 | ||
839 | /* read ICR disables interrupts using IAM */ | 857 | /* |
858 | * read ICR disables interrupts using IAM | ||
859 | */ | ||
840 | 860 | ||
841 | if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { | 861 | if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { |
842 | hw->mac.get_link_status = 1; | 862 | hw->mac.get_link_status = 1; |
843 | /* ICH8 workaround-- Call gig speed drop workaround on cable | 863 | /* |
844 | * disconnect (LSC) before accessing any PHY registers */ | 864 | * ICH8 workaround-- Call gig speed drop workaround on cable |
865 | * disconnect (LSC) before accessing any PHY registers | ||
866 | */ | ||
845 | if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && | 867 | if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && |
846 | (!(er32(STATUS) & E1000_STATUS_LU))) | 868 | (!(er32(STATUS) & E1000_STATUS_LU))) |
847 | e1000e_gig_downshift_workaround_ich8lan(hw); | 869 | e1000e_gig_downshift_workaround_ich8lan(hw); |
848 | 870 | ||
849 | /* 80003ES2LAN workaround-- For packet buffer work-around on | 871 | /* |
872 | * 80003ES2LAN workaround-- For packet buffer work-around on | ||
850 | * link down event; disable receives here in the ISR and reset | 873 | * link down event; disable receives here in the ISR and reset |
851 | * adapter in watchdog */ | 874 | * adapter in watchdog |
875 | */ | ||
852 | if (netif_carrier_ok(netdev) && | 876 | if (netif_carrier_ok(netdev) && |
853 | adapter->flags & FLAG_RX_NEEDS_RESTART) { | 877 | adapter->flags & FLAG_RX_NEEDS_RESTART) { |
854 | /* disable receives */ | 878 | /* disable receives */ |
855 | u32 rctl = er32(RCTL); | 879 | u32 rctl = er32(RCTL); |
856 | ew32(RCTL, rctl & ~E1000_RCTL_EN); | 880 | ew32(RCTL, rctl & ~E1000_RCTL_EN); |
881 | adapter->flags |= FLAG_RX_RESTART_NOW; | ||
857 | } | 882 | } |
858 | /* guard against interrupt when we're going down */ | 883 | /* guard against interrupt when we're going down */ |
859 | if (!test_bit(__E1000_DOWN, &adapter->state)) | 884 | if (!test_bit(__E1000_DOWN, &adapter->state)) |
@@ -886,23 +911,31 @@ static irqreturn_t e1000_intr(int irq, void *data) | |||
886 | if (!icr) | 911 | if (!icr) |
887 | return IRQ_NONE; /* Not our interrupt */ | 912 | return IRQ_NONE; /* Not our interrupt */ |
888 | 913 | ||
889 | /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is | 914 | /* |
890 | * not set, then the adapter didn't send an interrupt */ | 915 | * IMS will not auto-mask if INT_ASSERTED is not set, and if it is |
916 | * not set, then the adapter didn't send an interrupt | ||
917 | */ | ||
891 | if (!(icr & E1000_ICR_INT_ASSERTED)) | 918 | if (!(icr & E1000_ICR_INT_ASSERTED)) |
892 | return IRQ_NONE; | 919 | return IRQ_NONE; |
893 | 920 | ||
894 | /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No | 921 | /* |
895 | * need for the IMC write */ | 922 | * Interrupt Auto-Mask...upon reading ICR, |
923 | * interrupts are masked. No need for the | ||
924 | * IMC write | ||
925 | */ | ||
896 | 926 | ||
897 | if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { | 927 | if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { |
898 | hw->mac.get_link_status = 1; | 928 | hw->mac.get_link_status = 1; |
899 | /* ICH8 workaround-- Call gig speed drop workaround on cable | 929 | /* |
900 | * disconnect (LSC) before accessing any PHY registers */ | 930 | * ICH8 workaround-- Call gig speed drop workaround on cable |
931 | * disconnect (LSC) before accessing any PHY registers | ||
932 | */ | ||
901 | if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && | 933 | if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && |
902 | (!(er32(STATUS) & E1000_STATUS_LU))) | 934 | (!(er32(STATUS) & E1000_STATUS_LU))) |
903 | e1000e_gig_downshift_workaround_ich8lan(hw); | 935 | e1000e_gig_downshift_workaround_ich8lan(hw); |
904 | 936 | ||
905 | /* 80003ES2LAN workaround-- | 937 | /* |
938 | * 80003ES2LAN workaround-- | ||
906 | * For packet buffer work-around on link down event; | 939 | * For packet buffer work-around on link down event; |
907 | * disable receives here in the ISR and | 940 | * disable receives here in the ISR and |
908 | * reset adapter in watchdog | 941 | * reset adapter in watchdog |
@@ -912,6 +945,7 @@ static irqreturn_t e1000_intr(int irq, void *data) | |||
912 | /* disable receives */ | 945 | /* disable receives */ |
913 | rctl = er32(RCTL); | 946 | rctl = er32(RCTL); |
914 | ew32(RCTL, rctl & ~E1000_RCTL_EN); | 947 | ew32(RCTL, rctl & ~E1000_RCTL_EN); |
948 | adapter->flags |= FLAG_RX_RESTART_NOW; | ||
915 | } | 949 | } |
916 | /* guard against interrupt when we're going down */ | 950 | /* guard against interrupt when we're going down */ |
917 | if (!test_bit(__E1000_DOWN, &adapter->state)) | 951 | if (!test_bit(__E1000_DOWN, &adapter->state)) |
@@ -1011,8 +1045,7 @@ static void e1000_get_hw_control(struct e1000_adapter *adapter) | |||
1011 | ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD); | 1045 | ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD); |
1012 | } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { | 1046 | } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { |
1013 | ctrl_ext = er32(CTRL_EXT); | 1047 | ctrl_ext = er32(CTRL_EXT); |
1014 | ew32(CTRL_EXT, | 1048 | ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); |
1015 | ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); | ||
1016 | } | 1049 | } |
1017 | } | 1050 | } |
1018 | 1051 | ||
@@ -1038,8 +1071,7 @@ static void e1000_release_hw_control(struct e1000_adapter *adapter) | |||
1038 | ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD); | 1071 | ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD); |
1039 | } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { | 1072 | } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { |
1040 | ctrl_ext = er32(CTRL_EXT); | 1073 | ctrl_ext = er32(CTRL_EXT); |
1041 | ew32(CTRL_EXT, | 1074 | ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); |
1042 | ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); | ||
1043 | } | 1075 | } |
1044 | } | 1076 | } |
1045 | 1077 | ||
@@ -1341,9 +1373,11 @@ static void e1000_set_itr(struct e1000_adapter *adapter) | |||
1341 | 1373 | ||
1342 | set_itr_now: | 1374 | set_itr_now: |
1343 | if (new_itr != adapter->itr) { | 1375 | if (new_itr != adapter->itr) { |
1344 | /* this attempts to bias the interrupt rate towards Bulk | 1376 | /* |
1377 | * this attempts to bias the interrupt rate towards Bulk | ||
1345 | * by adding intermediate steps when interrupt rate is | 1378 | * by adding intermediate steps when interrupt rate is |
1346 | * increasing */ | 1379 | * increasing |
1380 | */ | ||
1347 | new_itr = new_itr > adapter->itr ? | 1381 | new_itr = new_itr > adapter->itr ? |
1348 | min(adapter->itr + (new_itr >> 2), new_itr) : | 1382 | min(adapter->itr + (new_itr >> 2), new_itr) : |
1349 | new_itr; | 1383 | new_itr; |
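The "bias towards Bulk" step above damps upward jumps in the throttle value: when the new target is higher than the current setting it moves at most a quarter of the way there per adjustment, while downward moves take effect immediately. A small standalone sketch of that smoothing:

/* Next throttle setting given the current one and the freshly computed
 * target: move up gradually, move down immediately. */
static unsigned int smooth_itr(unsigned int cur, unsigned int target)
{
	if (target > cur) {
		unsigned int step = cur + (target >> 2);
		return step < target ? step : target;	/* min(cur + target/4, target) */
	}
	return target;
}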
@@ -1354,7 +1388,7 @@ set_itr_now: | |||
1354 | 1388 | ||
1355 | /** | 1389 | /** |
1356 | * e1000_clean - NAPI Rx polling callback | 1390 | * e1000_clean - NAPI Rx polling callback |
1357 | * @adapter: board private structure | 1391 | * @napi: struct associated with this polling callback |
1358 | * @budget: amount of packets driver is allowed to process this poll | 1392 | * @budget: amount of packets driver is allowed to process this poll |
1359 | **/ | 1393 | **/ |
1360 | static int e1000_clean(struct napi_struct *napi, int budget) | 1394 | static int e1000_clean(struct napi_struct *napi, int budget) |
@@ -1366,10 +1400,12 @@ static int e1000_clean(struct napi_struct *napi, int budget) | |||
1366 | /* Must NOT use netdev_priv macro here. */ | 1400 | /* Must NOT use netdev_priv macro here. */ |
1367 | adapter = poll_dev->priv; | 1401 | adapter = poll_dev->priv; |
1368 | 1402 | ||
1369 | /* e1000_clean is called per-cpu. This lock protects | 1403 | /* |
1404 | * e1000_clean is called per-cpu. This lock protects | ||
1370 | * tx_ring from being cleaned by multiple cpus | 1405 | * tx_ring from being cleaned by multiple cpus |
1371 | * simultaneously. A failure obtaining the lock means | 1406 | * simultaneously. A failure obtaining the lock means |
1372 | * tx_ring is currently being cleaned anyway. */ | 1407 | * tx_ring is currently being cleaned anyway. |
1408 | */ | ||
1373 | if (spin_trylock(&adapter->tx_queue_lock)) { | 1409 | if (spin_trylock(&adapter->tx_queue_lock)) { |
1374 | tx_cleaned = e1000_clean_tx_irq(adapter); | 1410 | tx_cleaned = e1000_clean_tx_irq(adapter); |
1375 | spin_unlock(&adapter->tx_queue_lock); | 1411 | spin_unlock(&adapter->tx_queue_lock); |
@@ -1539,9 +1575,11 @@ static void e1000_init_manageability(struct e1000_adapter *adapter) | |||
1539 | 1575 | ||
1540 | manc = er32(MANC); | 1576 | manc = er32(MANC); |
1541 | 1577 | ||
1542 | /* enable receiving management packets to the host. this will probably | 1578 | /* |
1579 | * enable receiving management packets to the host. this will probably | ||
1543 | * generate destination unreachable messages from the host OS, but | 1580 | * generate destination unreachable messages from the host OS, but |
1544 | * the packets will be handled on SMBUS */ | 1581 | * the packets will be handled on SMBUS |
1582 | */ | ||
1545 | manc |= E1000_MANC_EN_MNG2HOST; | 1583 | manc |= E1000_MANC_EN_MNG2HOST; |
1546 | manc2h = er32(MANC2H); | 1584 | manc2h = er32(MANC2H); |
1547 | #define E1000_MNG2HOST_PORT_623 (1 << 5) | 1585 | #define E1000_MNG2HOST_PORT_623 (1 << 5) |
@@ -1591,7 +1629,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) | |||
1591 | 1629 | ||
1592 | /* Set the Tx Interrupt Delay register */ | 1630 | /* Set the Tx Interrupt Delay register */ |
1593 | ew32(TIDV, adapter->tx_int_delay); | 1631 | ew32(TIDV, adapter->tx_int_delay); |
1594 | /* tx irq moderation */ | 1632 | /* Tx irq moderation */ |
1595 | ew32(TADV, adapter->tx_abs_int_delay); | 1633 | ew32(TADV, adapter->tx_abs_int_delay); |
1596 | 1634 | ||
1597 | /* Program the Transmit Control Register */ | 1635 | /* Program the Transmit Control Register */ |
@@ -1602,8 +1640,10 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) | |||
1602 | 1640 | ||
1603 | if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { | 1641 | if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { |
1604 | tarc = er32(TARC0); | 1642 | tarc = er32(TARC0); |
1605 | /* set the speed mode bit, we'll clear it if we're not at | 1643 | /* |
1606 | * gigabit link later */ | 1644 | * set the speed mode bit, we'll clear it if we're not at |
1645 | * gigabit link later | ||
1646 | */ | ||
1607 | #define SPEED_MODE_BIT (1 << 21) | 1647 | #define SPEED_MODE_BIT (1 << 21) |
1608 | tarc |= SPEED_MODE_BIT; | 1648 | tarc |= SPEED_MODE_BIT; |
1609 | ew32(TARC0, tarc); | 1649 | ew32(TARC0, tarc); |
@@ -1724,8 +1764,10 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) | |||
1724 | /* Configure extra packet-split registers */ | 1764 | /* Configure extra packet-split registers */ |
1725 | rfctl = er32(RFCTL); | 1765 | rfctl = er32(RFCTL); |
1726 | rfctl |= E1000_RFCTL_EXTEN; | 1766 | rfctl |= E1000_RFCTL_EXTEN; |
1727 | /* disable packet split support for IPv6 extension headers, | 1767 | /* |
1728 | * because some malformed IPv6 headers can hang the RX */ | 1768 | * disable packet split support for IPv6 extension headers, |
1769 | * because some malformed IPv6 headers can hang the Rx | ||
1770 | */ | ||
1729 | rfctl |= (E1000_RFCTL_IPV6_EX_DIS | | 1771 | rfctl |= (E1000_RFCTL_IPV6_EX_DIS | |
1730 | E1000_RFCTL_NEW_IPV6_EXT_DIS); | 1772 | E1000_RFCTL_NEW_IPV6_EXT_DIS); |
1731 | 1773 | ||
@@ -1754,6 +1796,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) | |||
1754 | } | 1796 | } |
1755 | 1797 | ||
1756 | ew32(RCTL, rctl); | 1798 | ew32(RCTL, rctl); |
1799 | /* just started the receive unit, no need to restart */ | ||
1800 | adapter->flags &= ~FLAG_RX_RESTART_NOW; | ||
1757 | } | 1801 | } |
1758 | 1802 | ||
1759 | /** | 1803 | /** |
@@ -1794,8 +1838,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) | |||
1794 | /* irq moderation */ | 1838 | /* irq moderation */ |
1795 | ew32(RADV, adapter->rx_abs_int_delay); | 1839 | ew32(RADV, adapter->rx_abs_int_delay); |
1796 | if (adapter->itr_setting != 0) | 1840 | if (adapter->itr_setting != 0) |
1797 | ew32(ITR, | 1841 | ew32(ITR, 1000000000 / (adapter->itr * 256)); |
1798 | 1000000000 / (adapter->itr * 256)); | ||
1799 | 1842 | ||
1800 | ctrl_ext = er32(CTRL_EXT); | 1843 | ctrl_ext = er32(CTRL_EXT); |
1801 | /* Reset delay timers after every interrupt */ | 1844 | /* Reset delay timers after every interrupt */ |
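The tidied write ew32(ITR, 1000000000 / (adapter->itr * 256)) converts an interrupt rate in interrupts per second into the ITR register's units of 256 ns between interrupts; at 10000 ints/s, for instance, it works out to 390 (about 100 us). A quick check of the arithmetic:

#include <stdio.h>

/* ITR counts in units of 256 ns; there are 1e9 ns in a second. */
static unsigned int itr_reg_from_rate(unsigned int ints_per_sec)
{
	return 1000000000u / (ints_per_sec * 256u);
}

int main(void)
{
	/* 10000 interrupts/s -> 390 -> ~99.8 us between interrupts */
	printf("%u\n", itr_reg_from_rate(10000));
	return 0;
}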
@@ -1806,8 +1849,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) | |||
1806 | ew32(CTRL_EXT, ctrl_ext); | 1849 | ew32(CTRL_EXT, ctrl_ext); |
1807 | e1e_flush(); | 1850 | e1e_flush(); |
1808 | 1851 | ||
1809 | /* Setup the HW Rx Head and Tail Descriptor Pointers and | 1852 | /* |
1810 | * the Base and Length of the Rx Descriptor Ring */ | 1853 | * Setup the HW Rx Head and Tail Descriptor Pointers and |
1854 | * the Base and Length of the Rx Descriptor Ring | ||
1855 | */ | ||
1811 | rdba = rx_ring->dma; | 1856 | rdba = rx_ring->dma; |
1812 | ew32(RDBAL, (rdba & DMA_32BIT_MASK)); | 1857 | ew32(RDBAL, (rdba & DMA_32BIT_MASK)); |
1813 | ew32(RDBAH, (rdba >> 32)); | 1858 | ew32(RDBAH, (rdba >> 32)); |
@@ -1822,8 +1867,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) | |||
1822 | if (adapter->flags & FLAG_RX_CSUM_ENABLED) { | 1867 | if (adapter->flags & FLAG_RX_CSUM_ENABLED) { |
1823 | rxcsum |= E1000_RXCSUM_TUOFL; | 1868 | rxcsum |= E1000_RXCSUM_TUOFL; |
1824 | 1869 | ||
1825 | /* IPv4 payload checksum for UDP fragments must be | 1870 | /* |
1826 | * used in conjunction with packet-split. */ | 1871 | * IPv4 payload checksum for UDP fragments must be |
1872 | * used in conjunction with packet-split. | ||
1873 | */ | ||
1827 | if (adapter->rx_ps_pages) | 1874 | if (adapter->rx_ps_pages) |
1828 | rxcsum |= E1000_RXCSUM_IPPCSE; | 1875 | rxcsum |= E1000_RXCSUM_IPPCSE; |
1829 | } else { | 1876 | } else { |
@@ -1832,9 +1879,11 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) | |||
1832 | } | 1879 | } |
1833 | ew32(RXCSUM, rxcsum); | 1880 | ew32(RXCSUM, rxcsum); |
1834 | 1881 | ||
1835 | /* Enable early receives on supported devices, only takes effect when | 1882 | /* |
1883 | * Enable early receives on supported devices, only takes effect when | ||
1836 | * packet size is equal or larger than the specified value (in 8 byte | 1884 | * packet size is equal or larger than the specified value (in 8 byte |
1837 | * units), e.g. using jumbo frames when setting to E1000_ERT_2048 */ | 1885 | * units), e.g. using jumbo frames when setting to E1000_ERT_2048 |
1886 | */ | ||
1838 | if ((adapter->flags & FLAG_HAS_ERT) && | 1887 | if ((adapter->flags & FLAG_HAS_ERT) && |
1839 | (adapter->netdev->mtu > ETH_DATA_LEN)) | 1888 | (adapter->netdev->mtu > ETH_DATA_LEN)) |
1840 | ew32(ERT, E1000_ERT_2048); | 1889 | ew32(ERT, E1000_ERT_2048); |
@@ -1844,7 +1893,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) | |||
1844 | } | 1893 | } |
1845 | 1894 | ||
1846 | /** | 1895 | /** |
1847 | * e1000_mc_addr_list_update - Update Multicast addresses | 1896 | * e1000_update_mc_addr_list - Update Multicast addresses |
1848 | * @hw: pointer to the HW structure | 1897 | * @hw: pointer to the HW structure |
1849 | * @mc_addr_list: array of multicast addresses to program | 1898 | * @mc_addr_list: array of multicast addresses to program |
1850 | * @mc_addr_count: number of multicast addresses to program | 1899 | * @mc_addr_count: number of multicast addresses to program |
@@ -1858,11 +1907,11 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) | |||
1858 | * exists and all implementations are handled in the generic version of this | 1907 | * exists and all implementations are handled in the generic version of this |
1859 | * function. | 1908 | * function. |
1860 | **/ | 1909 | **/ |
1861 | static void e1000_mc_addr_list_update(struct e1000_hw *hw, u8 *mc_addr_list, | 1910 | static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list, |
1862 | u32 mc_addr_count, u32 rar_used_count, | 1911 | u32 mc_addr_count, u32 rar_used_count, |
1863 | u32 rar_count) | 1912 | u32 rar_count) |
1864 | { | 1913 | { |
1865 | hw->mac.ops.mc_addr_list_update(hw, mc_addr_list, mc_addr_count, | 1914 | hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count, |
1866 | rar_used_count, rar_count); | 1915 | rar_used_count, rar_count); |
1867 | } | 1916 | } |
1868 | 1917 | ||
@@ -1916,7 +1965,7 @@ static void e1000_set_multi(struct net_device *netdev) | |||
1916 | mc_ptr = mc_ptr->next; | 1965 | mc_ptr = mc_ptr->next; |
1917 | } | 1966 | } |
1918 | 1967 | ||
1919 | e1000_mc_addr_list_update(hw, mta_list, i, 1, | 1968 | e1000_update_mc_addr_list(hw, mta_list, i, 1, |
1920 | mac->rar_entry_count); | 1969 | mac->rar_entry_count); |
1921 | kfree(mta_list); | 1970 | kfree(mta_list); |
1922 | } else { | 1971 | } else { |
@@ -1924,13 +1973,12 @@ static void e1000_set_multi(struct net_device *netdev) | |||
1924 | * if we're called from probe, we might not have | 1973 | * if we're called from probe, we might not have |
1925 | * anything to do here, so clear out the list | 1974 | * anything to do here, so clear out the list |
1926 | */ | 1975 | */ |
1927 | e1000_mc_addr_list_update(hw, NULL, 0, 1, | 1976 | e1000_update_mc_addr_list(hw, NULL, 0, 1, mac->rar_entry_count); |
1928 | mac->rar_entry_count); | ||
1929 | } | 1977 | } |
1930 | } | 1978 | } |
1931 | 1979 | ||
1932 | /** | 1980 | /** |
1933 | * e1000_configure - configure the hardware for RX and TX | 1981 | * e1000_configure - configure the hardware for Rx and Tx |
1934 | * @adapter: private board structure | 1982 | * @adapter: private board structure |
1935 | **/ | 1983 | **/ |
1936 | static void e1000_configure(struct e1000_adapter *adapter) | 1984 | static void e1000_configure(struct e1000_adapter *adapter) |
@@ -1943,8 +1991,7 @@ static void e1000_configure(struct e1000_adapter *adapter) | |||
1943 | e1000_configure_tx(adapter); | 1991 | e1000_configure_tx(adapter); |
1944 | e1000_setup_rctl(adapter); | 1992 | e1000_setup_rctl(adapter); |
1945 | e1000_configure_rx(adapter); | 1993 | e1000_configure_rx(adapter); |
1946 | adapter->alloc_rx_buf(adapter, | 1994 | adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring)); |
1947 | e1000_desc_unused(adapter->rx_ring)); | ||
1948 | } | 1995 | } |
1949 | 1996 | ||
1950 | /** | 1997 | /** |
@@ -1960,9 +2007,11 @@ void e1000e_power_up_phy(struct e1000_adapter *adapter) | |||
1960 | u16 mii_reg = 0; | 2007 | u16 mii_reg = 0; |
1961 | 2008 | ||
1962 | /* Just clear the power down bit to wake the phy back up */ | 2009 | /* Just clear the power down bit to wake the phy back up */ |
1963 | if (adapter->hw.media_type == e1000_media_type_copper) { | 2010 | if (adapter->hw.phy.media_type == e1000_media_type_copper) { |
1964 | /* according to the manual, the phy will retain its | 2011 | /* |
1965 | * settings across a power-down/up cycle */ | 2012 | * According to the manual, the phy will retain its |
2013 | * settings across a power-down/up cycle | ||
2014 | */ | ||
1966 | e1e_rphy(&adapter->hw, PHY_CONTROL, &mii_reg); | 2015 | e1e_rphy(&adapter->hw, PHY_CONTROL, &mii_reg); |
1967 | mii_reg &= ~MII_CR_POWER_DOWN; | 2016 | mii_reg &= ~MII_CR_POWER_DOWN; |
1968 | e1e_wphy(&adapter->hw, PHY_CONTROL, mii_reg); | 2017 | e1e_wphy(&adapter->hw, PHY_CONTROL, mii_reg); |
@@ -1987,12 +2036,11 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter) | |||
1987 | return; | 2036 | return; |
1988 | 2037 | ||
1989 | /* non-copper PHY? */ | 2038 | /* non-copper PHY? */ |
1990 | if (adapter->hw.media_type != e1000_media_type_copper) | 2039 | if (adapter->hw.phy.media_type != e1000_media_type_copper) |
1991 | return; | 2040 | return; |
1992 | 2041 | ||
1993 | /* reset is blocked because of a SoL/IDER session */ | 2042 | /* reset is blocked because of a SoL/IDER session */ |
1994 | if (e1000e_check_mng_mode(hw) || | 2043 | if (e1000e_check_mng_mode(hw) || e1000_check_reset_block(hw)) |
1995 | e1000_check_reset_block(hw)) | ||
1996 | return; | 2044 | return; |
1997 | 2045 | ||
1998 | /* manageability (AMT) is enabled */ | 2046 | /* manageability (AMT) is enabled */ |
@@ -2012,51 +2060,61 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter) | |||
2012 | * This function boots the hardware and enables some settings that | 2060 | * This function boots the hardware and enables some settings that |
2013 | * require a configuration cycle of the hardware - those cannot be | 2061 | * require a configuration cycle of the hardware - those cannot be |
2014 | * set/changed during runtime. After reset the device needs to be | 2062 | * set/changed during runtime. After reset the device needs to be |
2015 | * properly configured for rx, tx etc. | 2063 | * properly configured for Rx, Tx etc. |
2016 | */ | 2064 | */ |
2017 | void e1000e_reset(struct e1000_adapter *adapter) | 2065 | void e1000e_reset(struct e1000_adapter *adapter) |
2018 | { | 2066 | { |
2019 | struct e1000_mac_info *mac = &adapter->hw.mac; | 2067 | struct e1000_mac_info *mac = &adapter->hw.mac; |
2068 | struct e1000_fc_info *fc = &adapter->hw.fc; | ||
2020 | struct e1000_hw *hw = &adapter->hw; | 2069 | struct e1000_hw *hw = &adapter->hw; |
2021 | u32 tx_space, min_tx_space, min_rx_space; | 2070 | u32 tx_space, min_tx_space, min_rx_space; |
2022 | u32 pba; | 2071 | u32 pba = adapter->pba; |
2023 | u16 hwm; | 2072 | u16 hwm; |
2024 | 2073 | ||
2025 | ew32(PBA, adapter->pba); | 2074 | /* reset Packet Buffer Allocation to default */ |
2075 | ew32(PBA, pba); | ||
2026 | 2076 | ||
2027 | if (mac->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN ) { | 2077 | if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) { |
2028 | /* To maintain wire speed transmits, the Tx FIFO should be | 2078 | /* |
2079 | * To maintain wire speed transmits, the Tx FIFO should be | ||
2029 | * large enough to accommodate two full transmit packets, | 2080 | * large enough to accommodate two full transmit packets, |
2030 | * rounded up to the next 1KB and expressed in KB. Likewise, | 2081 | * rounded up to the next 1KB and expressed in KB. Likewise, |
2031 | * the Rx FIFO should be large enough to accommodate at least | 2082 | * the Rx FIFO should be large enough to accommodate at least |
2032 | * one full receive packet and is similarly rounded up and | 2083 | * one full receive packet and is similarly rounded up and |
2033 | * expressed in KB. */ | 2084 | * expressed in KB. |
2085 | */ | ||
2034 | pba = er32(PBA); | 2086 | pba = er32(PBA); |
2035 | /* upper 16 bits has Tx packet buffer allocation size in KB */ | 2087 | /* upper 16 bits has Tx packet buffer allocation size in KB */ |
2036 | tx_space = pba >> 16; | 2088 | tx_space = pba >> 16; |
2037 | /* lower 16 bits has Rx packet buffer allocation size in KB */ | 2089 | /* lower 16 bits has Rx packet buffer allocation size in KB */ |
2038 | pba &= 0xffff; | 2090 | pba &= 0xffff; |
2039 | /* the tx fifo also stores 16 bytes of information about the tx | 2091 | /* |
2040 | * but don't include ethernet FCS because hardware appends it */ | 2092 | * the Tx fifo also stores 16 bytes of information about the tx |
2041 | min_tx_space = (mac->max_frame_size + | 2093 | * but don't include ethernet FCS because hardware appends it |
2094 | */ | ||
2095 | min_tx_space = (adapter->max_frame_size + | ||
2042 | sizeof(struct e1000_tx_desc) - | 2096 | sizeof(struct e1000_tx_desc) - |
2043 | ETH_FCS_LEN) * 2; | 2097 | ETH_FCS_LEN) * 2; |
2044 | min_tx_space = ALIGN(min_tx_space, 1024); | 2098 | min_tx_space = ALIGN(min_tx_space, 1024); |
2045 | min_tx_space >>= 10; | 2099 | min_tx_space >>= 10; |
2046 | /* software strips receive CRC, so leave room for it */ | 2100 | /* software strips receive CRC, so leave room for it */ |
2047 | min_rx_space = mac->max_frame_size; | 2101 | min_rx_space = adapter->max_frame_size; |
2048 | min_rx_space = ALIGN(min_rx_space, 1024); | 2102 | min_rx_space = ALIGN(min_rx_space, 1024); |
2049 | min_rx_space >>= 10; | 2103 | min_rx_space >>= 10; |
2050 | 2104 | ||
2051 | /* If current Tx allocation is less than the min Tx FIFO size, | 2105 | /* |
2106 | * If current Tx allocation is less than the min Tx FIFO size, | ||
2052 | * and the min Tx FIFO size is less than the current Rx FIFO | 2107 | * and the min Tx FIFO size is less than the current Rx FIFO |
2053 | * allocation, take space away from current Rx allocation */ | 2108 | * allocation, take space away from current Rx allocation |
2109 | */ | ||
2054 | if ((tx_space < min_tx_space) && | 2110 | if ((tx_space < min_tx_space) && |
2055 | ((min_tx_space - tx_space) < pba)) { | 2111 | ((min_tx_space - tx_space) < pba)) { |
2056 | pba -= min_tx_space - tx_space; | 2112 | pba -= min_tx_space - tx_space; |
2057 | 2113 | ||
2058 | /* if short on rx space, rx wins and must trump tx | 2114 | /* |
2059 | * adjustment or use Early Receive if available */ | 2115 | * if short on Rx space, Rx wins and must trump tx |
2116 | * adjustment or use Early Receive if available | ||
2117 | */ | ||
2060 | if ((pba < min_rx_space) && | 2118 | if ((pba < min_rx_space) && |
2061 | (!(adapter->flags & FLAG_HAS_ERT))) | 2119 | (!(adapter->flags & FLAG_HAS_ERT))) |
2062 | /* ERT enabled in e1000_configure_rx */ | 2120 | /* ERT enabled in e1000_configure_rx */ |
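
The hunk above mostly rewraps comments around the packet-buffer rebalancing; the arithmetic itself is easier to follow with numbers plugged in. Below is a hedged, stand-alone C sketch of that calculation (PBA split into Tx/Rx halves, 1 KB rounding, conditional transfer of Rx space to Tx). The sample frame size, the ALIGN_1K macro and the simplified "Rx wins" clamp are illustrative assumptions, not the driver's exact code.

#include <stdio.h>
#include <stdint.h>

#define ALIGN_1K(x) (((x) + 1023u) & ~1023u)   /* mirrors ALIGN(x, 1024) */

int main(void)
{
        /* illustrative values: 0x20 KB Rx + 0x10 KB Tx packed in one register */
        uint32_t pba = (0x10u << 16) | 0x20u;
        uint32_t max_frame = 9018;              /* jumbo frame, for example */
        uint32_t tx_desc_sz = 16;               /* per-descriptor info kept in the FIFO */

        uint32_t tx_space = pba >> 16;          /* KB reserved for Tx */
        uint32_t rx_space = pba & 0xffff;       /* KB reserved for Rx */

        /* two full frames plus descriptor info, minus 4-byte FCS, rounded to KB */
        uint32_t min_tx_space = ALIGN_1K((max_frame + tx_desc_sz - 4) * 2) >> 10;
        /* one full frame, rounded to KB (software strips the receive CRC) */
        uint32_t min_rx_space = ALIGN_1K(max_frame) >> 10;

        if (tx_space < min_tx_space && (min_tx_space - tx_space) < rx_space) {
                rx_space -= min_tx_space - tx_space;
                if (rx_space < min_rx_space)
                        rx_space = min_rx_space;  /* simplified "Rx wins" clamp */
        }

        printf("tx needs %u KB, rx needs %u KB, rx left with %u KB\n",
               min_tx_space, min_rx_space, rx_space);
        return 0;
}
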
@@ -2067,29 +2125,33 @@ void e1000e_reset(struct e1000_adapter *adapter) | |||
2067 | } | 2125 | } |
2068 | 2126 | ||
2069 | 2127 | ||
2070 | /* flow control settings */ | 2128 | /* |
2071 | /* The high water mark must be low enough to fit one full frame | 2129 | * flow control settings |
2130 | * | ||
2131 | * The high water mark must be low enough to fit one full frame | ||
2072 | * (or the size used for early receive) above it in the Rx FIFO. | 2132 | * (or the size used for early receive) above it in the Rx FIFO. |
2073 | * Set it to the lower of: | 2133 | * Set it to the lower of: |
2074 | * - 90% of the Rx FIFO size, and | 2134 | * - 90% of the Rx FIFO size, and |
2075 | * - the full Rx FIFO size minus the early receive size (for parts | 2135 | * - the full Rx FIFO size minus the early receive size (for parts |
2076 | * with ERT support assuming ERT set to E1000_ERT_2048), or | 2136 | * with ERT support assuming ERT set to E1000_ERT_2048), or |
2077 | * - the full Rx FIFO size minus one full frame */ | 2137 | * - the full Rx FIFO size minus one full frame |
2138 | */ | ||
2078 | if (adapter->flags & FLAG_HAS_ERT) | 2139 | if (adapter->flags & FLAG_HAS_ERT) |
2079 | hwm = min(((adapter->pba << 10) * 9 / 10), | 2140 | hwm = min(((pba << 10) * 9 / 10), |
2080 | ((adapter->pba << 10) - (E1000_ERT_2048 << 3))); | 2141 | ((pba << 10) - (E1000_ERT_2048 << 3))); |
2081 | else | 2142 | else |
2082 | hwm = min(((adapter->pba << 10) * 9 / 10), | 2143 | hwm = min(((pba << 10) * 9 / 10), |
2083 | ((adapter->pba << 10) - mac->max_frame_size)); | 2144 | ((pba << 10) - adapter->max_frame_size)); |
2084 | 2145 | ||
2085 | mac->fc_high_water = hwm & 0xFFF8; /* 8-byte granularity */ | 2146 | fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */ |
2086 | mac->fc_low_water = mac->fc_high_water - 8; | 2147 | fc->low_water = fc->high_water - 8; |
2087 | 2148 | ||
2088 | if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME) | 2149 | if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME) |
2089 | mac->fc_pause_time = 0xFFFF; | 2150 | fc->pause_time = 0xFFFF; |
2090 | else | 2151 | else |
2091 | mac->fc_pause_time = E1000_FC_PAUSE_TIME; | 2152 | fc->pause_time = E1000_FC_PAUSE_TIME; |
2092 | mac->fc = mac->original_fc; | 2153 | fc->send_xon = 1; |
2154 | fc->type = fc->original_type; | ||
2093 | 2155 | ||
2094 | /* Allow time for pending master requests to run */ | 2156 | /* Allow time for pending master requests to run */ |
2095 | mac->ops.reset_hw(hw); | 2157 | mac->ops.reset_hw(hw); |
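
To make the high-water-mark formula concrete: it takes the lower of 90% of the Rx buffer and the buffer minus either the early-receive allowance or one full frame, then masks to 8-byte granularity. A hedged stand-alone sketch follows; fc_high_water(), the 32 KB buffer and the 1522-byte frame are illustrative stand-ins, not driver API.

#include <stdio.h>
#include <stdint.h>

#define E1000_ERT_2048 2048u    /* early-receive threshold, in 8-byte units */

/* returns the flow-control high water mark in bytes */
static uint16_t fc_high_water(uint32_t pba_kb, uint32_t max_frame, int has_ert)
{
        uint32_t fifo = pba_kb << 10;           /* Rx packet buffer in bytes */
        uint32_t a = fifo * 9 / 10;             /* 90% of the FIFO */
        uint32_t b = has_ert ? fifo - (E1000_ERT_2048 << 3)
                             : fifo - max_frame;
        uint32_t hwm = a < b ? a : b;

        return (uint16_t)(hwm & 0xFFF8);        /* 8-byte granularity */
}

int main(void)
{
        uint16_t hi = fc_high_water(32, 1522, 0);  /* 32 KB buffer, std frame */

        printf("high water = %u, low water = %u\n", hi, hi - 8);
        return 0;
}
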
@@ -2108,9 +2170,11 @@ void e1000e_reset(struct e1000_adapter *adapter) | |||
2108 | 2170 | ||
2109 | if (!(adapter->flags & FLAG_SMART_POWER_DOWN)) { | 2171 | if (!(adapter->flags & FLAG_SMART_POWER_DOWN)) { |
2110 | u16 phy_data = 0; | 2172 | u16 phy_data = 0; |
2111 | /* speed up time to link by disabling smart power down, ignore | 2173 | /* |
2174 | * speed up time to link by disabling smart power down, ignore | ||
2112 | * the return value of this function because there is nothing | 2175 | * the return value of this function because there is nothing |
2113 | * different we would do if it failed */ | 2176 | * different we would do if it failed |
2177 | */ | ||
2114 | e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); | 2178 | e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); |
2115 | phy_data &= ~IGP02E1000_PM_SPD; | 2179 | phy_data &= ~IGP02E1000_PM_SPD; |
2116 | e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); | 2180 | e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); |
@@ -2140,8 +2204,10 @@ void e1000e_down(struct e1000_adapter *adapter) | |||
2140 | struct e1000_hw *hw = &adapter->hw; | 2204 | struct e1000_hw *hw = &adapter->hw; |
2141 | u32 tctl, rctl; | 2205 | u32 tctl, rctl; |
2142 | 2206 | ||
2143 | /* signal that we're down so the interrupt handler does not | 2207 | /* |
2144 | * reschedule our watchdog timer */ | 2208 | * signal that we're down so the interrupt handler does not |
2209 | * reschedule our watchdog timer | ||
2210 | */ | ||
2145 | set_bit(__E1000_DOWN, &adapter->state); | 2211 | set_bit(__E1000_DOWN, &adapter->state); |
2146 | 2212 | ||
2147 | /* disable receives in the hardware */ | 2213 | /* disable receives in the hardware */ |
@@ -2200,13 +2266,12 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter) | |||
2200 | **/ | 2266 | **/ |
2201 | static int __devinit e1000_sw_init(struct e1000_adapter *adapter) | 2267 | static int __devinit e1000_sw_init(struct e1000_adapter *adapter) |
2202 | { | 2268 | { |
2203 | struct e1000_hw *hw = &adapter->hw; | ||
2204 | struct net_device *netdev = adapter->netdev; | 2269 | struct net_device *netdev = adapter->netdev; |
2205 | 2270 | ||
2206 | adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN; | 2271 | adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN; |
2207 | adapter->rx_ps_bsize0 = 128; | 2272 | adapter->rx_ps_bsize0 = 128; |
2208 | hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; | 2273 | adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; |
2209 | hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN; | 2274 | adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; |
2210 | 2275 | ||
2211 | adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); | 2276 | adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); |
2212 | if (!adapter->tx_ring) | 2277 | if (!adapter->tx_ring) |
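
The frame-size fields now initialised here are plain MTU arithmetic (Ethernet header, FCS, minimum frame length). A tiny hedged sketch with a standard 1500-byte MTU, purely for illustration:

#include <stdio.h>

#define ETH_HLEN    14   /* destination + source MAC + ethertype */
#define ETH_FCS_LEN  4   /* frame check sequence appended by hardware */
#define ETH_ZLEN    60   /* minimum frame length without FCS */

int main(void)
{
        int mtu = 1500;
        int max_frame = mtu + ETH_HLEN + ETH_FCS_LEN;  /* 1518 */
        int min_frame = ETH_ZLEN + ETH_FCS_LEN;        /* 64   */

        printf("max_frame_size=%d min_frame_size=%d\n", max_frame, min_frame);
        return 0;
}
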
@@ -2272,16 +2337,20 @@ static int e1000_open(struct net_device *netdev) | |||
2272 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) | 2337 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) |
2273 | e1000_update_mng_vlan(adapter); | 2338 | e1000_update_mng_vlan(adapter); |
2274 | 2339 | ||
2275 | /* If AMT is enabled, let the firmware know that the network | 2340 | /* |
2276 | * interface is now open */ | 2341 | * If AMT is enabled, let the firmware know that the network |
2342 | * interface is now open | ||
2343 | */ | ||
2277 | if ((adapter->flags & FLAG_HAS_AMT) && | 2344 | if ((adapter->flags & FLAG_HAS_AMT) && |
2278 | e1000e_check_mng_mode(&adapter->hw)) | 2345 | e1000e_check_mng_mode(&adapter->hw)) |
2279 | e1000_get_hw_control(adapter); | 2346 | e1000_get_hw_control(adapter); |
2280 | 2347 | ||
2281 | /* before we allocate an interrupt, we must be ready to handle it. | 2348 | /* |
2349 | * before we allocate an interrupt, we must be ready to handle it. | ||
2282 | * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt | 2350 | * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt |
2283 | * as soon as we call pci_request_irq, so we have to setup our | 2351 | * as soon as we call pci_request_irq, so we have to setup our |
2284 | * clean_rx handler before we do so. */ | 2352 | * clean_rx handler before we do so. |
2353 | */ | ||
2285 | e1000_configure(adapter); | 2354 | e1000_configure(adapter); |
2286 | 2355 | ||
2287 | err = e1000_request_irq(adapter); | 2356 | err = e1000_request_irq(adapter); |
@@ -2335,16 +2404,20 @@ static int e1000_close(struct net_device *netdev) | |||
2335 | e1000e_free_tx_resources(adapter); | 2404 | e1000e_free_tx_resources(adapter); |
2336 | e1000e_free_rx_resources(adapter); | 2405 | e1000e_free_rx_resources(adapter); |
2337 | 2406 | ||
2338 | /* kill manageability vlan ID if supported, but not if a vlan with | 2407 | /* |
2339 | * the same ID is registered on the host OS (let 8021q kill it) */ | 2408 | * kill manageability vlan ID if supported, but not if a vlan with |
2409 | * the same ID is registered on the host OS (let 8021q kill it) | ||
2410 | */ | ||
2340 | if ((adapter->hw.mng_cookie.status & | 2411 | if ((adapter->hw.mng_cookie.status & |
2341 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && | 2412 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && |
2342 | !(adapter->vlgrp && | 2413 | !(adapter->vlgrp && |
2343 | vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) | 2414 | vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) |
2344 | e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); | 2415 | e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); |
2345 | 2416 | ||
2346 | /* If AMT is enabled, let the firmware know that the network | 2417 | /* |
2347 | * interface is now closed */ | 2418 | * If AMT is enabled, let the firmware know that the network |
2419 | * interface is now closed | ||
2420 | */ | ||
2348 | if ((adapter->flags & FLAG_HAS_AMT) && | 2421 | if ((adapter->flags & FLAG_HAS_AMT) && |
2349 | e1000e_check_mng_mode(&adapter->hw)) | 2422 | e1000e_check_mng_mode(&adapter->hw)) |
2350 | e1000_release_hw_control(adapter); | 2423 | e1000_release_hw_control(adapter); |
@@ -2375,12 +2448,14 @@ static int e1000_set_mac(struct net_device *netdev, void *p) | |||
2375 | /* activate the work around */ | 2448 | /* activate the work around */ |
2376 | e1000e_set_laa_state_82571(&adapter->hw, 1); | 2449 | e1000e_set_laa_state_82571(&adapter->hw, 1); |
2377 | 2450 | ||
2378 | /* Hold a copy of the LAA in RAR[14] This is done so that | 2451 | /* |
2452 | * Hold a copy of the LAA in RAR[14] This is done so that | ||
2379 | * between the time RAR[0] gets clobbered and the time it | 2453 | * between the time RAR[0] gets clobbered and the time it |
2380 | * gets fixed (in e1000_watchdog), the actual LAA is in one | 2454 | * gets fixed (in e1000_watchdog), the actual LAA is in one |
2381 | * of the RARs and no incoming packets directed to this port | 2455 | * of the RARs and no incoming packets directed to this port |
2382 | * are dropped. Eventually the LAA will be in RAR[0] and | 2456 | * are dropped. Eventually the LAA will be in RAR[0] and |
2383 | * RAR[14] */ | 2457 | * RAR[14] |
2458 | */ | ||
2384 | e1000e_rar_set(&adapter->hw, | 2459 | e1000e_rar_set(&adapter->hw, |
2385 | adapter->hw.mac.addr, | 2460 | adapter->hw.mac.addr, |
2386 | adapter->hw.mac.rar_entry_count - 1); | 2461 | adapter->hw.mac.rar_entry_count - 1); |
@@ -2389,8 +2464,10 @@ static int e1000_set_mac(struct net_device *netdev, void *p) | |||
2389 | return 0; | 2464 | return 0; |
2390 | } | 2465 | } |
2391 | 2466 | ||
2392 | /* Need to wait a few seconds after link up to get diagnostic information from | 2467 | /* |
2393 | * the phy */ | 2468 | * Need to wait a few seconds after link up to get diagnostic information from |
2469 | * the phy | ||
2470 | */ | ||
2394 | static void e1000_update_phy_info(unsigned long data) | 2471 | static void e1000_update_phy_info(unsigned long data) |
2395 | { | 2472 | { |
2396 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; | 2473 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; |
@@ -2421,7 +2498,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter) | |||
2421 | 2498 | ||
2422 | spin_lock_irqsave(&adapter->stats_lock, irq_flags); | 2499 | spin_lock_irqsave(&adapter->stats_lock, irq_flags); |
2423 | 2500 | ||
2424 | /* these counters are modified from e1000_adjust_tbi_stats, | 2501 | /* |
2502 | * these counters are modified from e1000_adjust_tbi_stats, | ||
2425 | * called from the interrupt context, so they must only | 2503 | * called from the interrupt context, so they must only |
2426 | * be written while holding adapter->stats_lock | 2504 | * be written while holding adapter->stats_lock |
2427 | */ | 2505 | */ |
@@ -2515,8 +2593,10 @@ void e1000e_update_stats(struct e1000_adapter *adapter) | |||
2515 | 2593 | ||
2516 | /* Rx Errors */ | 2594 | /* Rx Errors */ |
2517 | 2595 | ||
2518 | /* RLEC on some newer hardware can be incorrect so build | 2596 | /* |
2519 | * our own version based on RUC and ROC */ | 2597 | * RLEC on some newer hardware can be incorrect so build |
2598 | * our own version based on RUC and ROC | ||
2599 | */ | ||
2520 | adapter->net_stats.rx_errors = adapter->stats.rxerrc + | 2600 | adapter->net_stats.rx_errors = adapter->stats.rxerrc + |
2521 | adapter->stats.crcerrs + adapter->stats.algnerrc + | 2601 | adapter->stats.crcerrs + adapter->stats.algnerrc + |
2522 | adapter->stats.ruc + adapter->stats.roc + | 2602 | adapter->stats.ruc + adapter->stats.roc + |
@@ -2537,7 +2617,7 @@ void e1000e_update_stats(struct e1000_adapter *adapter) | |||
2537 | /* Tx Dropped needs to be maintained elsewhere */ | 2617 | /* Tx Dropped needs to be maintained elsewhere */ |
2538 | 2618 | ||
2539 | /* Phy Stats */ | 2619 | /* Phy Stats */ |
2540 | if (hw->media_type == e1000_media_type_copper) { | 2620 | if (hw->phy.media_type == e1000_media_type_copper) { |
2541 | if ((adapter->link_speed == SPEED_1000) && | 2621 | if ((adapter->link_speed == SPEED_1000) && |
2542 | (!e1e_rphy(hw, PHY_1000T_STATUS, &phy_tmp))) { | 2622 | (!e1e_rphy(hw, PHY_1000T_STATUS, &phy_tmp))) { |
2543 | phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; | 2623 | phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; |
@@ -2555,8 +2635,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter) | |||
2555 | 2635 | ||
2556 | static void e1000_print_link_info(struct e1000_adapter *adapter) | 2636 | static void e1000_print_link_info(struct e1000_adapter *adapter) |
2557 | { | 2637 | { |
2558 | struct net_device *netdev = adapter->netdev; | ||
2559 | struct e1000_hw *hw = &adapter->hw; | 2638 | struct e1000_hw *hw = &adapter->hw; |
2639 | struct net_device *netdev = adapter->netdev; | ||
2560 | u32 ctrl = er32(CTRL); | 2640 | u32 ctrl = er32(CTRL); |
2561 | 2641 | ||
2562 | ndev_info(netdev, | 2642 | ndev_info(netdev, |
@@ -2570,6 +2650,62 @@ static void e1000_print_link_info(struct e1000_adapter *adapter) | |||
2570 | ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" ))); | 2650 | ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" ))); |
2571 | } | 2651 | } |
2572 | 2652 | ||
2653 | static bool e1000_has_link(struct e1000_adapter *adapter) | ||
2654 | { | ||
2655 | struct e1000_hw *hw = &adapter->hw; | ||
2656 | bool link_active = 0; | ||
2657 | s32 ret_val = 0; | ||
2658 | |||
2659 | /* | ||
2660 | * get_link_status is set on LSC (link status) interrupt or | ||
2661 | * Rx sequence error interrupt. get_link_status will stay | ||
2662 | * false until the check_for_link establishes link | ||
2663 | * for copper adapters ONLY | ||
2664 | */ | ||
2665 | switch (hw->phy.media_type) { | ||
2666 | case e1000_media_type_copper: | ||
2667 | if (hw->mac.get_link_status) { | ||
2668 | ret_val = hw->mac.ops.check_for_link(hw); | ||
2669 | link_active = !hw->mac.get_link_status; | ||
2670 | } else { | ||
2671 | link_active = 1; | ||
2672 | } | ||
2673 | break; | ||
2674 | case e1000_media_type_fiber: | ||
2675 | ret_val = hw->mac.ops.check_for_link(hw); | ||
2676 | link_active = !!(er32(STATUS) & E1000_STATUS_LU); | ||
2677 | break; | ||
2678 | case e1000_media_type_internal_serdes: | ||
2679 | ret_val = hw->mac.ops.check_for_link(hw); | ||
2680 | link_active = adapter->hw.mac.serdes_has_link; | ||
2681 | break; | ||
2682 | default: | ||
2683 | case e1000_media_type_unknown: | ||
2684 | break; | ||
2685 | } | ||
2686 | |||
2687 | if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && | ||
2688 | (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { | ||
2689 | /* See e1000_kmrn_lock_loss_workaround_ich8lan() */ | ||
2690 | ndev_info(adapter->netdev, | ||
2691 | "Gigabit has been disabled, downgrading speed\n"); | ||
2692 | } | ||
2693 | |||
2694 | return link_active; | ||
2695 | } | ||
2696 | |||
2697 | static void e1000e_enable_receives(struct e1000_adapter *adapter) | ||
2698 | { | ||
2699 | /* make sure the receive unit is started */ | ||
2700 | if ((adapter->flags & FLAG_RX_NEEDS_RESTART) && | ||
2701 | (adapter->flags & FLAG_RX_RESTART_NOW)) { | ||
2702 | struct e1000_hw *hw = &adapter->hw; | ||
2703 | u32 rctl = er32(RCTL); | ||
2704 | ew32(RCTL, rctl | E1000_RCTL_EN); | ||
2705 | adapter->flags &= ~FLAG_RX_RESTART_NOW; | ||
2706 | } | ||
2707 | } | ||
2708 | |||
2573 | /** | 2709 | /** |
2574 | * e1000_watchdog - Timer Call-back | 2710 | * e1000_watchdog - Timer Call-back |
2575 | * @data: pointer to adapter cast into an unsigned long | 2711 | * @data: pointer to adapter cast into an unsigned long |
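
The new e1000_has_link() helper dispatches on media type: copper relies on the LSC-driven get_link_status flag, fiber reads the STATUS link-up bit, serdes trusts the state cached by check_for_link(). Below is a hedged stand-alone sketch of that dispatch; fake_hw, has_link() and the modelled "check already ran" hand-off are assumptions made so it compiles outside the kernel.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

enum media_type { MEDIA_COPPER, MEDIA_FIBER, MEDIA_SERDES, MEDIA_UNKNOWN };

struct fake_hw {
        enum media_type media;
        bool get_link_status;   /* set by the LSC interrupt on copper parts  */
        bool serdes_has_link;   /* tracked by the serdes check_for_link()    */
        uint32_t status_reg;    /* pretend STATUS register; bit 1 = link up  */
};

#define STATUS_LU 0x2u

static bool has_link(struct fake_hw *hw)
{
        switch (hw->media) {
        case MEDIA_COPPER:
                /*
                 * copper: get_link_status is raised by the link interrupt and
                 * cleared once a link check confirms the link; here we simply
                 * treat "nothing pending" as link up.
                 */
                return !hw->get_link_status;
        case MEDIA_FIBER:
                return hw->status_reg & STATUS_LU;
        case MEDIA_SERDES:
                return hw->serdes_has_link;
        default:
                return false;
        }
}

int main(void)
{
        struct fake_hw hw = { .media = MEDIA_FIBER, .status_reg = STATUS_LU };

        printf("link %s\n", has_link(&hw) ? "up" : "down");
        return 0;
}
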
@@ -2588,48 +2724,35 @@ static void e1000_watchdog_task(struct work_struct *work) | |||
2588 | { | 2724 | { |
2589 | struct e1000_adapter *adapter = container_of(work, | 2725 | struct e1000_adapter *adapter = container_of(work, |
2590 | struct e1000_adapter, watchdog_task); | 2726 | struct e1000_adapter, watchdog_task); |
2591 | |||
2592 | struct net_device *netdev = adapter->netdev; | 2727 | struct net_device *netdev = adapter->netdev; |
2593 | struct e1000_mac_info *mac = &adapter->hw.mac; | 2728 | struct e1000_mac_info *mac = &adapter->hw.mac; |
2594 | struct e1000_ring *tx_ring = adapter->tx_ring; | 2729 | struct e1000_ring *tx_ring = adapter->tx_ring; |
2595 | struct e1000_hw *hw = &adapter->hw; | 2730 | struct e1000_hw *hw = &adapter->hw; |
2596 | u32 link, tctl; | 2731 | u32 link, tctl; |
2597 | s32 ret_val; | ||
2598 | int tx_pending = 0; | 2732 | int tx_pending = 0; |
2599 | 2733 | ||
2600 | if ((netif_carrier_ok(netdev)) && | 2734 | link = e1000_has_link(adapter); |
2601 | (er32(STATUS) & E1000_STATUS_LU)) | 2735 | if ((netif_carrier_ok(netdev)) && link) { |
2736 | e1000e_enable_receives(adapter); | ||
2602 | goto link_up; | 2737 | goto link_up; |
2603 | |||
2604 | ret_val = mac->ops.check_for_link(hw); | ||
2605 | if ((ret_val == E1000_ERR_PHY) && | ||
2606 | (adapter->hw.phy.type == e1000_phy_igp_3) && | ||
2607 | (er32(CTRL) & | ||
2608 | E1000_PHY_CTRL_GBE_DISABLE)) { | ||
2609 | /* See e1000_kmrn_lock_loss_workaround_ich8lan() */ | ||
2610 | ndev_info(netdev, | ||
2611 | "Gigabit has been disabled, downgrading speed\n"); | ||
2612 | } | 2738 | } |
2613 | 2739 | ||
2614 | if ((e1000e_enable_tx_pkt_filtering(hw)) && | 2740 | if ((e1000e_enable_tx_pkt_filtering(hw)) && |
2615 | (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)) | 2741 | (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)) |
2616 | e1000_update_mng_vlan(adapter); | 2742 | e1000_update_mng_vlan(adapter); |
2617 | 2743 | ||
2618 | if ((adapter->hw.media_type == e1000_media_type_internal_serdes) && | ||
2619 | !(er32(TXCW) & E1000_TXCW_ANE)) | ||
2620 | link = adapter->hw.mac.serdes_has_link; | ||
2621 | else | ||
2622 | link = er32(STATUS) & E1000_STATUS_LU; | ||
2623 | |||
2624 | if (link) { | 2744 | if (link) { |
2625 | if (!netif_carrier_ok(netdev)) { | 2745 | if (!netif_carrier_ok(netdev)) { |
2626 | bool txb2b = 1; | 2746 | bool txb2b = 1; |
2747 | /* update snapshot of PHY registers on LSC */ | ||
2627 | mac->ops.get_link_up_info(&adapter->hw, | 2748 | mac->ops.get_link_up_info(&adapter->hw, |
2628 | &adapter->link_speed, | 2749 | &adapter->link_speed, |
2629 | &adapter->link_duplex); | 2750 | &adapter->link_duplex); |
2630 | e1000_print_link_info(adapter); | 2751 | e1000_print_link_info(adapter); |
2631 | /* tweak tx_queue_len according to speed/duplex | 2752 | /* |
2632 | * and adjust the timeout factor */ | 2753 | * tweak tx_queue_len according to speed/duplex |
2754 | * and adjust the timeout factor | ||
2755 | */ | ||
2633 | netdev->tx_queue_len = adapter->tx_queue_len; | 2756 | netdev->tx_queue_len = adapter->tx_queue_len; |
2634 | adapter->tx_timeout_factor = 1; | 2757 | adapter->tx_timeout_factor = 1; |
2635 | switch (adapter->link_speed) { | 2758 | switch (adapter->link_speed) { |
@@ -2645,8 +2768,10 @@ static void e1000_watchdog_task(struct work_struct *work) | |||
2645 | break; | 2768 | break; |
2646 | } | 2769 | } |
2647 | 2770 | ||
2648 | /* workaround: re-program speed mode bit after | 2771 | /* |
2649 | * link-up event */ | 2772 | * workaround: re-program speed mode bit after |
2773 | * link-up event | ||
2774 | */ | ||
2650 | if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && | 2775 | if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && |
2651 | !txb2b) { | 2776 | !txb2b) { |
2652 | u32 tarc0; | 2777 | u32 tarc0; |
@@ -2655,8 +2780,10 @@ static void e1000_watchdog_task(struct work_struct *work) | |||
2655 | ew32(TARC0, tarc0); | 2780 | ew32(TARC0, tarc0); |
2656 | } | 2781 | } |
2657 | 2782 | ||
2658 | /* disable TSO for pcie and 10/100 speeds, to avoid | 2783 | /* |
2659 | * some hardware issues */ | 2784 | * disable TSO for pcie and 10/100 speeds, to avoid |
2785 | * some hardware issues | ||
2786 | */ | ||
2660 | if (!(adapter->flags & FLAG_TSO_FORCE)) { | 2787 | if (!(adapter->flags & FLAG_TSO_FORCE)) { |
2661 | switch (adapter->link_speed) { | 2788 | switch (adapter->link_speed) { |
2662 | case SPEED_10: | 2789 | case SPEED_10: |
@@ -2676,8 +2803,10 @@ static void e1000_watchdog_task(struct work_struct *work) | |||
2676 | } | 2803 | } |
2677 | } | 2804 | } |
2678 | 2805 | ||
2679 | /* enable transmits in the hardware, need to do this | 2806 | /* |
2680 | * after setting TARC0 */ | 2807 | * enable transmits in the hardware, need to do this |
2808 | * after setting TARC(0) | ||
2809 | */ | ||
2681 | tctl = er32(TCTL); | 2810 | tctl = er32(TCTL); |
2682 | tctl |= E1000_TCTL_EN; | 2811 | tctl |= E1000_TCTL_EN; |
2683 | ew32(TCTL, tctl); | 2812 | ew32(TCTL, tctl); |
@@ -2688,13 +2817,6 @@ static void e1000_watchdog_task(struct work_struct *work) | |||
2688 | if (!test_bit(__E1000_DOWN, &adapter->state)) | 2817 | if (!test_bit(__E1000_DOWN, &adapter->state)) |
2689 | mod_timer(&adapter->phy_info_timer, | 2818 | mod_timer(&adapter->phy_info_timer, |
2690 | round_jiffies(jiffies + 2 * HZ)); | 2819 | round_jiffies(jiffies + 2 * HZ)); |
2691 | } else { | ||
2692 | /* make sure the receive unit is started */ | ||
2693 | if (adapter->flags & FLAG_RX_NEEDS_RESTART) { | ||
2694 | u32 rctl = er32(RCTL); | ||
2695 | ew32(RCTL, rctl | | ||
2696 | E1000_RCTL_EN); | ||
2697 | } | ||
2698 | } | 2820 | } |
2699 | } else { | 2821 | } else { |
2700 | if (netif_carrier_ok(netdev)) { | 2822 | if (netif_carrier_ok(netdev)) { |
@@ -2731,23 +2853,27 @@ link_up: | |||
2731 | tx_pending = (e1000_desc_unused(tx_ring) + 1 < | 2853 | tx_pending = (e1000_desc_unused(tx_ring) + 1 < |
2732 | tx_ring->count); | 2854 | tx_ring->count); |
2733 | if (tx_pending) { | 2855 | if (tx_pending) { |
2734 | /* We've lost link, so the controller stops DMA, | 2856 | /* |
2857 | * We've lost link, so the controller stops DMA, | ||
2735 | * but we've got queued Tx work that's never going | 2858 | * but we've got queued Tx work that's never going |
2736 | * to get done, so reset controller to flush Tx. | 2859 | * to get done, so reset controller to flush Tx. |
2737 | * (Do the reset outside of interrupt context). */ | 2860 | * (Do the reset outside of interrupt context). |
2861 | */ | ||
2738 | adapter->tx_timeout_count++; | 2862 | adapter->tx_timeout_count++; |
2739 | schedule_work(&adapter->reset_task); | 2863 | schedule_work(&adapter->reset_task); |
2740 | } | 2864 | } |
2741 | } | 2865 | } |
2742 | 2866 | ||
2743 | /* Cause software interrupt to ensure rx ring is cleaned */ | 2867 | /* Cause software interrupt to ensure Rx ring is cleaned */ |
2744 | ew32(ICS, E1000_ICS_RXDMT0); | 2868 | ew32(ICS, E1000_ICS_RXDMT0); |
2745 | 2869 | ||
2746 | /* Force detection of hung controller every watchdog period */ | 2870 | /* Force detection of hung controller every watchdog period */ |
2747 | adapter->detect_tx_hung = 1; | 2871 | adapter->detect_tx_hung = 1; |
2748 | 2872 | ||
2749 | /* With 82571 controllers, LAA may be overwritten due to controller | 2873 | /* |
2750 | * reset from the other port. Set the appropriate LAA in RAR[0] */ | 2874 | * With 82571 controllers, LAA may be overwritten due to controller |
2875 | * reset from the other port. Set the appropriate LAA in RAR[0] | ||
2876 | */ | ||
2751 | if (e1000e_get_laa_state_82571(hw)) | 2877 | if (e1000e_get_laa_state_82571(hw)) |
2752 | e1000e_rar_set(hw, adapter->hw.mac.addr, 0); | 2878 | e1000e_rar_set(hw, adapter->hw.mac.addr, 0); |
2753 | 2879 | ||
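
The tx_pending test above leans on e1000_desc_unused(), which is ring arithmetic on next_to_use/next_to_clean with one slot always kept empty. A hedged sketch of that calculation (struct ring and desc_unused() are illustrative names) so the "+ 1 < count" comparison is easier to read:

#include <stdio.h>

struct ring {
        int count;          /* total descriptors in the ring        */
        int next_to_use;    /* next slot software will fill         */
        int next_to_clean;  /* next slot software expects hw to own */
};

/* free slots, leaving one empty so the tail never catches the head */
static int desc_unused(const struct ring *r)
{
        if (r->next_to_clean > r->next_to_use)
                return r->next_to_clean - r->next_to_use - 1;
        return r->count + r->next_to_clean - r->next_to_use - 1;
}

int main(void)
{
        struct ring r = { .count = 256, .next_to_use = 10, .next_to_clean = 4 };

        /* 256 + 4 - 10 - 1 = 249 free; work is "pending" if this + 1 < count */
        printf("unused=%d pending=%d\n", desc_unused(&r),
               desc_unused(&r) + 1 < r.count);
        return 0;
}
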
@@ -3023,16 +3149,20 @@ static void e1000_tx_queue(struct e1000_adapter *adapter, | |||
3023 | 3149 | ||
3024 | tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); | 3150 | tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); |
3025 | 3151 | ||
3026 | /* Force memory writes to complete before letting h/w | 3152 | /* |
3153 | * Force memory writes to complete before letting h/w | ||
3027 | * know there are new descriptors to fetch. (Only | 3154 | * know there are new descriptors to fetch. (Only |
3028 | * applicable for weak-ordered memory model archs, | 3155 | * applicable for weak-ordered memory model archs, |
3029 | * such as IA-64). */ | 3156 | * such as IA-64). |
3157 | */ | ||
3030 | wmb(); | 3158 | wmb(); |
3031 | 3159 | ||
3032 | tx_ring->next_to_use = i; | 3160 | tx_ring->next_to_use = i; |
3033 | writel(i, adapter->hw.hw_addr + tx_ring->tail); | 3161 | writel(i, adapter->hw.hw_addr + tx_ring->tail); |
3034 | /* we need this if more than one processor can write to our tail | 3162 | /* |
3035 | * at a time, it synchronizes IO on IA64/Altix systems */ | 3163 | * we need this if more than one processor can write to our tail |
3164 | * at a time, it synchronizes IO on IA64/Altix systems | ||
3165 | */ | ||
3036 | mmiowb(); | 3166 | mmiowb(); |
3037 | } | 3167 | } |
3038 | 3168 | ||
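
The barrier comments reworded in this hunk describe a standard descriptor-ring hand-off: build the descriptor, write-barrier, then bump the device tail register. A hedged user-space sketch of the ordering is below; the fake tail register, __sync_synchronize() and tx_queue() are stand-ins for the MMIO write, wmb() and the driver function, which only exist in kernel context.

#include <stdio.h>
#include <stdint.h>

#define RING_SIZE 8

struct desc { uint64_t addr; uint32_t len; uint32_t flags; };

static struct desc ring[RING_SIZE];
static volatile uint32_t fake_tail_reg;   /* stands in for the MMIO tail */

static void tx_queue(unsigned int i, uint64_t dma_addr, uint32_t len)
{
        ring[i].addr  = dma_addr;
        ring[i].len   = len;
        ring[i].flags = 1;               /* descriptor fully built */

        /*
         * Make sure the descriptor contents are visible before the device
         * is told a new tail exists (wmb() in the driver).
         */
        __sync_synchronize();

        fake_tail_reg = (i + 1) % RING_SIZE;  /* writel(i, ... tail) stand-in */
        /* the driver follows this with mmiowb() for IA64/Altix IO ordering */
}

int main(void)
{
        tx_queue(0, 0x1000, 64);
        printf("tail now %u\n", fake_tail_reg);
        return 0;
}
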
@@ -3080,13 +3210,17 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) | |||
3080 | struct e1000_adapter *adapter = netdev_priv(netdev); | 3210 | struct e1000_adapter *adapter = netdev_priv(netdev); |
3081 | 3211 | ||
3082 | netif_stop_queue(netdev); | 3212 | netif_stop_queue(netdev); |
3083 | /* Herbert's original patch had: | 3213 | /* |
3214 | * Herbert's original patch had: | ||
3084 | * smp_mb__after_netif_stop_queue(); | 3215 | * smp_mb__after_netif_stop_queue(); |
3085 | * but since that doesn't exist yet, just open code it. */ | 3216 | * but since that doesn't exist yet, just open code it. |
3217 | */ | ||
3086 | smp_mb(); | 3218 | smp_mb(); |
3087 | 3219 | ||
3088 | /* We need to check again in a case another CPU has just | 3220 | /* |
3089 | * made room available. */ | 3221 | * We need to check again in a case another CPU has just |
3222 | * made room available. | ||
3223 | */ | ||
3090 | if (e1000_desc_unused(adapter->tx_ring) < size) | 3224 | if (e1000_desc_unused(adapter->tx_ring) < size) |
3091 | return -EBUSY; | 3225 | return -EBUSY; |
3092 | 3226 | ||
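
The stop-and-recheck sequence above closes a race with the Tx clean-up path: stop the queue, issue a full barrier, then test the free-descriptor count once more before reporting busy. A hedged sketch of that control flow, with the netdev queue calls replaced by a flag so it builds stand-alone:

#include <stdio.h>
#include <stdbool.h>

static bool queue_stopped;
static int  descriptors_free = 1;

static int maybe_stop_tx(int needed)
{
        if (descriptors_free >= needed)
                return 0;                    /* fast path: enough room   */

        queue_stopped = true;                /* netif_stop_queue()       */
        __sync_synchronize();                /* smp_mb() in the driver   */

        /*
         * another CPU's clean-up may have freed room between the test
         * and the stop; re-check before reporting the queue as busy
         */
        if (descriptors_free < needed)
                return -1;                   /* -EBUSY                   */

        queue_stopped = false;               /* netif_start_queue()      */
        return 0;
}

int main(void)
{
        printf("need 2 -> %s\n", maybe_stop_tx(2) ? "busy" : "ok");
        return 0;
}
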
@@ -3133,21 +3267,29 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
3133 | } | 3267 | } |
3134 | 3268 | ||
3135 | mss = skb_shinfo(skb)->gso_size; | 3269 | mss = skb_shinfo(skb)->gso_size; |
3136 | /* The controller does a simple calculation to | 3270 | /* |
3271 | * The controller does a simple calculation to | ||
3137 | * make sure there is enough room in the FIFO before | 3272 | * make sure there is enough room in the FIFO before |
3138 | * initiating the DMA for each buffer. The calc is: | 3273 | * initiating the DMA for each buffer. The calc is: |
3139 | * 4 = ceil(buffer len/mss). To make sure we don't | 3274 | * 4 = ceil(buffer len/mss). To make sure we don't |
3140 | * overrun the FIFO, adjust the max buffer len if mss | 3275 | * overrun the FIFO, adjust the max buffer len if mss |
3141 | * drops. */ | 3276 | * drops. |
3277 | */ | ||
3142 | if (mss) { | 3278 | if (mss) { |
3143 | u8 hdr_len; | 3279 | u8 hdr_len; |
3144 | max_per_txd = min(mss << 2, max_per_txd); | 3280 | max_per_txd = min(mss << 2, max_per_txd); |
3145 | max_txd_pwr = fls(max_per_txd) - 1; | 3281 | max_txd_pwr = fls(max_per_txd) - 1; |
3146 | 3282 | ||
3147 | /* TSO Workaround for 82571/2/3 Controllers -- if skb->data | 3283 | /* |
3148 | * points to just header, pull a few bytes of payload from | 3284 | * TSO Workaround for 82571/2/3 Controllers -- if skb->data |
3149 | * frags into skb->data */ | 3285 | * points to just header, pull a few bytes of payload from |
3286 | * frags into skb->data | ||
3287 | */ | ||
3150 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | 3288 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); |
3289 | /* | ||
3290 | * we do this workaround for ES2LAN, but it is un-necessary, | ||
3291 | * avoiding it could save a lot of cycles | ||
3292 | */ | ||
3151 | if (skb->data_len && (hdr_len == len)) { | 3293 | if (skb->data_len && (hdr_len == len)) { |
3152 | unsigned int pull_size; | 3294 | unsigned int pull_size; |
3153 | 3295 | ||
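
The MSS clamp in this hunk bounds the per-descriptor buffer so the controller's ceil(len/mss) FIFO estimate holds; the power-of-two exponent then comes from fls(). A hedged sketch of just that arithmetic, with an illustrative 8 KB cap and a typical 1460-byte MSS (fls_sketch() is a stand-in for the kernel helper):

#include <stdio.h>

/* highest set bit position, 1-based: a stand-in for the kernel's fls() */
static int fls_sketch(unsigned int x)
{
        int r = 0;

        while (x) {
                x >>= 1;
                r++;
        }
        return r;
}

int main(void)
{
        unsigned int max_per_txd = 8192;     /* illustrative default cap */
        unsigned int mss = 1460;             /* typical TCP MSS          */

        if ((mss << 2) < max_per_txd)
                max_per_txd = mss << 2;      /* 5840: 4 * mss            */
        int max_txd_pwr = fls_sketch(max_per_txd) - 1;   /* floor(log2)  */

        printf("max_per_txd=%u max_txd_pwr=%d\n", max_per_txd, max_txd_pwr);
        return 0;
}
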
@@ -3181,8 +3323,10 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
3181 | /* Collision - tell upper layer to requeue */ | 3323 | /* Collision - tell upper layer to requeue */ |
3182 | return NETDEV_TX_LOCKED; | 3324 | return NETDEV_TX_LOCKED; |
3183 | 3325 | ||
3184 | /* need: count + 2 desc gap to keep tail from touching | 3326 | /* |
3185 | * head, otherwise try next time */ | 3327 | * need: count + 2 desc gap to keep tail from touching |
3328 | * head, otherwise try next time | ||
3329 | */ | ||
3186 | if (e1000_maybe_stop_tx(netdev, count + 2)) { | 3330 | if (e1000_maybe_stop_tx(netdev, count + 2)) { |
3187 | spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags); | 3331 | spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags); |
3188 | return NETDEV_TX_BUSY; | 3332 | return NETDEV_TX_BUSY; |
@@ -3207,9 +3351,11 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
3207 | else if (e1000_tx_csum(adapter, skb)) | 3351 | else if (e1000_tx_csum(adapter, skb)) |
3208 | tx_flags |= E1000_TX_FLAGS_CSUM; | 3352 | tx_flags |= E1000_TX_FLAGS_CSUM; |
3209 | 3353 | ||
3210 | /* Old method was to assume IPv4 packet by default if TSO was enabled. | 3354 | /* |
3355 | * Old method was to assume IPv4 packet by default if TSO was enabled. | ||
3211 | * 82571 hardware supports TSO capabilities for IPv6 as well... | 3356 | * 82571 hardware supports TSO capabilities for IPv6 as well... |
3212 | * no longer assume, we must. */ | 3357 | * no longer assume, we must. |
3358 | */ | ||
3213 | if (skb->protocol == htons(ETH_P_IP)) | 3359 | if (skb->protocol == htons(ETH_P_IP)) |
3214 | tx_flags |= E1000_TX_FLAGS_IPV4; | 3360 | tx_flags |= E1000_TX_FLAGS_IPV4; |
3215 | 3361 | ||
@@ -3307,14 +3453,16 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
3307 | while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) | 3453 | while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) |
3308 | msleep(1); | 3454 | msleep(1); |
3309 | /* e1000e_down has a dependency on max_frame_size */ | 3455 | /* e1000e_down has a dependency on max_frame_size */ |
3310 | adapter->hw.mac.max_frame_size = max_frame; | 3456 | adapter->max_frame_size = max_frame; |
3311 | if (netif_running(netdev)) | 3457 | if (netif_running(netdev)) |
3312 | e1000e_down(adapter); | 3458 | e1000e_down(adapter); |
3313 | 3459 | ||
3314 | /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN | 3460 | /* |
3461 | * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN | ||
3315 | * means we reserve 2 more, this pushes us to allocate from the next | 3462 | * means we reserve 2 more, this pushes us to allocate from the next |
3316 | * larger slab size. | 3463 | * larger slab size. |
3317 | * i.e. RXBUFFER_2048 --> size-4096 slab */ | 3464 | * i.e. RXBUFFER_2048 --> size-4096 slab |
3465 | */ | ||
3318 | 3466 | ||
3319 | if (max_frame <= 256) | 3467 | if (max_frame <= 256) |
3320 | adapter->rx_buffer_len = 256; | 3468 | adapter->rx_buffer_len = 256; |
@@ -3331,7 +3479,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
3331 | if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || | 3479 | if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || |
3332 | (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) | 3480 | (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) |
3333 | adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN | 3481 | adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN |
3334 | + ETH_FCS_LEN ; | 3482 | + ETH_FCS_LEN; |
3335 | 3483 | ||
3336 | ndev_info(netdev, "changing MTU from %d to %d\n", | 3484 | ndev_info(netdev, "changing MTU from %d to %d\n", |
3337 | netdev->mtu, new_mtu); | 3485 | netdev->mtu, new_mtu); |
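
The buffer-length selection that starts in the previous hunk and ends here is a bucket lookup driven by the slab note above, with one special case so standard frames (tagged or not) share a size. A hedged sketch follows; only the 256-byte bucket and the standard-frame case are visible in the diff, so the intermediate break-points below are placeholders.

#include <stdio.h>

#define ETH_FRAME_LEN 1514
#define ETH_FCS_LEN      4
#define VLAN_HLEN        4

static int rx_buffer_len(int max_frame)
{
        int len;

        if (max_frame <= 256)
                len = 256;
        else if (max_frame <= 2048)
                len = 2048;          /* placeholder bucket */
        else
                len = 4096;          /* placeholder bucket */

        /* standard frames, with or without a VLAN tag, share one size */
        if (max_frame == ETH_FRAME_LEN + ETH_FCS_LEN ||
            max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)
                len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;

        return len;
}

int main(void)
{
        printf("mtu 1500 -> rx_buffer_len %d\n", rx_buffer_len(1518));
        return 0;
}
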
@@ -3354,7 +3502,7 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, | |||
3354 | struct mii_ioctl_data *data = if_mii(ifr); | 3502 | struct mii_ioctl_data *data = if_mii(ifr); |
3355 | unsigned long irq_flags; | 3503 | unsigned long irq_flags; |
3356 | 3504 | ||
3357 | if (adapter->hw.media_type != e1000_media_type_copper) | 3505 | if (adapter->hw.phy.media_type != e1000_media_type_copper) |
3358 | return -EOPNOTSUPP; | 3506 | return -EOPNOTSUPP; |
3359 | 3507 | ||
3360 | switch (cmd) { | 3508 | switch (cmd) { |
@@ -3436,8 +3584,9 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
3436 | E1000_CTRL_EN_PHY_PWR_MGMT; | 3584 | E1000_CTRL_EN_PHY_PWR_MGMT; |
3437 | ew32(CTRL, ctrl); | 3585 | ew32(CTRL, ctrl); |
3438 | 3586 | ||
3439 | if (adapter->hw.media_type == e1000_media_type_fiber || | 3587 | if (adapter->hw.phy.media_type == e1000_media_type_fiber || |
3440 | adapter->hw.media_type == e1000_media_type_internal_serdes) { | 3588 | adapter->hw.phy.media_type == |
3589 | e1000_media_type_internal_serdes) { | ||
3441 | /* keep the laser running in D3 */ | 3590 | /* keep the laser running in D3 */ |
3442 | ctrl_ext = er32(CTRL_EXT); | 3591 | ctrl_ext = er32(CTRL_EXT); |
3443 | ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; | 3592 | ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; |
@@ -3467,8 +3616,10 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
3467 | if (adapter->hw.phy.type == e1000_phy_igp_3) | 3616 | if (adapter->hw.phy.type == e1000_phy_igp_3) |
3468 | e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); | 3617 | e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); |
3469 | 3618 | ||
3470 | /* Release control of h/w to f/w. If f/w is AMT enabled, this | 3619 | /* |
3471 | * would have already happened in close and is redundant. */ | 3620 | * Release control of h/w to f/w. If f/w is AMT enabled, this |
3621 | * would have already happened in close and is redundant. | ||
3622 | */ | ||
3472 | e1000_release_hw_control(adapter); | 3623 | e1000_release_hw_control(adapter); |
3473 | 3624 | ||
3474 | pci_disable_device(pdev); | 3625 | pci_disable_device(pdev); |
@@ -3543,9 +3694,11 @@ static int e1000_resume(struct pci_dev *pdev) | |||
3543 | 3694 | ||
3544 | netif_device_attach(netdev); | 3695 | netif_device_attach(netdev); |
3545 | 3696 | ||
3546 | /* If the controller has AMT, do not set DRV_LOAD until the interface | 3697 | /* |
3698 | * If the controller has AMT, do not set DRV_LOAD until the interface | ||
3547 | * is up. For all other cases, let the f/w know that the h/w is now | 3699 | * is up. For all other cases, let the f/w know that the h/w is now |
3548 | * under the control of the driver. */ | 3700 | * under the control of the driver. |
3701 | */ | ||
3549 | if (!(adapter->flags & FLAG_HAS_AMT) || !e1000e_check_mng_mode(&adapter->hw)) | 3702 | if (!(adapter->flags & FLAG_HAS_AMT) || !e1000e_check_mng_mode(&adapter->hw)) |
3550 | e1000_get_hw_control(adapter); | 3703 | e1000_get_hw_control(adapter); |
3551 | 3704 | ||
@@ -3656,9 +3809,11 @@ static void e1000_io_resume(struct pci_dev *pdev) | |||
3656 | 3809 | ||
3657 | netif_device_attach(netdev); | 3810 | netif_device_attach(netdev); |
3658 | 3811 | ||
3659 | /* If the controller has AMT, do not set DRV_LOAD until the interface | 3812 | /* |
3813 | * If the controller has AMT, do not set DRV_LOAD until the interface | ||
3660 | * is up. For all other cases, let the f/w know that the h/w is now | 3814 | * is up. For all other cases, let the f/w know that the h/w is now |
3661 | * under the control of the driver. */ | 3815 | * under the control of the driver. |
3816 | */ | ||
3662 | if (!(adapter->flags & FLAG_HAS_AMT) || | 3817 | if (!(adapter->flags & FLAG_HAS_AMT) || |
3663 | !e1000e_check_mng_mode(&adapter->hw)) | 3818 | !e1000e_check_mng_mode(&adapter->hw)) |
3664 | e1000_get_hw_control(adapter); | 3819 | e1000_get_hw_control(adapter); |
@@ -3825,10 +3980,10 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
3825 | 3980 | ||
3826 | hw->mac.ops.get_bus_info(&adapter->hw); | 3981 | hw->mac.ops.get_bus_info(&adapter->hw); |
3827 | 3982 | ||
3828 | adapter->hw.phy.wait_for_link = 0; | 3983 | adapter->hw.phy.autoneg_wait_to_complete = 0; |
3829 | 3984 | ||
3830 | /* Copper options */ | 3985 | /* Copper options */ |
3831 | if (adapter->hw.media_type == e1000_media_type_copper) { | 3986 | if (adapter->hw.phy.media_type == e1000_media_type_copper) { |
3832 | adapter->hw.phy.mdix = AUTO_ALL_MODES; | 3987 | adapter->hw.phy.mdix = AUTO_ALL_MODES; |
3833 | adapter->hw.phy.disable_polarity_correction = 0; | 3988 | adapter->hw.phy.disable_polarity_correction = 0; |
3834 | adapter->hw.phy.ms_type = e1000_ms_hw_default; | 3989 | adapter->hw.phy.ms_type = e1000_ms_hw_default; |
@@ -3852,15 +4007,19 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
3852 | if (pci_using_dac) | 4007 | if (pci_using_dac) |
3853 | netdev->features |= NETIF_F_HIGHDMA; | 4008 | netdev->features |= NETIF_F_HIGHDMA; |
3854 | 4009 | ||
3855 | /* We should not be using LLTX anymore, but we are still TX faster with | 4010 | /* |
3856 | * it. */ | 4011 | * We should not be using LLTX anymore, but we are still Tx faster with |
4012 | * it. | ||
4013 | */ | ||
3857 | netdev->features |= NETIF_F_LLTX; | 4014 | netdev->features |= NETIF_F_LLTX; |
3858 | 4015 | ||
3859 | if (e1000e_enable_mng_pass_thru(&adapter->hw)) | 4016 | if (e1000e_enable_mng_pass_thru(&adapter->hw)) |
3860 | adapter->flags |= FLAG_MNG_PT_ENABLED; | 4017 | adapter->flags |= FLAG_MNG_PT_ENABLED; |
3861 | 4018 | ||
3862 | /* before reading the NVM, reset the controller to | 4019 | /* |
3863 | * put the device in a known good starting state */ | 4020 | * before reading the NVM, reset the controller to |
4021 | * put the device in a known good starting state | ||
4022 | */ | ||
3864 | adapter->hw.mac.ops.reset_hw(&adapter->hw); | 4023 | adapter->hw.mac.ops.reset_hw(&adapter->hw); |
3865 | 4024 | ||
3866 | /* | 4025 | /* |
@@ -3910,8 +4069,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
3910 | /* Initialize link parameters. User can change them with ethtool */ | 4069 | /* Initialize link parameters. User can change them with ethtool */ |
3911 | adapter->hw.mac.autoneg = 1; | 4070 | adapter->hw.mac.autoneg = 1; |
3912 | adapter->fc_autoneg = 1; | 4071 | adapter->fc_autoneg = 1; |
3913 | adapter->hw.mac.original_fc = e1000_fc_default; | 4072 | adapter->hw.fc.original_type = e1000_fc_default; |
3914 | adapter->hw.mac.fc = e1000_fc_default; | 4073 | adapter->hw.fc.type = e1000_fc_default; |
3915 | adapter->hw.phy.autoneg_advertised = 0x2f; | 4074 | adapter->hw.phy.autoneg_advertised = 0x2f; |
3916 | 4075 | ||
3917 | /* ring size defaults */ | 4076 | /* ring size defaults */ |
@@ -3954,9 +4113,11 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
3954 | /* reset the hardware with the new settings */ | 4113 | /* reset the hardware with the new settings */ |
3955 | e1000e_reset(adapter); | 4114 | e1000e_reset(adapter); |
3956 | 4115 | ||
3957 | /* If the controller has AMT, do not set DRV_LOAD until the interface | 4116 | /* |
4117 | * If the controller has AMT, do not set DRV_LOAD until the interface | ||
3958 | * is up. For all other cases, let the f/w know that the h/w is now | 4118 | * is up. For all other cases, let the f/w know that the h/w is now |
3959 | * under the control of the driver. */ | 4119 | * under the control of the driver. |
4120 | */ | ||
3960 | if (!(adapter->flags & FLAG_HAS_AMT) || | 4121 | if (!(adapter->flags & FLAG_HAS_AMT) || |
3961 | !e1000e_check_mng_mode(&adapter->hw)) | 4122 | !e1000e_check_mng_mode(&adapter->hw)) |
3962 | e1000_get_hw_control(adapter); | 4123 | e1000_get_hw_control(adapter); |
@@ -4013,16 +4174,20 @@ static void __devexit e1000_remove(struct pci_dev *pdev) | |||
4013 | struct net_device *netdev = pci_get_drvdata(pdev); | 4174 | struct net_device *netdev = pci_get_drvdata(pdev); |
4014 | struct e1000_adapter *adapter = netdev_priv(netdev); | 4175 | struct e1000_adapter *adapter = netdev_priv(netdev); |
4015 | 4176 | ||
4016 | /* flush_scheduled work may reschedule our watchdog task, so | 4177 | /* |
4017 | * explicitly disable watchdog tasks from being rescheduled */ | 4178 | * flush_scheduled work may reschedule our watchdog task, so |
4179 | * explicitly disable watchdog tasks from being rescheduled | ||
4180 | */ | ||
4018 | set_bit(__E1000_DOWN, &adapter->state); | 4181 | set_bit(__E1000_DOWN, &adapter->state); |
4019 | del_timer_sync(&adapter->watchdog_timer); | 4182 | del_timer_sync(&adapter->watchdog_timer); |
4020 | del_timer_sync(&adapter->phy_info_timer); | 4183 | del_timer_sync(&adapter->phy_info_timer); |
4021 | 4184 | ||
4022 | flush_scheduled_work(); | 4185 | flush_scheduled_work(); |
4023 | 4186 | ||
4024 | /* Release control of h/w to f/w. If f/w is AMT enabled, this | 4187 | /* |
4025 | * would have already happened in close and is redundant. */ | 4188 | * Release control of h/w to f/w. If f/w is AMT enabled, this |
4189 | * would have already happened in close and is redundant. | ||
4190 | */ | ||
4026 | e1000_release_hw_control(adapter); | 4191 | e1000_release_hw_control(adapter); |
4027 | 4192 | ||
4028 | unregister_netdev(netdev); | 4193 | unregister_netdev(netdev); |
@@ -4060,13 +4225,16 @@ static struct pci_device_id e1000_pci_tbl[] = { | |||
4060 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 }, | 4225 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 }, |
4061 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 }, | 4226 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 }, |
4062 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 }, | 4227 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 }, |
4228 | |||
4063 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 }, | 4229 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 }, |
4064 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 }, | 4230 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 }, |
4065 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 }, | 4231 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 }, |
4066 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 }, | 4232 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 }, |
4233 | |||
4067 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 }, | 4234 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 }, |
4068 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 }, | 4235 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 }, |
4069 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 }, | 4236 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 }, |
4237 | |||
4070 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT), | 4238 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT), |
4071 | board_80003es2lan }, | 4239 | board_80003es2lan }, |
4072 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT), | 4240 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT), |
@@ -4075,6 +4243,7 @@ static struct pci_device_id e1000_pci_tbl[] = { | |||
4075 | board_80003es2lan }, | 4243 | board_80003es2lan }, |
4076 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT), | 4244 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT), |
4077 | board_80003es2lan }, | 4245 | board_80003es2lan }, |
4246 | |||
4078 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan }, | 4247 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan }, |
4079 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan }, | 4248 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan }, |
4080 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan }, | 4249 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan }, |
@@ -4082,6 +4251,7 @@ static struct pci_device_id e1000_pci_tbl[] = { | |||
4082 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan }, | 4251 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan }, |
4083 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan }, | 4252 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan }, |
4084 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan }, | 4253 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan }, |
4254 | |||
4085 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan }, | 4255 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan }, |
4086 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan }, | 4256 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan }, |
4087 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan }, | 4257 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan }, |
@@ -4099,7 +4269,7 @@ static struct pci_driver e1000_driver = { | |||
4099 | .probe = e1000_probe, | 4269 | .probe = e1000_probe, |
4100 | .remove = __devexit_p(e1000_remove), | 4270 | .remove = __devexit_p(e1000_remove), |
4101 | #ifdef CONFIG_PM | 4271 | #ifdef CONFIG_PM |
4102 | /* Power Managment Hooks */ | 4272 | /* Power Management Hooks */ |
4103 | .suspend = e1000_suspend, | 4273 | .suspend = e1000_suspend, |
4104 | .resume = e1000_resume, | 4274 | .resume = e1000_resume, |
4105 | #endif | 4275 | #endif |
@@ -4118,7 +4288,7 @@ static int __init e1000_init_module(void) | |||
4118 | int ret; | 4288 | int ret; |
4119 | printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n", | 4289 | printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n", |
4120 | e1000e_driver_name, e1000e_driver_version); | 4290 | e1000e_driver_name, e1000e_driver_version); |
4121 | printk(KERN_INFO "%s: Copyright (c) 1999-2007 Intel Corporation.\n", | 4291 | printk(KERN_INFO "%s: Copyright (c) 1999-2008 Intel Corporation.\n", |
4122 | e1000e_driver_name); | 4292 | e1000e_driver_name); |
4123 | ret = pci_register_driver(&e1000_driver); | 4293 | ret = pci_register_driver(&e1000_driver); |
4124 | 4294 | ||
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c index df266c32ac4b..a66b92efcf80 100644 --- a/drivers/net/e1000e/param.c +++ b/drivers/net/e1000e/param.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2007 Intel Corporation. | 4 | Copyright(c) 1999 - 2008 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -30,7 +30,8 @@ | |||
30 | 30 | ||
31 | #include "e1000.h" | 31 | #include "e1000.h" |
32 | 32 | ||
33 | /* This is the only thing that needs to be changed to adjust the | 33 | /* |
34 | * This is the only thing that needs to be changed to adjust the | ||
34 | * maximum number of ports that the driver can manage. | 35 | * maximum number of ports that the driver can manage. |
35 | */ | 36 | */ |
36 | 37 | ||
@@ -46,7 +47,8 @@ module_param(copybreak, uint, 0644); | |||
46 | MODULE_PARM_DESC(copybreak, | 47 | MODULE_PARM_DESC(copybreak, |
47 | "Maximum size of packet that is copied to a new buffer on receive"); | 48 | "Maximum size of packet that is copied to a new buffer on receive"); |
48 | 49 | ||
49 | /* All parameters are treated the same, as an integer array of values. | 50 | /* |
51 | * All parameters are treated the same, as an integer array of values. | ||
50 | * This macro just reduces the need to repeat the same declaration code | 52 | * This macro just reduces the need to repeat the same declaration code |
51 | * over and over (plus this helps to avoid typo bugs). | 53 | * over and over (plus this helps to avoid typo bugs). |
52 | */ | 54 | */ |
@@ -60,8 +62,9 @@ MODULE_PARM_DESC(copybreak, | |||
60 | MODULE_PARM_DESC(X, desc); | 62 | MODULE_PARM_DESC(X, desc); |
61 | 63 | ||
62 | 64 | ||
63 | /* Transmit Interrupt Delay in units of 1.024 microseconds | 65 | /* |
64 | * Tx interrupt delay needs to typically be set to something non zero | 66 | * Transmit Interrupt Delay in units of 1.024 microseconds |
67 | * Tx interrupt delay needs to typically be set to something non zero | ||
65 | * | 68 | * |
66 | * Valid Range: 0-65535 | 69 | * Valid Range: 0-65535 |
67 | */ | 70 | */ |
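
The E1000_PARAM() macro whose comment is reflowed in this hunk hides the repetitive per-port parameter declaration: an integer array, a count of supplied values, module_param_array_named() and MODULE_PARM_DESC(). A hedged kernel-module sketch of an equivalent macro is below; SKETCH_PARAM, MAX_PORTS and PARAM_UNSET are illustrative names, and the body is an assumed reconstruction rather than the driver's exact definition.

/* sketch_params.c - illustrative only; not the driver's actual macro body */
#include <linux/module.h>
#include <linux/moduleparam.h>

#define MAX_PORTS   32
#define PARAM_UNSET -1

/* one integer per port, plus a count of how many values the user supplied */
#define SKETCH_PARAM(X, desc)                                            \
        static int X[MAX_PORTS] = { [0 ... MAX_PORTS - 1] = PARAM_UNSET }; \
        static unsigned int num_##X;                                     \
        module_param_array_named(X, X, int, &num_##X, 0);                \
        MODULE_PARM_DESC(X, desc)

SKETCH_PARAM(TxIntDelay, "Transmit Interrupt Delay");

static int __init sketch_init(void)
{
        pr_info("TxIntDelay[0]=%d (%u values given)\n",
                TxIntDelay[0], num_TxIntDelay);
        return 0;
}
module_init(sketch_init);

static void __exit sketch_exit(void)
{
}
module_exit(sketch_exit);

MODULE_LICENSE("GPL");
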
@@ -70,7 +73,8 @@ E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay"); | |||
70 | #define MAX_TXDELAY 0xFFFF | 73 | #define MAX_TXDELAY 0xFFFF |
71 | #define MIN_TXDELAY 0 | 74 | #define MIN_TXDELAY 0 |
72 | 75 | ||
73 | /* Transmit Absolute Interrupt Delay in units of 1.024 microseconds | 76 | /* |
77 | * Transmit Absolute Interrupt Delay in units of 1.024 microseconds | ||
74 | * | 78 | * |
75 | * Valid Range: 0-65535 | 79 | * Valid Range: 0-65535 |
76 | */ | 80 | */ |
@@ -79,8 +83,9 @@ E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay"); | |||
79 | #define MAX_TXABSDELAY 0xFFFF | 83 | #define MAX_TXABSDELAY 0xFFFF |
80 | #define MIN_TXABSDELAY 0 | 84 | #define MIN_TXABSDELAY 0 |
81 | 85 | ||
82 | /* Receive Interrupt Delay in units of 1.024 microseconds | 86 | /* |
83 | * hardware will likely hang if you set this to anything but zero. | 87 | * Receive Interrupt Delay in units of 1.024 microseconds |
88 | * hardware will likely hang if you set this to anything but zero. | ||
84 | * | 89 | * |
85 | * Valid Range: 0-65535 | 90 | * Valid Range: 0-65535 |
86 | */ | 91 | */ |
@@ -89,7 +94,8 @@ E1000_PARAM(RxIntDelay, "Receive Interrupt Delay"); | |||
89 | #define MAX_RXDELAY 0xFFFF | 94 | #define MAX_RXDELAY 0xFFFF |
90 | #define MIN_RXDELAY 0 | 95 | #define MIN_RXDELAY 0 |
91 | 96 | ||
92 | /* Receive Absolute Interrupt Delay in units of 1.024 microseconds | 97 | /* |
98 | * Receive Absolute Interrupt Delay in units of 1.024 microseconds | ||
93 | * | 99 | * |
94 | * Valid Range: 0-65535 | 100 | * Valid Range: 0-65535 |
95 | */ | 101 | */ |
@@ -98,7 +104,8 @@ E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay"); | |||
98 | #define MAX_RXABSDELAY 0xFFFF | 104 | #define MAX_RXABSDELAY 0xFFFF |
99 | #define MIN_RXABSDELAY 0 | 105 | #define MIN_RXABSDELAY 0 |
100 | 106 | ||
101 | /* Interrupt Throttle Rate (interrupts/sec) | 107 | /* |
108 | * Interrupt Throttle Rate (interrupts/sec) | ||
102 | * | 109 | * |
103 | * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative) | 110 | * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative) |
104 | */ | 111 | */ |
@@ -107,7 +114,8 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); | |||
107 | #define MAX_ITR 100000 | 114 | #define MAX_ITR 100000 |
108 | #define MIN_ITR 100 | 115 | #define MIN_ITR 100 |
109 | 116 | ||
110 | /* Enable Smart Power Down of the PHY | 117 | /* |
118 | * Enable Smart Power Down of the PHY | ||
111 | * | 119 | * |
112 | * Valid Range: 0, 1 | 120 | * Valid Range: 0, 1 |
113 | * | 121 | * |
@@ -115,7 +123,8 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); | |||
115 | */ | 123 | */ |
116 | E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); | 124 | E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); |
117 | 125 | ||
118 | /* Enable Kumeran Lock Loss workaround | 126 | /* |
127 | * Enable Kumeran Lock Loss workaround | ||
119 | * | 128 | * |
120 | * Valid Range: 0, 1 | 129 | * Valid Range: 0, 1 |
121 | * | 130 | * |
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c index dab3c468a768..3a4574caa75b 100644 --- a/drivers/net/e1000e/phy.c +++ b/drivers/net/e1000e/phy.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /******************************************************************************* | 1 | /******************************************************************************* |
2 | 2 | ||
3 | Intel PRO/1000 Linux driver | 3 | Intel PRO/1000 Linux driver |
4 | Copyright(c) 1999 - 2007 Intel Corporation. | 4 | Copyright(c) 1999 - 2008 Intel Corporation. |
5 | 5 | ||
6 | This program is free software; you can redistribute it and/or modify it | 6 | This program is free software; you can redistribute it and/or modify it |
7 | under the terms and conditions of the GNU General Public License, | 7 | under the terms and conditions of the GNU General Public License, |
@@ -134,7 +134,8 @@ static s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) | |||
134 | return -E1000_ERR_PARAM; | 134 | return -E1000_ERR_PARAM; |
135 | } | 135 | } |
136 | 136 | ||
137 | /* Set up Op-code, Phy Address, and register offset in the MDI | 137 | /* |
138 | * Set up Op-code, Phy Address, and register offset in the MDI | ||
138 | * Control register. The MAC will take care of interfacing with the | 139 | * Control register. The MAC will take care of interfacing with the |
139 | * PHY to retrieve the desired data. | 140 | * PHY to retrieve the desired data. |
140 | */ | 141 | */ |
@@ -144,7 +145,11 @@ static s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) | |||
144 | 145 | ||
145 | ew32(MDIC, mdic); | 146 | ew32(MDIC, mdic); |
146 | 147 | ||
147 | /* Poll the ready bit to see if the MDI read completed */ | 148 | /* |
149 | * Poll the ready bit to see if the MDI read completed | ||
150 | * Increasing the time out as testing showed failures with | ||
151 | * the lower time out | ||
152 | */ | ||
148 | for (i = 0; i < 64; i++) { | 153 | for (i = 0; i < 64; i++) { |
149 | udelay(50); | 154 | udelay(50); |
150 | mdic = er32(MDIC); | 155 | mdic = er32(MDIC); |
@@ -182,7 +187,8 @@ static s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) | |||
182 | return -E1000_ERR_PARAM; | 187 | return -E1000_ERR_PARAM; |
183 | } | 188 | } |
184 | 189 | ||
185 | /* Set up Op-code, Phy Address, and register offset in the MDI | 190 | /* |
191 | * Set up Op-code, Phy Address, and register offset in the MDI | ||
186 | * Control register. The MAC will take care of interfacing with the | 192 | * Control register. The MAC will take care of interfacing with the |
187 | * PHY to retrieve the desired data. | 193 | * PHY to retrieve the desired data. |
188 | */ | 194 | */ |
@@ -409,14 +415,15 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw) | |||
409 | s32 ret_val; | 415 | s32 ret_val; |
410 | u16 phy_data; | 416 | u16 phy_data; |
411 | 417 | ||
412 | /* Enable CRS on TX. This must be set for half-duplex operation. */ | 418 | /* Enable CRS on Tx. This must be set for half-duplex operation. */ |
413 | ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); | 419 | ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); |
414 | if (ret_val) | 420 | if (ret_val) |
415 | return ret_val; | 421 | return ret_val; |
416 | 422 | ||
417 | phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; | 423 | phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; |
418 | 424 | ||
419 | /* Options: | 425 | /* |
426 | * Options: | ||
420 | * MDI/MDI-X = 0 (default) | 427 | * MDI/MDI-X = 0 (default) |
421 | * 0 - Auto for all speeds | 428 | * 0 - Auto for all speeds |
422 | * 1 - MDI mode | 429 | * 1 - MDI mode |
@@ -441,7 +448,8 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw) | |||
441 | break; | 448 | break; |
442 | } | 449 | } |
443 | 450 | ||
444 | /* Options: | 451 | /* |
452 | * Options: | ||
445 | * disable_polarity_correction = 0 (default) | 453 | * disable_polarity_correction = 0 (default) |
446 | * Automatic Correction for Reversed Cable Polarity | 454 | * Automatic Correction for Reversed Cable Polarity |
447 | * 0 - Disabled | 455 | * 0 - Disabled |
@@ -456,7 +464,8 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw) | |||
456 | return ret_val; | 464 | return ret_val; |
457 | 465 | ||
458 | if (phy->revision < 4) { | 466 | if (phy->revision < 4) { |
459 | /* Force TX_CLK in the Extended PHY Specific Control Register | 467 | /* |
468 | * Force TX_CLK in the Extended PHY Specific Control Register | ||
460 | * to 25MHz clock. | 469 | * to 25MHz clock. |
461 | */ | 470 | */ |
462 | ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); | 471 | ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); |
@@ -543,19 +552,21 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw) | |||
543 | 552 | ||
544 | /* set auto-master slave resolution settings */ | 553 | /* set auto-master slave resolution settings */ |
545 | if (hw->mac.autoneg) { | 554 | if (hw->mac.autoneg) { |
546 | /* when autonegotiation advertisement is only 1000Mbps then we | 555 | /* |
556 | * when autonegotiation advertisement is only 1000Mbps then we | ||
547 | * should disable SmartSpeed and enable Auto MasterSlave | 557 | * should disable SmartSpeed and enable Auto MasterSlave |
548 | * resolution as hardware default. */ | 558 | * resolution as hardware default. |
559 | */ | ||
549 | if (phy->autoneg_advertised == ADVERTISE_1000_FULL) { | 560 | if (phy->autoneg_advertised == ADVERTISE_1000_FULL) { |
550 | /* Disable SmartSpeed */ | 561 | /* Disable SmartSpeed */ |
551 | ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, | 562 | ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, |
552 | &data); | 563 | &data); |
553 | if (ret_val) | 564 | if (ret_val) |
554 | return ret_val; | 565 | return ret_val; |
555 | 566 | ||
556 | data &= ~IGP01E1000_PSCFR_SMART_SPEED; | 567 | data &= ~IGP01E1000_PSCFR_SMART_SPEED; |
557 | ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, | 568 | ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, |
558 | data); | 569 | data); |
559 | if (ret_val) | 570 | if (ret_val) |
560 | return ret_val; | 571 | return ret_val; |
561 | 572 | ||
@@ -630,14 +641,16 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) | |||
630 | return ret_val; | 641 | return ret_val; |
631 | } | 642 | } |
632 | 643 | ||
633 | /* Need to parse both autoneg_advertised and fc and set up | 644 | /* |
645 | * Need to parse both autoneg_advertised and fc and set up | ||
634 | * the appropriate PHY registers. First we will parse for | 646 | * the appropriate PHY registers. First we will parse for |
635 | * autoneg_advertised software override. Since we can advertise | 647 | * autoneg_advertised software override. Since we can advertise |
636 | * a plethora of combinations, we need to check each bit | 648 | * a plethora of combinations, we need to check each bit |
637 | * individually. | 649 | * individually. |
638 | */ | 650 | */ |
639 | 651 | ||
640 | /* First we clear all the 10/100 mb speed bits in the Auto-Neg | 652 | /* |
653 | * First we clear all the 10/100 mb speed bits in the Auto-Neg | ||
641 | * Advertisement Register (Address 4) and the 1000 mb speed bits in | 654 | * Advertisement Register (Address 4) and the 1000 mb speed bits in |
642 | * the 1000Base-T Control Register (Address 9). | 655 | * the 1000Base-T Control Register (Address 9). |
643 | */ | 656 | */ |
@@ -683,7 +696,8 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) | |||
683 | mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; | 696 | mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; |
684 | } | 697 | } |
685 | 698 | ||
686 | /* Check for a software override of the flow control settings, and | 699 | /* |
700 | * Check for a software override of the flow control settings, and | ||
687 | * setup the PHY advertisement registers accordingly. If | 701 | * setup the PHY advertisement registers accordingly. If |
688 | * auto-negotiation is enabled, then software will have to set the | 702 | * auto-negotiation is enabled, then software will have to set the |
689 | * "PAUSE" bits to the correct value in the Auto-Negotiation | 703 | * "PAUSE" bits to the correct value in the Auto-Negotiation |
@@ -696,38 +710,42 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) | |||
696 | * but not send pause frames). | 710 | * but not send pause frames). |
697 | * 2: Tx flow control is enabled (we can send pause frames | 711 | * 2: Tx flow control is enabled (we can send pause frames |
698 | * but we do not support receiving pause frames). | 712 | * but we do not support receiving pause frames). |
699 | * 3: Both Rx and TX flow control (symmetric) are enabled. | 713 | * 3: Both Rx and Tx flow control (symmetric) are enabled. |
700 | * other: No software override. The flow control configuration | 714 | * other: No software override. The flow control configuration |
701 | * in the EEPROM is used. | 715 | * in the EEPROM is used. |
702 | */ | 716 | */ |
703 | switch (hw->mac.fc) { | 717 | switch (hw->fc.type) { |
704 | case e1000_fc_none: | 718 | case e1000_fc_none: |
705 | /* Flow control (RX & TX) is completely disabled by a | 719 | /* |
720 | * Flow control (Rx & Tx) is completely disabled by a | ||
706 | * software over-ride. | 721 | * software over-ride. |
707 | */ | 722 | */ |
708 | mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); | 723 | mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); |
709 | break; | 724 | break; |
710 | case e1000_fc_rx_pause: | 725 | case e1000_fc_rx_pause: |
711 | /* RX Flow control is enabled, and TX Flow control is | 726 | /* |
727 | * Rx Flow control is enabled, and Tx Flow control is | ||
712 | * disabled, by a software over-ride. | 728 | * disabled, by a software over-ride. |
713 | */ | 729 | * |
714 | /* Since there really isn't a way to advertise that we are | 730 | * Since there really isn't a way to advertise that we are |
715 | * capable of RX Pause ONLY, we will advertise that we | 731 | * capable of Rx Pause ONLY, we will advertise that we |
716 | * support both symmetric and asymmetric RX PAUSE. Later | 732 | * support both symmetric and asymmetric Rx PAUSE. Later |
717 | * (in e1000e_config_fc_after_link_up) we will disable the | 733 | * (in e1000e_config_fc_after_link_up) we will disable the |
718 | * hw's ability to send PAUSE frames. | 734 | * hw's ability to send PAUSE frames. |
719 | */ | 735 | */ |
720 | mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); | 736 | mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); |
721 | break; | 737 | break; |
722 | case e1000_fc_tx_pause: | 738 | case e1000_fc_tx_pause: |
723 | /* TX Flow control is enabled, and RX Flow control is | 739 | /* |
740 | * Tx Flow control is enabled, and Rx Flow control is | ||
724 | * disabled, by a software over-ride. | 741 | * disabled, by a software over-ride. |
725 | */ | 742 | */ |
726 | mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; | 743 | mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; |
727 | mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; | 744 | mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; |
728 | break; | 745 | break; |
729 | case e1000_fc_full: | 746 | case e1000_fc_full: |
730 | /* Flow control (both RX and TX) is enabled by a software | 747 | /* |
748 | * Flow control (both Rx and Tx) is enabled by a software | ||
731 | * over-ride. | 749 | * over-ride. |
732 | */ | 750 | */ |
733 | mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); | 751 | mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); |
@@ -758,7 +776,7 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) | |||
758 | * Performs initial bounds checking on autoneg advertisement parameter, then | 776 | * Performs initial bounds checking on autoneg advertisement parameter, then |
759 | * configure to advertise the full capability. Setup the PHY to autoneg | 777 | * configure to advertise the full capability. Setup the PHY to autoneg |
760 | * and restart the negotiation process between the link partner. If | 778 | * and restart the negotiation process between the link partner. If |
761 | * wait_for_link, then wait for autoneg to complete before exiting. | 779 | * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. |
762 | **/ | 780 | **/ |
763 | static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) | 781 | static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) |
764 | { | 782 | { |
@@ -766,12 +784,14 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) | |||
766 | s32 ret_val; | 784 | s32 ret_val; |
767 | u16 phy_ctrl; | 785 | u16 phy_ctrl; |
768 | 786 | ||
769 | /* Perform some bounds checking on the autoneg advertisement | 787 | /* |
788 | * Perform some bounds checking on the autoneg advertisement | ||
770 | * parameter. | 789 | * parameter. |
771 | */ | 790 | */ |
772 | phy->autoneg_advertised &= phy->autoneg_mask; | 791 | phy->autoneg_advertised &= phy->autoneg_mask; |
773 | 792 | ||
774 | /* If autoneg_advertised is zero, we assume it was not defaulted | 793 | /* |
794 | * If autoneg_advertised is zero, we assume it was not defaulted | ||
775 | * by the calling code so we set to advertise full capability. | 795 | * by the calling code so we set to advertise full capability. |
776 | */ | 796 | */ |
777 | if (phy->autoneg_advertised == 0) | 797 | if (phy->autoneg_advertised == 0) |
@@ -785,7 +805,8 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) | |||
785 | } | 805 | } |
786 | hw_dbg(hw, "Restarting Auto-Neg\n"); | 806 | hw_dbg(hw, "Restarting Auto-Neg\n"); |
787 | 807 | ||
788 | /* Restart auto-negotiation by setting the Auto Neg Enable bit and | 808 | /* |
809 | * Restart auto-negotiation by setting the Auto Neg Enable bit and | ||
789 | * the Auto Neg Restart bit in the PHY control register. | 810 | * the Auto Neg Restart bit in the PHY control register. |
790 | */ | 811 | */ |
791 | ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl); | 812 | ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl); |
@@ -797,10 +818,11 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) | |||
797 | if (ret_val) | 818 | if (ret_val) |
798 | return ret_val; | 819 | return ret_val; |
799 | 820 | ||
800 | /* Does the user want to wait for Auto-Neg to complete here, or | 821 | /* |
822 | * Does the user want to wait for Auto-Neg to complete here, or | ||
801 | * check at a later time (for example, callback routine). | 823 | * check at a later time (for example, callback routine). |
802 | */ | 824 | */ |
803 | if (phy->wait_for_link) { | 825 | if (phy->autoneg_wait_to_complete) { |
804 | ret_val = e1000_wait_autoneg(hw); | 826 | ret_val = e1000_wait_autoneg(hw); |
805 | if (ret_val) { | 827 | if (ret_val) { |
806 | hw_dbg(hw, "Error while waiting for " | 828 | hw_dbg(hw, "Error while waiting for " |
@@ -829,14 +851,18 @@ s32 e1000e_setup_copper_link(struct e1000_hw *hw) | |||
829 | bool link; | 851 | bool link; |
830 | 852 | ||
831 | if (hw->mac.autoneg) { | 853 | if (hw->mac.autoneg) { |
832 | /* Setup autoneg and flow control advertisement and perform | 854 | /* |
833 | * autonegotiation. */ | 855 | * Setup autoneg and flow control advertisement and perform |
856 | * autonegotiation. | ||
857 | */ | ||
834 | ret_val = e1000_copper_link_autoneg(hw); | 858 | ret_val = e1000_copper_link_autoneg(hw); |
835 | if (ret_val) | 859 | if (ret_val) |
836 | return ret_val; | 860 | return ret_val; |
837 | } else { | 861 | } else { |
838 | /* PHY will be set to 10H, 10F, 100H or 100F | 862 | /* |
839 | * depending on user settings. */ | 863 | * PHY will be set to 10H, 10F, 100H or 100F |
864 | * depending on user settings. | ||
865 | */ | ||
840 | hw_dbg(hw, "Forcing Speed and Duplex\n"); | 866 | hw_dbg(hw, "Forcing Speed and Duplex\n"); |
841 | ret_val = e1000_phy_force_speed_duplex(hw); | 867 | ret_val = e1000_phy_force_speed_duplex(hw); |
842 | if (ret_val) { | 868 | if (ret_val) { |
@@ -845,7 +871,8 @@ s32 e1000e_setup_copper_link(struct e1000_hw *hw) | |||
845 | } | 871 | } |
846 | } | 872 | } |
847 | 873 | ||
848 | /* Check link status. Wait up to 100 microseconds for link to become | 874 | /* |
875 | * Check link status. Wait up to 100 microseconds for link to become | ||
849 | * valid. | 876 | * valid. |
850 | */ | 877 | */ |
851 | ret_val = e1000e_phy_has_link_generic(hw, | 878 | ret_val = e1000e_phy_has_link_generic(hw, |
@@ -891,7 +918,8 @@ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw) | |||
891 | if (ret_val) | 918 | if (ret_val) |
892 | return ret_val; | 919 | return ret_val; |
893 | 920 | ||
894 | /* Clear Auto-Crossover to force MDI manually. IGP requires MDI | 921 | /* |
922 | * Clear Auto-Crossover to force MDI manually. IGP requires MDI | ||
895 | * forced whenever speed and duplex are forced. | 923 | * forced whenever speed and duplex are forced. |
896 | */ | 924 | */ |
897 | ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); | 925 | ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); |
@@ -909,7 +937,7 @@ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw) | |||
909 | 937 | ||
910 | udelay(1); | 938 | udelay(1); |
911 | 939 | ||
912 | if (phy->wait_for_link) { | 940 | if (phy->autoneg_wait_to_complete) { |
913 | hw_dbg(hw, "Waiting for forced speed/duplex link on IGP phy.\n"); | 941 | hw_dbg(hw, "Waiting for forced speed/duplex link on IGP phy.\n"); |
914 | 942 | ||
915 | ret_val = e1000e_phy_has_link_generic(hw, | 943 | ret_val = e1000e_phy_has_link_generic(hw, |
@@ -941,7 +969,7 @@ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw) | |||
941 | * Calls the PHY setup function to force speed and duplex. Clears the | 969 | * Calls the PHY setup function to force speed and duplex. Clears the |
942 | * auto-crossover to force MDI manually. Resets the PHY to commit the | 970 | * auto-crossover to force MDI manually. Resets the PHY to commit the |
943 | * changes. If time expires while waiting for link up, we reset the DSP. | 971 | * changes. If time expires while waiting for link up, we reset the DSP. |
944 | * After reset, TX_CLK and CRS on TX must be set. Return successful upon | 972 | * After reset, TX_CLK and CRS on Tx must be set. Return successful upon |
945 | * successful completion, else return corresponding error code. | 973 | * successful completion, else return corresponding error code. |
946 | **/ | 974 | **/ |
947 | s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) | 975 | s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) |
@@ -951,7 +979,8 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) | |||
951 | u16 phy_data; | 979 | u16 phy_data; |
952 | bool link; | 980 | bool link; |
953 | 981 | ||
954 | /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI | 982 | /* |
983 | * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI | ||
955 | * forced whenever speed and duplex are forced. | 984 | * forced whenever speed and duplex are forced. |
956 | */ | 985 | */ |
957 | ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); | 986 | ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); |
@@ -980,7 +1009,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) | |||
980 | 1009 | ||
981 | udelay(1); | 1010 | udelay(1); |
982 | 1011 | ||
983 | if (phy->wait_for_link) { | 1012 | if (phy->autoneg_wait_to_complete) { |
984 | hw_dbg(hw, "Waiting for forced speed/duplex link on M88 phy.\n"); | 1013 | hw_dbg(hw, "Waiting for forced speed/duplex link on M88 phy.\n"); |
985 | 1014 | ||
986 | ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, | 1015 | ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, |
@@ -989,10 +1018,12 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) | |||
989 | return ret_val; | 1018 | return ret_val; |
990 | 1019 | ||
991 | if (!link) { | 1020 | if (!link) { |
992 | /* We didn't get link. | 1021 | /* |
1022 | * We didn't get link. | ||
993 | * Reset the DSP and cross our fingers. | 1023 | * Reset the DSP and cross our fingers. |
994 | */ | 1024 | */ |
995 | ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT, 0x001d); | 1025 | ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT, |
1026 | 0x001d); | ||
996 | if (ret_val) | 1027 | if (ret_val) |
997 | return ret_val; | 1028 | return ret_val; |
998 | ret_val = e1000e_phy_reset_dsp(hw); | 1029 | ret_val = e1000e_phy_reset_dsp(hw); |
@@ -1011,7 +1042,8 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) | |||
1011 | if (ret_val) | 1042 | if (ret_val) |
1012 | return ret_val; | 1043 | return ret_val; |
1013 | 1044 | ||
1014 | /* Resetting the phy means we need to re-force TX_CLK in the | 1045 | /* |
1046 | * Resetting the phy means we need to re-force TX_CLK in the | ||
1015 | * Extended PHY Specific Control Register to 25MHz clock from | 1047 | * Extended PHY Specific Control Register to 25MHz clock from |
1016 | * the reset value of 2.5MHz. | 1048 | * the reset value of 2.5MHz. |
1017 | */ | 1049 | */ |
@@ -1020,7 +1052,8 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) | |||
1020 | if (ret_val) | 1052 | if (ret_val) |
1021 | return ret_val; | 1053 | return ret_val; |
1022 | 1054 | ||
1023 | /* In addition, we must re-enable CRS on Tx for both half and full | 1055 | /* |
1056 | * In addition, we must re-enable CRS on Tx for both half and full | ||
1024 | * duplex. | 1057 | * duplex. |
1025 | */ | 1058 | */ |
1026 | ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); | 1059 | ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); |
@@ -1051,7 +1084,7 @@ void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl) | |||
1051 | u32 ctrl; | 1084 | u32 ctrl; |
1052 | 1085 | ||
1053 | /* Turn off flow control when forcing speed/duplex */ | 1086 | /* Turn off flow control when forcing speed/duplex */ |
1054 | mac->fc = e1000_fc_none; | 1087 | hw->fc.type = e1000_fc_none; |
1055 | 1088 | ||
1056 | /* Force speed/duplex on the mac */ | 1089 | /* Force speed/duplex on the mac */ |
1057 | ctrl = er32(CTRL); | 1090 | ctrl = er32(CTRL); |
@@ -1124,30 +1157,32 @@ s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active) | |||
1124 | data); | 1157 | data); |
1125 | if (ret_val) | 1158 | if (ret_val) |
1126 | return ret_val; | 1159 | return ret_val; |
1127 | /* LPLU and SmartSpeed are mutually exclusive. LPLU is used | 1160 | /* |
1161 | * LPLU and SmartSpeed are mutually exclusive. LPLU is used | ||
1128 | * during Dx states where the power conservation is most | 1162 | * during Dx states where the power conservation is most |
1129 | * important. During driver activity we should enable | 1163 | * important. During driver activity we should enable |
1130 | * SmartSpeed, so performance is maintained. */ | 1164 | * SmartSpeed, so performance is maintained. |
1165 | */ | ||
1131 | if (phy->smart_speed == e1000_smart_speed_on) { | 1166 | if (phy->smart_speed == e1000_smart_speed_on) { |
1132 | ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, | 1167 | ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, |
1133 | &data); | 1168 | &data); |
1134 | if (ret_val) | 1169 | if (ret_val) |
1135 | return ret_val; | 1170 | return ret_val; |
1136 | 1171 | ||
1137 | data |= IGP01E1000_PSCFR_SMART_SPEED; | 1172 | data |= IGP01E1000_PSCFR_SMART_SPEED; |
1138 | ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, | 1173 | ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, |
1139 | data); | 1174 | data); |
1140 | if (ret_val) | 1175 | if (ret_val) |
1141 | return ret_val; | 1176 | return ret_val; |
1142 | } else if (phy->smart_speed == e1000_smart_speed_off) { | 1177 | } else if (phy->smart_speed == e1000_smart_speed_off) { |
1143 | ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, | 1178 | ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, |
1144 | &data); | 1179 | &data); |
1145 | if (ret_val) | 1180 | if (ret_val) |
1146 | return ret_val; | 1181 | return ret_val; |
1147 | 1182 | ||
1148 | data &= ~IGP01E1000_PSCFR_SMART_SPEED; | 1183 | data &= ~IGP01E1000_PSCFR_SMART_SPEED; |
1149 | ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, | 1184 | ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, |
1150 | data); | 1185 | data); |
1151 | if (ret_val) | 1186 | if (ret_val) |
1152 | return ret_val; | 1187 | return ret_val; |
1153 | } | 1188 | } |
@@ -1249,8 +1284,10 @@ static s32 e1000_check_polarity_igp(struct e1000_hw *hw) | |||
1249 | s32 ret_val; | 1284 | s32 ret_val; |
1250 | u16 data, offset, mask; | 1285 | u16 data, offset, mask; |
1251 | 1286 | ||
1252 | /* Polarity is determined based on the speed of | 1287 | /* |
1253 | * our connection. */ | 1288 | * Polarity is determined based on the speed of |
1289 | * our connection. | ||
1290 | */ | ||
1254 | ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data); | 1291 | ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data); |
1255 | if (ret_val) | 1292 | if (ret_val) |
1256 | return ret_val; | 1293 | return ret_val; |
@@ -1260,7 +1297,8 @@ static s32 e1000_check_polarity_igp(struct e1000_hw *hw) | |||
1260 | offset = IGP01E1000_PHY_PCS_INIT_REG; | 1297 | offset = IGP01E1000_PHY_PCS_INIT_REG; |
1261 | mask = IGP01E1000_PHY_POLARITY_MASK; | 1298 | mask = IGP01E1000_PHY_POLARITY_MASK; |
1262 | } else { | 1299 | } else { |
1263 | /* This really only applies to 10Mbps since | 1300 | /* |
1301 | * This really only applies to 10Mbps since | ||
1264 | * there is no polarity for 100Mbps (always 0). | 1302 | * there is no polarity for 100Mbps (always 0). |
1265 | */ | 1303 | */ |
1266 | offset = IGP01E1000_PHY_PORT_STATUS; | 1304 | offset = IGP01E1000_PHY_PORT_STATUS; |
@@ -1278,7 +1316,7 @@ static s32 e1000_check_polarity_igp(struct e1000_hw *hw) | |||
1278 | } | 1316 | } |
1279 | 1317 | ||
1280 | /** | 1318 | /** |
1281 | * e1000_wait_autoneg - Wait for auto-neg compeletion | 1319 | * e1000_wait_autoneg - Wait for auto-neg completion |
1282 | * @hw: pointer to the HW structure | 1320 | * @hw: pointer to the HW structure |
1283 | * | 1321 | * |
1284 | * Waits for auto-negotiation to complete or for the auto-negotiation time | 1322 | * Waits for auto-negotiation to complete or for the auto-negotiation time |
@@ -1302,7 +1340,8 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw) | |||
1302 | msleep(100); | 1340 | msleep(100); |
1303 | } | 1341 | } |
1304 | 1342 | ||
1305 | /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation | 1343 | /* |
1344 | * PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation | ||
1306 | * has completed. | 1345 | * has completed. |
1307 | */ | 1346 | */ |
1308 | return ret_val; | 1347 | return ret_val; |
@@ -1324,7 +1363,8 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, | |||
1324 | u16 i, phy_status; | 1363 | u16 i, phy_status; |
1325 | 1364 | ||
1326 | for (i = 0; i < iterations; i++) { | 1365 | for (i = 0; i < iterations; i++) { |
1327 | /* Some PHYs require the PHY_STATUS register to be read | 1366 | /* |
1367 | * Some PHYs require the PHY_STATUS register to be read | ||
1328 | * twice due to the link bit being sticky. No harm doing | 1368 | * twice due to the link bit being sticky. No harm doing |
1329 | * it across the board. | 1369 | * it across the board. |
1330 | */ | 1370 | */ |
@@ -1412,10 +1452,12 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw) | |||
1412 | if (ret_val) | 1452 | if (ret_val) |
1413 | return ret_val; | 1453 | return ret_val; |
1414 | 1454 | ||
1415 | /* Getting bits 15:9, which represent the combination of | 1455 | /* |
1456 | * Getting bits 15:9, which represent the combination of | ||
1416 | * course and fine gain values. The result is a number | 1457 | * course and fine gain values. The result is a number |
1417 | * that can be put into the lookup table to obtain the | 1458 | * that can be put into the lookup table to obtain the |
1418 | * approximate cable length. */ | 1459 | * approximate cable length. |
1460 | */ | ||
1419 | cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & | 1461 | cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & |
1420 | IGP02E1000_AGC_LENGTH_MASK; | 1462 | IGP02E1000_AGC_LENGTH_MASK; |
1421 | 1463 | ||
@@ -1466,7 +1508,7 @@ s32 e1000e_get_phy_info_m88(struct e1000_hw *hw) | |||
1466 | u16 phy_data; | 1508 | u16 phy_data; |
1467 | bool link; | 1509 | bool link; |
1468 | 1510 | ||
1469 | if (hw->media_type != e1000_media_type_copper) { | 1511 | if (hw->phy.media_type != e1000_media_type_copper) { |
1470 | hw_dbg(hw, "Phy info is only valid for copper media\n"); | 1512 | hw_dbg(hw, "Phy info is only valid for copper media\n"); |
1471 | return -E1000_ERR_CONFIG; | 1513 | return -E1000_ERR_CONFIG; |
1472 | } | 1514 | } |
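The largest functional change in the phy.c diff is the switch from hw->mac.fc to hw->fc.type when building the autoneg advertisement; the mapping of flow-control modes onto the PAUSE/ASM_DIR bits is unchanged. A minimal userspace sketch of that mapping (the enum mirrors e1000_fc_*, the function is hypothetical, and the bit values are the standard MII advertisement pause bits):

/* Sketch of the pause-bit mapping applied in e1000_phy_setup_autoneg(). */
#include <stdint.h>
#include <stdio.h>

#define ADV_PAUSE       0x0400  /* symmetric PAUSE supported */
#define ADV_ASM_DIR     0x0800  /* asymmetric PAUSE direction */

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

static uint16_t apply_fc_advert(uint16_t adv, enum fc_mode fc)
{
        adv &= ~(ADV_ASM_DIR | ADV_PAUSE);      /* start from a clean slate */

        switch (fc) {
        case FC_NONE:                           /* advertise no pause at all */
                break;
        case FC_RX_PAUSE:                       /* Rx-only cannot be advertised, */
        case FC_FULL:                           /* so both modes set both bits */
                adv |= ADV_ASM_DIR | ADV_PAUSE;
                break;
        case FC_TX_PAUSE:                       /* asymmetric, Tx direction only */
                adv |= ADV_ASM_DIR;
                break;
        }
        return adv;
}

int main(void)
{
        printf("tx_pause advertisement -> 0x%04x\n", apply_fc_advert(0, FC_TX_PAUSE));
        return 0;
}

As the driver comment notes, Rx-only pause cannot be advertised directly, so e1000_fc_rx_pause advertises both bits and the MAC's own ability to send PAUSE frames is disabled later in e1000e_config_fc_after_link_up().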
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h index 93b7fb246960..26acd05c80b5 100644 --- a/drivers/net/ehea/ehea.h +++ b/drivers/net/ehea/ehea.h | |||
@@ -421,7 +421,7 @@ struct ehea_fw_handle_entry { | |||
421 | struct ehea_fw_handle_array { | 421 | struct ehea_fw_handle_array { |
422 | struct ehea_fw_handle_entry *arr; | 422 | struct ehea_fw_handle_entry *arr; |
423 | int num_entries; | 423 | int num_entries; |
424 | struct semaphore lock; | 424 | struct mutex lock; |
425 | }; | 425 | }; |
426 | 426 | ||
427 | struct ehea_bcmc_reg_entry { | 427 | struct ehea_bcmc_reg_entry { |
@@ -434,7 +434,7 @@ struct ehea_bcmc_reg_entry { | |||
434 | struct ehea_bcmc_reg_array { | 434 | struct ehea_bcmc_reg_array { |
435 | struct ehea_bcmc_reg_entry *arr; | 435 | struct ehea_bcmc_reg_entry *arr; |
436 | int num_entries; | 436 | int num_entries; |
437 | struct semaphore lock; | 437 | struct mutex lock; |
438 | }; | 438 | }; |
439 | 439 | ||
440 | #define EHEA_PORT_UP 1 | 440 | #define EHEA_PORT_UP 1 |
@@ -452,7 +452,7 @@ struct ehea_port { | |||
452 | struct vlan_group *vgrp; | 452 | struct vlan_group *vgrp; |
453 | struct ehea_eq *qp_eq; | 453 | struct ehea_eq *qp_eq; |
454 | struct work_struct reset_task; | 454 | struct work_struct reset_task; |
455 | struct semaphore port_lock; | 455 | struct mutex port_lock; |
456 | char int_aff_name[EHEA_IRQ_NAME_SIZE]; | 456 | char int_aff_name[EHEA_IRQ_NAME_SIZE]; |
457 | int allmulti; /* Indicates IFF_ALLMULTI state */ | 457 | int allmulti; /* Indicates IFF_ALLMULTI state */ |
458 | int promisc; /* Indicates IFF_PROMISC state */ | 458 | int promisc; /* Indicates IFF_PROMISC state */ |
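The ehea.h hunks replace binary semaphores with mutexes in the firmware-handle, BCMC and per-port structures. A minimal kernel-style sketch of the embedded-mutex pattern they move to (struct and function names are illustrative):

/* Sketch of embedding a mutex in a structure, as ehea now does for
 * port_lock and the fw_handle/bcmc arrays. Illustrative names only.
 */
#include <linux/mutex.h>
#include <linux/slab.h>

struct demo_port {
        struct mutex lock;      /* protects the fields below */
        int state;
};

static struct demo_port *demo_port_alloc(void)
{
        struct demo_port *p = kzalloc(sizeof(*p), GFP_KERNEL);

        if (!p)
                return NULL;
        mutex_init(&p->lock);   /* must be initialised before first use */
        return p;
}

static void demo_port_set_state(struct demo_port *p, int state)
{
        mutex_lock(&p->lock);   /* may sleep; not for interrupt context */
        p->state = state;
        mutex_unlock(&p->lock);
}

Unlike semaphores, mutexes enforce single-owner semantics and are tracked by lockdep, which is the usual motivation for this kind of conversion; like the semaphores they replace, they may sleep.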
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 07c742dd3f09..0c1c360a85ca 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/notifier.h> | 36 | #include <linux/notifier.h> |
37 | #include <linux/reboot.h> | 37 | #include <linux/reboot.h> |
38 | #include <asm/kexec.h> | 38 | #include <asm/kexec.h> |
39 | #include <linux/mutex.h> | ||
39 | 40 | ||
40 | #include <net/ip.h> | 41 | #include <net/ip.h> |
41 | 42 | ||
@@ -99,7 +100,7 @@ static int port_name_cnt; | |||
99 | static LIST_HEAD(adapter_list); | 100 | static LIST_HEAD(adapter_list); |
100 | u64 ehea_driver_flags; | 101 | u64 ehea_driver_flags; |
101 | struct work_struct ehea_rereg_mr_task; | 102 | struct work_struct ehea_rereg_mr_task; |
102 | struct semaphore dlpar_mem_lock; | 103 | static DEFINE_MUTEX(dlpar_mem_lock); |
103 | struct ehea_fw_handle_array ehea_fw_handles; | 104 | struct ehea_fw_handle_array ehea_fw_handles; |
104 | struct ehea_bcmc_reg_array ehea_bcmc_regs; | 105 | struct ehea_bcmc_reg_array ehea_bcmc_regs; |
105 | 106 | ||
@@ -1758,7 +1759,7 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa) | |||
1758 | 1759 | ||
1759 | memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len); | 1760 | memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len); |
1760 | 1761 | ||
1761 | down(&ehea_bcmc_regs.lock); | 1762 | mutex_lock(&ehea_bcmc_regs.lock); |
1762 | 1763 | ||
1763 | /* Deregister old MAC in pHYP */ | 1764 | /* Deregister old MAC in pHYP */ |
1764 | ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC); | 1765 | ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC); |
@@ -1776,7 +1777,7 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa) | |||
1776 | 1777 | ||
1777 | out_upregs: | 1778 | out_upregs: |
1778 | ehea_update_bcmc_registrations(); | 1779 | ehea_update_bcmc_registrations(); |
1779 | up(&ehea_bcmc_regs.lock); | 1780 | mutex_unlock(&ehea_bcmc_regs.lock); |
1780 | out_free: | 1781 | out_free: |
1781 | kfree(cb0); | 1782 | kfree(cb0); |
1782 | out: | 1783 | out: |
@@ -1938,7 +1939,7 @@ static void ehea_set_multicast_list(struct net_device *dev) | |||
1938 | } | 1939 | } |
1939 | ehea_promiscuous(dev, 0); | 1940 | ehea_promiscuous(dev, 0); |
1940 | 1941 | ||
1941 | down(&ehea_bcmc_regs.lock); | 1942 | mutex_lock(&ehea_bcmc_regs.lock); |
1942 | 1943 | ||
1943 | if (dev->flags & IFF_ALLMULTI) { | 1944 | if (dev->flags & IFF_ALLMULTI) { |
1944 | ehea_allmulti(dev, 1); | 1945 | ehea_allmulti(dev, 1); |
@@ -1969,7 +1970,7 @@ static void ehea_set_multicast_list(struct net_device *dev) | |||
1969 | } | 1970 | } |
1970 | out: | 1971 | out: |
1971 | ehea_update_bcmc_registrations(); | 1972 | ehea_update_bcmc_registrations(); |
1972 | up(&ehea_bcmc_regs.lock); | 1973 | mutex_unlock(&ehea_bcmc_regs.lock); |
1973 | return; | 1974 | return; |
1974 | } | 1975 | } |
1975 | 1976 | ||
@@ -2452,7 +2453,7 @@ static int ehea_up(struct net_device *dev) | |||
2452 | if (port->state == EHEA_PORT_UP) | 2453 | if (port->state == EHEA_PORT_UP) |
2453 | return 0; | 2454 | return 0; |
2454 | 2455 | ||
2455 | down(&ehea_fw_handles.lock); | 2456 | mutex_lock(&ehea_fw_handles.lock); |
2456 | 2457 | ||
2457 | ret = ehea_port_res_setup(port, port->num_def_qps, | 2458 | ret = ehea_port_res_setup(port, port->num_def_qps, |
2458 | port->num_add_tx_qps); | 2459 | port->num_add_tx_qps); |
@@ -2490,7 +2491,7 @@ static int ehea_up(struct net_device *dev) | |||
2490 | } | 2491 | } |
2491 | } | 2492 | } |
2492 | 2493 | ||
2493 | down(&ehea_bcmc_regs.lock); | 2494 | mutex_lock(&ehea_bcmc_regs.lock); |
2494 | 2495 | ||
2495 | ret = ehea_broadcast_reg_helper(port, H_REG_BCMC); | 2496 | ret = ehea_broadcast_reg_helper(port, H_REG_BCMC); |
2496 | if (ret) { | 2497 | if (ret) { |
@@ -2513,10 +2514,10 @@ out: | |||
2513 | ehea_info("Failed starting %s. ret=%i", dev->name, ret); | 2514 | ehea_info("Failed starting %s. ret=%i", dev->name, ret); |
2514 | 2515 | ||
2515 | ehea_update_bcmc_registrations(); | 2516 | ehea_update_bcmc_registrations(); |
2516 | up(&ehea_bcmc_regs.lock); | 2517 | mutex_unlock(&ehea_bcmc_regs.lock); |
2517 | 2518 | ||
2518 | ehea_update_firmware_handles(); | 2519 | ehea_update_firmware_handles(); |
2519 | up(&ehea_fw_handles.lock); | 2520 | mutex_unlock(&ehea_fw_handles.lock); |
2520 | 2521 | ||
2521 | return ret; | 2522 | return ret; |
2522 | } | 2523 | } |
@@ -2542,7 +2543,7 @@ static int ehea_open(struct net_device *dev) | |||
2542 | int ret; | 2543 | int ret; |
2543 | struct ehea_port *port = netdev_priv(dev); | 2544 | struct ehea_port *port = netdev_priv(dev); |
2544 | 2545 | ||
2545 | down(&port->port_lock); | 2546 | mutex_lock(&port->port_lock); |
2546 | 2547 | ||
2547 | if (netif_msg_ifup(port)) | 2548 | if (netif_msg_ifup(port)) |
2548 | ehea_info("enabling port %s", dev->name); | 2549 | ehea_info("enabling port %s", dev->name); |
@@ -2553,7 +2554,7 @@ static int ehea_open(struct net_device *dev) | |||
2553 | netif_start_queue(dev); | 2554 | netif_start_queue(dev); |
2554 | } | 2555 | } |
2555 | 2556 | ||
2556 | up(&port->port_lock); | 2557 | mutex_unlock(&port->port_lock); |
2557 | 2558 | ||
2558 | return ret; | 2559 | return ret; |
2559 | } | 2560 | } |
@@ -2566,18 +2567,18 @@ static int ehea_down(struct net_device *dev) | |||
2566 | if (port->state == EHEA_PORT_DOWN) | 2567 | if (port->state == EHEA_PORT_DOWN) |
2567 | return 0; | 2568 | return 0; |
2568 | 2569 | ||
2569 | down(&ehea_bcmc_regs.lock); | 2570 | mutex_lock(&ehea_fw_handles.lock); |
2571 | |||
2572 | mutex_lock(&ehea_bcmc_regs.lock); | ||
2570 | ehea_drop_multicast_list(dev); | 2573 | ehea_drop_multicast_list(dev); |
2571 | ehea_broadcast_reg_helper(port, H_DEREG_BCMC); | 2574 | ehea_broadcast_reg_helper(port, H_DEREG_BCMC); |
2572 | 2575 | ||
2573 | ehea_free_interrupts(dev); | 2576 | ehea_free_interrupts(dev); |
2574 | 2577 | ||
2575 | down(&ehea_fw_handles.lock); | ||
2576 | |||
2577 | port->state = EHEA_PORT_DOWN; | 2578 | port->state = EHEA_PORT_DOWN; |
2578 | 2579 | ||
2579 | ehea_update_bcmc_registrations(); | 2580 | ehea_update_bcmc_registrations(); |
2580 | up(&ehea_bcmc_regs.lock); | 2581 | mutex_unlock(&ehea_bcmc_regs.lock); |
2581 | 2582 | ||
2582 | ret = ehea_clean_all_portres(port); | 2583 | ret = ehea_clean_all_portres(port); |
2583 | if (ret) | 2584 | if (ret) |
@@ -2585,7 +2586,7 @@ static int ehea_down(struct net_device *dev) | |||
2585 | dev->name, ret); | 2586 | dev->name, ret); |
2586 | 2587 | ||
2587 | ehea_update_firmware_handles(); | 2588 | ehea_update_firmware_handles(); |
2588 | up(&ehea_fw_handles.lock); | 2589 | mutex_unlock(&ehea_fw_handles.lock); |
2589 | 2590 | ||
2590 | return ret; | 2591 | return ret; |
2591 | } | 2592 | } |
@@ -2599,11 +2600,11 @@ static int ehea_stop(struct net_device *dev) | |||
2599 | ehea_info("disabling port %s", dev->name); | 2600 | ehea_info("disabling port %s", dev->name); |
2600 | 2601 | ||
2601 | flush_scheduled_work(); | 2602 | flush_scheduled_work(); |
2602 | down(&port->port_lock); | 2603 | mutex_lock(&port->port_lock); |
2603 | netif_stop_queue(dev); | 2604 | netif_stop_queue(dev); |
2604 | port_napi_disable(port); | 2605 | port_napi_disable(port); |
2605 | ret = ehea_down(dev); | 2606 | ret = ehea_down(dev); |
2606 | up(&port->port_lock); | 2607 | mutex_unlock(&port->port_lock); |
2607 | return ret; | 2608 | return ret; |
2608 | } | 2609 | } |
2609 | 2610 | ||
@@ -2801,7 +2802,7 @@ static void ehea_reset_port(struct work_struct *work) | |||
2801 | struct net_device *dev = port->netdev; | 2802 | struct net_device *dev = port->netdev; |
2802 | 2803 | ||
2803 | port->resets++; | 2804 | port->resets++; |
2804 | down(&port->port_lock); | 2805 | mutex_lock(&port->port_lock); |
2805 | netif_stop_queue(dev); | 2806 | netif_stop_queue(dev); |
2806 | 2807 | ||
2807 | port_napi_disable(port); | 2808 | port_napi_disable(port); |
@@ -2821,7 +2822,7 @@ static void ehea_reset_port(struct work_struct *work) | |||
2821 | 2822 | ||
2822 | netif_wake_queue(dev); | 2823 | netif_wake_queue(dev); |
2823 | out: | 2824 | out: |
2824 | up(&port->port_lock); | 2825 | mutex_unlock(&port->port_lock); |
2825 | return; | 2826 | return; |
2826 | } | 2827 | } |
2827 | 2828 | ||
@@ -2830,7 +2831,7 @@ static void ehea_rereg_mrs(struct work_struct *work) | |||
2830 | int ret, i; | 2831 | int ret, i; |
2831 | struct ehea_adapter *adapter; | 2832 | struct ehea_adapter *adapter; |
2832 | 2833 | ||
2833 | down(&dlpar_mem_lock); | 2834 | mutex_lock(&dlpar_mem_lock); |
2834 | ehea_info("LPAR memory enlarged - re-initializing driver"); | 2835 | ehea_info("LPAR memory enlarged - re-initializing driver"); |
2835 | 2836 | ||
2836 | list_for_each_entry(adapter, &adapter_list, list) | 2837 | list_for_each_entry(adapter, &adapter_list, list) |
@@ -2838,21 +2839,23 @@ static void ehea_rereg_mrs(struct work_struct *work) | |||
2838 | /* Shutdown all ports */ | 2839 | /* Shutdown all ports */ |
2839 | for (i = 0; i < EHEA_MAX_PORTS; i++) { | 2840 | for (i = 0; i < EHEA_MAX_PORTS; i++) { |
2840 | struct ehea_port *port = adapter->port[i]; | 2841 | struct ehea_port *port = adapter->port[i]; |
2842 | struct net_device *dev; | ||
2841 | 2843 | ||
2842 | if (port) { | 2844 | if (!port) |
2843 | struct net_device *dev = port->netdev; | 2845 | continue; |
2844 | 2846 | ||
2845 | if (dev->flags & IFF_UP) { | 2847 | dev = port->netdev; |
2846 | down(&port->port_lock); | 2848 | |
2847 | netif_stop_queue(dev); | 2849 | if (dev->flags & IFF_UP) { |
2848 | ret = ehea_stop_qps(dev); | 2850 | mutex_lock(&port->port_lock); |
2849 | if (ret) { | 2851 | netif_stop_queue(dev); |
2850 | up(&port->port_lock); | 2852 | ret = ehea_stop_qps(dev); |
2851 | goto out; | 2853 | if (ret) { |
2852 | } | 2854 | mutex_unlock(&port->port_lock); |
2853 | port_napi_disable(port); | 2855 | goto out; |
2854 | up(&port->port_lock); | ||
2855 | } | 2856 | } |
2857 | port_napi_disable(port); | ||
2858 | mutex_unlock(&port->port_lock); | ||
2856 | } | 2859 | } |
2857 | } | 2860 | } |
2858 | 2861 | ||
@@ -2892,17 +2895,17 @@ static void ehea_rereg_mrs(struct work_struct *work) | |||
2892 | struct net_device *dev = port->netdev; | 2895 | struct net_device *dev = port->netdev; |
2893 | 2896 | ||
2894 | if (dev->flags & IFF_UP) { | 2897 | if (dev->flags & IFF_UP) { |
2895 | down(&port->port_lock); | 2898 | mutex_lock(&port->port_lock); |
2896 | port_napi_enable(port); | 2899 | port_napi_enable(port); |
2897 | ret = ehea_restart_qps(dev); | 2900 | ret = ehea_restart_qps(dev); |
2898 | if (!ret) | 2901 | if (!ret) |
2899 | netif_wake_queue(dev); | 2902 | netif_wake_queue(dev); |
2900 | up(&port->port_lock); | 2903 | mutex_unlock(&port->port_lock); |
2901 | } | 2904 | } |
2902 | } | 2905 | } |
2903 | } | 2906 | } |
2904 | } | 2907 | } |
2905 | up(&dlpar_mem_lock); | 2908 | mutex_unlock(&dlpar_mem_lock); |
2906 | ehea_info("re-initializing driver complete"); | 2909 | ehea_info("re-initializing driver complete"); |
2907 | out: | 2910 | out: |
2908 | return; | 2911 | return; |
@@ -3063,7 +3066,7 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, | |||
3063 | 3066 | ||
3064 | port = netdev_priv(dev); | 3067 | port = netdev_priv(dev); |
3065 | 3068 | ||
3066 | sema_init(&port->port_lock, 1); | 3069 | mutex_init(&port->port_lock); |
3067 | port->state = EHEA_PORT_DOWN; | 3070 | port->state = EHEA_PORT_DOWN; |
3068 | port->sig_comp_iv = sq_entries / 10; | 3071 | port->sig_comp_iv = sq_entries / 10; |
3069 | 3072 | ||
@@ -3342,7 +3345,7 @@ static int __devinit ehea_probe_adapter(struct of_device *dev, | |||
3342 | ehea_error("Invalid ibmebus device probed"); | 3345 | ehea_error("Invalid ibmebus device probed"); |
3343 | return -EINVAL; | 3346 | return -EINVAL; |
3344 | } | 3347 | } |
3345 | down(&ehea_fw_handles.lock); | 3348 | mutex_lock(&ehea_fw_handles.lock); |
3346 | 3349 | ||
3347 | adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); | 3350 | adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); |
3348 | if (!adapter) { | 3351 | if (!adapter) { |
@@ -3426,7 +3429,7 @@ out_free_ad: | |||
3426 | 3429 | ||
3427 | out: | 3430 | out: |
3428 | ehea_update_firmware_handles(); | 3431 | ehea_update_firmware_handles(); |
3429 | up(&ehea_fw_handles.lock); | 3432 | mutex_unlock(&ehea_fw_handles.lock); |
3430 | return ret; | 3433 | return ret; |
3431 | } | 3434 | } |
3432 | 3435 | ||
@@ -3445,7 +3448,7 @@ static int __devexit ehea_remove(struct of_device *dev) | |||
3445 | 3448 | ||
3446 | flush_scheduled_work(); | 3449 | flush_scheduled_work(); |
3447 | 3450 | ||
3448 | down(&ehea_fw_handles.lock); | 3451 | mutex_lock(&ehea_fw_handles.lock); |
3449 | 3452 | ||
3450 | ibmebus_free_irq(adapter->neq->attr.ist1, adapter); | 3453 | ibmebus_free_irq(adapter->neq->attr.ist1, adapter); |
3451 | tasklet_kill(&adapter->neq_tasklet); | 3454 | tasklet_kill(&adapter->neq_tasklet); |
@@ -3456,7 +3459,7 @@ static int __devexit ehea_remove(struct of_device *dev) | |||
3456 | kfree(adapter); | 3459 | kfree(adapter); |
3457 | 3460 | ||
3458 | ehea_update_firmware_handles(); | 3461 | ehea_update_firmware_handles(); |
3459 | up(&ehea_fw_handles.lock); | 3462 | mutex_unlock(&ehea_fw_handles.lock); |
3460 | 3463 | ||
3461 | return 0; | 3464 | return 0; |
3462 | } | 3465 | } |
@@ -3543,9 +3546,8 @@ int __init ehea_module_init(void) | |||
3543 | memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles)); | 3546 | memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles)); |
3544 | memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs)); | 3547 | memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs)); |
3545 | 3548 | ||
3546 | sema_init(&dlpar_mem_lock, 1); | 3549 | mutex_init(&ehea_fw_handles.lock); |
3547 | sema_init(&ehea_fw_handles.lock, 1); | 3550 | mutex_init(&ehea_bcmc_regs.lock); |
3548 | sema_init(&ehea_bcmc_regs.lock, 1); | ||
3549 | 3551 | ||
3550 | ret = check_module_parm(); | 3552 | ret = check_module_parm(); |
3551 | if (ret) | 3553 | if (ret) |
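In ehea_main.c the file-scope dlpar_mem_lock becomes a statically initialised mutex, so the sema_init() call in ehea_module_init() can be dropped. A minimal kernel-style sketch of that pattern (names are illustrative):

/* Sketch of the static-lock conversion seen above: a file-scope semaphore
 * initialised at runtime becomes a mutex defined in one step.
 */
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_mem_lock);     /* replaces sema_init(&lock, 1) in init */

static int demo_resize_memory(void)
{
        mutex_lock(&demo_mem_lock);     /* serialise the whole resize operation */
        /* ... re-register memory regions ... */
        mutex_unlock(&demo_mem_lock);
        return 0;
}

The ehea_rereg_mrs() hunk additionally flattens the per-port loop by inverting the NULL test and continuing early, which keeps the real shutdown work one indentation level shallower.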
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 7ad2993dc581..cb371a8c24a7 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -367,7 +367,7 @@ static int __ixgbe_notify_dca(struct device *dev, void *data) | |||
367 | /* Always use CB2 mode, difference is masked | 367 | /* Always use CB2 mode, difference is masked |
368 | * in the CB driver. */ | 368 | * in the CB driver. */ |
369 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); | 369 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); |
370 | if (dca_add_requester(dev) == IXGBE_SUCCESS) { | 370 | if (dca_add_requester(dev) == 0) { |
371 | ixgbe_setup_dca(adapter); | 371 | ixgbe_setup_dca(adapter); |
372 | break; | 372 | break; |
373 | } | 373 | } |
@@ -381,7 +381,7 @@ static int __ixgbe_notify_dca(struct device *dev, void *data) | |||
381 | break; | 381 | break; |
382 | } | 382 | } |
383 | 383 | ||
384 | return IXGBE_SUCCESS; | 384 | return 0; |
385 | } | 385 | } |
386 | 386 | ||
387 | #endif /* CONFIG_DCA */ | 387 | #endif /* CONFIG_DCA */ |
@@ -3605,7 +3605,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
3605 | goto err_register; | 3605 | goto err_register; |
3606 | 3606 | ||
3607 | #ifdef CONFIG_DCA | 3607 | #ifdef CONFIG_DCA |
3608 | if (dca_add_requester(&pdev->dev) == IXGBE_SUCCESS) { | 3608 | if (dca_add_requester(&pdev->dev) == 0) { |
3609 | adapter->flags |= IXGBE_FLAG_DCA_ENABLED; | 3609 | adapter->flags |= IXGBE_FLAG_DCA_ENABLED; |
3610 | /* always use CB2 mode, difference is masked | 3610 | /* always use CB2 mode, difference is masked |
3611 | * in the CB driver */ | 3611 | * in the CB driver */ |
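The ixgbe hunks retire the driver-private IXGBE_SUCCESS constant: dca_add_requester() and the notifier callback follow the usual kernel convention of returning 0 on success and a negative errno on failure. A minimal userspace sketch of checking a return value under that convention (the callee is a stand-in, not the DCA API):

/* Sketch of the "0 on success, negative errno on failure" convention. */
#include <errno.h>
#include <stdio.h>

static int demo_add_requester(int available)
{
        return available ? 0 : -ENODEV;         /* 0 == success, <0 == errno */
}

int main(void)
{
        int err = demo_add_requester(1);

        if (err == 0)
                printf("requester added\n");
        else
                printf("failed: %d\n", err);
        return 0;
}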
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c index 44a06f8b588f..45208a0e69a0 100644 --- a/drivers/net/tokenring/3c359.c +++ b/drivers/net/tokenring/3c359.c | |||
@@ -42,6 +42,7 @@ | |||
42 | 42 | ||
43 | #define XL_DEBUG 0 | 43 | #define XL_DEBUG 0 |
44 | 44 | ||
45 | #include <linux/jiffies.h> | ||
45 | #include <linux/module.h> | 46 | #include <linux/module.h> |
46 | #include <linux/kernel.h> | 47 | #include <linux/kernel.h> |
47 | #include <linux/errno.h> | 48 | #include <linux/errno.h> |
@@ -408,7 +409,7 @@ static int xl_hw_reset(struct net_device *dev) | |||
408 | t=jiffies; | 409 | t=jiffies; |
409 | while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { | 410 | while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { |
410 | schedule(); | 411 | schedule(); |
411 | if(jiffies-t > 40*HZ) { | 412 | if (time_after(jiffies, t + 40 * HZ)) { |
412 | printk(KERN_ERR "%s: 3COM 3C359 Velocity XL card not responding to global reset.\n", dev->name); | 413 | printk(KERN_ERR "%s: 3COM 3C359 Velocity XL card not responding to global reset.\n", dev->name); |
413 | return -ENODEV; | 414 | return -ENODEV; |
414 | } | 415 | } |
@@ -519,7 +520,7 @@ static int xl_hw_reset(struct net_device *dev) | |||
519 | t=jiffies; | 520 | t=jiffies; |
520 | while ( !(readw(xl_mmio + MMIO_INTSTATUS_AUTO) & INTSTAT_SRB) ) { | 521 | while ( !(readw(xl_mmio + MMIO_INTSTATUS_AUTO) & INTSTAT_SRB) ) { |
521 | schedule(); | 522 | schedule(); |
522 | if(jiffies-t > 15*HZ) { | 523 | if (time_after(jiffies, t + 15 * HZ)) { |
523 | printk(KERN_ERR "3COM 3C359 Velocity XL card not responding.\n"); | 524 | printk(KERN_ERR "3COM 3C359 Velocity XL card not responding.\n"); |
524 | return -ENODEV; | 525 | return -ENODEV; |
525 | } | 526 | } |
@@ -790,7 +791,7 @@ static int xl_open_hw(struct net_device *dev) | |||
790 | t=jiffies; | 791 | t=jiffies; |
791 | while (! (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_SRB)) { | 792 | while (! (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_SRB)) { |
792 | schedule(); | 793 | schedule(); |
793 | if(jiffies-t > 40*HZ) { | 794 | if (time_after(jiffies, t + 40 * HZ)) { |
794 | printk(KERN_ERR "3COM 3C359 Velocity XL card not responding.\n"); | 795 | printk(KERN_ERR "3COM 3C359 Velocity XL card not responding.\n"); |
795 | break ; | 796 | break ; |
796 | } | 797 | } |
@@ -1003,7 +1004,7 @@ static void xl_reset(struct net_device *dev) | |||
1003 | 1004 | ||
1004 | t=jiffies; | 1005 | t=jiffies; |
1005 | while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { | 1006 | while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { |
1006 | if(jiffies-t > 40*HZ) { | 1007 | if (time_after(jiffies, t + 40 * HZ)) { |
1007 | printk(KERN_ERR "3COM 3C359 Velocity XL card not responding.\n"); | 1008 | printk(KERN_ERR "3COM 3C359 Velocity XL card not responding.\n"); |
1008 | break ; | 1009 | break ; |
1009 | } | 1010 | } |
@@ -1270,7 +1271,7 @@ static int xl_close(struct net_device *dev) | |||
1270 | t=jiffies; | 1271 | t=jiffies; |
1271 | while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { | 1272 | while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { |
1272 | schedule(); | 1273 | schedule(); |
1273 | if(jiffies-t > 10*HZ) { | 1274 | if (time_after(jiffies, t + 10 * HZ)) { |
1274 | printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-DNSTALL not responding.\n", dev->name); | 1275 | printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-DNSTALL not responding.\n", dev->name); |
1275 | break ; | 1276 | break ; |
1276 | } | 1277 | } |
@@ -1279,7 +1280,7 @@ static int xl_close(struct net_device *dev) | |||
1279 | t=jiffies; | 1280 | t=jiffies; |
1280 | while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { | 1281 | while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { |
1281 | schedule(); | 1282 | schedule(); |
1282 | if(jiffies-t > 10*HZ) { | 1283 | if (time_after(jiffies, t + 10 * HZ)) { |
1283 | printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-DNDISABLE not responding.\n", dev->name); | 1284 | printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-DNDISABLE not responding.\n", dev->name); |
1284 | break ; | 1285 | break ; |
1285 | } | 1286 | } |
@@ -1288,7 +1289,7 @@ static int xl_close(struct net_device *dev) | |||
1288 | t=jiffies; | 1289 | t=jiffies; |
1289 | while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { | 1290 | while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { |
1290 | schedule(); | 1291 | schedule(); |
1291 | if(jiffies-t > 10*HZ) { | 1292 | if (time_after(jiffies, t + 10 * HZ)) { |
1292 | printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-UPSTALL not responding.\n", dev->name); | 1293 | printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-UPSTALL not responding.\n", dev->name); |
1293 | break ; | 1294 | break ; |
1294 | } | 1295 | } |
@@ -1305,7 +1306,7 @@ static int xl_close(struct net_device *dev) | |||
1305 | t=jiffies; | 1306 | t=jiffies; |
1306 | while (!(readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_SRB)) { | 1307 | while (!(readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_SRB)) { |
1307 | schedule(); | 1308 | schedule(); |
1308 | if(jiffies-t > 10*HZ) { | 1309 | if (time_after(jiffies, t + 10 * HZ)) { |
1309 | printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-CLOSENIC not responding.\n", dev->name); | 1310 | printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-CLOSENIC not responding.\n", dev->name); |
1310 | break ; | 1311 | break ; |
1311 | } | 1312 | } |
@@ -1334,7 +1335,7 @@ static int xl_close(struct net_device *dev) | |||
1334 | t=jiffies; | 1335 | t=jiffies; |
1335 | while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { | 1336 | while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { |
1336 | schedule(); | 1337 | schedule(); |
1337 | if(jiffies-t > 10*HZ) { | 1338 | if (time_after(jiffies, t + 10 * HZ)) { |
1338 | printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-UPRESET not responding.\n", dev->name); | 1339 | printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-UPRESET not responding.\n", dev->name); |
1339 | break ; | 1340 | break ; |
1340 | } | 1341 | } |
@@ -1343,7 +1344,7 @@ static int xl_close(struct net_device *dev) | |||
1343 | t=jiffies; | 1344 | t=jiffies; |
1344 | while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { | 1345 | while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { |
1345 | schedule(); | 1346 | schedule(); |
1346 | if(jiffies-t > 10*HZ) { | 1347 | if (time_after(jiffies, t + 10 * HZ)) { |
1347 | printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-DNRESET not responding.\n", dev->name); | 1348 | printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-DNRESET not responding.\n", dev->name); |
1348 | break ; | 1349 | break ; |
1349 | } | 1350 | } |
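All of the 3c359.c polling loops now bound their waits with time_after() from <linux/jiffies.h>, the idiomatic wrap-safe way to compare jiffies values, instead of the open-coded jiffies - t > N*HZ tests. A minimal kernel-style sketch of the pattern (the busy flag stands in for the MMIO status poll):

/* Sketch of the wrap-safe timeout pattern adopted above. */
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/types.h>

static bool demo_hw_busy;       /* stands in for readw(mmio + STATUS) & BUSY */

static int demo_wait_idle(void)
{
        unsigned long timeout = jiffies + 10 * HZ;      /* ten second budget */

        while (demo_hw_busy) {
                schedule();                     /* let other tasks run while polling */
                if (time_after(jiffies, timeout))
                        return -ENODEV;         /* hardware never went idle */
        }
        return 0;
}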
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c index fe6ff3e3d525..24640726f8bb 100644 --- a/drivers/net/yellowfin.c +++ b/drivers/net/yellowfin.c | |||
@@ -770,14 +770,14 @@ static void yellowfin_init_ring(struct net_device *dev) | |||
770 | /* Branch on Tx error. */ | 770 | /* Branch on Tx error. */ |
771 | yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP); | 771 | yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP); |
772 | yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma + | 772 | yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma + |
773 | (j+1)*sizeof(struct yellowfin_desc); | 773 | (j+1)*sizeof(struct yellowfin_desc)); |
774 | j++; | 774 | j++; |
775 | if (yp->flags & FullTxStatus) { | 775 | if (yp->flags & FullTxStatus) { |
776 | yp->tx_ring[j].dbdma_cmd = | 776 | yp->tx_ring[j].dbdma_cmd = |
777 | cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status)); | 777 | cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status)); |
778 | yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status); | 778 | yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status); |
779 | yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma + | 779 | yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma + |
780 | i*sizeof(struct tx_status_words); | 780 | i*sizeof(struct tx_status_words)); |
781 | } else { | 781 | } else { |
782 | /* Symbios chips write only tx_errs word. */ | 782 | /* Symbios chips write only tx_errs word. */ |
783 | yp->tx_ring[j].dbdma_cmd = | 783 | yp->tx_ring[j].dbdma_cmd = |
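The yellowfin hunk fixes two statements that were missing a closing parenthesis; the intent is to point each descriptor's branch address at the next ring element, converted to the little-endian layout the hardware expects. A minimal kernel-style sketch of that computation (types and names are illustrative):

/* Sketch of the descriptor set-up the yellowfin fix repairs. */
#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_desc {
        __le32 branch_addr;     /* device reads this field little-endian */
};

static void demo_link_desc(struct demo_desc *ring, dma_addr_t ring_dma,
                           unsigned int j)
{
        /* Mirrors the corrected line: base DMA address plus the offset of
         * element j+1, wrapped in cpu_to_le32() for the hardware.
         */
        ring[j].branch_addr =
                cpu_to_le32(ring_dma + (j + 1) * sizeof(struct demo_desc));
}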
diff --git a/include/linux/arcdevice.h b/include/linux/arcdevice.h index fde675872c56..a1916078fd08 100644 --- a/include/linux/arcdevice.h +++ b/include/linux/arcdevice.h | |||
@@ -283,8 +283,8 @@ struct arcnet_local { | |||
283 | int next_buf, first_free_buf; | 283 | int next_buf, first_free_buf; |
284 | 284 | ||
285 | /* network "reconfiguration" handling */ | 285 | /* network "reconfiguration" handling */ |
286 | time_t first_recon, /* time of "first" RECON message to count */ | 286 | unsigned long first_recon; /* time of "first" RECON message to count */ |
287 | last_recon; /* time of most recent RECON */ | 287 | unsigned long last_recon; /* time of most recent RECON */ |
288 | int num_recons; /* number of RECONs between first and last. */ | 288 | int num_recons; /* number of RECONs between first and last. */ |
289 | bool network_down; /* do we think the network is down? */ | 289 | bool network_down; /* do we think the network is down? */ |
290 | 290 | ||
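Finally, arcdevice.h stores the RECON timestamps as unsigned long, the natural type for jiffies values, instead of time_t wall-clock seconds. A minimal kernel-style sketch of recording and comparing such timestamps (the struct, threshold and helper are illustrative, not the arcnet logic):

/* Sketch of keeping event timestamps in jiffies, as the arcnet
 * first_recon/last_recon fields now do.
 */
#include <linux/jiffies.h>
#include <linux/types.h>

struct demo_recon_state {
        unsigned long first_recon;      /* jiffies value of the first event */
        unsigned long last_recon;       /* jiffies value of the latest event */
        int num_recons;
};

static void demo_note_recon(struct demo_recon_state *s)
{
        s->last_recon = jiffies;

        /* Start a new burst if the previous one ended over a minute ago
         * (threshold chosen only for illustration).
         */
        if (!s->num_recons ||
            time_after(s->last_recon, s->first_recon + 60 * HZ)) {
                s->first_recon = s->last_recon;
                s->num_recons = 1;
        } else {
                s->num_recons++;
        }
}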