author     Linus Torvalds <torvalds@woody.osdl.org>  2006-12-29 13:03:54 -0500
committer  Linus Torvalds <torvalds@woody.osdl.org>  2006-12-29 13:03:54 -0500
commit     6c722e90d7ede7db2d2b28a3cc69a8545db67ea1
tree       2473530190795c11f841db37b7d74df9bcc0416b  /drivers
parent     007fb598b4674de82492a9961e82826875012229
parent     81f4e6c190a0fa016fd7eecaf76a5f95d121afc2
Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6
* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6: (42 commits)
r8169: extraneous Cmd{Tx/Rx}Enb write
forcedeth: modified comment header
NetXen: Reducing ring sizes for IOMMU issue.
NetXen: Fix for PPC machines.
NetXen: work queue fixes.
NetXen: Link status message correction for quad port cards.
NetXen: Multiple adapter fix.
NetXen: Using correct CHECKSUM flag.
NetXen: driver reload fix for newer firmware.
NetXen: Adding new device ids.
PHY probe not working properly for ibm_emac (PPC4xx)
ep93xx: some minor cleanups to the ep93xx eth driver
sky2: phy power down needs PCI config write enabled
sky2: power management/MSI workaround
sky2: dual port NAPI problem
via-velocity uses INET interfaces
e1000: Do not truncate TSO TCP header with 82544 workaround
myri10ge: handle failures in suspend and resume
myri10ge: no need to save MSI and PCIe state in the driver
myri10ge: make msi configurable at runtime through sysfs
...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/8139cp.c                    |   6
-rw-r--r--  drivers/net/arm/ep93xx_eth.c            |   4
-rw-r--r--  drivers/net/b44.c                       |   6
-rw-r--r--  drivers/net/e1000/e1000_ethtool.c       |   3
-rw-r--r--  drivers/net/e1000/e1000_hw.c            | 296
-rw-r--r--  drivers/net/e1000/e1000_hw.h            | 310
-rw-r--r--  drivers/net/e1000/e1000_main.c          | 345
-rw-r--r--  drivers/net/e1000/e1000_param.c         |   4
-rw-r--r--  drivers/net/forcedeth.c                 |  16
-rw-r--r--  drivers/net/ibm_emac/ibm_emac_phy.c     |   4
-rw-r--r--  drivers/net/myri10ge/myri10ge.c         | 163
-rw-r--r--  drivers/net/netxen/netxen_nic.h         |  14
-rw-r--r--  drivers/net/netxen/netxen_nic_hw.c      |   2
-rw-r--r--  drivers/net/netxen/netxen_nic_init.c    |  14
-rw-r--r--  drivers/net/netxen/netxen_nic_isr.c     |   3
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c    |  40
-rw-r--r--  drivers/net/r8169.c                     |   6
-rw-r--r--  drivers/net/skge.c                      |   5
-rw-r--r--  drivers/net/sky2.c                      |  35
-rw-r--r--  drivers/net/via-velocity.c              |  18
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_mac.c  |  96
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_mac.h  |   5
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_usb.c  |   4
23 files changed, 827 insertions, 572 deletions
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 458dd9f830c4..e2cb19b582a1 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -617,13 +617,15 @@ rx_next:
 	 * this round of polling
 	 */
 	if (rx_work) {
+		unsigned long flags;
+
 		if (cpr16(IntrStatus) & cp_rx_intr_mask)
 			goto rx_status_loop;
 
-		local_irq_disable();
+		local_irq_save(flags);
 		cpw16_f(IntrMask, cp_intr_mask);
 		__netif_rx_complete(dev);
-		local_irq_enable();
+		local_irq_restore(flags);
 
 		return 0;	/* done */
 	}
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 8ebd68e2af98..dd698b033a62 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -780,12 +780,10 @@ static struct ethtool_ops ep93xx_ethtool_ops = {
 struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data)
 {
 	struct net_device *dev;
-	struct ep93xx_priv *ep;
 
 	dev = alloc_etherdev(sizeof(struct ep93xx_priv));
 	if (dev == NULL)
 		return NULL;
-	ep = netdev_priv(dev);
 
 	memcpy(dev->dev_addr, data->dev_addr, ETH_ALEN);
 
@@ -840,9 +838,9 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
 	struct ep93xx_priv *ep;
 	int err;
 
-	data = pdev->dev.platform_data;
 	if (pdev == NULL)
 		return -ENODEV;
+	data = pdev->dev.platform_data;
 
 	dev = ep93xx_dev_alloc(data);
 	if (dev == NULL) {
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 474a4e3438db..5eb2ec68393f 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -879,12 +879,14 @@ static int b44_poll(struct net_device *netdev, int *budget)
 	}
 
 	if (bp->istat & ISTAT_ERRORS) {
-		spin_lock_irq(&bp->lock);
+		unsigned long flags;
+
+		spin_lock_irqsave(&bp->lock, flags);
 		b44_halt(bp);
 		b44_init_rings(bp);
 		b44_init_hw(bp, 1);
 		netif_wake_queue(bp->dev);
-		spin_unlock_irq(&bp->lock);
+		spin_unlock_irqrestore(&bp->lock, flags);
 		done = 1;
 	}
 
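Both the 8139cp and b44 hunks above make the same kind of change: the NAPI poll path stops unconditionally disabling and re-enabling interrupts and instead saves and restores the caller's IRQ state, so a caller that already runs with interrupts off (a netpoll path, for instance) does not have them switched back on behind its back. A minimal illustrative sketch of the pattern, using a hypothetical driver-private structure rather than either driver's real one:

```c
/* Illustrative sketch only -- not code from the drivers above. */
#include <linux/spinlock.h>

struct example_priv {
	spinlock_t lock;
	/* ... device state ... */
};

static void example_poll_complete(struct example_priv *priv)
{
	unsigned long flags;

	/*
	 * spin_lock_irqsave() records whether IRQs were already disabled;
	 * spin_unlock_irqrestore() puts back exactly that state instead of
	 * forcing interrupts on, which is what local_irq_enable() or
	 * spin_unlock_irq() would have done.
	 */
	spin_lock_irqsave(&priv->lock, flags);
	/* ... mask device interrupts and complete the poll ... */
	spin_unlock_irqrestore(&priv->lock, flags);
}
```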
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index da459f7177c6..fb96c87f9e56 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -100,6 +100,9 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
 	{ "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
 	{ "rx_header_split", E1000_STAT(rx_hdr_split) },
 	{ "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) },
+	{ "tx_smbus", E1000_STAT(stats.mgptc) },
+	{ "rx_smbus", E1000_STAT(stats.mgprc) },
+	{ "dropped_smbus", E1000_STAT(stats.mgpdc) },
 };
 
 #define E1000_QUEUE_STATS_LEN 0
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 3655d902b0bd..9be44699300b 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -308,141 +308,160 @@ e1000_phy_init_script(struct e1000_hw *hw)
 int32_t
 e1000_set_mac_type(struct e1000_hw *hw)
 {
 	DEBUGFUNC("e1000_set_mac_type");
 
 	switch (hw->device_id) {
 	case E1000_DEV_ID_82542:
 		switch (hw->revision_id) {
 		case E1000_82542_2_0_REV_ID:
 			hw->mac_type = e1000_82542_rev2_0;
 			break;
 		case E1000_82542_2_1_REV_ID:
 			hw->mac_type = e1000_82542_rev2_1;
 			break;
 		default:
 			/* Invalid 82542 revision ID */
 			return -E1000_ERR_MAC_TYPE;
 		}
 		break;
 	case E1000_DEV_ID_82543GC_FIBER:
 	case E1000_DEV_ID_82543GC_COPPER:
 		hw->mac_type = e1000_82543;
 		break;
 	case E1000_DEV_ID_82544EI_COPPER:
 	case E1000_DEV_ID_82544EI_FIBER:
 	case E1000_DEV_ID_82544GC_COPPER:
 	case E1000_DEV_ID_82544GC_LOM:
 		hw->mac_type = e1000_82544;
 		break;
 	case E1000_DEV_ID_82540EM:
 	case E1000_DEV_ID_82540EM_LOM:
 	case E1000_DEV_ID_82540EP:
 	case E1000_DEV_ID_82540EP_LOM:
 	case E1000_DEV_ID_82540EP_LP:
 		hw->mac_type = e1000_82540;
 		break;
 	case E1000_DEV_ID_82545EM_COPPER:
 	case E1000_DEV_ID_82545EM_FIBER:
 		hw->mac_type = e1000_82545;
 		break;
 	case E1000_DEV_ID_82545GM_COPPER:
 	case E1000_DEV_ID_82545GM_FIBER:
 	case E1000_DEV_ID_82545GM_SERDES:
 		hw->mac_type = e1000_82545_rev_3;
 		break;
 	case E1000_DEV_ID_82546EB_COPPER:
 	case E1000_DEV_ID_82546EB_FIBER:
 	case E1000_DEV_ID_82546EB_QUAD_COPPER:
 		hw->mac_type = e1000_82546;
 		break;
 	case E1000_DEV_ID_82546GB_COPPER:
 	case E1000_DEV_ID_82546GB_FIBER:
 	case E1000_DEV_ID_82546GB_SERDES:
 	case E1000_DEV_ID_82546GB_PCIE:
 	case E1000_DEV_ID_82546GB_QUAD_COPPER:
 	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
 		hw->mac_type = e1000_82546_rev_3;
 		break;
 	case E1000_DEV_ID_82541EI:
 	case E1000_DEV_ID_82541EI_MOBILE:
 	case E1000_DEV_ID_82541ER_LOM:
 		hw->mac_type = e1000_82541;
 		break;
 	case E1000_DEV_ID_82541ER:
 	case E1000_DEV_ID_82541GI:
 	case E1000_DEV_ID_82541GI_LF:
 	case E1000_DEV_ID_82541GI_MOBILE:
 		hw->mac_type = e1000_82541_rev_2;
 		break;
 	case E1000_DEV_ID_82547EI:
 	case E1000_DEV_ID_82547EI_MOBILE:
 		hw->mac_type = e1000_82547;
 		break;
 	case E1000_DEV_ID_82547GI:
 		hw->mac_type = e1000_82547_rev_2;
 		break;
 	case E1000_DEV_ID_82571EB_COPPER:
 	case E1000_DEV_ID_82571EB_FIBER:
 	case E1000_DEV_ID_82571EB_SERDES:
 	case E1000_DEV_ID_82571EB_QUAD_COPPER:
 	case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
 		hw->mac_type = e1000_82571;
 		break;
 	case E1000_DEV_ID_82572EI_COPPER:
 	case E1000_DEV_ID_82572EI_FIBER:
 	case E1000_DEV_ID_82572EI_SERDES:
 	case E1000_DEV_ID_82572EI:
 		hw->mac_type = e1000_82572;
 		break;
 	case E1000_DEV_ID_82573E:
 	case E1000_DEV_ID_82573E_IAMT:
 	case E1000_DEV_ID_82573L:
 		hw->mac_type = e1000_82573;
 		break;
 	case E1000_DEV_ID_80003ES2LAN_COPPER_SPT:
 	case E1000_DEV_ID_80003ES2LAN_SERDES_SPT:
 	case E1000_DEV_ID_80003ES2LAN_COPPER_DPT:
 	case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
 		hw->mac_type = e1000_80003es2lan;
 		break;
 	case E1000_DEV_ID_ICH8_IGP_M_AMT:
 	case E1000_DEV_ID_ICH8_IGP_AMT:
 	case E1000_DEV_ID_ICH8_IGP_C:
 	case E1000_DEV_ID_ICH8_IFE:
 	case E1000_DEV_ID_ICH8_IFE_GT:
 	case E1000_DEV_ID_ICH8_IFE_G:
 	case E1000_DEV_ID_ICH8_IGP_M:
 		hw->mac_type = e1000_ich8lan;
 		break;
 	default:
 		/* Should never have loaded on this device */
 		return -E1000_ERR_MAC_TYPE;
 	}
 
 	switch (hw->mac_type) {
 	case e1000_ich8lan:
 		hw->swfwhw_semaphore_present = TRUE;
 		hw->asf_firmware_present = TRUE;
 		break;
 	case e1000_80003es2lan:
 		hw->swfw_sync_present = TRUE;
 		/* fall through */
 	case e1000_82571:
 	case e1000_82572:
 	case e1000_82573:
 		hw->eeprom_semaphore_present = TRUE;
 		/* fall through */
 	case e1000_82541:
 	case e1000_82547:
 	case e1000_82541_rev_2:
 	case e1000_82547_rev_2:
 		hw->asf_firmware_present = TRUE;
 		break;
 	default:
 		break;
 	}
 
-	return E1000_SUCCESS;
+	/* The 82543 chip does not count tx_carrier_errors properly in
+	 * FD mode
+	 */
+	if (hw->mac_type == e1000_82543)
+		hw->bad_tx_carr_stats_fd = TRUE;
+
+	/* capable of receiving management packets to the host */
+	if (hw->mac_type >= e1000_82571)
+		hw->has_manc2h = TRUE;
+
+	/* In rare occasions, ESB2 systems would end up started without
+	 * the RX unit being turned on.
+	 */
+	if (hw->mac_type == e1000_80003es2lan)
+		hw->rx_needs_kicking = TRUE;
+
+	if (hw->mac_type > e1000_82544)
+		hw->has_smbus = TRUE;
+
+	return E1000_SUCCESS;
 }
 
 /*****************************************************************************
@@ -6575,7 +6594,7 @@ e1000_get_bus_info(struct e1000_hw *hw)
 	switch (hw->mac_type) {
 	case e1000_82542_rev2_0:
 	case e1000_82542_rev2_1:
-		hw->bus_type = e1000_bus_type_unknown;
+		hw->bus_type = e1000_bus_type_pci;
 		hw->bus_speed = e1000_bus_speed_unknown;
 		hw->bus_width = e1000_bus_width_unknown;
 		break;
@@ -7817,9 +7836,8 @@ e1000_enable_mng_pass_thru(struct e1000_hw *hw)
 		fwsm = E1000_READ_REG(hw, FWSM);
 		factps = E1000_READ_REG(hw, FACTPS);
 
-		if (((fwsm & E1000_FWSM_MODE_MASK) ==
-		     (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT)) &&
-		    (factps & E1000_FACTPS_MNGCG))
+		if ((((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT) ==
+		     e1000_mng_mode_pt) && !(factps & E1000_FACTPS_MNGCG))
 			return TRUE;
 	} else
 		if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN))
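The e1000_enable_mng_pass_thru() change above fixes how the firmware mode field in FWSM is compared: the field is now shifted down before being compared with the e1000_mng_mode_pt enumerator, and pass-through is only reported when the E1000_FACTPS_MNGCG bit is clear rather than set. A small self-contained sketch of that kind of multi-bit field test, with made-up mask, shift and bit values (the real E1000_FWSM_* and E1000_FACTPS_* definitions live in e1000_hw.h):

```c
/* Illustrative field extraction -- the constants below are invented for
 * the example, not the real e1000 register definitions.
 */
#include <stdbool.h>
#include <stdint.h>

#define FWSM_MODE_MASK   0x0000000Eu   /* hypothetical 3-bit mode field */
#define FWSM_MODE_SHIFT  1
#define FACTPS_MNGCG     0x20000000u   /* hypothetical gating bit */

enum mng_mode { mng_mode_none = 0, mng_mode_asf, mng_mode_pt, mng_mode_ipmi };

static bool mng_pass_thru_enabled(uint32_t fwsm, uint32_t factps)
{
	/* shift the masked field down to the enum's domain, then compare,
	 * and require the gating bit to be clear */
	return ((fwsm & FWSM_MODE_MASK) >> FWSM_MODE_SHIFT) == mng_mode_pt &&
	       !(factps & FACTPS_MNGCG);
}
```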
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 3321fb13bfa9..d67105883341 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -1301,165 +1301,170 @@ struct e1000_ffvt_entry {
 #define E1000_82542_RSSIR       E1000_RSSIR
 #define E1000_82542_KUMCTRLSTA  E1000_KUMCTRLSTA
 #define E1000_82542_SW_FW_SYNC  E1000_SW_FW_SYNC
+#define E1000_82542_MANC2H      E1000_MANC2H
 
 /* Statistics counters collected by the MAC */
 struct e1000_hw_stats {
 	uint64_t crcerrs;
 	uint64_t algnerrc;
 	uint64_t symerrs;
 	uint64_t rxerrc;
 	uint64_t txerrc;
 	uint64_t mpc;
 	uint64_t scc;
 	uint64_t ecol;
 	uint64_t mcc;
 	uint64_t latecol;
 	uint64_t colc;
 	uint64_t dc;
 	uint64_t tncrs;
 	uint64_t sec;
 	uint64_t cexterr;
 	uint64_t rlec;
 	uint64_t xonrxc;
 	uint64_t xontxc;
 	uint64_t xoffrxc;
 	uint64_t xofftxc;
 	uint64_t fcruc;
 	uint64_t prc64;
 	uint64_t prc127;
 	uint64_t prc255;
 	uint64_t prc511;
 	uint64_t prc1023;
 	uint64_t prc1522;
 	uint64_t gprc;
 	uint64_t bprc;
 	uint64_t mprc;
 	uint64_t gptc;
 	uint64_t gorcl;
 	uint64_t gorch;
 	uint64_t gotcl;
 	uint64_t gotch;
 	uint64_t rnbc;
 	uint64_t ruc;
 	uint64_t rfc;
 	uint64_t roc;
 	uint64_t rlerrc;
 	uint64_t rjc;
 	uint64_t mgprc;
 	uint64_t mgpdc;
 	uint64_t mgptc;
 	uint64_t torl;
 	uint64_t torh;
 	uint64_t totl;
 	uint64_t toth;
 	uint64_t tpr;
 	uint64_t tpt;
 	uint64_t ptc64;
 	uint64_t ptc127;
 	uint64_t ptc255;
 	uint64_t ptc511;
 	uint64_t ptc1023;
 	uint64_t ptc1522;
 	uint64_t mptc;
 	uint64_t bptc;
 	uint64_t tsctc;
 	uint64_t tsctfc;
 	uint64_t iac;
 	uint64_t icrxptc;
 	uint64_t icrxatc;
 	uint64_t ictxptc;
 	uint64_t ictxatc;
 	uint64_t ictxqec;
 	uint64_t ictxqmtc;
 	uint64_t icrxdmtc;
 	uint64_t icrxoc;
 };
 
 /* Structure containing variables used by the shared code (e1000_hw.c) */
 struct e1000_hw {
 	uint8_t __iomem *hw_addr;
 	uint8_t __iomem *flash_address;
 	e1000_mac_type mac_type;
 	e1000_phy_type phy_type;
 	uint32_t phy_init_script;
 	e1000_media_type media_type;
 	void *back;
 	struct e1000_shadow_ram *eeprom_shadow_ram;
 	uint32_t flash_bank_size;
 	uint32_t flash_base_addr;
 	e1000_fc_type fc;
 	e1000_bus_speed bus_speed;
 	e1000_bus_width bus_width;
 	e1000_bus_type bus_type;
 	struct e1000_eeprom_info eeprom;
 	e1000_ms_type master_slave;
 	e1000_ms_type original_master_slave;
 	e1000_ffe_config ffe_config_state;
 	uint32_t asf_firmware_present;
 	uint32_t eeprom_semaphore_present;
 	uint32_t swfw_sync_present;
 	uint32_t swfwhw_semaphore_present;
 	unsigned long io_base;
 	uint32_t phy_id;
 	uint32_t phy_revision;
 	uint32_t phy_addr;
 	uint32_t original_fc;
 	uint32_t txcw;
 	uint32_t autoneg_failed;
 	uint32_t max_frame_size;
 	uint32_t min_frame_size;
 	uint32_t mc_filter_type;
 	uint32_t num_mc_addrs;
 	uint32_t collision_delta;
 	uint32_t tx_packet_delta;
 	uint32_t ledctl_default;
 	uint32_t ledctl_mode1;
 	uint32_t ledctl_mode2;
 	boolean_t tx_pkt_filtering;
 	struct e1000_host_mng_dhcp_cookie mng_cookie;
 	uint16_t phy_spd_default;
 	uint16_t autoneg_advertised;
 	uint16_t pci_cmd_word;
 	uint16_t fc_high_water;
 	uint16_t fc_low_water;
 	uint16_t fc_pause_time;
 	uint16_t current_ifs_val;
 	uint16_t ifs_min_val;
 	uint16_t ifs_max_val;
 	uint16_t ifs_step_size;
 	uint16_t ifs_ratio;
 	uint16_t device_id;
 	uint16_t vendor_id;
 	uint16_t subsystem_id;
 	uint16_t subsystem_vendor_id;
 	uint8_t revision_id;
 	uint8_t autoneg;
 	uint8_t mdix;
 	uint8_t forced_speed_duplex;
 	uint8_t wait_autoneg_complete;
 	uint8_t dma_fairness;
 	uint8_t mac_addr[NODE_ADDRESS_SIZE];
 	uint8_t perm_mac_addr[NODE_ADDRESS_SIZE];
 	boolean_t disable_polarity_correction;
 	boolean_t speed_downgraded;
 	e1000_smart_speed smart_speed;
 	e1000_dsp_config dsp_config_state;
 	boolean_t get_link_status;
 	boolean_t serdes_link_down;
 	boolean_t tbi_compatibility_en;
 	boolean_t tbi_compatibility_on;
 	boolean_t laa_is_present;
 	boolean_t phy_reset_disable;
 	boolean_t initialize_hw_bits_disable;
 	boolean_t fc_send_xon;
 	boolean_t fc_strict_ieee;
 	boolean_t report_tx_early;
 	boolean_t adaptive_ifs;
 	boolean_t ifs_params_forced;
 	boolean_t in_ifs_mode;
 	boolean_t mng_reg_access_disabled;
 	boolean_t leave_av_bit_off;
 	boolean_t kmrn_lock_loss_workaround_disabled;
+	boolean_t bad_tx_carr_stats_fd;
+	boolean_t has_manc2h;
+	boolean_t rx_needs_kicking;
+	boolean_t has_smbus;
 };
 
 
@@ -2418,6 +2423,7 @@ struct e1000_host_command_info {
 #define E1000_PBA_8K 0x0008    /* 8KB, default Rx allocation */
 #define E1000_PBA_12K 0x000C   /* 12KB, default Rx allocation */
 #define E1000_PBA_16K 0x0010   /* 16KB, default TX allocation */
+#define E1000_PBA_20K 0x0014
 #define E1000_PBA_22K 0x0016
 #define E1000_PBA_24K 0x0018
 #define E1000_PBA_30K 0x001E
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 73f3a85fd238..4c1ff752048c 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -213,6 +213,12 @@ static void e1000_netpoll (struct net_device *netdev);
 
 extern void e1000_check_options(struct e1000_adapter *adapter);
 
+#define COPYBREAK_DEFAULT 256
+static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
+module_param(copybreak, uint, 0644);
+MODULE_PARM_DESC(copybreak,
+	"Maximum size of packet that is copied to a new buffer on receive");
+
 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
 		pci_channel_state_t state);
 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
@@ -264,7 +270,13 @@ e1000_init_module(void)
 	printk(KERN_INFO "%s\n", e1000_copyright);
 
 	ret = pci_register_driver(&e1000_driver);
-
+	if (copybreak != COPYBREAK_DEFAULT) {
+		if (copybreak == 0)
+			printk(KERN_INFO "e1000: copybreak disabled\n");
+		else
+			printk(KERN_INFO "e1000: copybreak enabled for "
+			       "packets <= %u bytes\n", copybreak);
+	}
 	return ret;
 }
 
@@ -464,6 +476,52 @@ e1000_get_hw_control(struct e1000_adapter *adapter)
 	}
 }
 
+static void
+e1000_init_manageability(struct e1000_adapter *adapter)
+{
+	if (adapter->en_mng_pt) {
+		uint32_t manc = E1000_READ_REG(&adapter->hw, MANC);
+
+		/* disable hardware interception of ARP */
+		manc &= ~(E1000_MANC_ARP_EN);
+
+		/* enable receiving management packets to the host */
+		/* this will probably generate destination unreachable messages
+		 * from the host OS, but the packets will be handled on SMBUS */
+		if (adapter->hw.has_manc2h) {
+			uint32_t manc2h = E1000_READ_REG(&adapter->hw, MANC2H);
+
+			manc |= E1000_MANC_EN_MNG2HOST;
+#define E1000_MNG2HOST_PORT_623 (1 << 5)
+#define E1000_MNG2HOST_PORT_664 (1 << 6)
+			manc2h |= E1000_MNG2HOST_PORT_623;
+			manc2h |= E1000_MNG2HOST_PORT_664;
+			E1000_WRITE_REG(&adapter->hw, MANC2H, manc2h);
+		}
+
+		E1000_WRITE_REG(&adapter->hw, MANC, manc);
+	}
+}
+
+static void
+e1000_release_manageability(struct e1000_adapter *adapter)
+{
+	if (adapter->en_mng_pt) {
+		uint32_t manc = E1000_READ_REG(&adapter->hw, MANC);
+
+		/* re-enable hardware interception of ARP */
+		manc |= E1000_MANC_ARP_EN;
+
+		if (adapter->hw.has_manc2h)
+			manc &= ~E1000_MANC_EN_MNG2HOST;
+
+		/* don't explicitly have to mess with MANC2H since
+		 * MANC has an enable disable that gates MANC2H */
+
+		E1000_WRITE_REG(&adapter->hw, MANC, manc);
+	}
+}
+
 int
 e1000_up(struct e1000_adapter *adapter)
 {
@@ -475,6 +533,7 @@ e1000_up(struct e1000_adapter *adapter)
 	e1000_set_multi(netdev);
 
 	e1000_restore_vlan(adapter);
+	e1000_init_manageability(adapter);
 
 	e1000_configure_tx(adapter);
 	e1000_setup_rctl(adapter);
@@ -497,7 +556,8 @@ e1000_up(struct e1000_adapter *adapter)
 
 	clear_bit(__E1000_DOWN, &adapter->flags);
 
-	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
+	/* fire a link change interrupt to start the watchdog */
+	E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_LSC);
 	return 0;
 }
 
@@ -614,16 +674,34 @@ e1000_reinit_locked(struct e1000_adapter *adapter)
 void
 e1000_reset(struct e1000_adapter *adapter)
 {
-	uint32_t pba, manc;
+	uint32_t pba = 0, tx_space, min_tx_space, min_rx_space;
 	uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;
+	boolean_t legacy_pba_adjust = FALSE;
 
 	/* Repartition Pba for greater than 9k mtu
 	 * To take effect CTRL.RST is required.
 	 */
 
 	switch (adapter->hw.mac_type) {
+	case e1000_82542_rev2_0:
+	case e1000_82542_rev2_1:
+	case e1000_82543:
+	case e1000_82544:
+	case e1000_82540:
+	case e1000_82541:
+	case e1000_82541_rev_2:
+		legacy_pba_adjust = TRUE;
+		pba = E1000_PBA_48K;
+		break;
+	case e1000_82545:
+	case e1000_82545_rev_3:
+	case e1000_82546:
+	case e1000_82546_rev_3:
+		pba = E1000_PBA_48K;
+		break;
 	case e1000_82547:
 	case e1000_82547_rev_2:
+		legacy_pba_adjust = TRUE;
 		pba = E1000_PBA_30K;
 		break;
 	case e1000_82571:
@@ -632,27 +710,80 @@ e1000_reset(struct e1000_adapter *adapter)
 		pba = E1000_PBA_38K;
 		break;
 	case e1000_82573:
-		pba = E1000_PBA_12K;
+		pba = E1000_PBA_20K;
 		break;
 	case e1000_ich8lan:
 		pba = E1000_PBA_8K;
-		break;
-	default:
-		pba = E1000_PBA_48K;
+	case e1000_undefined:
+	case e1000_num_macs:
 		break;
 	}
 
-	if ((adapter->hw.mac_type != e1000_82573) &&
-	    (adapter->netdev->mtu > E1000_RXBUFFER_8192))
-		pba -= 8; /* allocate more FIFO for Tx */
+	if (legacy_pba_adjust == TRUE) {
+		if (adapter->netdev->mtu > E1000_RXBUFFER_8192)
+			pba -= 8; /* allocate more FIFO for Tx */
 
+		if (adapter->hw.mac_type == e1000_82547) {
+			adapter->tx_fifo_head = 0;
+			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
+			adapter->tx_fifo_size =
+				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
+			atomic_set(&adapter->tx_fifo_stall, 0);
+		}
+	} else if (adapter->hw.max_frame_size > MAXIMUM_ETHERNET_FRAME_SIZE) {
+		/* adjust PBA for jumbo frames */
+		E1000_WRITE_REG(&adapter->hw, PBA, pba);
+
+		/* To maintain wire speed transmits, the Tx FIFO should be
+		 * large enough to accomodate two full transmit packets,
+		 * rounded up to the next 1KB and expressed in KB.  Likewise,
+		 * the Rx FIFO should be large enough to accomodate at least
+		 * one full receive packet and is similarly rounded up and
+		 * expressed in KB. */
+		pba = E1000_READ_REG(&adapter->hw, PBA);
+		/* upper 16 bits has Tx packet buffer allocation size in KB */
+		tx_space = pba >> 16;
+		/* lower 16 bits has Rx packet buffer allocation size in KB */
+		pba &= 0xffff;
+		/* don't include ethernet FCS because hardware appends/strips */
+		min_rx_space = adapter->netdev->mtu + ENET_HEADER_SIZE +
+			       VLAN_TAG_SIZE;
+		min_tx_space = min_rx_space;
+		min_tx_space *= 2;
+		E1000_ROUNDUP(min_tx_space, 1024);
+		min_tx_space >>= 10;
+		E1000_ROUNDUP(min_rx_space, 1024);
+		min_rx_space >>= 10;
 
-	if (adapter->hw.mac_type == e1000_82547) {
-		adapter->tx_fifo_head = 0;
-		adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
-		adapter->tx_fifo_size =
-			(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
-		atomic_set(&adapter->tx_fifo_stall, 0);
+		/* If current Tx allocation is less than the min Tx FIFO size,
+		 * and the min Tx FIFO size is less than the current Rx FIFO
+		 * allocation, take space away from current Rx allocation */
+		if (tx_space < min_tx_space &&
+		    ((min_tx_space - tx_space) < pba)) {
+			pba = pba - (min_tx_space - tx_space);
+
+			/* PCI/PCIx hardware has PBA alignment constraints */
+			switch (adapter->hw.mac_type) {
+			case e1000_82545 ... e1000_82546_rev_3:
+				pba &= ~(E1000_PBA_8K - 1);
+				break;
+			default:
+				break;
+			}
+
+			/* if short on rx space, rx wins and must trump tx
+			 * adjustment or use Early Receive if available */
+			if (pba < min_rx_space) {
+				switch (adapter->hw.mac_type) {
+				case e1000_82573:
+					/* ERT enabled in e1000_configure_rx */
+					break;
+				default:
+					pba = min_rx_space;
+					break;
+				}
+			}
+		}
 	}
 
 	E1000_WRITE_REG(&adapter->hw, PBA, pba);
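As a rough worked example of the FIFO sizing above, assuming ENET_HEADER_SIZE is 14, VLAN_TAG_SIZE is 4 and E1000_ROUNDUP() rounds its first argument up to a multiple of the second: with a 9000-byte MTU, min_rx_space starts at 9018 bytes and rounds to 9 KB, while min_tx_space is twice that (18036 bytes) and rounds to 18 KB, so Rx packet-buffer space is only taken away when the hardware's current Tx allocation is below 18 KB. A standalone sketch of the arithmetic:

```c
/* Worked example of the Tx/Rx FIFO minimums above; the header and VLAN
 * tag sizes are assumptions for the example, not values read from the
 * driver headers.
 */
#include <stdio.h>

int main(void)
{
	unsigned int mtu = 9000;                /* jumbo-frame example */
	unsigned int min_rx = mtu + 14 + 4;     /* 9018 bytes */
	unsigned int min_tx = 2 * min_rx;       /* 18036 bytes */

	min_tx = (min_tx + 1023) >> 10;         /* round up to KB -> 18 */
	min_rx = (min_rx + 1023) >> 10;         /* round up to KB ->  9 */

	printf("min_tx_space=%u KB, min_rx_space=%u KB\n", min_tx, min_rx);
	return 0;
}
```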
@@ -685,6 +816,20 @@ e1000_reset(struct e1000_adapter *adapter)
 	if (e1000_init_hw(&adapter->hw))
 		DPRINTK(PROBE, ERR, "Hardware Error\n");
 	e1000_update_mng_vlan(adapter);
+
+	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
+	if (adapter->hw.mac_type >= e1000_82544 &&
+	    adapter->hw.mac_type <= e1000_82547_rev_2 &&
+	    adapter->hw.autoneg == 1 &&
+	    adapter->hw.autoneg_advertised == ADVERTISE_1000_FULL) {
+		uint32_t ctrl = E1000_READ_REG(&adapter->hw, CTRL);
+		/* clear phy power management bit if we are in gig only mode,
+		 * which if enabled will attempt negotiation to 100Mb, which
+		 * can cause a loss of link at power off or driver unload */
+		ctrl &= ~E1000_CTRL_SWDPIN3;
+		E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
+	}
+
 	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
 	E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE);
 
@@ -705,14 +850,7 @@ e1000_reset(struct e1000_adapter *adapter)
 				   phy_data);
 	}
 
-	if ((adapter->en_mng_pt) &&
-	    (adapter->hw.mac_type >= e1000_82540) &&
-	    (adapter->hw.mac_type < e1000_82571) &&
-	    (adapter->hw.media_type == e1000_media_type_copper)) {
-		manc = E1000_READ_REG(&adapter->hw, MANC);
-		manc |= (E1000_MANC_ARP_EN | E1000_MANC_EN_MNG2HOST);
-		E1000_WRITE_REG(&adapter->hw, MANC, manc);
-	}
+	e1000_release_manageability(adapter);
 }
 
 /**
@@ -857,6 +995,12 @@ e1000_probe(struct pci_dev *pdev,
 	    (adapter->hw.mac_type != e1000_82547))
 		netdev->features |= NETIF_F_TSO;
 
+#ifdef CONFIG_DEBUG_SLAB
+	/* 82544's work arounds do not play nicely with DEBUG SLAB */
+	if (adapter->hw.mac_type == e1000_82544)
+		netdev->features &= ~NETIF_F_TSO;
+#endif
+
 #ifdef NETIF_F_TSO6
 	if (adapter->hw.mac_type > e1000_82547_rev_2)
 		netdev->features |= NETIF_F_TSO6;
@@ -1078,22 +1222,13 @@ e1000_remove(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev_priv(netdev);
-	uint32_t manc;
 #ifdef CONFIG_E1000_NAPI
 	int i;
 #endif
 
 	flush_scheduled_work();
 
-	if (adapter->hw.mac_type >= e1000_82540 &&
-	    adapter->hw.mac_type < e1000_82571 &&
-	    adapter->hw.media_type == e1000_media_type_copper) {
-		manc = E1000_READ_REG(&adapter->hw, MANC);
-		if (manc & E1000_MANC_SMBUS_EN) {
-			manc |= E1000_MANC_ARP_EN;
-			E1000_WRITE_REG(&adapter->hw, MANC, manc);
-		}
-	}
+	e1000_release_manageability(adapter);
 
 	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
 	 * would have already happened in close and is redundant. */
@@ -1531,9 +1666,9 @@ e1000_configure_tx(struct e1000_adapter *adapter)
 	}
 
 	/* Set the default values for the Tx Inter Packet Gap timer */
-
-	if (hw->media_type == e1000_media_type_fiber ||
-	    hw->media_type == e1000_media_type_internal_serdes)
+	if (adapter->hw.mac_type <= e1000_82547_rev_2 &&
+	    (hw->media_type == e1000_media_type_fiber ||
+	     hw->media_type == e1000_media_type_internal_serdes))
 		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
 	else
 		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
@@ -2528,6 +2663,13 @@ e1000_watchdog(unsigned long data)
 			netif_wake_queue(netdev);
 			mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
 			adapter->smartspeed = 0;
+		} else {
+			/* make sure the receive unit is started */
+			if (adapter->hw.rx_needs_kicking) {
+				struct e1000_hw *hw = &adapter->hw;
+				uint32_t rctl = E1000_READ_REG(hw, RCTL);
+				E1000_WRITE_REG(hw, RCTL, rctl | E1000_RCTL_EN);
+			}
 		}
 	} else {
 		if (netif_carrier_ok(netdev)) {
@@ -2628,29 +2770,34 @@ static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
 	if (packets == 0)
 		goto update_itr_done;
 
-
 	switch (itr_setting) {
 	case lowest_latency:
-		if ((packets < 5) && (bytes > 512))
+		/* jumbo frames get bulk treatment*/
+		if (bytes/packets > 8000)
+			retval = bulk_latency;
+		else if ((packets < 5) && (bytes > 512))
 			retval = low_latency;
 		break;
 	case low_latency:  /* 50 usec aka 20000 ints/s */
 		if (bytes > 10000) {
-			if ((packets < 10) ||
-			    ((bytes/packets) > 1200))
+			/* jumbo frames need bulk latency setting */
+			if (bytes/packets > 8000)
+				retval = bulk_latency;
+			else if ((packets < 10) || ((bytes/packets) > 1200))
 				retval = bulk_latency;
 			else if ((packets > 35))
 				retval = lowest_latency;
-		} else if (packets <= 2 && bytes < 512)
+		} else if (bytes/packets > 2000)
+			retval = bulk_latency;
+		else if (packets <= 2 && bytes < 512)
 			retval = lowest_latency;
 		break;
 	case bulk_latency: /* 250 usec aka 4000 ints/s */
 		if (bytes > 25000) {
 			if (packets > 35)
 				retval = low_latency;
-		} else {
-			if (bytes < 6000)
-				retval = low_latency;
+		} else if (bytes < 6000) {
+			retval = low_latency;
 		}
 		break;
 	}
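The reworked e1000_update_itr() heuristic above adds a bytes-per-packet test so that jumbo-frame traffic is pushed toward the bulk (roughly 4000 ints/s) class even when only a few packets arrived in the sampling window. A standalone sketch that mirrors the decision table, handy for checking how a given packet/byte sample would be classified; this is a simplified model, not the driver function itself:

```c
/* Simplified model of the ITR classification above -- illustrative only. */
#include <stdio.h>

enum itr_class { lowest_latency, low_latency, bulk_latency };

static enum itr_class classify(enum itr_class cur, unsigned packets, unsigned bytes)
{
	enum itr_class ret = cur;

	if (packets == 0)
		return cur;

	switch (cur) {
	case lowest_latency:
		if (bytes / packets > 8000)              /* jumbo traffic */
			ret = bulk_latency;
		else if (packets < 5 && bytes > 512)
			ret = low_latency;
		break;
	case low_latency:
		if (bytes > 10000) {
			if (bytes / packets > 8000)
				ret = bulk_latency;
			else if (packets < 10 || bytes / packets > 1200)
				ret = bulk_latency;
			else if (packets > 35)
				ret = lowest_latency;
		} else if (bytes / packets > 2000) {
			ret = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			ret = lowest_latency;
		}
		break;
	case bulk_latency:
		if (bytes > 25000) {
			if (packets > 35)
				ret = low_latency;
		} else if (bytes < 6000) {
			ret = low_latency;
		}
		break;
	}
	return ret;
}

int main(void)
{
	/* three 9000-byte jumbo frames in one window -> bulk (prints 2) */
	printf("%d\n", classify(low_latency, 3, 27000));
	/* a couple of small request/response packets -> lowest (prints 0) */
	printf("%d\n", classify(low_latency, 2, 300));
	return 0;
}
```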
@@ -2679,17 +2826,20 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
 				    adapter->tx_itr,
 				    adapter->total_tx_packets,
 				    adapter->total_tx_bytes);
+	/* conservative mode (itr 3) eliminates the lowest_latency setting */
+	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
+		adapter->tx_itr = low_latency;
+
 	adapter->rx_itr = e1000_update_itr(adapter,
 				    adapter->rx_itr,
 				    adapter->total_rx_packets,
 				    adapter->total_rx_bytes);
+	/* conservative mode (itr 3) eliminates the lowest_latency setting */
+	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
+		adapter->rx_itr = low_latency;
 
 	current_itr = max(adapter->rx_itr, adapter->tx_itr);
 
-	/* conservative mode eliminates the lowest_latency setting */
-	if (current_itr == lowest_latency && (adapter->itr_setting == 3))
-		current_itr = low_latency;
-
 	switch (current_itr) {
 	/* counts and packets in update_itr are dependent on these numbers */
 	case lowest_latency:
@@ -3168,6 +3318,16 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) {
 		switch (adapter->hw.mac_type) {
 			unsigned int pull_size;
+		case e1000_82544:
+			/* Make sure we have room to chop off 4 bytes,
+			 * and that the end alignment will work out to
+			 * this hardware's requirements
+			 * NOTE: this is a TSO only workaround
+			 * if end byte alignment not correct move us
+			 * into the next dword */
+			if ((unsigned long)(skb->tail - 1) & 4)
+				break;
+			/* fall through */
 		case e1000_82571:
 		case e1000_82572:
 		case e1000_82573:
@@ -3419,12 +3579,11 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
 		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
 
 	netdev->mtu = new_mtu;
+	adapter->hw.max_frame_size = max_frame;
 
 	if (netif_running(netdev))
 		e1000_reinit_locked(adapter);
 
-	adapter->hw.max_frame_size = max_frame;
-
 	return 0;
 }
 
@@ -3573,6 +3732,11 @@ e1000_update_stats(struct e1000_adapter *adapter)
 	adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
 	adapter->net_stats.tx_window_errors = adapter->stats.latecol;
 	adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
+	if (adapter->hw.bad_tx_carr_stats_fd &&
+	    adapter->link_duplex == FULL_DUPLEX) {
+		adapter->net_stats.tx_carrier_errors = 0;
+		adapter->stats.tncrs = 0;
+	}
 
 	/* Tx Dropped needs to be maintained elsewhere */
 
@@ -3590,6 +3754,13 @@ e1000_update_stats(struct e1000_adapter *adapter)
 		adapter->phy_stats.receive_errors += phy_tmp;
 	}
 
+	/* Management Stats */
+	if (adapter->hw.has_smbus) {
+		adapter->stats.mgptc += E1000_READ_REG(hw, MGTPTC);
+		adapter->stats.mgprc += E1000_READ_REG(hw, MGTPRC);
+		adapter->stats.mgpdc += E1000_READ_REG(hw, MGTPDC);
+	}
+
 	spin_unlock_irqrestore(&adapter->stats_lock, flags);
 }
 #ifdef CONFIG_PCI_MSI
@@ -3868,11 +4039,11 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
 			cleaned = (i == eop);
 
 			if (cleaned) {
-				/* this packet count is wrong for TSO but has a
-				 * tendency to make dynamic ITR change more
-				 * towards bulk */
+				struct sk_buff *skb = buffer_info->skb;
+				unsigned int segs = skb_shinfo(skb)->gso_segs;
+				total_tx_packets += segs;
 				total_tx_packets++;
-				total_tx_bytes += buffer_info->skb->len;
+				total_tx_bytes += skb->len;
 			}
 			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
 			tx_desc->upper.data = 0;
@@ -4094,8 +4265,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 		/* code added for copybreak, this should improve
 		 * performance for small packets with large amounts
 		 * of reassembly being done in the stack */
-#define E1000_CB_LENGTH 256
-		if (length < E1000_CB_LENGTH) {
+		if (length < copybreak) {
 			struct sk_buff *new_skb =
 				netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
 			if (new_skb) {
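Here the fixed E1000_CB_LENGTH constant gives way to the copybreak module parameter declared earlier with module_param(copybreak, uint, 0644), so the threshold can be set at load time (copybreak=0 disables it) or adjusted afterwards through /sys/module/e1000/parameters/copybreak. The idea is that frames shorter than the threshold are copied into a small, freshly allocated skb so the full-size Rx buffer can be recycled in place. A simplified sketch of that decision, independent of the kernel skb API:

```c
/* Simplified model of the copybreak decision above -- illustrative only. */
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

static unsigned int copybreak = 256;	/* mirrors COPYBREAK_DEFAULT */

/* Copy a short frame into 'small_buf' so the big Rx buffer can be reused;
 * return false to hand the original buffer up the stack instead.
 */
static bool rx_copybreak(const void *rx_buf, size_t len,
			 void *small_buf, size_t small_cap)
{
	if (copybreak == 0 || len >= copybreak || len > small_cap)
		return false;

	memcpy(small_buf, rx_buf, len);
	return true;
}
```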
@@ -4253,7 +4423,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 
 		/* page alloc/put takes too long and effects small packet
 		 * throughput, so unsplit small packets and save the alloc/put*/
-		if (l1 && ((length + l1) <= adapter->rx_ps_bsize0)) {
+		if (l1 && (l1 <= copybreak) && ((length + l1) <= adapter->rx_ps_bsize0)) {
 			u8 *vaddr;
 			/* there is no documentation about how to call
 			 * kmap_atomic, so we can't hold the mapping
@@ -4998,7 +5168,7 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev_priv(netdev);
-	uint32_t ctrl, ctrl_ext, rctl, manc, status;
+	uint32_t ctrl, ctrl_ext, rctl, status;
 	uint32_t wufc = adapter->wol;
 #ifdef CONFIG_PM
 	int retval = 0;
@@ -5067,16 +5237,12 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
 		pci_enable_wake(pdev, PCI_D3cold, 0);
 	}
 
-	if (adapter->hw.mac_type >= e1000_82540 &&
-	    adapter->hw.mac_type < e1000_82571 &&
-	    adapter->hw.media_type == e1000_media_type_copper) {
-		manc = E1000_READ_REG(&adapter->hw, MANC);
-		if (manc & E1000_MANC_SMBUS_EN) {
-			manc |= E1000_MANC_ARP_EN;
-			E1000_WRITE_REG(&adapter->hw, MANC, manc);
-			pci_enable_wake(pdev, PCI_D3hot, 1);
-			pci_enable_wake(pdev, PCI_D3cold, 1);
-		}
+	e1000_release_manageability(adapter);
+
+	/* make sure adapter isn't asleep if manageability is enabled */
+	if (adapter->en_mng_pt) {
+		pci_enable_wake(pdev, PCI_D3hot, 1);
+		pci_enable_wake(pdev, PCI_D3cold, 1);
 	}
 
 	if (adapter->hw.phy_type == e1000_phy_igp_3)
@@ -5102,7 +5268,7 @@ e1000_resume(struct pci_dev *pdev) | |||
5102 | { | 5268 | { |
5103 | struct net_device *netdev = pci_get_drvdata(pdev); | 5269 | struct net_device *netdev = pci_get_drvdata(pdev); |
5104 | struct e1000_adapter *adapter = netdev_priv(netdev); | 5270 | struct e1000_adapter *adapter = netdev_priv(netdev); |
5105 | uint32_t manc, err; | 5271 | uint32_t err; |
5106 | 5272 | ||
5107 | pci_set_power_state(pdev, PCI_D0); | 5273 | pci_set_power_state(pdev, PCI_D0); |
5108 | e1000_pci_restore_state(adapter); | 5274 | e1000_pci_restore_state(adapter); |
@@ -5122,19 +5288,13 @@ e1000_resume(struct pci_dev *pdev) | |||
5122 | e1000_reset(adapter); | 5288 | e1000_reset(adapter); |
5123 | E1000_WRITE_REG(&adapter->hw, WUS, ~0); | 5289 | E1000_WRITE_REG(&adapter->hw, WUS, ~0); |
5124 | 5290 | ||
5291 | e1000_init_manageability(adapter); | ||
5292 | |||
5125 | if (netif_running(netdev)) | 5293 | if (netif_running(netdev)) |
5126 | e1000_up(adapter); | 5294 | e1000_up(adapter); |
5127 | 5295 | ||
5128 | netif_device_attach(netdev); | 5296 | netif_device_attach(netdev); |
5129 | 5297 | ||
5130 | if (adapter->hw.mac_type >= e1000_82540 && | ||
5131 | adapter->hw.mac_type < e1000_82571 && | ||
5132 | adapter->hw.media_type == e1000_media_type_copper) { | ||
5133 | manc = E1000_READ_REG(&adapter->hw, MANC); | ||
5134 | manc &= ~(E1000_MANC_ARP_EN); | ||
5135 | E1000_WRITE_REG(&adapter->hw, MANC, manc); | ||
5136 | } | ||
5137 | |||
5138 | /* If the controller is 82573 and f/w is AMT, do not set | 5298 | /* If the controller is 82573 and f/w is AMT, do not set |
5139 | * DRV_LOAD until the interface is up. For all other cases, | 5299 | * DRV_LOAD until the interface is up. For all other cases, |
5140 | * let the f/w know that the h/w is now under the control | 5300 | * let the f/w know that the h/w is now under the control |
@@ -5235,7 +5395,8 @@ static void e1000_io_resume(struct pci_dev *pdev) | |||
5235 | { | 5395 | { |
5236 | struct net_device *netdev = pci_get_drvdata(pdev); | 5396 | struct net_device *netdev = pci_get_drvdata(pdev); |
5237 | struct e1000_adapter *adapter = netdev->priv; | 5397 | struct e1000_adapter *adapter = netdev->priv; |
5238 | uint32_t manc, swsm; | 5398 | |
5399 | e1000_init_manageability(adapter); | ||
5239 | 5400 | ||
5240 | if (netif_running(netdev)) { | 5401 | if (netif_running(netdev)) { |
5241 | if (e1000_up(adapter)) { | 5402 | if (e1000_up(adapter)) { |
@@ -5246,26 +5407,14 @@ static void e1000_io_resume(struct pci_dev *pdev) | |||
5246 | 5407 | ||
5247 | netif_device_attach(netdev); | 5408 | netif_device_attach(netdev); |
5248 | 5409 | ||
5249 | if (adapter->hw.mac_type >= e1000_82540 && | 5410 | /* If the controller is 82573 and f/w is AMT, do not set |
5250 | adapter->hw.mac_type < e1000_82571 && | 5411 | * DRV_LOAD until the interface is up. For all other cases, |
5251 | adapter->hw.media_type == e1000_media_type_copper) { | 5412 | * let the f/w know that the h/w is now under the control |
5252 | manc = E1000_READ_REG(&adapter->hw, MANC); | 5413 | * of the driver. */ |
5253 | manc &= ~(E1000_MANC_ARP_EN); | 5414 | if (adapter->hw.mac_type != e1000_82573 || |
5254 | E1000_WRITE_REG(&adapter->hw, MANC, manc); | 5415 | !e1000_check_mng_mode(&adapter->hw)) |
5255 | } | 5416 | e1000_get_hw_control(adapter); |
5256 | |||
5257 | switch (adapter->hw.mac_type) { | ||
5258 | case e1000_82573: | ||
5259 | swsm = E1000_READ_REG(&adapter->hw, SWSM); | ||
5260 | E1000_WRITE_REG(&adapter->hw, SWSM, | ||
5261 | swsm | E1000_SWSM_DRV_LOAD); | ||
5262 | break; | ||
5263 | default: | ||
5264 | break; | ||
5265 | } | ||
5266 | 5417 | ||
5267 | if (netif_running(netdev)) | ||
5268 | mod_timer(&adapter->watchdog_timer, jiffies); | ||
5269 | } | 5418 | } |
5270 | 5419 | ||
5271 | /* e1000_main.c */ | 5420 | /* e1000_main.c */ |
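The e1000_clean_rx_irq hunks above replace the hard-coded E1000_CB_LENGTH with a runtime copybreak threshold. A minimal sketch of the copybreak idea, with hypothetical names and assuming a kernel build context (not the driver's exact code):

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

/* Packets shorter than this are copied into a freshly allocated small
 * skb so the large receive buffer can stay on (or return to) the ring;
 * the threshold is a writable module parameter rather than a #define. */
static unsigned int copybreak = 256;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak, "Copy packets shorter than this into a small skb");

/* Returns the skb to hand to the stack. If a copy was made, the caller
 * keeps rx_skb mapped and recycles it on the receive ring. */
static struct sk_buff *maybe_copybreak(struct net_device *netdev,
				       struct sk_buff *rx_skb,
				       unsigned int length)
{
	struct sk_buff *new_skb;

	if (length >= copybreak)
		return rx_skb;				/* pass the original buffer up */

	new_skb = netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
	if (!new_skb)
		return rx_skb;				/* fall back on allocation failure */

	skb_reserve(new_skb, NET_IP_ALIGN);		/* keep the IP header aligned */
	memcpy(skb_put(new_skb, length), rx_skb->data, length);
	return new_skb;
}

The suspend/resume hunks move in the same direction: the open-coded MANC manipulation is folded into e1000_release_manageability()/e1000_init_manageability() so the PM paths and the error-recovery path share one implementation.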
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c index cbfcd7f2889f..cf2a279307e1 100644 --- a/drivers/net/e1000/e1000_param.c +++ b/drivers/net/e1000/e1000_param.c | |||
@@ -487,7 +487,9 @@ e1000_check_options(struct e1000_adapter *adapter) | |||
487 | e1000_validate_option(&adapter->itr, &opt, | 487 | e1000_validate_option(&adapter->itr, &opt, |
488 | adapter); | 488 | adapter); |
489 | /* save the setting, because the dynamic bits change itr */ | 489 | /* save the setting, because the dynamic bits change itr */ |
490 | adapter->itr_setting = adapter->itr; | 490 | /* clear the lower two bits because they are |
491 | * used as control */ | ||
492 | adapter->itr_setting = adapter->itr & ~3; | ||
491 | break; | 493 | break; |
492 | } | 494 | } |
493 | } else { | 495 | } else { |
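The e1000_param.c hunk saves the ITR value with its two low bits cleared because those bits double as mode selectors. A tiny stand-alone illustration of that masking pattern (names and values are made up):

#include <stdint.h>
#include <stdio.h>

/* The low two bits of the user-supplied throttle value are reused as
 * control/mode bits, so the stored "setting" masks them off and the
 * mode can be recovered separately. */
#define ITR_CTRL_MASK 0x3u

int main(void)
{
	uint32_t itr = 8003;				/* rate with mode bits folded in */
	uint32_t setting = itr & ~ITR_CTRL_MASK;	/* 8000: rate without control bits */
	uint32_t mode = itr & ITR_CTRL_MASK;		/* 3: the control bits themselves */

	printf("setting=%u mode=%u\n", setting, mode);
	return 0;
}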
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index 439f41338291..2f48fe9a29a7 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c | |||
@@ -3,8 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Note: This driver is a cleanroom reimplementation based on reverse | 4 | * Note: This driver is a cleanroom reimplementation based on reverse |
5 | * engineered documentation written by Carl-Daniel Hailfinger | 5 | * engineered documentation written by Carl-Daniel Hailfinger |
6 | * and Andrew de Quincey. It's neither supported nor endorsed | 6 | * and Andrew de Quincey. |
7 | * by NVIDIA Corp. Use at your own risk. | ||
8 | * | 7 | * |
9 | * NVIDIA, nForce and other NVIDIA marks are trademarks or registered | 8 | * NVIDIA, nForce and other NVIDIA marks are trademarks or registered |
10 | * trademarks of NVIDIA Corporation in the United States and other | 9 | * trademarks of NVIDIA Corporation in the United States and other |
@@ -14,7 +13,7 @@ | |||
14 | * Copyright (C) 2004 Andrew de Quincey (wol support) | 13 | * Copyright (C) 2004 Andrew de Quincey (wol support) |
15 | * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane | 14 | * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane |
16 | * IRQ rate fixes, bigendian fixes, cleanups, verification) | 15 | * IRQ rate fixes, bigendian fixes, cleanups, verification) |
17 | * Copyright (c) 2004 NVIDIA Corporation | 16 | * Copyright (c) 2004,5,6 NVIDIA Corporation |
18 | * | 17 | * |
19 | * This program is free software; you can redistribute it and/or modify | 18 | * This program is free software; you can redistribute it and/or modify |
20 | * it under the terms of the GNU General Public License as published by | 19 | * it under the terms of the GNU General Public License as published by |
@@ -2576,14 +2575,15 @@ static int nv_napi_poll(struct net_device *dev, int *budget) | |||
2576 | int pkts, limit = min(*budget, dev->quota); | 2575 | int pkts, limit = min(*budget, dev->quota); |
2577 | struct fe_priv *np = netdev_priv(dev); | 2576 | struct fe_priv *np = netdev_priv(dev); |
2578 | u8 __iomem *base = get_hwbase(dev); | 2577 | u8 __iomem *base = get_hwbase(dev); |
2578 | unsigned long flags; | ||
2579 | 2579 | ||
2580 | pkts = nv_rx_process(dev, limit); | 2580 | pkts = nv_rx_process(dev, limit); |
2581 | 2581 | ||
2582 | if (nv_alloc_rx(dev)) { | 2582 | if (nv_alloc_rx(dev)) { |
2583 | spin_lock_irq(&np->lock); | 2583 | spin_lock_irqsave(&np->lock, flags); |
2584 | if (!np->in_shutdown) | 2584 | if (!np->in_shutdown) |
2585 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | 2585 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); |
2586 | spin_unlock_irq(&np->lock); | 2586 | spin_unlock_irqrestore(&np->lock, flags); |
2587 | } | 2587 | } |
2588 | 2588 | ||
2589 | if (pkts < limit) { | 2589 | if (pkts < limit) { |
@@ -2591,13 +2591,15 @@ static int nv_napi_poll(struct net_device *dev, int *budget) | |||
2591 | netif_rx_complete(dev); | 2591 | netif_rx_complete(dev); |
2592 | 2592 | ||
2593 | /* re-enable receive interrupts */ | 2593 | /* re-enable receive interrupts */ |
2594 | spin_lock_irq(&np->lock); | 2594 | spin_lock_irqsave(&np->lock, flags); |
2595 | |||
2595 | np->irqmask |= NVREG_IRQ_RX_ALL; | 2596 | np->irqmask |= NVREG_IRQ_RX_ALL; |
2596 | if (np->msi_flags & NV_MSI_X_ENABLED) | 2597 | if (np->msi_flags & NV_MSI_X_ENABLED) |
2597 | writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); | 2598 | writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); |
2598 | else | 2599 | else |
2599 | writel(np->irqmask, base + NvRegIrqMask); | 2600 | writel(np->irqmask, base + NvRegIrqMask); |
2600 | spin_unlock_irq(&np->lock); | 2601 | |
2602 | spin_unlock_irqrestore(&np->lock, flags); | ||
2601 | return 0; | 2603 | return 0; |
2602 | } else { | 2604 | } else { |
2603 | /* used up our quantum, so reschedule */ | 2605 | /* used up our quantum, so reschedule */ |
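The nv_napi_poll hunks swap spin_lock_irq() for spin_lock_irqsave(). The irqsave variant restores whatever interrupt state the caller had instead of unconditionally re-enabling IRQs, which matters when a poll routine can also be reached with interrupts already off (e.g. via netpoll). A minimal sketch of the idiom:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);	/* hypothetical lock shared with an IRQ handler */

static void demo_touch_shared_state(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);		/* disable IRQs, remember prior state */
	/* ... update state also touched from interrupt context ... */
	spin_unlock_irqrestore(&demo_lock, flags);	/* restore saved state, not "enabled" */
}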
diff --git a/drivers/net/ibm_emac/ibm_emac_phy.c b/drivers/net/ibm_emac/ibm_emac_phy.c index 4a97024061e5..9074f76ee2bf 100644 --- a/drivers/net/ibm_emac/ibm_emac_phy.c +++ b/drivers/net/ibm_emac/ibm_emac_phy.c | |||
@@ -309,7 +309,7 @@ int mii_phy_probe(struct mii_phy *phy, int address) | |||
309 | { | 309 | { |
310 | struct mii_phy_def *def; | 310 | struct mii_phy_def *def; |
311 | int i; | 311 | int i; |
312 | u32 id; | 312 | int id; |
313 | 313 | ||
314 | phy->autoneg = AUTONEG_DISABLE; | 314 | phy->autoneg = AUTONEG_DISABLE; |
315 | phy->advertising = 0; | 315 | phy->advertising = 0; |
@@ -324,6 +324,8 @@ int mii_phy_probe(struct mii_phy *phy, int address) | |||
324 | 324 | ||
325 | /* Read ID and find matching entry */ | 325 | /* Read ID and find matching entry */ |
326 | id = (phy_read(phy, MII_PHYSID1) << 16) | phy_read(phy, MII_PHYSID2); | 326 | id = (phy_read(phy, MII_PHYSID1) << 16) | phy_read(phy, MII_PHYSID2); |
327 | if (id < 0) | ||
328 | return -ENODEV; | ||
327 | for (i = 0; (def = mii_phy_table[i]) != NULL; i++) | 329 | for (i = 0; (def = mii_phy_table[i]) != NULL; i++) |
328 | if ((id & def->phy_id_mask) == def->phy_id) | 330 | if ((id & def->phy_id_mask) == def->phy_id) |
329 | break; | 331 | break; |
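The ibm_emac fix works because the PHY ID is now held in a signed int: the driver's phy_read() returns a negative errno on MDIO failure, and an unsigned id would silently hide that. A sketch of the check as it might sit inside ibm_emac_phy.c, where struct mii_phy and phy_read() are defined (illustrative helper name):

#include <linux/errno.h>
#include <linux/mii.h>		/* MII_PHYSID1, MII_PHYSID2 */

static int demo_read_phy_id(struct mii_phy *phy)
{
	/* keep the result signed so an error from phy_read() stays visible */
	int id = (phy_read(phy, MII_PHYSID1) << 16) | phy_read(phy, MII_PHYSID2);

	if (id < 0)		/* a u32 here would have masked the failure */
		return -ENODEV;
	return id;
}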
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index 94ac168be593..07cf574197e5 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c | |||
@@ -199,8 +199,6 @@ struct myri10ge_priv { | |||
199 | unsigned long serial_number; | 199 | unsigned long serial_number; |
200 | int vendor_specific_offset; | 200 | int vendor_specific_offset; |
201 | int fw_multicast_support; | 201 | int fw_multicast_support; |
202 | u32 devctl; | ||
203 | u16 msi_flags; | ||
204 | u32 read_dma; | 202 | u32 read_dma; |
205 | u32 write_dma; | 203 | u32 write_dma; |
206 | u32 read_write_dma; | 204 | u32 read_write_dma; |
@@ -228,7 +226,7 @@ module_param(myri10ge_small_bytes, int, S_IRUGO | S_IWUSR); | |||
228 | MODULE_PARM_DESC(myri10ge_small_bytes, "Threshold of small packets\n"); | 226 | MODULE_PARM_DESC(myri10ge_small_bytes, "Threshold of small packets\n"); |
229 | 227 | ||
230 | static int myri10ge_msi = 1; /* enable msi by default */ | 228 | static int myri10ge_msi = 1; /* enable msi by default */ |
231 | module_param(myri10ge_msi, int, S_IRUGO); | 229 | module_param(myri10ge_msi, int, S_IRUGO | S_IWUSR); |
232 | MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled Interrupts\n"); | 230 | MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled Interrupts\n"); |
233 | 231 | ||
234 | static int myri10ge_intr_coal_delay = 25; | 232 | static int myri10ge_intr_coal_delay = 25; |
@@ -721,12 +719,10 @@ static int myri10ge_reset(struct myri10ge_priv *mgp) | |||
721 | status |= | 719 | status |= |
722 | myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0); | 720 | myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0); |
723 | mgp->irq_claim = (__iomem __be32 *) (mgp->sram + cmd.data0); | 721 | mgp->irq_claim = (__iomem __be32 *) (mgp->sram + cmd.data0); |
724 | if (!mgp->msi_enabled) { | 722 | status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET, |
725 | status |= myri10ge_send_cmd | 723 | &cmd, 0); |
726 | (mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET, &cmd, 0); | 724 | mgp->irq_deassert = (__iomem __be32 *) (mgp->sram + cmd.data0); |
727 | mgp->irq_deassert = (__iomem __be32 *) (mgp->sram + cmd.data0); | ||
728 | 725 | ||
729 | } | ||
730 | status |= myri10ge_send_cmd | 726 | status |= myri10ge_send_cmd |
731 | (mgp, MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET, &cmd, 0); | 727 | (mgp, MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET, &cmd, 0); |
732 | mgp->intr_coal_delay_ptr = (__iomem __be32 *) (mgp->sram + cmd.data0); | 728 | mgp->intr_coal_delay_ptr = (__iomem __be32 *) (mgp->sram + cmd.data0); |
@@ -1619,6 +1615,41 @@ static void myri10ge_free_rings(struct net_device *dev) | |||
1619 | mgp->tx.req_list = NULL; | 1615 | mgp->tx.req_list = NULL; |
1620 | } | 1616 | } |
1621 | 1617 | ||
1618 | static int myri10ge_request_irq(struct myri10ge_priv *mgp) | ||
1619 | { | ||
1620 | struct pci_dev *pdev = mgp->pdev; | ||
1621 | int status; | ||
1622 | |||
1623 | if (myri10ge_msi) { | ||
1624 | status = pci_enable_msi(pdev); | ||
1625 | if (status != 0) | ||
1626 | dev_err(&pdev->dev, | ||
1627 | "Error %d setting up MSI; falling back to xPIC\n", | ||
1628 | status); | ||
1629 | else | ||
1630 | mgp->msi_enabled = 1; | ||
1631 | } else { | ||
1632 | mgp->msi_enabled = 0; | ||
1633 | } | ||
1634 | status = request_irq(pdev->irq, myri10ge_intr, IRQF_SHARED, | ||
1635 | mgp->dev->name, mgp); | ||
1636 | if (status != 0) { | ||
1637 | dev_err(&pdev->dev, "failed to allocate IRQ\n"); | ||
1638 | if (mgp->msi_enabled) | ||
1639 | pci_disable_msi(pdev); | ||
1640 | } | ||
1641 | return status; | ||
1642 | } | ||
1643 | |||
1644 | static void myri10ge_free_irq(struct myri10ge_priv *mgp) | ||
1645 | { | ||
1646 | struct pci_dev *pdev = mgp->pdev; | ||
1647 | |||
1648 | free_irq(pdev->irq, mgp); | ||
1649 | if (mgp->msi_enabled) | ||
1650 | pci_disable_msi(pdev); | ||
1651 | } | ||
1652 | |||
1622 | static int myri10ge_open(struct net_device *dev) | 1653 | static int myri10ge_open(struct net_device *dev) |
1623 | { | 1654 | { |
1624 | struct myri10ge_priv *mgp; | 1655 | struct myri10ge_priv *mgp; |
@@ -1634,10 +1665,13 @@ static int myri10ge_open(struct net_device *dev) | |||
1634 | status = myri10ge_reset(mgp); | 1665 | status = myri10ge_reset(mgp); |
1635 | if (status != 0) { | 1666 | if (status != 0) { |
1636 | printk(KERN_ERR "myri10ge: %s: failed reset\n", dev->name); | 1667 | printk(KERN_ERR "myri10ge: %s: failed reset\n", dev->name); |
1637 | mgp->running = MYRI10GE_ETH_STOPPED; | 1668 | goto abort_with_nothing; |
1638 | return -ENXIO; | ||
1639 | } | 1669 | } |
1640 | 1670 | ||
1671 | status = myri10ge_request_irq(mgp); | ||
1672 | if (status != 0) | ||
1673 | goto abort_with_nothing; | ||
1674 | |||
1641 | /* decide what small buffer size to use. For good TCP rx | 1675 | /* decide what small buffer size to use. For good TCP rx |
1642 | * performance, it is important to not receive 1514 byte | 1676 | * performance, it is important to not receive 1514 byte |
1643 | * frames into jumbo buffers, as it confuses the socket buffer | 1677 | * frames into jumbo buffers, as it confuses the socket buffer |
@@ -1677,7 +1711,7 @@ static int myri10ge_open(struct net_device *dev) | |||
1677 | "myri10ge: %s: failed to get ring sizes or locations\n", | 1711 | "myri10ge: %s: failed to get ring sizes or locations\n", |
1678 | dev->name); | 1712 | dev->name); |
1679 | mgp->running = MYRI10GE_ETH_STOPPED; | 1713 | mgp->running = MYRI10GE_ETH_STOPPED; |
1680 | return -ENXIO; | 1714 | goto abort_with_irq; |
1681 | } | 1715 | } |
1682 | 1716 | ||
1683 | if (mgp->mtrr >= 0) { | 1717 | if (mgp->mtrr >= 0) { |
@@ -1708,7 +1742,7 @@ static int myri10ge_open(struct net_device *dev) | |||
1708 | 1742 | ||
1709 | status = myri10ge_allocate_rings(dev); | 1743 | status = myri10ge_allocate_rings(dev); |
1710 | if (status != 0) | 1744 | if (status != 0) |
1711 | goto abort_with_nothing; | 1745 | goto abort_with_irq; |
1712 | 1746 | ||
1713 | /* now give firmware buffers sizes, and MTU */ | 1747 | /* now give firmware buffers sizes, and MTU */ |
1714 | cmd.data0 = dev->mtu + ETH_HLEN + VLAN_HLEN; | 1748 | cmd.data0 = dev->mtu + ETH_HLEN + VLAN_HLEN; |
@@ -1771,6 +1805,9 @@ static int myri10ge_open(struct net_device *dev) | |||
1771 | abort_with_rings: | 1805 | abort_with_rings: |
1772 | myri10ge_free_rings(dev); | 1806 | myri10ge_free_rings(dev); |
1773 | 1807 | ||
1808 | abort_with_irq: | ||
1809 | myri10ge_free_irq(mgp); | ||
1810 | |||
1774 | abort_with_nothing: | 1811 | abort_with_nothing: |
1775 | mgp->running = MYRI10GE_ETH_STOPPED; | 1812 | mgp->running = MYRI10GE_ETH_STOPPED; |
1776 | return -ENOMEM; | 1813 | return -ENOMEM; |
@@ -1807,7 +1844,7 @@ static int myri10ge_close(struct net_device *dev) | |||
1807 | printk(KERN_ERR "myri10ge: %s never got down irq\n", dev->name); | 1844 | printk(KERN_ERR "myri10ge: %s never got down irq\n", dev->name); |
1808 | 1845 | ||
1809 | netif_tx_disable(dev); | 1846 | netif_tx_disable(dev); |
1810 | 1847 | myri10ge_free_irq(mgp); | |
1811 | myri10ge_free_rings(dev); | 1848 | myri10ge_free_rings(dev); |
1812 | 1849 | ||
1813 | mgp->running = MYRI10GE_ETH_STOPPED; | 1850 | mgp->running = MYRI10GE_ETH_STOPPED; |
@@ -2481,34 +2518,6 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp) | |||
2481 | } | 2518 | } |
2482 | } | 2519 | } |
2483 | 2520 | ||
2484 | static void myri10ge_save_state(struct myri10ge_priv *mgp) | ||
2485 | { | ||
2486 | struct pci_dev *pdev = mgp->pdev; | ||
2487 | int cap; | ||
2488 | |||
2489 | pci_save_state(pdev); | ||
2490 | /* now save PCIe and MSI state that Linux will not | ||
2491 | * save for us */ | ||
2492 | cap = pci_find_capability(pdev, PCI_CAP_ID_EXP); | ||
2493 | pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL, &mgp->devctl); | ||
2494 | cap = pci_find_capability(pdev, PCI_CAP_ID_MSI); | ||
2495 | pci_read_config_word(pdev, cap + PCI_MSI_FLAGS, &mgp->msi_flags); | ||
2496 | } | ||
2497 | |||
2498 | static void myri10ge_restore_state(struct myri10ge_priv *mgp) | ||
2499 | { | ||
2500 | struct pci_dev *pdev = mgp->pdev; | ||
2501 | int cap; | ||
2502 | |||
2503 | /* restore PCIe and MSI state that linux will not */ | ||
2504 | cap = pci_find_capability(pdev, PCI_CAP_ID_EXP); | ||
2505 | pci_write_config_dword(pdev, cap + PCI_CAP_ID_EXP, mgp->devctl); | ||
2506 | cap = pci_find_capability(pdev, PCI_CAP_ID_MSI); | ||
2507 | pci_write_config_word(pdev, cap + PCI_MSI_FLAGS, mgp->msi_flags); | ||
2508 | |||
2509 | pci_restore_state(pdev); | ||
2510 | } | ||
2511 | |||
2512 | #ifdef CONFIG_PM | 2521 | #ifdef CONFIG_PM |
2513 | 2522 | ||
2514 | static int myri10ge_suspend(struct pci_dev *pdev, pm_message_t state) | 2523 | static int myri10ge_suspend(struct pci_dev *pdev, pm_message_t state) |
@@ -2529,11 +2538,10 @@ static int myri10ge_suspend(struct pci_dev *pdev, pm_message_t state) | |||
2529 | rtnl_unlock(); | 2538 | rtnl_unlock(); |
2530 | } | 2539 | } |
2531 | myri10ge_dummy_rdma(mgp, 0); | 2540 | myri10ge_dummy_rdma(mgp, 0); |
2532 | free_irq(pdev->irq, mgp); | 2541 | pci_save_state(pdev); |
2533 | myri10ge_save_state(mgp); | ||
2534 | pci_disable_device(pdev); | 2542 | pci_disable_device(pdev); |
2535 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); | 2543 | |
2536 | return 0; | 2544 | return pci_set_power_state(pdev, pci_choose_state(pdev, state)); |
2537 | } | 2545 | } |
2538 | 2546 | ||
2539 | static int myri10ge_resume(struct pci_dev *pdev) | 2547 | static int myri10ge_resume(struct pci_dev *pdev) |
@@ -2555,34 +2563,33 @@ static int myri10ge_resume(struct pci_dev *pdev) | |||
2555 | mgp->dev->name); | 2563 | mgp->dev->name); |
2556 | return -EIO; | 2564 | return -EIO; |
2557 | } | 2565 | } |
2558 | myri10ge_restore_state(mgp); | 2566 | |
2567 | status = pci_restore_state(pdev); | ||
2568 | if (status) | ||
2569 | return status; | ||
2559 | 2570 | ||
2560 | status = pci_enable_device(pdev); | 2571 | status = pci_enable_device(pdev); |
2561 | if (status < 0) { | 2572 | if (status) { |
2562 | dev_err(&pdev->dev, "failed to enable device\n"); | 2573 | dev_err(&pdev->dev, "failed to enable device\n"); |
2563 | return -EIO; | 2574 | return status; |
2564 | } | 2575 | } |
2565 | 2576 | ||
2566 | pci_set_master(pdev); | 2577 | pci_set_master(pdev); |
2567 | 2578 | ||
2568 | status = request_irq(pdev->irq, myri10ge_intr, IRQF_SHARED, | ||
2569 | netdev->name, mgp); | ||
2570 | if (status != 0) { | ||
2571 | dev_err(&pdev->dev, "failed to allocate IRQ\n"); | ||
2572 | goto abort_with_enabled; | ||
2573 | } | ||
2574 | |||
2575 | myri10ge_reset(mgp); | 2579 | myri10ge_reset(mgp); |
2576 | myri10ge_dummy_rdma(mgp, 1); | 2580 | myri10ge_dummy_rdma(mgp, 1); |
2577 | 2581 | ||
2578 | /* Save configuration space to be restored if the | 2582 | /* Save configuration space to be restored if the |
2579 | * nic resets due to a parity error */ | 2583 | * nic resets due to a parity error */ |
2580 | myri10ge_save_state(mgp); | 2584 | pci_save_state(pdev); |
2581 | 2585 | ||
2582 | if (netif_running(netdev)) { | 2586 | if (netif_running(netdev)) { |
2583 | rtnl_lock(); | 2587 | rtnl_lock(); |
2584 | myri10ge_open(netdev); | 2588 | status = myri10ge_open(netdev); |
2585 | rtnl_unlock(); | 2589 | rtnl_unlock(); |
2590 | if (status != 0) | ||
2591 | goto abort_with_enabled; | ||
2592 | |||
2586 | } | 2593 | } |
2587 | netif_device_attach(netdev); | 2594 | netif_device_attach(netdev); |
2588 | 2595 | ||
@@ -2640,7 +2647,11 @@ static void myri10ge_watchdog(struct work_struct *work) | |||
2640 | * when the driver was loaded, or the last time the | 2647 | * when the driver was loaded, or the last time the |
2641 | * nic was resumed from power saving mode. | 2648 | * nic was resumed from power saving mode. |
2642 | */ | 2649 | */ |
2643 | myri10ge_restore_state(mgp); | 2650 | pci_restore_state(mgp->pdev); |
2651 | |||
2652 | /* save state again for accounting reasons */ | ||
2653 | pci_save_state(mgp->pdev); | ||
2654 | |||
2644 | } else { | 2655 | } else { |
2645 | /* if we get back -1's from our slot, perhaps somebody | 2656 | /* if we get back -1's from our slot, perhaps somebody |
2646 | * powered off our card. Don't try to reset it in | 2657 | * powered off our card. Don't try to reset it in |
@@ -2856,23 +2867,6 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2856 | goto abort_with_firmware; | 2867 | goto abort_with_firmware; |
2857 | } | 2868 | } |
2858 | 2869 | ||
2859 | if (myri10ge_msi) { | ||
2860 | status = pci_enable_msi(pdev); | ||
2861 | if (status != 0) | ||
2862 | dev_err(&pdev->dev, | ||
2863 | "Error %d setting up MSI; falling back to xPIC\n", | ||
2864 | status); | ||
2865 | else | ||
2866 | mgp->msi_enabled = 1; | ||
2867 | } | ||
2868 | |||
2869 | status = request_irq(pdev->irq, myri10ge_intr, IRQF_SHARED, | ||
2870 | netdev->name, mgp); | ||
2871 | if (status != 0) { | ||
2872 | dev_err(&pdev->dev, "failed to allocate IRQ\n"); | ||
2873 | goto abort_with_firmware; | ||
2874 | } | ||
2875 | |||
2876 | pci_set_drvdata(pdev, mgp); | 2870 | pci_set_drvdata(pdev, mgp); |
2877 | if ((myri10ge_initial_mtu + ETH_HLEN) > MYRI10GE_MAX_ETHER_MTU) | 2871 | if ((myri10ge_initial_mtu + ETH_HLEN) > MYRI10GE_MAX_ETHER_MTU) |
2878 | myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN; | 2872 | myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN; |
@@ -2896,7 +2890,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2896 | 2890 | ||
2897 | /* Save configuration space to be restored if the | 2891 | /* Save configuration space to be restored if the |
2898 | * nic resets due to a parity error */ | 2892 | * nic resets due to a parity error */ |
2899 | myri10ge_save_state(mgp); | 2893 | pci_save_state(pdev); |
2900 | 2894 | ||
2901 | /* Setup the watchdog timer */ | 2895 | /* Setup the watchdog timer */ |
2902 | setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer, | 2896 | setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer, |
@@ -2907,19 +2901,16 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2907 | status = register_netdev(netdev); | 2901 | status = register_netdev(netdev); |
2908 | if (status != 0) { | 2902 | if (status != 0) { |
2909 | dev_err(&pdev->dev, "register_netdev failed: %d\n", status); | 2903 | dev_err(&pdev->dev, "register_netdev failed: %d\n", status); |
2910 | goto abort_with_irq; | 2904 | goto abort_with_state; |
2911 | } | 2905 | } |
2912 | dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n", | 2906 | dev_info(dev, "%d, tx bndry %d, fw %s, WC %s\n", |
2913 | (mgp->msi_enabled ? "MSI" : "xPIC"), | ||
2914 | pdev->irq, mgp->tx.boundary, mgp->fw_name, | 2907 | pdev->irq, mgp->tx.boundary, mgp->fw_name, |
2915 | (mgp->mtrr >= 0 ? "Enabled" : "Disabled")); | 2908 | (mgp->mtrr >= 0 ? "Enabled" : "Disabled")); |
2916 | 2909 | ||
2917 | return 0; | 2910 | return 0; |
2918 | 2911 | ||
2919 | abort_with_irq: | 2912 | abort_with_state: |
2920 | free_irq(pdev->irq, mgp); | 2913 | pci_restore_state(pdev); |
2921 | if (mgp->msi_enabled) | ||
2922 | pci_disable_msi(pdev); | ||
2923 | 2914 | ||
2924 | abort_with_firmware: | 2915 | abort_with_firmware: |
2925 | myri10ge_dummy_rdma(mgp, 0); | 2916 | myri10ge_dummy_rdma(mgp, 0); |
@@ -2970,12 +2961,12 @@ static void myri10ge_remove(struct pci_dev *pdev) | |||
2970 | flush_scheduled_work(); | 2961 | flush_scheduled_work(); |
2971 | netdev = mgp->dev; | 2962 | netdev = mgp->dev; |
2972 | unregister_netdev(netdev); | 2963 | unregister_netdev(netdev); |
2973 | free_irq(pdev->irq, mgp); | ||
2974 | if (mgp->msi_enabled) | ||
2975 | pci_disable_msi(pdev); | ||
2976 | 2964 | ||
2977 | myri10ge_dummy_rdma(mgp, 0); | 2965 | myri10ge_dummy_rdma(mgp, 0); |
2978 | 2966 | ||
2967 | /* avoid a memory leak */ | ||
2968 | pci_restore_state(pdev); | ||
2969 | |||
2979 | bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry); | 2970 | bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry); |
2980 | dma_free_coherent(&pdev->dev, bytes, | 2971 | dma_free_coherent(&pdev->dev, bytes, |
2981 | mgp->rx_done.entry, mgp->rx_done.bus); | 2972 | mgp->rx_done.entry, mgp->rx_done.bus); |
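Two themes run through the myri10ge hunks: IRQ and MSI setup moves into myri10ge_request_irq()/myri10ge_free_irq() called from open/close, and the hand-rolled PCIe/MSI config-space caching is dropped in favour of the PCI core's pci_save_state()/pci_restore_state(). A minimal suspend/resume sketch of the latter pattern, with hypothetical callbacks and assuming the kernel APIs of this era (pci_restore_state() still returns an error code here):

#include <linux/pci.h>

static int demo_suspend(struct pci_dev *pdev, pm_message_t state)
{
	/* ... quiesce the device and free its IRQ first ... */
	pci_save_state(pdev);		/* covers standard, PCIe and MSI config space */
	pci_disable_device(pdev);
	return pci_set_power_state(pdev, pci_choose_state(pdev, state));
}

static int demo_resume(struct pci_dev *pdev)
{
	int err;

	pci_set_power_state(pdev, PCI_D0);
	err = pci_restore_state(pdev);
	if (err)
		return err;
	err = pci_enable_device(pdev);
	if (err)
		return err;
	pci_set_master(pdev);
	/* ... re-request the IRQ (enabling MSI if requested) and reopen ... */
	return 0;
}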
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h index b4c4fc0c7218..6490acf05305 100644 --- a/drivers/net/netxen/netxen_nic.h +++ b/drivers/net/netxen/netxen_nic.h | |||
@@ -63,7 +63,7 @@ | |||
63 | 63 | ||
64 | #include "netxen_nic_hw.h" | 64 | #include "netxen_nic_hw.h" |
65 | 65 | ||
66 | #define NETXEN_NIC_BUILD_NO "1" | 66 | #define NETXEN_NIC_BUILD_NO "4" |
67 | #define _NETXEN_NIC_LINUX_MAJOR 3 | 67 | #define _NETXEN_NIC_LINUX_MAJOR 3 |
68 | #define _NETXEN_NIC_LINUX_MINOR 3 | 68 | #define _NETXEN_NIC_LINUX_MINOR 3 |
69 | #define _NETXEN_NIC_LINUX_SUBVERSION 2 | 69 | #define _NETXEN_NIC_LINUX_SUBVERSION 2 |
@@ -137,7 +137,7 @@ extern struct workqueue_struct *netxen_workq; | |||
137 | #define THIRD_PAGE_GROUP_SIZE THIRD_PAGE_GROUP_END - THIRD_PAGE_GROUP_START | 137 | #define THIRD_PAGE_GROUP_SIZE THIRD_PAGE_GROUP_END - THIRD_PAGE_GROUP_START |
138 | 138 | ||
139 | #define MAX_RX_BUFFER_LENGTH 1760 | 139 | #define MAX_RX_BUFFER_LENGTH 1760 |
140 | #define MAX_RX_JUMBO_BUFFER_LENGTH 9046 | 140 | #define MAX_RX_JUMBO_BUFFER_LENGTH 8062 |
141 | #define MAX_RX_LRO_BUFFER_LENGTH ((48*1024)-512) | 141 | #define MAX_RX_LRO_BUFFER_LENGTH ((48*1024)-512) |
142 | #define RX_DMA_MAP_LEN (MAX_RX_BUFFER_LENGTH - 2) | 142 | #define RX_DMA_MAP_LEN (MAX_RX_BUFFER_LENGTH - 2) |
143 | #define RX_JUMBO_DMA_MAP_LEN \ | 143 | #define RX_JUMBO_DMA_MAP_LEN \ |
@@ -199,9 +199,9 @@ enum { | |||
199 | (RCV_DESC_NORMAL))) | 199 | (RCV_DESC_NORMAL))) |
200 | 200 | ||
201 | #define MAX_CMD_DESCRIPTORS 1024 | 201 | #define MAX_CMD_DESCRIPTORS 1024 |
202 | #define MAX_RCV_DESCRIPTORS 32768 | 202 | #define MAX_RCV_DESCRIPTORS 16384 |
203 | #define MAX_JUMBO_RCV_DESCRIPTORS 4096 | 203 | #define MAX_JUMBO_RCV_DESCRIPTORS 1024 |
204 | #define MAX_LRO_RCV_DESCRIPTORS 2048 | 204 | #define MAX_LRO_RCV_DESCRIPTORS 64 |
205 | #define MAX_RCVSTATUS_DESCRIPTORS MAX_RCV_DESCRIPTORS | 205 | #define MAX_RCVSTATUS_DESCRIPTORS MAX_RCV_DESCRIPTORS |
206 | #define MAX_JUMBO_RCV_DESC MAX_JUMBO_RCV_DESCRIPTORS | 206 | #define MAX_JUMBO_RCV_DESC MAX_JUMBO_RCV_DESCRIPTORS |
207 | #define MAX_RCV_DESC MAX_RCV_DESCRIPTORS | 207 | #define MAX_RCV_DESC MAX_RCV_DESCRIPTORS |
@@ -852,8 +852,6 @@ struct netxen_adapter { | |||
852 | spinlock_t tx_lock; | 852 | spinlock_t tx_lock; |
853 | spinlock_t lock; | 853 | spinlock_t lock; |
854 | struct work_struct watchdog_task; | 854 | struct work_struct watchdog_task; |
855 | struct work_struct tx_timeout_task; | ||
856 | struct net_device *netdev; | ||
857 | struct timer_list watchdog_timer; | 855 | struct timer_list watchdog_timer; |
858 | 856 | ||
859 | u32 curr_window; | 857 | u32 curr_window; |
@@ -887,7 +885,6 @@ struct netxen_adapter { | |||
887 | struct netxen_recv_context recv_ctx[MAX_RCV_CTX]; | 885 | struct netxen_recv_context recv_ctx[MAX_RCV_CTX]; |
888 | 886 | ||
889 | int is_up; | 887 | int is_up; |
890 | int number; | ||
891 | struct netxen_dummy_dma dummy_dma; | 888 | struct netxen_dummy_dma dummy_dma; |
892 | 889 | ||
893 | /* Context interface shared between card and host */ | 890 | /* Context interface shared between card and host */ |
@@ -950,6 +947,7 @@ struct netxen_port { | |||
950 | struct pci_dev *pdev; | 947 | struct pci_dev *pdev; |
951 | struct net_device_stats net_stats; | 948 | struct net_device_stats net_stats; |
952 | struct netxen_port_stats stats; | 949 | struct netxen_port_stats stats; |
950 | struct work_struct tx_timeout_task; | ||
953 | }; | 951 | }; |
954 | 952 | ||
955 | #define PCI_OFFSET_FIRST_RANGE(adapter, off) \ | 953 | #define PCI_OFFSET_FIRST_RANGE(adapter, off) \ |
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c index 5dac50c87075..c0c31d1914a7 100644 --- a/drivers/net/netxen/netxen_nic_hw.c +++ b/drivers/net/netxen/netxen_nic_hw.c | |||
@@ -376,7 +376,7 @@ void netxen_tso_check(struct netxen_adapter *adapter, | |||
376 | ((skb->nh.iph)->ihl * sizeof(u32)) + | 376 | ((skb->nh.iph)->ihl * sizeof(u32)) + |
377 | ((skb->h.th)->doff * sizeof(u32)); | 377 | ((skb->h.th)->doff * sizeof(u32)); |
378 | netxen_set_cmd_desc_opcode(desc, TX_TCP_LSO); | 378 | netxen_set_cmd_desc_opcode(desc, TX_TCP_LSO); |
379 | } else if (skb->ip_summed == CHECKSUM_COMPLETE) { | 379 | } else if (skb->ip_summed == CHECKSUM_PARTIAL) { |
380 | if (skb->nh.iph->protocol == IPPROTO_TCP) { | 380 | if (skb->nh.iph->protocol == IPPROTO_TCP) { |
381 | netxen_set_cmd_desc_opcode(desc, TX_TCP_PKT); | 381 | netxen_set_cmd_desc_opcode(desc, TX_TCP_PKT); |
382 | } else if (skb->nh.iph->protocol == IPPROTO_UDP) { | 382 | } else if (skb->nh.iph->protocol == IPPROTO_UDP) { |
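The one-line netxen_nic_hw.c fix matters because CHECKSUM_PARTIAL is the transmit-side request ("hardware, please finish this checksum"), while CHECKSUM_COMPLETE describes received packets. A small sketch of the transmit test, with placeholder opcode values rather than the driver's descriptor encoding (skb->nh.iph follows this kernel generation's sk_buff layout, as in the hunk above):

#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/in.h>

static int demo_tx_csum_opcode(const struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;			/* stack did not ask for offload */
	if (skb->nh.iph->protocol == IPPROTO_TCP)
		return 1;			/* "TCP checksum" opcode, placeholder */
	if (skb->nh.iph->protocol == IPPROTO_UDP)
		return 2;			/* "UDP checksum" opcode, placeholder */
	return 0;
}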
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index deacc61e199c..c3e41f368554 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c | |||
@@ -927,7 +927,7 @@ u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max) | |||
927 | } | 927 | } |
928 | netxen_process_rcv(adapter, ctxid, desc); | 928 | netxen_process_rcv(adapter, ctxid, desc); |
929 | netxen_clear_sts_owner(desc); | 929 | netxen_clear_sts_owner(desc); |
930 | netxen_set_sts_owner(desc, STATUS_OWNER_PHANTOM); | 930 | netxen_set_sts_owner(desc, cpu_to_le16(STATUS_OWNER_PHANTOM)); |
931 | consumer = (consumer + 1) & (adapter->max_rx_desc_count - 1); | 931 | consumer = (consumer + 1) & (adapter->max_rx_desc_count - 1); |
932 | count++; | 932 | count++; |
933 | } | 933 | } |
@@ -1022,7 +1022,7 @@ int netxen_process_cmd_ring(unsigned long data) | |||
1022 | && netif_carrier_ok(port->netdev)) | 1022 | && netif_carrier_ok(port->netdev)) |
1023 | && ((jiffies - port->netdev->trans_start) > | 1023 | && ((jiffies - port->netdev->trans_start) > |
1024 | port->netdev->watchdog_timeo)) { | 1024 | port->netdev->watchdog_timeo)) { |
1025 | SCHEDULE_WORK(&port->adapter->tx_timeout_task); | 1025 | SCHEDULE_WORK(&port->tx_timeout_task); |
1026 | } | 1026 | } |
1027 | 1027 | ||
1028 | last_consumer = get_next_index(last_consumer, | 1028 | last_consumer = get_next_index(last_consumer, |
@@ -1137,13 +1137,13 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid) | |||
1137 | */ | 1137 | */ |
1138 | dma = pci_map_single(pdev, skb->data, rcv_desc->dma_size, | 1138 | dma = pci_map_single(pdev, skb->data, rcv_desc->dma_size, |
1139 | PCI_DMA_FROMDEVICE); | 1139 | PCI_DMA_FROMDEVICE); |
1140 | pdesc->addr_buffer = dma; | 1140 | pdesc->addr_buffer = cpu_to_le64(dma); |
1141 | buffer->skb = skb; | 1141 | buffer->skb = skb; |
1142 | buffer->state = NETXEN_BUFFER_BUSY; | 1142 | buffer->state = NETXEN_BUFFER_BUSY; |
1143 | buffer->dma = dma; | 1143 | buffer->dma = dma; |
1144 | /* make a rcv descriptor */ | 1144 | /* make a rcv descriptor */ |
1145 | pdesc->reference_handle = buffer->ref_handle; | 1145 | pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); |
1146 | pdesc->buffer_length = rcv_desc->dma_size; | 1146 | pdesc->buffer_length = cpu_to_le32(rcv_desc->dma_size); |
1147 | DPRINTK(INFO, "done writing descripter\n"); | 1147 | DPRINTK(INFO, "done writing descripter\n"); |
1148 | producer = | 1148 | producer = |
1149 | get_next_index(producer, rcv_desc->max_rx_desc_count); | 1149 | get_next_index(producer, rcv_desc->max_rx_desc_count); |
@@ -1231,8 +1231,8 @@ void netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, uint32_t ctx, | |||
1231 | PCI_DMA_FROMDEVICE); | 1231 | PCI_DMA_FROMDEVICE); |
1232 | 1232 | ||
1233 | /* make a rcv descriptor */ | 1233 | /* make a rcv descriptor */ |
1234 | pdesc->reference_handle = le16_to_cpu(buffer->ref_handle); | 1234 | pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); |
1235 | pdesc->buffer_length = le16_to_cpu(rcv_desc->dma_size); | 1235 | pdesc->buffer_length = cpu_to_le16(rcv_desc->dma_size); |
1236 | pdesc->addr_buffer = cpu_to_le64(buffer->dma); | 1236 | pdesc->addr_buffer = cpu_to_le64(buffer->dma); |
1237 | DPRINTK(INFO, "done writing descripter\n"); | 1237 | DPRINTK(INFO, "done writing descripter\n"); |
1238 | producer = | 1238 | producer = |
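The cpu_to_le16()/cpu_to_le32()/cpu_to_le64() conversions above are what make the receive descriptors correct on big-endian PowerPC: the NIC reads them in little-endian order, so every CPU store must be swapped on big-endian hosts (the macros are no-ops on x86). A sketch with an illustrative descriptor layout, not the NetXen one:

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_rx_desc {			/* device-visible, little-endian layout */
	__le64 addr_buffer;
	__le32 buffer_length;
	__le16 reference_handle;
} __attribute__((packed));

static void demo_fill_desc(struct demo_rx_desc *d, dma_addr_t dma,
			   u32 len, u16 handle)
{
	d->addr_buffer      = cpu_to_le64(dma);		/* swapped on big-endian hosts */
	d->buffer_length    = cpu_to_le32(len);
	d->reference_handle = cpu_to_le16(handle);
}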
diff --git a/drivers/net/netxen/netxen_nic_isr.c b/drivers/net/netxen/netxen_nic_isr.c index 1b45f50fa6aa..06847d4252c3 100644 --- a/drivers/net/netxen/netxen_nic_isr.c +++ b/drivers/net/netxen/netxen_nic_isr.c | |||
@@ -157,7 +157,8 @@ void netxen_nic_isr_other(struct netxen_adapter *adapter) | |||
157 | for (portno = 0; portno < NETXEN_NIU_MAX_GBE_PORTS; portno++) { | 157 | for (portno = 0; portno < NETXEN_NIU_MAX_GBE_PORTS; portno++) { |
158 | linkup = val & 1; | 158 | linkup = val & 1; |
159 | if (linkup != (qg_linksup & 1)) { | 159 | if (linkup != (qg_linksup & 1)) { |
160 | printk(KERN_INFO "%s: PORT %d link %s\n", | 160 | printk(KERN_INFO "%s: %s PORT %d link %s\n", |
161 | adapter->port[portno]->netdev->name, | ||
161 | netxen_nic_driver_name, portno, | 162 | netxen_nic_driver_name, portno, |
162 | ((linkup == 0) ? "down" : "up")); | 163 | ((linkup == 0) ? "down" : "up")); |
163 | netxen_indicate_link_status(adapter, portno, linkup); | 164 | netxen_indicate_link_status(adapter, portno, linkup); |
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 1658ca1fa230..8a5792fea774 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c | |||
@@ -52,8 +52,6 @@ char netxen_nic_driver_name[] = "netxen-nic"; | |||
52 | static char netxen_nic_driver_string[] = "NetXen Network Driver version " | 52 | static char netxen_nic_driver_string[] = "NetXen Network Driver version " |
53 | NETXEN_NIC_LINUX_VERSIONID; | 53 | NETXEN_NIC_LINUX_VERSIONID; |
54 | 54 | ||
55 | struct netxen_adapter *g_adapter = NULL; | ||
56 | |||
57 | #define NETXEN_NETDEV_WEIGHT 120 | 55 | #define NETXEN_NETDEV_WEIGHT 120 |
58 | #define NETXEN_ADAPTER_UP_MAGIC 777 | 56 | #define NETXEN_ADAPTER_UP_MAGIC 777 |
59 | #define NETXEN_NIC_PEG_TUNE 0 | 57 | #define NETXEN_NIC_PEG_TUNE 0 |
@@ -87,6 +85,8 @@ static struct pci_device_id netxen_pci_tbl[] __devinitdata = { | |||
87 | {PCI_DEVICE(0x4040, 0x0003)}, | 85 | {PCI_DEVICE(0x4040, 0x0003)}, |
88 | {PCI_DEVICE(0x4040, 0x0004)}, | 86 | {PCI_DEVICE(0x4040, 0x0004)}, |
89 | {PCI_DEVICE(0x4040, 0x0005)}, | 87 | {PCI_DEVICE(0x4040, 0x0005)}, |
88 | {PCI_DEVICE(0x4040, 0x0024)}, | ||
89 | {PCI_DEVICE(0x4040, 0x0025)}, | ||
90 | {0,} | 90 | {0,} |
91 | }; | 91 | }; |
92 | 92 | ||
@@ -126,7 +126,6 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
126 | struct netxen_cmd_buffer *cmd_buf_arr = NULL; | 126 | struct netxen_cmd_buffer *cmd_buf_arr = NULL; |
127 | u64 mac_addr[FLASH_NUM_PORTS + 1]; | 127 | u64 mac_addr[FLASH_NUM_PORTS + 1]; |
128 | int valid_mac = 0; | 128 | int valid_mac = 0; |
129 | static int netxen_cards_found = 0; | ||
130 | 129 | ||
131 | printk(KERN_INFO "%s \n", netxen_nic_driver_string); | 130 | printk(KERN_INFO "%s \n", netxen_nic_driver_string); |
132 | /* In current scheme, we use only PCI function 0 */ | 131 | /* In current scheme, we use only PCI function 0 */ |
@@ -217,9 +216,6 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
217 | goto err_out_dbunmap; | 216 | goto err_out_dbunmap; |
218 | } | 217 | } |
219 | 218 | ||
220 | if (netxen_cards_found == 0) { | ||
221 | g_adapter = adapter; | ||
222 | } | ||
223 | adapter->max_tx_desc_count = MAX_CMD_DESCRIPTORS; | 219 | adapter->max_tx_desc_count = MAX_CMD_DESCRIPTORS; |
224 | adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS; | 220 | adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS; |
225 | adapter->max_jumbo_rx_desc_count = MAX_JUMBO_RCV_DESCRIPTORS; | 221 | adapter->max_jumbo_rx_desc_count = MAX_JUMBO_RCV_DESCRIPTORS; |
@@ -424,8 +420,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
424 | netdev->dev_addr); | 420 | netdev->dev_addr); |
425 | } | 421 | } |
426 | } | 422 | } |
427 | adapter->netdev = netdev; | 423 | INIT_WORK(&port->tx_timeout_task, netxen_tx_timeout_task); |
428 | INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task); | ||
429 | netif_carrier_off(netdev); | 424 | netif_carrier_off(netdev); |
430 | netif_stop_queue(netdev); | 425 | netif_stop_queue(netdev); |
431 | 426 | ||
@@ -440,6 +435,11 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
440 | adapter->port[i] = port; | 435 | adapter->port[i] = port; |
441 | } | 436 | } |
442 | 437 | ||
438 | writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE)); | ||
439 | netxen_pinit_from_rom(adapter, 0); | ||
440 | udelay(500); | ||
441 | netxen_load_firmware(adapter); | ||
442 | netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE); | ||
443 | /* | 443 | /* |
444 | * delay a while to ensure that the Pegs are up & running. | 444 | * delay a while to ensure that the Pegs are up & running. |
445 | * Otherwise, we might see some flaky behaviour. | 445 | * Otherwise, we might see some flaky behaviour. |
@@ -457,7 +457,6 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
457 | break; | 457 | break; |
458 | } | 458 | } |
459 | 459 | ||
460 | adapter->number = netxen_cards_found; | ||
461 | adapter->driver_mismatch = 0; | 460 | adapter->driver_mismatch = 0; |
462 | 461 | ||
463 | return 0; | 462 | return 0; |
@@ -527,6 +526,8 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev) | |||
527 | 526 | ||
528 | netxen_nic_stop_all_ports(adapter); | 527 | netxen_nic_stop_all_ports(adapter); |
529 | /* leave the hw in the same state as reboot */ | 528 | /* leave the hw in the same state as reboot */ |
529 | netxen_pinit_from_rom(adapter, 0); | ||
530 | writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE)); | ||
530 | netxen_load_firmware(adapter); | 531 | netxen_load_firmware(adapter); |
531 | netxen_free_adapter_offload(adapter); | 532 | netxen_free_adapter_offload(adapter); |
532 | 533 | ||
@@ -817,8 +818,8 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
817 | /* Take skb->data itself */ | 818 | /* Take skb->data itself */ |
818 | pbuf = &adapter->cmd_buf_arr[producer]; | 819 | pbuf = &adapter->cmd_buf_arr[producer]; |
819 | if ((netdev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size > 0) { | 820 | if ((netdev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size > 0) { |
820 | pbuf->mss = skb_shinfo(skb)->gso_size; | 821 | pbuf->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); |
821 | hwdesc->mss = skb_shinfo(skb)->gso_size; | 822 | hwdesc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); |
822 | } else { | 823 | } else { |
823 | pbuf->mss = 0; | 824 | pbuf->mss = 0; |
824 | hwdesc->mss = 0; | 825 | hwdesc->mss = 0; |
@@ -952,11 +953,6 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
952 | static void netxen_watchdog(unsigned long v) | 953 | static void netxen_watchdog(unsigned long v) |
953 | { | 954 | { |
954 | struct netxen_adapter *adapter = (struct netxen_adapter *)v; | 955 | struct netxen_adapter *adapter = (struct netxen_adapter *)v; |
955 | if (adapter != g_adapter) { | ||
956 | printk("%s: ***BUG*** adapter[%p] != g_adapter[%p]\n", | ||
957 | __FUNCTION__, adapter, g_adapter); | ||
958 | return; | ||
959 | } | ||
960 | 956 | ||
961 | SCHEDULE_WORK(&adapter->watchdog_task); | 957 | SCHEDULE_WORK(&adapter->watchdog_task); |
962 | } | 958 | } |
@@ -965,23 +961,23 @@ static void netxen_tx_timeout(struct net_device *netdev) | |||
965 | { | 961 | { |
966 | struct netxen_port *port = (struct netxen_port *)netdev_priv(netdev); | 962 | struct netxen_port *port = (struct netxen_port *)netdev_priv(netdev); |
967 | 963 | ||
968 | SCHEDULE_WORK(&port->adapter->tx_timeout_task); | 964 | SCHEDULE_WORK(&port->tx_timeout_task); |
969 | } | 965 | } |
970 | 966 | ||
971 | static void netxen_tx_timeout_task(struct work_struct *work) | 967 | static void netxen_tx_timeout_task(struct work_struct *work) |
972 | { | 968 | { |
973 | struct netxen_adapter *adapter = | 969 | struct netxen_port *port = |
974 | container_of(work, struct netxen_adapter, tx_timeout_task); | 970 | container_of(work, struct netxen_port, tx_timeout_task); |
975 | struct net_device *netdev = adapter->netdev; | 971 | struct net_device *netdev = port->netdev; |
976 | unsigned long flags; | 972 | unsigned long flags; |
977 | 973 | ||
978 | printk(KERN_ERR "%s %s: transmit timeout, resetting.\n", | 974 | printk(KERN_ERR "%s %s: transmit timeout, resetting.\n", |
979 | netxen_nic_driver_name, netdev->name); | 975 | netxen_nic_driver_name, netdev->name); |
980 | 976 | ||
981 | spin_lock_irqsave(&adapter->lock, flags); | 977 | spin_lock_irqsave(&port->adapter->lock, flags); |
982 | netxen_nic_close(netdev); | 978 | netxen_nic_close(netdev); |
983 | netxen_nic_open(netdev); | 979 | netxen_nic_open(netdev); |
984 | spin_unlock_irqrestore(&adapter->lock, flags); | 980 | spin_unlock_irqrestore(&port->adapter->lock, flags); |
985 | netdev->trans_start = jiffies; | 981 | netdev->trans_start = jiffies; |
986 | netif_wake_queue(netdev); | 982 | netif_wake_queue(netdev); |
987 | } | 983 | } |
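The netxen_nic_main.c hunks move the tx-timeout work item from the shared adapter into each port, and the handler recovers its port with container_of() under this kernel's work API, where the callback receives the work_struct itself. A stripped-down sketch of that ownership pattern (illustrative structures):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>

struct demo_port {
	struct net_device *netdev;
	struct work_struct tx_timeout_task;	/* one work item per port */
};

static void demo_tx_timeout_task(struct work_struct *work)
{
	struct demo_port *port =
		container_of(work, struct demo_port, tx_timeout_task);

	/* reset only this port's queue, not a single global adapter */
	netif_wake_queue(port->netdev);
}

static void demo_port_init(struct demo_port *port)
{
	INIT_WORK(&port->tx_timeout_task, demo_tx_timeout_task);
}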
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index f83b41d4cb0e..577babd4c938 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -225,7 +225,6 @@ MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl); | |||
225 | 225 | ||
226 | static int rx_copybreak = 200; | 226 | static int rx_copybreak = 200; |
227 | static int use_dac; | 227 | static int use_dac; |
228 | static int ignore_parity_err; | ||
229 | static struct { | 228 | static struct { |
230 | u32 msg_enable; | 229 | u32 msg_enable; |
231 | } debug = { -1 }; | 230 | } debug = { -1 }; |
@@ -471,8 +470,6 @@ module_param(use_dac, int, 0); | |||
471 | MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot."); | 470 | MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot."); |
472 | module_param_named(debug, debug.msg_enable, int, 0); | 471 | module_param_named(debug, debug.msg_enable, int, 0); |
473 | MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)"); | 472 | MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)"); |
474 | module_param_named(ignore_parity_err, ignore_parity_err, bool, 0); | ||
475 | MODULE_PARM_DESC(ignore_parity_err, "Ignore PCI parity error as target. Default: false"); | ||
476 | MODULE_LICENSE("GPL"); | 473 | MODULE_LICENSE("GPL"); |
477 | MODULE_VERSION(RTL8169_VERSION); | 474 | MODULE_VERSION(RTL8169_VERSION); |
478 | 475 | ||
@@ -1885,7 +1882,6 @@ static void rtl8169_hw_start(struct net_device *dev) | |||
1885 | (tp->mac_version == RTL_GIGA_MAC_VER_02) || | 1882 | (tp->mac_version == RTL_GIGA_MAC_VER_02) || |
1886 | (tp->mac_version == RTL_GIGA_MAC_VER_03) || | 1883 | (tp->mac_version == RTL_GIGA_MAC_VER_03) || |
1887 | (tp->mac_version == RTL_GIGA_MAC_VER_04)) | 1884 | (tp->mac_version == RTL_GIGA_MAC_VER_04)) |
1888 | RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); | ||
1889 | rtl8169_set_rx_tx_config_registers(tp); | 1885 | rtl8169_set_rx_tx_config_registers(tp); |
1890 | 1886 | ||
1891 | cmd = RTL_R16(CPlusCmd); | 1887 | cmd = RTL_R16(CPlusCmd); |
@@ -2388,7 +2384,7 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev) | |||
2388 | * | 2384 | * |
2389 | * Feel free to adjust to your needs. | 2385 | * Feel free to adjust to your needs. |
2390 | */ | 2386 | */ |
2391 | if (ignore_parity_err) | 2387 | if (pdev->broken_parity_status) |
2392 | pci_cmd &= ~PCI_COMMAND_PARITY; | 2388 | pci_cmd &= ~PCI_COMMAND_PARITY; |
2393 | else | 2389 | else |
2394 | pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY; | 2390 | pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY; |
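The r8169 change retires the driver-wide ignore_parity_err parameter in favour of the PCI core's per-device broken_parity_status flag, so the workaround applies only to devices actually marked as misreporting parity. A sketch of the decision, assuming the caller has already read PCI_COMMAND into pci_cmd:

#include <linux/pci.h>

static u16 demo_adjust_parity(struct pci_dev *pdev, u16 pci_cmd)
{
	if (pdev->broken_parity_status)
		pci_cmd &= ~PCI_COMMAND_PARITY;		/* this device misreports parity */
	else
		pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;
	return pci_cmd;
}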
diff --git a/drivers/net/skge.c b/drivers/net/skge.c index 8a39376f87dc..deedfd5f8226 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c | |||
@@ -2920,6 +2920,7 @@ static int skge_poll(struct net_device *dev, int *budget) | |||
2920 | struct skge_hw *hw = skge->hw; | 2920 | struct skge_hw *hw = skge->hw; |
2921 | struct skge_ring *ring = &skge->rx_ring; | 2921 | struct skge_ring *ring = &skge->rx_ring; |
2922 | struct skge_element *e; | 2922 | struct skge_element *e; |
2923 | unsigned long flags; | ||
2923 | int to_do = min(dev->quota, *budget); | 2924 | int to_do = min(dev->quota, *budget); |
2924 | int work_done = 0; | 2925 | int work_done = 0; |
2925 | 2926 | ||
@@ -2957,12 +2958,12 @@ static int skge_poll(struct net_device *dev, int *budget) | |||
2957 | if (work_done >= to_do) | 2958 | if (work_done >= to_do) |
2958 | return 1; /* not done */ | 2959 | return 1; /* not done */ |
2959 | 2960 | ||
2960 | spin_lock_irq(&hw->hw_lock); | 2961 | spin_lock_irqsave(&hw->hw_lock, flags); |
2961 | __netif_rx_complete(dev); | 2962 | __netif_rx_complete(dev); |
2962 | hw->intr_mask |= irqmask[skge->port]; | 2963 | hw->intr_mask |= irqmask[skge->port]; |
2963 | skge_write32(hw, B0_IMSK, hw->intr_mask); | 2964 | skge_write32(hw, B0_IMSK, hw->intr_mask); |
2964 | skge_read32(hw, B0_IMSK); | 2965 | skge_read32(hw, B0_IMSK); |
2965 | spin_unlock_irq(&hw->hw_lock); | 2966 | spin_unlock_irqrestore(&hw->hw_lock, flags); |
2966 | 2967 | ||
2967 | return 0; | 2968 | return 0; |
2968 | } | 2969 | } |
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index fb1d2c30c1bb..a6601e8d423c 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c | |||
@@ -569,8 +569,8 @@ static void sky2_phy_power(struct sky2_hw *hw, unsigned port, int onoff) | |||
569 | if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) | 569 | if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) |
570 | onoff = !onoff; | 570 | onoff = !onoff; |
571 | 571 | ||
572 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); | ||
572 | reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); | 573 | reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); |
573 | |||
574 | if (onoff) | 574 | if (onoff) |
575 | /* Turn off phy power saving */ | 575 | /* Turn off phy power saving */ |
576 | reg1 &= ~phy_power[port]; | 576 | reg1 &= ~phy_power[port]; |
@@ -579,6 +579,7 @@ static void sky2_phy_power(struct sky2_hw *hw, unsigned port, int onoff) | |||
579 | 579 | ||
580 | sky2_pci_write32(hw, PCI_DEV_REG1, reg1); | 580 | sky2_pci_write32(hw, PCI_DEV_REG1, reg1); |
581 | sky2_pci_read32(hw, PCI_DEV_REG1); | 581 | sky2_pci_read32(hw, PCI_DEV_REG1); |
582 | sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); | ||
582 | udelay(100); | 583 | udelay(100); |
583 | } | 584 | } |
584 | 585 | ||
@@ -1511,6 +1512,13 @@ static int sky2_down(struct net_device *dev) | |||
1511 | imask &= ~portirq_msk[port]; | 1512 | imask &= ~portirq_msk[port]; |
1512 | sky2_write32(hw, B0_IMSK, imask); | 1513 | sky2_write32(hw, B0_IMSK, imask); |
1513 | 1514 | ||
1515 | /* | ||
1516 | * Both ports share the NAPI poll on port 0, so if necessary undo the | ||
1517 | * the disable that is done in dev_close. | ||
1518 | */ | ||
1519 | if (sky2->port == 0 && hw->ports > 1) | ||
1520 | netif_poll_enable(dev); | ||
1521 | |||
1514 | sky2_gmac_reset(hw, port); | 1522 | sky2_gmac_reset(hw, port); |
1515 | 1523 | ||
1516 | /* Stop transmitter */ | 1524 | /* Stop transmitter */ |
@@ -3631,6 +3639,29 @@ static int sky2_resume(struct pci_dev *pdev) | |||
3631 | out: | 3639 | out: |
3632 | return err; | 3640 | return err; |
3633 | } | 3641 | } |
3642 | |||
3643 | /* BIOS resume runs after device (it's a bug in PM) | ||
3644 | * as a temporary workaround on suspend/resume leave MSI disabled | ||
3645 | */ | ||
3646 | static int sky2_suspend_late(struct pci_dev *pdev, pm_message_t state) | ||
3647 | { | ||
3648 | struct sky2_hw *hw = pci_get_drvdata(pdev); | ||
3649 | |||
3650 | free_irq(pdev->irq, hw); | ||
3651 | if (hw->msi) { | ||
3652 | pci_disable_msi(pdev); | ||
3653 | hw->msi = 0; | ||
3654 | } | ||
3655 | return 0; | ||
3656 | } | ||
3657 | |||
3658 | static int sky2_resume_early(struct pci_dev *pdev) | ||
3659 | { | ||
3660 | struct sky2_hw *hw = pci_get_drvdata(pdev); | ||
3661 | struct net_device *dev = hw->dev[0]; | ||
3662 | |||
3663 | return request_irq(pdev->irq, sky2_intr, IRQF_SHARED, dev->name, hw); | ||
3664 | } | ||
3634 | #endif | 3665 | #endif |
3635 | 3666 | ||
3636 | static struct pci_driver sky2_driver = { | 3667 | static struct pci_driver sky2_driver = { |
@@ -3641,6 +3672,8 @@ static struct pci_driver sky2_driver = { | |||
3641 | #ifdef CONFIG_PM | 3672 | #ifdef CONFIG_PM |
3642 | .suspend = sky2_suspend, | 3673 | .suspend = sky2_suspend, |
3643 | .resume = sky2_resume, | 3674 | .resume = sky2_resume, |
3675 | .suspend_late = sky2_suspend_late, | ||
3676 | .resume_early = sky2_resume_early, | ||
3644 | #endif | 3677 | #endif |
3645 | }; | 3678 | }; |
3646 | 3679 | ||
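The sky2 hunk registers suspend_late/resume_early hooks so the IRQ and MSI are torn down after the normal suspend path and reclaimed before resume, working around a BIOS whose resume code runs after the driver's. A minimal sketch of such hooks, with a placeholder handler and names, using the pci_driver fields this kernel provides:

#include <linux/pci.h>
#include <linux/interrupt.h>

static irqreturn_t demo_intr(int irq, void *dev_id)
{
	return IRQ_HANDLED;			/* placeholder handler */
}

static int demo_suspend_late(struct pci_dev *pdev, pm_message_t state)
{
	void *hw = pci_get_drvdata(pdev);

	free_irq(pdev->irq, hw);
	pci_disable_msi(pdev);			/* guard with an "msi enabled" flag in real code */
	return 0;
}

static int demo_resume_early(struct pci_dev *pdev)
{
	void *hw = pci_get_drvdata(pdev);

	/* MSI stays off across the BIOS resume; reclaim a plain shared IRQ */
	return request_irq(pdev->irq, demo_intr, IRQF_SHARED, "demo", hw);
}

static struct pci_driver demo_driver = {
	/* .name, .id_table, .probe, .remove, .suspend, .resume as usual */
	.suspend_late	= demo_suspend_late,
	.resume_early	= demo_resume_early,
};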
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index 4587f23f4e4b..8e5d82051bd4 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c | |||
@@ -265,15 +265,19 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status); | |||
265 | static int velocity_suspend(struct pci_dev *pdev, pm_message_t state); | 265 | static int velocity_suspend(struct pci_dev *pdev, pm_message_t state); |
266 | static int velocity_resume(struct pci_dev *pdev); | 266 | static int velocity_resume(struct pci_dev *pdev); |
267 | 267 | ||
268 | static DEFINE_SPINLOCK(velocity_dev_list_lock); | ||
269 | static LIST_HEAD(velocity_dev_list); | ||
270 | |||
271 | #endif | ||
272 | |||
273 | #if defined(CONFIG_PM) && defined(CONFIG_INET) | ||
274 | |||
268 | static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr); | 275 | static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr); |
269 | 276 | ||
270 | static struct notifier_block velocity_inetaddr_notifier = { | 277 | static struct notifier_block velocity_inetaddr_notifier = { |
271 | .notifier_call = velocity_netdev_event, | 278 | .notifier_call = velocity_netdev_event, |
272 | }; | 279 | }; |
273 | 280 | ||
274 | static DEFINE_SPINLOCK(velocity_dev_list_lock); | ||
275 | static LIST_HEAD(velocity_dev_list); | ||
276 | |||
277 | static void velocity_register_notifier(void) | 281 | static void velocity_register_notifier(void) |
278 | { | 282 | { |
279 | register_inetaddr_notifier(&velocity_inetaddr_notifier); | 283 | register_inetaddr_notifier(&velocity_inetaddr_notifier); |
@@ -284,12 +288,12 @@ static void velocity_unregister_notifier(void) | |||
284 | unregister_inetaddr_notifier(&velocity_inetaddr_notifier); | 288 | unregister_inetaddr_notifier(&velocity_inetaddr_notifier); |
285 | } | 289 | } |
286 | 290 | ||
287 | #else /* CONFIG_PM */ | 291 | #else |
288 | 292 | ||
289 | #define velocity_register_notifier() do {} while (0) | 293 | #define velocity_register_notifier() do {} while (0) |
290 | #define velocity_unregister_notifier() do {} while (0) | 294 | #define velocity_unregister_notifier() do {} while (0) |
291 | 295 | ||
292 | #endif /* !CONFIG_PM */ | 296 | #endif |
293 | 297 | ||
294 | /* | 298 | /* |
295 | * Internal board variants. At the moment we have only one | 299 | * Internal board variants. At the moment we have only one |
@@ -3292,6 +3296,8 @@ static int velocity_resume(struct pci_dev *pdev) | |||
3292 | return 0; | 3296 | return 0; |
3293 | } | 3297 | } |
3294 | 3298 | ||
3299 | #ifdef CONFIG_INET | ||
3300 | |||
3295 | static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr) | 3301 | static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr) |
3296 | { | 3302 | { |
3297 | struct in_ifaddr *ifa = (struct in_ifaddr *) ptr; | 3303 | struct in_ifaddr *ifa = (struct in_ifaddr *) ptr; |
@@ -3312,4 +3318,6 @@ static int velocity_netdev_event(struct notifier_block *nb, unsigned long notifi | |||
3312 | } | 3318 | } |
3313 | return NOTIFY_DONE; | 3319 | return NOTIFY_DONE; |
3314 | } | 3320 | } |
3321 | |||
3322 | #endif | ||
3315 | #endif | 3323 | #endif |
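The via-velocity reshuffle keeps the device list available under CONFIG_PM alone but compiles the inetaddr notifier only when CONFIG_INET is also set, since register_inetaddr_notifier() is not built otherwise. A compressed sketch of that guard (hypothetical names):

#include <linux/notifier.h>

#if defined(CONFIG_PM) && defined(CONFIG_INET)
#include <linux/inetdevice.h>

static int demo_inetaddr_event(struct notifier_block *nb,
			       unsigned long event, void *ptr)
{
	return NOTIFY_DONE;			/* placeholder: react to address changes here */
}

static struct notifier_block demo_inetaddr_notifier = {
	.notifier_call = demo_inetaddr_event,
};

static void demo_register_notifier(void)
{
	register_inetaddr_notifier(&demo_inetaddr_notifier);
}
#else
#define demo_register_notifier()	do {} while (0)
#endif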
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c index 00ca704ece35..a08524191b5d 100644 --- a/drivers/net/wireless/zd1211rw/zd_mac.c +++ b/drivers/net/wireless/zd1211rw/zd_mac.c | |||
@@ -41,6 +41,8 @@ static void housekeeping_disable(struct zd_mac *mac); | |||
41 | 41 | ||
42 | static void set_multicast_hash_handler(struct work_struct *work); | 42 | static void set_multicast_hash_handler(struct work_struct *work); |
43 | 43 | ||
44 | static void do_rx(unsigned long mac_ptr); | ||
45 | |||
44 | int zd_mac_init(struct zd_mac *mac, | 46 | int zd_mac_init(struct zd_mac *mac, |
45 | struct net_device *netdev, | 47 | struct net_device *netdev, |
46 | struct usb_interface *intf) | 48 | struct usb_interface *intf) |
@@ -53,6 +55,10 @@ int zd_mac_init(struct zd_mac *mac, | |||
53 | INIT_DELAYED_WORK(&mac->set_rts_cts_work, set_rts_cts_work); | 55 | INIT_DELAYED_WORK(&mac->set_rts_cts_work, set_rts_cts_work); |
54 | INIT_DELAYED_WORK(&mac->set_basic_rates_work, set_basic_rates_work); | 56 | INIT_DELAYED_WORK(&mac->set_basic_rates_work, set_basic_rates_work); |
55 | 57 | ||
58 | skb_queue_head_init(&mac->rx_queue); | ||
59 | tasklet_init(&mac->rx_tasklet, do_rx, (unsigned long)mac); | ||
60 | tasklet_disable(&mac->rx_tasklet); | ||
61 | |||
56 | ieee_init(ieee); | 62 | ieee_init(ieee); |
57 | softmac_init(ieee80211_priv(netdev)); | 63 | softmac_init(ieee80211_priv(netdev)); |
58 | zd_chip_init(&mac->chip, netdev, intf); | 64 | zd_chip_init(&mac->chip, netdev, intf); |
@@ -140,6 +146,8 @@ out: | |||
140 | void zd_mac_clear(struct zd_mac *mac) | 146 | void zd_mac_clear(struct zd_mac *mac) |
141 | { | 147 | { |
142 | flush_workqueue(zd_workqueue); | 148 | flush_workqueue(zd_workqueue); |
149 | skb_queue_purge(&mac->rx_queue); | ||
150 | tasklet_kill(&mac->rx_tasklet); | ||
143 | zd_chip_clear(&mac->chip); | 151 | zd_chip_clear(&mac->chip); |
144 | ZD_ASSERT(!spin_is_locked(&mac->lock)); | 152 | ZD_ASSERT(!spin_is_locked(&mac->lock)); |
145 | ZD_MEMCLEAR(mac, sizeof(struct zd_mac)); | 153 | ZD_MEMCLEAR(mac, sizeof(struct zd_mac)); |
@@ -168,6 +176,8 @@ int zd_mac_open(struct net_device *netdev) | |||
168 | struct zd_chip *chip = &mac->chip; | 176 | struct zd_chip *chip = &mac->chip; |
169 | int r; | 177 | int r; |
170 | 178 | ||
179 | tasklet_enable(&mac->rx_tasklet); | ||
180 | |||
171 | r = zd_chip_enable_int(chip); | 181 | r = zd_chip_enable_int(chip); |
172 | if (r < 0) | 182 | if (r < 0) |
173 | goto out; | 183 | goto out; |
@@ -218,6 +228,8 @@ int zd_mac_stop(struct net_device *netdev) | |||
218 | */ | 228 | */ |
219 | 229 | ||
220 | zd_chip_disable_rx(chip); | 230 | zd_chip_disable_rx(chip); |
231 | skb_queue_purge(&mac->rx_queue); | ||
232 | tasklet_disable(&mac->rx_tasklet); | ||
221 | housekeeping_disable(mac); | 233 | housekeeping_disable(mac); |
222 | ieee80211softmac_stop(netdev); | 234 | ieee80211softmac_stop(netdev); |
223 | 235 | ||
@@ -470,13 +482,13 @@ static void bssinfo_change(struct net_device *netdev, u32 changes) | |||
470 | 482 | ||
471 | if (changes & IEEE80211SOFTMAC_BSSINFOCHG_RATES) { | 483 | if (changes & IEEE80211SOFTMAC_BSSINFOCHG_RATES) { |
472 | /* Set RTS rate to highest available basic rate */ | 484 | /* Set RTS rate to highest available basic rate */ |
473 | u8 rate = ieee80211softmac_highest_supported_rate(softmac, | 485 | u8 hi_rate = ieee80211softmac_highest_supported_rate(softmac, |
474 | &bssinfo->supported_rates, 1); | 486 | &bssinfo->supported_rates, 1); |
475 | rate = rate_to_zd_rate(rate); | 487 | hi_rate = rate_to_zd_rate(hi_rate); |
476 | 488 | ||
477 | spin_lock_irqsave(&mac->lock, flags); | 489 | spin_lock_irqsave(&mac->lock, flags); |
478 | if (rate != mac->rts_rate) { | 490 | if (hi_rate != mac->rts_rate) { |
479 | mac->rts_rate = rate; | 491 | mac->rts_rate = hi_rate; |
480 | need_set_rts_cts = 1; | 492 | need_set_rts_cts = 1; |
481 | } | 493 | } |
482 | spin_unlock_irqrestore(&mac->lock, flags); | 494 | spin_unlock_irqrestore(&mac->lock, flags); |
@@ -1072,43 +1084,75 @@ static int fill_rx_stats(struct ieee80211_rx_stats *stats, | |||
1072 | return 0; | 1084 | return 0; |
1073 | } | 1085 | } |
1074 | 1086 | ||
1075 | int zd_mac_rx(struct zd_mac *mac, const u8 *buffer, unsigned int length) | 1087 | static void zd_mac_rx(struct zd_mac *mac, struct sk_buff *skb) |
1076 | { | 1088 | { |
1077 | int r; | 1089 | int r; |
1078 | struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac); | 1090 | struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac); |
1079 | struct ieee80211_rx_stats stats; | 1091 | struct ieee80211_rx_stats stats; |
1080 | const struct rx_status *status; | 1092 | const struct rx_status *status; |
1081 | struct sk_buff *skb; | ||
1082 | 1093 | ||
1083 | if (length < ZD_PLCP_HEADER_SIZE + IEEE80211_1ADDR_LEN + | 1094 | if (skb->len < ZD_PLCP_HEADER_SIZE + IEEE80211_1ADDR_LEN + |
1084 | IEEE80211_FCS_LEN + sizeof(struct rx_status)) | 1095 | IEEE80211_FCS_LEN + sizeof(struct rx_status)) |
1085 | return -EINVAL; | 1096 | { |
1097 | dev_dbg_f(zd_mac_dev(mac), "Packet with length %u too small.\n", | ||
1098 | skb->len); | ||
1099 | goto free_skb; | ||
1100 | } | ||
1086 | 1101 | ||
1087 | r = fill_rx_stats(&stats, &status, mac, buffer, length); | 1102 | r = fill_rx_stats(&stats, &status, mac, skb->data, skb->len); |
1088 | if (r) | 1103 | if (r) { |
1089 | return r; | 1104 | /* Only packets with rx errors are included here. */ |
1105 | goto free_skb; | ||
1106 | } | ||
1090 | 1107 | ||
1091 | length -= ZD_PLCP_HEADER_SIZE+IEEE80211_FCS_LEN+ | 1108 | __skb_pull(skb, ZD_PLCP_HEADER_SIZE); |
1092 | sizeof(struct rx_status); | 1109 | __skb_trim(skb, skb->len - |
1093 | buffer += ZD_PLCP_HEADER_SIZE; | 1110 | (IEEE80211_FCS_LEN + sizeof(struct rx_status))); |
1094 | 1111 | ||
1095 | update_qual_rssi(mac, buffer, length, stats.signal, stats.rssi); | 1112 | update_qual_rssi(mac, skb->data, skb->len, stats.signal, |
1113 | status->signal_strength); | ||
1096 | 1114 | ||
1097 | r = filter_rx(ieee, buffer, length, &stats); | 1115 | r = filter_rx(ieee, skb->data, skb->len, &stats); |
1098 | if (r <= 0) | 1116 | if (r <= 0) { |
1099 | return r; | 1117 | if (r < 0) |
1118 | dev_dbg_f(zd_mac_dev(mac), "Error in packet.\n"); | ||
1119 | goto free_skb; | ||
1120 | } | ||
1100 | 1121 | ||
1101 | skb = dev_alloc_skb(sizeof(struct zd_rt_hdr) + length); | ||
1102 | if (!skb) | ||
1103 | return -ENOMEM; | ||
1104 | if (ieee->iw_mode == IW_MODE_MONITOR) | 1122 | if (ieee->iw_mode == IW_MODE_MONITOR) |
1105 | fill_rt_header(skb_put(skb, sizeof(struct zd_rt_hdr)), mac, | 1123 | fill_rt_header(skb_push(skb, sizeof(struct zd_rt_hdr)), mac, |
1106 | &stats, status); | 1124 | &stats, status); |
1107 | memcpy(skb_put(skb, length), buffer, length); | ||
1108 | 1125 | ||
1109 | r = ieee80211_rx(ieee, skb, &stats); | 1126 | r = ieee80211_rx(ieee, skb, &stats); |
1110 | if (!r) | 1127 | if (r) |
1111 | dev_kfree_skb_any(skb); | 1128 | return; |
1129 | free_skb: | ||
1130 | /* We are always in a soft irq. */ | ||
1131 | dev_kfree_skb(skb); | ||
1132 | } | ||
1133 | |||
1134 | static void do_rx(unsigned long mac_ptr) | ||
1135 | { | ||
1136 | struct zd_mac *mac = (struct zd_mac *)mac_ptr; | ||
1137 | struct sk_buff *skb; | ||
1138 | |||
1139 | while ((skb = skb_dequeue(&mac->rx_queue)) != NULL) | ||
1140 | zd_mac_rx(mac, skb); | ||
1141 | } | ||
1142 | |||
1143 | int zd_mac_rx_irq(struct zd_mac *mac, const u8 *buffer, unsigned int length) | ||
1144 | { | ||
1145 | struct sk_buff *skb; | ||
1146 | |||
1147 | skb = dev_alloc_skb(sizeof(struct zd_rt_hdr) + length); | ||
1148 | if (!skb) { | ||
1149 | dev_warn(zd_mac_dev(mac), "Could not allocate skb.\n"); | ||
1150 | return -ENOMEM; | ||
1151 | } | ||
1152 | skb_reserve(skb, sizeof(struct zd_rt_hdr)); | ||
1153 | memcpy(__skb_put(skb, length), buffer, length); | ||
1154 | skb_queue_tail(&mac->rx_queue, skb); | ||
1155 | tasklet_schedule(&mac->rx_tasklet); | ||
1112 | return 0; | 1156 | return 0; |
1113 | } | 1157 | } |
1114 | 1158 | ||
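The zd_mac.c hunks above move RX processing out of the USB-completion/IRQ path: zd_mac_rx_irq() now only copies the frame into an skb, queues it on mac->rx_queue and schedules rx_tasklet, while do_rx() drains the queue in softirq context and hands each skb to zd_mac_rx(). In isolation the pattern looks roughly like the sketch below; the names (rx_ctx, rx_irq, process_queue) are illustrative and not part of the driver. skb_queue_tail() takes the queue lock with interrupts disabled, so queuing from hard-IRQ context is safe, and because the tasklet runs in softirq context a plain dev_kfree_skb() suffices (hence the "We are always in a soft irq" comment above).

    #include <linux/types.h>
    #include <linux/string.h>
    #include <linux/skbuff.h>
    #include <linux/interrupt.h>

    struct rx_ctx {
    	struct sk_buff_head rx_queue;      /* frames waiting to be processed */
    	struct tasklet_struct rx_tasklet;  /* runs process_queue() */
    };

    /* Tasklet body (softirq context): drain the queue, one frame at a time. */
    static void process_queue(unsigned long data)
    {
    	struct rx_ctx *ctx = (struct rx_ctx *)data;
    	struct sk_buff *skb;

    	while ((skb = skb_dequeue(&ctx->rx_queue)) != NULL) {
    		/* ... validate the frame and hand it to the stack ... */
    		dev_kfree_skb(skb);    /* sketch only: just drop it */
    	}
    }

    /* Hard-IRQ/URB-completion context: copy, queue, schedule, return fast. */
    static int rx_irq(struct rx_ctx *ctx, const u8 *buffer, unsigned int length)
    {
    	struct sk_buff *skb = dev_alloc_skb(length);

    	if (!skb)
    		return -ENOMEM;
    	memcpy(__skb_put(skb, length), buffer, length);
    	skb_queue_tail(&ctx->rx_queue, skb);
    	tasklet_schedule(&ctx->rx_tasklet);
    	return 0;
    }

    static void rx_ctx_init(struct rx_ctx *ctx)
    {
    	skb_queue_head_init(&ctx->rx_queue);
    	tasklet_init(&ctx->rx_tasklet, process_queue, (unsigned long)ctx);
    }

As in the driver, teardown would mirror zd_mac_stop()/zd_mac_clear(): purge the queue with skb_queue_purge() and stop the tasklet with tasklet_disable()/tasklet_kill() so no frame is processed after the device is shut down.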
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h index f0cf05dc7d3e..faf4c7828d4e 100644 --- a/drivers/net/wireless/zd1211rw/zd_mac.h +++ b/drivers/net/wireless/zd1211rw/zd_mac.h | |||
@@ -138,6 +138,9 @@ struct zd_mac { | |||
138 | struct delayed_work set_rts_cts_work; | 138 | struct delayed_work set_rts_cts_work; |
139 | struct delayed_work set_basic_rates_work; | 139 | struct delayed_work set_basic_rates_work; |
140 | 140 | ||
141 | struct tasklet_struct rx_tasklet; | ||
142 | struct sk_buff_head rx_queue; | ||
143 | |||
141 | unsigned int stats_count; | 144 | unsigned int stats_count; |
142 | u8 qual_buffer[ZD_MAC_STATS_BUFFER_SIZE]; | 145 | u8 qual_buffer[ZD_MAC_STATS_BUFFER_SIZE]; |
143 | u8 rssi_buffer[ZD_MAC_STATS_BUFFER_SIZE]; | 146 | u8 rssi_buffer[ZD_MAC_STATS_BUFFER_SIZE]; |
@@ -193,7 +196,7 @@ int zd_mac_stop(struct net_device *netdev); | |||
193 | int zd_mac_set_mac_address(struct net_device *dev, void *p); | 196 | int zd_mac_set_mac_address(struct net_device *dev, void *p); |
194 | void zd_mac_set_multicast_list(struct net_device *netdev); | 197 | void zd_mac_set_multicast_list(struct net_device *netdev); |
195 | 198 | ||
196 | int zd_mac_rx(struct zd_mac *mac, const u8 *buffer, unsigned int length); | 199 | int zd_mac_rx_irq(struct zd_mac *mac, const u8 *buffer, unsigned int length); |
197 | 200 | ||
198 | int zd_mac_set_regdomain(struct zd_mac *zd_mac, u8 regdomain); | 201 | int zd_mac_set_regdomain(struct zd_mac *zd_mac, u8 regdomain); |
199 | u8 zd_mac_get_regdomain(struct zd_mac *zd_mac); | 202 | u8 zd_mac_get_regdomain(struct zd_mac *zd_mac); |
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c index aa782e88754b..605e96e74057 100644 --- a/drivers/net/wireless/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zd1211rw/zd_usb.c | |||
@@ -598,13 +598,13 @@ static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer, | |||
598 | n = l+k; | 598 | n = l+k; |
599 | if (n > length) | 599 | if (n > length) |
600 | return; | 600 | return; |
601 | zd_mac_rx(mac, buffer+l, k); | 601 | zd_mac_rx_irq(mac, buffer+l, k); |
602 | if (i >= 2) | 602 | if (i >= 2) |
603 | return; | 603 | return; |
604 | l = (n+3) & ~3; | 604 | l = (n+3) & ~3; |
605 | } | 605 | } |
606 | } else { | 606 | } else { |
607 | zd_mac_rx(mac, buffer, length); | 607 | zd_mac_rx_irq(mac, buffer, length); |
608 | } | 608 | } |
609 | } | 609 | } |
610 | 610 | ||
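As a side note, the l = (n+3) & ~3; step in handle_rx_packet() above rounds the running offset up to the next 4-byte boundary before the next frame is read out of the USB transfer. The idiom in general form (illustrative helper, not part of the driver):

    /* Round x up to the next multiple of 4. */
    static inline unsigned int round_up4(unsigned int x)
    {
    	return (x + 3) & ~3u;
    }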